% (removed non-LaTeX dataset-extraction artifacts)
\begin{document}
\title{Certain Transformations for Hypergeometric series in $p$-adic setting}
\author{Rupam Barman}
\address{Department of Mathematics, Indian Institute of Technology, Hauz Khas, New Delhi-110016, INDIA}
\curraddr{}
\email{rupam@maths.iitd.ac.in}
\thanks{}
\author{Neelam Saikia}
\address{Department of Mathematics, Indian Institute of Technology, Hauz Khas, New Delhi-110016, INDIA}
\curraddr{}
\email{nlmsaikia1@gmail.com}
\thanks{}
\subjclass[2010]{Primary: 11G20, 33E50; Secondary: 33C99, 11S80,
11T24.}
\date{10th March, 2014}
\keywords{Character of finite fields, Gaussian hypergeometric series, Elliptic curves, Trace of Frobenius, Teichm\"{u}ller character,
$p$-adic Gamma function.}
\begin{abstract} In \cite{mccarthy2}, McCarthy defined a function $_{n}G_{n}[\cdots]$
using the Teichm\"{u}ller character of finite fields and quotients of the $p$-adic gamma function.
This function extends hypergeometric functions over finite fields to the $p$-adic setting.
In this paper, we give certain transformation formulas for the function $_{n}G_{n}[\cdots]$
which are not implied from the analogous hypergeometric functions over finite fields.
\end{abstract}
\maketitle
\section{Introduction and statement of results}
In \cite{greene}, Greene introduced the notion of hypergeometric functions over finite fields or
\emph{Gaussian hypergeometric series}. He established these functions as analogues of classical hypergeometric
functions. Many interesting relations between special values of Gaussian hypergeometric series and the number of
points on certain varieties over finite fields have been obtained. By definition, results involving hypergeometric functions over
finite fields are often restricted to primes in certain congruence classes. For example, the expressions for the trace
of Frobenius map on certain families of elliptic curves given in \cite{BK1, BK2, Fuselier, lennon, lennon2} are restricted to
such congruence classes.
In \cite{mccarthy2}, McCarthy defined a function
$_{n}G_{n}[\cdots]$ which can best be described as an analogue of hypergeometric series in the $p$-adic setting.
He showed how results involving Gaussian hypergeometric series can be extended to a wider class of primes using the function
$_{n}G_{n}[\cdots]$.
\par
Let $p$ be an odd prime, and let $\mathbb{F}_q$ denote the finite field with $q$ elements, where $q=p^r, r\geq 1$.
Let $\phi$ be the quadratic character on $\mathbb{F}_q^{\times}$ extended to all of $\mathbb{F}_q$ by setting $\phi(0):=0$.
Let $\mathbb{Z}_p$ denote the ring of $p$-adic integers.
Let $\Gamma_p(.)$ denote the Morita's $p$-adic gamma function, and let $\omega$ denote the
Teichm\"{u}ller character of $\mathbb{F}_q$. We denote by $\overline{\omega}$ the inverse of $\omega$.
For $x \in \mathbb{Q}$ we let $\lfloor x\rfloor$ denote the greatest integer less than
or equal to $x$ and $\langle x\rangle$ denote the fractional part of $x$, i.e., $x-\lfloor x\rfloor$.
Also, we denote by $\mathbb{Z}^{+}$ and $\mathbb{Z}_{\geq 0}$
the set of positive integers and nonnegative integers, respectively. The definition of the function $_{n}G_{n}[\cdots]$ is
as follows.
\begin{definition}\cite[Definition 5.1]{mccarthy2} \label{defin1}
Let $q=p^r$, for $p$ an odd prime and $r \in \mathbb{Z}^+$, and let $t \in \mathbb{F}_q$.
For $n \in \mathbb{Z}^+$ and $1\leq i\leq n$, let $a_i$, $b_i$ $\in \mathbb{Q}\cap \mathbb{Z}_p$.
Then the function $_{n}G_{n}[\cdots]$ is defined by
\begin{align}
&_nG_n\left[\begin{array}{cccc}
a_1, & a_2, & \ldots, & a_n \\
b_1, & b_2, & \ldots, & b_n
\end{array}|t
\right]_q:=\frac{-1}{q-1}\sum_{j=0}^{q-2}(-1)^{jn}~~\overline{\omega}^j(t)\notag\\
&\times \prod_{i=1}^n\prod_{k=0}^{r-1}(-p)^{-\lfloor \langle a_ip^k \rangle-\frac{jp^k}{q-1} \rfloor -\lfloor\langle -b_ip^k \rangle +\frac{jp^k}{q-1}\rfloor}
\frac{\Gamma_p(\langle (a_i-\frac{j}{q-1})p^k\rangle)}{\Gamma_p(\langle a_ip^k \rangle)}
\frac{\Gamma_p(\langle (-b_i+\frac{j}{q-1})p^k \rangle)}{\Gamma_p(\langle -b_ip^k \rangle)}.\notag
\end{align}
\end{definition}
The aim of this paper is to explore possible transformation formulas for the function $_{n}G_{n}[\cdots]$.
In \cite{mccarthy2}, McCarthy showed that transformations for hypergeometric functions over finite fields can be
re-written in terms of $_{n}G_{n}[\cdots]$. However, such transformations will hold for
all $p$ where the original characters existed over $\mathbb{F}_p$, and hence restricted to primes in certain
congruence classes. In the same paper, McCarthy posed an interesting question
about finding transformations for $_{n}G_{n}[\cdots]$ which exist for all but finitely many $p$. In \cite{BS1}, the authors
find the following two transformations for the function $_{n}G_{n}[\cdots]$ which exist for all prime $p > 3$.
\begin{result}\cite[Corollary 1.5]{BS1}\label{cor1}
Let $q=p^r$, $p>3$ be a prime. Let $a, b \in \mathbb{F}_q^{\times}$ and $-\dfrac{27b^2}{4a^3}\neq 1$. Then
\begin{align}
&{_2}G_2\left[ \begin{array}{cc}
\frac{1}{4}, & \frac{3}{4} \\
\frac{1}{3}, & \frac{2}{3}
\end{array}|-\dfrac{27b^2}{4a^3}
\right]_q\notag\\
&=\left\{
\begin{array}{ll}
\phi(b(k^3+ak+b))\cdot {_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{3}, & \frac{2}{3}
\end{array}|-\dfrac{k^3+ak+b}{4k^3}\right]_q \hbox{if~ $a=-3k^2$;}\\
\phi(-b(3h^2+a))\cdot {_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{4}, & \frac{3}{4}
\end{array}|\dfrac{4(3h^2+a)}{9h^2}
\right]_q \hbox{if ~$h^3+ah+b=0$.}
\end{array}
\right.\notag
\end{align}
\end{result}
Apart from the transformations which can be implied from the hypergeometric
functions over finite fields, the above two transformations are the only transformations for the function $_{n}G_{n}[\cdots]$ in full
generality to date. In this paper, we prove two more such transformations which are given below.
\begin{theorem}\label{MT1}
Let $q=p^r$, $p>3$ be a prime. Let $m=-27d(d^3+8)$, $n=27(d^6-20d^3-8)\in \mathbb{F}_q^{\times}$ be such that $d^3\neq 1$, and
$-\dfrac{27n^2}{4m^3}\neq 1$. Then
\begin{align}
&q\phi(-3d)\cdot {_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{6}, & \frac{5}{6}
\end{array}|\dfrac{1}{d^3}\right]_q\notag\\
&=\alpha-q+\phi(-3(8+92d^3+35d^6))+q\phi(n)\cdot{_2}G_2\left[ \begin{array}{cc}
\frac{1}{4}, & \frac{3}{4} \\
\frac{1}{3}, & \frac{2}{3}
\end{array}|-\dfrac{27n^2}{4m^3}
\right]_q,\notag
\end{align}
where $\alpha=\left\{
\begin{array}{ll}
5-6\phi(-3), & \hbox{if~ $q\equiv 1\pmod{3}$;} \\
1, & \hbox{if~ $q\not\equiv 1\pmod{3}$.}
\end{array}
\right.$
\end{theorem}
Combining Result \ref{cor1} and Theorem \ref{MT1}, we have another four such transformations for the function
$_{n}G_{n}[\cdots]$ which are listed below.
\begin{corollary}\label{cor2}
Let $q=p^r$, $p>3$ be a prime.
Let $\alpha$ be defined as in Theorem \ref{MT1}, and $m=-27d(d^3+8)$, $n=27(d^6-20d^3-8)\in \mathbb{F}_q^{\times}$ be such that $d^3\neq 1$ and
$-\dfrac{27n^2}{4m^3}\neq 1$.
\begin{enumerate}
\item
If $3k^2+m=0$, then
\begin{align}
&q\phi(-3d)\cdot {_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{6}, & \frac{5}{6}
\end{array}|\dfrac{1}{d^3}\right]_q\notag\\
&=\alpha-q+\phi(-3(8+92d^3+35d^6))+q\phi(k^3+mk+n)\notag\\
&~\times{_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{3}, & \frac{2}{3}
\end{array}|-\dfrac{k^3+mk+n}{4k^3}
\right]_q.\notag
\end{align}
\item If $h^3+mh+n=0$, then
\begin{align}
&q\phi(-3d)\cdot {_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{6}, & \frac{5}{6}
\end{array}|\dfrac{1}{d^3}\right]_q\notag\\
&=\alpha-q+\phi(-3(8+92d^3+35d^6))+q\phi(-3h^2-m)\notag\\
&~\times{_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{4}, & \frac{3}{4}
\end{array}|\dfrac{4(3h^2+m)}{9h^2}
\right]_q.\notag
\end{align}
\end{enumerate}
\end{corollary}
For an elliptic curve $E$ defined over $\mathbb{F}_q$,
the trace of Frobenius of $E$ is defined as $a_q(E):=q+1-\#E(\mathbb{F}_q)$, where $\#E(\mathbb{F}_q)$ denotes the number of $\mathbb{F}_q$-
points on $E$ including the point at infinity. Also, $j(E)$ denotes the $j$-invariant of $E$.
We now state a result of McCarthy which will be used to prove our main results.
\begin{theorem}\cite[Theorem 1.2]{mccarthy2}\label{mc}
Let $p>3$ be a prime. Consider an elliptic curve $E_s/\mathbb{F}_p$ of the form $E_s: y^2=x^3+ax+b$ with $j(E_s)\neq 0, 1728$. Then
\begin{align}
a_p(E_s)=\phi(b)\cdot p\cdot {_2}G_2\left[ \begin{array}{cc}
\frac{1}{4}, & \frac{3}{4} \\
\frac{1}{3}, & \frac{2}{3}
\end{array}|-\frac{27b^2}{4a^3}
\right]_p.
\end{align}
\end{theorem}
\begin{remark} McCarthy proved Theorem \ref{mc} over $\mathbb{F}_p$
and remarked that the result could be generalized for $\mathbb{F}_q$.
We have verified that Theorem \ref{mc} is also true for $\mathbb{F}_q$.
We will apply Theorem \ref{mc} for $\mathbb{F}_q$ to prove our results.
\end{remark}
\section{Preliminaries}
Let $\widehat{\mathbb{F}_q^\times}$ denote the set of all multiplicative characters $\chi$ on $\mathbb{F}_q^{\times}$.
It is known that $\widehat{\mathbb{F}_q^\times}$ is a cyclic group of order $q-1$
under the multiplication of characters: $(\chi\psi)(x)=\chi(x)\psi(x)$, $x\in \mathbb{F}_q^{\times}$.
The domain of each
$\chi \in \widehat{\mathbb{F}_q^\times}$, including the trivial character $\varepsilon$, is extended to $\mathbb{F}_q$ by setting $\chi(0):=0$.
We now state the \emph{orthogonality relations} for multiplicative characters in the following lemma.
\begin{lemma}\emph{(\cite[Chapter 8]{ireland}).}\label{lemma2} We have
\begin{enumerate}
\item $\displaystyle\sum_{x\in\mathbb{F}_q}\chi(x)=\left\{
\begin{array}{ll}
q-1 & \hbox{if~ $\chi=\varepsilon$;} \\
0 & \hbox{if ~~$\chi\neq\varepsilon$.}
\end{array}
\right.$
\item $\displaystyle\sum_{\chi\in \widehat{\mathbb{F}_q^\times}}\chi(x)~~=\left\{
\begin{array}{ll}
q-1 & \hbox{if~~ $x=1$;} \\
0 & \hbox{if ~~$x\neq1$.}
\end{array}
\right.$
\end{enumerate}
\end{lemma}
\par Let $\mathbb{Z}_p$ and $\mathbb{Q}_p$ denote the ring of $p$-adic integers and the field of $p$-adic numbers, respectively.
Let $\overline{\mathbb{Q}_p}$ be the algebraic closure of $\mathbb{Q}_p$ and $\mathbb{C}_p$ the completion of $\overline{\mathbb{Q}_p}$.
Let $\mathbb{Z}_q$ be the ring of integers in the unique unramified extension of $\mathbb{Q}_p$ with residue field $\mathbb{F}_q$.
We know that $\chi\in \widehat{\mathbb{F}_q^{\times}}$ takes values in $\mu_{q-1}$, where $\mu_{q-1}$ is the group of
$(q-1)$-th roots of unity in $\mathbb{C}^{\times}$. Since $\mathbb{Z}_q^{\times}$ contains all $(q-1)$-th roots of unity,
we can consider multiplicative characters on $\mathbb{F}_q^\times$
to be maps $\chi: \mathbb{F}_q^{\times} \rightarrow \mathbb{Z}_q^{\times}$.
\par We now introduce some properties of Gauss sums. For further details, see \cite{evans}. Let $\zeta_p$ be a fixed primitive $p$-th root of unity
in $\overline{\mathbb{Q}_p}$. The trace map $\text{tr}: \mathbb{F}_q \rightarrow \mathbb{F}_p$ is given by
\begin{align}
\text{tr}(\alpha)=\alpha + \alpha^p + \alpha^{p^2}+ \cdots + \alpha^{p^{r-1}}.\notag
\end{align}
Then the additive character
$\theta: \mathbb{F}_q \rightarrow \mathbb{Q}_p(\zeta_p)$ is defined by
\begin{align}
\theta(\alpha)=\zeta_p^{\text{tr}(\alpha)}.\notag
\end{align}
For $\chi \in \widehat{\mathbb{F}_q^\times}$, the \emph{Gauss sum} is defined by
\begin{align}
G(\chi):=\sum_{x\in \mathbb{F}_q}\chi(x)\theta(x).\notag
\end{align}
We let $T$ denote a fixed generator of $\widehat{\mathbb{F}_q^\times}$ and denote by $G_m$ the Gauss sum $G(T^m)$.
We now state three results on Gauss sums which will be used to prove our main results.
\begin{lemma}\emph{(\cite[Eqn. 1.12]{greene}).}\label{fusi3}
If $k\in\mathbb{Z}$ and $T^k\neq\varepsilon$, then
$$G_kG_{-k}=qT^k(-1).$$
\end{lemma}
\begin{lemma}\emph{(\cite[Lemma 2.2]{Fuselier}).}\label{lemma1}
For all $\alpha \in \mathbb{F}_q^{\times}$, $$\theta(\alpha)=\frac{1}{q-1}\sum_{m=0}^{q-2}G_{-m}T^m(\alpha).$$
\end{lemma}
\begin{theorem}\emph{(\cite[Davenport-Hasse Relation]{Lang}).}\label{lemma3}
Let $m$ be a positive integer and let $q=p^r$ be a prime power such that $q\equiv 1 \pmod{m}$. For multiplicative characters
$\chi, \psi \in \widehat{\mathbb{F}_q^\times}$, we have
\begin{align}
\prod_{\chi^m=1}G(\chi \psi)=-G(\psi^m)\psi(m^{-m})\prod_{\chi^m=1}G(\chi).
\end{align}
\end{theorem}
\par
In the proof of our results, the Gross-Koblitz formula plays an important role.
It relates the Gauss sums and the $p$-adic gamma function.
For $n \in\mathbb{Z}^+$,
the $p$-adic gamma function $\Gamma_p(n)$ is defined as
\begin{align}
\Gamma_p(n):=(-1)^n\prod_{0<j<n,p\nmid j}j\notag
\end{align}
and one extends it to all $x\in\mathbb{Z}_p$ by setting $\Gamma_p(0):=1$ and
\begin{align}
\Gamma_p(x):=\lim_{n\rightarrow x}\Gamma_p(n)\notag
\end{align}
for $x\neq0$, where $n$ runs through any sequence of positive integers $p$-adically approaching $x$.
This limit exists, is independent of how $n$ approaches $x$,
and determines a continuous function on $\mathbb{Z}_p$ with values in $\mathbb{Z}_p^{\times}$.
\par
Let $\pi \in \mathbb{C}_p$ be the fixed root of $x^{p-1} + p=0$ which satisfies
$\pi \equiv \zeta_p-1 \pmod{(\zeta_p-1)^2}$. Then the Gross-Koblitz formula relates Gauss sums and $p$-adic gamma function as follows.
Recall that $\omega$ denotes the Teichm\"{u}ller character of $\mathbb{F}_q$.
\begin{theorem}\emph{(\cite[Gross-Koblitz]{gross}).}\label{thm4} For $a\in \mathbb{Z}$ and $q=p^r$,
\begin{align}
G(\overline{\omega}^a)=-\pi^{(p-1)\sum_{i=0}^{r-1}\langle\frac{ap^i}{q-1} \rangle}\prod_{i=0}^{r-1}\Gamma_p\left(\langle \frac{ap^i}{q-1} \rangle\right).\notag
\end{align}
\end{theorem}
\section{Proof of the results}
\par We first state a lemma which we will use to prove the main results. This lemma is a generalization of Lemma 4.1 in \cite{mccarthy2}.
For a proof, see \cite{BS1}.
\begin{lemma}\emph{(\cite[Lemma 3.1]{BS1}).}\label{lemma4}
Let $p$ be a prime and $q=p^r$. For $0\leq j\leq q-2$ and $t\in \mathbb{Z^+}$ with $p\nmid t$, we have
\begin{align}\label{eq8}
\omega(t^{tj})\prod_{i=0}^{r-1}\Gamma_p\left(\langle \frac{tp^ij}{q-1}\rangle\right)
\prod_{h=1}^{t-1}\Gamma_p\left(\langle\frac{hp^i}{t}\rangle\right)
=\prod_{i=0}^{r-1}\prod_{h=0}^{t-1}\Gamma_p\left(\langle\frac{p^ih}{t}+\frac{p^ij}{q-1}\rangle\right)
\end{align}
and
\begin{align}\label{eq9}
\omega(t^{-tj})\prod_{i=0}^{r-1}\Gamma_p\left(\langle\frac{-tp^ij}{q-1}\rangle\right)
\prod_{h=1}^{t-1}\Gamma_p\left(\langle \frac{hp^i}{t}\rangle\right)
=\prod_{i=0}^{r-1}\prod_{h=0}^{t-1}\Gamma_p\left(\langle\frac{p^i(1+h)}{t}-\frac{p^ij}{q-1}\rangle \right).
\end{align}
\end{lemma}
\begin{lemma}\label{lemma5}
For $1\leq l\leq q-2$ such that $l\neq \frac{q-1}{2}$, and $0\leq i\leq r-1$, we have
\begin{align}\label{eq-51}
&\lfloor\frac{3lp^i}{q-1}\rfloor+3\lfloor\frac{-lp^i}{q-1}\rfloor-
3\lfloor\frac{-2lp^i}{q-1}\rfloor-\lfloor\frac{6lp^i}{q-1}\rfloor\notag\\
&=-2\lfloor\langle \frac{p^i}{2}\rangle- \frac{lp^i}{q-1}\rfloor
-\lfloor\langle \frac{-p^i}{6} \rangle+ \frac{lp^i}{q-1}\rfloor-\lfloor\langle
\frac{-5p^i}{6} \rangle+\frac{lp^i}{q-1}\rfloor.
\end{align}
\end{lemma}
\begin{proof}
Since $\lfloor\frac{6lp^i}{q-1}\rfloor$ can be written as $6u+v$, for some $u,v \in \mathbb{Z}$ such that $0\leq v\leq 5$,
\eqref{eq-51} can be verified by considering the cases $v=0, 1, \ldots, 5$.
For the case $v=0$ we have $\lfloor\frac{6lp^i}{q-1}\rfloor=6u$, and then it is easy to check that both the sides of
\eqref{eq-51} are equal to zero. Similarly, for other values of $v$ one can verify the result.
\end{proof}
To prove Theorem \ref{MT1}, we will first express the number of points on the Hessian form of elliptic curves. Let $a\in \mathbb{F}_q$
be such that $a^3\neq 1$. Then the Hessian curve over $\mathbb{F}_q$ is given by the cubic equation
\begin{align}\label{hessian1}
C_a: x^3+y^3+1=3axy.
\end{align}
We express the number of $\mathbb{F}_q$-points on $C_a$ in the following theorem.
Let $C_a(\mathbb{F}_q)=\{(x, y)\in \mathbb{F}_q^2: x^3+y^3+1=3axy\}$ be the set of all $\mathbb{F}_q$-points on $C_a$.
\begin{theorem}\label{hessian2}
Let $q=p^r, p > 5$. Then
\begin{align}
\#C_a(\mathbb{F}_q)=\alpha-1+q-q\phi(-3a)\cdot{_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{6}, & \frac{5}{6}
\end{array}|\dfrac{1}{a^3}
\right]_q,\notag
\end{align}
where
$\alpha=\left\{
\begin{array}{ll}
5-6\phi(-3), & \hbox{if~ $q\equiv 1\pmod{3}$;} \\
1, & \hbox{if~ $q\not\equiv 1\pmod{3}$.}
\end{array}
\right.$
\end{theorem}
\begin{proof}
We have
$\#C_{a}(\mathbb{F}_{q})=\#\{(x,y)\in\mathbb{F}_{q}\times\mathbb{F}_{q}:\ P(x,y)=0\}$,\\
where $P(x,y)=x^{3}+y^{3}-3axy+1$. Using the identity
\begin{align}
\sum_{z\in\mathbb{F}_q}\theta(zP(x,y))=\left\{
\begin{array}{ll}
q, & \hbox{if $P(x,y)=0$;} \\
0, & \hbox{if $P(x,y)\neq0$,}
\end{array}
\right.\notag
\end{align}
we obtain
\begin{align}\label{eq-53}
q\cdot\#C_{a}(\mathbb{F}_{q})&=\sum_{x,y,z\in\mathbb{F}_{q}}\theta(zP(x,y))\notag\\
&=q^2+\sum_{z\in\mathbb{F}_{q}^{\times}}\theta(z)+\sum_{y,z\in\mathbb{F}_{q}^{\times}}\theta(zy^3)\theta(z)\notag\\
&~+\sum_{x,z\in\mathbb{F}_{q}^{\times}}\theta(zx^3)\theta(z)+\sum_{x,y,z\in\mathbb{F}_{q}^{\times}}\theta(z)\theta(zx^3)
\theta(zy^3)\theta(-3axyz)\notag\\
&:=q^2+A+B+C+D.
\end{align}
Using Lemma \ref{lemma2}, Lemma \ref{fusi3} and Lemma \ref{lemma1}, we find $A$, $B$, $C$ and $D$ separately. We have
\begin{align}
A=\frac{1}{q-1}\sum_{l=0}^{q-2}G_{-l}\sum_{z\in\mathbb{F}_{q}^{\times}}T^{l}(z).\notag
\end{align}
The inner sum in the expression of $A$ is non zero only if $l=0$, and hence $A=-1$. We have
\begin{align}
B&=\sum_{y,z\in\mathbb{F}_{q}^{\times}}\theta(zy^3)\theta(z)\notag\\
&=\frac{1}{(q-1)^2}\sum_{y,z\in\mathbb{F}_{q}^{\times}}\sum_{l,m=0}^{q-2}G_{-m}T^m(zy^3)G_{-l}T^l(z)\notag\\
&=\frac{1}{(q-1)^2}\sum_{l,m=0}^{q-2}G_{-m}G_{-l}\sum_{z\in\mathbb{F}_{q}^{\times}}T^{l+m}(z)
\sum_{y\in\mathbb{F}_{q}^{\times}}T^{3m}(y),\notag
\end{align}
which is non zero only if $l+m=0$ and $3m=0$.
By considering the following two cases we find $B$.\\
Case 1: If $q\equiv 1\pmod{3}$ then $m=0,\frac{q-1}{3}$ or $\frac{2(q-1)}{3}$; and $l=0,-\frac{q-1}{3}$ or $-\frac{2(q-1)}{3}$. Hence,
\begin{align}
B&=G_{0}G_{0}+G_{-\frac{q-1}{3}}G_{\frac{q-1}{3}}+G_{-\frac{2(q-1)}{3}}G_{\frac{2(q-1)}{3}}\notag\\
&=1+2q.\notag
\end{align}
Case 2: If $q\not\equiv 1\pmod{3}$ then $l=m=0$, and hence $B=G_{0}G_{0}=1$. Also,
\begin{align}
C&=\sum_{x,z\in\mathbb{F}_{q}^{\times}}\theta(zx^3)\theta(z)\notag\\
&=B.\notag
\end{align}
Finally,
\begin{align}
D&=\sum_{x,y,z\in\mathbb{F}_{q}^{\times}}\theta(z)\theta(zx^3)\theta(zy^3)\theta(-3axyz)\notag\\
&=\frac{1}{(q-1)^4}\sum_{x,y,z\in\mathbb{F}_{q}^{\times}}\sum_{l,m,n,k=0}^{q-2}G_{-l}G_{-m}G_{-n}G_{-k}T^{l}(zx^3)\notag\\
&~\times T^{m}(zy^3)T^{n}(z)T^{k}(-3axyz)\notag\\
&=\frac{1}{(q-1)^4}\sum_{l,m,n,k=0}^{q-2}G_{-l}G_{-m}G_{-n}G_{-k}T^{k}(-3a)\notag\\
&\times~\sum_{x\in\mathbb{F}_{q}^{\times}}T^{3l+k}(x)\sum_{y\in\mathbb{F}_{q}^{\times}}T^{3m+k}(y)
\sum_{z\in\mathbb{F}_{q}^{\times}}T^{l+m+n+k}(z),\notag
\end{align}
which is non zero only if $3l+k=0$, $3m+k=0$, and $l+m+n+k=0$.
We now consider the following two cases.\\
Case 1: If $q\equiv 1\pmod{3}$ then $m=l$, $l+\frac{q-1}{3}$ or $l+\frac{2(q-1)}{3}$; $k=-3l$;
and $n=l$, $l-\frac{q-1}{3}$ or $l-\frac{2(q-1)}{3}$, and hence
\begin{align}\label{eq-54}
D&=\frac{1}{q-1}\sum_{l=0}^{q-2}G_{-l}G_{-l}G_{-l}G_{3l}T^{-3l}(-3a)\notag\\
&~+\frac{2}{q-1}\sum_{l=0}^{q-2}G_{-l}G_{-l-\frac{q-1}{3}}G_{-l-\frac{2(q-1)}{3}}G_{3l}T^{-3l}(-3a).
\end{align}
Transforming $l\rightarrow l-\frac{q-1}{2}$, we have
\begin{align}
D&=\frac{1}{q-1}\sum_{l=0}^{q-2}G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{2}}G_{3l-\frac{q-1}{2}}
T^{-3l+\frac{q-1}{2}}(-3a)\notag\\
&~+\frac{2}{q-1}\sum_{l=0}^{q-2}G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{6}}G_{-l-\frac{q-1}{6}}G_{3l-\frac{q-1}{2}}
T^{-3l+\frac{q-1}{2}}(-3a)\notag\\
&=\frac{\phi(-3a)}{q-1}\sum_{l=0}^{q-2}G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{2}}G_{3l-\frac{q-1}{2}}
T^{-3l}(-3a)\notag\\
&~+\frac{2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{6}}G_{-l-\frac{q-1}{6}}G_{3l-\frac{q-1}{2}}
T^{-3l}(-3a).\notag
\end{align}
Using Davenport-Hasse relation for certain values of $m$ and $\psi$ we deduce the following relations:
For $m=2$, $\psi=T^{-l}$, we have
\begin{align}
G_{-l+\frac{q-1}{2}}=\frac{G_{\frac{q-1}{2}}G_{-2l}T^l(4)}{G_{-l}},\notag
\end{align}
and for $m=2$, $\psi=T^{3l}$, we have
\begin{align}
G_{3l-\frac{q-1}{2}}=\frac{G_{\frac{q-1}{2}}G_{6l}T^{-3l}(4)}{G_{3l}}.\notag
\end{align}
For $m=6$, $\psi=T^{-l}$, we have
\begin{align}
&G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{3}}G_{-l+\frac{2(q-1)}{3}}G_{-l+\frac{q-1}{6}}G_{-l+\frac{5(q-1)}{6}}\notag\\
&=\frac{q^2\phi(-1)G_{\frac{q-1}{2}}G_{-6l}T^{6l}(6)}{G_{-l}},\notag
\end{align}
and for $m=3$, $\psi=T^{-l}$, we have
\begin{align}
G_{-l+\frac{q-1}{3}}G_{-l+\frac{2(q-1)}{3}}=\frac{qG_{-3l}T^{3l}(3)}{G_{-l}}.\notag
\end{align}
Using all these expressions and Lemma \ref{lemma2} and Lemma \ref{fusi3} we find that
\begin{align}
D&=\frac{\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-2l}G_{-2l}G_{-2l}G_{6l}G_{\frac{q-1}{2}}^4T^{-3l}(-3a)}
{G_{-l}G_{-l}G_{-l}G_{3l}}\notag\\
&~+\frac{2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-l+\frac{q-1}{2}}G_{-l+\frac{q-1}{3}}G_{-l+\frac{2(q-1)}{3}}
G_{-l+\frac{q-1}{6}}G_{-l+\frac{5(q-1)}{6}}G_{3l-\frac{q-1}{2}}T^{-3l}(-3a)}{G_{-l+\frac{q-1}{3}}G_{-l+\frac{2(q-1)}{3}}}\notag\\
&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-2l}G_{-2l}G_{-2l}G_{6l}T^{-3l}(-3a)}{G_{-l}G_{-l}G_{-l}G_{3l}}\notag\\
&~+\frac{2q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-6l}G_{6l}T^{-3l}(-a)}{G_{3l}G_{-3l}}\notag\\
&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-2l}^3G_{6l}}{G_{-l}^3G_{3l}}T^{-3l}(-3a)+
\frac{6q^2\phi(-3a)\phi(a)}{(q-1)q}\notag\\
&~+\frac{2q^2\phi(-3a)}{q-1}\sum_{l=0, l\neq\frac{q-1}{6},\frac{q-1}{2},\frac{5(q-1)}{6}}^{q-2}T^{3l}(\frac{1}{a})\notag\\
&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-2l}^3G_{6l}}{G_{-l}^3G_{3l}}T^{-3l}(-3a)+
\frac{6q\phi(-3)}{q-1}\notag\\
&~+\frac{2q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}T^{3l}(\frac{1}{a})-\frac{6q^2\phi(-3a)\phi(a)}{q-1}\notag\\
&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\frac{G_{-2l}^3G_{6l}}{G_{-l}^3G_{3l}}T^{-3l}(-3a)-6q\phi(-3)\notag\\
&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}\frac{G_{-2l}^3G_{6l}}{G_{-l}^3G_{3l}}T^{-3l}(-3a)
+\frac{1}{q-1}-6q\phi(-3).\notag
\end{align}
Taking $T=\overline{\omega}$ and using Gross-Koblitz formula we deduce that
\begin{align}
D&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}\pi^{(p-1)\sum_{i=0}^{r-1}
\{3\langle\frac{-2lp^i}{q-1}\rangle+\langle\frac{6lp^i}{q-1}\rangle-\langle\frac{3lp^i}{q-1}\rangle-
3\langle\frac{-lp^i}{q-1}\rangle\}}\notag\\
&~\times\overline{\omega}^l\left(-\frac{1}{27a^3}\right)
\prod_{i=0}^{r-1}\frac{\Gamma_{p}^3(\langle\frac{-2lp^i}{q-1}\rangle)\Gamma_p(\langle\frac{6lp^i}{q-1}\rangle)}
{\Gamma_{p}^3(\langle\frac{-lp^i}{q-1}\rangle)\Gamma_p(\langle\frac{3lp^i}{q-1}\rangle)}\notag\\
&~+\frac{1}{q-1}-6q\phi(-3).\notag
\end{align}
From Lemma \ref{lemma4} we deduce that
\begin{align}
D&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}\pi^{(p-1)s}~~\overline{\omega}^l\left(-\frac{1}{a^3}\right)\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_{p}^3(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_{p}^3(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{p^i}{6}\rangle)}\right\}\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_p(\langle(\frac{1}{2}+\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{5}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_p(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{5p^i}{6}\rangle)}\right\}\notag\\
&~+\frac{1}{q-1}-6q\phi(-3),\notag
\end{align}
where $s=\sum_{i=0}^{r-1}
\{3\langle\frac{-2lp^i}{q-1}\rangle+\langle\frac{6lp^i}{q-1}\rangle-\langle\frac{3lp^i}{q-1}\rangle-
3\langle\frac{-lp^i}{q-1}\rangle\}$.
\begin{align}
D&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}\pi^{(p-1)s}~~\overline{\omega}^l\left(-\frac{1}{a^3}\right)\notag\\
&~\times\underbrace{\prod_{i=0}^{r-1}\left\{\frac{\Gamma_p(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{2}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_p(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{p^i}{2}\rangle)}\right\}}_{I_{l}}\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_{p}^2(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{6}+\frac{l}{q-1})p^i\rangle)\Gamma_p(\langle(\frac{5}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_{p}^2(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{5p^i}{6}\rangle)}\right\}\notag\\
&~+\frac{1}{q-1}-6q\phi(-3).\notag
\end{align}
For $l\neq \frac{q-1}{2}$, we have
\begin{align}\label{eq-55}
I_{l}&=\prod_{i=0}^{r-1}\frac{\Gamma_p(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{2}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_p(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{p^i}{2}\rangle)}\notag\\
&=\prod_{i=0}^{r-1}\frac{\Gamma_p(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(1-\frac{l}{q-1})p^i\rangle)\Gamma_p(\langle\frac{lp^i}{q-1}\rangle)
\Gamma_p(\langle(\frac{1}{2}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_p(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{p^i}{2}\rangle)}\notag\\
&\times\frac{1}{\Gamma_p(\langle(1-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle\frac{lp^i}{q-1}\rangle)}.
\end{align}
Applying Lemma \ref{lemma4} in equation \eqref{eq-55} we deduce that
\begin{align}\label{eq-56}
I_l&=\prod_{i=0}^{r-1}\frac{\Gamma_p(\langle\frac{-2lp^i}{q-1}\rangle)\Gamma_p(\langle\frac{2lp^i}{q-1}\rangle)}
{\Gamma_p(\langle(1-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle\frac{lp^i}{q-1}\rangle)}.
\end{align}
From \cite[Eqn. 2.9]{mccarthy2} we have that for $0<l<q-1$,
$$\prod_{i=0}^{r-1}\Gamma_p(\langle(1-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle\frac{lp^i}{q-1}\rangle)=(-1)^r\overline{\omega}^l(-1).$$
Putting this value in equation \eqref{eq-56}, and using Gross-Koblitz formula [Theorem \ref{thm4}], Lemma \ref{fusi3},
and the fact that $$\langle\frac{-2lp^i}{q-1}\rangle+\langle\frac{2lp^i}{q-1}\rangle=1,$$
we have
\begin{align}
I_l&=\frac{\pi^{(p-1)\sum_{i=0}^{r-1}\langle\frac{-2lp^i}{q-1}\rangle}\prod_{i=0}^{r-1}\Gamma_p\left(\langle\frac{-2lp^i}{q-1}
\rangle\right)
\pi^{(p-1)\sum_{i=0}^{r-1}\langle\frac{2lp^i}{q-1}\rangle}\prod_{i=0}^{r-1}\Gamma_p\left(\langle\frac{2lp^i}{q-1}\rangle\right)}
{(-1)^r\overline{\omega}^l(-1)\pi^{(p-1)\sum_{i=0}^{r-1}\{\langle\frac{-2lp^i}{q-1}\rangle+\langle\frac{2lp^i}{q-1}\rangle\}}}
\notag\\
&=\frac{G(\overline{\omega}^{~-2l})G(\overline{\omega}^{~2l})}{q\overline{\omega}^l(-1)}\notag\\
&=\frac{q~\overline{\omega}^{2l}(-1)}{q~\overline{\omega}^{l}(-1)}\notag\\
&=\overline{\omega}^{l}(-1).\notag
\end{align}
Using the above relation we obtain
\begin{align}\label{eq-52}
D&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}\pi^{(p-1)s}~~
\overline{\omega}^l\left(\frac{1}{a^3}\right)\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_{p}^2(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{6}+\frac{l}{q-1})p^i\rangle)\Gamma_p(\langle(\frac{5}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_{p}^2(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{5p^i}{6}\rangle)}\right\}\notag\\
&~+\frac{1}{q-1}-6q\phi(-3).
\end{align}
Now
\begin{align}
s&=\sum_{i=0}^{r-1}
\{3\langle\frac{-2lp^i}{q-1}\rangle+\langle\frac{6lp^i}{q-1}\rangle-\langle\frac{3lp^i}{q-1}\rangle-
3\langle\frac{-lp^i}{q-1}\rangle\}\notag\\
&=\sum_{i=0}^{r-1}
\{3(\frac{-2lp^i}{q-1})+(\frac{6lp^i}{q-1})-(\frac{3lp^i}{q-1})-
3(\frac{-lp^i}{q-1})\}\notag\\
&~+\sum_{i=0}^{r-1}
\{-3\lfloor\frac{-2lp^i}{q-1}\rfloor-\lfloor\frac{6lp^i}{q-1}\rfloor+\lfloor\frac{3lp^i}{q-1}\rfloor+
3\lfloor\frac{-lp^i}{q-1}\rfloor\}\notag\\
&=\sum_{i=0}^{r-1}
\{-3\lfloor\frac{-2lp^i}{q-1}\rfloor-\lfloor\frac{6lp^i}{q-1}\rfloor+\lfloor\frac{3lp^i}{q-1}\rfloor+
3\lfloor\frac{-lp^i}{q-1}\rfloor\},\notag
\end{align}
which is an integer.
Therefore equation \eqref{eq-52} becomes
\begin{align}
D&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}(-p)^s~~
\overline{\omega}^l\left(\frac{1}{a^3}\right)\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_{p}^2(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{6}+\frac{l}{q-1})p^i\rangle)\Gamma_p(\langle(\frac{5}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_{p}^2(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{5p^i}{6}\rangle)}\right\}\notag\\
&~+\frac{1}{q-1}-6q\phi(-3).
\end{align}
Lemma \ref{lemma5} gives
\begin{align}
D&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0,l\neq\frac{q-1}{2}}^{q-2}\overline{\omega}^l\left(\frac{1}{a^3}\right)
(-p)^{\sum_{i=0}^{r-1}\{-2\lfloor\langle\frac{p^i}{2}\rangle-\frac{lp^i}{q-1}\rfloor\}}\notag\\
&~\times(-p)^{\sum_{i=0}^{r-1}\{-\lfloor\langle\frac{-p^i}{6}\rangle+\frac{lp^i}{q-1}\rfloor
-\lfloor\langle\frac{-5p^i}{6}\rangle+\frac{lp^i}{q-1}\rfloor\}}\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_{p}^2(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{6}+\frac{l}{q-1})p^i\rangle)\Gamma_p(\langle(\frac{5}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_{p}^2(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{5p^i}{6}\rangle)}\right\}\notag\\
&~+\frac{1}{q-1}-6q\phi(-3)\notag\\
&=\frac{q^2\phi(-3a)}{q-1}\sum_{l=0}^{q-2}\overline{\omega}^l\left(\frac{1}{a^3}\right)
(-p)^{\sum_{i=0}^{r-1}\{-2\lfloor\langle\frac{p^i}{2}\rangle-\frac{lp^i}{q-1}\rfloor\}}\notag\\
&~\times(-p)^{\sum_{i=0}^{r-1}\{-\lfloor\langle\frac{-p^i}{6}\rangle+\frac{lp^i}{q-1}\rfloor
-\lfloor\langle\frac{-5p^i}{6}\rangle+\frac{lp^i}{q-1}\rfloor\}}\notag\\
&~\times\prod_{i=0}^{r-1}\left\{\frac{\Gamma_{p}^2(\langle(\frac{1}{2}-\frac{l}{q-1})p^i\rangle)
\Gamma_p(\langle(\frac{1}{6}+\frac{l}{q-1})p^i\rangle)\Gamma_p(\langle(\frac{5}{6}+\frac{l}{q-1})p^i\rangle)}
{\Gamma_{p}^2(\langle\frac{p^i}{2}\rangle)\Gamma_p(\langle\frac{5p^i}{6}\rangle)}\right\}\notag\\
&~-\frac{q}{q-1}+\frac{1}{q-1}-6q\phi(-3)\notag\\
&=-1-6q\phi(-3)-q^2\phi(-3a)\cdot {_2}G_{2}\left[\begin{array}{cc}
\frac{1}{2} & \frac{1}{2} \\
\frac{1}{6} & \frac{5}{6}
\end{array}|\frac{1}{a^3}
\right]_{q}.
\end{align}
Case 2: If $q\not\equiv 1 \pmod{3}$ then
$m=l$, $k=-3l$, and $n=l$, and then
\begin{align}
D&=\frac{1}{q-1}\sum_{l=0}^{q-2}G_{-l}G_{-l}G_{-l}G_{3l}T^{-3l}(-3a),\notag
\end{align}
which is same as the first term of the equation \eqref{eq-54}.
Thus we have
$$D=-1-q^2\phi(-3a)\cdot {_2}G_{2}\left[\begin{array}{cc}
\frac{1}{2} & \frac{1}{2} \\
\frac{1}{6} & \frac{5}{6}
\end{array}|\frac{1}{a^3}
\right]_{q}.$$
Substituting the values of $A$, $B$, $C$ and $D$ in equation \eqref{eq-53} we obtain the desired result.
\end{proof}
\noindent \textbf{Proof of Theorem \ref{MT1}}:
Consider the elliptic curve $$E: y^2=x^3+mx+n,$$ where $m=-27d(d^3+8)$ and $n=27(d^6-20d^3-8)$. By the
following transformation
$x\rightarrow -\frac{36-9d^3+3dx-y}{6(9d^2+x)}$ and $y\rightarrow -\frac{36-9d^3+3dx+y}{6(9d^2+x)}$,
we obtain the equivalent form $C_d$. In the proof of Theorem 1.2, Barman and Kalita \cite{BK1} proved that
$$\#E(\mathbb{F}_q)+q=\#C_d(\mathbb{F}_q)+2+\phi(-3(8+92d^3+35d^6)).$$
For $d^3\neq 1$, from Theorem \ref{hessian2}, we have
\begin{align}
a_q(E)&=q+1-\#E(\mathbb{F}_q)\notag\\
&=2q-1-\#C_d(\mathbb{F}_q)-\phi(-3(8+92d^3+35d^6))\notag\\
&=q-\alpha-\phi(-3(8+92d^3+35d^6))+q\phi(-3d)\cdot{_2}G_2\left[ \begin{array}{cc}
\frac{1}{2}, & \frac{1}{2} \\
\frac{1}{6}, & \frac{5}{6}
\end{array}|\dfrac{1}{d^3}
\right]_q,\notag
\end{align}
where
$\alpha=\left\{
\begin{array}{ll}
5-6\phi(-3), & \hbox{if~ $q\equiv 1\pmod{3}$;} \\
1, & \hbox{if~ $q\not\equiv 1\pmod{3}$.}
\end{array}
\right.$\\
For $m, n \neq 0$ and $-\dfrac{27n^2}{4m^3}\neq 1$, we have $j(E)\neq 0, 1728$.
Now, applying Theorem \ref{mc} over $\mathbb{F}_q$, we complete the proof of the theorem.
\end{document}
\begin{document}
\title{The existence of partitioned balanced tournament designs}
\begin{abstract}
E. R. Lamken proved in \cite{pbtd} that
there exists a partitioned balanced tournament design of side $n$, PBTD($n$),
for $n$ a positive integer, $n \ge 5$, except possibly for $n \in \{9,11,15\}$.
In this article, we show the existence of PBTD($n$) for $n \in \{9,11,15\}$.
As a consequence, the existence of PBTD($n$) has been completely determined.
\end{abstract}
\section{Introduction}
A {\it partitioned balanced tournament design of side $n$}, \textrm{PBTD($n$)}, defined on a $2n$-set $V$,
is an arrangement of the ${2n \choose n}$
distinct unordered pairs of the elements of $V$ into an $n\times (2n-1)$ array such that
\begin{enumerate}
\item every element of $V$ is contained in precisely one cell of each column,
\item every element of $V$ is contained in at most two cells of any row,
\item each row contains all $2n$ elements of $V$ in the first $n$ columns, and
\item each row contains all $2n$ elements of $V$ in the last $n$ columns,
\end{enumerate}
\noindent
see \cite{hb}.
E. R. Lamken proved the following theorem.
\begin{thm}[\cite{pbtd}]\label{th:Lamken}
There exists a PBTD{\normalfont ($n$)} for $n$ a positive integer, $n \ge 5$, except possibly for $n \in \{9,11,15\}$.
\end{thm}
Let $V$ be a $2n$-set.
A {\it Howell design} $H(s,2n)$ is an $s \times s$ array, $H$, that satisfies the following three conditions
\begin{enumerate}
\item every cell of $H$ is empty or contains an unordered pair of elements from $V$,
\item each element of $V$ occurs in each row and each column of $H$, and
\item each unordered pair of elements from $V$ occurs in at most one cell of $H$,
\end{enumerate}
\noindent
see \cite{howell}.
For $T$ a PBTD($n$),
let $T^L,T^C$ and $T^R$ be the first $(n-1)$ columns, the $n$-th column and the last $(n-1)$ columns of $T$, respectively.
Then ($T^L\ T^C$) and ($T^R\ T^C$) are Howell designs $H(n,2n)$.
These two designs are called {\it almost disjoint}.
Conversely, if there is a pair of almost disjoint Howell designs, then there is a partitioned balanced tournament design.
By computer calculation,
we found almost disjoint Howell designs $H(n,2n)$ for $n \in \{9,11,15\}$ in Figures~1, 2 and 3.
Hence the following theorem holds.
\begin{thm}\label{th:ArayaTokihisa}
Partitioned balanced tournament designs of side $n$ exist for $n \in \{9,11,15\}$.
\end{thm}
It is not difficult to show that there is no PBTD($n$) for $n \le 4$.
Therefore we have the following corollary from Theorems \ref{th:Lamken} and \ref{th:ArayaTokihisa}.
\begin{cor}
There exists a PBTD{\normalfont ($n$)} if and only if $n$ is a positive integer, $n \ge 5$.
\end{cor}
\begin{figure}
\caption{a pair of almost disjoint Howell designs $H(9,18)$}
\end{figure}
\begin{figure}
\caption{a pair of almost disjoint Howell designs $H(11,22)$}
\end{figure}
\begin{landscape}
\begin{figure}
\caption{a pair of almost disjoint Howell designs $H(15,30)$}
\end{figure}
\end{landscape}
\section{Observations}
Let $V=\{0,1,\dots,2n-1\}$ be a $2n$-set and $T=(T^L\ T^C\ T^R)$ a PBTD($n$).
Suppose $A$ is the array obtained by permuting elements of $V$, the rows, the first $n-1$ columns,
the last $n-1$ columns of $T$, or $A=(T^R\ T^C\ T^L)$.
Then $A$ is also a PBTD($n$).
Two PBTD($n$) are {\it isomorphic} if one can be obtained from the other by these operations.
By permuting elements of $V$, we may assume $T^C$ is the transpose of the array
$(\{0,1\}\ \{2,3\}\ \dots\ \{2n-2,2n-1\})$.
From Dinitz and Dinitz~\cite{pbtd10}, there exist two PBTD($5$)'s up to isomorphism.
For these two PBTD($5$)'s, we find that there exists the permutation
$$\sigma=(0,1)(2,3)(4,5)(6,7)(8,9)$$ such that
$$T^L=
\begin{array}{|c|c|c|c|}
\hline
t_{11} & \sigma(t_{11}) & t_{13} & \sigma(t_{13})\\\hline
t_{21} & \sigma(t_{21}) & t_{23} & \sigma(t_{23})\\\hline
t_{31} & \sigma(t_{31}) & t_{33} & \sigma(t_{33})\\\hline
t_{41} & \sigma(t_{41}) & t_{43} & \sigma(t_{43})\\\hline
t_{51} & \sigma(t_{51}) & t_{53} & \sigma(t_{53})\\\hline
\end{array}
\text{ and } T^R=
\begin{array}{|c|c|c|c|}
\hline
t_{16} & \sigma(t_{16}) & t_{18} & \sigma(t_{18})\\\hline
t_{26} & \sigma(t_{26}) & t_{28} & \sigma(t_{28})\\\hline
t_{36} & \sigma(t_{36}) & t_{38} & \sigma(t_{38})\\\hline
t_{46} & \sigma(t_{46}) & t_{48} & \sigma(t_{48})\\\hline
t_{56} & \sigma(t_{56}) & t_{58} & \sigma(t_{58})\\\hline
\end{array}
\ .$$
Thus we observe that these two PBTD($5$)'s are determined by some $4$ columns and the permutation $\sigma$.
Seah and Stinson~\cite{pbtd7} obtained two almost disjoint Howell designs $H(7,14)$
by computer calculation for the given $T^L$
which was constructed by E. R. Lamken.
Then for these two PBTD($7$)'s,
we find that there exists the permutation $$\sigma=(0,1)(2,3)(4,5)(6,7)(8,9)(10,11)(12,13)$$ such that
$$T^L=
\begin{array}{|c|c|c|c|c|c|}
\hline
t_{11} & \sigma(t_{11}) & t_{13} & \sigma(t_{13}) & t_{15} & \sigma(t_{15})\\\hline
t_{21} & \sigma(t_{21}) & t_{23} & \sigma(t_{23}) & t_{25} & \sigma(t_{25})\\\hline
t_{31} & \sigma(t_{31}) & t_{33} & \sigma(t_{33}) & t_{35} & \sigma(t_{35})\\\hline
t_{41} & \sigma(t_{41}) & t_{43} & \sigma(t_{43}) & t_{45} & \sigma(t_{45})\\\hline
t_{51} & \sigma(t_{51}) & t_{53} & \sigma(t_{53}) & t_{55} & \sigma(t_{55})\\\hline
t_{61} & \sigma(t_{61}) & t_{63} & \sigma(t_{63}) & t_{65} & \sigma(t_{65})\\\hline
t_{71} & \sigma(t_{71}) & t_{73} & \sigma(t_{73}) & t_{75} & \sigma(t_{75})\\\hline
\end{array}\ .$$
\noindent
Also we find that there exists the permutation
$$\tau = (0,2,4)(1,3,5)(8,10,12)(9,11,13)$$ such that
$$T^L=
\begin{array}{|c|c|c|c|c|c|}
\hline
t_{11} & t_{12} & t_{13} & t_{14} & t_{15} & t_{16}\\\hline
\tau(t_{15}) & \tau(t_{16}) & \tau(t_{11}) & \tau(t_{12}) & \tau(t_{13}) & \tau(t_{14})\\\hline
\tau^2(t_{13}) & \tau^2(t_{14}) &\tau^2(t_{15}) & \tau^2(t_{16}) & \tau^2(t_{11}) & \tau^2(t_{12}) \\\hline
t_{41} & t_{42} & \tau(t_{41}) & \tau(t_{42}) & \tau^2(t_{41}) & \tau^2(t_{42})\\\hline
t_{51} & t_{52} & t_{53} & t_{54} & t_{55} & t_{56}\\\hline
\tau(t_{55}) & \tau(t_{56}) & \tau(t_{51}) & \tau(t_{52}) & \tau(t_{53}) & \tau(t_{54})\\\hline
\tau^2(t_{53}) & \tau^2(t_{54}) &\tau^2(t_{55}) & \tau^2(t_{56}) & \tau^2(t_{51}) & \tau^2(t_{52})\\\hline
\end{array}$$
and
$$T^R=
\begin{array}{|c|c|c|c|c|c|}
\hline
t_{1,8} & t_{1,9} & t_{1,10} & t_{1,11} & t_{1,12} & t_{1,13}\\\hline
\tau(t_{1,10}) & \tau(t_{1,8}) & \tau(t_{1,9}) & \tau(t_{1,13}) & \tau(t_{1,11}) & \tau(t_{1,12})\\\hline
\tau^2(t_{1,9}) & \tau^2(t_{1,10}) & \tau^2(t_{1,8}) & \tau^2(t_{1,12}) & \tau^2(t_{1,13}) & \tau^2(t_{1,11})\\\hline
t_{4,8} & \tau(t_{4,8}) & \tau^2(t_{4,8}) & t_{4,11} & \tau(t_{4,11}) & \tau^2(t_{4,11})\\\hline
t_{5,8} & t_{5,9} & t_{5,10} & t_{5,11} & t_{5,12} & t_{5,13}\\\hline
\tau(t_{5,10}) & \tau(t_{5,8}) & \tau(t_{5,9}) & \tau(t_{5,13}) & \tau(t_{5,11}) & \tau(t_{5,12})\\\hline
\tau^2(t_{5,9}) & \tau^2(t_{5,10}) & \tau^2(t_{5,8}) & \tau^2(t_{5,12}) & \tau^2(t_{5,13}) & \tau^2(t_{5,11})\\\hline
\end{array}\ .$$
Thus we observe that $T^L$ is determined by some $7$ cells, the permutations $\sigma$ and $\tau$.
And $T^R$ is determined by some $14$ cells and the permutation $\tau$.
From these observations, we wrote GAP programs, see~\cite{GAP4}, to construct partitioned balanced tournament designs,
and found the designs shown in Figures~1, 2 and 3.
\
\end{document} |
\begin{document}
\newdateformat{mydate}{\THEDAY~\monthname~\THEYEAR}
\title
[Vanishing viscosity: observations]
{Observations on the vanishing viscosity limit}
\author{James P. Kelliher}
\address{Department of Mathematics, University of California, Riverside, 900 University Ave.,
Riverside, CA 92521}
\curraddr{Department of Mathematics, University of California, Riverside, 900 University Ave.,
Riverside, CA 92521}
\email{kelliher@math.ucr.edu}
\subjclass[2010]{Primary 76D05, 76B99, 76D10}
\keywords{Vanishing viscosity, boundary layer theory}
\begin{abstract}
Whether, in the presence of a boundary, solutions of the Navier-Stokes equations converge to a solution to the Euler equations in the vanishing viscosity limit is unknown. In a seminal 1983 paper, Tosio Kato showed that the vanishing viscosity limit is equivalent to having sufficient control of the gradient of the Navier-Stokes velocity in a boundary layer of width proportional to the viscosity. In a 2008 paper, the present author showed that the vanishing viscosity limit is equivalent to the formation of a vortex sheet on the boundary. We present here several observations that follow on from these two papers.
\Ignore{
We make several observations regarding the vanishing viscosity limit, primarily regarding the control of the total mass of vorticity and the conditions in Tosio Kato's seminal 1983 paper \cite{Kato1983} shown by him to be equivalent to the vanishing viscosity limit.
}
\end{abstract}
\date{(compiled on {\dayofweekname{\day}{\month}{\year} \mydate\today)}}
\maketitle
\begin{small}
\begin{flushright}
Compiled on \textit{\textbf{\dayofweekname{\day}{\month}{\year} \mydate\today}}
\end{flushright}
\end{small}
\renewcommand\contentsname{}
\begin{small}
\tableofcontents
\end{small}
\noindent
The Navier-Stokes equations for a viscous incompressible fluid in a domain $\Omega \subseteq \ensuremath{\BB{R}}^d$, $d \ge 2$, with no-slip boundary conditions can be written,
\begin{align*}
(NS)
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t u + u \cdot \nabla u + \nabla p = \nu \Delta u + f
&\text{ in } \Omega, \\
\dv u = 0
&\text{ in } \Omega, \\
u = 0
&\text{ on } \Gamma := \ensuremath{\partial} \Omega.
\end{array}
\right.
\end{align*}
The Euler equations modeling inviscid incompressible flow on such a domain with no-penetration boundary conditions can be written,
\begin{align*}
(EE)
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t \overline{u} + \overline{u} \cdot \nabla \overline{u} + \nabla \overline{p} = \overline{f}
&\text{ in } \Omega, \\
\dv \overline{u} = 0
&\text{ in } \Omega, \\
\overline{u} \cdot \bm{n} = 0
&\text{ on } \Gamma.
\end{array}
\right.
\end{align*}
Here, $u = u_\nu$ and $\overline{u}$ are velocity fields, while $p$ and $\overline{p}$ are pressure (scalar) fields. The external forces, $f$, $\overline{f}$, are vector fields. (We adopt here the notation of Kato in \cite{Kato1983}.)
We assume throughout that $\Omega$ is bounded and $\Gamma$ has $C^2$ regularity, and write $\bm{n}$ for the outward unit normal vector.
The limit,
\begin{align*}
(VV) \qquad
u \to \overline{u} \text{ in } L^\ensuremath{\infty}(0, T; L^2(\Omega)),
\end{align*}
we refer to as the \textit{classical vanishing viscosity limit}. Whether it holds in general, or fails in any one instance, is a major open problem in mathematical fluid mechanics.
In \cite{K2006Kato, K2008VVV} a number of conditions on the solution $u$ were shown to be equivalent to ($VV$).
The focus in \cite{K2006Kato} was on the size of the vorticity or velocity in a layer near the boundary, while the focus in \cite{K2008VVV} was on the accumulation of vorticity on the boundary. The work we present here is in many ways a follow-on to \cite{K2006Kato, K2008VVV}, each of which, especially \cite{K2006Kato}, was itself an outgrowth of Tosio Kato's seminal paper \cite{Kato1983} on the vanishing viscosity limit, ($VV$).
This paper is divided into two themes. The first theme concerns the accumulation of vorticity---on the boundary, in a boundary layer, or in the bulk of the fluid. It explores the consequences of having control of the total mass of vorticity or, more strongly, the $L^1$-norm of the vorticity for solutions to ($NS$).
We re-express in a specifically 3D form the condition for vorticity accumulation on the boundary from \cite{K2008VVV} in \cref{S:3DVersion}.
In \cref{S:LpNormsBlowUp}, we show that if ($VV$) holds then the $L^p$ norms of the vorticity for solutions to ($NS$) must blow up for all $p > 1$ as $\nu \to 0$ except in very special circumstances. This leaves only the possibility of control of the vorticity's $L^1$ norm. Assuming such control, we show in \cref{S:ImprovedConvergence} that when ($VV$) holds we can characterize the accumulation of vorticity on the boundary more strongly than in \cite{K2008VVV}.
In \cref{S:BoundaryLayerWidth}, we show that if we measure the width of the boundary layer by the size of the $L^1$-norm of the vorticity then the layer has to be wider than that of Kato if ($VV$) holds. We push this analysis further in \cref{S:OptimalConvergenceRate} to obtain the theoretically optimal convergence rate when the initial vorticity has nonzero total mass, as is generic for non-compatible initial data. We turn a related observation into a conjecture concerning the connection between the vanishing viscosity limit and the applicability of the Prandtl theory.
In \cref{S:SomeConvergence}, we show that the arguments in \cite{K2008VVV} lead to the conclusion that some kind of convergence of a subsequence of the solutions to ($NS$) always occurs in the limit as $\nu \to 0$, but not necessarily to a solution to the Euler equations.
The second theme more directly addresses Tosio Kato's conditions from \cite{Kato1983} that are equivalent to ($VV$). We also deal with the closely related condition from \cite{K2006Kato} that uses vorticity in place of the gradient of the velocity that appears in one of Kato's conditions.
We derive in \cref{S:EquivCondition} a condition on the solution to ($NS$) on the boundary that is equivalent in 2D to ($VV$), giving a number of examples to which this condition applies in \cref{S:Examples}.
In \cref{S:BardosTiti} we discuss some interesting recent results of Bardos and Titi that they developed using dissipative solutions to the Euler Equations. We show how weaker, though still useful, 2D versions of these results can be obtained using direct elementary methods.
We start, however, in \cref{S:Background} with the notation and definitions we will need, and a summary of the pertinent results of \cite{K2006Kato, K2008VVV, Kato1983}.
\section{Definitions and past results}\label{S:Background}
\noindent
We define the classical function spaces of incompressible fluids,
\begin{align*}
H &= \set{u \in (L^2(\Omega))^d: \dv u = 0 \text{ in } \Omega, \,
u \cdot \mathbf{n} = 0 \text{ on } \Gamma}
\end{align*}
with the $L^2$-norm and
\begin{align*}
V &= \set{u \in (H_0^1(\Omega))^d: \dv u = 0 \text{ in } \Omega}
\end{align*}
with the $H^1$-norm. We denote the $L^2$ or $H$ inner product by $(\cdot, \cdot)$. If $v$, $w$ are vector fields then $(v, w) = (v^i, w^i)$, where we use here and below the common convention of summing over repeated indices. Similarly, if $M$, $N$ are matrices of the same dimensions then $M \cdot N = M^{ij} N^{ij}$ and
\begin{align*}
(M, N)
= (M^{ij}, N^{ij})
= \int_\Omega M \cdot N.
\end{align*}
We will assume that $u$ and $\overline{u}$ satisfy the same initial conditions,
\begin{align*}
u(0) = u_0, \quad \overline{u}(0) = u_0,
\end{align*}
and that $u_0$ is in $C^{k + \ensuremath{\epsilon}}(\Omega) \cap H$, $\ensuremath{\epsilon} > 0$, where $k =
1$ for two dimensions and $k = 2$ for 3 and higher dimensions, and that
$f = \overline{f} \in C^1_{loc}(\ensuremath{\BB{R}}; C^1(\Omega))$. Then as shown in
\cite{Koch2002} (Theorem 1 and the remarks on p. 508-509), there is some $T
> 0$ for which there exists a unique solution,
\begin{align}\label{e:ubarSmoothness}
\overline{u}
\text{ in } C^1([0, T]; C^{k + \ensuremath{\epsilon}}(\Omega)),
\end{align}
to ($EE$). In two dimensions, $T$ can be arbitrarily large, though it is only known that
some positive $T$ exists in three and higher dimensions.
With such initial velocities, we are assured that there are weak solutions to $(NS)$, unique in 2D. Uniqueness of these weak solutions is not known in three and higher dimensions, so by $u = u_\nu$ we mean any of these solutions chosen arbitrarily. We never employ strong or classical solutions to $(NS)$.
\Ignore{
It follows, assuming that $f$ is in $L^1([0, T]; L^2(\Omega))$, that for such solutions,
\begin{align}\ensuremath{\lambda}bel{e:NSVariationalIdentity}
\begin{split}
&(u(t), \phi(t)) - (u(0), \phi(0)) \\
&\qquad= \int_0^t \brac{(u, u \cdot \ensuremath{\bm{n}abla} \phi)
- \bm{n}u (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla}
\phi) + (f, \phi) + (u, \ensuremath{\partial}_t \phi))} \, dt
\end{split}
\end{align}
for all $\phi$ in $C^1([0, T] \times \Omega) \cap C^1([0, T]; V)$.
}
We define $\gamma_\mathbf{n}$ to be the boundary trace operator for the normal component of a vector field in $H$ and write
\begin{align}\label{e:RadonMeasures}
\mathcal{M}(\overline{\Omega}) \text{ for the space of Radon measures on } \overline{\Omega}.
\end{align}
That is, $\mathcal{M}(\overline{\Omega})$ is the dual space of $C(\overline{\Omega})$. We let $\mu$ in $\mathcal{M}(\overline{\Omega})$ be the measure supported on $\Gamma$ for which $\mu\vert_\Gamma$ corresponds to Lebesgue measure on $\Gamma$ (arc length for $d = 2$, area for $d = 3$). Then $\mu$ is also a member of $H^1(\Omega)^*$, the dual space of $H^1(\Omega)$.
We define the vorticity $\omega(u)$ to be the $d \times d$ antisymmetric matrix,
\begin{align}\label{e:VorticityRd}
\omega(u) = \frac{1}{2}\brac{\nabla u - (\nabla u)^T},
\end{align}
where $\nabla u$ is the Jacobian matrix for $u$: $(\nabla u)^{ij} = \ensuremath{\partial}_j u^i$.
When working specifically in two dimensions, we alternately define the vorticity as the scalar curl of $u$:
\begin{align}\label{e:VorticityR2}
\omega(u) = \ensuremath{\partial}_1 u^2 - \ensuremath{\partial}_2 u^1.
\end{align}
Letting $\omega = \omega(u)$ and $\overline{\omega} = \omega(\overline{u})$, we define the following conditions:
\begingroup
\allowdisplaybreaks
\begin{align*}
(A) & \qquad u \to \overline{u} \text{ weakly in } H
\text{ uniformly on } [0, T], \\
(A') & \qquad u \to \overline{u} \text{ weakly in } (L^2(\Omega))^d
\text{ uniformly on } [0, T], \\
(B) & \qquad u \to \overline{u} \text{ in } L^\ensuremath{\infty}([0, T]; H), \\
(C) & \qquad \nabla u \to \nabla \overline{u} - \innp{\gamma_\mathbf{n} \cdot, \overline{u} \mu}
\text{ in } ((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(D) & \qquad \nabla u \to \nabla \overline{u} \text{ in } (H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T], \\
(E) & \qquad \omega \to \overline{\omega}
- \frac{1}{2} \innp{\gamma_\mathbf{n} (\cdot - \cdot^T),
\overline{u} \mu}
\text{ in }
((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(F) & \qquad \omega \to \overline{\omega}
\text{ in }
(H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T].
\end{align*}
\endgroup
We stress that $(H^1(\Omega))^*$ is the dual space of $H^1(\Omega)$, in contrast to $H^{-1}(\Omega)$, which is the dual space of $H^1_0(\Omega)$.
The condition in $(B)$ is the classical vanishing viscosity limit of ($VV$).
We will make the most use of condition $(E)$, which more explicitly means that
\begin{align}\label{e:EExplicit}
(\omega(t), M)
\to (\overline{\omega}(t), M) - \frac{1}{2}\int_{\Gamma}
((M - M^T) \cdot \mathbf{n}) \cdot \overline{u}(t)
\text{ in } L^\ensuremath{\infty}([0, T])
\end{align}
for any $M$ in $(H^1(\Omega))^{d \times d}$.
In two dimensions, defining the vorticity as in \refE{VorticityR2}, we also define the following two conditions:
\begin{align*}
(E_2) & \qquad \omega \to \overline{\omega} - (\overline{u} \cdot \BoldTau) \mu
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T], \\
(F_2) & \qquad \omega \to \overline{\omega} \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T].
\end{align*}
Here, $\BoldTau$ is the unit tangent vector on $\Gamma$ that is obtained by rotating the outward unit normal vector $\mathbf{n}$ counterclockwise by $90$ degrees.
\Ignore{
Condition ($E_2$) means that
\begin{align*}
(\omega(t), f)
\to (\overline{\omega}(t), f) - \int_{\Gamma} (\overline{u}(t) \cdot \BoldTau) f
\text{ in } L^\ensuremath{\infty}([0, T])
\end{align*}
for any $f$ in $H^1(\Omega)$.
}
\refT{VVEquiv} is proved in \cite{K2008VVV} ($(A) \implies (B)$ having been proved in \cite{Kato1983}), to which we refer the reader for more details.
\begin{theorem}[\cite{K2008VVV}]\label{T:VVEquiv}
Conditions ($A$), ($A'$), ($B$), ($C$), ($D$), and ($E$) are equivalent
(and each implies condition ($F$)).
In two dimensions, condition ($E_2$) and, when $\Omega$ is simply connected, ($F_2$)
are equivalent to the other conditions.\footnote{The restriction that $\Omega$ be
simply connected for the equivalence of ($F_2$) was not, but should
have been, in the published version of \cite{K2008VVV}.}
\end{theorem}
\cref{T:VVEquiv} remains silent about rates of convergence, but examining the proof of it in \cite{K2008VVV} easily yields the following:
\begin{theorem}\label{T:ROC}
Assume that ($VV$) holds with
\begin{align*}
\norm{u - \overline{u}}_{L^\ensuremath{\infty}(0, T; L^2(\Omega))}
\le F(\nu)
\end{align*}
for some fixed $T > 0$. Then
\begin{align*}
\norm{(u(t) - \overline{u}(t), v)}_{L^\ensuremath{\infty}([0, T])}
\le F(\nu) \norm{v}_{L^2(\Omega)}
\text{ for all } v \in (L^2(\Omega))^d
\end{align*}
and
\begin{align*}
\norm{(\omega(t) - \overline{\omega}(t), \varphi)}_{L^\ensuremath{\infty}([0, T])}
\le F(\nu) \norm{\nabla \varphi}_{L^2}
\text{ for all } \varphi \in H_0^1(\Omega).
\end{align*}
\end{theorem}
\begin{remark}\label{R:ROCOthers}
\cref{T:ROC} gives the rates of convergence for ($A$) and ($F_2$);
the rates for ($C$), ($D$), ($E$), and ($E_2$) are like those given for ($F_2$)
(though the test function, $\varphi$, will lie in different spaces).
\end{remark}
In \cite{Kato1983}, Tosio Kato showed that ($VV$) is equivalent to
\begin{align*}
\nu \int_0^T \norm{\nabla u(t)}_{L^2(\Omega)}^2 \, dt \to 0
\text{ as } \nu \to 0
\end{align*}
and to
\begin{align}\label{e:KatoCondition}
\nu \int_0^T \norm{\nabla u(t)}_{L^2(\Gamma_{c \nu})}^2 \, dt \to 0
\text{ as } \nu \to 0.
\end{align}
Here, and in what follows, $\Gamma_\delta$ is a boundary layer in $\Omega$ of width $\delta > 0$.
In \cite{K2006Kato} it is shown that in \cref{e:KatoCondition}, the gradient can be replaced by the vorticity, so ($VV$) is equivalent to
\begin{align}\label{e:KellCondition}
\nu \int_0^T \norm{\omega(t)}_{L^2(\Gamma_{c \nu})}^2 \, dt \to 0
\text{ as } \nu \to 0.
\end{align}
Note that the necessity of \cref{e:KellCondition} follows immediately from \cref{e:KatoCondition}, but the sufficiency does not, since on the inner boundary of $\Gamma_{c \bm{n}u}$ there is no boundary condition of any kind.
We also mention the works \cite{TW1998, W2001}, which together establish conditions equivalent to \refE{KatoCondition}, with a boundary layer slightly larger than that of Kato, yet only involving the tangential derivatives of either the normal or tangential components of $u$ rather than the full gradient. These conditions will not be used in the present work, however.
\Ignore{
The setup and notation are that of \cite{K2008VVV, K2006Kato}, and is largely inherited from \cite{Kato1983}: Weak solutions to the Navier-Stokes equations in a bounded domain, $\Omega$, having $C^2$-boundary, $\Gamma$, are denoted by $u$, the viscosity, $\bm{n}u > 0$, being implied by context. Weak (or often strong) solutions to the Euler equations are denoted by $\overline{u}$. Except in \refS{NavierBCs}, we use homogeneous Dirichlet conditions ($u = 0$) for the Navier-Stokes equations and we in any case always use no-penetration conditions ($u \cdot \bm{n} = 0$) for the Euler equations. Here, $\bm{n}$ is the outward normal to the boundary. We use $\omega = \omega(u)$ to be the curl of $u$, defined to be $\ensuremath{\partial}_1 u^2 - \ensuremath{\partial}_2 u^1$ in $2D$ and the antisymmetric part of $\ensuremath{\bm{n}abla} u$ in higher dimensions. Similarly for $\overline{\omega} = \omega(\overline{u})$.
We denote the $L^2$-inner product by $(\cdot, \cdot)$, and write $V$ for the space of all divergence-free vector fields in $H_0^1(\Omega)$. We will also use the related function space $H$ of divergence-free vector fields $v$ in $L^2(\Omega)$ with $v \cdot \mathbf{n} = 0$ on $\Gamma$ in the sense of a trace.
See \cite{K2008VVV, K2006Kato} for more details.
}
\Part{Theme I: Accumulation of vorticity}
\section{A 3D version of vorticity accumulation on the boundary}\label{S:3DVersion}
\noindent
In \cref{T:VVEquiv}, the vorticity is defined to be the antisymmetric gradient, as in \cref{e:VorticityRd}. When working in 3D, it is usually more convenient to use the language of three-vectors in condition ($E$). This leads us to the condition $(E')$ in \cref{P:EquivE}.
\begin{prop}\label{P:EquivE}
The condition (E) in \cref{T:VVEquiv} is equivalent to
\begin{align*}
(E') \qquad \curl u \to \curl \overline{u} + (\overline{u} \times \bm{n}) \mu
\text{ in } L^\ensuremath{\infty}(0, T; ((H^1(\Omega))^3)^*).
\end{align*}
\end{prop}
\begin{proof}
If $A$ is an antisymmetric $3 \times 3$ matrix then
\begin{align*}
A \cdot M
&= \frac{A \cdot M + A \cdot M}{2}
= \frac{A \cdot M + A^T \cdot M^T}{2}
= \frac{A \cdot M - A \cdot M^T}{2} \\
&= A \cdot \frac{M - M^T}{2}.
\end{align*}
Thus, since $\omega$ and $\overline{\omega}$ are antisymmetric, referring to \refE{EExplicit}, we see that ($E$) is equivalent to
\begin{align*}
(\omega(t), M) \to (\overline{\omega}(t), M)
- \int_\Gamma (M \bm{n}) \cdot \overline{u}(t)
\text{ in } L^\ensuremath{\infty}([0, T])
\end{align*}
for all \textit{antisymmetric} matrices $M \in (H^1(\Omega))^{3 \times 3}$.
Now, for any three vector $\varphi$ define
\begin{align*}
F(\varphi)
&= \tmatrix{0 & -\varphi_3 & \varphi_2}
{\varphi_3 & 0 & -\varphi_1}
{-\varphi_2 & \varphi_1 & 0}.
\end{align*}
Then $F$ is a bijection from the vector space of three-vectors to the space of antisymmetric $3 \times 3$ matrices. Straightforward calculations show that
\begin{align*}
F(\varphi) \cdot F(\psi)
= 2 \varphi \cdot \psi, \qquad
F(\varphi) v
= \varphi \times v
\end{align*}
for any three-vectors, $\varphi$, $\psi$, $v$.
Also, $F(\curl u) = 2 \omega$ and $F(\curl \overline{u}) = 2 \overline{\omega}$.
For any $\varphi \in (H^1(\Omega))^3$ let $M = F(\varphi)$. Then
\begin{align*}
(\omega, M)
&= \frac{1}{2} \pr{F(\curl u), F(\varphi)}
= \pr{\curl u, \varphi}, \\
(\overline{\omega}, M)
&= \frac{1}{2} \pr{F(\curl \overline{u}), F(\varphi)}
= \pr{\curl \overline{u}, \varphi}, \\
(M \bm{n}) \cdot \overline{u}
&= (F(\varphi) \bm{n}) \cdot \overline{u}
= (\varphi \times \bm{n}) \cdot \overline{u}
= - (\overline{u} \times \bm{n}) \cdot \varphi.
\end{align*}
In the last equality, we used the scalar triple product identity $(a \times b) \cdot c = - a \cdot (c \times b)$. Because $F$ is a bijection, this gives the equivalence of ($E$) and ($E'$).
\end{proof}
\section{\texorpdfstring{$L^p$}{Lp}-norms of the vorticity blow up for \texorpdfstring{$p > 1$}{p > 1}}\label{S:LpNormsBlowUp}
\noindent
\begin{theorem}\label{T:VorticityNotBounded}
Assume that $\overline{u}$ is not identically zero on $[0, T] \times \Gamma$.
If any of the equivalent conditions of \cref{T:VVEquiv} holds then for all $p \in (1, \ensuremath{\infty}]$,
\begin{align}\label{e:omegaBlowup}
\limsup_{\nu \to 0^+} \norm{\omega}_{L^\ensuremath{\infty}([0, T]; L^p)}
= \ensuremath{\infty}.
\end{align}
\end{theorem}
\begin{proof}
We prove the contrapositive. Assume that the conclusion is not true. Then for some $q' \in (1, \ensuremath{\infty}]$ it must be that for some $C_0 > 0$ and $\nu_0 > 0$,
\begin{align}\label{e:omegaBoundedCondition}
\norm{\omega}_{L^\ensuremath{\infty}([0, T]; L^{q'})} \le C_0
\text{ for all } 0 < \nu \le \nu_0.
\end{align}
Since $\Omega$ is a bounded domain, if \cref{e:omegaBoundedCondition} holds for some $q' \in (1, \ensuremath{\infty}]$ it holds for all lower values of $q'$ in $(1, \ensuremath{\infty}]$, so we can assume without loss of generality that $q' \in (1, \ensuremath{\infty})$.
Let $q = q'/(q' - 1) \in (1, \ensuremath{\infty})$ be \Holder conjugate to $q'$ and $p = 2/q + 1 \in (1, 3)$. Then $p, q, q'$ satisfy the conditions of \cref{C:TraceCor} with $(p -1) q = 2$.
Applying \cref{C:TraceCor} gives, for almost all $t \in [0, T]$,
\begingroup
\allowdisplaybreaks
\begin{align*}
&\norm{u(t) - \overline{u}(t)}_{L^p(\Gamma)}
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\norm{\nabla u(t) - \nabla \overline{u}(t)}_{L^{q'}(\Omega)}
^{\frac{1}{p}} \\
&\qquad
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\pr{\norm{\nabla u(t)}_{L^{q'}}
+ \norm{\nabla \overline{u}(t)}_{L^{q'}}}
^{\frac{1}{p}} \\
&\qquad
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\pr{C(q') \norm{\omega(t)}_{L^{q'}}
+ \norm{\nabla \overline{u}(t)}_{L^{q'}}}
^{\frac{1}{p}} \\
&\qquad
\le C \norm{u(t) - \overline{u}(t)}_{L^2(\Omega)}
^{1 - \frac{1}{p}}
\end{align*}
\endgroup
for all $0 < \nu \le \nu_0$. Here we used \cref{e:omegaBoundedCondition} and the inequality, $\norm{\nabla u}_{L^{q'}(\Omega)} \le C(q') \norm{\omega}_{L^{q'}(\Omega)}$ for all $q' \in (1, \ensuremath{\infty})$ of Yudovich \cite{Y1963}. Hence,
\begin{align*}
\norm{u - \overline{u}}_{L^\ensuremath{\infty}([0, T]; L^p(\Gamma))}
\le C \norm{u - \overline{u}}_{L^\ensuremath{\infty}([0, T]; L^2(\Omega))}
^{1 - \frac{1}{p}}
\to 0
\end{align*}
as $\nu \to 0$. But,
\begin{align*}
\norm{u - \overline{u}}_{L^\ensuremath{\infty}([0, T]; L^p(\Gamma))}
= \norm{\overline{u}}_{L^\ensuremath{\infty}([0, T]; L^p(\Gamma))}
\ne 0,
\end{align*}
so condition (B) cannot hold and so neither can any of the equivalent conditions in \cref{T:VVEquiv}.
\end{proof}
\section{Improved convergence when vorticity bounded in \texorpdfstring{$L^1$}{L1}}\label{S:ImprovedConvergence}
\noindent In \cref{S:LpNormsBlowUp} we showed that if the classical vanishing viscosity limit holds then the $L^p$ norms of $\omega$ must blow up as $\nu \to 0$ for all $p \in (1, \ensuremath{\infty}]$---unless the Eulerian velocity vanishes identically on the boundary. This leaves open the possibility that the $L^1$ norm of $\omega$ could remain bounded, however, and still have the classical vanishing viscosity limit. This happens, for instance, for radially symmetric vorticity in a disk (Examples 1a and 3 in \cref{S:Examples}), as shown in \cite{FLMT2008}.
In fact, as we show in \cref{C:EquivConvMeasure}, when ($VV$) holds and the $L^1$ norm of $\omega$ remains bounded in $\nu$, the convergence in condition ($E$) is stronger; namely, $weak^*$ in measure (as in \cite{FLMT2008}). (See \cref{e:RadonMeasures} and the comments after it for the definitions of $\mathcal{M}(\overline{\Omega})$ and $\mu$.)
\begin{cor}\label{C:EquivConvMeasure}
Suppose that $u \to \overline{u} \text{ in } L^\ensuremath{\infty}(0, T; H)$ and
$\curl u$ is bounded in $L^\ensuremath{\infty}(0, T; L^1(\Omega))$ uniformly in $\nu$.
Then in 3D,
\begin{align}\label{e:BetterConvergence}
\curl u \to \curl \overline{u} + (\overline{u} \times \bm{n}) \mu
\quad \weak^* \text{ in } L^\ensuremath{\infty}(0, T; \mathcal{M}(\overline{\Omega})).
\end{align}
Similarly, ($C$), ($E$), and ($E_2$) hold with $\weak^*$ convergences
in $L^\ensuremath{\infty}(0, T; \mathcal{M}(\overline{\Omega}))$ rather than uniformly in
$(H^1(\Omega))^*$.
\end{cor}
\begin{proof}
We prove \cref{e:BetterConvergence} explicitly for 3D solutions,
the results for ($C$), ($E$), and ($E_2$) following in the same way.
Let $\psi \in C(\overline{\Omega})$. What we must show is that
\begin{align*}
(\curl u(t) - \curl \overline{u}(t), \psi)
\to \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \psi
\text{ in } L^\ensuremath{\infty}([0, T]).
\end{align*}
So let $\ensuremath{\epsilon} > 0$ and choose $\varphi \in H^1(\Omega)^d$ with
$\norm{\psi - \varphi}_{C(\overline{\Omega})} < \ensuremath{\epsilon}$. We can always find
such a $\varphi$ because $H^1(\Omega)$ is dense in $C(\overline{\Omega})$.
Let
\begin{align*}
M = \max \set{\norm{\curl u
- \curl \overline{u}}_{L^\ensuremath{\infty}(0, T; L^1(\Omega))},
\norm{\overline{u}}_{L^\ensuremath{\infty}([0, T] \times \Omega)}},
\end{align*}
which we note is finite since $\norm{\curl u}_{L^\ensuremath{\infty}(0, T; L^1(\Omega))}$
and $\norm{\curl \overline{u}}_{L^\ensuremath{\infty}(0, T; L^1(\Omega))}$ are both
finite. Then
\begingroup
\allowdisplaybreaks
\begin{align*}
&\abs{(\curl u(t) - \curl \overline{u}(t), \psi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \psi} \\
&\qquad
\le \abs{(\curl u(t) - \curl \overline{u}(t), \psi - \varphi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot
(\psi - \varphi)} \\
&\qquad\qquad
+ \abs{(\curl u(t) - \curl \overline{u}(t), \varphi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \varphi} \\
&\qquad
\le 2 M \ensuremath{\epsilon}
+ \abs{(\curl u(t) - \curl \overline{u}(t), \varphi)
- \int_\Gamma (\overline{u}(t) \times \bm{n}) \cdot \varphi}.
\end{align*}
\endgroup
By \cref{P:EquivE}, we can make the last term above smaller
than, say, $\ensuremath{\epsilon}$, by choosing $\bm{n}u$ sufficiently small, which is sufficient
to give the result.
\end{proof}
\begin{remark}
Suppose that we have the slightly stronger condition that $\nabla u$ is bounded in $L^\infty(0, T; L^1(\Omega))$ uniformly in $\epsilon$. If we are in 2D, $W^{1, 1}(\Omega)$ is compactly embedded in $L^2(\Omega)$. This is sufficient to conclude that ($VV$) holds, as shown in \cite{GKLMN14}.
\end{remark}
\section{Width of the boundary layer}\label{S:BoundaryLayerWidth}
\noindent
Working in two dimensions, make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}, and assume in addition that the total mass of the initial vorticity does not vanish; that is,
\begin{align}\label{e:NonzeroMass}
m := \int_\Omega \omega_0 = (\omega_0, 1) \ne 0.
\end{align}
(In particular, this means that $u_0$ is not in $V$.) The total mass of the Eulerian vorticity is conserved so
\begin{align}\label{e:mEAllTime}
(\overline{\omega}(t), 1) = m \text{ for all } t \in \mathbb{R}.
\end{align}
The Navier-Stokes velocity, however, is in $V$ for all positive time, so its total mass is zero; that is,
\begin{align}\label{e:mNSAllTime}
(\omega(t), 1) = 0 \text{ for all } t > 0.
\end{align}
Let us suppose that the vanishing viscosity limit holds. Fix $\delta > 0$ and let $\varphi_\delta$ be a smooth cutoff function equal to $1$ on $\Gamma_\delta$ and equal to 0 on $\Omega \setminus \Gamma_{2 \delta}$. Then by ($F_2$) of \cref{T:VVEquiv} and using \cref{e:mEAllTime},
\begin{align*}
\abs{(\omega, 1 - \varphi_\delta) - m}
\to \abs{(\overline{\omega}, 1 - \varphi_\delta) - m}
= \abs{m - (\overline{\omega}, \varphi_\delta) - m}
\le C \delta,
\end{align*}
the convergence being uniform on $[0, T]$. Thus, for all sufficiently small $\nu$,
\begin{align}\label{e:omega1phiLimit}
\abs{(\omega, 1 - \varphi_\delta) - m} \le C \delta.
\end{align}
\Ignore {
\begin{align}\ensuremath{\lambda}bel{e:E2VVV}
\omega \to \overline{\omega} - (\overline{u} \cdot \BoldTau) \mu
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T].
\end{align}
Fix $\delta > 0$ let $\varphi_\delta$ be a smooth cutoff function equal to $1$ on $\Gamma_\delta$ and equal to 0 on $\Omega \setminus \Gamma_{2 \delta}$. Letting $\bm{n}u \to 0$, since $\varphi_\delta = 1$ on $\Gamma$, we have
\begin{align*}
(\omega, \varphi_\delta)
&\to (\overline{\omega}, \varphi_\delta)
- \int_\Gamma \overline{u} \cdot \BoldTau
= (\overline{\omega}, \varphi_\delta)
+ \int_\Gamma \overline{u}^\perp \cdot \mathbf{n} \\
&= (\overline{\omega}, \varphi_\delta)
+ \int_\Omega \dv \overline{u}^\perp
= (\overline{\omega}, \varphi_\delta)
- \int_\Omega \overline{\omega} \\
&= (\overline{\omega}, \varphi_\delta)
- \int_\Omega \overline{\omega}_0
= (\overline{\omega}, \varphi_\delta) - m.
\end{align*}
The convergence here is uniform over $[0, T]$.
Now,
\begin{align*}
\abs{(\overline{\omega}, \varphi_\delta)}
\le \bm{n}orm{\overline{\omega}}_{L^\ensuremath{\infty}} \abs{\Gamma_{2 \delta}}
= \bm{n}orm{\overline{\omega}_0}_{L^\ensuremath{\infty}} \abs{\Gamma_{2 \delta}}
\le C \delta.
\end{align*}
Thus, for all sufficiently small $\bm{n}u$,
\begin{align}\ensuremath{\lambda}bel{e:omegaphiLimit}
\abs{(\omega, \varphi_\delta) + m} \le C \delta.
\end{align}
For $t > 0$, $u$ is in $V$ so the total mass of $\omega$ is zero for all $t > 0$; that is,
\begin{align*}
\int_\Omega \omega = 0.
\end{align*}
It follows that for all sufficiently small $\bm{n}u$,
\begin{align}\ensuremath{\lambda}bel{e:omega1phiLimit}
\abs{(\omega, 1 - \varphi_\delta) - m} \le C \delta.
\end{align}
This reflects one of the consequences of \cref{T:VVEquiv} that
\begin{align*}
\omega \to \overline{\omega} \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T],
\end{align*}
which represents a kind of weak internal convergence of the vorticity.
}
In \cref{e:omega1phiLimit} we must hold $\delta$ fixed as we let $\nu \to 0$, for that is all we can obtain from the weak convergence in ($F_2$). Rather, this is all we can obtain without making some assumptions about the rates of convergence, a matter we will return to in the next section.
Still, it is natural to ask whether we can set $\delta = c \nu$ in \cref{e:omega1phiLimit}, this being the width of the boundary layer in Kato's seminal paper \cite{Kato1983} on the subject. If this could be shown to hold it would say that outside of Kato's layer the vorticity for solutions to ($NS$) converges in a (very) weak sense to the vorticity for the solution to ($E$). The price for such convergence, however, would be a buildup of vorticity inside the layer to satisfy the constraint in \cref{e:mNSAllTime}.
In fact, however, this is not the case, at least not by a closely related measure of vorticity buildup near the boundary.
The total mass of the vorticity (in fact, its $L^1$-norm) in any layer smaller than that of Kato goes to zero and, if the vanishing viscosity limit holds, then the same holds for Kato's layer. Hence, if there is a layer in which vorticity accumulates, that layer is at least as wide as Kato's and is wider than Kato's if the vanishing viscosity limit holds. This is the content of the following theorem.
\begin{theorem}\label{T:BoundaryLayerWidth}
Make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}. For any positive function $\delta = \delta(\nu)$,
\begin{align}\label{e:OmegaL1VanishGeneral}
\norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta(\nu)}))}
\le C \pr{\frac{\delta(\nu)}{\nu}}^{1/2}.
\end{align}
If the vanishing viscosity limit holds and
\begin{align*}
\limsup_{\nu \to 0^+} \frac{\delta(\nu)}{\nu} < \infty
\end{align*}
then
\begin{align}\label{e:OmegaL1Vanish}
\norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta(\nu)}))}
\to 0 \text{ as } \nu \to 0.
\end{align}
\end{theorem}
\begin{proof}
By the Cauchy-Schwarz inequality,
\begin{align*}
\norm{\omega}_{L^1(\Gamma_{\delta(\nu)})}
\le \norm{1}_{L^2(\Gamma_{\delta(\nu)})} \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})}
\le C \delta^{1/2} \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})}
\end{align*}
so
\begin{align*}
\frac{C}{\delta} \norm{\omega}_{L^1(\Gamma_{\delta(\nu)})}^2
\le \norm{\omega}_{L^2(\Gamma_{\delta(\nu)})}^2
\end{align*}
and
\begin{align*}
\frac{C \nu}{\delta} \norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta(\nu)}))}^2
\le \nu \norm{\omega}_{L^2([0, T]; L^2(\Gamma_{\delta(\nu)}))}^2.
\end{align*}
By the basic energy inequality for the Navier-Stokes equations, the right-hand side is bounded, giving \refE{OmegaL1VanishGeneral}, and if the vanishing viscosity limit holds, the right-hand side goes to zero by \cref{e:KellCondition}, giving \refE{OmegaL1Vanish}.
\end{proof}
\begin{remark}
In \refT{BoundaryLayerWidth}, we do not need the assumption in \refE{NonzeroMass} nor do we need to assume that we are in dimension two. The result is of most interest, however, when one makes these two assumptions.
\end{remark}
\begin{remark}
\refE{OmegaL1Vanish} also follows from condition (iii'') in \cite{K2006Kato} using the Cauchy-Schwarz inequality in the manner above, but that is using a sledge hammer to prove a simple inequality. Note that \refE{OmegaL1Vanish} is necessary for the vanishing viscosity limit to hold, but is not (as far as we can show) sufficient.
\end{remark}
\Ignore{
\begin{theorem}\ensuremath{\lambda}bel{T:BoundaryLayerWidth}
Make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}. Assume that the vanishing viscosity limit holds. For any nonnegative function $\delta = \delta(\bm{n}u)$,
\begin{align}\ensuremath{\lambda}bel{e:OmegaMassVanishGeneral}
\limsup_{\bm{n}u \to 0^+} \int_0^T \abs{\int_{\Gamma_{\delta(\bm{n}u)}} \omega}
\le C T \lim_{\bm{n}u \to 0} \frac{\delta(\bm{n}u)}{\bm{n}u}.
\end{align}
If
\begin{align*}
\lim_{\bm{n}u \to 0} \frac{\delta(\bm{n}u)}{\bm{n}u} = 0
\end{align*}
then
\begin{align}\ensuremath{\lambda}bel{e:OmegaMassVanish}
\int_0^T \abs{\int_{\Gamma_{\delta(\bm{n}u)}} \omega}
\to 0 \text{ as } \bm{n}u \to 0.
\end{align}
\end{theorem}
\begin{proof}
\begin{align*}
\int_{\Gamma_\delta} \omega
= \int_{A_{L, \delta}} \omega
+ \int_{\Gamma_\delta \setminus A_{L, \delta}} \omega,
\end{align*}
where
\begin{align*}
A_{L, \delta}= \set{x \in \Gamma_\delta \colon \abs{\omega} \ge L}.
\end{align*}
Thus,
\begin{align*}
\int_{\Gamma_\delta} \omega
\le \int_{A_{L, \delta}} \omega
+ L \abs{\Gamma_\delta}
\le \int_{A_{L, \delta}} \omega
+ C \delta L.
\end{align*}
Let $L$ vary with $\bm{n}u$ at a rate we will specify later.
Then,
\begin{align*}
\bm{n}u &\int_0^T \int_{\Gamma_\delta} \abs{\omega}^2
= \bm{n}u \int_0^T \int_{A_{L, \delta}} \abs{\omega}^2
+ \bm{n}u \int_0^T \int_{\Gamma_\delta \setminus A_{L, \delta}} \abs{\omega}^2 \\
&\ge \bm{n}u \int_0^T \int_{A_{L, \delta}} L \abs{\omega}
\ge L \bm{n}u \int_0^T \abs{\int_{A_{L, \delta}} \omega} \\
&\ge L \bm{n}u \brac{\int_0^T \abs{\int_{\Gamma_\delta} \omega}
- \int_0^T C \delta L}
= L \bm{n}u \int_0^T \abs{\int_{\Gamma_\delta} \omega}
- C T \bm{n}u \delta L^2.
\end{align*}
Define
\begin{align*}
M(\bm{n}u)
= \int_0^T \abs{\int_{\Gamma_{\delta(\bm{n}u)}} \omega}, \quad
M
= \limsup_{\bm{n}u \to 0^+} M(\bm{n}u).
\end{align*}
Letting $L = \bm{n}u^{-1}$, we have
\begin{align*}
\limsup_{\bm{n}u \to 0^+} &\, \bm{n}u \int_0^T \int_{\Gamma_{\delta(\bm{n}u)}} \abs{\omega}^2
\ge \limsup_{\bm{n}u \to 0^+} \brac{L_k \bm{n}u M(\bm{n}u) - CT \bm{n}u \delta(\bm{n}u) L^2} \\
&= M - CT \limsup_{\bm{n}u \to 0^+} \frac{\delta(\bm{n}u)}{\bm{n}u}
= M.
\end{align*}
But because we have assumed that the vanishing viscosity limit holds, the left-hand side vanishes with $\bm{n}u$ regardless of how the function $\delta$ is chosen. Thus,
\begin{align*}
M \le CT \limsup_{\bm{n}u \to 0^+} \frac{\delta(\bm{n}u)}{\bm{n}u},
\end{align*}
giving \refE{OmegaMassVanishGeneral} and also \refE{OmegaMassVanish}.
\end{proof}
}
\section{Optimal convergence rate}\label{S:OptimalConvergenceRate}
\noindent Still working in two dimensions, let us return to \cref{e:omega1phiLimit}, assuming as in the previous section that the vanishing viscosity limit holds, but bringing the rate of convergence function, $F$, of \cref{T:ROC} into the analysis. We will now make $\delta = \delta(\nu) \to 0$ as $\nu \to 0$, and choose $\varphi_\delta$ slightly differently, requiring that it equal $1$ on $\Gamma_{\delta^*}$ and vanish outside of $\Gamma_\delta$ for some $0 < \delta^* = \delta^*(\nu) < \delta$. We can see from the argument that led to \cref{e:omega1phiLimit}, incorporating the convergence rate for ($F_2$) given by \cref{T:ROC}, that
\begin{align*}
\abs{(\omega, 1 - \varphi_\delta) - m}
\le C \delta + \norm{\nabla \varphi_\delta}_{L^2(\Omega)} F(\nu).
\end{align*}
Because $\partial \Omega$ is $C^2$, we can always choose $\varphi_\delta$ so that $\abs{\nabla \varphi_\delta} \le C(\delta - \delta^*)^{-1}$. Then for all sufficiently small $\delta$,
\begin{align*}
\norm{\nabla \varphi_\delta}_{L^2(\Omega)}
\le \pr{\int_{\Gamma_\delta \setminus \Gamma_{\delta^*}}
\pr{\frac{C}{\delta - \delta^*}}^2}^{\frac{1}{2}}
= C \frac{(\delta - \delta^*)^{\frac{1}{2}}}{\delta - \delta^*}
= C (\delta - \delta^*)^{-\frac{1}{2}}.
\end{align*}
We then have
\begin{align}\label{e:mDiffEst}
\abs{(\omega, 1 - \varphi_\delta) - m}
\le C \brac{\delta + (\delta - \delta^*)^{-\frac{1}{2}} F(\nu)}.
\end{align}
For any measurable subset $\Omega'$ of $\Omega$, define
\begin{align*}
\mathbf{M}(\Omega') = \int_{\Omega'} \omega,
\end{align*}
the total mass of vorticity on $\Omega'$. Then
\begin{align*}
\mathbf{M}(\Gamma_\delta^C)
= (\omega, 1 - \varphi_\delta)
+ \int_{\Gamma_\delta \setminus \Gamma_{\delta^*}} \varphi_\delta \omega
\end{align*}
so
\begin{align}\label{e:MDiffEst}
\begin{split}
\abs{(\omega, 1 - \varphi_\delta) - \mathbf{M}(\Gamma_\delta^C)}
&\le \norm{\omega}_{L^2(\Gamma_\delta \setminus \Gamma_{\delta^*})}
\norm{\varphi_\delta}_{L^2(\Gamma_\delta \setminus \Gamma_{\delta^*})} \\
&\le C (\delta - \delta^*)^{\frac{1}{2}}
\norm{\omega}_{L^2(\Gamma_{\delta})}.
\end{split}
\end{align}
\Ignore{
To obtain any reasonable control on the total mass of vorticity, we certainly need $\delta, \delta^* \to 0$ as $\bm{n}u \to 0$, but more important, as we can see from \cref{e:mDiffEst}, we need
\begin{align}\ensuremath{\lambda}bel{e:LayerReq1}
(\delta - \delta^*)^{-\frac{1}{2}} F(\bm{n}u) \to 0
\text{ as } \bm{n}u \to 0.
\end{align}
In light of \cref{T:BoundaryLayerWidth} and its proof, we should also require at least that
\begin{align}\ensuremath{\lambda}bel{e:LayerReq2}
(\delta - \delta^*)^{\frac{1}{2}}
\bm{n}orm{\omega}_{L^2(0, T; L^2(\Gamma_\delta))} \to 0
\text{ as } \bm{n}u \to 0
\end{align}
so that the bound in \cref{e:mDiffEst} will lead, via \cref{e:MDiffEst}, to a bound on the total mass of vorticity outside the boundary layer, $\Gamma_\delta$.
Now, as in the proof of \cref{T:BoundaryLayerWidth}, if we let $\delta - \delta^* = O(\bm{n}u)$ then the condition in \cref{e:LayerReq2} will hold by \cref{e:KellCondition}. Then the requirement in \cref{e:LayerReq1} becomes \begin{align*}
F(\bm{n}u)
= o \pr{(\delta - \delta^*)^{\frac{1}{2}}}
= o (\bm{n}u^{\frac{1}{2}}).
\end{align*}
}
From these observations and those in the previous section, we have the following:
\begin{theorem}\label{T:VorticityMassControl}
\Ignore{
Assume that $\delta = \delta(\nu) \to 0$ as $\nu \to 0$ and define
\begin{align*}
M_\delta
= \norm{\int_\Omega \omega_0
- \int_{\Gamma_\delta^C} \omega(t)}_{L^2([0, T])}.
\end{align*}
If the classical vanishing viscosity limit in ($VV$) holds with a rate that is
$o(\nu^{\frac{1}{2}})$ then $M_{\delta(\nu)} \to 0$ as $\nu \to 0$.
}
Assume that the classical vanishing viscosity limit in ($VV$) holds with a rate
of convergence,
$F(\nu) = o(\nu^{1/2})$. Then in 2D
the initial mass of the vorticity must be zero.
\end{theorem}
\begin{proof}
From \cref{e:mDiffEst,e:MDiffEst},
\begin{align*}
M_\delta
&:= \abs{m - \mathbf{M}(\Gamma_\delta^C)}
\le \abs{m - (\omega, 1 - \varphi_\delta)}
+ \abs{(\omega, 1 - \varphi_\delta) - \mathbf{M}(\Gamma_\delta^C)} \\
&\le C \brac{\delta + (\delta - \delta^*)^{-\frac{1}{2}} F(\nu)}
+ C (\delta - \delta^*)^{\frac{1}{2}}
\norm{\omega}_{L^2(\Gamma_{\delta})}.
\end{align*}
Choosing $\delta(\nu) = \nu$, $\delta^*(\nu) = \nu/2$, we have
\begin{align*}
M_\nu
&\le C \brac{\nu + \nu^{-\frac{1}{2}} o(\nu^{\frac{1}{2}})}
+ C \nu^{\frac{1}{2}}
\norm{\omega}_{L^2(\Gamma_{\nu})},
\end{align*}
uniformly over $[0, T]$. Squaring, integrating in time, and applying Young's
inequality gives
\begin{align*}
\norm{M_\nu}_{L^2([0, T])}^2
= \int_0^T M_\nu^2
\le CT (\nu^2 + o(1))
+ C \nu \int_0^T \norm{\omega}_{L^2(\Gamma_\nu)}^2
\to 0
\end{align*}
as $\nu \to 0$ by \cref{e:KellCondition}.
Then,
\begin{align*}
\norm{m - \mathbf{M}(\Omega)}_{L^2([0, T])}
&\le \norm{m - \mathbf{M}(\Gamma_\nu^C)}_{L^2([0, T])}
+ \norm{\mathbf{M}(\Gamma_\nu)}_{L^2([0, T])} \\
&\le \norm{M_\nu}_{L^2([0, T])}
+ \norm{\omega}_{L^2([0, T]; L^1(\Gamma_{\nu}))}
\to 0
\end{align*}
as $\nu \to 0$ by \cref{T:BoundaryLayerWidth}.
But $u(t)$ lies in $V$ so $\mathbf{M}(\Omega) = 0$ for all $t > 0$.
Hence, the limit above is possible only if $m = 0$.
\end{proof}
For non-compatible initial data, that is for $u_0 \notin V$, the total mass of vorticity will generically not be zero, so $C \sqrt{\nu}$ should be considered a bound on the rate of convergence for non-compatible initial data. As we will see in \cref{R:ROC}, however, a rate of convergence as good as $C \sqrt{\nu}$ is almost impossible unless the initial data is fairly smooth, and even then it would only occur in special circumstances.
Therefore, let us assume that the rate of convergence in ($VV$) is only $F(\nu) = C \nu^{1/4}$. As we will see in \cref{S:Examples}, this is a more typical rate of convergence for the simple examples for which ($VV$) is known to hold.
Now \cref{e:mDiffEst} still gives a useful bound as long as $\delta - \delta^*$ is slightly larger than the Prandtl layer width of $C \sqrt{\nu}$ (though \cref{e:MDiffEst} then fails to tell us anything useful). So let us set $\delta = 2 \nu^{1/2 - \epsilon}$, $\delta^* = \nu^{1/2 - \epsilon}$, $\epsilon > 0$ arbitrarily small. We are building here to a conjecture, so for these purposes we will act as though $\epsilon = 0$.
If the Prandtl theory is correct, then we should expect that $\mathbf{M}(\Gamma_\delta^C) \to m$ as $\nu \to 0$, since outside of the Prandtl layer $u$ matches $\overline{u}$. But the total mass of vorticity for all positive time is zero, and the total mass in the Kato layer, $\Gamma_\nu$, goes to zero by \cref{T:BoundaryLayerWidth}. There would be no choice then but to have a total mass of vorticity between the Kato and Prandtl layers that approaches $-m$ as the viscosity vanishes. (Since the Kato layer is much smaller than the Prandtl layer, this does not require that there be any higher concentration of vorticity in any particular portion of the Prandtl layer, though.)
Now suppose that the rate of convergence is even slower than $C \nu^{1/4}$. Then \cref{e:mDiffEst} gives a measure of $\mathbf{M}(\Gamma_\delta^C)$ converging to $m$ well outside the Prandtl layer. This does not directly contradict any tenet of the Prandtl theory, but it suggests that for small viscosity the solution to the Navier-Stokes equations matches the solution to the Euler equations only well outside the Prandtl layer. This leads us to the following conjecture:
\begin{conj}\label{J:Prandtl}
If the vanishing viscosity limit in ($VV$) holds at a rate slower than
$C \nu^{\frac{1}{4}}$ in 2D then the Prandtl theory fails.
\end{conj}
We conjecture no further, however, as to whether the Prandtl equations become ill-posed or whether the formal asymptotics fail to hold rigorously.
\section{Some kind of convergence always happens}\label{S:SomeConvergence}
\noindent Assume that $v$ is a vector field lying in $L^\infty([0, T]; H^1(\Omega))$. An examination of the proof given in \cite{K2008VVV} of the chain of implications in \cref{T:VVEquiv} shows that all of the conditions except (B) are still equivalent with $\overline{u}$ replaced by $v$. That is, defining
\begingroup
\allowdisplaybreaks
\begin{align*}
(A_v) & \qquad u \to v \text{ weakly in } H
\text{ uniformly on } [0, T], \\
(A'_v) & \qquad u \to v \text{ weakly in } (L^2(\Omega))^d
\text{ uniformly on } [0, T], \\
(B_v) & \qquad u \to v \text{ in } L^\infty([0, T]; H), \\
(C_v) & \qquad \nabla u \to \nabla v - \innp{\gamma_\mathbf{n} \cdot, v \mu}
\text{ in } ((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(D_v) & \qquad \nabla u \to \nabla v \text{ in } (H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T], \\
(E_v) & \qquad \omega \to \omega(v)
- \frac{1}{2} \innp{\gamma_\mathbf{n} (\cdot - \cdot^T),
v \mu}
\text{ in }
((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(E_{2, v}) & \qquad \omega \to \omega(v) - (v \cdot \BoldTau) \mu
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T], \\
(F_{2, v}) & \qquad \omega \to \omega(v) \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T],
\end{align*}
\endgroup
we have the following theorem:
\begin{theorem}\ensuremath{\lambda}bel{T:MainResultv}
Conditions ($A_v$), ($A'_v$), ($C_v$), ($D_v$), and ($E_v$) are equivalent.
In 2D, conditions ($E_{2,v}$) and, when $\Omega$ is simply connected,
($F_{2,v}$) are equivalent to the other conditions.
Also, $(B_v)$ implies all of the other conditions. Finally,
the same equivalences hold if we replace each
convergence above with the convergence of a subsequence.
\end{theorem}
But we also have the following:
\begin{theorem}\label{T:SubsequenceConvergence}
There exists $v$ in $L^\infty([0, T]; H)$ such that a subsequence $(u_\nu)$ converges weakly to $v$ in $L^\infty([0, T]; H)$.
\end{theorem}
\begin{proof}
The argument for a simply connected domain in 2D is slightly simpler so we give it first.
The sequence $(u_\nu)$ is bounded in $L^\infty([0, T]; H)$ by the basic energy inequality for the Navier-Stokes equations. Letting $\psi_\nu$ be the stream function for $u_\nu$ vanishing on $\Gamma$, it follows by the Poincar\'e inequality that $(\psi_\nu)$ is bounded in $L^\infty([0, T]; H_0^1(\Omega))$. Hence, there exists a subsequence, which we relabel as $(\psi_\nu)$, converging strongly in $L^\infty([0, T]; L^2(\Omega))$ and weak-* in $L^\infty([0, T]; H_0^1(\Omega))$ to some $\psi$ lying in $L^\infty([0, T]; H_0^1(\Omega))$. Let $v = \nabla^\perp \psi$.
Let $g$ be any element of $L^\infty([0, T]; H)$. Then
\begin{align*}
(u_\nu, g)
&= (\nabla^\perp \psi_\nu, g)
= - (\nabla \psi_\nu, g^\perp)
= (\psi_\nu, - \dv g^\perp)
= (\psi_\nu, \omega(g)) \\
&\to (\psi, \omega(g))
= (v, g).
\end{align*}
In the third equality we used the membership of $\psi_\nu$ in $H_0^1(\Omega)$ and the last equality follows in the same way as the first four. The convergence follows from the weak-* convergence of $\psi_\nu$ in $L^\infty([0, T]; H_0^1(\Omega))$ and the membership of $\omega(g)$ in $H^{-1}(\Omega)$.
In dimension $d \ge 3$, let $M_\nu$ in $(H_0^1(\Omega))^{d \times d}$ satisfy $u_\nu = \dv M_\nu$; this is possible by Corollary 7.5 of \cite{K2008VVV}. Arguing as before it follows that there exists a subsequence, which we relabel as $(M_\nu)$, converging strongly in $L^\infty([0, T]; L^2(\Omega))$ and weak-* in $L^\infty([0, T]; H_0^1(\Omega))$ to some $M$ that lies in $L^\infty([0, T]; (H_0^1(\Omega))^{d \times d})$. Let $v = \dv M$.
Let $g$ be any element of $L^\infty([0, T]; H)$. Then
\begin{align*}
(u_\nu, g)
&= (\dv M_\nu, g)
= -(M_\nu, \nabla g)
\to - (M, \nabla g)
= (v, g),
\end{align*}
establishing convergence as before.
\end{proof}
It follows from \refTAnd{MainResultv}{SubsequenceConvergence} that all of the convergences in \cref{T:VVEquiv} hold except for $(B)$, but for a subsequence of solutions and the convergence is to some velocity field $v$ lying only in $L^\infty([0, T]; H)$ and not necessarily in $L^\infty([0, T]; H \cap H^1(\Omega))$. In particular, we do not know if $v$ is a solution to the Euler equations, and, in fact, there is no reason to expect that it is.
\Ignore{
\begin{lemma}\ensuremath{\lambda}bel{L:H1Dual}
$H^{-1}(\Omega)$ is the image under $\Delta$ of $H^1_0(\Omega)$ and the image
under $\dv$ of $(L^2(\Omega))^d$.
\end{lemma}
\begin{proof}
Let $w$ be in $H^{-1}(\Omega) = H^1_0(\Omega)^*$. By the density of $\ensuremath{\BB{C}}al{D}(\Omega)$ in
$H^1_0(\Omega)$ the value of $(w, \varphi)_{H_0^1(\Omega), H_0^1(\Omega)^*}$ on
test functions $\varphi$ in $\ensuremath{\BB{C}}al{D}(\Omega)$ is enough to uniquely determine
$w$. By the Riesz representation theorem there exists a $u$ in $H^1_0(\Omega)$
such that for all $\varphi$ in $H^1_0(\Omega)$ and hence in $\ensuremath{\BB{C}}al{D}(\Omega)$,
\begin{align*}
(w, \varphi)_{H_0^1(\Omega), H_0^1(\Omega)^*}
&= \innp{u, \varphi}
= \int_\Omega \ensuremath{\bm{n}abla} u \cdot \ensuremath{\bm{n}abla} \varphi
= - \int_\Omega \Delta u \cdot \varphi \\
&= (-\Delta u, \varphi)_{\ensuremath{\BB{C}}al{D}(\Omega), \ensuremath{\BB{C}}al{D}(\Omega)^*},
\end{align*}
which shows that $w$ as a linear functional is equal to $- \Delta u$ as a
distribution, and the two can be identified.
Because the identification of $w$ and $u$ in the Riesz representation
theorem is bijective, $H^{-1}(\Omega) = \Delta H^1(\Omega)$.
Since $\Delta = \dv \ensuremath{\bm{n}abla}$, it also follows that $H^{-1}(\Omega) \subseteq
\dv (L^2(\Omega))^d$. To show the opposite containment, let $f$ be in
$(L^2(\Omega))^d$. Then by the Hodge decomposition, we can write
\begin{align*}
f = \ensuremath{\bm{n}abla} u + g
\end{align*}
with $u$ in $H^1(\Omega)$ and $g$ in $(L^2(\Omega))^d$ with $\dv g = 0$ as a
distribution. Then for any $\varphi$ in $\ensuremath{\BB{C}}al{D}(\Omega)$,
\begin{align*}
&(\dv f, \varphi)_{\ensuremath{\BB{C}}al{D}(\Omega), \ensuremath{\BB{C}}al{D}(\Omega)^*}
= - (f, \ensuremath{\bm{n}abla} \varphi)_{\ensuremath{\BB{C}}al{D}(\Omega), \ensuremath{\BB{C}}al{D}(\Omega)^*} \\
&\qquad= - (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \varphi)_{\ensuremath{\BB{C}}al{D}(\Omega), \ensuremath{\BB{C}}al{D}(\Omega)^*}
- (g, \ensuremath{\bm{n}abla} \varphi)_{\ensuremath{\BB{C}}al{D}(\Omega), \ensuremath{\BB{C}}al{D}(\Omega)^*} \\
&\qquad= - \innp{u, \varphi} + (\dv g, \varphi)_{\ensuremath{\BB{C}}al{D}(\Omega), \ensuremath{\BB{C}}al{D}(\Omega)^*}
= \innp{-u, \varphi}
= (w, \varphi)_{H_0^1(\Omega), H_0^1(\Omega)^*}
\end{align*}
for some $w$ in $H^1_0(\Omega)^*$ by the Riesz representation theorem. It
follows that $\dv f$ and $w$ can be identified, using the same identification
as before. What we have shown is that $\dv (L^2(\Omega))^d
\subseteq H^{-1}(\Omega)$, completing the proof.
\end{proof}
}
\Ignore{
\section{Convergence to another solution to the Euler equations?}
\bm{n}oindent One could imagine that the solutions $u = u_\bm{n}u$ to the Navier-Stokes equations converge, in the limit, to a solution to the Euler equations, but one different from $\overline{u}$ and possibly with lower regularity. Since such solutions are determined by their initial velocity, this means that the vector $v$ to which $(u_\bm{n}u)$ converges has initial velocity $v^0 \bm{n}e \overline{u}^0$. (This conclusion would be true even if $v$ had so little regularity that it had not been determined uniquely by its initial velocity.)
Now, $\overline{u}(t)$ is continuous in $H$, since it is a strong solution, as too, if we restrict ourselves to two dimensions, is $u(t)$. If $v$ has bounded vorticity, say, then $v(t)$ is continuous in $H$ as well. It would seem ......
}
\Ignore{
\section{Physical meaning of the vortex sheet on the boundary?}
\bm{n}oindent Calling the term $\omega^* := - (\overline{u} \cdot \BoldTau) \mu$ (in 2D) a \textit{vortex sheet} is misleading, and I regret referring to it that way in \cite{K2008VVV} without some words of explanation. The problem is that we cannot interpret $\omega^*$ as a distribution on $\Omega$ because applying it to any function in $\ensuremath{\BB{C}}al{D}(\Omega)$ gives zero. And how could we recover the velocity associated to $\omega^*$?
One natural, if unjustified, way to try to interpret $\omega^*$ is to extend it to the whole space so that it is a measure supported along the curve $\Gamma$. To determine the associated velocity $v$, let $\Omega_- = \Omega$ and $\Omega_+ = \Omega^C$ with $v_\pm = v|_{\Omega_\pm}$, and let $[v] = v_+ - v_-$. Then as on page 364 of \cite{MB2002}, we must have
\begin{align*}
[v] \cdot \mathbf{n} = 0, \quad [v] \cdot \BoldTau = - \overline{u} \cdot \BoldTau.
\end{align*}
That is, the normal component of the velocity is continuous across the boundary while the jump in the tangential component is the strength of the vortex sheet.
Now, let us assume that the vanishing viscosity limit holds, so that the limiting vorticity is $\overline{\omega} - (\overline{u} \cdot \BoldTau) \mu = \overline{\omega} - \omega^*$. Since $u \to \overline{u}$ strongly with $\omega(\overline{u}) = \overline{\omega}$, the term $\overline{\omega}$ has to account for all of the kinetic energy of the fluid. If the limit is to be physically meaningful, certainly energy cannot be \textit{gained} (though it conceivably could be lost to diffusion, even in the limit). Thus, we would need to have the velocity $v$ associated with $\omega^*$ vanish in $\Omega$; in other words, $v_- \equiv 0$. This leads to $\omega(v_+) = \dv v_+ = 0$ in $\Omega_+$, $v_+ \cdot \mathbf{n} = 0$ on $\Gamma$, $v_+ \cdot \BoldTau = \overline{u} \cdot \BoldTau$ on $\Gamma$, with some conditions on $v_+$ at infinity. But this is an overdetermined set of equations. In fact, if $\Omega$ is simply connected then $\Omega_+$ is an exterior domain, and if we ignore the last equation, then up to a multiplicative constant there is a unique solution vanishing at infinity. This cannot, in general, be reconciled with the need for the last equation to hold.
Actually, perhaps the correct physical interpretation of $\omega^*$ comes from the observation in the first paragraph of this section: that it has no physical effect at all since, as a distribution, it is zero. If the vanishing viscosity limit holds, it is reasonable to assume that if there is a boundary separation of the vorticity it weakens in magnitude as the viscosity vanishes and so contributes nothing in the limit.
Or, looked at another way, if in looking for the velocity $v$ corresponding to the vortex sheet $\omega$ as we did above we assume that $v$ is zero outside $\Omega$, we would obtain
\begin{align*}
v \cdot \mathbf{n} = 0, \quad v \cdot \BoldTau = \overline{u} \cdot \BoldTau
\end{align*}
on the boundary. For a very small viscosity, then, $u$ has almost the same effect as $\overline{u}$ in the interior of $\Omega$, while the vortex sheet that is forming on the boundary as the viscosity vanishes has nearly the same effect as $\overline{u}$ on the boundary.
}
\newpage
\Part{Theme II: Kato's Conditions}
\section{An equivalent 2D condition on the boundary}\label{S:EquivCondition}
\noindent
\begin{theorem}\label{T:BoundaryIffCondition}
For ($VV$) to hold in 2D it is necessary and sufficient that
\begin{align}\label{e:BoundaryCondition2D}
\nu \int_0^T \int_\Gamma \omega \, \overline{u} \cdot \BoldTau
\to 0
\text{ as } \nu \to 0.
\end{align}
\end{theorem}
\begin{proof}
Since the solution is in 2D and $f \in L^2(0, T; H) \supseteq C^1_{loc}(\mathbb{R}; C^1(\Omega))$, Theorem III.3.10 of \cite{T2001} gives
\begin{align}\label{e:RegTwoD}
\begin{split}
&\sqrt{t} u \in L^2(0, T; H^2(\Omega)) \cap L^\infty(0, T; V), \\
&\sqrt{t} \partial_t u \in L^2(0, T; H),
\end{split}
\end{align}
so $\omega(t)$ is defined in the sense of a trace on the boundary. This shows that the condition in \cref{e:BoundaryCondition2D} is well-defined.
For simplicity we give the argument with $f = 0$. We perform the calculations using the $d$-dimensional form of the vorticity in \cref{e:VorticityRd}, specializing to 2D only at the end. (The argument applies formally in higher dimensions; see \cref{R:BoundaryConditionInRd}.)
Subtracting ($EE$) from ($NS$), multiplying by $w = u - \overline{u}$, integrating over $\Omega$, using \cref{L:TimeDerivAndIntegration} for the time derivative, and $u(t) \in H^2(\Omega)$, $t > 0$, for the spatial integrations by parts, leads to
\begin{align}\label{e:BasicEnergyEq}
\begin{split}
\frac{1}{2} \diff{}{t} &\norm{w}_{L^2}^2
+ \nu \norm{\nabla u}_{L^2}^2 \\
&= - (w \cdot \nabla \overline{u}, w)
+ \nu(\nabla u, \nabla \overline{u})
- \nu \int_\Gamma (\nabla u \cdot \mathbf{n}) \cdot \overline{u}.
\end{split}
\end{align}
Now,
\begin{align*}
\begin{split}
(\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \overline{u}
&= 2 (\frac{\ensuremath{\bm{n}abla} u - (\ensuremath{\bm{n}abla} u)^T}{2} \cdot \mathbf{n})
\cdot \overline{u}
+ ((\ensuremath{\bm{n}abla} u)^T \cdot \bm{n}) \cdot \overline{u} \\
&= 2 (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}
+ ((\ensuremath{\bm{n}abla} u)^T \cdot \bm{n}) \cdot \overline{u}.
\end{split}
\end{align*}
But,
\begin{align*}
\int_\Gamma &((\ensuremath{\bm{n}abla} u)^T \cdot \bm{n}) \cdot \overline{u}
= \int_\Gamma \ensuremath{\partial}_i u^j n^j \overline{u}^i
= \frac{1}{2} \int_\Gamma \ensuremath{\partial}_i(u \cdot \bm{n}) \overline{u}^i \\
&= \frac{1}{2} \int_\Gamma \ensuremath{\bm{n}abla} (u \cdot \bm{n}) \cdot \overline{u}
= 0,
\end{align*}
since $u \cdot \bm{n} = 0$ on $\Gamma$ and $\overline{u}$ is tangent to $\Gamma$. Hence,
\begin{align}\label{e:gradunuolEq}
\int_\Gamma (\nabla u \cdot \mathbf{n}) \cdot \overline{u}
= 2 \int_\Gamma (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}
\end{align}
and
\begin{align*}
\frac{1}{2} \diff{}{t} &\bm{n}orm{w}_{L^2}^2
+ \bm{n}u \bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2}^2 \\
&= - (w \cdot \ensuremath{\bm{n}abla} \overline{u}, w)
+ \bm{n}u(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u})
- 2 \bm{n}u \int_\Gamma (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}.
\end{align*}
By virtue of \cref{L:TimeDerivAndIntegration}, we can integrate over time to give
\begin{align}\label{e:VVArg}
\begin{split}
&\norm{w(T)}_{L^2}^2
+ 2 \nu \int_0^T \norm{\nabla u}_{L^2}^2
= - 2 \int_0^T (w \cdot \nabla \overline{u}, w)
+ 2 \nu \int_0^T (\nabla u, \nabla \overline{u}) \\
&\qquad - 4 \nu \int_0^T \int_\Gamma (\omega(u)
\cdot \mathbf{n}) \cdot \overline{u}.
\end{split}
\end{align}
In two dimensions, we have (see (4.2) of \cite{KNavier})
\begin{align}\ensuremath{\lambda}bel{e:gradunomega}
(\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \overline{u}
= ((\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \BoldTau) (\overline{u} \cdot \BoldTau)
= \omega(u) \, \overline{u} \cdot \BoldTau,
\end{align}
and \cref{e:VVArg} can be written
\begin{align}\label{e:VVArg2D}
\begin{split}
&\norm{w(T)}_{L^2}^2
+ 2 \nu \int_0^T \norm{\nabla u}_{L^2}^2
= - 2 \int_0^T (w \cdot \nabla \overline{u}, w)
+ 2 \nu \int_0^T (\nabla u, \nabla \overline{u}) \\
&\qquad - 2 \nu \int_0^T \int_\Gamma \omega(u) \, \overline{u} \cdot \BoldTau.
\end{split}
\end{align}
The sufficiency of \refE{BoundaryCondition2D} for the vanishing viscosity limit ($VV$) to hold (and hence for the other conditions in \cref{T:VVEquiv} to hold) follows from the bounds,
\begin{align*}
\abs{(w \cdot \ensuremath{\bm{n}abla} \overline{u}, w)}
&\le \bm{n}orm{\ensuremath{\bm{n}abla} \overline{u}}_{L^\ensuremath{\infty}([0, T] \times \Omega)}
\bm{n}orm{w}_{L^2}^2
\le C \bm{n}orm{w}_{L^2}^2, \\
\bm{n}u \int_0^T \abs{(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u})}
&\le \sqrt{\bm{n}u} \bm{n}orm{\ensuremath{\bm{n}abla} \overline{u}}_{L^2([0, T] \times \Omega)}
\sqrt{\bm{n}u} \bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2([0, T] \times \Omega)}
\le C \sqrt{\bm{n}u},
\end{align*}
and Gronwall's inequality.
Proving the necessity of \refE{BoundaryCondition2D} is just as easy. Assume that $(VV)$ holds, so that $\bm{n}orm{w}_{L^\ensuremath{\infty}([0, T]; L^2(\Omega))} \to 0$. Then by the two inequalities above, the first two terms on the right-hand side of \refE{VVArg2D} vanish with the viscosity as does the first term on the left-hand side. The second term on the left-hand side vanishes as proven in \cite{Kato1983} (it follows from a simple argument using the energy equalities for ($NS$) and ($E$)). It follows that, of necessity, \refE{BoundaryCondition2D} holds.
\Ignore{
The reason this argument is formal is twofold. First, $w$ is not a valid test function in the weak formulation of the Navier-Stokes equations because it does not vanish on the boundary and because it varies in time. Beyond time zero the solution has as much regularity as the boundary allows \ToDo{But only up to a finite time; this is a factor to deal with}, so this is a problem only when trying to reach a conclusion after integrating in time down to time zero. This is the second reason the argument is formal: in obtaining \refE{VVArg} we act as though $w$ is strongly continuous in time down to time zero. This is true in 2D, where this part of the argument is not formal, but only weak continuity is known in higher dimensions. (This is also the reason we need assume no additional regularity for the initial velocity in 2D.)
To get around these difficulties, we derive \refE{VVArg} rigorously.
Choose a sequence $(h_n)$ of nonnegative functions in $C_0^\ensuremath{\infty}((0, T])$ such that $h_n \equiv 1$ on the interval $[n^{-1}, T]$ with $h_n$ strictly increasing on $[0, n^{-1}]$. Then $h'_n = g_n \ge 0$ with $g_n \equiv 0$ on $[n^{-1}, T]$. Observe that $\smallnorm{g_n}_{L^1([0, T])} = 1$.
Letting $w = u - \overline{u}$ as before, because $h_n w$ vanishes at time zero we can legitimately subtract ($EE$) from ($NS$), multiply by $h_n w$, and integrate over $\Omega$ to obtain, in place of \refE{BasicEnergyEq},
\begingroup
\ensuremath{\alpha}lowdisplaybreaks
\begin{align*}
\begin{split}
\frac{1}{2} \diff{}{t} &\smallnorm{h_n^{1/2} w}_{L^2}^2
- \frac{1}{2} \int_\Omega h_n'(t) \abs{w}^2
+ \bm{n}u (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} (h_n u)) \\
&= - (w \cdot \ensuremath{\bm{n}abla} \overline{u}, h_n w)
- (u \cdot \ensuremath{\bm{n}abla} w, h_n w)
+ \bm{n}u(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} (h_n \overline{u})) \\
&\qquad\qquad
- \bm{n}u \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \mathbf{n})
\cdot (h_n \overline{u}) \\
&= - (w \cdot \ensuremath{\bm{n}abla} \overline{u}, h_n w)
+ \bm{n}u(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} (h_n \overline{u}))
- \bm{n}u \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \mathbf{n})
\cdot (h_n \overline{u}),
\end{split}
\end{align*}
\endgroup
since $(u \cdot \ensuremath{\bm{n}abla} w, h_n w) = h_n(u \cdot \ensuremath{\bm{n}abla} w, w) = 0$.
Integrating in time gives
\begingroup
\ensuremath{\alpha}lowdisplaybreaks
\begin{align*}
&\smallnorm{w(T)}_{L^2}^2
- \smallnorm{h_n^{1/2} w(0)}_{L^2}^2
- \int_0^T \int_\Omega h_n' \abs{w}^2
+ 2 \bm{n}u \int_0^T (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} (h_n u)) \\
&\qquad
= - 2 \int_0^T (w \cdot \ensuremath{\bm{n}abla} \overline{u}, h_n w)
+ 2 \int_0^T \bm{n}u(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} (h_n \overline{u})) \\
&\qquad\qquad\qquad\qquad
- 2 \int_0^T \bm{n}u \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \mathbf{n})
\cdot (h_n \overline{u}).
\end{align*}
\endgroup
The second term on the left hand side vanishes because $h_n(0) = 0$. For the four terms containing $h_n$ without derivative, the $h_n$ becomes $1$ in the limit as $n \to \ensuremath{\infty}$. This leaves the one term containing $h_n'$.
Now, $\overline{u}(t)$ is continuous in $H$ and in 2D $u(t)$ is also continuous in $H$. Hence, in 2D $w(t)$ is continuous in $H$. In 3D if we assume that $u_0 \in V$ then $u(t)$ is continuous in $H$ (in fact, in $V$) up to some finite time, $T^* > 0$. Hence, in 3D, $w(t)$ is continuous in $H$ on $[0, T^*)$; $T^*$ may depend on $\bm{n}u$, but we will take $n$ to 0 before taking $\bm{n}u$ to $0$, so this will not matter. Hence, $F(s) = \bm{n}orm{w(s)}^2$ is continuous on $[0, T^*)$ with $T^* = T$ in 2D, so
\ToDo{Does the $0 \le$ really hold? I don't think so.}
\begin{align*}
0
&\le \lim_{n \to \ensuremath{\infty}} \int_0^T \int_\Omega h_n' \abs{w}^2
= \lim_{n \to \ensuremath{\infty}} \int_0^T g_n(s) F(s) \, ds \\
&= \lim_{n \to \ensuremath{\infty}} \int_0^{\frac{1}{n}} g_n(s) F(s) \, ds
\le \bm{n}orm{g_n}_{L^1} \bm{n}orm{F}_{L^\ensuremath{\infty} \pr{0, \frac{1}{n}}}
= 0.
\end{align*}
This gives us \refE{VVArg}.
}
\end{proof}
\begin{remark}\ensuremath{\lambda}bel{R:ROC}
It follows from the proof of \refT{BoundaryIffCondition} that in 2D,
\begin{align*}
\bm{n}orm{u(t) - \overline{u}(t)}
\le C \brac{\bm{n}u^{\frac{1}{4}}
+ \abs{\bm{n}u \int_0^T \int_\Gamma \omega \, \overline{u}
\cdot \BoldTau}^{\frac{1}{2}}} e^{C t}.
\end{align*}
Suppose that $\overline{u}_0$ is smooth enough that
$\Delta \overline{u} \in L^\ensuremath{\infty}([0, T] \times \Omega)$.
Then before integrating to obtain \cref{e:BasicEnergyEq} we
can replace the term $\bm{n}u (\Delta u, w)$ with
$\bm{n}u (\Delta w, w) + \bm{n}u (\Delta \overline{u}, w)$.
Integrating by parts gives
\begin{align*}
\bm{n}u (\Delta w, w)
= \bm{n}u \bm{n}orm{\ensuremath{\bm{n}abla} w}_{L^2}^2,
\end{align*}
and we also have,
\begin{align*}
\bm{n}u (\Delta \overline{u}, w)
\le \bm{n}u \bm{n}orm{\Delta \overline{u}}_{L^2} \bm{n}orm{w}_{L^2}
\le \frac{\bm{n}u^2}{2} \bm{n}orm{\Delta \overline{u}}_{L^2}^2
+ \frac{1}{2} \bm{n}orm{w}_{L^2}^2.
\end{align*}
This leads to the bound,
\begin{align*}
\bm{n}orm{u(t) - \overline{u}(t)}_{L^2}
\le C \brac{\bm{n}u
+ \abs{\bm{n}u \int_0^T \int_\Gamma \omega \, \overline{u}
\cdot \BoldTau}^{\frac{1}{2}}} e^{C t}
\end{align*}
(and also $\bm{n}orm{u - \overline{u}}_{L^2(0, T; H^1)} \le C \bm{n}u^{1/2} e^{Ct}$).
Thus, the bound we obtain on the rate of convergence in $\bm{n}u$ is never better
than $O(\bm{n}u^{1/4})$
unless the initial data is smooth enough, in which case it is never better
than $O(\bm{n}u)$. In any case, only in exceptional circumstances would the rate
not be determined by the integral coming from the boundary term.
\end{remark}
\begin{remark}\ensuremath{\lambda}bel{R:BoundaryConditionInRd}
Formally, the argument in the proof of \cref{T:BoundaryIffCondition} would give in any dimension the condition
\begin{align*}
\bm{n}u \int_0^T \int_\Gamma (\omega(u) \cdot \mathbf{n})
\cdot \overline{u}
\to 0
\text{ as } \bm{n}u \to 0.
\end{align*}
In 3D, one has $\omega(u) \cdot \bm{n} = (1/2) \vec{\omega} \times \bm{n}$, so the condition could be written
\begin{align*}
\bm{n}u \int_0^T \int_\Gamma (\vec{\omega} \times \bm{n})
\cdot \overline{u}
= \bm{n}u \int_0^T \int_\Gamma \vec{\omega} \cdot
(\overline{u} \times \bm{n})
\to 0
\text{ as } \bm{n}u \to 0,
\end{align*}
where $\vec{\omega}$ is the 3-vector form of the curl of $u$. We can only be assured, however, that $u(t) \in V$ for all $t > 0$, which is insufficient to define $\vec{\omega}$ on the boundary. (The normal component could be defined, though, since both $\vec{\omega}(t)$ and $\dv \vec{\omega}(t) = 0$ lie in $L^2$.) Even assuming more compatible initial data in 3D, such as $u_0 \in V$, we can only conclude that $u(t) \in H^2$ for a short time, with that time decreasing to $0$ as $\bm{n}u \to 0$ (in the presence of forcing; see, for instance, Theorem 9.9.4 of \cite{FoiasConstantin1988}).
\end{remark}
\Ignore{
\begin{remark}\ensuremath{\lambda}bel{R:BoundaryCondition2DRd}
Since $\overline{u} \times \bm{n}$ is a tangent vector, the second form of the condition in \refE{BoundaryCondition3D} shows that it is only the tangential components of $\vec{\omega}$ that matter in this condition. More specifically, only the tangential component perpendicular to $\overline{u}$ matters.
\end{remark}
}
There is nothing deep about the condition in \refE{BoundaryCondition2D}, but what it says is that there are two mechanisms by which the vanishing viscosity limit can hold: Either the blowup of $\omega$ on the boundary happens slowly enough that
\begin{align}\ensuremath{\lambda}bel{e:nuL1Bound}
\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}
\to 0
\text{ as } \bm{n}u \to 0
\end{align}
or the vorticity for ($NS$) is generated on the boundary in such a way as to oppose the sign of $\overline{u} \cdot \BoldTau$. (This latter line of reasoning is followed in \cite{CKV2014}, leading to a new condition in a boundary layer slightly thicker than that of Kato.) In the second case, it could well be that vorticity for $(NS)$ blows up fast enough that \refE{nuL1Bound} does not hold, but cancellation in the integral in \refE{BoundaryCondition2D} allows that condition to hold.
\begin{lemma}\ensuremath{\lambda}bel{L:TimeDerivAndIntegration}
Assume that $v \in L^\ensuremath{\infty}(0, T; V)$ with $\ensuremath{\partial}_t v \in L^2(0, T; V')$ as well as
$\sqrt{t} \ensuremath{\partial}_t v \in L^2(0, T; H)$.
Then $v \in C([0, T]; H)$,
\begin{align*}
\frac{1}{2} \diff{}{t} \norm{v}_{L^2}^2
= (\partial_t v, v) \text{ in } \mathcal{D}'((0, T))
\text{ with } \sqrt{t} (\partial_t v, v) \in L^1(0, T),
\end{align*}
and
\begin{align*}
\int_0^T \diff{}{t} \bm{n}orm{v(t)}_{L^2}^2 \, dt
= \bm{n}orm{v(T)}_{L^2}^2 - \bm{n}orm{v(0)}_{L^2}^2.
\end{align*}
\end{lemma}
\begin{proof}
Having $v \in L^2(0, T; V)$ with $\partial_t v \in L^2(0, T; V')$ is enough to conclude that $(\partial_t v, v) = (1/2) (d/dt) \norm{v}_{L^2}^2$ in $\mathcal{D}'((0, T))$ and $v \in C([0, T]; H)$ (see Lemma III.1.2 of \cite{T2001}).
Let $T_0 \in (0, T)$. Our stronger assumptions also give $(d/dt) \bm{n}orm{v}_{L^2}^2 = 2(\ensuremath{\partial}_t v, v) \in L^1(T_0, T)$. Hence, by the fundamental theorem of calculus for Lebesgue integration (Theorem 3.35 of \cite{Folland1999}) it follows that
\begin{align*}
\int_{T_0}^T \diff{}{t} \bm{n}orm{v}_{L^2}^2 \, dt
= \bm{n}orm{v(T)}_{L^2}^2 - \bm{n}orm{v(T_0)}_{L^2}^2.
\end{align*}
But $v$ is continuous in $H$ down to time zero, so taking $T_0$ to 0 completes the proof.
\end{proof}
\section{Examples where the 2D boundary condition holds}\ensuremath{\lambda}bel{S:Examples}
\bm{n}oindent All examples where the vanishing viscosity limit is known to hold have some kind of symmetry---in geometry of the domain or the initial data---or have some degree of analyticity.
Since \refE{BoundaryCondition2D} is a necessary condition, it holds for all of these examples. But though it is also a sufficient condition, it is not always practicable to apply it to establish the limit. We give here examples in which it is practicable. This includes all known 2D examples having symmetry. In all explicit cases, the initial data is a stationary solution to the Euler equations.
\Example{1} Let $\overline{u}$ be any solution to the Euler equations for which $\overline{u} = 0$ on the boundary. The integral in \refE{BoundaryCondition2D} then vanishes for all $\bm{n}u$. From \refR{ROC}, the rate of convergence (here, and below, in $\bm{n}u$) is $C \bm{n}u^{1/4}$ or, for smoother initial data, $C \bm{n}u$.
\Example{1a} Example 1 is not explicit, since we immediately encounter the question of what (nonzero) examples of such steady solutions there are. As a first example, let $D$ be the disk of radius $R > 0$ centered at the origin and let $\omega_0 \in L^\ensuremath{\infty}(D)$ be radially symmetric. Then the associated velocity field, $u_0$, is given by the Biot-Savart law. By exploiting the radial symmetry, $u_0$ can be written,
\begin{align}\label{e:u0Circular}
u_0(x)
&= \frac{x^\perp}{\abs{x}^2}
\int_0^{\abs{x}} \omega_0(r) r \, dr
= \frac{x^\perp}{2 \pi \abs{x}^2}
\int_{B(\abs{x})} \omega_0,
\end{align}
where $B({\abs{x}})$ is the ball of radius $\abs{x}$ centered at the origin and where we abuse notation a bit in the writing of $\omega_0(r)$. Since $u_0$ is perpendicular to $\nabla \omega_0$ it follows from the vorticity form of the Euler equations that $\overline{u} \equiv u_0$ is a stationary solution to the Euler equations.
Now assume that the total mass of vorticity,
\begin{align}\ensuremath{\lambda}bel{e:m}
m := \int_{\ensuremath{\BB{R}}^2} \omega_0,
\end{align}
is zero. We see from \refE{u0Circular} that on $\Gamma$,
$u_0 = \frac{m}{2 \pi R^2} x^\perp = 0$,
giving a steady solution to the Euler equations with velocity vanishing on the boundary.
(Note that $m = 0$ is equivalent to $u_0$ lying in the space $V$ of divergence-free vector fields vanishing on the boundary.)
\Example{1b} Let $\omega_0 \in L^1 \cap L^\ensuremath{\infty}(\ensuremath{\BB{R}}^2)$ be a compactly supported radially symmetric initial vorticity for which the total mass of vorticity vanishes; that is, $m = 0$. Then the expression for $u_0$ in \refE{u0Circular}, which continues to hold throughout all of $\ensuremath{\BB{R}}^2$, shows that $u_0$ vanishes outside of the support of its vorticity.
If we now restrict such a radially symmetric $\omega_0$ so that its support lies inside a domain (even allowing the support of $\omega_0$ to touch the boundary of the domain) then the velocity $u_0$ will vanish on the boundary. In particular, $u_0 \cdot \bm{n} = 0$ so, in fact, $u_0$ is a stationary solution to the Euler equations in the domain, being already one in the whole plane. In fact, one can use a superposition of such radially symmetric vorticities, as long as their supports do not overlap, and one will still have a stationary solution to the Euler equations whose velocity vanishes on the boundary.
Such a superposition is called a \textit{superposition of confined eddies} in \cite{FLZ1999A}, where their properties in the full plane, for lower regularity than we are considering, are analyzed. These superpositions provide a fairly wide variety of examples in which the vanishing viscosity limit holds. It might be interesting to investigate the precise manner in which the vorticity converges in the vanishing viscosity limit; that is, whether it is possible to do better than the ``vortex sheet''-convergence in condition $(E_2)$ of \cite{K2008VVV}.
In \cite{Maekawa2013}, Maekawa considers initial vorticity supported away from the boundary in a half-plane. We note that the analogous result in a disk, even were it shown to hold, would not cover this Example 1b when the support of the vorticity touches the boundary.
\Example{2 [2D shear flow]} Let $\phi$ solve the heat equation,
\begin{align}\ensuremath{\lambda}bel{e:HeatShear}
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t \phi(t, z) = \bm{n}u \ensuremath{\partial}_{zz} \phi(t, z)
& \text{on } [0, \ensuremath{\infty}) \times [0, \ensuremath{\infty}), \\
\phi(t, 0) = 0
& \text{ for all } t > 0, \\
\phi(0) = \phi_0. &
\end{array}
\right.
\end{align}
Assume for simplicity that $\phi_0 \in W^{1, \infty}((0, \infty))$.
Let $u_0 = (\phi_0, 0)$ and $u(t, x) = (\phi(t, x_2), 0)$.
Let $\Omega = [-L, L] \times (0, \ensuremath{\infty})$ be periodic in the $x_1$-direction. Then $u_0 \cdot \bm{n} = 0$ and $u(t) = 0$ for all $t > 0$ on $\ensuremath{\partial} \Omega$ and
\begin{align*}
\ensuremath{\partial}_t u(t, x)
&= \bm{n}u(\ensuremath{\partial}_{x_2 x_2} \phi(t, x_2), 0)
= \bm{n}u \Delta u(t, x), \\
(u \cdot \ensuremath{\bm{n}abla} u)(t, x)
&=
\matrix{\ensuremath{\partial}_1 u^1 & \ensuremath{\partial}_1 u^2}
{\ensuremath{\partial}_2 u^1 & \ensuremath{\partial}_2 u^2}
\matrix{u^1}{u^2}
=
\matrix{0 & 0}{\ensuremath{\partial}_2 \phi(t, x_2) & 0}
\matrix{\phi(t, x_2)}{0} \\
&=
\matrix{0}{\partial_2 \phi(t, x_2) \phi(t, x_2)}
=
\frac{1}{2} \nabla \phi(t, x_2)^2.
\end{align*}
It follows that $u$ solves the Navier-Stokes equations on $\Omega$ with pressure, $p = - \frac{1}{2} \phi(t, x_2)^2$.
Similarly, letting $\overline{u} \equiv u_0$, we have $\partial_t \overline{u} = 0$, $\overline{u} \cdot \nabla \overline{u} = \frac{1}{2} \nabla \phi_0^2$ so $\overline{u} \equiv u_0$ is a stationary solution to the Euler equations.
Now, $\omega = \ensuremath{\partial}_1 u^2 - \ensuremath{\partial}_2 u^1 = - \ensuremath{\partial}_2 \phi(t, x_2)$ so
\begin{align*}
\int_\Gamma \omega \, \overline{u} \cdot \BoldTau
&= - \int_\Gamma \ensuremath{\partial}_2 \phi(t, x_2)|_{x_2 = 0}
\phi_0(0)
= - \phi_0(0)\int_{-L}^L
\ensuremath{\partial}_{x_2} \phi(t, x_2)|_{x_2 = 0} \, d x_1 \\
&= - 2 L \phi_0(0) \partial_{x_2} \phi(t, x_2)|_{x_2 = 0}.
\end{align*}
The explicit solution to \refE{HeatShear} is
\begin{align*}
\phi(t, z)
&= \frac{1}{\sqrt{4 \pi \bm{n}u t}}
\int_0^\ensuremath{\infty} \brac{e^{-\frac{(z - y)^2}{4 \bm{n}u t}}
- e^{-\frac{(z + y)^2}{4 \bm{n}u t}}} \phi_0(y) \, dy
\end{align*}
(see, for instance, Section 3.1 of \cite{StraussPDE}). Thus,
\begingroup
\ensuremath{\alpha}lowdisplaybreaks
\begin{align*}
\partial_z \phi(t, z)|_{z = 0}
&= \frac{2}{4 \nu t \sqrt{4 \pi \nu t}}
\int_0^\infty y \brac{e^{-\frac{y^2}{4 \nu t}}
+ e^{-\frac{y^2}{4 \nu t}}} \phi_0(y) \, dy \\
&= \frac{1}{\nu t \sqrt{4 \pi \nu t}}
\int_0^\infty y e^{-\frac{y^2}{4 \nu t}} \phi_0(y) \, dy
\\
&= \frac{1}{\nu t \sqrt{4 \pi \nu t}}
\int_0^\infty (- 2 \nu t) \diff{}{y}
e^{-\frac{y^2}{4 \nu t}} \phi_0(y) \, dy \\
&= -\frac{1}{\sqrt{\pi \nu t}}
\int_0^\infty \diff{}{y}
e^{-\frac{y^2}{4 \nu t}} \, \phi_0(y) \, dy \\
&= \frac{1}{\sqrt{\pi \nu t}}
\brac{\phi_0(0) + \int_0^\infty
e^{-\frac{y^2}{4 \nu t}} \phi_0'(y) \, dy}
\end{align*}
\endgroup
so that
\begin{align*}
\abs{\ensuremath{\partial}_{x_2} \phi(t, x_2)|_{x_2 = 0}}
\le \frac{C}{\sqrt{\bm{n}u t}}.
\end{align*}
We conclude that
\begin{align*}
\abs{\bm{n}u \int_0^T \int_\Gamma \omega \, \overline{u} \cdot \BoldTau}
\le C \sqrt{\bm{n}u} \int_0^T t^{-1/2} \, dt
= C \sqrt{\bm{n}u T}.
\end{align*}
The condition in \refE{BoundaryCondition2D} thus holds (as does \cref{e:nuL1Bound}). From \refR{ROC}, the rate of convergence is $C \bm{n}u^{\frac{1}{4}}$ (even for smoother initial data).
\Example{3} Consider Example 1a of radially symmetric vorticity in the unit disk, but without the assumption that $m$ given by \refE{m} vanishes. This example goes back at least to Matsui in \cite{Matsui1994}. The convergence also follows from the sufficiency of the Kato-like conditions established in \cite{TW1998}, as pointed out in \cite{W2001}. A more general convergence result in which the disk is allowed to impulsively rotate for all time appears in \cite{FLMT2008}. A simple argument to show that the vanishing viscosity limit holds is given in Theorem 6.1 \cite{K2006Disk}, though without a rate of convergence. Here we prove it with a rate of convergence by showing that the condition in \refE{BoundaryCondition2D} holds.
Because the nonlinear term disappears, the vorticity satisfies the heat equation, though with Dirichlet boundary conditions not on the vorticity but on the velocity:
\begin{align}\ensuremath{\lambda}bel{e:RadialHeat}
\left\{
\begin{array}{rl}
\ensuremath{\partial}_t \omega = \bm{n}u \Delta \omega
& \text{in } \Omega, \\
u = 0
& \text{on } \Gamma.
\end{array}
\right.
\end{align}
Unless $u_0 \in V$, however, $\omega \bm{n}otin C([0, T]; L^2)$, so we cannot easily make sense of the initial condition this way.
An orthonormal basis of eigenfunctions satisfying these boundary conditions is
\begin{align*}
u_k(r, \theta)
&= \frac{J_1(j_{1k} r)}{\pi^{1/2}\abs{J_0(j_{1k})}}
\ensuremath{\widehat}{e}_\theta,
\quad
\omega_k(r, \theta)
= \frac{j_{1k} J_0(j_{1k} r)}{\pi^{1/2}\abs{J_0(j_{1k})}},
\end{align*}
where $J_0$, $J_1$ are Bessel functions of the first kind and $j_{1k}$ is the $k$-th positive root of $J_1(x) = 0$.
(See \cite{K2006Disk} or \cite{LR2002}.) The $(u_k)$ are complete in $H$ and in $V$ and are normalized so that\footnote{This differs from the normalization in \cite{K2006Disk}, where $\bm{n}orm{u_k}_H = j_{1k}^{-1}$, $\bm{n}orm{\omega_k}_{L^2} = 1$.}
\begin{align*}
\bm{n}orm{u_k}_H = 1,
\quad
\bm{n}orm{\omega_k}_{L^2} = j_{1k}.
\end{align*}
Assume that $u_0 \in H \cap H^1$. Then
\begin{align*}
u_0 = \sum_{k = 1}^\ensuremath{\infty} a_k u_k,
\quad
\smallnorm{u_0}_H^2
= \sum_{k = 1}^\ensuremath{\infty} a_k^2
< \ensuremath{\infty}.
\end{align*}
(But,
\begin{align*}
\smallnorm{u_0}_V^2
= \sum_{k = 1}^\ensuremath{\infty} a_k^2 j_{1k}^2
= \ensuremath{\infty}
\end{align*}
unless $u_0 \in V$.) We claim that
\begin{align*}
u(t)
= \sum_{k = 1}^\ensuremath{\infty} a_k e^{- \bm{n}u j_{1k}^2 t} u_k
\end{align*}
provides a solution to the Navier-Stokes equations, ($NS$). To see this, first observe that $u \in C([0, T]; H)$, so $u(0) = u_0$ makes sense as an initial condition. Also, $u(t) \in V$ for all $t > 0$. Next observe that
\begin{align*}
\omega(t)
:= \omega(u(t))
= \sum_{k = 1}^\ensuremath{\infty} a_k e^{- \bm{n}u j_{1k}^2 t} \omega_k
\end{align*}
for all $t > 0$, this sum converging in $H^n$ for all $n \ge 0$. Since each term satisfies \cref{e:RadialHeat} so does the sum. Taken together, this shows that $\omega$ satisfies \cref{e:RadialHeat} and thus $u$ solves ($NS$).
\Ignore{
\begin{align*}
\sum_{k = 1}^\ensuremath{\infty}
a_k^2 e^{- 2 \bm{n}u j_{1k}^2 t} \bm{n}orm{\omega_k}_{L^2}^2
=
\sum_{k = 1}^\ensuremath{\infty}
a_k^2 j_{1k} e^{- 2 \bm{n}u j_{1k}^2 t}
< \ensuremath{\infty}
\end{align*}
for all $t > 0$
}
The condition in \refE{BoundaryCondition2D} becomes
\begin{align*}
\bm{n}u \int_0^T & \int_\Gamma \omega \, \overline{u} \cdot \BoldTau
= \bm{n}u \sum_{k = 1}^\ensuremath{\infty} \int_0^T \int_\Gamma
a_k e^{- \bm{n}u j_{1k}^2 t} \omega_k
\, \overline{u} \cdot \BoldTau \, dt \\
&= \bm{n}u \sum_{k = 1}^\ensuremath{\infty} \int_0^T
a_k e^{- \bm{n}u j_{1k}^2 t} \omega_k|_{r = 1}
\int_\Gamma \overline{u} \cdot \BoldTau \, dt\\
&= m \bm{n}u \sum_{k = 1}^\ensuremath{\infty} a_k
\frac{j_{1k} J_0(j_{1k})}
{\pi^{1/2}\abs{J_0(j_{1k})}}
\int_0^T
e^{- \bm{n}u j_{1k}^2 t} \, dt.
\end{align*}
In the final equality, we used
\begin{align*}
\int_\Gamma \overline{u} \cdot \BoldTau
= - \int_\Gamma \overline{u}^\perp \cdot \bm{n}
= - \int_\Omega \dv \overline{u}^\perp
= \int_\Omega \overline{\omega}
= m.
\end{align*}
(Because vorticity is transported by the Eulerian flow, $m$ is constant in time.)
Then,
\begingroup
\ensuremath{\alpha}lowdisplaybreaks
\begin{align*}
&\abs{\bm{n}u \int_0^T \int_\Gamma \omega \, \overline{u} \cdot \BoldTau}
\le \abs{m} \bm{n}u \sum_{k = 1}^\ensuremath{\infty}
\frac{\abs{a_k}}{\pi^{1/2}} j_{1k}
\int_0^T
e^{- \bm{n}u j_{1k}^2 t} \, dt \\
&\qquad
= \abs{m} \bm{n}u \sum_{k = 1}^\ensuremath{\infty}
\frac{\abs{a_k}}{\pi^{1/2}} j_{1k}
\frac{1 - e^{- \bm{n}u j_{1k}^2 T}}{\bm{n}u j_{1k}^2} \\
&\qquad
\le \frac{\abs{m}}{\pi^{\frac{1}{2}}}
\pr{\sum_{k = 1}^\ensuremath{\infty} a_k^2}^{\frac{1}{2}}
\pr{\sum_{k = 1}^\ensuremath{\infty}
\frac{(1 - e^{- \bm{n}u j_{1k}^2 T})^2}{j_{1k}^2}}
^{\frac{1}{2}} \\
&\qquad
= \frac{\abs{m}}{\pi^{\frac{1}{2}}}
\smallnorm{u_0}_H
\pr{\sum_{k = 1}^\ensuremath{\infty}
\frac{(1 - e^{- \bm{n}u j_{1k}^2 T})^2}{j_{1k}^2}}
^{\frac{1}{2}}.
\end{align*}
\endgroup
Classical bounds on the zeros of Bessel functions give $1 + k < j_{1k} \le \pi(\frac{1}{2} + k)$ (see, for instance, Lemma A.3 of \cite{K2006Disk}). Hence, with $M = (\bm{n}u T)^{-\ensuremath{\alpha}}$, $\ensuremath{\alpha} > 0$ to be determined, we have
\begingroup
\ensuremath{\alpha}lowdisplaybreaks
\begin{align*}
\sum_{k = 1}^\ensuremath{\infty}
&\frac{(1 - e^{- \bm{n}u j_{1k}^2 T})^2}{j_{1k}^2}
\le C \sum_{k = 1}^\ensuremath{\infty}
\frac{(1 - e^{- \bm{n}u k^2 T})^2}{k^2} \\
&\le (1 - e^{- \nu T})^2
+ \int_1^M
\frac{(1 - e^{- \nu x^2 T})^2}{x^2} \, dx
+ \int_M^\infty
\frac{(1 - e^{- \nu x^2 T})^2}{x^2} \, dx \\
&\le \nu^2 T^2
+ \nu^2 T^2 \int_1^M
\frac{x^4}{x^2} \, dx
+ \int_M^\infty
\frac{1}{x^2} \, dx \\
&\le \bm{n}u^2 T^2
+ \bm{n}u^2 T^2 \frac{1}{3} \pr{M^3 - 1}
+ \frac{1}{M}
\le \bm{n}u^2 T^2
+ \bm{n}u^2 T^2 M^3
+ \frac{1}{M} \\
&= \bm{n}u^2 T^2
+ \bm{n}u^2 T^2 \bm{n}u^{-3 \ensuremath{\alpha}} T^{- 3 \ensuremath{\alpha}}
+ (\bm{n}u T)^\ensuremath{\alpha}
= \bm{n}u^2 T^2
+ (\bm{n}u T)^{2 - 3 \ensuremath{\alpha}}
+ (\bm{n}u T)^\ensuremath{\alpha}
\end{align*}
\endgroup
as long as $\bm{n}u M^2 T \le 1$ (used in the third inequality); that is, as long as
\begin{align}\ensuremath{\lambda}bel{e:albetaReq}
(\bm{n}u T)^{1 - 2 \ensuremath{\alpha}} \le 1.
\end{align}
Thus \cref{e:BoundaryCondition2D} holds (as does \cref{e:nuL1Bound}), so ($VV$) holds.
The rate of convergence in ($VV$) is optimized when $(\bm{n}u T)^{2 - 3 \ensuremath{\alpha}} = (\bm{n}u T)^\ensuremath{\alpha}$, which occurs when $\ensuremath{\alpha} = \frac{1}{2}$. The condition in \refE{albetaReq} is then satisfied with equality. \refR{ROC} then gives a rate of convergence in the vanishing viscosity limit of $C \bm{n}u^{\frac{1}{4}}$ (even for smoother initial data), except in the special case $m = 0$, which we note reduces to Example 1a.
\ensuremath{\BB{R}}eturnExample{1a} Let us apply our analysis of Example 3 to the special case of Example 1a, in which $u_0 \in V$. Now, on the boundary,
\begin{align*}
(\ensuremath{\partial}_t u + u \cdot \ensuremath{\bm{n}abla} u + \ensuremath{\bm{n}abla} p) \cdot \BoldTau
= \bm{n}u \Delta u \cdot \BoldTau
= \bm{n}u \Delta u^\perp \cdot (- \bm{n})
= - \bm{n}u \ensuremath{\bm{n}abla}^\perp \omega \cdot \bm{n}.
\end{align*}
But $\ensuremath{\bm{n}abla} p \equiv 0$ so the left-hand side vanishes. Hence, the vorticity satisfies homogeneous Neumann boundary conditions for positive time. (This is an instance of Lighthill's formula.) Since the nonlinear term vanishes, in fact, $\omega$ satisfies the heat equation, $\ensuremath{\partial}_t \omega = \bm{n}u \Delta \omega$ with homogeneous Neumann boundary conditions and hence $\omega \in C([0, T]; L^2(\Omega))$.
Moreover, multiplying $\ensuremath{\partial}_t \omega = \bm{n}u \Delta \omega$ by $\omega$ and integrating gives
\begin{align*}
\norm{\omega(t)}_{L^2}^2
+ 2 \nu \int_0^t \norm{\nabla \omega(s)}_{L^2}^2 \, ds
= \norm{\omega_0}_{L^2}^2.
\end{align*}
We conclude that the $L^2$-norm of $\omega$, and so the $L^p$-norms for all $p \le 2$, are bounded in time uniformly in $\bm{n}u$. (In fact, this holds for all $p \in [1, \ensuremath{\infty}]$. This conclusion is not incompatible with \refT{VorticityNotBounded}, since $\overline{u} \equiv 0$ on $\Gamma$.)
This argument for bounding the $L^p$-norms of the vorticity fails for Example 3 because the vorticity is no longer continuous in $L^2$ down to time zero unless $u_0 \in V$. It is shown in \cite{FLMT2008} (and see \cite{GKLMN14}) that such control is nonetheless obtained for the $L^1$ norm.
\section{On a result of Bardos and Titi}\ensuremath{\lambda}bel{S:BardosTiti}
\bm{n}oindent
Bardos and Titi in \cite{BardosTiti2013a, Bardos2014Private}, also starting from, essentially, \refE{VVArg}
make the observation that, in fact, for the vanishing viscosity limit to hold, it is necessary and sufficient that $\bm{n}u \omega$
(or, equivalently, $\bm{n}u [\ensuremath{\partial}_{\bm{n}} u]_{\BoldTau}$)
converge to zero on the boundary in a weak sense. In their result, the boundary is assumed to be $C^\ensuremath{\infty}$, but the initial velocity is assumed to only lie in $H$. Hence, the sufficiency condition does not follow immediately from \refE{VVArg}.
Their proof of sufficiency involves the use of dissipative solutions to the Euler equations. (The use of dissipative solutions for the Euler equations in a domain with boundaries was initiated in \cite{BardosGolsePaillard2012}. See also \cite{BSW2014}.) We present here the weaker version of their results in 2D that can be obtained without employing dissipative solutions. The simple and elegant proof of necessity is as in \cite{Bardos2014Private}, simplified further because of the higher regularity of our initial data.
\begin{theorem}[Bardos and Titi \cite{BardosTiti2013a, Bardos2014Private}]\ensuremath{\lambda}bel{T:BardosTiti}
Working in 2D, assume that $\partial \Omega$ is $C^2$ and that $\overline{u} \in C^1([0, T]; C^1(\Omega))$. Then for $u \to \overline{u}$ in $L^\infty(0, T; H)$ to hold it is necessary and sufficient that
\begin{align}\ensuremath{\lambda}bel{e:BardosNecCond}
\bm{n}u \int_0^T \int_\Gamma \omega \, \varphi \to 0
\text{ as } \bm{n}u \to 0
\text{ for any } \varphi \in C^1([0, T] \times \Gamma).
\end{align}
\end{theorem}
\begin{proof}
Sufficiency of the condition follows immediately from setting $\varphi = (\overline{u} \cdot \BoldTau)|_\Gamma$ in \refT{BoundaryIffCondition}.
To prove necessity, let $\varphi \in C^1([0, T] \times \Gamma)$. We will need a divergence-free vector field $v_\delta \in C^1([0, T]; H \cap C^\ensuremath{\infty}(\Omega))$ such that $v_\delta \cdot \BoldTau = \varphi$. Moreover, we require of $v_\delta$ that it satisfy the same bounds as the boundary layer corrector of Kato in \cite{Kato1983}; in particular,
\begin{align}\ensuremath{\lambda}bel{e:vBounds}
\bm{n}orm{\ensuremath{\partial}_t v_\delta}_{L^1([0, T]; L^2(\Omega))}
\le C \delta^{1/2}, \qquad
\bm{n}orm{\ensuremath{\bm{n}abla} v_\delta}_{L^\ensuremath{\infty}([0, T]; L^2(\Omega))}
\le C \delta^{-1/2}.
\end{align}
This vector field can be constructed in several ways: we detail one such construction at the end of this proof.
The proof now proceeds very simply. We multiply the Navier-Stokes equations by $v_\delta$ and integrate over space and time to obtain
\begin{align}\ensuremath{\lambda}bel{e:BardosNec}
\begin{split}
\int_0^T (\ensuremath{\partial}_t &u, v_\delta)
+ \int_0^T (u \cdot \ensuremath{\bm{n}abla} u, v_\delta)
+ \bm{n}u \int_0^T (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} v_\delta) \\
&= \bm{n}u \int_0^T \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \bm{n})
\cdot v_\delta
= \bm{n}u \int_0^T \int_\Gamma \omega \, v_\delta
\cdot \BoldTau
= \bm{n}u \int_0^T \int_\Gamma \omega \, \varphi.
\end{split}
\end{align}
Here, we used \refE{gradunomega} with $v_\delta$ in place of $\overline{u}$, and we note that no integrations by parts were involved.
Now, assuming that the vanishing viscosity limit holds, Kato shows in \cite{Kato1983} that setting $\delta = c \bm{n}u$---and using the bounds in \refE{vBounds}---each of the terms on the left hand side of \refE{BardosNec} vanishes as $\bm{n}u \to 0$. Necessarily, then, so does the right hand side, giving the necessity of the condition in \refE{BardosNecCond}.
It remains to construct $v_\delta$.
To do so, we place coordinates on a tubular neighborhood, $\Sigma$, of $\Gamma$ as in the proof of \cref{L:Trace}. In $\Sigma$, define
\begin{align*}
\psi(s, r) = - r \varphi(s).
\end{align*}
Write $\ensuremath{\widehat}{r}$, $\ensuremath{\widehat}{s}$ for the unit vectors in the directions of increasing $r$ and $s$. Then $\ensuremath{\widehat}{r} \cdot \ensuremath{\widehat}{s} = 0$ and $\ensuremath{\widehat}{r} = - \bm{n}$ on $\Gamma$. Thus, on the boundary,
\begin{align*}
\ensuremath{\bm{n}abla} \psi(s, r)
= -\varphi(s) \ensuremath{\widehat}{r} -r \varphi'(s) \ensuremath{\widehat}{s}.
\end{align*}
This gives
\begin{align*}
\ensuremath{\bm{n}abla} \psi(s) \cdot \bm{n}
= -\varphi(s) \ensuremath{\widehat}{r} \cdot \bm{n}
= \varphi(s).
\end{align*}
It also gives $\ensuremath{\bm{n}abla} \psi \in C^1([0, T]; C(\Sigma))$ so that $\psi \in C^1([0, T] \times \Sigma)$.
We now follow the procedure in \cite{Kato1983}. Let $\zeta: [0, \ensuremath{\infty}) \to [0, 1]$ be a smooth cutoff function with $\zeta \equiv 1$ on $[0, 1/2]$ and $\zeta \equiv 0$ on $[1, \ensuremath{\infty})$. Define $\zeta_\delta(\cdot) = \zeta(\cdot/\delta)$ and
\begin{align*}
v_\delta(x)
= \ensuremath{\bm{n}abla}^\perp (\zeta_\delta(\dist(x, \ensuremath{\partial} \Omega)) \psi(x)).
\end{align*}
Note that $v_\delta$ is supported in a boundary layer of width proportional to $\delta$. The bounds in \refE{vBounds} follow as shown in \cite{K2006Kato}.
\end{proof}
To establish the necessity of the stronger condition in \refT{BardosTiti}, we used (based on Bardos's \cite{Bardos2014Private}) a vector field supported in a boundary layer of width $c \bm{n}u$, as in \cite{Kato1983}. We used it, however, to extend to the whole domain an arbitrary cutoff function defined on the boundary, rather than to correct the Eulerian velocity as in \cite{Kato1983}.
\begin{remark}
In this proof of \refT{BardosTiti}
the time regularity in the test functions could be weakened
slightly to assuming that
$\ensuremath{\partial}_t \varphi \in L^1([0, T]; C(\Gamma))$,
for this would still allow the first bound in
\refE{vBounds} to be obtained.
\end{remark}
\begin{remark}
Using the results of \cite{BardosTiti2013a, BSW2014} it is
possible to change the condition in \refE{BardosNecCond} to
apply to test functions $\varphi$ in
$C^1([0, T]; C^\ensuremath{\infty}(\Gamma))$ (\cite{Bardos2014Private}).
Moreover, this can be done
without assuming time or spatial regularity of the
solution to the Euler equations: only that the initial
velocity lies in $H$.
\end{remark}
\Ignore{
\section{Speculation on another condition for the VV limit}
\bm{n}oindent There is nothing deep about the condition in \refE{BoundaryCondition2D}, but what it says is that there are two mechanisms by which the vanishing viscosity limit can hold. First, the blowup of $\omega$ on the boundary can happen slowly enough that
\begin{align}\ensuremath{\lambda}bel{e:nuL1Bound}
\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}
\to 0
\text{ as } \bm{n}u \to 0
\end{align}
or, second, the vorticity for ($NS$) can be generated on the boundary in such a way as to oppose the sign of $\overline{u} \cdot \BoldTau$. In the second case, it could well be that vorticity for $(NS)$ blows up fast enough that \refE{nuL1Bound} does not hold, but cancellation in the integral in \refE{BoundaryCondition2D} allows that condition to hold.
A natural question to ask is whether the condition,
\begin{align*}
(G) \qquad
\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}
\to 0
\text{ as } \bm{n}u \to 0
\end{align*}
is equivalent to the conditions in \cref{T:VVEquiv}. The sufficiency of this condition follows immediately, since it implies that \refE{BoundaryCondition2D} holds.
To see why we might suspect that ($G$) is necessary for ($VV$) to hold, we start with the necessary and sufficient condition $(iii')$ of Theorem 1.2 of \cite{K2006Kato} that
\begin{align*}
\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^2(\Gamma_\bm{n}u)}^2
\to 0
\text{ as } \bm{n}u \to 0,
\end{align*}
where $\Gamma_\bm{n}u = \set{x \in \Omega \colon \dist(x, \Gamma) < \bm{n}u}$. For sufficiently regular $u_\bm{n}u^0$, for all $t > 0$, $\omega(t)$ will lie in $H^2(\Omega) \subseteq C(\overline{\Omega})$, and one might expect to have
\begin{align}\ensuremath{\lambda}bel{e:ApproxIntegral}
\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^2(\Gamma_\bm{n}u)}^2
&\cong \bm{n}u \int_0^T \int_0^\bm{n}u \bm{n}orm{\omega}_{L^2(\Gamma)}^2
= \bm{n}u^2 \int_0^T \bm{n}orm{\omega}_{L^2(\Gamma)}^2.
\end{align}
Then using \Holders inequality followed by Jensen's inequality,
\begin{align}\ensuremath{\lambda}bel{e:HJBound}
\pr{\frac{\bm{n}u}{T^{3/2}} \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}}^2
\le \pr{\frac{\bm{n}u}{T} \int_0^T \bm{n}orm{\omega}_{L^2(\Gamma)}}^2
\le \frac{\bm{n}u^2}{T} \int_0^T \bm{n}orm{\omega}_{L^2(\Gamma)}^2.
\end{align}
But the left-hand side of \refE{ApproxIntegral} must vanish, and so too must the left-hand side of \refE{HJBound}, implying that ($G$) holds.
The problem with this argument, however, is that the best we can say rigorously is that from \refT{BoundaryLayerWidth} and the continuity of $\omega(t)$ for all $t > 0$,
\begin{align*}
\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}^2
&= \bm{n}u \int_0^T
\lim_{\delta \to 0} \frac{1}{\delta^2} \bm{n}orm{\omega}_{L^1(\Gamma_{\delta})}^2
\le \bm{n}u \liminf_{\delta \to 0} \frac{1}{\delta^2}
\int_0^T \bm{n}orm{\omega}_{L^1(\Gamma_{\delta})}^2 \\
&\le \bm{n}u \lim_{\delta \to 0} \frac{1}{\delta^2} \frac{C \delta}{\bm{n}u}
= \ensuremath{\infty},
\end{align*}
where in the first inequality we used Fatou's lemma.
If we could improve this inequality to show that $\bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}^2$ is $o(1/\bm{n}u)$, then using \Holders inequality followed by Jensen's inequality,
\begin{align*}
\pr{\frac{\bm{n}u}{T} \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}}^2
\le \frac{\bm{n}u^2}{T} \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}^2
\to 0 \text{ as } \bm{n}u \to 0.
\end{align*}
\Ignore{
Letting $f = \overline{\omega}$ in condition ($E_2$) of \cref{T:VVEquiv} gives
\begin{align*}
(\omega, \overline{\omega})
\to \bm{n}orm{\overline{\omega}}_{L^2}^2 - \int_\Gamma \overline{\omega} \, \overline{u} \cdot \BoldTau.
\end{align*}
But,
\begin{align*}
(\omega, \overline{\omega})
&= (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u})
= - (\Delta u, \overline{u})
+ \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \overline{u},
\end{align*}
where we used Lemma 6.6 of \cite{K2008VVV} for scalar vorticity (in which the factor of 2 in that lemma does not appear).
By Equation (4.2) of \cite{KNavier},
\begin{align*}
(\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \overline{u}
= ((\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \BoldTau) (\overline{u} \cdot \BoldTau)
= \omega(u) \, \overline{u} \cdot \BoldTau.
\end{align*}
Thus,
\begin{align*}
- \bm{n}u (\Delta u, \overline{u})
+ \bm{n}u \int_\Gamma \omega(u) \, \overline{u} \cdot \BoldTau
\to \bm{n}u \bm{n}orm{\overline{\omega}}_{L^2}^2 - \bm{n}u \int_\Gamma \overline{\omega} \, \overline{u} \cdot \BoldTau.
\end{align*}
The right-hand side vanishes with $\bm{n}u$ since $\overline{u}$ is in $C^{1 + \ensuremath{\epsilon}}$, so
\begin{align*}
\bm{n}u \int_\Gamma \omega(u) \, \overline{u} \cdot \BoldTau
\to - \bm{n}u (\Delta u, \overline{u}).
\end{align*}
It remains to show that the right-hand side vanishes with $\bm{n}u$.
Now,
\begin{align*}
\bm{n}u (\Delta u, \overline{u})
= (\bm{n}u \Delta u, \overline{u})
= \bm{n}u (\ensuremath{\partial}_t u, \overline{u})
+ \bm{n}u (u \cdot \ensuremath{\bm{n}abla} u, \overline{u})
+ \bm{n}u (\ensuremath{\bm{n}abla} p, \overline{u})
\end{align*}
}
\Ignore{
We make the assumptions on the initial velocity and on the forcing in \cref{T:VVEquiv}.
\begin{theorem}
The vanishing viscosity limit holds over any finite time interval $[0, T]$ if and only if $A_\bm{n}u \to 0$ as $\bm{n}u \to 0$, where
\begin{align}\ensuremath{\lambda}bel{e:Anu}
A_\bm{n}u = \bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}.
\end{align}
Moreover,
\begin{align}\ensuremath{\lambda}bel{e:RateOfConvergence}
\bm{n}orm{u(t) - \overline{u}(t)}_{L^2}^2
\le (C\bm{n}u + C A_\bm{n}u + \smallnorm{u_\bm{n}u^0 - \overline{u}^0}_{L^2}^2)^{1/2} e^{Ct}
\end{align}
for all sufficiently small $\bm{n}u > 0$, with $C$ depending only upon the initial velocities and $T$.
\end{theorem}
\begin{proof}
Subtracting ($EE$) from ($NS$), multiplying by $w = u - \overline{u}$, and integrating over $\Omega$ leads to
\begin{align*}
\frac{1}{2} \diff{}{t} &\bm{n}orm{w}_{L^2}^2
+ \bm{n}u \bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2}^2
= - (w \cdot \ensuremath{\bm{n}abla} \overline{u}, w)
+ \bm{n}u(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u})
- \bm{n}u \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \overline{u} \\
&= - (w \cdot \ensuremath{\bm{n}abla} \overline{u}, w)
+ \bm{n}u(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u})
- \bm{n}u \int_\Gamma \omega \, \overline{u} \cdot \BoldTau.
\end{align*}
Here we used Equation (4.2) of \cite{KNavier} to conclude that
\begin{align*}
(\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \overline{u}
= ((\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot \BoldTau) (\overline{u} \cdot \BoldTau)
= \omega \, \overline{u} \cdot \BoldTau.
\end{align*}
Integrating over time gives
\begin{align*}
\frac{1}{2} &\bm{n}orm{w(t)}_{L^2}^2
+ \bm{n}u \int_0^t \bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2}^2
= \bm{n}orm{w(0)}_{L^2}^2
- \int_0^t (w \cdot \ensuremath{\bm{n}abla} \overline{u}, w)
+ \bm{n}u \int_0^t (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u}) \\
&\qquad - \bm{n}u \int_0^t \int_\Gamma \omega \, \overline{u} \cdot \BoldTau.
\end{align*}
Using the bounds,
\begin{align*}
\abs{(w \cdot \ensuremath{\bm{n}abla} \overline{u}, w)}
&\le \bm{n}orm{\ensuremath{\bm{n}abla} \overline{u}}_{L^\ensuremath{\infty}([0, T] \times \Omega)}
\bm{n}orm{w}_{L^2}^2
\le C \bm{n}orm{w}_{L^2}^2, \\
\bm{n}u \int_0^T \abs{(\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} \overline{u})}
&\le \bm{n}u \bm{n}orm{\ensuremath{\bm{n}abla} \overline{u}}_{L^2([0, T] \times \Omega)}
\bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2([0, T] \times \Omega)} \\
&\le C \bm{n}u
\bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2([0, T] \times \Omega)} \\
&\le C \bm{n}u + \frac{\bm{n}u}{2} \bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2([0, T] \times \Omega)}^2 , \\
- \bm{n}u \int_0^t \int_\Gamma \omega \, \overline{u} \cdot \BoldTau
&\le
\bm{n}u \bm{n}orm{\overline{u}}_{L^\ensuremath{\infty}} \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}
\le C \bm{n}u \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}
\end{align*}
gives
\begin{align}\ensuremath{\lambda}bel{e:VVArg}
\begin{split}
&\bm{n}orm{w(t)}_{L^2}^2
+ \bm{n}u \int_0^t \bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2}^2
\le \bm{n}orm{w(0)}_{L^2}^2
+ C \bm{n}u + C A_\bm{n}u + C \int_0^t \bm{n}orm{w}_{L^2}^2.
\end{split}
\end{align}
Applying Gronwall's inequality leads to \refE{RateOfConvergence} and shows that $A_\bm{n}u \to 0$ implies ($VV$).
\end{proof}
Let
\begin{align*}
\Gamma_\delta = \set{x \in \Omega \colon \dist(x, \Gamma) < \delta},
\end{align*}
where we always assume that $\delta > 0$ is sufficiently small that $\Gamma_\delta$ is a tubular neighborhood of $\Gamma$.
\begin{lemma}\ensuremath{\lambda}bel{L:BoundaryLayerWidth}
For any sufficiently small $\delta > 0$
\begin{align}\ensuremath{\lambda}bel{e:OmegaL1VanishGeneral}
\bm{n}orm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta}))}^2
\le C \frac{\delta}{\bm{n}u}
\end{align}
for all sufficiently small $\delta(\bm{n}u)$.
\end{lemma}
\begin{proof}
By the Cauchy-Schwarz inequality,
\begin{align*}
\bm{n}orm{\omega}_{L^1(\Gamma_{\delta})}
\le \bm{n}orm{1}_{L^2(\Gamma_{\delta})} \bm{n}orm{\omega}_{L^2(\Gamma_{\delta})}
\le C \delta^{1/2} \bm{n}orm{\omega}_{L^2(\Gamma_{\delta})}
\end{align*}
so
\begin{align*}
\bm{n}orm{\omega}_{L^1(\Gamma_{\delta})}^2
\le C \delta \bm{n}orm{\omega}_{L^2(\Gamma_{\delta})}^2
\end{align*}
and
\begin{align*}
\frac{C \bm{n}u}{\delta} \bm{n}orm{\omega}_{L^2([0, T]; L^1(\Gamma_{\delta}))}^2
\le \bm{n}u \bm{n}orm{\omega}_{L^2([0, T]; L^2(\Gamma_{\delta}))}^2.
\end{align*}
By the basic energy inequality for the Navier-Stokes equations, the right-hand side is bounded, giving \refE{OmegaL1VanishGeneral}.
\end{proof}
\begin{theorem}\ensuremath{\lambda}bel{T:Anu}
Assume that $\Gamma$ is $C^3$.
If the vanishing viscosity limit holds then $A_\bm{n}u \to 0$ as $\bm{n}u \to 0$.
\end{theorem}
\begin{proof}
Impose at first the extra regularity condition that $u_\bm{n}u^0$ lies in $H^3(\Omega)$, so that $u(t)$ lies in $H^3(\Omega)$ for all $t > 0$. Then for all $t > 0$, $\omega(t)$ is in $H^2(\Omega)$ and hence $\omega(t)$ is continuous up to the boundary by Sobolev embedding. Thus,\begin{align}\ensuremath{\lambda}bel{e:BoundaryIntegralLimit}
\bm{n}orm{\omega(t)}_{L^1(\Gamma)}^2
= \lim_{\delta \to 0} \frac{1}{\delta^2}
\bm{n}orm{\omega(t)}_{L^1(\Gamma_{\delta})}^2.
\end{align}
It follows from Fatou's lemma that
\begin{align*}
\bm{n}u \int_0^T &\bm{n}orm{\omega(t)}_{L^1(\Gamma)}^2 \, dt
= \bm{n}u \int_0^T \lim_{\delta \to 0} \frac{1}{\delta^2}
\bm{n}orm{\omega(t)}_{L^1(\Gamma_{\delta})}^2 \, dt\\
&= \bm{n}u \int_0^T \liminf_{\delta \to 0} \frac{1}{\delta^2}
\bm{n}orm{\omega(t)}_{L^1(\Gamma_{\delta})}^2 \, dt
\le \bm{n}u \liminf_{\delta \to 0} \int_0^T
\frac{1}{\delta^2} \bm{n}orm{\omega(t)}_{L^1(\Gamma_{\delta})}^2 \, dt\\
&\le \bm{n}u \liminf_{\delta \to 0} \frac{1}{\delta^2} C \frac{\delta}{\bm{n}u}
= \liminf_{\delta \to 0} \frac{C}{\delta}.
\end{align*}
In the last inequality we used \refL{BoundaryLayerWidth}. \textbf{Of course, this is BAD!!!}
Using \Holders and Jensen's inequalities it follows that
\begin{align*}
\pr{\frac{\bm{n}u}{T} \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}}^2
\le \frac{\bm{n}u^2}{T} \int_0^T \bm{n}orm{\omega}_{L^1(\Gamma)}^2
\le \frac{C \bm{n}u}{T},
\end{align*}
completing the proof.
\end{proof}
\begin{remark}
In higher dimensions, we could attempt the same argument using $\ensuremath{\bm{n}abla} u$ in place of $\omega$. A problem remains, though, in that we cannot conclude that $\ensuremath{\bm{n}abla} u$ has sufficient space regularity over a finite time interval independent of the viscosity so that $\omega(t)$ is continuous. Weak solutions do have sufficient regularity so that the left-hand side of \refE{BoundaryIntegralLimit} (with $\ensuremath{\bm{n}abla} u$ in place of $\omega$) makes sense, but there is no reason to suppose that equality with the right-hand side holds.
\end{remark}
}
}
\Ignore{
\section{An alternate derivation of Kato's conditions}\ensuremath{\lambda}bel{S:AlternateDerivation}
\bm{n}oindent The argument that led to \refE{VVArg} in the proof of \refT{BoundaryIffCondition} is perhaps the first calculation that anyone who ever attempts to establish the vanishing viscosity limit makes. It is simple, direct, and natural. Because we were working in 2D it was easy to make the argument rigorous, but the essential idea is contained in the formal argument.
Kato's introduction of a boundary layer corrector, on the other hand, handles the rigorous proof of the necessity and sufficiency of his conditions in higher dimensions while at the same time striving to give the motivation for those very conditions. In this way, it obscures to some extent the nature of the argument, and appears somewhat unmotivated. That is to say, one can follow the technical details easily enough, but it is hard to see what the plan is at the outset. (Kato uses the energy inequality for the Navier-Stokes equations in a way that avoids treating $w = u - \overline{u}$ as though it were a test function for the Navier-Stokes equations. This now classical technique is clearly explained in Section 2.2 of \cite{IftimieLopeses2009}.)
We give a different derivation below, which starts with \refT{BoundaryIffCondition}. We give the formal argument, which is rigorous in two dimensions if we pay more attention to the regularity of the solutions.
\begin{theorem}
The condition in \cref{e:KellCondition} is necessary and sufficient for
($VV$) to hold.
\end{theorem}
\begin{proof}
Let $v$ be the boundary layer velocity defined by Kato in \cite{Kato1983}, where $\delta = c \bm{n}u$: so $v$ is divergence-free, vanishes outside of $\Gamma_{c \bm{n}u}$, and $v = \overline{u}$ on $\Gamma$. (In all that follows, one can also refer to \cite{K2006Kato}, which gives Kato's argument using (almost) his same notation.) Since $v = \overline{u}$ on $\Gamma$, by \refT{BoundaryIffCondition}, and using \cref{e:gradunomega}, ($VV$) holds if and only if
\begin{align*}
\bm{n}u \int_0^T \int_\Gamma (\ensuremath{\bm{n}abla} u \cdot \mathbf{n}) \cdot v
= \int_0^T (\bm{n}u \Delta u, v) + \bm{n}u \int_0^T (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} v)
\to 0
\end{align*}
as $\bm{n}u \to 0$.
Using Lemma A.2 of \cite{K2006Kato},
\begin{align*}
\bm{n}u \int_0^T (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} v)
&= 2 \bm{n}u \int_0^T (\omega(u), \omega(v))
\le 2 \bm{n}u \int_0^T \bm{n}orm{\omega(u)}_{L^2(\Gamma_{c \bm{n}u})} \bm{n}orm{\omega(v)}_{L^2} \\
&\le \sqrt{\bm{n}u} \bm{n}orm{\ensuremath{\bm{n}abla} v}_{L^2([0, T] \times \Omega)}
\sqrt{\bm{n}u} \bm{n}orm{\omega(u)}_{L^2([0, T] \times \Gamma_{c \bm{n}u})} \\
&\le C \pr{\bm{n}u \int_0^T \bm{n}orm{\omega(u)}_{L^2(\Gamma_{c \bm{n}u})}^2}^{1/2},
\end{align*}
since $\bm{n}orm{\ensuremath{\bm{n}abla} v}_{L^2([0, T] \times \Omega)} \le C \bm{n}u^{-1/2}$.
Also,
\begin{align*}
\int_0^T (\bm{n}u \Delta u, v)
= \int_0^T \brac{(\ensuremath{\partial}_t u, v) + (u \cdot \ensuremath{\bm{n}abla} u, v) + (\ensuremath{\bm{n}abla} p, v) - (f, v)}.
\end{align*}
The integral involving the pressure disappears, while
\begin{align*}
\int_0^T \abs{(f, v)}
\le C \bm{n}u^{1/2} \int_0^T \bm{n}orm{f}_{L^2(\Gamma_{c \bm{n}u})},
\end{align*}
using the bound on $\bm{n}orm{v}_{L^\ensuremath{\infty}([0, T]; L^2)}$ in \cite{Kato1983} (Equation (3.1) of \cite{K2006Kato}). This vanishes with the viscosity since $f$ lies in $L^1([0, T]; L^2)$.
The integral involving $(u \cdot \ensuremath{\bm{n}abla} u, v)$ we bound the same way as in \cite{K2006Kato}. Using Lemma A.4 of \cite{K2006Kato},
\begingroup
\allowdisplaybreaks
\begin{align*}
&\abs{\int_0^t (u \cdot \ensuremath{\bm{n}abla} u, v)}
= 2 \abs{\int_0^t (v, u \cdot \omega(u))}
\\
&\qquad
\le 2 \bm{n}orm{v}_{L^\ensuremath{\infty}([0, T] \times \Omega)}
\int_0^t \bm{n}orm{u}_{L^2(\Gamma_{c \bm{n}u})}
\bm{n}orm{\omega(u)}_{L^2(\Gamma_{c \bm{n}u})} \\
&\qquad
\le C \bm{n}u \int_0^t
\bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2(\Gamma_{c \bm{n}u})}
\bm{n}orm{\omega(u)}_{L^2(\Gamma_{c \bm{n}u})} \\
&\qquad
\le C \bm{n}u^{1/2}
\bm{n}orm{\ensuremath{\bm{n}abla} u}_{L^2([0, T]; L^2(\Gamma_{c \bm{n}u}))}
\bm{n}u^{1/2} \bm{n}orm{\omega(u)}_{L^2([0, T]; L^2(\Gamma_{c \bm{n}u}))} \\
&\qquad
\le C \pr{\bm{n}u \int_0^t
\bm{n}orm{\omega(u)}_{L^2(\Gamma_{c \bm{n}u})}^2}^{1/2}.
\end{align*}
\endgroup
Finally,
\begin{align*}
\int_0^T (\ensuremath{\partial}_t u, v)
= \int_0^T \int_\Omega \ensuremath{\partial}_t (u v) + \int_0^T (u, \ensuremath{\partial}_t v).
\end{align*}
As in \cite{Kato1983},
\begin{align*}
\abs{\int_0^t (u, \ensuremath{\partial}_t v)}
&\le \int_0^t \bm{n}orm{u}_{L^2(\Omega)} \bm{n}orm{\ensuremath{\partial}_t v}_{L^2(\Omega)}
\le C \bm{n}u^{1/2}.
\end{align*}
Also,
\begin{align*}
\int_0^T \int_\Omega &\ensuremath{\partial}_t (u v)
= \int_0^T \diff{}{t} (u, v)
= (u(T), v(T)) - (u_\bm{n}u^0, v(0)) \\
&\le \bm{n}orm{u(T)}_{L^2} \bm{n}orm{v}_{L^2}
+ \smallnorm{u_\bm{n}u^0}_{L^2} \bm{n}orm{v(0)}_{L^2} \\
&\le C \smallnorm{u_0}_{L^2} \bm{n}orm{v}_{L^\ensuremath{\infty}([0, T]; L^2)}
\le C \sqrt{\bm{n}u}.
\end{align*}
We conclude from all these inequalities that
\begin{align*}
\bm{n}u \int_0^T \bm{n}orm{\omega(u)}_{L^2(\Gamma_{c \bm{n}u})}^2
\to 0 \text{ as } \bm{n}u \to 0
\end{align*}
is a sufficient condition for the vanishing viscosity limit to hold (as, too, is Kato's condition involving $\ensuremath{\bm{n}abla} u$ in place of $\omega(u)$). The necessity follows easily from the energy inequality.
\end{proof}
}
\ifbool{IncludeNavierBCSection}{
\section{Navier boundary conditions in 2D}\ensuremath{\lambda}bel{S:NavierBCs}
\refT{VorticityNotBounded} says that if the vanishing viscosity limit holds, then there cannot be a uniform (in $\bm{n}u$) bound on the $L^2$-norm of the vorticity. This is in stark contrast to the situation in the whole space, where such a bound holds, or for Navier boundary conditions in 2D, where such a bound holds for $L^p$, $p > 2$, as shown in \cite{FLP} and \cite{CMR}. For Navier boundary conditions in 2D, then, as long as the initial vorticity is in $L^p$ for $p > 2$ there will be a uniform bound on the $L^2$-norm of the vorticity, since the domain is bounded.
In fact, for Navier boundary conditions in 2D the classical vanishing viscosity limit ($VV$)
does hold, even for much weaker regularity on the initial velocity than that considered here (see \cite{KNavier}). The argument in the proof of \refT{VorticityNotBounded} then shows that
\begin{align}\ensuremath{\lambda}bel{e:VelocityGammaConvergence}
u \to \overline{u}
\text{ in } L^\ensuremath{\infty}([0, T]; L^2(\Gamma)).
\end{align}
We also have weak$^*$ convergence of the vorticity in $\ensuremath{\BB{C}}al{M}(\overline{\Omega})$, as we show in \cref{T:VorticityConvergenceNavier}.
\begin{theorem}\ensuremath{\lambda}bel{T:VorticityConvergenceNavier}
Assume that the solutions to $(NS)$ are with Navier boundary conditions in 2D, and that the initial vorticity $\omega_0 = \overline{\omega}_0$ is in $L^\ensuremath{\infty}$ (slightly weaker assumptions as in \cite{KNavier} can be made). Then all of the conditions in \cref{T:VVEquiv} hold, but with the three conditions below replacing conditions $(C)$, $(E)$, and $(E_2)$, respectively:
\begin{align*}
(C^N) & \qquad \ensuremath{\bm{n}abla} u \to \ensuremath{\bm{n}abla} \overline{u}
\quad
\weak^* \text{ in } L^\ensuremath{\infty}(0, T; \ensuremath{\BB{C}}al{M}(\overline{\Omega})^{d \times d}), \\
(E^N) & \qquad \omega \to \overline{\omega}
\quad
\weak^* \text{ in } L^\ensuremath{\infty}(0, T; \ensuremath{\BB{C}}al{M}(\overline{\Omega})^{d \times d}), \\
(E_2^N) & \qquad \omega \to \overline{\omega}
\quad
\weak^* \text{ in } L^\ensuremath{\infty}(0, T; \ensuremath{\BB{C}}al{M}(\overline{\Omega})). \\
\end{align*}
\end{theorem}
\begin{proof}
First observe that $(E^N)$ is just a reformulation of $(E_2^N)$ with vorticity viewed as a matrix. Also, it is sufficient to prove convergences in $(H^1(\Omega))^*$, using the same argument as in the proof of \cref{C:EquivConvMeasure}, since $\omega$ is bounded in all $L^p$ spaces, including $p = \ensuremath{\infty}$.
It is shown in \cite{KNavier} that condition $(B)$ holds, from which $(A)$ and $(A')$ follow immediately. Condition $(D)$ is weaker than $(C^N)$ and condition $(F_2)$ is weaker than conditions $(E_2^N)$, so it remains only to show that $(C^N)$ and $(E_2^N)$ hold. We show this by modifying slightly the argument in the proof of \cref{T:VVEquiv} given in \cite{K2008VVV}.
\bm{n}oindent $\mathbf{(A') \implies (C^N)}$: Assume that ($A'$) holds and let $M$ be in
$(H^1(\Omega))^{d \times d}$. Then
\begin{align*}
(\ensuremath{\bm{n}abla} u, M)
&= - (u, \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot u \\
&\to -(\overline{u}, \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u}
\text{ in } L^\ensuremath{\infty}([0, T]).
\end{align*}
The convergence follows from condition $(A')$ and \refE{VelocityGammaConvergence}.
But,
\begin{align*}
-(\overline{u}, \dv M)
= (\ensuremath{\bm{n}abla} \overline{u}, M)
- \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u},
\end{align*}
giving ($C^N$).
\bm{n}oindent $\mathbf{(A') \implies (E_2^N)}$: Assume that ($A'$) holds and let $f$ be in
$H^1(\Omega)$. Then
\begin{align*}
(\omega, f)
&= - (\dv u^\perp, f)
= (u^\perp, \ensuremath{\bm{n}abla} f) - \int_\Gamma (u^\perp \cdot \mathbf{n}) f \\
&= - (u, \ensuremath{\bm{n}abla}^\perp f) + \int_\Gamma (u \cdot \BoldTau) f \\
&\to -(\overline{u}, \ensuremath{\bm{n}abla}^\perp f) + \int_\Gamma (\overline{u} \cdot \BoldTau) f
\text{ in } L^\ensuremath{\infty}([0, T])
\end{align*}
where $u^\perp = -\innp{u^2, u^1}$ and we used the identity $\omega(u) = - \dv u^\perp$
and \refE{VelocityGammaConvergence}.
But,
\begin{align*}
-(\overline{u}, &\ensuremath{\bm{n}abla}^\perp f)
= (\overline{u}^\perp, \ensuremath{\bm{n}abla} f)
= - (\dv \overline{u}^\perp, f)
+ \int_\Gamma (\overline{u}^\perp \cdot \mathbf{n}) f \\
&= - (\dv \overline{u}^\perp, f)
- \int_\Gamma (\overline{u} \cdot \BoldTau) f
= (\overline{\omega}, f)
- \int_\Gamma (\overline{u} \cdot \BoldTau) f,
\end{align*}
giving ($E_2^N$).
\end{proof}
\begin{remark}
If one could show that \refE{VelocityGammaConvergence} holds in dimension three then \refT{VorticityConvergenceNavier} would hold, with convergences in $(H^1(\Omega))^*$, in dimension three as well for initial velocities in $H^{5/2}(\Omega)$. This is because by \cite{IP2006} the vanishing viscosity limit holds for such initial velocities, and the argument in the proof of \refT{VorticityConvergenceNavier} would then carry over to three dimensions by making adaptations similar to those we made to the 2D arguments in \cite{K2008VVV}. Note that \refE{VelocityGammaConvergence} would follow, just as in 2D, from a uniform (in $\bm{n}u$) bound on the $L^p$-norm of the vorticity for some $p \ge 2$ if that could be shown to hold, though that seems unlikely.
\end{remark}
}
{
}
\Ignore{
\section{High friction limit}
\bm{n}oindent Assume that $\overline{u}$ is a vector field lying in $L^\ensuremath{\infty}([0, T]; H)$ and let $u = u^\ensuremath{\alpha}$ be a vector field in $L^\ensuremath{\infty}([0, T]; H \cap H^1(\Omega))$ parameterized by $\ensuremath{\alpha}$, where $\ensuremath{\alpha} \to \ensuremath{\infty}$. This is the scenario that occurs in the high friction limit [\textbf{add references}], where $\overline{u}$ (which lies in $L^\ensuremath{\infty}([0, T]; V) \subseteq L^\ensuremath{\infty}([0, T]; H)$), a subject that we return to briefly at the end of this section.
Define the conditions
\begin{align*}
(A_\ensuremath{\alpha}) & \qquad u \to \overline{u} \text{ weakly in } H
\text{ uniformly on } [0, T], \\
(A'_\ensuremath{\alpha}) & \qquad u \to \overline{u} \text{ weakly in } (L^2(\Omega))^d
\text{ uniformly on } [0, T], \\
(B_\ensuremath{\alpha}) & \qquad u \to \overline{u} \text{ in } L^\ensuremath{\infty}([0, T]; H), \\
(C_\ensuremath{\alpha}) & \qquad \ensuremath{\bm{n}abla} u \to \ensuremath{\bm{n}abla} \overline{u}
\text{ in } ((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(D_\ensuremath{\alpha}) & \qquad \ensuremath{\bm{n}abla} u \to \ensuremath{\bm{n}abla} \overline{u} \text{ in } (H^{-1}(\Omega))^{d \times d}
\text{ uniformly on } [0, T], \\
(E_\ensuremath{\alpha}) & \qquad \omega \to \omega(\overline{u})
\text{ in }
((H^1(\Omega))^{d \times d})^*
\text{ uniformly on } [0, T], \\
(E_{2, \ensuremath{\alpha}}) & \qquad \omega \to \omega(\overline{u})
\text{ in } (H^1(\Omega))^*
\text{ uniformly on } [0, T], \\
(F_{2, \ensuremath{\alpha}}) & \qquad \omega \to \omega(\overline{u}) \text{ in } H^{-1}(\Omega)
\text{ uniformly on } [0, T],
\end{align*}
we have the following theorem:
\begin{theorem}\ensuremath{\lambda}bel{T:MainResultal}
Assume that $u \to \overline{u}$ in $L^\ensuremath{\infty}([0, T]; L^2(\Gamma))$.
Conditions ($A_\ensuremath{\alpha}$), ($A'_\ensuremath{\alpha}$), ($C_\ensuremath{\alpha}$), ($D_\ensuremath{\alpha}$), and ($E_\ensuremath{\alpha}$) are equivalent.
In two dimensions, conditions ($E_{2, \ensuremath{\alpha}}$) and ($F_{2, \ensuremath{\alpha}}$) are equivalent to the other conditions
when $\Omega$ is simply connected.
Also, $(B_\ensuremath{\alpha})$ implies all of the other conditions. Finally, the same equivalences hold if we replace each
convergence above with the convergence of a subsequence.
\end{theorem}
\begin{proof}
$\mathbf{(A) \iff (A')}$: Let $v$ be in $(L^2(\Omega))^d$. By Lemma 7.3 of \cite{K2008VVV}, $v = w + \ensuremath{\bm{n}abla} p$, where $w$ is in $H$ and $p$ is in $H^1(\Omega)$. Then assuming $(A)$ holds,
\begin{align*}
(u(t), v)
&
= (u(t), w)
\to (\overline{u}(t), w)
= (\overline{u}(t), v)
\end{align*}
uniformly over $t$ in $[0, T]$, so $(A')$ holds. The converse is immediate.
\bm{n}oindent $\mathbf{(B) \implies (A)}$:
This implication is immediate.
\bm{n}oindent $\mathbf{(A') \implies (C)}$: Assume that ($A'$) holds and let $M$ be in
$(H^1(\Omega))^{d \times d}$. Then
\begin{align*}
(\ensuremath{\bm{n}abla} &u(t), M)
= - (u(t), \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot u(t) \\
&\to -(\overline{u}(t), \dv M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u}(t)
= (\ensuremath{\bm{n}abla} \overline{u}(t), M)
\text{ in } L^\ensuremath{\infty}([0, T]).
\end{align*}
But,
\begin{align*}
-(\overline{u}(t), \dv M)
= (\ensuremath{\bm{n}abla} \overline{u}(t), M)
- \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u},
\end{align*}
giving ($C$).
\bm{n}oindent $\mathbf{(C) \implies (D)}$: This follows simply because $H^1_0(\Omega) \subseteq H^1(\Omega)$.
\bm{n}oindent $\mathbf{(D) \implies (A)}$: Assume ($D$) holds, and let $v$ be
in $H$. Then $v = \dv M$ for some $M$ in $(H^1_0(\Omega))^{d \times d}$ by
Corollary 7.5 of \cite{K2008VVV}, so
\begin{align*}
(u&(t), v)
= (u(t), \dv M)
= -(\ensuremath{\bm{n}abla} u(t), M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot u(t) \\
& \to -(\ensuremath{\bm{n}abla} \overline{u}(t), M) + \int_\Gamma (M \cdot \mathbf{n}) \cdot \overline{u}(t)
= (\overline{u}(t), \dv M)
= (\overline{u}(t), v)
\end{align*}
uniformly over $[0, T]$, from which ($A$) follows.
Now assume that $d = 2$.
\noindent $\mathbf{(A') \implies (E_2)}$: Assume that ($A'$) holds and let $f$ be in
$H^1(\Omega)$. Then
\begin{align*}
(\omega(t), f&)
= - (\dv u^\perp(t), f)
= (u^\perp(t), \ensuremath{\bm{n}abla} f) - \int_\Gamma (u^\perp \cdot \mathbf{n}) f \\
&= - (u(t), \ensuremath{\bm{n}abla}^\perp f) - \int_\Gamma (u^\perp \cdot \mathbf{n}) f
\to -(\overline{u}(t), \ensuremath{\bm{n}abla}^\perp f) - \int_\Gamma (\overline{u}^\perp \cdot \mathbf{n}) f \\
&= (\overline{u}^\perp(t), \ensuremath{\bm{n}abla} f) - \int_\Gamma (\overline{u}^\perp \cdot \mathbf{n}) f
= - (\dv \overline{u}^\perp(t), f)
= (\overline{\omega}(t), f)
\end{align*}
in $L^\ensuremath{\infty}([0, T])$, giving ($E_2$). Here we used the identity $u^\perp = \innp{-u^2, u^1}$,
$\omega(u) = - \dv u^\perp$, and the fact that $\ensuremath{\bm{n}abla}^\perp f$ lies in $H$.
\noindent $\mathbf{(E_2) \implies (F_2)}$: Follows for the same reason that
$(C) \implies (D)$.
\noindent $\mathbf{(F_2) \implies (A)}$: Assume ($F_2$) holds, and let $v$ be
in $H$. Then $v = \ensuremath{\bm{n}abla}^\perp f$ for some $f$ in $H^1_0(\Omega)$ ($f$ is called
the stream function for $v$), and
\begin{align*}
(u(t), &v)
= (u(t), \ensuremath{\bm{n}abla}^\perp f)
= - (u^\perp(t), \ensuremath{\bm{n}abla} f)
= (\dv u^\perp(t), f) - \int_\Gamma (u^\perp(t) \cdot \mathbf{n}) f \\
&= - (\omega(t), f) - \int_\Gamma (u^\perp(t) \cdot \mathbf{n}) f
\to - (\overline{\omega}(t), f) - \int_\Gamma (\overline{u}^\perp(t) \cdot \mathbf{n}) f \\
&= (\dv \overline{u}^\perp(t), f) - \int_\Gamma (\overline{u}^\perp(t) \cdot \mathbf{n}) f
= - (\overline{u}^\perp(t), \ensuremath{\bm{n}abla} f)
= (\overline{u}(t), \ensuremath{\bm{n}abla}^\perp f) \\
&= (\overline{u}(t), v)
\end{align*}
in $L^\ensuremath{\infty}([0, T])$, which shows that ($A$) holds.
What we have shown so far is that ($A$), ($A'$), ($B$), ($C$), and ($D$) are equivalent, as are $(E_2)$ and $(F_2)$ in two dimensions. It remains to show that $(E)$ is equivalent to these conditions as well. We do this by establishing the implications $(C) \implies (E) \implies (A)$.
\noindent $\mathbf{(C) \implies (E)}$: Follows directly from the vorticity being the antisymmetric gradient.
\noindent $\mathbf{(E) \implies (A)}$: Let $v$ be in $H$ and let $x$ be the vector field in $(H^2(\Omega) \cap H_0^1(\Omega))^d$ solving $\Delta x = v$ on $\Omega$ ($x$ exists and is unique by standard elliptic theory). Then, utilizing Lemma 7.6 of \cite{K2008VVV} twice (and suppressing the explicit dependence of $u$ and $\overline{u}$ on $t$),
\begin{align}\label{e:EImpliesAEquality}
\begin{split}
(u, v)
&= (u, \Delta x)
= - (\ensuremath{\bm{n}abla} u, \ensuremath{\bm{n}abla} x) + \int_\Gamma (\ensuremath{\bm{n}abla} x \cdot \mathbf{n}) \cdot u \\
&= -2 (\omega(u), \omega(x)) - \int_\Gamma (\ensuremath{\bm{n}abla} u x) \cdot \mathbf{n}
+ \int_\Gamma (\ensuremath{\bm{n}abla} x \cdot \mathbf{n}) \cdot u \\
&= -2 (\omega(u), \omega(x)) + \int_\Gamma (\ensuremath{\bm{n}abla} x \cdot \mathbf{n}) \cdot u \\
&\to -2(\omega(\overline{u}), \omega(x))
+ \int_\Gamma (\ensuremath{\bm{n}abla} x \cdot \mathbf{n}) \cdot \overline{u} \\
&= -(\ensuremath{\bm{n}abla} \overline{u}, \ensuremath{\bm{n}abla} x)
+ \int_\Gamma (\ensuremath{\bm{n}abla} \overline{u} x) \cdot \mathbf{n}
+ \int_\Gamma (\ensuremath{\bm{n}abla} x \cdot \mathbf{n}) \cdot \overline{u} \\
&= -(\ensuremath{\bm{n}abla} \overline{u}, \ensuremath{\bm{n}abla} x)
+ \int_\Gamma (\ensuremath{\bm{n}abla} x \cdot \mathbf{n}) \cdot \overline{u}
= (\overline{u}, \Delta x)
= (\overline{u}, v),
\end{split}
\end{align}
giving $(A)$.
\end{proof}
In the case of the high friction limit, at least in 2D, $(B_\ensuremath{\alpha})$ holds so all of the conditions hold. This means that the vorticities and gradients converge weakly in the sense of the conditions $(C_\ensuremath{\alpha})$ through $(F_{2, \ensuremath{\alpha}})$---convergence that does not include a vortex sheet on the boundary.
}
\addtocontents{toc}{\protect
}
\appendix
\section{Some Lemmas}
\noindent \refC{TraceCor}, which we used in the proof of \refT{VorticityNotBounded}, follows from \refL{Trace}.
\begin{lemma}[Trace lemma]\label{L:Trace}
Let $p \in (1, \ensuremath{\infty})$ and $q \in [1, \ensuremath{\infty}]$ be chosen
arbitrarily, and let $q'$ be \Holder conjugate to $q$.
There exists a constant $C = C(\Omega)$
such that for all $f \in W^{1, p}(\Omega)$,
\begin{align*}
\norm{f}_{L^p(\Gamma)}
\le C \norm{f}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{f}_{W^{1, q'}(\Omega)}
^{\frac{1}{p}}.
\end{align*}
If $f \in W^{1, p}(\Omega)$ has mean zero or $f \in W^{1, p}_0(\Omega)$ then
\begin{align*}
\norm{f}_{L^p(\Gamma)}
\le C \norm{f}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{\nabla f}_{L^{q'}(\Omega)}
^{\frac{1}{p}}.
\end{align*}
\end{lemma}
\begin{proof}
We prove this for $f \in C^\ensuremath{\infty}(\Omega)$, the result following by the density of $C^\ensuremath{\infty}(\Omega)$ in $W^{1, p}(\Omega)$. We also prove it explicitly in two dimensions, though the proof extends easily to any dimension greater than two.
Let $\Sigma$ be a tubular neighborhood of $\Gamma$ of uniform width $\delta$, where $\delta$ is half of the maximum possible width. Place coordinates $(s, r)$ on $\Sigma$ where $s$ is arc length along $\Gamma$ and $r$ is the distance of a point in $\Sigma$ from $\Gamma$, with negative distances being inside of $\Omega$. Then $r$ ranges from $-\delta$ to $\delta$, with points $(s,0)$ lying on $\Gamma$. Also, because $\Sigma$ is only half the maximum possible width, $\abs{J}$ is bounded from below, where
\begin{align*}
J = \det \frac{\ensuremath{\partial}(x, y)}{\ensuremath{\partial} (s, r)}
\end{align*}
is the Jacobian of the transformation from $(x, y)$ coordinates to $(s, r)$ coordinates.
Let $\varphi \in C^\ensuremath{\infty}(\Omega)$ equal 1 on $\Gamma$ and equal 0 on $\Omega \setminus \Sigma$. Then if $a$ is the arc length of $\Gamma$,
\begingroup
\allowdisplaybreaks
\begin{align*}
\norm{f}_{L^p(\Gamma)}^p
&= \int_0^a \int_{-\delta}^0 \pdx{}{r}
\brac{(\varphi f)(s, r)}^p \, dr \, ds \\
&\le \int_0^a \int_{-\delta}^0 \abs{\pdx{}{r}
\brac{(\varphi f)(s, r)}^p} \, dr \, ds \\
&\le \int_0^a \int_{-\delta}^0 \abs{\nabla
\brac{(\varphi f)(s, r)}^p} \, dr \, ds \\
&= \pr{\inf_{\supp \varphi} \abs{J}}^{-1}
\int_0^a \int_{-\delta}^0 \abs{\nabla
\brac{(\varphi f)(s, r)}^p}
\inf_{\supp \varphi} \abs{J}
\, dr \, ds \\
&\le \pr{\inf_{\supp \varphi} \abs{J}}^{-1}
\int_0^a \int_{-\delta}^0 \abs{\nabla
\brac{(\varphi f)(s, r)}^p}
\abs{J}
\, dr \, ds \\
&= C
\int_{\Sigma \cap \Omega} \abs{\nabla
\brac{(\varphi f)(x, y)}^p}
\, dx \, dy \\
&\le C
\norm{\nabla \brac{\varphi f}^p}_{L^1(\Omega)} \\
&= C p
\norm{(\varphi f)^{p - 1}
\nabla \brac{\varphi f}}_{L^1(\Omega)} \\
&\le C p
\norm{(\varphi f)^{p - 1}}_{L^q}
\norm{\nabla \brac{\varphi f}}_{L^{q'}(\Omega)} \\
&= C p
\brac{\int_{\Omega}{(\varphi f)^{{(p - 1)} q}}}
^{\frac{1}{q}}
\norm{\nabla \brac{\varphi f}}_{L^{q'}(\Omega)} \\
&= C p
\norm{\varphi f}_{L^{(p - 1) q}(\Omega)}
^{p - 1}
\norm{\varphi \nabla f + f \nabla \varphi}
_{L^{q'}(\Omega)} \\
&\le C p
\norm{f}_{L^{(p - 1) q}(\Omega)}
^{p - 1}
\norm{f}
_{W^{1, q'}(\Omega)}.
\end{align*}
\endgroup
The first inequality then follows from raising both sides to the $\frac{1}{p}$ power and using $p^{1/p} \le e^{1/e}$. The second inequality follows from Poincar\'e's inequality.
\end{proof}
\begin{remark}
The trace inequality in \refL{Trace} is a folklore result,
most commonly referenced in the special case where
$p = q = q' = 2$. We proved it for completeness, since we
could not find a proof (or even clear statement) in the literature.
We also note that a simple, but incorrect, proof of it
(for $p = q = q' = 2$) is
to apply the \textit{invalid} trace inequality from
$H^{\frac{1}{2}}(\Omega)$ to $L^2(\Gamma)$ then use
Sobolev interpolation.
\end{remark}
Note that in \cref{L:Trace} it could be that $(p - 1) q \in (0, 1)$, though in our application of it in \cref{S:LpNormsBlowUp}, via \cref{C:TraceCor}, we have $(p - 1) q = 2$. Also, examining the last step in the proof, we see that for $p = 1$ the lemma reduces to $\norm{f}_{L^1(\Gamma)} \le C \norm{f}_{W^{1, q'}(\Omega)}$, which is not useful.
\begin{cor}\label{C:TraceCor}
Let $p, q, q'$ be as in \cref{L:Trace}.
For any $v \in H$,
\begin{align*}
\norm{v}_{L^p(\Gamma)}
\le C \norm{v}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{\nabla v}_{L^{q'}(\Omega)}
^{\frac{1}{p}}
\end{align*}
and for any $v \in V \cap H^2(\Omega)$,
\begin{align*}
\norm{\curl v}_{L^p(\Gamma)}
\le C \norm{\curl v}_{L^{(p - 1) q}(\Omega)}
^{1 - \frac{1}{p}}
\norm{\nabla \curl v}_{L^{q'}(\Omega)}
^{\frac{1}{p}}.
\end{align*}
\end{cor}
\begin{proof}
If $v \in H$, then
\begin{align*}
\int_\Omega v^i
& = \int_\Omega v \cdot \ensuremath{\bm{n}abla} x_i
= - \int_\Omega \dv v \, x_i
+ \int_\Gamma (v \cdot \bm{n}) x_i
= 0.
\end{align*}
If $v \in V$ then
\begin{align*}
\int_\Omega \curl v
&= - \int_\Omega \dv v^\perp
= - \int_{\ensuremath{\partial} \Omega} v^\perp \cdot \bm{n}
= 0.
\end{align*}
Thus, \refL{Trace} can be applied to $v_1, v_2$, and $\curl v$, giving the result.
\end{proof}
}
{
}
\end{document}
\begin{document}
\title{Experimental challenges for high-mass matter-wave interference with nanoparticles}
\begin{abstract}
We discuss recent advances towards matter-wave interference experiments with free beams of metallic and dielectric nanoparticles. They require a brilliant source, an efficient detection scheme and a coherent method to divide the de Broglie waves associated with these clusters: We describe an approach based on a magnetron sputtering source which ejects an intense cluster beam with a wide mass dispersion but a small velocity spread of $\Delta v/v < 10$\%. The source is \emph{universal} as it can be used with all conducting and many semiconducting or even insulating materials. Here we focus on metals and dielectrics with a low work function of the bulk and thus a low cluster ionization energy. This allows us to realize photoionization gratings as coherent matter-wave beam splitters and also to realize an efficient ionization detection scheme. These new methods are now combined in an upgraded Talbot-Lau interferometer with three 266 nm depletion gratings.
We here describe the experimental boundary conditions and how to realize them in the lab. This next generation of near-field interferometers shall allow us to soon push the limits of matter-wave interference to masses up to $10^6$ amu.
\end{abstract}
\keywords{Matter waves, Quantum mechanics, Matter interference, Quantum optics, Metal clusters, Dielectric nanoparticles, Molecular beams }
\section{INTRODUCTION}
\label{sec:intro}
Probing the quantum superposition principle with objects of increasing mass is expected to advance our understanding of the transition from quantum to classical phenomena and thus to tackle the question why our world appears to be classical. Matter-wave interferometry is a paradigmatic realization of the superposition principle and both decoherence theory and a variety of models of non-standard quantum physics suggest that deviations from perfect quantum coherence are to be expected for quantum objects of increasing complexity. The influence of novel effects, such as spontaneous or gravitational wave-function collapse\cite{Bassi2003},
conjectured space-time fluctuations\cite{Wang2006} or medium-mass dark matter\cite{Riedel2015,Riedel2017} are even expected to scale quadratically in mass. Furthermore, certain tests of gravity would profit from the possibility to compare in the same setup the free fall of atoms, clusters and nanoparticles, with different mass, different materials, different internal energy, and many different types of angular momentum\cite{Rodewald2018}.
\section{INTERFEROMETER CONCEPT}
The overall concept of near-field interferometry goes back to fundamental studies in optics by H.~F. Talbot\cite{Talbot1836} who realized that illuminating a single grating by a plane wave will result in the formation of a grating self-image at periodic distances behind the mask, as determined by optical near-field interferometry.
For spatially incoherent sources this concept was extended by E. Lau\cite{Lau1948} who realized that even for arbitrary sources, sufficient spatial coherence can be created by transmitting incoherent light through an array of narrow slit sources. This is based on Huygens principle that every point source is the origin of a coherent spherical wave - which is then captured in more mathematical terms by Kirchhoff-Fresnel diffraction and the van Cittert-Zernike theorem\cite{Born1993}.
Very similar ideas hold for de Broglie waves, where the argument can also be led using Heisenberg's uncertainty relation: enforced confinement to a single slit determined the position which renders the momentum uncertain, and thus enforces quantum uncertainty (coherence) in the position further downstream.
\paragraph{Talbot-Lau interferometry:}The combination of a parallel source array with coherent self-imaging became known as the Talbot-Lau effect and has been used in optics for decades\cite{Patorski1983}. The idea was first implemented into matter-wave interferometry by J. Clauser and S. Li\cite{Clauser1994} using potassium atoms in an interferometer with three nanomechanical gratings. Generally, when three identical transmission masks ($G_1...G_3$) of period $d$ are placed at equal distance, in the vicinity of an integer multiple of the \textit{Talbot length} $L_T=d^2/\lambda_\mathrm{dB}$, the instrument generates a matter-wave density pattern of period $d$ at the position of the third grating, which can be probed by scanning the third mask and counting the number of transmitted particles with an associated de Broglie wavelength $\lambda_\mathrm{dB}$.
Clauser conjectured already in 1997 that this idea might become important in quantum experiments with\textit{ little rocks} and \textit{viruses}\cite{Clauser1997a}. Meanwhile, research in Vienna has actually walked a good part on that path, in many different experimental realizations. After the first diffraction of complex molecules\cite{Arndt1999}, the first Talbot-Lau interferometer for molecules was realized with fullerenes\cite{Brezger2002} and porphyrins\cite{Hackermueller2003}. It soon became clear that the velocity-dependent van der Waals interaction between the molecules and material grating walls would limit efforts to upscale this idea to true 'rocks'. This triggered the concept of a Kapitza-Dirac-Talbot-Lau interferometer \cite{Brezger2003}, where the central second grating was realized using a green standing wave of 532 nm laser light retro-reflected at a plane mirror to create a $d = 266$ nm optical phase grating, exploiting the dipole force between the polarizable molecules and the rapidly oscillating electric laser field. The phase imprinted onto the molecular wave function at $G_2$ then results in a molecular diffraction pattern further downstream. This \emph{Kapitza-Dirac-Talbot-Lau} interferometer was realized with larger and floppier molecules \cite{Gerlich2007} and became also the basis for numerous experiments in molecule metrology, where the interferometer is being used as a quantum sensor for internal molecular properties exposed to external electric, magnetic and optical fields\cite{Gring2010,Eibenberger2011,Fein2020b,Fein2020c}.
\paragraph{Long-Baseline Universal Matter-wave Interferometer:}
The Long-Baseline Universal Matter-wave Interferometer (LUMI) in Vienna is a three-grating Talbot-Lau interferometer, which started as a 10-fold extension of the Kapitza-Dirac-Talbot-Lau interferometer, with a grating separation of $L=1$ m. This instrument handled a large variety of different particle classes, from alkali and alkali earth atoms\cite{Fein2020a} over vitamins\cite{Mairhofer2017} and tripeptides\cite{Schaetti2020}, polar and non-polar molecules\cite{Gerlich2007} to molecular radicals\cite{Fein2022}. Recently, LUMI has been used to demonstrate quantum interference of molecules consisting of up to 2000 atoms and with masses up to 28.000\,u~\cite{Fein2019}. All those experiments started from natural or synthetic molecules, naturally predefined nanostructures. And while size and mass-selected nanostructures also occur naturally for instance in the form of proteins or nucleic acids, our universal approach here exploits the in-flight aggregation and post-selection of very massive atomic clusters. A suitable source for these clusters and a useful tool for ionizing them efficiently by ultraviolet light has recently been demonstrated, experimentally\cite{Pedalino2022}: Giant metal clusters of Hafnium and Yttrium, composed of thousands of atoms, can be readily produced in a magnetron sputtering source and photoionized by 266\,nm radiation. LUMI 2.0 now also contains an upgrade to three ultra-violet photodepletion standing light waves with a period of $133$ nm. This resembles prior matter-wave interferometry with polypeptides\cite{Shayeghi2020}, but the use of continuous laser beams in Kapitza-Dirac diffraction regime adds additional boundary conditions. Here we focus on how to identify and fulfill the interferometer alignment criteria, on methods to select the particle velocity using a random photo-depletion chopper as well as on the requirements to normalize signal intensity and phase.
\subsection{Alignment criteria}
Talbot-Lau interferometry with photodepletion gratings offers a number of important advantages over many other types of matter-wave interferometers: it can accept spatially incoherent particle beams and thus also accept much larger beam divergence angles and orders of magnitude higher molecular flux than far-field interferometry (e.g. Mach-Zehnder interferometry) which would require sub-microradian collimation of mesoscopic particle beams. Moreover, photodepletion gratings are universal matter wave-front dividers: They are 'threshold' elements rather than resonant matter-optical elements: photons of sufficient energy will always ionize a metal cluster, independent of its internal excitation spectrum.
Naively one might believe that near-field interferometry with nanoclusters must be terribly hard to align, since the de Broglie wavelength of a 100\,kDa metal cluster travelling at 100\,m/s is only $\lambda_\mathrm{dB}=40$ fm while all foreseeable grating dimensions will always stay above 100 nm, i.e. nearly seven orders of magnitude larger than that. The de Broglie wavelength is also five orders of magnitude smaller than the particle itself and one could believe that even the particle structure should play a role on that scale. Interestingly, it does not -- at least not yet in any of the successful experiments -- because molecule interferometry is always interferometry of the object with itself. However, in order to obtain high interference contrast, strict coherence and alignment criteria have to be met. Some boundary conditions for that have already been described in \cite{Nimmrichter2008,Hornberger2009,Fein2020c}. Here, we adapt them for the specific case of three continuous wave photodepletion gratings acting on slow, massive metal clusters. Moreover, for the following calculations we are considering a beam of yttrium clusters with a mass of $m=100$ kDa and a forward velocity of $v_x= 100$ m/s.
Due to relaxed collimation requirements in Talbot-Lau interferometry compared to far-field diffraction, the goal is to work with as many grating slits $N$ as possible to increase the overall transmitted particle flux. While the use of more grating slits would ideally lead to an increase in the number of Talbot orders and a corresponding increase in the interference contrast, real-world optics impose several boundary conditions that limit the number of Talbot orders that can be achieved. These boundary conditions include factors such as the grating periodicity, spacing, roll, yaw, pitch, and the quality of the diffraction beam profile, as well as limitations imposed by collisional, vibrational, and thermal decoherence. As a result, the number of Talbot orders and the resulting interference contrast may be reduced, and the shape of the interference pattern may be altered. By scanning $G_3$ and counting the transmitted particles, the true particle density pattern can be revealed. If the shape of the interference fringes is sinusoidal, which is typically a valid assumption, the normalized signal difference between the maximum and minimum of the interference fringes can be quantified using the interference fringe visibility:
\begin{equation}
V=\frac{S_{\mathrm{max}}-S_{\mathrm{min}}}{S_{\mathrm{max}}+S_{\mathrm{min}}}. \label{visibility}
\end{equation}This fringe visibility is different (usually higher) for a genuine quantum interference pattern in comparison to a classical Moiré shadow pattern \cite{Hornberger2009}. Understanding the details of all influences is therefore important.
\begin{figure}
\caption{Schematic of the various degrees of freedom for an all-optical Talbot-Lau interferometer. The red arrow indicates the direction of the molecular beam. The grating angles $\vartheta$ (grating roll), $\Theta$ (grating pitch) and $\Phi$ (grating yaw) have to be aligned with respect to the incident molecular beam axis. The vertical grating waist $w_y$ restricts the collimation of the molecular beam height $H$ and the longitudinal waist $w_x$ defines the grating thickness and the ionization efficiency for the clusters with a given absorption cross section $\sigma_\mathrm{abs}$.}
\label{fig:my_label}
\end{figure}
\paragraph{Grating period:} Consider first a wide parallel bundle of geometric (classical) rays passing the three gratings, all separated by the same distance $T_L$.
If all gratings have a perfectly identical period, the matter-wave interferogram will perfectly match the period of $G_3$. However, a consistent deviation $\Delta d$ from the ideal period $d$ in the first or last gratings will accumulate a fringe shift of $N\cdot \Delta d$ when $N$ slits are illuminated. A fringe maximum will thus turn into a minimum for $N\cdot \Delta d=d/2$. A similar argument holds for the genuine interferogram and one is well advised to keep
\begin{equation}
N < \frac{d}{10\Delta d} \label{slitnumber}
\end{equation}The laser in LUMI 2.0 has a linewidth of $\Delta \nu =20$ MHz, compared to a base frequency of $\nu=1.127\times 10^{15}$ Hz ($\lambda=$ $266$ nm). This would even allow for molecular beams as wide as $N d$ with $N=5\times 10^6$, that is, gratings larger than 70 cm (!). The high mode quality of modern lasers is an obvious argument in favor of optical rather than material gratings, and also an argument in favor of continuous rather than pulsed laser beams.
\paragraph{Grating intensity stability:} Laser powers of approximately 1 W can be consistently achieved at a wavelength of 266 nm, which is sufficient for effectively ionizing and depleting a diverse range of massive metal clusters. However, it has been observed that optical elements tend to degrade over time when exposed to high power UV light, particularly under high-vacuum conditions. While this primarily presents an issue of longevity, mechanical noise and thermal fluctuations within the doubling cavity can also lead to short-term intensity fluctuations, which directly affect the interference contrast.
The interference visibility of the LUMI interferometer asymptotically approaches its maximum with increasing laser power applied at the first and third grating, while it oscillates as a function of power applied at the middle grating.
In the vicinity of the power optimum, the power dependence of the interference visibility is thus sub-linear for all three gratings. In order to maintain a fidelity of $>90\%$, it is therefore sufficient to maintain:
\begin{equation}
\frac{\Delta I}{I}< 0.1 .
\end{equation}
\paragraph{Gaussian beam envelope and yaw angle:}
The UV grating lasers are well described by the Gaussian profile with a waist along the molecular beam of $w_x=15$ µm and a waist transverse to it of $w_y=1.5$ mm. This introduces a height dependence in both the laser-induced cluster beam depletion and in the phase shift imprinted in $G_2$. This modulates the fringe visibility but it cannot wash out the interference pattern. Of course, clusters that do not interact with the diffraction grating only contribute to the background and reduce the fringe visibility. We therefore make sure that the molecular beam height $H$ is smaller than the vertical waist $w_y$, in all three grating zones.
If the cluster beam were highly collimated, the longitudinal waist $w_x$ could also be large, because the accumulated phase is proportional to the laser power P, the optical polarizability $\alpha_\mathrm{opt}$, the particle forward velocity $v_x$ and the vertical beam waist $w_y$, but independent of $w_x$:
\begin{equation}
\Delta \varphi \propto \frac{P \cdot \alpha \cdot \tau}{w_x w_y}\propto \frac{P\cdot \alpha}{ v_x w_y}. \label{phase}
\end{equation}A similar argument holds for the absorption and ionization efficiency, which are proportional to the absorption cross section $\sigma_\mathrm{abs}$ rather than to $\alpha_\mathrm{opt}$. And yet a tight focus is needed and here realized by a cylindrical lens with a focal length of $f=130$ mm to achieve a longitudinal waist of $w_x=15$ µm.
This constraint is imposed by the finite divergence and also the finite control over the angle of incidence of the cluster beam or the mirror yaw angle with respect to the standing light wave. The goal is to keep all semi-classical trajectories contained within an angle of incidence $\Delta \vartheta$ that ensures that no cluster can average over a node and a neighboring antinode of the standing light wave:
\begin{equation}
10\cdot\Delta\vartheta < \arctan \left(\frac{d}{4 w_x}\right)\simeq d/4 w_x.
\label{collimation}
\end{equation}
With a period of $133$ nm and a waist of $15$ µm, an acceptance angle of $\Delta \vartheta \simeq 0.2$ mrad is still permissible. This allows a signal enhancement of about $10^6 $ over far-field diffraction at the same grating.
\paragraph{Wave front curvature:}
In Gaussian beam optics, a tight focus $w_x$ is associated with a short Rayleigh length $z_R=\pi w_x^2 /\lambda=2.65$ mm. The beam has a flat wave front on the mirror itself but its radius of curvature changes with distance $z$ from the mirror surface:
\begin{equation}
R(z)=z\left[1+\left(\frac{z_R}{z}\right)^2\right].
\end{equation}
Assuming no beam divergence and a perfect grating yaw we want to make sure that the curvature of the wave fronts does not reach into the nodes of the grating as illustrated by fig. \ref{fig:wavefronts}
\begin{equation}
w_x(z)\, \tan\!\left[\,\arcsin\!\left(\frac{w_x(z)}{R(z)}\right)\right] < d.
\end{equation}
The molecular beam must pass through the gratings at a distance $z$ that is less than $z_R$, as measured from the point of minimum waist at the mirror surface, in order to fulfill these requirements.
\begin{figure}
\caption{Intensity of the standing light waves at different distances $z_0$ illustrating the effect of the wave front curvature to the grating. As the distance $z$ increases from the focus which is on the mirror surface $z=0$, the radius of the curvature decreases to a minimum at the distance of the Rayleigh length $z=z_R$ and increases again afterwards. However, the waist $w(z)$ is also increasing with $z$ and the resulting wavefronts get more curved the further away one gets from the mirror surface. In order to keep high interference contrast, it is important that the molecules passing through the grating nodes do not see the curved wave fronts of the antinodes. A similar argument holds for the yaw of the grating mirror where the tilt is causing a narrowing of the effective slit width in addition to the curved wave fronts.}
\label{fig:wavefronts}
\end{figure}
Similar effects can occur due to wave front distortions imprinted by the mirror surface. The laser is focused onto a flat mirror, which alters the wave fronts of the standing light wave. However, these topological changes stay constant with the distance from the mirror and do not spoil the interferogram in any significant matter, as long as the position-dependent shift across the relevant laser beam profile is smaller than $\Delta x < \lambda/10$. This sets a minimum requirement for the mirror surface quality to maintain a clear interference pattern.
\paragraph{Grating separation:}
Coherent self-imaging in Talbot-Lau interferometry requires precise rephasing of a large set of individual wavelets.
For any given velocity, the distance between $G_1$ and $G_2$ as well as $G_2$ and $G_3$ needs to be equal such that\cite{Hornberger2009}
\begin{equation}
\frac{N d}{2L} = \frac{D}{\Delta L} <\frac{d / 2}{\Delta L} \Rightarrow \frac{\Delta L}{L}<\frac{1}{N}.
\label{eq:deltaL}
\end{equation}
For a cluster beam width of $W =1$\,mm, the number of illuminated grating periods is $N=1\mathrm{\,mm}/133\mathrm{\,nm} = 7500 $. We therefore require the relative grating separations to be equal to within $\Delta L < 1/7500 \,\mathrm{m}=133 $ µm.
\begin{figure}
\caption{Left: Schematic of a semi-classical trajectory in a three-grating Talbot-Lau interferometer. As the number of illuminated slits $N$ increases, the angle and therefore the diameter of confusion $D$ increases at a given distance. Right: This constrains the grating separations L to be aligned to $\Delta L$ as given by equation~\ref{eq:deltaL}.}
\label{fig:my_label}
\end{figure}
\paragraph{Grating pitch:}
All three laser mirrors of our interferometer need to be parallel to each other. Yaw and roll angle are controlled by precise mirror tilts, which can be controlled on the 10 µrad level. The grating pitch (rotation around the z-axis) is unaffected by any mirror motion, but will be affected by a rotation of the cylindrical lens that focuses the UV beam onto the grating mirror. Aligning this to better than $\theta_\mathrm{pitch}< 100$\,mrad ensures that the ensuing imbalance of the grating distances stays in the safe regime of
\begin{equation}
\Delta \theta_\mathrm{pitch} \cdot w_y< \Delta L
\end{equation}
\paragraph{Grating roll:}
Grating roll is here defined as a rotation around the x-axis, which points along the cluster beam. In the absence of gravity, the effect of roll is to introduce a y-dependent phase shift. Averaging over this shift reduces the interference contrast by the factor
\begin{equation}
R_{\mathrm{roll}}\simeq\frac{\sin \left(2 \pi \theta_\mathrm{roll} \,H /d\right)}{2 \pi \theta_\mathrm{roll} \,H/d},
\end{equation}
with the cluster beam height $H$, the grating roll angle $\theta_\mathrm{roll}$ and the grating period $d$.
\paragraph{Common roll with respect to gravity:}
In the presence of gravity, particles may fall across different grating slits during their flight time from the source to the detector, even if all gratings are perfectly aligned with each other, if their common roll angle $\theta_g$ with respect to gravity is nonzero. The phase shift is proportional to the effective falling distance $H\propto T^2=L^2/v^2$, which can be huge for slow clusters and a long flight path of $L=1$\,m. However, only the average over different phase shifts reduces the fringe visibility. The reduction of the matter-wave fringe visibility therefore additionally depends on the relative velocity spread $\sigma_v/v$:
\begin{equation}
R_{\mathrm{grav}}=\exp \left[-8\left(\frac{\pi g \sin (\theta_g) L^2 \sigma_{\mathrm{v}}}{ v^3 d}\right)^2\right].
\end{equation}
This sets a bound on the maximally permissible roll angle that is compatible with a visibility reduction of less than $R_{\mathrm{grav}}$:
\begin{equation}
\theta_g<\arcsin \left(\sqrt{-\frac{\ln \left(R_{\text {grav }}\right)}{8}} \frac{d\, v^3}{\pi g L^2 \sigma_{\mathrm{v}}}\right).
\end{equation}
\paragraph{Coriolis force:}
A matter-wave interferometer with vertical grating lines, i.e. k-vector perpendicular to gravity, can suppress all gravitationally-induced phase shifts,
but it will still accumulate a dispersive phase shift because the Earth is rotating underneath the interferometer while the clusters follow their geodesic path in high vacuum:
\begin{equation}
\Delta \varphi_\mathrm{Coriolis}=k_d (2 \mathbf{v}\times \mathbf{\Omega}_E) L^2/v^2
\end{equation}
Here, $k_d=2\pi /d$ is the grating wave vector, $v$ the forward velocity in the cluster beam and $\mathbf{\Omega}_E$ the Earth's angular velocity vector.
Again, an offset velocity does not harm the interference contrast. However, a finite velocity spread will again cause a reduction of the fringe visibility.
\paragraph{Optimized grating roll:}
Since both the gravitational phase shift and the Coriolis phase shift are velocity dependent, and since the sign of the gravitational phase can be chosen by the sign of the common roll angle, one can find an angle where both effects cancel exactly for one specific velocity and are still largely compensated in a finite velocity band around that velocity \cite{Fein2020a}. The optimal correction angle is determined by the geographic latitude of our lab:
\begin{equation}
\theta_0=-\Omega_E \sin(\theta_L) v/g
\end{equation}
Given that $\Omega_E=72$ µrad/s, $g=9.81$ m/s$^2$ and $\theta_L=$48° for Vienna, a cluster beam of $v=100$ m/s will require a common grating roll angle of
$\theta_0=-0.55$\,mrad which can be set easily and accurately.
\begin{figure}
\caption{Left: Visibility reduction as a function of grating roll misalignment with respect to each other for different vertical beam diameters. Right: Maximum permissible roll misalignment of the gratings with respect to gravity versus particle velocity and velocity spread.}
\label{fig:my_label}
\end{figure}
\paragraph{Independent grating vibrations:}
A lateral position shift of any of the gratings enters the total interferometer phase shift like
\begin{equation}
\Delta \varphi =k_d (x_1-2x_2+x_3) = k_d (A_1 \sin(\omega_1 t)-2 A_2 \sin(\omega_2 t)+A_3 \sin(\omega_3 t)).
\end{equation}
Random x-oscillations of any of these gratings with a fixed amplitude $A_i$ but white noise frequency spectrum and random phase then reduce the total fringe visibility by a factor
\begin{equation}
R_{\mathrm{Vib}}=\left|J_0\left(\frac{2 \pi}{d} A_{1,3}\right)\right|^2 \cdot\left|J_0\left(\frac{4 \pi}{d} A_2\right)\right|,
\end{equation}
where $J_0$ is the zeroth order Bessel function.
\paragraph{Common mode vibrations:}
In order to suppress excitations of the individual gratings, the entire interferometer is mounted on a massive Invar bar which is suspended by piano wires, isolated by a stack of plates and Teflon balls and damped by eddy current brakes.
This still allows for common mode motion of amplitude $A$ and frequency $\omega$ which would reduce the fringe visibility by
\begin{equation}
R_{\mathrm{vib}}=\left|J_0\left(\frac{8 \pi A}{d} \sin ^2\left(\frac{\omega L}{2 v_z}\right)\right)\right|.
\end{equation}
\begin{figure}
\caption{Left: Visibility reduction as a function of frequency for different amplitudes of common grating vibrations. A monochromatic velocity of 90 m/s is assumed here. At certain frequencies, visibility revivals can be observed. Right: Visibility reduction as a function of amplitude for independent grating vibrations. For individual grating vibrations, the visibility reduction is independent of the vibration frequency.}
\label{fig:common_vib}
\end{figure}
\section{CONCLUSION}
Matter-wave interferometry with metal clusters is a promising path towards quantum experiments with objects of high mass, whose separation can be on the scale of 100\,nm, i.e. substantially larger than the object itself. The alignment criteria outlined in this work demonstrate that, while challenging, it is feasible to construct an interferometer capable of studying particles with masses of 100 kDa using state-of-the-art technologies. By collimating the molecular beam, particularly the beam width $b$ in the transverse direction, the alignment criteria can be met. A magnetron sputtering source can already provide sufficient flux to work with collimated beams and metal clusters up to 1 MDa. While increasing the degree of collimation will make the alignment criteria less stringent, it will also decrease the signal-to-noise ratio. In practice the collimation will be a trade-off between the flux and alignment feasibility.
Furthermore, this study highlights the importance of ongoing advancements in molecular beam source and detection techniques in order to achieve the high and stable molecular flux required to meet the more stringent alignment requirements necessary to study clusters with masses up to 1 MDa and beyond.
The reward will be new interferometric exclusions of non-linear alternatives to quantum mechanics, as well as a powerful tool that can serve material science with refined measurements of properties of isolated nanomaterials.
\begin{table}[h]
\centering
\begin{tabular}{c|c|c}
Degree of freedom & Restriction & Alignment requirement \\
\hline
Grating period & $N < \frac{d}{10\Delta d}$ & $N \leq 5 \times 10^6 d$ \\
Grating separation & $\frac{\Delta L}{L}< \frac{1}{N}$ & $\Delta L<133$ µm \\
Grating roll & $R_\vartheta<0.9$ & $\Delta\vartheta <0.02$ mrad \\
Grating yaw & $10\cdot\Delta\Phi <\frac{d}{4w_x}$ & $\Delta\Phi<$0.2 mrad \\
Grating pitch & $ \Delta \Theta\cdot w_y <\Delta L $ & $\Delta\Theta<100$ mrad \\
Wave front shape & $\frac{w(z)^2}{R(z)}\ll d$ & $z<2.7$ mm
\end{tabular}
\caption{Summarized alignment criteria for an all-optical Talbot--Lau interferometer with a grating period of $d=133$ nm separated by $L=1$ m, a molecular beam height and width of $H=1$ mm and $b=1$ mm and a laser beam waist of $w_x=15$ µm and $w_y=1.5$ mm required to observe quantum interference of yttrium clusters with a mass of $m=100$ kDa and a forward velocity of $v_x=100$ m/s. }
\end{table}
\acknowledgments
We acknowledge funding by the Austrian Science Funds in FWF project No. P 32543-N (MUSCLE) as well as support by the Gordon and Betty Moore Foundation within the project No. 10771 (ELUQUINT).
\end{document} |
\begin{document}
\begin{abstract}
Let $G$ be a connected complex reductive algebraic group with Lie algebra $\mathfrak{g}$. The Lusztig--Vogan bijection relates two bases for the bounded derived category of $G$-equivariant coherent sheaves on the nilpotent cone $\mathcal{N}$ of $\mathfrak{g}$. One basis is indexed by $\Lambda^+$, the set of dominant weights of $G$, and the other by $\Omega$, the set of pairs $(\mathcal{O}, \mathcal{E})$ consisting of a nilpotent orbit $\mathcal{O} \subset \mathcal{N}$ and an irreducible $G$-equivariant vector bundle $\mathcal{E} \rightarrow \mathcal{O}$. The existence of the Lusztig--Vogan bijection $\gamma \colon \Omega \rightarrow \Lambda^+$ was proven by Bezrukavnikov, and an algorithm computing $\gamma$ in type $A$ was given by Achar. Herein we present a combinatorial description of $\gamma$ in type $A$ that subsumes and dramatically simplifies Achar's algorithm.
\end{abstract}
\title{Computing the Lusztig--Vogan Bijection}
\tableofcontents
\section*{Overview}
In 1989, Lusztig concluded his landmark four-part study of cells in affine Weyl groups \cite{Lusztig1, Lusztig2, Lusztig3, Lusztig4} with an almost offhand remark:
\begin{quote}
``\ldots we obtain a (conjectural) bijection between $X_{\text{dom}}$ and the set of pairs $(u, \rho)$, (up to $G$-conjugacy) with $u \in G$ unipotent and $\rho$ an irreducible representation of $Z_G(u)$.''
\end{quote}
By $X_{\text{dom}}$, Lusztig meant the set of dominant weights of a connected complex reductive algebraic group $G$. (We refer to this set as $\Lambda^+$.) We denote by $\Omega$ the set of pairs $(\mathcal{C}, V)$, where $\mathcal{C} \subset G$ is a unipotent conjugacy class and $V$ is an irreducible representation of the centralizer $Z_G(u)$ for $u \in \mathcal{C}$, which is uniquely determined by $\mathcal{C}$ up to inner isomorphism.
So elementary an assertion was Lusztig's claim of a bijection between $\Lambda^+$ and $\Omega$ that its emergence from so deep an opus was in retrospect an obvious indication that the close connection between the sets in question transcends the setting in which it was first glimpsed.
Indeed, Vogan's work on associated varieties \cite{Vogan} led him to the same supposition only two years later. Let $\mathfrak{g}$ denote the Lie algebra of $G$, and let $\mathcal{N}^*$ denote the nilpotent cone of the dual space $\mathfrak{g}^*$. Fixing a compact real form $K$ of $G$ with Lie algebra $\mathfrak{k}$, let $\mathfrak{C}$ be the category of finitely generated $(S(\mathfrak{g}/\mathfrak{k}), K)$-modules for which each prime ideal in the support corresponds under the Nullstellensatz to a subvariety of $(\mathfrak{g}/\mathfrak{k})^* \subset \mathfrak{g}^*$ contained in $\mathcal{N}^*$. In 1991, Vogan \cite{Vogan} showed that $\Omega$ --- in an alternate incarnation as the set of pairs $(\mathcal{O}, V)$, where $\mathcal{O} \subset \mathcal{N}^*$ is a coadjoint orbit and $V$ is an irreducible representation of the stabilizer $G_X$ for $X \in \mathcal{O}$ --- indexes a basis for the Grothendieck group $K_0(\mathfrak{C})$. That $\Lambda^+$ also indexes such a basis pointed to an uncharted bijection.
Further evidence for the existence of what has come to be known as the \textit{Lusztig--Vogan bijection} was uncovered by Ostrik \cite{Ostrik}, who was first to consider $\Omega$ and $\Lambda^+$ in the context in which the conjecture was ultimately confirmed by Bezrukavnikov \cite{Bezrukav} --- that of the equivariant $K$-theory of the nilpotent cone of $\mathfrak{g}$. Let $\mathcal{N}$ denote this nilpotent cone. Ostrik examined $(G \times \mathbb{C}^*)$-equivariant coherent sheaves on $\mathcal{N}$. Subsequently, Bezrukavnikov examined $G$-equivariant coherent sheaves on $\mathcal{N}$ and proved Lusztig and Vogan's claim.
Let $\mathfrak{D} := \textbf{D}^b(\operatorname{Coh}^G(\mathcal{N}))$ be the bounded derived category of $G$-equivariant coherent sheaves on $\mathcal{N}$. Bezrukavnikov \cite{Bezrukav} showed not only that $\Omega$ and $\Lambda^+$ both index bases for the Grothendieck group $K_0(\mathfrak{D})$, but also that there exists a bijection $\gamma \colon \Omega \rightarrow \Lambda^+$ uniquely characterized by the following property: For any total order $\leq$ on $\Lambda^+$ compatible with the root order, if $\leq$ is imposed on $\Omega$ via $\gamma^{-1}$, then the change-of-basis matrix is upper triangular.
In his proof, Bezrukavnikov did not construct $\gamma$. Instead, he exhibited a $t$-structure on $\mathfrak{D}$, the heart of which is a quasi-hereditary category with irreducible objects indexed by $\Omega$ and costandard objects indexed by $\Lambda^+$. This entailed the existence of $\gamma$, but left open the question of how $\gamma$ is computed.\footnote{In type A, the existence of the Lusztig--Vogan bijection also follows from Xi's work on the based ring of the affine Weyl group \cite{Xi}, in which he proved a more general conjecture of Lusztig \cite{Lusztig4}. }
In his 2001 doctoral thesis \cite{Achart}, Achar set $G := GL_n(\mathbb{C})$ and formulated algorithms to compute inverse maps $\Omega \rightarrow \Lambda^+$ and $\Lambda^+ \rightarrow \Omega$ that yield an upper triangular change of basis in $K_0(\mathfrak{C})$. Then, in a follow-up article \cite{Acharj}, he showed that his calculations carry over to $K_0(\mathfrak{D})$ and therefore that his bijection agrees with Bezrukavnikov's.
Achar's algorithm for $\gamma^{-1}$ is elegant and simple. Unfortunately, his algorithm for $\gamma$ is a series of nested while loops, set to terminate upon reaching a configuration satisfying a list of conditions. Progress is tracked by a six-part monovariant, which is whittled down as the algorithm runs. Achar \cite{Achart, Acharj} proved that his algorithm halts on every input after finitely many steps. But it does not directly describe the image of a given pair $(\mathcal{O}, V) \in \Omega$.
In this article, we present an algorithm that directly describes the terminal configuration returned by Achar's algorithm on an input in $\Omega$, bypassing all of Achar's while loops and obviating the need for an accompanying monovariant. The upshot is a combinatorial algorithm to compute $\gamma$ for $G = GL_n(\mathbb{C})$ that encompasses and expedites Achar's algorithm and holds the prospect of extension to other classical groups.\footnote{A conjectural algorithm, to compute $\gamma$ for \textit{even} nilpotent orbits in type $C$, is featured in Chapter 7 of the author's 2017 doctoral thesis \cite{Rush}. }
\eject
\section*{Index of Notation}
\begin{tabularx}{6.0in}{l X l}
$G$ & connected complex reductive algebraic group & \S 1.1 \\
$\mathfrak{g}$ & Lie algebra of $G$ & \S 1.1 \\
$\mathcal{N}$ & nilpotent cone of $\mathfrak{g}$ & \S 1.1 \\
$\mathfrak{D}$ & bounded derived category of $G$-equivariant coherent sheaves on $\mathcal{N}$ & \S 1.1 \\
$X$ & nilpotent element & \S 1.1 \\
$\mathcal{O}_X$ & nilpotent orbit of $X$ & \S 1.1 \\
$G_X$ & stabilizer of $X$ & \S 1.1 \\
$(\mathcal{O}_X, V)$ & pair consisting of nilpotent orbit $\mathcal{O}_X$ and irreducible $G_X$-representation $V$ & \S 1.1 \\
$IC_{(\mathcal{O}_X, V)}$ & intersection cohomology complex associated to $(\mathcal{O}_X, V)$ & \S 1.1 \\
$\Omega$ & equivalence classes of pairs $(\mathcal{O}_X, V)$ & \S 1.1 \\
$A_{\lambda}$ & complex associated to weight $\lambda$ via Springer resolution & \S 1.1 \\
$\Lambda$ & weight lattice of $G$ & \S 1.1 \\
$\Lambda^+$ & dominant weights of $G$ & \S 1.1 \\
$\gamma(\mathcal{O}_X, V)$ & Lusztig--Vogan bijection & \S 1.1 \\
$A^P_{\lambda}$ & complex associated to weight $\lambda$ via $T^*(G/P) \rightarrow \overline{\mathcal{O}}$ & \S 1.1 \\
$[\alpha_1, \ldots, \alpha_{\ell}]$ & partition associated to $X$ & \S 1.2 \\
$[k_1^{a_1}, \ldots, k_m^{a_m}]$ & distinct parts of $\alpha$ with multiplicities & \S 1.2 \\
$G_X^{\text{red}}$ & reductive part of $G_X$ & \S 1.2 \\
$[\alpha^*_1, \ldots, \alpha^*_s]$ & conjugate partition to $\alpha$ & \S 1.2 \\
$P_X$ & parabolic subgroup associated to $X$ & \S 1.2 \\
$L_X$ & Levi factor of $P_X$ & \S 1.2 \\
$L_X^{\text{ref}}$ & Levi subgroup of $L_X$ containing $G_X^{\text{red}}$ & \S 1.2 \\
$X_{\alpha}$ & representative element of $\mathcal{O}_X$ & \S 1.3 \\
$\mathcal{O}_{\alpha}$ & $\mathcal{O}_{X_{\alpha}}$ & \S 1.3 \\
$V^{\nu(t)}$ & irreducible $GL_{a_t}$-representation with highest weight $\nu(t)$ & \S 1.3 \\
$V^{(\nu(1), \ldots, \nu(m))}$ & $V^{\nu(1)} \boxtimes \cdots \boxtimes V^{\nu(m)}$ & \S 1.3 \\
$[\nu_1, \ldots, \nu_{\ell}]$ & integer sequence & \S 1.3 \\
$G_{\alpha}$ & $G_{X_{\alpha}}$ & \S 1.3 \\
$G_{\alpha}^{\text{red}}$ & $G_{X_{\alpha}}^{\text{red}}$ & \S 1.3 \\
$V^{(\alpha, \nu)}$ & $G_{\alpha}$-representation arising from $V^{(\nu(1), \ldots, \nu(m))}$ & \S 1.3 \\
$P_{\alpha}$ & $P_{X_{\alpha}}$ & \S 1.3 \\
$L_{\alpha}$ & $L_{X_{\alpha}}$ & \S 1.3 \\
$\Lambda^+_{\alpha}$ & dominant weights of $L_{\alpha}$ & \S 1.3 \\
$W^{\lambda^j}$ & irreducible $GL_{\alpha^*_j}$-representation with highest weight $\lambda^j$ & \S 1.3 \\
$W^{\lambda}$ & $W^{\lambda^1} \boxtimes \cdots \boxtimes W^{\lambda^s}$ & \S 1.3 \\
$A^{\alpha}_{\lambda}$ & $A^{P_{\alpha}}_{\lambda}$ & \S 1.3 \\
$W_{\alpha}$ & Weyl group of $L_{\alpha}$ & \S 1.3 \\
$\rho_{\alpha}$ & half-sum of positive roots of $L_{\alpha}$ & \S 1.3 \\
$W$ & Weyl group of $G$ & \S 1.3 \\
$\operatorname{dom}(\mu)$ & unique dominant weight of $G$ in $W$-orbit of $\mu$ & \S 1.3 \\
$\Omega_{\alpha}$ & dominant integer sequences with respect to $\alpha$ & \S 1.3 \\
$\Lambda^+_{\alpha, \nu}$ & dominant weights $\mu$ of $L_{\alpha}$ such that $V^{(\alpha, \nu)}$ occurs in decomposition of $W^{\mu}$ as direct sum of irreducible $G_{\alpha}^{\text{red}}$-representations & \S 1.3 \\
$\mathfrak{A}(\alpha, \nu)$ & integer-sequences version of algorithm & \S 1.5 \\
$\mathsf{A}(\alpha, \nu)$ & Achar's algorithm & \S 1.5 \\
$\mathcal{A}(\alpha, \nu)$ & weight-diagrams version of algorithm & \S 1.5 \\
$\operatorname{dom}(\iota)$ & rearrangement of entries of $\iota$ in weakly decreasing order & \S 2.1 \\
$\mathcal{C}_{-1}(\alpha, \nu, i, I_a, I_b)$ & candidate-ceiling function & \S 2.2 \\
$\mathcal{R}_{-1}(\alpha, \nu)$ & ranking-by-ceilings function & \S 2.2 \\
$\sigma$ & permutation & \S 2.2 \\
$\mathbb{Z}^{\ell}_{\text{dom}}$ & weakly decreasing integer sequences of length $\ell$ & \S 2.2 \\
$\mathcal{U}_{-1}(\alpha, \nu, \sigma)$ & column-ceilings function & \S 2.2 \\
$\mathfrak{A}_{\operatorname{iter}}(\alpha, \nu)$ & iterative integer-sequences version of algorithm & \S 2.2 \\
$D_{\alpha}$ & weight diagrams of shape-class $\alpha$ & \S 3 \\
$X$ & weight diagram & \S 3 \\
$X^j_i$ & $i^{\text{th}}$ entry from top in $j^{\text{th}}$ column of $X$ & \S 3 \\
$EX$ & map $D_{\alpha} \rightarrow D_{\alpha}$ & \S 3 \\
$(X, Y)$ & diagram pair & \S 3 \\
$\kappa(X)$ & map $D_{\alpha} \rightarrow \Omega_{\alpha}$ & \S 3 \\
$h(X)$ & map $D_{\alpha} \rightarrow \Lambda^+_{\alpha}$ & \S 3 \\
$\eta(Y)$ & map $D_{\alpha} \rightarrow \Lambda^+$ & \S 3 \\
$D_{\ell}$ & weight diagrams with $\ell$ rows & \S 4.1 \\
$X_{i,j}$ & entry of $X$ in $i^{\text{th}}$ row and $j^{\text{th}}$ column & \S 4.1 \\
$\mathcal{S}(\alpha, \sigma, \iota)(i)$ & row-survival function & \S 4.1 \\
$\mathcal{k}$ & number of branches & \S 4.1 \\
$\ell_x$ & number of rows surviving into $x^{\text{th}}$ branch & \S 4.1 \\
$\mathcal{C}_1(\alpha, \nu, i, I_a, I_b)$ & candidate-floor function & \S 4.2 \\
$\mathcal{R}_1(\alpha, \nu)$ & ranking-by-floors function & \S 4.2 \\
$\mathcal{U}_1(\alpha, \nu, \sigma)$ & column-floors function & \S 4.2 \\
$\alpha^*_j$ & $|\lbrace i: \alpha_i \geq j \rbrace|$ & \S 4.2 \\
$\#(X,i)$ & number of boxes in $i^{\text{th}}$ row of $X$ & \S 4.3 \\
$\Sigma(X,i)$ & sum of entries in $i^{\text{th}}$ row of $X$ & \S 4.3 \\
$\mathcal{P}(\alpha, \iota)(i)$ & row-partition function & \S 5 \\
$\operatorname{Cat}$ & diagram-concatenation function & \S 5 \\
$\mathcal{T}_j(X)$ & column-reduction function & \S 5 \\
\end{tabularx}
\eject
\section{Introduction}
\subsection{Sheaves on the nilpotent cone}
Let $G$ be a connected complex reductive algebraic group with Lie algebra $\mathfrak{g}$. An element $X \in \mathfrak{g}$ is \textit{nilpotent} if $X \in [\mathfrak{g}, \mathfrak{g}]$ and the endomorphism $\operatorname{ad} X \colon \mathfrak{g} \rightarrow \mathfrak{g}$ is nilpotent. The \textit{nilpotent cone} $\mathcal{N}$ comprises the nilpotent elements of $\mathfrak{g}$. Since $\mathcal{N}$ is a subvariety of $\mathfrak{g}$ (cf. Jantzen \cite{Jantzen}, section 6.2), we may consider the bounded derived category $\mathfrak{D} := \textbf{D}^b(\operatorname{Coh}^G(\mathcal{N}))$ of $G$-equivariant coherent sheaves on $\mathcal{N}$.
Let $X \in \mathfrak{g}$ be nilpotent, and write $\mathcal{O}_X \subset \mathcal{N}$ for the orbit of $X$ in $\mathfrak{g}$ under the adjoint action of $G$. We refer to $\mathcal{O}_X$ as the \textit{nilpotent orbit} of $X$.
Write $G_X$ for the stabilizer of $X$ in $G$. To an irreducible representation $V$ of $G_X$ corresponds the $G$-equivariant vector bundle \[E_{(\mathcal{O}_X, V)} := G \times_{G_X} V \rightarrow \mathcal{O}_X\] with projection given by $(g, v) \mapsto \operatorname{Ad}(g) (X)$. Its sheaf of sections $\mathcal{E}_{(\mathcal{O}_X, V)}$ is a $G$-equivariant coherent sheaf on $\mathcal{O}_X$. To arrive at an object in the derived category $\mathfrak{D}$, we build the complex $\mathcal{E}_{(\mathcal{O}_X, V)}[\frac{1}{2} \dim \mathcal{O}_X]$ consisting of $\mathcal{E}_{(\mathcal{O}_X, V)}$ concentrated in degree $-\frac{1}{2} \dim \mathcal{O}_X$. Then we set \[IC_{(\mathcal{O}_X, V)} := j_{!*}\left(\mathcal{E}_{(\mathcal{O}_X, V)}\left[\frac{1}{2} \dim \mathcal{O}_X\right]\right) \in \mathfrak{D},\] where $j_{!*}$ denotes the Goresky--MacPherson extension functor obtained from the inclusion $j \colon \mathcal{O}_X \rightarrow \mathcal{N}$ and Bezrukavnikov's $t$-structure on $\mathfrak{D}$.
Let ${\Omega}^{\text{pre}}$ be the set of pairs $\lbrace (\mathcal{O}_X, V) \rbrace_{X \in \mathcal{N}}$ consisting of a nilpotent orbit $\mathcal{O}_X$ and an irreducible representation $V$ of the stabilizer $G_X$. We assign an equivalence relation to ${\Omega}^{\text{pre}}$ by stipulating that $(\mathcal{O}_X, V) \sim (\mathcal{O}_Y, W)$ if there exists $g \in G$ and an isomorphism of vector spaces $\pi \colon V \rightarrow W$ such that $\operatorname{Ad}(g) X = Y$ and the group isomorphism $\operatorname{Ad}(g) \colon G_X \rightarrow G_Y$ manifests $\pi$ as an isomorphism of $G_X$-representations.
Note that $(\mathcal{O}_X, V) \sim (\mathcal{O}_Y, W)$ implies $\mathcal{O}_X = \mathcal{O}_Y$ and $E_{(\mathcal{O}_X, V)} \cong E_{(\mathcal{O}_Y, W)}$. Thus, the map associating the intersection cohomology complex $IC_{(\mathcal{O}_X, V)}$ in $\mathfrak{D}$ to the equivalence class of $(\mathcal{O}_X, V)$ in $\Omega^{\text{pre}}$ is well-defined. Set $\Omega := \Omega^{\text{pre}} / \sim$. Then $\Omega$ indexes the family of complexes $\lbrace IC_{(\mathcal{O}_X, V)} \rbrace_{(\mathcal{O}_X, V) \in \Omega}$. (The notation $(\mathcal{O}_X, V) \in \Omega$ is shorthand for the equivalence class represented by $(\mathcal{O}_X, V)$ belonging to $\Omega$.)
On the other hand, weights of $G$ also give rise to complexes in $\mathfrak{D}$. To see this, let $B$ be a Borel subgroup of $G$, and fix a maximal torus $T \subset B$. A weight $\lambda \in \operatorname{Hom}(T, \mathbb{C}^*)$ is a character of $T$, from which we obtain a one-dimensional representation $\mathbb{C}^{\lambda}$ of $B$ by stipulating that the unipotent radical of $B$ act trivially. Then \[L_{\lambda} := G \times_B \mathbb{C}^{\lambda} \rightarrow G/B \] is a $G$-equivariant line bundle on the flag variety $G/B$. Its sheaf of sections $\mathcal{L}_{\lambda}$ is a $G$-equivariant coherent sheaf on $G/B$ which may be pulled back to the cotangent bundle $T^*(G/B)$ along the projection $p \colon T^*(G/B) \rightarrow G/B$.
From the Springer resolution of singularities $\pi \colon T^*(G/B) \rightarrow \mathcal{N}$, we obtain the direct image functor $\pi_{*}$, and then the total derived functor $R\pi_{*}$. We set \[A_{\lambda} := R\pi_{*} p^{*} \mathcal{L}_{\lambda} \in \mathfrak{D}.\]
Let $\Lambda := \operatorname{Hom}(T, \mathbb{C}^*)$ be the weight lattice of $G$, and let $\Lambda^+ \subset \Lambda$ be the subset of dominant weights with respect to $B$. The family of complexes $\lbrace A_{\lambda} \rbrace_{\lambda \in \Lambda^+}$ is sufficient to generate the Grothendieck group $K_0(\mathfrak{D})$, so it is this family which we compare to $\lbrace IC_{(\mathcal{O}_X, V)} \rbrace_{(\mathcal{O}_X, V) \in \Omega}$. Entailed in the relationship is the Lusztig--Vogan bijection.
\begin{thm}[Bezrukavnikov \cite{Bezrukav}, Corollary 4] \label{bez}
The Grothendieck group $K_0(\mathfrak{D})$ is a free abelian group for which both the sets $\lbrace [IC_{(\mathcal{O}_X, V)}] \rbrace_{(\mathcal{O}_X, V) \in \Omega}$ and $\lbrace [A_{\lambda}] \rbrace_{\lambda \in \Lambda^+}$ form bases. There exists a unique bijection $\gamma \colon \Omega \rightarrow \Lambda^+$ such that \[\left[IC_{(\mathcal{O}_X, V)}\right] \in \operatorname{span} \lbrace [A_{\lambda}] : \lambda \leq \gamma(\mathcal{O}_X, V) \rbrace, \] where the partial order on the weights is the root order, viz., the transitive closure of the relations $\upsilon \lessdot \omega$ for all $\upsilon, \omega \in \Lambda$ such that $\omega - \upsilon$ is a positive root with respect to $B$.
Furthermore, the coefficient of $[A_{\gamma(\mathcal{O}_X, V)}]$ in the expansion of $[IC_{(\mathcal{O}_X, V)}]$ is $\pm 1$.
\end{thm}
The association of the complex $A_{\lambda}$ to the weight $\lambda$ evinces a more general construction of objects in $\mathfrak{D}$ that is instrumental in identifying the bijection $\gamma$. Let $P \supset B$ be a parabolic subgroup, and let $U_P$ be its unipotent radical. Denote the Lie algebra of $U_P$ by $\mathfrak{u}_P$. The unique nilpotent orbit $\mathcal{O}$ for which $\mathcal{O} \cap \mathfrak{u}_P$ is an open dense subset of $\mathfrak{u}_P$ is called the \textit{Richardson orbit} of $P$, and there exists a canonical map $\pi \colon T^*(G/P) \rightarrow \overline{\mathcal{O}}$ analogous to the Springer resolution.
Let $L$ be the Levi factor of $P$ that contains $T$. From a weight $\lambda \in \Lambda$ dominant with respect to the Borel subgroup $B_L := B \cap L$ of $L$, we obtain an irreducible $L$-representation $W^{\lambda}$ with highest weight $\lambda$, which we may regard as a $P$-representation by stipulating that $U_P$ act trivially. Then \[M_{\lambda} := G \times_P W^{\lambda} \rightarrow G/P\] is a $G$-equivariant vector bundle on $G/P$. Pulling back its sheaf of sections $\mathcal{M}_{\lambda}$ to the cotangent bundle $T^*(G/P)$ along the canonical projection $p \colon T^*(G/P) \rightarrow G/P$, and then pushing the result forward onto $\mathcal{N}$, we end up with the complex \[A^P_{\lambda} := R\pi_* p^*\mathcal{M}_{\lambda} \in \mathfrak{D}.\]
Note that the Richardson orbit of $B$ is the \textit{regular nilpotent orbit} $\mathcal{O}^{\text{reg}}$, uniquely characterized by the property $\overline{\mathcal{O}^{\text{reg}}} = \mathcal{N}$. The Levi factor of $B$ containing $T$ is $T$ itself. Thus, for all $\lambda \in \Lambda$, the complex $A^B_{\lambda}$ is defined and coincides with $A_{\lambda}$, meaning that the above construction specializes to that of $\lbrace A_{\lambda} \rbrace_{\lambda \in \Lambda}$, as we claimed.
\subsection{The nilpotent cone of $\mathfrak{gl}_n$}
Henceforward we set $G := GL_n(\mathbb{C})$. Then $\mathfrak{g} = \mathfrak{gl}_n(\mathbb{C})$. Let $X \in \mathfrak{g}$ be nilpotent. The existence of the Jordan canonical form implies the existence of positive integers $\alpha_1 \geq \cdots \geq \alpha_{\ell}$ summing to $n$ and vectors $v_1, \ldots, v_{\ell}$ such that \[\mathbb{C}^n = \operatorname{span} \lbrace X^j v_i : 1 \leq i \leq \ell, 0 \leq j \leq \alpha_i -1 \rbrace \] and $X^{\alpha_i} v_i = 0$ for all $i$ (cf. Jantzen \cite{Jantzen}, section 1.1).
Express the partition $\alpha := [\alpha_1, \ldots, \alpha_{\ell}]$ in the form $[k_1^{a_1}, \ldots, k_m^{a_m}]$, where $k_1 > \cdots > k_m$ are the distinct parts of $\alpha$ and $a_t$ is the multiplicity of $k_t$ for all $1 \leq t \leq m$. Let $V_t$ be the $a_t$-dimensional vector space spanned by the set $\lbrace v_i : \alpha_i = k_t \rbrace$. Define a map \[\varphi_X \colon GL(V_1) \times \cdots \times GL(V_m) \rightarrow G_X\] by $\varphi_X(g_1, \ldots, g_m)(X^j v_i) := X^j g_t v_i$ for $v_i \in V_t$.
Note that $\varphi_X$ is injective. Let $G_X^{\text{red}}$ be the image of $\varphi_X$, and let $R_X$ be the unipotent radical of $G_X$. From Jantzen \cite{Jantzen}, sections 3.8--3.10, we see that $G_X^{\text{red}}$ is reductive and $G_X = G_X^{\text{red}} R_X$. Since $R_X$ acts trivially in any irreducible $G_X$-representation, specifying an irreducible representation of $G_X$ is equivalent to specifying an irreducible representation of $G_X^{\text{red}}$, which means specifying an irreducible representation of $GL_{a_1, \ldots, a_m} := GL_{a_1} \times \cdots \times GL_{a_m}$.
Let $\alpha^* = [\alpha^*_1, \ldots, \alpha^*_s]$ be the conjugate partition to $\alpha$, where $s := \alpha_1$. For all $1 \leq j \leq s$, let $V(j)$ be the $\alpha^*_j$-dimensional vector space spanned by the set $\lbrace X^{\alpha_i - j} v_i : \alpha_i \geq j \rbrace$, and set $V^{(j)} := V(1) \oplus \cdots \oplus V(j)$.
Define subgroups $L_X \subset P_X \subset G$ by \[P_X := \lbrace g \in G : g\big(V^{(j)}\big) = V^{(j)} \text{ for all } 1\leq j \leq s \rbrace\] and \[L_X := \lbrace g \in G : g\big(V(j)\big) = V(j) \text{ for all } 1 \leq j \leq s \rbrace.\]
Since $P_X$ is the stabilizer of the partial flag \[\lbrace 0 \rbrace \subset V^{(1)} \subset \cdots \subset V^{(s)} = \mathbb{C}^n,\] it follows immediately that $P_X \subset G$ is a parabolic subgroup and $L_X \subset P_X$ is a Levi factor. Furthermore, the Richardson orbit of $P_X$ is none other than $\mathcal{O}_X$ (cf. Jantzen \cite{Jantzen}, section 4.9). For general $G$, this implies that the connected component of the identity in $G_X$ is contained in $P_X$. In our case $G = GL_n$, the conclusion is stronger: $G_X \subset P_X$, and $G_X^{\text{red}} \subset L_X$. (That we could find $P_X$ so that $\mathcal{O}_X$ is its Richardson orbit is also due to the assumption that $G$ is of type $A$.)
The claim $G_X \subset P_X$ follows from the observation that $V^{(j)}$ is the kernel of $X^j$ for all $1 \leq j \leq s$. To see $G_X^{\text{red}} \subset L_X$, we find a Levi subgroup of $L_X$ that contains $G_X^{\text{red}}$. Since $X^{k_t - j}V_t \subset V(j)$, the direct sum decomposition \[\mathbb{C}^n = \bigoplus_{t = 1}^m \bigoplus_{j = 1}^{k_t} X^{k_t - j} V_t\] is a refinement of the decomposition $\mathbb{C}^n = \bigoplus_{j=1}^s V(j)$. Set \[L_X^{\text{ref}} := \lbrace g \in G : g(X^{k_t-j} V_t) = X^{k_t-j} V_t \text{ for all } 1 \leq t \leq m, 1 \leq j \leq k_t \rbrace.\] Then $L_X^{\text{ref}} \subset L_X$, and the inclusion $G_X^{\text{red}} \subset L_X^{\text{ref}}$ follows directly from the definition of $\varphi_X$.
Let $\chi_X$ be the isomorphism \[L_X^{\text{ref}} \rightarrow \prod_{t = 1}^m \prod_{j=1}^{k_t} GL(X^{k_t -j} V_t)\] given by $g \mapsto \prod_{t=1}^m (g|_{X^{k_t-1} V_t}, \ldots, g|_{V_t})$, and let $\psi_X$ be the isomorphism \[L_X \rightarrow GL(V(1)) \times \cdots \times GL(V(s))\] given by $g \mapsto \left(g|_{V(1)}, \ldots, g|_{V(s)}\right)$.
From the analysis above, we may conclude that the composition \[\psi_X \varphi_X \colon GL_{a_1, \ldots, a_m} \rightarrow GL_{\alpha^*_1, \ldots, \alpha^*_s}\] factors as the composition \[\chi_X \varphi_X \colon GL_{a_1, \ldots, a_m} \rightarrow \prod_{t=1}^m (GL_{a_t})^{k_t}\] (which coincides with the product, over all $1 \leq t \leq m$, of the diagonal embeddings $GL_{a_t} \rightarrow (GL_{a_t})^{k_t}$), followed by the product, over all $1 \leq j \leq s$, of the inclusions $\prod_{t : k_t \geq j} GL_{a_t} \rightarrow GL_{\alpha^*_j}$. This description of $\psi_X \varphi_X$ allows us to detect the appearance of certain $[IC_{(\mathcal{O}_X, V)}]$ classes in the expansion on the $\Omega$-basis of a complex arising via the resolution $T^*(G/P_X) \rightarrow \overline{\mathcal{O}_X}$ (cf. Lemma~\ref{omega}).
\begin{exam} \label{colors}
\setcounter{MaxMatrixCols}{20}
Set $n := 11$. Then $G = GL_{11}$. Set
\[X :=
\begin{bmatrix}
0 & 1 & 0 & 0 & & & & & & & \\
0 & 0 & 1 & 0 & & & & & & & \\
0 & 0 & 0 & 1 & & & & & & & \\
0 & 0 & 0 & 0 & & & & & & & \\
& & & & 0 & 1 & 0 & & & & \\
& & & & 0 & 0 & 1 & & & & \\
& & & & 0 & 0 & 0 & & & & \\
& & & & & & & 0 & 1 & & \\
& & & & & & & 0 & 0 & & \\
& & & & & & & & & 0 & \\
& & & & & & & & & & 0
\end{bmatrix}.\]
The partition encoding the sizes of the Jordan blocks of $X$ is $\alpha = [4,3,2,1,1]$. The Young diagram of $\alpha$ is depicted in Figure~\ref{rowleng}. Each Jordan block of $X$ corresponds to a row of $\alpha$.
\begin{figure}
\caption{The Young diagram of $\alpha$, colored by rows}
\label{rowleng}
\end{figure}
We may express $\alpha$ in the form $[4^1,3^1,2^1,1^2]$, where $4>3>2>1$ are the distinct parts of $\alpha$. Then $G_X^{\text{red}}$ is the image under the isomorphism $\varphi_X$ of \[ GL_1 \times GL_1 \times GL_1 \times GL_2.\] Each factor of the preimage corresponds to a distinct part of $\alpha$ (cf. Figure~\ref{rowmult}).
\begin{figure}
\caption{The Young diagram of $\alpha$, partitioned by distinct parts}
\label{rowmult}
\end{figure}
The conjugate partition $\alpha^*$ is $[5,3,2,1]$. The isomorphism $\psi_X$ maps $L_X$ onto \[GL_5 \times GL_3 \times GL_2 \times GL_1.\] Each factor of the image corresponds to a column of $\alpha$ (cf. Figure~\ref{col}).
\begin{figure}
\caption{The Young diagram of $\alpha$, colored by columns}
\label{col}
\end{figure}
The group $L_X^{\text{ref}}$ lies inside $L_X$ and contains $G_X^{\text{red}}$. The isomorphism $\chi_X$ maps $L_X^{\text{ref}}$ isomorphically onto \[(GL_1)^4 \times (GL_1)^3 \times (GL_1)^2 \times (GL_2)^1.\] Each factor of the image corresponds to an ordered pair consisting of a distinct part of $\alpha$ \textit{and} a column of $\alpha$ (cf. Figure~\ref{both}).
\begin{figure}
\caption{The Young diagram of $\alpha$, partitioned by distinct parts and colored by columns}
\label{both}
\end{figure}
The composition \[\psi_X \varphi_X \colon GL_{1,1,1,2} \rightarrow GL_{5,3,2,1}\] factors as the product of diagonal embeddings \[\chi_X \varphi_X \colon GL_{1,1,1,2} \rightarrow (GL_1)^4 \times (GL_1)^3 \times (GL_1)^2 \times (GL_2)^1,\] followed by the product of the inclusions \[
GL_{1,1,1,2} \rightarrow GL_5, \quad
GL_{1,1,1} \rightarrow GL_3, \quad
GL_{1,1} \rightarrow GL_2, \quad \text{and} \quad
GL_1 \rightarrow GL_1.\]
\end{exam}
\subsection{Sheaves on the nilpotent cone of $\mathfrak{gl}_n$}
Let $e_1, \ldots, e_n$ be the standard basis for $\mathbb{C}^n$. From the nilpotent orbit $\mathcal{O}_X$, we choose the representative element $X_{\alpha} \in \mathfrak{g}$ given by \[e_i \mapsto 0\] for all $1 \leq i \leq \alpha^*_1$ and \[e_{\alpha^*_1 + \cdots + \alpha^*_{j-1} + i} \mapsto e_{\alpha^*_1 + \cdots + \alpha^*_{j-2} + i}\] for all $2 \leq j \leq s$, $1 \leq i \leq \alpha^*_j$.
\begin{exam}
Maintain the notation of Example~\ref{colors}. Then \[X_{\alpha} = \begin{bmatrix}
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & & \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & & \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & & \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & & \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & & \\
& & & & & 0 & 0 & 0 & 1 & 0 & \\
& & & & & 0 & 0 & 0 & 0 & 1 & \\
& & & & & 0 & 0 & 0 & 0 & 0 & \\
& & & & & & & & 0 & 0 & 1 \\
& & & & & & & & 0 & 0 & 0 \\
& & & & & & & & & & 0
\end{bmatrix}.\]
\end{exam}
To see that $X_{\alpha} \in \mathcal{O}_X$, let $g \in G$ be given by $X^{\alpha_i - j} v_i \mapsto e_{\alpha^*_1 + \cdots + \alpha^*_{j-1} + i}$, and observe that $X_{\alpha} = gXg^{-1}$. Thus, $\mathcal{N} = \bigcup_{\alpha \vdash n} \mathcal{O}_{X_{\alpha}}$. For $\alpha$ a partition of $n$, we write $\mathcal{O}_{\alpha}$ for the orbit $\mathcal{O}_{X_{\alpha}}$. The uniqueness of the Jordan canonical form implies that the orbits $\mathcal{O}_{\alpha}$ and $\mathcal{O}_{\beta}$ are disjoint for distinct partitions $\alpha$ and $\beta$, so $\lbrace \mathcal{O}_{\alpha} \rbrace_{\alpha \vdash n}$ constitutes the set of nilpotent orbits of $\mathfrak{g}$.
For each factor $GL_{a_t}$ of $GL_{a_1, \ldots, a_m}$, we identify the weight lattice with the character lattice $\mathbb{Z}^{a_t}$ of the maximal torus $(\mathbb{C}^*)^{a_t}$ of invertible diagonal matrices, and we assign the partial order induced by the Borel subgroup of invertible upper triangular matrices. Then the isomorphism classes of irreducible $GL_{a_1, \ldots, a_m}$-representations are indexed by $m$-tuples of integer sequences $(\nu(1), \ldots, \nu(m))$ such that $\nu(t)$ is a dominant weight of $GL_{a_t}$ for all $1 \leq t \leq m$. The $m$-tuple $(\nu(1), \ldots, \nu(m))$ corresponds to the representation \[V^{(\nu(1), \ldots, \nu(m))} := V^{\nu(1)} \boxtimes \cdots \boxtimes V^{\nu(m)},\] where $V^{\nu(t)}$ denotes the irreducible $GL_{a_t}$-representation with highest weight $\nu(t)$.
We say that an integer sequence $\nu = [\nu_1, \ldots, \nu_{\ell}]$ is \textit{dominant} with respect to $\alpha$ if $\alpha_i = \alpha_{i+1}$ implies $\nu_i \geq \nu_{i+1}$. Note that the dominance condition holds precisely when $\nu$ is the concatenation of an $m$-tuple of dominant weights $(\nu(1), \ldots, \nu(m))$. For such $\nu$, we denote by $V^{(\alpha, \nu)}$ the representation of $G_{\alpha} := G_{X_{\alpha}}$ (or of $G_{\alpha}^{\text{red}} := G_{X_{\alpha}}^{\text{red}}$, depending on context) arising from the representation $V^{(\nu(1), \ldots, \nu(m))}$ of $GL_{a_1, \ldots, a_m}$. Via the association \[(\alpha, \nu) \mapsto \big(\mathcal{O}_{\alpha}, V^{(\alpha, \nu)}\big),\] we construe $\Omega$ as consisting of pairs of integer sequences $(\alpha, \nu)$ such that $\alpha = [\alpha_1, \ldots, \alpha_{\ell}]$ is a partition of $n$ and $\nu = [\nu_1, \ldots, \nu_{\ell}]$ is dominant with respect to $\alpha$.
Let $B \subset G$ be the Borel subgroup of invertible upper triangular matrices, and let $T \subset B$ be the maximal torus of invertible diagonal matrices. The weight lattice $\Lambda = \operatorname{Hom}(T, \mathbb{C}^*) \cong \mathbb{Z}^n$ comprises length-$n$ integer sequences $\lambda = [\lambda_1, \ldots, \lambda_n]$. Those weights $\lambda \in \Lambda$ which are weakly decreasing are dominant with respect to $B$ and belong to $\Lambda^+$.
Set $P_{\alpha} := P_{X_{\alpha}}$ and $L_{\alpha} := L_{X_{\alpha}}$. Then \[P_{\alpha} = \left\lbrace g \in G : ge_{\alpha^*_1 + \cdots + \alpha^*_{j-1} + i} \in \operatorname{span} \lbrace e_1, \ldots, e_{\alpha^*_1 + \cdots + \alpha^*_j} \rbrace \right\rbrace \] and \[L_{\alpha} = \left\lbrace g \in G : ge_{\alpha^*_1 + \cdots + \alpha^*_{j-1} + i} \in \operatorname{span} \lbrace e_{\alpha^*_1 + \cdots + \alpha^*_{j-1} + 1}, \ldots, e_{\alpha^*_1 + \cdots + \alpha^*_j} \rbrace \right\rbrace. \]
We see immediately that $P_{\alpha} \supset B$ and $L_{\alpha} \supset T$. Thus, $\Lambda$ doubles as the weight lattice of $L_{\alpha}$. Given a weight $\lambda \in \Lambda$, let $\lambda^j$ be its restriction to the factor $GL_{\alpha^*_j}$ of $L_{\alpha} \cong GL_{\alpha^*_1, \ldots, \alpha^*_s}$. This realizes $\lambda$ as the concatenation of the $s$-tuple of integer sequences $(\lambda^1, \ldots, \lambda^s)$. If $\lambda^j$ is weakly decreasing for all $1 \leq j \leq s$, then $\lambda$ is dominant with respect to the Borel subgroup $B_{\alpha} := B_{L_{\alpha}}$, in which case $\lambda$ belongs to $\Lambda^+_{\alpha}$, the set of dominant weights of $L_{\alpha}$ with respect to $B_{\alpha}$. For $\lambda \in \Lambda^+_{\alpha}$, we denote by $W^{\lambda^j}$ the irreducible $GL_{\alpha^*_j}$-representation with highest weight $\lambda^j$, and we set \[W^{\lambda} := W^{\lambda^1} \boxtimes \cdots \boxtimes W^{\lambda^s},\] which indeed has highest weight $\lambda$.
We rely on the complexes $A^{\alpha}_{\lambda} := A^{P_{\alpha}}_{\lambda}$ associated to weights $\lambda \in \Lambda^+_{\alpha}$ to interpolate between the $\Omega$- and $\Lambda^+$-bases for $K_0(\mathfrak{D})$. Weights of $L_{\alpha}$ are also weights of $G$, so it is reasonable to expect that the expansion of $[A^{\alpha}_{\lambda}]$ on the $\Lambda^+$-basis be easy to compute. On the other hand, representations of $L_{\alpha}$ restrict to representations of $G_{\alpha}^{\text{red}}$, and it turns out that this relationship lifts to the corresponding objects in $\mathfrak{D}$. The following results of Achar \cite{Acharj} encapsulate these statements formally.
\begin{lem}[Achar \cite{Acharj}, Corollary 2.5] \label{omega}
Let $(\alpha, \nu) \in \Omega$, and let $\lambda \in \Lambda^+_{\alpha}$. Suppose that $V^{(\alpha, \nu)}$ occurs in the decomposition of the $L_{\alpha}$-representation $W^{\lambda}$ as a direct sum of irreducible $G_{\alpha}^{\emph{red}}$-representations. Then, when $[A^{\alpha}_{\lambda}]$ is expanded on the $\Omega$-basis for $K_0(\mathfrak{D})$, the coefficient of $[IC_{(\alpha, \nu)}]$ is nonzero.
\end{lem}
\begin{lem}[Achar \cite{Acharj}, Corollary 2.7] \label{lambda}
Let $W_{\alpha}$ be the Weyl group of $L_{\alpha}$, and let $\rho_{\alpha}$ be the half-sum of the positive roots of $L_{\alpha}$. For all $\lambda \in \Lambda^+_{\alpha}$, the following equality holds in $K_0(\mathfrak{D})$: \[[A^{\alpha}_{\lambda}] = \sum_{w \in W_{\alpha}} (-1)^w [A_{\lambda + \rho_{\alpha} - w \rho_{\alpha}}].\]
\end{lem}
Let $W$ be the Weyl group of $G$, and, for all $\mu \in \Lambda$, let $\operatorname{dom}(\mu) \in \Lambda^+$ be the unique dominant weight in the $W$-orbit of $\mu$. When $[A_{\mu}]$ is expanded on the $\Lambda^+$-basis for $K_0(\mathfrak{D})$, the coefficient of $[A_{\lambda}]$ is zero unless $\lambda \leq \operatorname{dom}(\mu)$ (cf. Achar \cite{Acharj}, Proposition 2.2). Thus, if $\mu \in \Lambda^+_{\alpha}$, it follows from Lemma~\ref{lambda} that $[A^{\alpha}_{\mu}] \in \operatorname{span} \lbrace [A_{\lambda}] : \lambda \leq \operatorname{dom}(\mu + 2 \rho_{\alpha}) \rbrace$.
Let $\Omega_{\alpha}$ be the set of all dominant integer sequences $\nu$ with respect to $\alpha$. Given $\nu \in \Omega_{\alpha}$, set \[\Lambda^+_{\alpha, \nu} := \left \lbrace \mu \in \Lambda^+_{\alpha} : \dim \operatorname{Hom}_{G_{\alpha}^{\text{red}}} \big(V^{(\alpha, \nu)}, W^{\mu}\big) > 0 \right \rbrace.\] On input $(\alpha, \nu)$, our algorithm finds a weight $\mu \in \Lambda^+_{\alpha, \nu}$ such that $||\mu + 2 \rho_{\alpha}||$ is minimal. As demonstrated by Achar \cite{Achart, Acharj}, this guarantees that $\gamma(\alpha, \nu) = \operatorname{dom}(\mu + 2 \rho_{\alpha})$.\footnote{This follows from Claim 2.3.1 in \cite{Achart}, except that $\gamma$ is defined differently. In \cite{Acharj}, Theorem 8.10, Achar shows that the bijection $\gamma$ constructed in \cite{Achart} coincides with the bijection in Theorem~\ref{bez}. }
The intuition behind this approach is straightforward. For all $\mu \in \Lambda^+_{\alpha, \nu}$, the expansion of $[A^{\alpha}_{\mu}]$ on the $\Omega$-basis takes the form \[\big[A^{\alpha}_{\mu}\big] = \dim \operatorname{Hom}_{G_{\alpha}^{\text{red}}} \big(V^{(\alpha, \nu)}, W^{\mu}\big) \big[IC_{(\alpha, \nu)}\big] + \sum_{\upsilon \in \Omega_{\alpha} : \upsilon \neq \nu} c_{\alpha, \upsilon} \big[IC_{(\alpha, \upsilon)}\big] + \sum_{(\beta, \xi) \in \Omega : \beta \vartriangleleft \alpha} c_{\beta, \xi} \big[IC_{(\beta, \xi)}\big],\] where $\trianglelefteq$ denotes the dominance order on partitions of $n$. On the other hand, the expansion of $[A^{\alpha}_{\mu}]$ on the $\Lambda^+$-basis takes the form \[\big[A^{\alpha}_{\mu}\big] = \pm \big[A_{\operatorname{dom}(\mu + 2 \rho_{\alpha})}\big] + \sum_{\lambda < \operatorname{dom}(\mu + 2 \rho_{\alpha})} c_{\lambda} \big[A_{\lambda} \big].\]
We compare the equations. There is a single maximal-weight term in the right-hand side of the second equation. It follows that there is a single maximal-weight term in the expansion of the right-hand side of the first equation on the $\Lambda^+$-basis. By Theorem~\ref{bez}, the maximal weight must be $\gamma(\alpha, \nu)$ or among the sets $\lbrace \gamma(\alpha, \upsilon) : \upsilon \neq \nu \rbrace$ and $\lbrace \gamma(\beta, \xi) : \beta \vartriangleleft \alpha \rbrace$. In the former case, we may conclude immediately that $\gamma(\alpha, \nu) = \operatorname{dom}(\mu + 2 \rho_{\alpha})$. It turns out that mandating the minimality of $||\mu + 2 \rho_{\alpha}||$ suffices to preclude the latter possibility.
\subsection{The Lusztig--Vogan bijection for $GL_2$}
Set $n := 2$. Then $G = GL_2$. The weight lattice $\Lambda$ comprises ordered pairs $[\lambda_1, \lambda_2] \in \mathbb{Z}^2$, and $\Lambda^+ = \lbrace [\lambda_1, \lambda_2] \in \mathbb{Z}^2 : \lambda_1 \geq \lambda_2 \rbrace$.
The variety $\mathcal{N} \subset \mathfrak{g}$ is the zero locus of the determinant polynomial. Each matrix of rank $1$ in $\mathfrak{g}$ is similar to $\begin{bmatrix} 0 & 1 \\ 0 & 0 \end{bmatrix}$, so $\mathcal{N}$ is the union of $\begin{bmatrix} 0 & 0 \\ 0 & 0\end{bmatrix}$ (the \textit{zero orbit}) and the $G$-orbit of $\begin{bmatrix} 0 & 1 \\ 0 & 0 \end{bmatrix}$ (the \textit{regular orbit}).
To the zero orbit corresponds the partition $[1,1]$. Note that $G_{[1,1]}^{\text{red}} = L_{[1,1]} = G$. Hence \[\Omega_{[1,1]} = \lbrace [\nu_1, \nu_2] \in \mathbb{Z}^2 : \nu_1 \geq \nu_2 \rbrace \quad \text{and} \quad \Lambda^+_{[1,1]} = \lbrace [\mu_1, \mu_2] \in \mathbb{Z}^2 : \mu_1 \geq \mu_2 \rbrace.\]
For all $[\mu_1, \mu_2] \in \Lambda^+_{[1,1]}$, the irreducible $L_{[1,1]}$-representation $W^{[\mu_1, \mu_2]}$ is isomorphic as a $G_{[1,1]}^{\text{red}}$-representation to $V^{([1,1], [\mu_1, \mu_2])}$. Thus, for all $[\nu_1, \nu_2] \in \Omega_{[1,1]}$, \[\Lambda^+_{[1,1], [\nu_1, \nu_2]} = \lbrace [\nu_1, \nu_2] \rbrace.\] Our algorithm sets $[\mu_1, \mu_2] := [\nu_1, \nu_2]$.
On the $\Omega$-basis, $[A^{[1,1]}_{[\mu_1, \mu_2]}]$ expands as \[ \left[A^{[1,1]}_{[\nu_1, \nu_2]}\right] = \left[IC_{([1,1], [\nu_1, \nu_2])}\right].\]
Since $W_{[1,1]} = W = \mathfrak{S}_2$ and $\rho_{[1,1]} = [\frac{1}{2}, - \frac{1}{2}]$, it follows that $[A^{[1,1]}_{[\mu_1, \mu_2]}]$ expands on the $\Lambda^+$-basis as \[\left[A^{[1,1]}_{[\nu_1, \nu_2]}\right] = - \left[A_{[\nu_1 + 1, \nu_2 - 1]}\right] + \left[A_{[\nu_1, \nu_2]}\right].\]
Hence $\gamma([1,1], [\nu_1, \nu_2]) = [\nu_1 + 1, \nu_2 - 1] = \operatorname{dom}([\mu_1, \mu_2] + 2 \rho_{[1,1]})$, which confirms that the output is correct.
We turn our attention to the regular orbit, to which corresponds the partition $[2]$. Recall that $G_{[2]}^{\text{red}} \cong GL_1$ and $L_{[2]} \cong GL_1 \times GL_1$. Hence \[\Omega_{[2]} = \lbrace [\nu_1] \in \mathbb{Z}^1 \rbrace \quad \text{and} \quad \Lambda^+_{[2]} = \lbrace [\mu_1, \mu_2] \in \mathbb{Z}^2 \rbrace.\]
Furthermore, the composition $\psi_{X_{[2]}} \varphi_{X_{[2]}}$ of the isomorphisms $\varphi_{X_{[2]}} \colon GL_1 \rightarrow G_{[2]}^{\text{red}}$ and $\psi_{X_{[2]}} \colon L_{[2]} \rightarrow GL_1 \times GL_1$ coincides with the diagonal embedding $GL_1 \rightarrow GL_1 \times GL_1$. For all $[\mu_1, \mu_2] \in \Lambda^+_{[2]}$, the irreducible $L_{[2]}$-representation $W^{[\mu_1, \mu_2]}$ is isomorphic as a $G_{[2]}^{\text{red}}$-representation to $V^{([2], [\mu_1 + \mu_2])}$.
Thus, for all $[\nu_1] \in \Omega_{[2]}$, \[\Lambda^+_{[2], [\nu_1]} = \lbrace [\mu_1, \mu_2] \in \Lambda^+_{[2]} : \mu_1 + \mu_2 = \nu_1 \rbrace.\] Our algorithm sets \[\left[\mu_1, \mu_2 \right] := \left[ \left \lceil \frac{\nu_1}{2} \right \rceil, \left \lfloor \frac{\nu_1}{2} \right \rfloor \right].\]
On the $\Omega$-basis, $[A^{[2]}_{[\mu_1, \mu_2]}]$ expands as \[ \left[A^{[2]}_{\left[ \left \lceil \frac{\nu_1}{2} \right \rceil, \left \lfloor \frac{\nu_1}{2} \right \rfloor \right]} \right] = \left[IC_{([2], [\nu_1])}\right] + \sum_{[\xi_1, \xi_2] \in \Omega_{[1,1]}} c_{[1,1], [\xi_1, \xi_2]}\left[IC_{([1,1], [\xi_1, \xi_2])}\right].\]
Since $W_{[2]}$ is trivial and $\rho_{[2]} = [0, 0]$, it follows that $[A^{[2]}_{[\mu_1, \mu_2]}]$ expands on the $\Lambda^+$-basis as \[\left[A^{[2]}_{\left[ \left \lceil \frac{\nu_1}{2} \right \rceil, \left \lfloor \frac{\nu_1}{2} \right \rfloor \right]} \right] = \left[A_{\left[ \left \lceil \frac{\nu_1}{2} \right \rceil, \left \lfloor \frac{\nu_1}{2} \right \rfloor \right]} \right].\]
From our analysis above, we know that $\gamma([1,1], [\xi_1, \xi_2]) = [\xi_1 + 1, \xi_2 - 1]$, so there cannot exist $[\xi_1, \xi_2] \in \Omega_{[1,1]}$ such that $\gamma([1,1], [\xi_1, \xi_2]) = [\lceil \frac{\nu_1}{2} \rceil, \lfloor \frac{\nu_1}{2} \rfloor]$.
Hence $\gamma([2], [\nu_1]) = [\lceil \frac{\nu_1}{2} \rceil, \lfloor \frac{\nu_1}{2} \rfloor] = \operatorname{dom}([\mu_1, \mu_2] + 2 \rho_{[2]})$.\footnote{It follows immediately from Theorem~\ref{bez} that $c_{[1,1],[\xi_1, \xi_2]} = 0$ for all $[\xi_1, \xi_2] \in \Omega_{[1,1]}$. }
\subsection{Outline}
The cynosure of this article is the \textit{integer-sequences version} of our algorithm, which admits as input a pair $(\alpha, \nu) \in \Omega$ and yields as output a weight $\mathfrak{A}(\alpha, \nu) \in \Lambda^+_{\alpha}$. The output, which consists of a weight of each factor of $L_{\alpha}$, is obtained recursively: The weight of the first factor $GL_{\alpha^*_1}$ is computed; then the input is adjusted accordingly, and the algorithm is called on the residual input to determine the weight of each of the remaining factors.
The algorithm design is guided by the objective of locating $\mathfrak{A}(\alpha, \nu)$ in $\Lambda^+_{\alpha, \nu}$ and keeping $||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ as small as possible. Our main theorem is the following.
\begin{thm} \label{main}
Let $(\alpha, \nu) \in \Omega$. Then $\gamma(\alpha, \nu) = \operatorname{dom}(\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha})$.
\end{thm}
We prove the main theorem by verifying that \[||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}|| = \min \lbrace ||\mu + 2 \rho_{\alpha}|| : \mu \in \Lambda^+_{\alpha, \nu} \rbrace.\] However, our approach is indirect and relies on a combinatorial apparatus introduced by Achar \cite{Achart, Acharj} --- \textit{weight diagrams}.
A weight diagram $X$ of shape-class $\alpha$ encodes several integer sequences, including a weight $h(X) \in \Lambda^+_{\alpha}$. On input $(\alpha, \nu)$, Achar's algorithm outputs a weight diagram $\mathsf{A}(\alpha, \nu)$ of shape-class $\alpha$ such that $h \mathsf{A}(\alpha, \nu) \in \Lambda^+_{\alpha, \nu}$ and $||h \mathsf{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is minimal (cf. \cite{Acharj}, Corollary 8.9). Achar's conclusion (cf. \cite{Acharj}, Theorem 8.10) is that Theorem~\ref{main} holds with $h \mathsf{A}(\alpha, \nu)$ in place of $\mathfrak{A}(\alpha, \nu)$.
The minimality of $||h \mathsf{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is basic to Achar's algorithm, which maintains a candidate output $X$ at each step, and performs only manipulations that do not increase $||hX + 2 \rho_{\alpha}||$. In contrast, $\mathfrak{A}(\alpha, \nu)$ is computed one entry at a time. The minimality of $||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is an emergent property, which we prove by comparison of our algorithm with Achar's.
Rather than attempt to connect $\mathfrak{A}$ to $\mathsf{A}$, we introduce a third algorithm $\mathcal{A}$, built with the same tools as $\mathfrak{A}$, but configured to output weight diagrams rather than integer sequences.\footnote{$\mathcal{A}$ actually outputs pairs of weight diagrams, so what we refer to in the introduction as $\mathcal{A}(\alpha, \nu)$ is denoted in the body by $p_1 \mathcal{A}(\alpha, \nu)$. } The relationship between this \textit{weight-diagrams version} and Achar's algorithm is impossible to miss: $\mathcal{A}(\alpha, \nu)$ always exactly matches $\mathsf{A}(\alpha, \nu)$. Hence $||h\mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is minimal.
While it is not the case that $\mathfrak{A}(\alpha, \nu)$ always coincides with $h\mathcal{A}(\alpha, \nu)$,\footnote{In the author's thesis \cite{Rush}, the integer-sequences version $\mathfrak{A}$ is defined so that $\mathfrak{A}(\alpha, \nu) = h \mathcal{A}(\alpha, \nu)$, but the proof that this equation holds is laborious and not altogether enlightening (cf. Chapter 5). Relaxing this requirement allows us to simplify the definition of $\mathfrak{A}$ and focus on proofs more pertinent to $\gamma$. } we show nonetheless that
\begin{equation} \label{minimality}
||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}|| = ||h \mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}||,
\end{equation}
which implies that \[\operatorname{dom}(\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}) = \operatorname{dom}(h \mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}),\] confirming that $\mathfrak{A}$ is a bona fide version of $\mathcal{A}$. The main theorem follows immediately.
In summary, the algorithm $\mathfrak{A}$ is a bee-line for computing $\gamma$, akin to an ansatz, which works because $\mathfrak{A}(\alpha, \nu) \in \Lambda^+_{\alpha, \nu}$ such that $||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is minimal. The minimality of $||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is a consequence of the minimality of $||h \mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}||$, and we deduce the latter by identifying $\mathcal{A}(\alpha, \nu)$ with $\mathsf{A}(\alpha, \nu)$.
The rest of this article is organized as follows. In section 2, we present the integer-sequences version of our algorithm, along with several example calculations.
In section 3, we define weight diagrams. A weight diagram of shape-class $\alpha$ encodes an element each of $\Omega_{\alpha}$, $\Lambda^+_{\alpha}$, and $\Lambda^+$, and we give a correct proof of Proposition 4.4 in Achar \cite{Acharj} regarding the relations between the corresponding objects in $\mathfrak{D}$.
In section 4, we present the weight-diagrams version of our algorithm and delineate its basic properties. Then we prove that Equation~\eqref{minimality} holds, assuming that $||h \mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}||$ is minimal.
In section 5, we state Achar's criteria for a weight diagram to be \textit{distinguished}, and we prove that $\mathcal{A}$ outputs a distinguished diagram on any input. As we explain, this implies that the diagrams $\mathcal{A}(\alpha, \nu)$ and $\mathsf{A}(\alpha, \nu)$ are identical for all $(\alpha, \nu) \in \Omega$.
Finally, in the appendix, we cite Achar's algorithm for $\gamma^{-1}$ as heuristic evidence that our algorithm for $\gamma$ is the conceptually correct counterpart. Achar's algorithm for $\gamma$ does not parallel his algorithm for $\gamma^{-1}$, but ours does.
\eject
\section{The Algorithm, Integer-Sequences Version}
\subsection{Overview}
Fix a partition $\alpha = [\alpha_1, \ldots, \alpha_{\ell}]$ with conjugate partition $\alpha^* = [\alpha^*_1, \ldots, \alpha^*_s]$. Given an integer sequence $\iota$ of any length, let $\operatorname{dom}(\iota)$ be the sequence obtained by rearranging the entries of $\iota$ in weakly decreasing order. (This is consistent with the notation of section 1.3, for $\operatorname{dom}(\iota) \in W \iota \cap \Lambda^+$ if $\iota \in \Lambda$.)
Let $\nu \in \Omega_{\alpha}$. On input $(\alpha, \nu)$, our algorithm outputs an integer sequence $\mu$ of length $n$ satisfying the following conditions:
\begin{enumerate}
\item $\mu$ is the concatenation of an $s$-tuple of weakly decreasing integer sequences $(\mu^1, \ldots, \mu^s)$ such that $\mu^j$ is of length $\alpha^*_j$ for all $1 \leq j \leq s$;
\item There exists a collection of integers $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$ such that \[\nu_i = \nu_{i, 1} + \cdots + \nu_{i, \alpha_i}\] for all $1 \leq i \leq \ell$ and $\mu^j = \operatorname{dom}([\nu_{1, j}, \ldots, \nu_{\alpha^*_j, j}])$ for all $1 \leq j \leq s$.
\end{enumerate}
Recall that the first condition indicates $\mu \in \Lambda^+_{\alpha}$. The second condition implies $\mu \in \Lambda^+_{\alpha, \nu}$ (cf. Corollary~\ref{decamp}).
Although we could construct a collection $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$ such that $\nu_i = \nu_{i,1} + \cdots + \nu_{i, \alpha_i}$ for all $i$ and obtain $\mu$ as a by-product (by setting $\mu^j := \operatorname{dom}([\nu_{1,j}, \ldots, \nu_{\alpha^*_j, j}])$ for all $j$), our algorithm instead computes each $\mu^j$ directly, alongside a permutation $\sigma^j \in \mathfrak{S}_{\alpha^*_j}$, so that $\nu_i = \mu^1_{\sigma^1(i)} + \cdots + \mu^{\alpha_i}_{\sigma^{\alpha_i}(i)}$ for all $i$. (Then a collection fit to $\mu$ is given by $\nu_{i,j} := \mu^j_{\sigma^j(i)}$.)
\begin{rem} \label{motiv}
Were we seeking to minimize $||\mu||$, it would suffice to choose, for all $i$, integers $\nu_{i, 1}, \ldots, \nu_{i, \alpha_i} \in \lbrace \lceil \frac{\nu_i}{\alpha_i} \rceil, \lfloor \frac{\nu_i}{\alpha_i} \rfloor \rbrace$ summing to $\nu_i$, and let the collection $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$ induce the output $\mu$.
However, our task is to minimize $||\mu + 2 \rho_{\alpha}||$, in which case we cannot confine each $\nu_{i,j}$ to the set $\lbrace \lceil \frac{\nu_i}{\alpha_i} \rceil, \lfloor \frac{\nu_i}{\alpha_i} \rfloor \rbrace$.\footnote{See section 2.4 for an example in which there exists $i,j$ such that $\nu_{i,j}$ must not belong to $\lbrace \lceil \frac{\nu_i}{\alpha_i} \rceil, \lfloor \frac{\nu_i}{\alpha_i} \rfloor \rbrace$. } Specifying the collection $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$ straightaway, and learning the (numerical) order of the entries in each sequence $[\nu_{1, j}, \ldots, \nu_{\alpha^*_j, j}]$ post hoc, risks needlessly inflating \[\sum_{j=1}^s \left|\left|\operatorname{dom}([\nu_{1,j}, \ldots, \nu_{\alpha^*_j, j}]) + 2 \left[\frac{\alpha^*_j - 1}{2}, \ldots, \frac{1 - \alpha^*_j}{2} \right]\right|\right|^2 = ||\mu + 2 \rho_{\alpha}||^2.\]
But how can we know what the order among the integers $\nu_{1,j}, \ldots, \nu_{\alpha^*_j, j}$ will be before their values are assigned? Our answer is simply to stipulate the order, and pick values pursuant thereto --- by deciding $\sigma^j$, then $\mu^j$, and setting $[\nu_{1, j}, \ldots, \nu_{\alpha^*_j, j}] := [\mu^j_{\sigma^j(1)}, \ldots, \mu^j_{\sigma^j(\alpha^*_j)}]$.
\end{rem}
The algorithm runs by recursion. Roughly: $\sigma^1$ is determined via a \textit{ranking} function, which compares \textit{candidate ceilings}, each measuring how the addition of $2 \rho_{\alpha}$ to $\mu$ might affect a subset of the collection $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$, subject to a hypothesis about $\sigma^1$. After $\sigma^1$ is settled, the corresponding candidate ceilings are tweaked (under the aegis of a \textit{column} function) to compute $\mu^1$. Then $\mu^1$ is ``subtracted off,'' and the algorithm is called on the residual input $\nu'$, defined by $\nu'_i := \nu_i - \mu^1_{\sigma^1(i)}$, returning $\mu^2, \ldots, \mu^s$.
\subsection{The algorithm}
Describing the algorithm explicitly requires us to introduce formally several preliminary functions.
\begin{df}
Given a pair of integer sequences $(\alpha, \nu) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell}$, an integer $i \in \lbrace 1, \ldots, \ell \rbrace$, and an ordered pair of disjoint sets $(I_a, I_b)$ satisfying $I_a \cup I_b = \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i \rbrace$, we define the \textit{candidate-ceiling} function $\mathcal{C}_{-1}$ as follows:
\[\mathcal{C}_{-1}(\alpha, \nu, i , I_a, I_b) := \left \lceil \frac{\nu_i - \sum_{j \in I_a} \min \lbrace \alpha_i, \alpha_j \rbrace + \sum_{j \in I_b} \min \lbrace \alpha_i, \alpha_j \rbrace}{\alpha_i} \right \rceil.\]
\end{df}
\begin{df}
The \textit{ranking-by-ceilings} algorithm $\mathcal{R}_{-1}$ computes a function $\mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \rightarrow \mathfrak{S}_{\ell}$ iteratively over $\ell$ steps.
Say $\mathcal{R}_{-1}(\alpha, \nu) = \sigma$. On the $i^{\text{th}}$ step of the algorithm, $\sigma^{-1}(1), \ldots, \sigma^{-1}(i-1)$ have already been determined. Set \[J_i := \lbrace \sigma^{-1}(1), \ldots, \sigma^{-1}(i-1) \rbrace \quad \text{and} \quad J'_i := \lbrace 1, \ldots, \ell \rbrace \setminus J_i.\] Then $\sigma^{-1}(i)$ is designated the numerically minimal $j \in J'_i$ among those for which \[(\mathcal{C}_{-1}(\alpha, \nu, j, J_i, J'_i \setminus \lbrace j \rbrace), \alpha_j, \nu_j)\] is lexicographically maximal.
\end{df}
\begin{df}
The \textit{column-ceilings} algorithm $\mathcal{U}_{-1}$ is iterative with $\ell$ steps and computes a function $\mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \mathfrak{S}_{\ell} \rightarrow \mathbb{Z}^{\ell}_{\text{dom}}$, where $\mathbb{Z}^{\ell}_{\text{dom}} \subset \mathbb{Z}^{\ell}$ denotes the subset of weakly decreasing sequences.
Say $\mathcal{U}_{-1}(\alpha, \nu, \sigma) = [\iota_1, \ldots, \iota_{\ell}]$. On the $i^{\text{th}}$ step of the algorithm, $\iota_1, \ldots, \iota_{i-1}$ have already been determined. Then \[\iota_i := \mathcal{C}_{-1}(\alpha, \nu, \sigma^{-1}(i), \sigma^{-1} \lbrace 1, \ldots, i-1 \rbrace, \sigma^{-1} \lbrace i+1, \ldots, \ell \rbrace) - \ell + 2i - 1\] unless the right-hand side is greater than $\iota_{i-1}$, in which case $\iota_i := \iota_{i-1}$.
\end{df}
We assemble these constituent functions into a recursive algorithm $\mathfrak{A}$ that computes a map $\mathbb{Y}_{n, \ell} \times \mathbb{Z}^{\ell} \rightarrow \mathbb{Z}^{n}$, where $\mathbb{Y}_{n, \ell}$ denotes the set of partitions of $n$ with $\ell$ parts.
On input $(\alpha, \nu)$, the algorithm sets \[\sigma^1 := \mathcal{R}_{-1}(\alpha, \nu) \quad \text{and} \quad \mu^1 := \mathcal{U}_{-1}(\alpha, \nu, \sigma^1).\]
If $\alpha_1 = 1$, it returns $\mu^1$.
Otherwise, it defines $(\alpha', \nu') \in \mathbb{Y}_{n-\ell, \alpha^*_2} \times \mathbb{Z}^{\alpha^*_2}$ by setting \[\alpha'_i := \alpha_i - 1 \quad \text{and} \quad \nu'_i := \nu_i - \mu^1_{\sigma^1(i)}\] for all $1 \leq i \leq \alpha^*_2$.
Then it prepends $\mu^1$ to $\mathfrak{A}(\alpha', \nu')$ and returns the result.
\begin{rem} \label{iter}
The use of recursion makes our instructions for computing $\mathfrak{A}(\alpha, \nu)$ succinct. At the cost of a bit of clarity, we can rephrase the instructions to use iteration, and thereby delineate every step in the computation.
Consider the algorithm $\mathfrak{A}_{\operatorname{iter}} \colon \mathbb{Y}_{n, \ell} \times \mathbb{Z}^{\ell} \rightarrow \mathbb{Z}^n$ defined as follows.
On input $(\alpha, \nu)$, it starts by setting $\alpha^1 := \alpha$, $\nu^1 := \nu$, $\sigma^1 := \mathcal{R}_{-1}(\alpha^1, \nu^1)$, and $\mu^1 := \mathcal{U}_{-1}(\alpha^1, \nu^1, \sigma^1)$.
Then, for $2 \leq j \leq s$:
\begin{itemize}
\item It defines $\alpha^j$ by $\alpha^j_i :=\alpha^{j-1}_i - 1$ for all $1 \leq i \leq \alpha^*_j$;
\item It defines $\nu^{j}$ by $\nu^{j}_i := \nu^{j-1}_i - \mu^{j-1}_{\sigma^{j-1}(i)}$ for all $1 \leq i \leq \alpha^*_{j}$;
\item It sets $\sigma^j := \mathcal{R}_{-1}(\alpha^j, \nu^j)$;
\item It sets $\mu^j := \mathcal{U}_{-1}(\alpha^j, \nu^j, \sigma^j)$.
\end{itemize}
Finally, it returns the concatenation of $(\mu^1, \ldots, \mu^s)$.
It should be clear that $\mathfrak{A}_{\operatorname{iter}}(\alpha, \nu)$ agrees with $\mathfrak{A}(\alpha, \nu)$. To see this, we induct on $s$. For the inductive step, it suffices to show that $\mathfrak{A}(\alpha', \nu')$ is the concatenation of $(\mu^2, \ldots, \mu^s)$. But $\mathfrak{A}(\alpha', \nu') = \mathfrak{A}_{\operatorname{iter}}(\alpha^2, \nu^2)$ by the inductive hypothesis.
\end{rem}
\subsection{Examples}
We study three examples. First, to illustrate the workings of the ranking function, we consider the orbit $\mathcal{O}_{[2,1]}$. Given $\nu \in \Omega_{[2,1]}$, the algorithm makes exactly one meaningful comparison --- to determine whether $\sigma^1$ is the trivial or nontrivial permutation in $\mathfrak{S}_2$.
Second, to underscore the advantages of our approach, we consider an input pair $(\alpha, \nu)$ for which there exists only one collection $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$ such that $\nu_{i,j} \in \lbrace \lceil \frac{\nu_i}{\alpha_i} \rceil, \lfloor \frac{\nu_i}{\alpha_i} \rfloor \rbrace$ for all $i,j$, and setting $\mu^j := \operatorname{dom}([\nu_{1,j}, \ldots, \nu_{\alpha^*_j,j}])$ for all $j$ yields an incorrect answer for $\gamma(\alpha, \nu)$. The input pair is $([3,2,2,1], [15,8,8,4])$.
Last, we revisit the orbit $\mathcal{O}_{[4,3,2,1,1]}$ featured in Example~\ref{colors} and compute $\mathfrak{A}$ on the input pair $([4,3,2,1,1], [15,14,9,4,4])$, taken from Achar's thesis \cite{Achart}. We also discuss the computation of $\mathfrak{A}_{\operatorname{iter}}$.
\begin{exam}
Set $\alpha := [2,1]$. Then $\alpha^* = [2,1]$. Reading $G_{\alpha}^{\text{red}}$ and $L_{\alpha}$ off the Young diagram of $\alpha$ (cf. Figure~\ref{rank}), we see that $G_{[2,1]}^{\text{red}} \cong GL_1 \times GL_1$ and $L_{[2,1]} \cong GL_2 \times GL_1$.
\begin{figure}
\caption{The Young diagram of $[2,1]$}
\label{rank}
\end{figure}
Note that \[\Omega_{[2,1]} = \lbrace [\nu_1, \nu_2] \in \mathbb{Z}^2 \rbrace \quad \text{and} \quad \Lambda^+_{[2,1]} = \lbrace [\lambda_1, \lambda_2, \lambda_3] \in \mathbb{Z}^3 : \lambda_1 \geq \lambda_2 \rbrace.\]
Let $\nu = [\nu_1, \nu_2] \in \Omega_{[2,1]}$. On input $(\alpha, \nu)$, the algorithm computes $\sigma^1 := \mathcal{R}_{-1}(\alpha, \nu)$. Since $\alpha_1 > \alpha_2$, the triple \[(\mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2 \rbrace), \alpha_1, \nu_1)\] is lexicographically greater than the triple \[(\mathcal{C}_{-1}(\alpha, \nu, 2, \varnothing, \lbrace 1 \rbrace), \alpha_2, \nu_2)\] if and only if
\begin{align} \label{pare}
\mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2 \rbrace) \geq \mathcal{C}_{-1}(\alpha, \nu, 2, \varnothing, \lbrace 1 \rbrace).
\end{align}
Therefore, $(\sigma^1)^{-1}(1) = 1$ if and only if Inequality~\ref{pare} holds. By construction of the ranking-by-ceilings algorithm, $(\sigma^1)^{-1}(2) \in \lbrace 1, 2 \rbrace \setminus \lbrace (\sigma^1)^{-1}(1) \rbrace$, so $\sigma^1$ is the identity in $\mathfrak{S}_2$ if Inequality~\ref{pare} holds, and transposes $1$ and $2$ otherwise.
Evaluating the candidate ceilings, we find:
\begin{align*}
& \mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2 \rbrace) = \mathcal{C}_{-1}([2,1], [\nu_1, \nu_2], 1, \varnothing, \lbrace 2 \rbrace) = \left \lceil \frac{\nu_1 + 1}{2} \right \rceil; \\ & \mathcal{C}_{-1}(\alpha, \nu, 2, \varnothing, \lbrace 1 \rbrace) = \mathcal{C}_{-1}([2,1], [\nu_1, \nu_2], 2, \varnothing, \lbrace 1 \rbrace) = \nu_2 + 1.
\end{align*}
Observe that \[\left \lceil \frac{\nu_1 + 1}{2} \right \rceil \geq \nu_2 + 1 \Longleftrightarrow \nu_1 \geq 2 \nu_2.\]
Hence \[\sigma^1 = \begin{cases}
12 & \nu_1 \geq 2 \nu_2 \\ 21 & \nu_1 \leq 2 \nu_2 - 1
\end{cases}.\]
We treat each case separately.
\begin{enumerate}
\item Suppose $\nu_1 \geq 2 \nu_2$.
The algorithm computes $\mu^1 := \mathcal{U}_{-1}(\alpha, \nu, \sigma^1)$. By definition, \[\mu^1_1 = \mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2 \rbrace) - 1 = \left \lceil \frac{\nu_1 - 1}{2} \right \rceil.\]
Since \[\mathcal{C}_{-1}(\alpha, \nu, 2, \lbrace 1 \rbrace, \varnothing) + 1 = \nu_2,\] and $\lceil \frac{\nu_1 - 1}{2} \rceil \geq \nu_2$, it follows that \[\mu^1_1 \geq \mathcal{C}_{-1}(\alpha, \nu, 2, \lbrace 1 \rbrace, \varnothing) + 1.\]
Hence \[\mu^1_2 = \mathcal{C}_{-1}(\alpha, \nu, 2, \lbrace 1 \rbrace, \varnothing) + 1 = \nu_2.\]
Then the algorithm sets $\alpha' := [1]$, and it defines $\nu'$ by \[\nu'_1 := \nu_1 - \mu^1_1 = \nu_1 - \left \lceil \frac{\nu_1 - 1}{2} \right \rceil = \left \lfloor \frac{\nu_1+1}{2} \right \rfloor.\]
Clearly, \[\mathfrak{A}(\alpha', \nu') = \mathcal{C}_{-1}(\alpha', \nu', 1, \varnothing, \varnothing) = \nu'_1 = \left \lfloor \frac{\nu_1+1}{2} \right \rfloor.\]
Hence \[\mathfrak{A}([2,1], [\nu_1, \nu_2]) = \left[\left \lceil \frac{\nu_1 - 1}{2} \right \rceil, \nu_2, \left \lfloor \frac{\nu_1 + 1}{2} \right \rfloor \right].\]
\item Suppose $\nu_1 \leq 2 \nu_2 - 1$.
The algorithm computes $\mu^1 := \mathcal{U}_{-1}(\alpha, \nu, \sigma^1)$. By definition, \[\mu^1_1 = \mathcal{C}_{-1}(\alpha, \nu, 2, \varnothing, \lbrace 1 \rbrace) - 1 = \nu_2.\]
Since \[\mathcal{C}_{-1}(\alpha, \nu, 1, \lbrace 2 \rbrace, \varnothing) + 1 = \left \lceil \frac{\nu_1 + 1}{2} \right \rceil\] and $\nu_2 \geq \lceil \frac{\nu_1 + 1}{2} \rceil$, it follows that \[\mu^1_1 \geq \mathcal{C}_{-1}(\alpha, \nu, 1, \lbrace 2 \rbrace, \varnothing) + 1.\]
Hence \[\mu^1_2 = \mathcal{C}_{-1}(\alpha, \nu, 1, \lbrace 2 \rbrace, \varnothing) + 1 = \left \lceil \frac{\nu_1+1}{2} \right \rceil.\]
Then the algorithm sets $\alpha' := [1]$, and it defines $\nu'$ by \[\nu'_1 := \nu_1 - \mu^1_2 = \nu_1 - \left \lceil \frac{\nu_1 + 1}{2} \right \rceil = \left \lfloor \frac{\nu_1 - 1}{2} \right \rfloor.\]
Clearly, \[\mathfrak{A}(\alpha', \nu') = \mathcal{C}_{-1}(\alpha', \nu', 1, \varnothing, \varnothing) = \nu'_1 = \left \lfloor \frac{\nu_1 - 1}{2} \right \rfloor.\]
Hence \[\mathfrak{A}([2,1], [\nu_1, \nu_2]) = \left[\nu_2, \left \lceil \frac{\nu_1 + 1}{2} \right \rceil, \left \lfloor \frac{\nu_1 - 1}{2} \right \rfloor \right].\]
\end{enumerate}
We conclude that \[\mathfrak{A}([2,1], [\nu_1, \nu_2]) = \begin{cases}
\left[\left \lceil \frac{\nu_1 - 1}{2} \right \rceil, \nu_2, \left \lfloor \frac{\nu_1 + 1}{2} \right \rfloor \right] & \nu_1 \geq 2 \nu_2 \\
\left[\nu_2, \left \lceil \frac{\nu_1 + 1}{2} \right \rceil, \left \lfloor \frac{\nu_1 - 1}{2} \right \rfloor \right] & \nu_1 \leq 2 \nu_2 - 1 \end{cases}.\]
Since $\rho_{[2,1]} = [\frac{1}{2}, -\frac{1}{2}, 0]$, assuming Theorem~\ref{main} holds, we find \[\gamma([2,1], [\nu_1, \nu_2]) = \begin{cases}
\left[\left \lceil \frac{\nu_1 + 1}{2} \right \rceil, \left \lfloor \frac{\nu_1 + 1}{2} \right \rfloor, \nu_2 - 1 \right] & \nu_1 \geq 2 \nu_2 \\
\left[\nu_2 + 1, \left \lceil \frac{\nu_1 - 1}{2} \right \rceil, \left \lfloor \frac{\nu_1 - 1}{2} \right \rfloor \right] & \nu_1 \leq 2 \nu_2 - 1 \end{cases}.\]
\end{exam}
\begin{exam}
Set $\alpha := [3,2,2,1]$. Then $\alpha^* = [4,3,1]$. Reading $G_{\alpha}^{\text{red}}$ and $L_{\alpha}$ off the diagram of $\alpha$ (cf. Figure~\ref{task}), we see that $G_{\alpha}^{\text{red}} \cong GL_1 \times GL_2 \times GL_1$ and $L_{\alpha} \cong GL_4 \times GL_3 \times GL_1$.
\begin{figure}
\caption{The Young diagram of $[3,2,2,1]$}
\label{task}
\end{figure}
Note that \[\Omega_{\alpha} = \lbrace \nu \in \mathbb{Z}^4 : \nu_2 \geq \nu_3 \rbrace\] and \[\Lambda^+_{\alpha} = \lbrace \lambda \in \mathbb{Z}^{8} : \lambda_1 \geq \lambda_2 \geq \lambda_3 \geq \lambda_4; \lambda_5 \geq \lambda_6 \geq \lambda_7 \rbrace.\]
Set $\nu := [15, 8, 8, 4] \in \Omega_{\alpha}$. On input $(\alpha, \nu)$, the algorithm computes \[\sigma^1 := \mathcal{R}_{-1}(\alpha, \nu) = 1234.\]
Next it computes \[\mu^1 := \mathcal{U}_{-1}(\alpha, \nu, \sigma^1) = [4, 4, 4, 4].\]
Then it sets \[\alpha' := [2, 1, 1] \quad \text{and} \quad \nu' := [11, 4, 4].\]
To finish off, it computes \[\mathfrak{A}(\alpha', \nu') = [5, 4, 4, 6].\]
Thus, \[\mathfrak{A}(\alpha, \nu) = [4, 4, 4, 4, 5, 4, 4, 6].\]
Since \[\rho_{\alpha} = \left[\frac{3}{2}, \frac{1}{2}, - \frac{1}{2}, - \frac{3}{2}, 1, 0, -1, 0 \right],\] assuming Theorem~\ref{main} holds, we find \[\gamma(\alpha, \nu) = [7,7,6,5,4,3,2,1].\]
Note that \[\frac{\nu_1}{\alpha_1} = 5 \quad \text{and} \quad \frac{\nu_2}{\alpha_2} = \frac{\nu_3}{\alpha_3} = \frac{\nu_4}{\alpha_4} = 4.\]
Therefore, if $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq 4 \\ 1 \leq j \leq \alpha_i}} \subset \mathbb{Z}$ is a collection such that $\nu_{i,j} \in \lbrace \lceil \frac{\nu_i}{\alpha_i} \rceil, \lfloor \frac{\nu_i}{\alpha_i} \rfloor \rbrace$ for all $i, j$, then \[\nu_{1,1} = \nu_{1,2} = \nu_{1,3} = 5 \quad \text{and} \quad \nu_{2,1} = \nu_{2,2} = \nu_{3,1} = \nu_{3,2} = \nu_{4,1} = 4.\]
Setting \[\mu^1 := [5, 4, 4, 4], \quad \mu^2 := [5, 4, 4], \quad \mu^3 := [5],\] we arrive at the induced weight $\mu = [5, 4, 4, 4, 5, 4, 4, 5]$, which has smaller norm than the output $\mathfrak{A}(\alpha, \nu) = [4, 4, 4, 4, 5, 4, 4, 6]$.
However, \[[5, 4, 4, 4, 5, 4, 4, 5] + 2 \rho_{\alpha} = [8, 5, 3, 1, 7, 4, 2, 5],\] which has larger norm than \[[4, 4, 4, 4, 5, 4, 4, 6] + 2 \rho_{\alpha} = [7, 5, 3, 1, 7, 4, 2, 6].\]
Thus, attempting to minimize $||\mathfrak{A}(\alpha, \nu)||$ leads to an incorrect answer for $\gamma(\alpha, \nu)$. It is essential to minimize $||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}||$, which is accomplished by our algorithm (cf. Remark~\ref{motiv}).
\end{exam}
\begin{exam} \label{acharexam}
Set $\alpha := [4,3,2,1,1]$. Then $\alpha^* = [5,3,2,1]$. Recall from Example~\ref{colors} that \[G_{\alpha}^{\text{red}} \cong GL_1 \times GL_1 \times GL_1 \times GL_2 \quad \text{and} \quad L_{\alpha} \cong GL_5 \times GL_3 \times GL_2 \times GL_1.\]
Note that \[\Omega_{\alpha} = \lbrace \nu \in \mathbb{Z}^5 : \nu_4 \geq \nu_5 \rbrace\] and \[\Lambda^+_{\alpha} = \lbrace \lambda \in \mathbb{Z}^{11} : \lambda_1 \geq \lambda_2 \geq \lambda_3 \geq \lambda_4 \geq \lambda_5; \lambda_6 \geq \lambda_7 \geq \lambda_8; \lambda_9 \geq \lambda_{10} \rbrace.\]
Set $\nu := [15, 14, 9, 4, 4] \in \Omega_{\alpha}$. On input $(\alpha, \nu)$, the algorithm computes \[\sigma^1 := \mathcal{R}_{-1}(\alpha, \nu) = 42135.\]
Next it computes \[\mu^1 := \mathcal{U}_{-1}(\alpha, \nu, \sigma^1) = [4, 4, 4, 4, 4].\]
Then it sets \[\alpha' := [3, 2, 1] \quad \text{and} \quad \nu' := [11, 10, 5].\]
To finish off, it computes \[\mathfrak{A}(\alpha', \nu') = [5, 5, 5, 5, 4, 2].\]
Thus, \[\mathfrak{A}(\alpha, \nu) = [4, 4, 4, 4, 4, 5, 5, 5, 5, 4, 2].\]
If we run $\mathfrak{A}_{\operatorname{iter}}$ on input $(\alpha, \nu)$, we obtain the following table.
\begin{center}
\begin{tabular}{ |l|l|l|l| }
\hline
$\alpha^1 = [4,3,2,1,1]$ & $\nu^1 = [15, 14, 9, 4, 4]$ & $\sigma^1 = 42135$ & $\mu^1 = [4, 4, 4, 4, 4]$ \\
$\alpha^2 = [3,2,1]$ & $\nu^2 = [11, 10, 5]$ & $\sigma^2 = 312$ & $\mu^2 = [5, 5, 5]$ \\
$\alpha^3 = [2,1]$ & $\nu^3 = [6, 5]$ & $\sigma^3 = 21$ & $\mu^3 = [5, 4]$ \\
$\alpha^4 = [1]$ & $\nu^4 = [2]$ & $\sigma^4 = 1$ & $\mu^4 = [2]$ \\
\hline
\end{tabular}
\end{center}
Hence \[\mathfrak{A}_{\operatorname{iter}}(\alpha, \nu) = [4, 4, 4, 4, 4, 5, 5, 5, 5, 4, 2] = \mathfrak{A}(\alpha, \nu).\]
Since \[\rho_{\alpha} = \left[2, 1, 0, -1, -2, 1, 0, -1, \frac{1}{2}, -\frac{1}{2}, 0 \right],\] assuming Theorem~\ref{main} holds, we find \[\gamma(\alpha, \nu) = [8, 7, 6, 6, 5, 4, 3, 3, 2, 2, 0].\]
This agrees with Achar's answer (cf. \cite{Achart}, Appendix A).
\end{exam}
\eject
\section{Weight Diagrams}
In this section, we define a class of combinatorial models, which Achar christened \textit{weight diagrams}. In form, weight diagrams are akin to Young tableaux; in function, they capture at the level of integer sequences the interactions in $K_0(\mathfrak{D})$ described in Lemmas~\ref{omega} and~\ref{lambda}. A weight diagram of shape-class $\alpha \vdash n$ simultaneously depicts a dominant integer sequence $\kappa(X)$ with respect to $\alpha$ and a dominant weight $h(X)$ of $L_{\alpha}$. We establish herein that $[IC_{(\alpha, \kappa(X))}]$ occurs in the decomposition of $[A^{\alpha}_{h(X)}]$ on the $\Omega$-basis.
Let $\alpha = [\alpha_1, \ldots, \alpha_{\ell}]$ be a partition of $n$ with conjugate partition $\alpha^* = [\alpha^*_1, \ldots, \alpha^*_s]$. Let $k_1 > \cdots > k_m$ be the distinct parts of $\alpha$, and $a_t$ be the multiplicity of $k_t$ for all $1 \leq t \leq m$.
\begin{df} \label{blank}
A \textit{blank diagram} of \textit{shape-class} $\alpha$ is a collection of unit squares (referred to as boxes) arranged in $\ell$ left-justified rows, which differs from a Young diagram of shape $\alpha$ only by permutation of the rows.
\end{df}
\begin{df} \label{diagram}
A \textit{weight diagram} of \textit{shape-class} $\alpha$ is a filling of a blank diagram of shape-class $\alpha$ by integer entries, with one entry in each box.
\end{df}
Let $D_{\alpha}$ be the set of all weight diagrams of shape-class $\alpha$. For a weight diagram $X \in D_{\alpha}$, we denote by $X^j_i$ the $i^{\text{th}}$ entry from the top in the $j^{\text{th}}$ column from the left. We next define a combinatorial map $E \colon D_{\alpha} \rightarrow D_{\alpha}$.
\begin{df}
Let $X$ be a weight diagram of shape-class $\alpha$. Set $EX$ to be the filling of the same blank diagram as $X$ given by $EX^j_i := X^j_i + \alpha^*_j - 2i + 1$ for all $1 \leq j \leq s$, $1 \leq i \leq \alpha^*_j$.
\end{df}
For the sake of convenience, we consider weight diagrams in pairs for which the second diagram is obtained from the first via $E$. When formulated to build diagram pairs rather than individual diagrams, the weight-diagrams version of our algorithm better stores simultaneously the combinatorial information pertinent to the corresponding elements in $\Omega_{\alpha}$ and $\Lambda^+$.
\begin{df}
Let $\overline{E} \colon D_{\alpha} \rightarrow D_{\alpha} \times D_{\alpha}$ denote the composition of the diagonal map $D_{\alpha} \rightarrow D_{\alpha} \times D_{\alpha}$ with the map $\operatorname{Id} \times E \colon D_{\alpha} \times D_{\alpha} \rightarrow D_{\alpha} \times D_{\alpha}$. A \textit{diagram pair} of \textit{shape-class} $\alpha$ is an ordered pair of diagrams $(X, Y)$ in $\overline{E}(D_{\alpha})$.
\end{df}
The nomenclature ``weight diagram'' is attributable to the natural maps $\kappa \colon D_{\alpha} \rightarrow \Omega_{\alpha}$, $h \colon D_{\alpha} \rightarrow \Lambda^+_{\alpha}$, and $\eta \colon D_{\alpha} \rightarrow \Lambda^+$, which we proceed to define.
\begin{df}
Let $X$ be a weight diagram of shape-class $\alpha$. For all $1 \leq t \leq m$, $1 \leq i \leq a_t$, $1 \leq j \leq k_t$, let $\kappa_X^j(t, i)$ be the entry of $X$ in the $j^{\text{th}}$ column and the $i^{\text{th}}$ row from the top among rows of length $k_t$. Then set \[\kappa_X(t) := \operatorname{dom} \left(\sum_{j=1}^{k_t} [\kappa_X^j(t, 1), \ldots, \kappa_X^j(t, a_t)] \right).\] Set $\kappa(X)$ to be the concatenation of the $m$-tuple $(\kappa_X(1), \ldots, \kappa_X(m))$.
\end{df}
\begin{df}
Let $X$ be a weight diagram of shape-class $\alpha$. For all $1 \leq j \leq s$, set $h_X^j := \operatorname{dom}([X^j_1, \ldots, X^j_{\alpha^*_j}])$. Then set $h(X)$ to be the concatenation of the $s$-tuple $(h_X^1, \ldots, h_X^s)$.
\end{df}
\begin{df}
Let $Y$ be a weight diagram of shape-class $\alpha$. Set $\eta(Y) := \operatorname{dom}(h(Y))$.
\end{df}
Suppose that the entries of $X$ are weakly decreasing down each column. Then $E$ lifts the addition of $2 \rho_{\alpha}$ to the underlying $L_{\alpha}$-weight of $X$; in other words, $h(EX) = h(X) + 2 \rho_{\alpha}$. Hence
\begin{align} \label{compat}
\eta(EX) = \operatorname{dom}(h(X) + 2 \rho_{\alpha}).
\end{align}
If $X$ is \textit{distinguished} (cf. Definition~\ref{dis}), then the pair $(\alpha, \kappa(X)) \in \Omega$ and the dominant weight $\eta(EX) \in \Lambda^+$ correspond under $\gamma$ (cf. Theorem~\ref{achar}), and both can be read off the diagram pair $(X, EX)$. The task of the weight-diagrams version of our algorithm is to find, on input $(\alpha, \nu)$, a distinguished diagram $X$ such that $\kappa(X) = \nu$, and output $(X, EX)$.
\begin{exam} \label{thes}
We present a diagram pair of shape-class $[4,3,2,1,1] \vdash 11$, taken from Achar's thesis \cite{Achart}.
\begin{figure}
\caption{A diagram pair of shape-class $[4,3,2,1,1]$}
\end{figure}
We see that $\kappa(X) = [15, 14, 9, 4, 4]$ and $h(X) = [4, 4, 4, 4, 4, 5, 5, 4, 5, 4, 3]$. Furthermore, $Y = EX$, and $\eta(Y) = [8, 7, 6, 6, 5, 4, 3, 3, 2, 2, 0]$. As noted in Example~\ref{acharexam}, \[\gamma([4,3,2,1,1], \kappa(X)) = \eta(Y).\]
\end{exam}
\begin{thm} \label{decomp}
Let $(X, Y) \in \overline{E}(D_{\alpha})$ be a diagram pair of shape-class $\alpha$. Then $V^{(\alpha, \kappa(X))}$ occurs in the decomposition of $W^{h(X)}$ as a direct sum of irreducible $G_{\alpha}^{\text{red}}$-representations. Furthermore, $[IC_{(\alpha, \kappa(X))}]$ occurs in the decomposition of $[A^{\alpha}_{h(X)}]$ on the $\Omega$-basis.
\end{thm}
\begin{proof}
It suffices to prove the former statement, for the latter follows from the former in view of Lemma~\ref{omega}. For all $1 \leq t \leq m$, $1 \leq j \leq k_t$, set \[\kappa_X^j(t) := \operatorname{dom}([\kappa_X^j(t, 1), \ldots, \kappa_X^j(t, a_t)]).\] For all $1 \leq j \leq s$, let $\kappa_X^j$ be the concatenation of $\prod_{t : k_t \geq j} \kappa_X^j(t)$. Finally, set $\kappa_X^{\text{ref}}$ to be the concatenation of $(\kappa_X^1, \ldots, \kappa_X^s)$.
Observe first that $\kappa_X^{\text{ref}}$ is a dominant weight of $L_{\alpha}^{\text{ref}} := L_{X_{\alpha}}^{\text{ref}}$ with respect to the Borel subgroup $B_{\alpha}^{\text{ref}} := B_{L_{\alpha}^{\text{ref}}}$. To see this, note that $\kappa_X^j(t)$ is weakly decreasing for all $1 \leq t \leq m$, $1 \leq j \leq k_t$, and $L_{\alpha}^{\text{ref}}$ is included in $L_{\alpha}$ via the product, over all $1 \leq j \leq s$, of the inclusions $\prod_{t : k_t \geq j} GL_{a_t} \rightarrow GL_{\alpha^*_j}$ (cf. section 1.2).
Since $\kappa_X^j$ is a permutation of $h_X^j$ for all $1 \leq j \leq s$, it follows that $\kappa_X^{\text{ref}}$ belongs to the $W_{\alpha}$-orbit of $h(X)$, so $\kappa_X^{\text{ref}}$ is a weight of the $L_{\alpha}$-representation $W^{h(X)}$. Let $w \in W_{\alpha}$ be chosen so that $w(\kappa_X^{\text{ref}}) = h(X)$. We claim that $\kappa_X^{\text{ref}}$ is a highest weight of the restriction of $W^{h(X)}$ to $L_{\alpha}^{\text{ref}}$.
Let $\Phi_{\alpha} \subset \Lambda$ be the set of roots of $L_{\alpha}$, and let $\Phi_{\alpha}^{\text{ref}} \subset \Phi_{\alpha}$ be the subset of roots of $L_{\alpha}^{\text{ref}}$. Assume for the sake of contradiction that there exists a root $\beta \in \Phi_{\alpha}^{\text{ref}}$, positive with respect to $B_{\alpha}^{\text{ref}}$, such that $\kappa_X^{\text{ref}} + \beta$ is a weight of $W^{h(X)}$. Let $\beta^{\vee}$ denote the coroot corresponding to $\beta$. Then $\langle \kappa_X^{\text{ref}}, \beta^{\vee} \rangle \geq 0$, which implies $\langle h(X), \beta_1^{\vee} \rangle \geq 0$, where $\beta_1 := w(\beta)$.
However, $w(\kappa_X^{\text{ref}} + \beta) = h(X) + \beta_1$ is a weight of $W^{h(X)}$, so $\beta_1$ must be negative with respect to $B_{\alpha}$. Since $h(X)$ is dominant with respect to $B_{\alpha}$, it follows that $\langle h(X), \beta_1^{\vee} \rangle \leq 0$.
We conclude that $\langle h(X), \beta_1^{\vee} \rangle = 0$. Let $s_{\beta_1} \in W_{\alpha}$ be the reflection corresponding to $\beta_1$. Then $s_{\beta_1}(h(X)) = h(X)$. Hence $s_{\beta_1}(h(X) + \beta_1) = h(X) - \beta_1$ is a weight of $W^{h(X)}$ that exceeds $h(X)$ in the root order. (Contradiction.)
Let $V$ be the $(GL_{a_1})^{k_1} \times \cdots \times (GL_{a_m})^{k_m}$-representation given by \[ V := \left(V^{\kappa_X^1(1)} \boxtimes \cdots \boxtimes V^{\kappa_X^{k_1}(1)}\right) \boxtimes \cdots \boxtimes \left(V^{\kappa_X^1(m)} \boxtimes \cdots \boxtimes V^{\kappa_X^{k_m}(m)}\right).\]
What we have just shown implies that $V$ occurs in the decomposition of $W^{h(X)}$ as a direct sum of irreducible $L_{\alpha}^{\text{ref}}$-representations. Recall from section 1.2 that $G_{\alpha}^{\text{red}}$ is embedded in $L_{\alpha}^{\text{ref}}$ via the product, over all $1 \leq t \leq m$, of the diagonal embeddings $GL_{a_t} \rightarrow (GL_{a_t})^{k_t}$. It follows that the restriction of $V$ to $G_{\alpha}^{\text{red}} \cong GL_{a_1, \ldots, a_m}$ is \[\left(V^{\kappa_X^1(1)} \otimes \cdots \otimes V^{\kappa_X^{k_1}(1)}\right) \boxtimes \cdots \boxtimes \left(V^{\kappa_X^1(m)} \otimes \cdots \otimes V^{\kappa_X^{k_m}(m)}\right).\]
Therefore, to see that \[\dim \operatorname{Hom}_{G_{\alpha}^{\text{red}}} \left(V^{(\alpha, \kappa(X))}, V \right) > 0,\] it suffices to show that \[\dim \operatorname{Hom}_{GL_{a_t}}\left(V^{\kappa_X(t)}, V^{\kappa_X^1(t)} \otimes \cdots \otimes V^{\kappa_X^{k_t}(t)}\right) > 0\] for all $1 \leq t \leq m$.
This is a consequence of the Parthasarathy--Ranga Rao--Varadarajan conjecture, first proved for complex semisimple algebraic groups (via sheaf cohomology) by Kumar \cite{Kumar} in 1988. For complex general linear groups, a combinatorial proof via honeycombs is given in Knutson--Tao \cite{Knutson}, section 4.
\end{proof}
\begin{rem}
In Achar's work, the corresponding claim is Proposition 4.4 in \cite{Acharj}. Unfortunately, Achar's proof is incorrect: He implicitly assumes that the combinatorial map $\kappa \colon D_{\alpha} \rightarrow \Omega_{\alpha}$ lifts the action of a representation-theoretic map $\Lambda^+_{\alpha} \rightarrow \Omega_{\alpha}$, which he also denotes by $\kappa$, so that $\kappa(X) = \kappa(h(X))$. This is manifestly untrue, for permuting the entries within a column of $X$ affects $\kappa(X)$ but leaves $h(X)$ unchanged.
Thus, Achar's assertion:
\begin{quote} ``\ldots the $G^{\alpha}$-submodule generated by the $\mu$-weight space of $V^L_{\mu}$ is a representation whose highest weight is the restriction of $\mu$, \textit{which is exactly what $E$ is}'' [emphasis added]
\end{quote}
is false unless $\kappa_X^{\text{ref}}$ coincides with $h(X)$ and $\kappa_X(t) = \sum_{j=1}^{k_t} \kappa_X^j(t)$ for all $1 \leq t \leq m$ --- in which case the $L_{\alpha}^{\text{ref}}$-subrepresentation of $W^{h(X)}$ generated by the highest weight space is isomorphic to $V$, and the highest weight of its restriction to $G_{\alpha}^{\text{red}}$ is $\kappa(X)$.
\end{rem}
\begin{exam}
Set $\alpha := [2,2]$. Note that $G_{[2,2]}^{\text{red}} \cong GL_{2}$ and $L_{[2,2]}^{\text{ref}} = L_{[2,2]} \cong (GL_{2})^2$. Furthermore, $G_{[2,2]}^{\text{red}}$ is embedded in $L_{[2,2]}$ via the diagonal embedding $GL_2 \rightarrow (GL_2)^2$.
Let $X_1$ and $X_2$ be the weight diagrams $\begin{smallmatrix} 1 & 1 \\ 0 & 0 \end{smallmatrix}$ and $\begin{smallmatrix} 1 & 0 \\ 0 & 1 \end{smallmatrix}$, respectively. Then \[\kappa(X_1) = [2,0], \quad \kappa(X_2) = [1,1], \quad \text{and} \quad h(X_1) = h(X_2) = [1,0,1,0].\]
The restriction of the $L_{[2,2]}$-representation \[W^{[1,0,1,0]} = W^{[1,0]} \boxtimes W^{[1,0]}\] to $G_{[2,2]}^{\text{red}}$ is \[W^{[1,0]} \otimes W^{[1,0]} \cong W^{[2,0]} \oplus W^{[1,1]}.\]
Hence Theorem~\ref{decomp} holds for $X_1$ and $X_2$.
However, Achar's proof is valid for $X_1$ only. To see this, let $v$ and $w$ be weight vectors of $W^{[1,0]}$ of weight $[1,0]$ and $[0,1]$, respectively. Up to scaling, \[\lbrace v \otimes v, v \otimes w, w \otimes v, w \otimes w \rbrace\] is the unique basis of weight vectors for $W^{[1,0]} \boxtimes W^{[1,0]}$. Whereas $v \otimes v$ and $w \otimes w$ each generates a $GL_2$-subrepresentation isomorphic to $W^{[2,0]}$, both $v \otimes w$ and $w \otimes v$ are cyclic vectors. No weight space of $W^{[1,0]} \boxtimes W^{[1,0]}$ generates a $GL_2$-subrepresentation isomorphic to $W^{[1,1]}$ (instead, $W^{[1,1]}$ is generated by $v \otimes w - w \otimes v$).
\end{exam}
\begin{cor} \label{decamp}
Let $\nu \in \Omega_{\alpha}$, and let $\lbrace \nu_{i,j} \rbrace_{\substack{1 \leq i \leq \ell \\ 1 \leq j \leq \alpha_i}}$ be a collection of integers such that \[\nu_i = \nu_{i, 1} + \cdots + \nu_{i, \alpha_i}\] for all $1 \leq i \leq \ell$. For all $1 \leq j \leq s$, set $\mu^j := \operatorname{dom}([\nu_{1, j}, \ldots, \nu_{\alpha^*_j, j}])$. Set $\mu$ to be the concatenation of $(\mu^1, \ldots, \mu^s)$. Then $\mu \in \Lambda^+_{\alpha, \nu}$.
\end{cor}
\begin{proof}
Let $X$ be the filling of the Young diagram of shape $\alpha$ for which $\nu_{i,j}$ is the entry in the $i^{\text{th}}$ row and $j^{\text{th}}$ column of $X$ for all $i, j$. Then $\kappa(X) = \nu$, and $h(X) = \mu$. Hence the result follows from Theorem~\ref{decomp}.
\end{proof}
\begin{cor} \label{inside}
Let $\nu \in \Omega_{\alpha}$. Then $\mathfrak{A}(\alpha, \nu) \in \Lambda^+_{\alpha, \nu}$.
\end{cor}
\begin{proof}
By Remark~\ref{iter}, it suffices to show that $\mathfrak{A}_{\operatorname{iter}}(\alpha, \nu) \in \Lambda^+_{\alpha, \nu}$. For all $1 \leq i \leq \ell$ and $1 \leq j \leq \alpha_i$, set $\nu_{i,j} := \mu^j_{\sigma^{j}(i)}$. Then Corollary~\ref{decamp} implies the result.
\end{proof}
\eject
\section{The Algorithm, Weight-Diagrams Version}
\subsection{Overview}
In this section, we reengineer our algorithm from section 2.2 to output diagram pairs rather than weights. Let $D_{\ell}$ be the set of weight diagrams, of any shape-class, with $\ell$ rows. For a diagram $X \in D_{\ell}$, we denote by $X_{i,j}$ the entry of $X$ in the $i^{\text{th}}$ row and the $j^{\text{th}}$ column.
We define a recursive algorithm $\mathcal{A}$ that computes a map \[\mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \lbrace \pm 1 \rbrace \rightarrow D_{\ell} \times D_{\ell}\] by determining the entries in the first column of each diagram of its output and using recursion to ascertain the entries in the remaining columns. Whenever we write $\mathcal{A}(\alpha, \nu)$, we refer to $\mathcal{A}(\alpha, \nu, -1)$.
Let maps $p_1, p_2 \colon D_{\ell} \times D_{\ell} \rightarrow D_{\ell}$ be given by projection onto the first and second factors, respectively. We refer to $p_1 \mathcal{A}(\alpha, \nu)$ as the \textit{left} diagram and to $p_2 \mathcal{A}(\alpha, \nu)$ as the \textit{right} diagram. The algorithm $\mathcal{A}$ computes the Lusztig--Vogan bijection via $\gamma(\alpha, \nu) = \eta p_2 \mathcal{A}(\alpha, \nu)$.
While $\mathcal{A}$ relies on the same functions as $\mathfrak{A}$ for its computations, it also requires companion versions of these functions that use floors rather than ceilings. The \textit{candidate-floor} function $\mathcal{C}_1$, and the \textit{ranking-by-floors} and \textit{column-floors} algorithms $\mathcal{R}_1$ and $\mathcal{U}_1$, are analogous to the function $\mathcal{C}_{-1}$, and the algorithms $\mathcal{R}_{-1}$ and $\mathcal{U}_{-1}$, respectively, and we define them formally in section 4.2.
More substantively, the recursive structure of $\mathcal{A}$ differs from that of $\mathfrak{A}$. The integer-sequences version is singly recursive: On input $(\alpha, \nu)$, it reduces the task of determining the output to one sub-problem, namely, computing $\mathfrak{A}(\alpha', \nu')$. In contrast, the weight-diagrams version is multiply recursive, and, depending on the input, it may require the solutions to several sub-problems to be assembled in order to return the output.
After computing the first column of each output diagram, the weight-diagrams version creates a separate branch for each \textit{distinct} entry in the first column of the left diagram. Then it attaches each branch's output diagrams to the first columns already computed to build the output diagrams of the whole recursion tree. The attachment process is trivial; preparing each branch for its recursive call is not.\footnote{Thus, $\mathcal{A}$ deviates from the pattern of most prototypical divide-and-conquer algorithms, such as mergesort, for which dividing the residual input into branches is easier than combining the resulting outputs. }
On input $(\alpha, \nu, \epsilon)$, the algorithm $\mathcal{A}$ undertakes the following steps to compute $p_1 \mathcal{A}(\alpha, \nu, \epsilon)$ (the diagram $p_2 \mathcal{A}(\alpha, \nu, \epsilon)$ is computed simultaneously and similarly):
\begin{enumerate}
\item It computes $\sigma := \mathcal{R}_{\epsilon}(\alpha, \nu)$, which it construes as permuting the rows of a blank diagram of shape $\alpha$;\footnote{By a diagram of shape $\alpha$, we mean a diagram for which the $i^{\text{th}}$ row contains $\alpha_i$ boxes for all $1 \leq i \leq \ell$. }
\item It fills in the first column of the (permuted) diagram with the entries of $\iota := \mathcal{U}_{\epsilon}(\alpha, \nu, \sigma)$;
\item For each row, it appeals to the \textit{row-survival function} to query whether the row \textit{survives} into the residual input (viz., is of length greater than $1$), and, if so, determine which branch of the residual input it is sorted into (and its position therein);
\item For all $x$, it records the surviving rows in the $x^{\text{th}}$ branch in $\alpha^{(x)}$, and subtracts off the corresponding entries in $\iota$ from those in $\nu$ to obtain $\nu^{(x)}$;
\item For all $x$, it adjusts $\nu^{(x)}$ to $\hat{\nu}^{(x)}$ to reflect the data from the other branches;
\item For all $x$, it sets $X^{(x)} := p_1 \mathcal{A}(\alpha^{(x)}, \hat{\nu}^{(x)}, -\epsilon)$ and attaches $X^{(x)}$ to the first column.
\end{enumerate}
After the rows of a blank diagram of shape $\alpha$ have been permuted according to $\sigma \in \mathfrak{S}_{\ell}$, the $i^{\text{th}}$ row from the top is of length $\alpha_{\sigma^{-1}(i)}$. Thus, the $i^{\text{th}}$ row \textit{survives} into the residual input if and only if $\alpha_{\sigma^{-1}(i)} > 1$. Which branch it belongs to depends on its first-column entry.
The first column of the permuted diagram is filled in with the entries of $\iota$. Each distinct entry $\iota^{\circ}$ in $\iota$ gives rise to its own branch, comprising the surviving rows whose first-column entry is $\iota^{\circ}$ (a branch may be empty). If the $i^{\text{th}}$ row does survive, it is sorted into the $x^{\text{th}}$ branch, where $x$ is the number of distinct entries in the subsequence $[\iota_1, \ldots, \iota_i]$; if, furthermore, exactly $i'$ rows among the first $i$ survive into the $x^{\text{th}}$ branch, then the $i^{\text{th}}$ row becomes the $i'^{\text{th}}$ row in the $x^{\text{th}}$ branch.
To encompass these observations, we define the \textit{row-survival} function as follows.
\begin{df}
For all $(\alpha, \sigma, \iota) \in \mathbb{N}^{\ell} \times \mathfrak{S}_{\ell} \times \mathbb{Z}^{\ell}_{\text{dom}}$, \[\mathcal{S}(\alpha, \sigma, \iota) \colon \lbrace 1, \ldots, \ell \rbrace \rightarrow \lbrace 1, \ldots, \ell \rbrace \times \lbrace 0, 1, \ldots, \ell \rbrace\] is given by \[\mathcal{S}(\alpha, \sigma, \iota)(i) := \big(|\lbrace \iota_{i'} : i' \leq i \rbrace|, |\lbrace i' \leq i : \iota_{i'} = \iota_i; \alpha_{\sigma^{-1}(i')} > 1 \rbrace| \cdot 1_{i} \big),\] where \[1_{i} := \begin{cases}
1 & \alpha_{\sigma^{-1}(i)} > 1 \\ 0 & \alpha_{\sigma^{-1}(i)} = 1
\end{cases}.\]
\end{df}
\begin{rem}
Suppose $\mathcal{S}(\alpha, \sigma, \iota)(i) = (x, i')$. Assuming $i' > 0$, the $i^{\text{th}}$ row becomes the $i'^{\text{th}}$ row in the $x^{\text{th}}$ branch (if $i' = 0$, the row dies).
\end{rem}
\begin{exam} \label{surv}
We revisit the input $(\alpha, \nu) := ([4,3,2,1,1], [15,14,9,4,4])$ from Example~\ref{acharexam}. As noted therein, $\sigma := \mathcal{R}_{-1}(\alpha, \nu) = 42135$ and $\iota := \mathcal{U}_{-1}(\alpha, \nu, \sigma) = [4,4,4,4,4]$. Thus, beginning with a blank diagram of shape $\alpha$, we see that the permuted diagram (with first column filled in) looks like
\begin{figure}
\caption{The left diagram after steps 1 and 2}
\end{figure}
From the picture, it is clear that there is exactly one branch, comprising the first, second, and fourth rows. The row-survival function indicates the same, for \[\mathcal{S}(\alpha, \sigma, \iota)(1,2,3,4,5) = ((1,1), (1,2), (1,0), (1,3), (1,0)).\]
We see later that $\mathcal{A}(\alpha, \nu) = (X, Y)$ in the notation of Example~\ref{thes}.
\end{exam}
\subsection{The algorithm}
Before we describe the algorithm, we define the preliminary functions that use floors.
\begin{df}
Given a pair of integer sequences \[(\alpha, \nu) = ([\alpha_1, \ldots, \alpha_{\ell}], [\nu_1, \ldots, \nu_{\ell}]) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell},\] an integer $i \in \lbrace 1, \ldots, \ell \rbrace$, and an ordered pair of disjoint sets $(I_a, I_b)$ satisfying $I_a \cup I_b = \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i \rbrace$, we define the \textit{candidate-floor} function $\mathcal{C}_1$ as follows:
\[\mathcal{C}_1(\alpha, \nu, i , I_a, I_b) := \left \lfloor \frac{\nu_i - \sum_{j \in I_a} \min \lbrace \alpha_i, \alpha_j \rbrace + \sum_{j \in I_b} \min \lbrace \alpha_i, \alpha_j \rbrace}{\alpha_i} \right \rfloor.\]
\end{df}
\begin{df}
The \textit{ranking-by-floors} algorithm $\mathcal{R}_1$ computes a function $\mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \rightarrow \mathfrak{S}_{\ell}$ iteratively over $\ell$ steps.
Say $\mathcal{R}_1(\alpha, \nu) = \sigma$. On the $i^{\text{th}}$ step of the algorithm, $\sigma^{-1}(\ell), \ldots, \sigma^{-1}(\ell-i+2)$ have already been determined. Set \[J_i := \lbrace \sigma^{-1}(\ell), \ldots, \sigma^{-1}(\ell-i+2) \rbrace \quad \text{and} \quad J'_i := \lbrace 1, \ldots, \ell \rbrace \setminus J_i.\] Then $\sigma^{-1}(\ell-i+1)$ is designated the numerically maximal $j \in J'_i$ among those for which \[(\mathcal{C}_1(\alpha, \nu, j, J'_i \setminus \lbrace j \rbrace, J_i), -\alpha_j, \nu_j)\] is lexicographically minimal.
\end{df}
\begin{df}
The \textit{column-floors} algorithm $\mathcal{U}_1$ is iterative with $\ell$ steps and computes a function $\mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \mathfrak{S}_{\ell} \rightarrow \mathbb{Z}^{\ell}_{\text{dom}}$.
Say $\mathcal{U}_1(\alpha, \nu, \sigma) = [\iota_1, \ldots, \iota_{\ell}]$. On the $i^{\text{th}}$ step of the algorithm, $\iota_{\ell}, \ldots, \iota_{\ell - i + 2}$ have already been determined. Then \[\iota_{\ell - i + 1} := \mathcal{C}_1(\alpha, \nu, \sigma^{-1}(\ell - i + 1), \sigma^{-1} \lbrace 1, \ldots, \ell - i \rbrace, \sigma^{-1} \lbrace \ell - i + 2, \ldots, \ell \rbrace) + \ell - 2i + 1\] unless the right-hand side is less than $\iota_{\ell - i +2}$, in which case $\iota_{\ell - i + 1} := \iota_{\ell - i + 2}$.
\end{df}
We assemble these functions, together with the preliminary functions that use ceilings, and the row-survival function, into the recursive algorithm $\mathcal{A} \colon \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \lbrace \pm 1 \rbrace \rightarrow D_{\ell} \times D_{\ell}$.
On input $(\alpha, \nu, \epsilon)$, the algorithm sets \[\sigma := \mathcal{R}_{\epsilon}(\alpha, \nu) \quad \text{and} \quad \iota := \mathcal{U}_{\epsilon}(\alpha, \nu, \sigma).\]
Next it sets \[X_{i,1} := \iota_{i} \quad \text{and} \quad Y_{i, 1} := \iota_i + \ell - 2i + 1\] for all $1 \leq i \leq \ell$.
For all $(x, i')$ in the image of $\mathcal{S}(\alpha, \sigma, \iota)$ such that $i' > 0$, we write \[i_{(x, i')} := \mathcal{S}(\alpha, \sigma, \iota)^{-1}(x, i').\]
The algorithm sets $\mathcal{k} := |\lbrace \iota_1, \ldots, \iota_{\ell} \rbrace|$, which counts the number of branches.
For all $1 \leq x \leq \mathcal{k}$, it sets \[\ell_x := \max \left\lbrace i' : (x, i') \in \mathcal{S}(\alpha, \sigma, \iota) \lbrace 1, \ldots, \ell \rbrace \right\rbrace.\]
Note that $\ell_x$ counts the number of rows surviving into the $x^{\text{th}}$ branch; if $\ell_x = 0$, then the $x^{\text{th}}$ branch is empty.
If $\ell_x > 0$, then the $x^{\text{th}}$ branch contains $\ell_x$ surviving rows, and the algorithm sets \[\alpha^{(x)} := \left [\alpha_{\sigma^{-1}\left(i_{(x, 1)}\right)} - 1, \ldots, \alpha_{\sigma^{-1}\left(i_{(x, \ell_x)}\right)} - 1 \right] \] and \[\nu^{(x)} = \left [\nu_{\sigma^{-1}\left(i_{(x, 1)}\right)} - \iota_{i_{(x, 1)}}, \ldots, \nu_{\sigma^{-1}\left(i_{(x, \ell_x)}\right)} - \iota_{i_{(x, \ell_x)}} \right].\]
The algorithm does not call itself directly on $(\alpha^{(x)}, \nu^{(x)})$ because it must first adjust $\nu^{(x)}$ to reflect the data from the other branches, if any are present.
For all $1 \leq i' \leq \ell_x$, it sets \[\hat{\nu}^{(x)}_{i'} := \nu^{(x)}_{i'} - \sum_{x' = 1}^{x-1} \sum_{i_0 = 1}^{\ell_{x'}} \min \left\lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right\rbrace + \sum_{x' = x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell_{x'}} \min \left\lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right\rbrace.\]
Then it sets $\hat{\nu}^{(x)} := \left[\hat{\nu}^{(x)}_1, \ldots, \hat{\nu}^{(x)}_{\ell_x}\right]$ and $\left(X^{(x)}, Y^{(x)} \right) := \mathcal{A}\left(\alpha^{(x)}, \hat{\nu}^{(x)}, -\epsilon \right)$.
The algorithm fills in the rest of the entries of $X$ and $Y$ according to the following rule: For all $(i', j') \in \mathbb{N} \times \mathbb{N}$ such that $X^{(x)}$ and $Y^{(x)}$ each have an entry in the $i'^{\text{th}}$ row and $j'^{\text{th}}$ column,
\begin{align} \label{attachx}
X_{i_{(x, i')}, j'+1} := X^{(x)}_{i', j'} + \sum_{x' = 1}^{x-1} (\alpha^{(x')})^*_{j'} - \sum_{x' = x+1}^{\mathcal{k}} (\alpha^{(x')})^*_{j'},
\end{align}
where $(\alpha^{(x')})^*_{j'} := |\lbrace i_0 : \alpha^{(x')}_{i_0} \geq j' \rbrace|$, and
\begin{align} \label{attachy}
Y_{i_{(x,i')}, j'+1} := Y^{(x)}_{i', j'}.
\end{align}
Finally, it returns $(X, Y)$.
Henceforward we adopt the notation of Equation~\ref{attachx} and denote $|\lbrace i : \alpha_i \geq j \rbrace|$ by $\alpha^*_j$ for all integer sequences $\alpha$, regardless of whether $\alpha$ is a partition.
\begin{exam} \label{cont}
Maintain the notation of Example~\ref{surv}. We proceed to compute $\mathcal{A}(\alpha, \nu, -1)$.
Since $\sigma := \mathcal{R}_{-1}(\alpha, \nu) = 42135$ and $\iota := \mathcal{U}_{-1}(\alpha, \nu, \sigma) = [4,4,4,4,4]$, we see that \[ [X_{1, 1}, X_{2, 1}, X_{3, 1}, X_{4, 1}, X_{5, 1}] = [4, 4, 4, 4, 4]\] and \[ [Y_{1, 1}, Y_{2, 1}, Y_{3, 1}, Y_{4, 1}, Y_{5, 1}] = [8, 6, 4, 2, 0].\]
Set $f := \mathcal{S}(\alpha, \sigma, \iota)$. Recall from Example~\ref{surv} that \[\big(f(1), f(2), f(3), f(4), f(5)\big) = \big((1,1), (1,2), (1,0), (1,3), (1,0)\big).\]
Thus, $\mathcal{k} = 1$ and $\ell_1 = 3$. Furthermore, $(i_{(1,1)}, i_{(1,2)}, i_{(1,3)}) = (1, 2, 4)$. It follows that \[\alpha^{(1)} = [1, 2, 3] \quad \text{and} \quad \hat{\nu}^{(1)} = \nu^{(1)} = [5, 10, 11].\]
(Since the first branch is the only branch, no adjustment to $\nu^{(1)}$ is required and $\hat{\nu}^{(1)} = \nu^{(1)}$.)
As it happens, we find that $X^{(1)}$ and $Y^{(1)}$ look as depicted in Figure~\ref{firstbranch}.
\begin{figure}
\caption{The diagram pair obtained from the first branch}
\label{firstbranch}
\end{figure}
Finally, we ``attach'' $X^{(1)}$ and $Y^{(1)}$ to the first columns of $X$ and $Y$, respectively, to complete the output.
\begin{figure}
\caption{The diagram pair obtained from the recursion tree}
\end{figure}
\end{exam}
Since Example~\ref{cont} involves only one branch, it does not fully illustrate the contours of the algorithm. For this reason, we also show how the algorithm computes $X^{(1)}$ and $Y^{(1)}$ in Example~\ref{cont}, during which we encounter multiple branches.
\begin{exam}
Set $\alpha := [1,2,3]$ and $\nu := [5,10,11]$. We compute $(\mathsf{X}, \mathsf{Y}) := \mathcal{A}(\alpha, \nu, 1)$.
We find $\sigma := \mathcal{R}_1(\alpha, \nu) = 123$ and $\iota := \mathcal{U}_1(\alpha, \nu, \sigma) = [5,5,4]$, so \[[\mathsf{X}_{1,1}, \mathsf{X}_{2,1}, \mathsf{X}_{3,1}] = [5,5,4] \quad \text{and} \quad [\mathsf{Y}_{1,1}, \mathsf{Y}_{2,1}, \mathsf{Y}_{3,1}] = [7,5,2].\]
Set $\mathsf{f} := \mathcal{S}(\alpha, \sigma, \iota)$. Note that \[\big(\mathsf{f}(1), \mathsf{f}(2), \mathsf{f}(3)\big) = \big((1,0), (1,1), (2,1)\big).\]
Thus, $\mathcal{k} = 2$ and $\ell_1 = \ell_2 = 1$. Furthermore, $i_{(1,1)} = 2$ and $i_{(2,1)} = 3$. It follows that \[(\alpha^{(1)}, \nu^{(1)}) = ([1], [5]) \quad \text{and} \quad (\alpha^{(2)}, \nu^{(2)}) = ([2], [7]).\]
Hence \[(\alpha^{(1)}, \hat{\nu}^{(1)}) = ([1], [6]) \quad \text{and} \quad (\alpha^{(2)}, \hat{\nu}^{(2)}) = ([2], [6]).\]
We draw the diagram pairs $(\mathsf{X}^{(1)}, \mathsf{Y}^{(1)})$ and $(\mathsf{X}^{(2)}, \mathsf{Y}^{(2)})$ below.
\begin{figure}
\caption{The diagram pairs obtained from the first and second branches}
\end{figure}
Finally, we ``attach'' these diagrams to the first columns computed above to complete the output.
\begin{figure}
\caption{The diagram pair obtained from the recursion tree}
\end{figure}
\textit{Nota bene.} Equation~\ref{attachx} dictates that the entries of $\mathsf{X}^{(1)}$ and $\mathsf{X}^{(2)}$ must be modified before they can be adjoined to $\mathsf{X}$, but the entries of $\mathsf{Y}^{(1)}$ and $\mathsf{Y}^{(2)}$ are adjoined to $\mathsf{Y}$ as they are.
\end{exam}
\subsection{Properties}
The following propositions delineate a few properties of $\mathcal{A}$.
\begin{prop} \label{permute}
Let $(\beta, \xi), (\alpha, \nu) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell}$, and suppose that the multisets \[\lbrace (\beta_1, \xi_1), \ldots, (\beta_{\ell}, \xi_{\ell}) \rbrace \quad \text{and} \quad \lbrace (\alpha_1, \nu_1), \ldots, (\alpha_{\ell}, \nu_{\ell}) \rbrace\] are coincident. Then $\mathcal{A}(\beta, \xi, \pm 1) = \mathcal{A}(\alpha, \nu, \pm 1)$.
\end{prop}
\begin{proof}
We prove $\mathcal{A}(\beta, \xi, -1) = \mathcal{A}(\alpha, \nu, -1)$.
Set $\tau := \mathcal{R}_{-1}(\beta, \xi)$ and $\sigma := \mathcal{R}_{-1}(\alpha, \nu)$. It suffices to show that
\begin{align} \label{perm}
\beta_{\tau^{-1}(i)} = \alpha_{\sigma^{-1}(i)} \quad \text{and} \quad \xi_{\tau^{-1}(i)} = \nu_{\sigma^{-1}(i)}
\end{align}
for all $1 \leq i \leq \ell$. The proof is by induction on $i$; we show the inductive step. In other words, we assume that Equation~\ref{perm} holds for all $1 \leq i_0 \leq i-1$ and show it holds for $i$.
Set $J := \tau^{-1} \lbrace 1, \ldots, i-1 \rbrace$ and $J' := \lbrace 1, \ldots, \ell \rbrace \setminus J$. Also set $I := \sigma^{-1} \lbrace 1, \ldots, i-1 \rbrace$ and $I' := \lbrace 1, \ldots, \ell \rbrace \setminus I$. By the inductive hypothesis, the multisets \[\lbrace (\beta_j, \xi_j) \rbrace_{j \in J} \quad \text{and} \quad \lbrace (\alpha_j, \nu_j) \rbrace_{j \in I} \] are coincident.
Therefore, the multisets \[\lbrace (\beta_j, \xi_j) \rbrace_{j \in J'} \quad \text{and} \quad \lbrace (\alpha_j, \nu_j) \rbrace_{j \in I'} \] are coincident, and there exists a bijection $\zeta \colon I' \rightarrow J'$ such that $(\beta_j, \xi_j) = (\alpha_{\zeta^{-1}(j)}, \nu_{\zeta^{-1}(j)})$ for all $j \in J'$.
Then \[(\mathcal{C}_{-1}(\beta, \xi, j, J, J' \setminus \lbrace j \rbrace), \beta_j, \xi_j) = (\mathcal{C}_{-1}(\alpha, \nu, \zeta^{-1}(j), I, I' \setminus \lbrace \zeta^{-1}(j) \rbrace), \alpha_{\zeta^{-1}(j)}, \nu_{\zeta^{-1}(j)})\] for all $j \in J'$.
Hence the lexicographically maximal value of the function \[j \mapsto (\mathcal{C}_{-1}(\beta, \xi, j, J, J' \setminus \lbrace j \rbrace), \beta_j, \xi_j)\] over the domain $J'$ coincides with the lexicographically maximal value of the function \[j \mapsto (\mathcal{C}_{-1}(\alpha, \nu, j, I, I' \setminus \lbrace j \rbrace), \alpha_j, \nu_j) \] over the domain $I'$.
By definition of $\mathcal{R}_{-1}$, the former value is attained at $j = \tau^{-1}(i)$, and the latter value is attained at $j = \sigma^{-1}(i)$. The result follows.
\end{proof}
\begin{prop} \label{eworks}
Let $(\alpha, \nu, \epsilon) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \lbrace \pm 1 \rbrace$. Then $\mathcal{A}(\alpha, \nu, \epsilon) \in \overline{E}(D_{\operatorname{dom}(\alpha)})$.
\end{prop}
\begin{proof}
Set $(X, Y) := \mathcal{A}(\alpha, \nu, \epsilon)$. We first show $(X, Y) \in D_{\operatorname{dom}(\alpha)} \times D_{\operatorname{dom}(\alpha)}$. The proof is by induction on $\max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace$; we show the inductive step. Since $X$ has an entry in the $i^{\text{th}}$ row and $j^{\text{th}}$ column if and only if $Y$ does for all $(i, j) \in \mathbb{N} \times \mathbb{N}$, it suffices to show $X \in D_{\operatorname{dom}(\alpha)}$. By construction, the first column of $X$ has $\alpha^*_1$ entries. Applying the inductive hypothesis, viz., $(X^{(x)}, Y^{(x)}) \in D_{\operatorname{dom}(\alpha^{(x)})} \times D_{\operatorname{dom}(\alpha^{(x)})}$, we find the $(j'+1)^{\text{th}}$ column of $X$ has $\sum_{x=1}^{\mathcal{k}} (\alpha^{(x)})^*_{j'} = \alpha^*_{j'+1}$ entries for all $1 \leq j' \leq \max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace -1$. We conclude that $X$ is of shape $\alpha$.
To see that $(X, Y) \in \overline{E}(D_{\operatorname{dom}(\alpha)})$, we show $EX = Y$. Again the proof is by induction on $\max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace$, and we show the inductive step. By construction, $X^1_i + \alpha^*_1 - 2i + 1 = Y^1_i$ for all $1 \leq i \leq \ell$. Let $(i', j') \in \mathbb{N} \times \mathbb{N}$ such that $X^{(x)}$ and $Y^{(x)}$ each have an entry in the ${i'}^{\text{th}}$ row and ${j'}^{\text{th}}$ column, and set $\mathcal{i}$ so that $X^{(x)}_{i', j'} = (X^{(x)})^{j'}_{\mathcal{i}}$. Note that \[X_{i_{(x, i')}, j'+1} = X^{j'+1}_{\mathcal{i} + \sum_{x'=1}^{x-1} (\alpha^{(x')})^*_{j'}}.\]
Therefore,
\begin{align*}
EX_{i_{(x, i')}, j'+1} & = X_{i_{(x, i')}, j'+1} + \alpha^*_{j'+1} - 2\left(\mathcal{i} + \sum_{x'=1}^{x-1} (\alpha^{(x')})^*_{j'}\right) + 1 \\ & = X^{(x)}_{i', j'} + \sum_{x'=1}^{x-1} (\alpha^{(x')})^*_{j'} - \sum_{x' = x+1}^{\mathcal{k}} (\alpha^{(x')})^*_{j'} + \alpha^*_{j'+1} - 2\mathcal{i}- 2 \sum_{x'=1}^{x-1} (\alpha^{(x')})^*_{j'} + 1 \\ & = X^{(x)}_{i', j'} - \sum_{x'=1}^{x-1} (\alpha^{(x')})^*_{j'} - \sum_{x' = x+1}^{\mathcal{k}} (\alpha^{(x')})^*_{j'} + \sum_{x'=1}^{\mathcal{k}} (\alpha^{(x')})^*_{j'} - 2\mathcal{i} + 1 \\ & = X^{(x)}_{i', j'} + (\alpha^{(x)})^*_{j'} - 2\mathcal{i} + 1 = Y^{(x)}_{i', j'} = Y_{i_{(x,i')}, j'+1},
\end{align*}
where the second-to-last equality follows from the inductive hypothesis.
\end{proof}
For a diagram $X \in D_{\ell}$, let $\#(X,i)$ be the number of boxes in the $i^{\text{th}}$ row of $X$, and set $\Sigma(X, i) := \sum_{j=1}^{\#(X,i)} X_{i,j}$.
\begin{prop} \label{multi}
Set $(X, Y) := \mathcal{A}(\alpha, \nu, \epsilon)$. For all $1 \leq i \leq \ell$, set $\beta_i := \#(X,i)$ and $\xi_i := \Sigma(X,i)$. Then the multisets \[\lbrace (\beta_1, \xi_1), \ldots, (\beta_{\ell}, \xi_{\ell}) \rbrace \quad \text{and} \quad \lbrace (\alpha_1, \nu_1), \ldots, (\alpha_{\ell}, \nu_{\ell}) \rbrace\] are coincident.
\end{prop}
\begin{proof}
We prove the assertion for $\epsilon = -1$. The proof is by induction on $\max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace$; we show the inductive step. By Proposition~\ref{permute}, we may assume $\mathcal{R}_{-1}(\alpha, \nu) = \operatorname{id}$ without loss of generality.
Let $i \in \lbrace 1, \ldots, \ell \rbrace$. Set $\iota := \mathcal{U}_{-1}(\alpha, \nu, \operatorname{id})$. We first claim that $\alpha_i = 1$ entails $\iota_i = \nu_i$. To see this, suppose $\alpha_i = 1$. Then $\mathcal{C}_{-1}(\alpha, \nu, i, I_a, I_b) = \nu_i - |I_a| + |I_b|$. Thus,
\begin{align*}
\iota_i & = \mathcal{C}_{-1}(\alpha, \nu, i, \lbrace 1, \ldots, i-1 \rbrace, \lbrace i+1, \ldots, \ell \rbrace) - \ell + 2i - 1 \\ & = \nu_i - (i-1) + (\ell -i) - \ell + 2i -1 = \nu_i
\end{align*}
unless $\nu_i > \iota_{i-1}$. If indeed $\nu_i > \iota_{i-1}$, then let $i_0$ be minimal such that $\iota_{i-1} = \iota_{i_0}$. Since $\mathcal{R}_{-1}(\alpha, \nu) = \operatorname{id}$,
\begin{align*}
\nu_i + \ell - 2i_0 + 1 & = \mathcal{C}_{-1}(\alpha, \nu, i, \lbrace 1, \ldots, i_0-1 \rbrace, \lbrace i_0, i_0+1, \ldots, i-1, i+1, \ldots, \ell \rbrace) \\ & \leq \mathcal{C}_{-1}(\alpha, \nu, i_0, \lbrace 1, \ldots, i_0-1 \rbrace, \lbrace i_0+1, \ldots, \ell \rbrace) \\ & = \iota_{i_0} + \ell - 2i_0 + 1 < \nu_i + \ell - 2i_0 + 1.
\end{align*}
This is a contradiction, so $\iota_i = \nu_i$ for all $i$ such that $\alpha_i = 1$. It follows that \[(\alpha_i, \nu_i) = (1, \iota_i) = (\beta_i, \xi_i)\] for all $i$ such that $\alpha_i = 1$.
Thus, it suffices to show, for all $1 \leq x \leq \mathcal{k}$, that the multisets \[\left \lbrace \big (\beta_{i_{(x, 1)}}, \xi_{i_{(x, 1)}} \big ), \ldots, \big (\beta_{i_{(x, \ell_x)}}, \xi_{i_{(x, \ell_x)}} \big) \right \rbrace \] and \[ \left \lbrace \big (\alpha_{i_{(x, 1)}}, \nu_{i_{(x, 1)}} \big ), \ldots, \big (\alpha_{i_{(x, \ell_x)}}, \nu_{i_{(x, \ell_x)}} \big ) \right \rbrace \] are coincident. For all $1 \leq i' \leq \ell_x$, set $\beta^{(x)}_{i'} := \#(X^{(x)}, i')$ and $\xi^{(x)}_{i'} := \Sigma(X^{(x)}, i')$. Note that
\begin{align*}
\xi_{i_{(x, i')}} & = \iota_{i_{(x, i')}} + \xi^{(x)}_{i'} + \sum_{j' =1}^{\beta^{(x)}_{i'}} \left(\sum_{x' = 1}^{x-1} (\alpha^{(x')})^*_{j'} - \sum_{x' = x+1}^{\mathcal{k}} (\alpha^{(x')})^*_{j'}\right) \\ & = \iota_{i_{(x, i')}} + \xi^{(x)}_{i'} + \sum_{x' = 1}^{x-1} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \beta^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace - \sum_{x' = x+1}^{\mathcal{k}} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \beta^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace.
\end{align*}
Therefore, as multisets,
\begin{align*}
& \left \lbrace \big (\beta_{i_{(x, i')}}, \xi_{i_{(x, i')}} \big ) \right \rbrace_{i' =1}^{\ell_x} \\ & = \left \lbrace \Big (1 + \beta^{(x)}_{i'}, \iota_{i_{(x, i')}} + \xi^{(x)}_{i'} + \sum_{x' = 1}^{x-1} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \beta^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace - \sum_{x' = x+1}^{\mathcal{k}} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \beta^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace \Big) \right \rbrace_{i'=1}^{\ell_x} \\ & = \left \lbrace \Big (1 + \alpha^{(x)}_{i'}, \iota_{i_{(x, i')}} + \hat{\nu}^{(x)}_{i'} + \sum_{x' = 1}^{x-1} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace - \sum_{x' = x+1}^{\mathcal{k}} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace \Big) \right \rbrace_{i'=1}^{\ell_x} \\ & = \left \lbrace \big (\alpha_{i_{(x, i')}}, \iota_{i_{(x, i')}} + \nu^{(x)}_{i'} \big) \right \rbrace_{i'=1}^{\ell_x} = \left \lbrace \big (\alpha_{i_{(x, i')}}, \nu_{i_{(x, i')}} \big) \right \rbrace_{i'=1}^{\ell_x},
\end{align*}
where we obtain the second equality by recalling $\iota_{i_{(x, 1)}} = \cdots = \iota_{i_{(x, \ell_x)}}$ and applying the inductive hypothesis.
\end{proof}
\begin{cor} \label{multiforward}
Set $\sigma := \mathcal{R}_{-1}(\alpha, \nu)$, and $\iota := \mathcal{U}_{-1}(\alpha, \nu, \sigma)$. Then the multisets \[\left \lbrace (\beta_i - 1, \xi_i - \iota_i) : \beta_i > 1 \right \rbrace \quad \text{and} \quad \left \lbrace (\alpha_i - 1, \nu_i - \iota_{\sigma(i)}) : \alpha_i > 1 \right \rbrace \] are coincident.
\end{cor}
\begin{proof}
Maintain the notation of Proposition~\ref{multi}. By Proposition~\ref{multi}, for all $1 \leq x \leq \mathcal{k}$, \[\left \lbrace \big(\beta^{(x)}_{i'}, \xi^{(x)}_{i'}\big) \right \rbrace_{i'=1}^{\ell_x} = \left \lbrace \big(\alpha^{(x)}_{i'}, \hat{\nu}^{(x)}_{i'}\big) \right\rbrace_{i'=1}^{\ell_x}\] is an equality of multisets.
Therefore, as multisets,
\begin{align*}
& \left \lbrace \big (\beta_{i_{(x, i')}} - 1, \xi_{i_{(x, i')}} - \iota_{i_{(x,i')}} \big ) \right \rbrace_{i' =1}^{\ell_x} \\ & = \left \lbrace \Big (\beta^{(x)}_{i'}, \xi^{(x)}_{i'} + \sum_{x' = 1}^{x-1} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \beta^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace - \sum_{x' = x+1}^{\mathcal{k}} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \beta^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace \Big) \right \rbrace_{i'=1}^{\ell_x} \\ & = \left \lbrace \Big (\alpha^{(x)}_{i'}, \hat{\nu}^{(x)}_{i'} + \sum_{x' = 1}^{x-1} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace - \sum_{x' = x+1}^{\mathcal{k}} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace \Big) \right \rbrace_{i'=1}^{\ell_x} \\ & = \left \lbrace \big (\alpha^{(x)}_{i'}, \nu^{(x)}_{i'} \big) \right \rbrace_{i'=1}^{\ell_x} = \left \lbrace \big (\alpha_{\sigma^{-1}(i_{(x, i')})} - 1, \nu_{\sigma^{-1}(i_{(x, i')})} - \iota_{i_{(x, i')}} \big) \right \rbrace_{i'=1}^{\ell_x}.
\end{align*}
Taking the union of both sides over $1 \leq x \leq \mathcal{k}$, we obtain the equality of multisets \[\left \lbrace(\beta_i - 1, \xi_i - \iota_i) : \beta_i > 1 \right \rbrace = \left \lbrace (\alpha_{\sigma^{-1}(i)} -1, \nu_{\sigma^{-1}(i)} - \iota_i) : \alpha_{\sigma^{-1}(i)} > 1 \right \rbrace,\] whence the result follows.
\end{proof}
Fix again a partition $\alpha = [\alpha_1, \ldots, \alpha_{\ell}]$ of $n$ with conjugate partition $\alpha^* = [\alpha^*_1, \ldots, \alpha^*_s]$. Recall from section 3 that a diagram pair $(X, Y) \in \overline{E}(D_{\alpha})$ encodes three sequences related to objects in $\mathfrak{D}$ --- $\kappa(X)$, $h(X)$, and $\eta(Y)$. If $(X,Y)$ is an output of the weight-diagrams version of the algorithm, then all three sequences carry crucial information: $\kappa(X)$ returns the input; $h(X)$ is a weight in $\Lambda^+_{\alpha, \nu}$ such that $||h(X) + 2 \rho_{\alpha}||$ is minimal, and $\eta(Y)$ is the output of the Lusztig--Vogan bijection.
\begin{prop} \label{kap}
Let $\nu \in \Omega_{\alpha}$. Then $\kappa p_1 \mathcal{A}(\alpha, \nu) = \nu$. (Hence $hp_1\mathcal{A}(\alpha, \nu) \in \Lambda^+_{\alpha, \nu}$.)
\end{prop}
\begin{proof}
This is a direct consequence of Proposition~\ref{multi}. (The statement in parentheses follows from Theorem~\ref{decomp}.)
\end{proof}
\begin{thm} \label{mini}
Let $\nu \in \Omega_{\alpha}$. Then $||hp_1\mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}|| = \min \lbrace ||\bar{\mu} + 2 \rho_{\alpha}|| : \bar{\mu} \in \Lambda^+_{\alpha, \nu} \rbrace$.
\end{thm}
\begin{cor} \label{etz}
Let $\nu \in \Omega_{\alpha}$. Then $\operatorname{dom}(hp_1 \mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha}) = \gamma(\alpha, \nu)$.
\end{cor}
\begin{proof}
This follows from (the parenthetical statement in) Proposition~\ref{kap} and Theorem~\ref{mini}, as discussed in the introduction (cf. footnote 3).
\end{proof}
\begin{cor} \label{eta}
Let $\nu \in \Omega_{\alpha}$. Then $\eta p_2 \mathcal{A}(\alpha, \nu) = \gamma(\alpha, \nu)$.
\end{cor}
\begin{proof}
Recall from Equation~\ref{compat} that $\eta p_2 \mathcal{A}(\alpha, \nu) = \operatorname{dom}(hp_1 \mathcal{A}(\alpha, \nu) + 2 \rho_{\alpha})$.\footnote{Equation~\ref{compat} holds under the assumption that the entries of $X$ are weakly decreasing down each column, which is certainly the case if $X$ is distinguished (cf. condition (4) of Definition~\ref{dis}). We prove that $p_1 \mathcal{A}(\alpha, \nu)$ is distinguished in the next section (cf. Corollary~\ref{dist}). }
\end{proof}
Corollaries~\ref{etz} and ~\ref{eta} express that $\mathcal{A}$ computes the Lusztig--Vogan bijection. Just as Corollary~\ref{etz} follows from Proposition~\ref{kap} and Theorem~\ref{mini}, that $\mathfrak{A}$ computes the Lusztig--Vogan bijection (as expressed in Theorem~\ref{main}) follows from Corollary~\ref{inside} and the following theorem, which we deduce from Theorem~\ref{mini}.
\begin{thm} \label{thesame}
Let $\nu \in \Omega_{\alpha}$. Then $||\mathfrak{A}(\alpha, \nu) + 2 \rho_{\alpha}|| = \min \lbrace ||\bar{\mu} + 2 \rho_{\alpha} || : \bar{\mu} \in \Lambda^+_{\alpha, \nu} \rbrace$.
\end{thm}
\begin{proof}
Assume Theorem~\ref{mini} holds. Set $\mu := \mathfrak{A}(\alpha, \nu)$ and $\breve{\mu} := hp_1 \mathcal{A}(\alpha, \nu)$. Recall from Corollary~\ref{inside} that $\mu \in \Lambda^+_{\alpha, \nu}$ and from Proposition~\ref{kap} that $\breve{\mu} \in \Lambda^+_{\alpha, \nu}$. Note that
\begin{align} \label{first}
[\mu_1, \ldots, \mu_{\ell}] = [\breve{\mu}_1, \ldots, \breve{\mu}_{\ell}].
\end{align}
In other words, the first $\ell$ entries of $\mathfrak{A}(\alpha, \nu)$ agree with the first $\ell$ entries of $hp_1 \mathcal{A}(\alpha, \nu)$; both $\mathfrak{A}$ and $hp_1\mathcal{A}$ assign the same weight to the first factor $GL_{\alpha^*_1}$ of $L_{\alpha}$.
To prove the theorem, we induct on $s$. For the inductive step, set $\mu' := [\mu_{\ell + 1}, \ldots, \mu_n]$ and $\breve{\mu}' := [\breve{\mu}_{\ell+1}, \ldots, \breve{\mu}_n]$.
Maintain the notation of sections 2.1 and 2.2. By Corollary~\ref{inside}, $\mu' \in \Lambda^+_{\alpha', \nu'}$. We claim that $\breve{\mu}' \in \Lambda^+_{\alpha', \nu'}$. Indeed, since $(\alpha_i - 1, \nu_i - \mu_{\sigma(i)}) = (\alpha'_i, \nu'_i)$ for all $1 \leq i \leq \alpha^*_2$, Corollary~\ref{multiforward} (in view of Equation~\ref{first}) tells us that the multisets \[\lbrace \big(\beta_i - 1, \xi_i - \breve{\mu}_i \big) : \beta_i > 1 \rbrace \quad \text{and} \quad \lbrace (\alpha'_i, \nu'_i) \rbrace_{i=1}^{\alpha^*_2}\] are coincident.
Therefore, there exists a function $\zeta \colon \lbrace 1, \ldots, \alpha^*_2 \rbrace \rightarrow \lbrace 1, \ldots, \ell \rbrace$ such that $\beta_{\zeta(i)} > 1$ and $(\beta_{\zeta(i)} - 1, \xi_{\zeta(i)} - \breve{\mu}_{\zeta(i)}) = (\alpha'_i, \nu'_i)$ for all $1 \leq i \leq \alpha^*_2$. For all $1 \leq i \leq \alpha^*_2$ and $1 \leq j \leq \alpha'_i$, set $\nu'_{i,j} := X_{\zeta(i), j+1}$. Then the claim follows from Corollary~\ref{decamp}.
Thus, by the inductive hypothesis, \[||\mu' + 2 \rho_{\alpha'}|| = \min \lbrace || \bar{\mu}' + 2 \rho_{\alpha'}|| : \bar{\mu}' \in \Lambda^+_{\alpha', \nu'} \rbrace \leq ||\breve{\mu}' + 2 \rho_{\alpha'}||.\]
It follows that
\begin{align*}
||\mu + 2 \rho_{\alpha}||^2 & = ||[\mu_1 + \ell - 1, \ldots, \mu_{\ell} + 1 - \ell]||^2 + ||\mu' + 2 \rho_{\alpha'}||^2 \\ & \leq ||[\breve{\mu}_1 + \ell - 1, \ldots, \breve{\mu}_{\ell} + 1 - \ell]||^2 + ||\breve{\mu}' + 2 \rho_{\alpha'}||^2 \\ & = ||\breve{\mu} + 2 \rho_{\alpha}||^2 \\ & = \min \lbrace ||\bar{\mu} + 2 \rho_{\alpha}||^2 : \bar{\mu} \in \Lambda^+_{\alpha, \nu} \rbrace \\ & \leq ||\mu + 2 \rho_{\alpha}||^2,
\end{align*}
where the first inequality follows from Equation~\ref{first} and the third equality follows from Theorem~\ref{mini}.
We conclude that $||\mu + 2 \rho_{\alpha}|| = \min \lbrace ||\bar{\mu} + 2 \rho_{\alpha}|| : \bar{\mu} \in \Lambda^+_{\alpha, \nu} \rbrace$, as desired.
\end{proof}
It remains to prove Theorem~\ref{mini}. In the next section, we make good on our pledge to prove that the weight-diagrams version of our algorithm encompasses Achar's algorithm; in particular, we prove the following theorem, whence Theorem~\ref{mini} follows immediately.
\begin{thm} \label{mata}
Let $\nu \in \Omega_{\alpha}$. Then $p_1 \mathcal{A}(\alpha, \nu) = \mathsf{A}(\alpha, \nu)$.
\end{thm}
\begin{cor}
Theorem~\ref{mini} holds.
\end{cor}
\begin{proof}
Note that $||h\mathsf{A}(\alpha, \nu) + 2 \rho_{\alpha}|| = \min \lbrace ||\bar{\mu} + 2 \rho_{\alpha}|| : \bar{\mu} \in \Lambda^+_{\alpha, \nu} \rbrace$ (cf. Achar \cite{Acharj}, Corollary 8.9). Hence the claim follows from Theorem~\ref{mata}, which gives $p_1 \mathcal{A}(\alpha, \nu) = \mathsf{A}(\alpha, \nu)$.
\end{proof}
\eject
\section{Proof of Theorem~\ref{mata}}
The crux of the proof is a simple characterization of the diagram pairs that occur as outputs of the algorithm $\mathcal{A}$. These \textit{distinguished} diagram pairs are images under $\overline{E}$ of the \textit{distinguished} diagrams of Achar \cite{Acharj}. We start by defining distinguished diagrams and diagram pairs.
\begin{df}
Let $Y$ be a diagram of shape-class $\alpha$. The entry $Y^j_i$ is \textit{$E$-raisable} if $i=1$, or if $i > 1$ and $Y^j_{i-1} > Y^j_i + 2$. The entry $Y^j_i$ is \textit{$E$-lowerable} if $i = \alpha^*_j$, or if $i < \alpha^*_j$ and $Y^j_{i+1} < Y^j_i - 2$.
\end{df}
\begin{df} \label{dis}
Let $X \in D_{\alpha}$, and set $Y := EX$. Then the diagram $X$ and the diagram pair $(X,Y)$ are \textit{odd-distinguished} if the following four conditions hold.
\begin{enumerate}
\item $Y_{i,j+1} - Y_{i,j} \in \lbrace 0, (-1)^j \rbrace$.
\item For all $1 \leq j < j' \leq s$ such that $j \equiv j' \pmod 2$:
\begin{enumerate}
\item If $j$ and $j'$ are odd and $Y_{i,j} \leq Y_{i,j'} - 1$, then $Y_{i,j}$ is not $E$-raisable;
\item If $j$ and $j'$ are even and $Y_{i,j} \geq Y_{i, j'} + 1$, then $Y_{i,j}$ is not $E$-lowerable.
\end{enumerate}
\item For all $1 \leq j < j' \leq s$:
\begin{enumerate}
\item If $Y_{i,j} \leq Y_{i,j'} - 2$, then $Y_{i,j}$ is not $E$-raisable;
\item If $Y_{i,j} \geq Y_{i,j'} + 2$, then $Y_{i,j}$ is not $E$-lowerable.
\end{enumerate}
\item $Y^j_i - Y^j_{i+1} \geq 2$.
\end{enumerate}
\end{df}
\begin{df}
Let $X \in D_{\alpha}$, and set $Y := EX$. Then the diagram $X$ and the diagram pair $(X,Y)$ are \textit{even-distinguished} if the following four conditions hold.
\begin{enumerate}
\item $Y_{i,j+1} - Y_{i,j} \in \lbrace 0, (-1)^{j+1} \rbrace$.
\item For all $1 \leq j < j' \leq s$ such that $j \equiv j' \pmod 2$:
\begin{enumerate}
\item If $j$ and $j'$ are even and $Y_{i,j} \leq Y_{i,j'} - 1$, then $Y_{i,j}$ is not $E$-raisable;
\item If $j$ and $j'$ are odd and $Y_{i,j} \geq Y_{i, j'} + 1$, then $Y_{i,j}$ is not $E$-lowerable.
\end{enumerate}
\item For all $1 \leq j < j' \leq s$:
\begin{enumerate}
\item If $Y_{i,j} \leq Y_{i,j'} - 2$, then $Y_{i,j}$ is not $E$-raisable;
\item If $Y_{i,j} \geq Y_{i,j'} + 2$, then $Y_{i,j}$ is not $E$-lowerable.
\end{enumerate}
\item $Y^j_{i} - Y^j_{i+1} \geq 2$.
\end{enumerate}
\end{df}
We refer to odd-distinguished diagrams and diagram pairs as just \textit{distinguished}.
\begin{rem} \label{weak}
The definition of distinguished diagram in Achar \cite{Acharj} is weaker than ours inasmuch as it requires $Y^j_i - Y^j_{i+1} \geq 1$ rather than $Y^j_{i} - Y^j_{i+1} \geq 2$. However, Achar's definition of the $E$ map differs slightly from ours, so it does not suffice for our purposes to copy his definition wholesale. Our definition of distinguished ensures that, if $(X,Y)$ is distinguished, then $Y = EX$ under Achar's definition as well as ours --- so it guarantees that any diagram distinguished by our reckoning is distinguished by Achar's \textit{a fortiori}.
\end{rem}
To simplify our analysis of the algorithm, we define the \textit{row-partition} function, which is similar to the row-survival function, but does not discriminate between surviving and non-surviving rows.
\begin{df}
For all $(\alpha, \iota) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell}_{\operatorname{dom}}$, \[\mathcal{P}(\alpha, \iota) \colon \lbrace 1, \ldots, \ell \rbrace \rightarrow \lbrace 1, \ldots, \ell \rbrace \times \lbrace 1, \ldots, \ell \rbrace\] is given by \[\mathcal{P}(\alpha, \iota)(i) :=\big(|\lbrace \iota_{i'} : i' \leq i \rbrace|, |\lbrace i' \leq i : \iota_{i'} = \iota_i \rbrace | \big).\]
\end{df}
\begin{rem}
After the rows of a blank diagram of shape $\alpha$ have been permuted according to $\sigma \in \mathfrak{S}_{\ell}$ and the first column of the permuted diagram is filled in with the entries of $\iota$, the row-survival function $\mathcal{S}(\alpha, \sigma, \iota)$ tells us, for each surviving row, which branch it belongs to, and its position within that branch. (For each row that does not survive, the row-survival function still records a branch, but records its position within that branch as $0$.)
If we construe each branch as comprising all rows with a particular first-column entry, not merely the surviving such rows, then, for each row, the row-partition function $\mathcal{P}(\alpha, \iota)$ tells us which branch it belongs to, and its position among all rows within that branch.
\end{rem}
\begin{df}
Given diagrams $Z^{(1)}, \ldots, Z^{(k)}$ such that $Z^{(x)} \in D_{\ell_x}$ for all $1 \leq x \leq k$, construct $Z \in D_{\ell_1 + \cdots + \ell_k}$ as follows: For all $(i, j) \in \mathbb{N} \times \mathbb{N}$ such that $Z^{(x)}$ has an entry in the $i^{\text{th}}$ row and $j^{\text{th}}$ column, $Z_{i + \sum_{x'=1}^{x-1} \ell_{x'}, j} := Z^{(x)}_{i, j}$. The \textit{diagram-concatenation} function $\operatorname{Cat} \colon D_{\ell_1} \times \cdots \times D_{\ell_k} \rightarrow D_{\ell_1 + \cdots + \ell_k}$ is given by $(Z^{(1)}, \ldots, Z^{(k)}) \mapsto Z$.
\end{df}
\begin{rem}
Diagram concatenation is transitive: If $Z^{(x)} = \operatorname{Cat} (Z^{(x, 1)}, \ldots, Z^{(x, \omega_x)})$ for all $x$, then \[Z = \operatorname{Cat}\big(Z^{(1, 1)}, \ldots, Z^{(1, \omega_1)}, \ldots, Z^{(k, 1)}, \ldots, Z^{(k, \omega_k)}\big).\]
\end{rem}
Let $(\alpha, \nu, \epsilon) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \lbrace \pm 1 \rbrace$. Set $\sigma := \mathcal{R}_{\epsilon}(\alpha, \nu)$. Set $\iota := \mathcal{U}_{\epsilon}(\alpha, \nu, \sigma)$. For all $(x, i')$ in the image of $\mathcal{P}(\alpha, \iota)$, set $p_{(x,i')} := \mathcal{P}(\alpha, \iota)^{-1}(x, i')$.
For all $1 \leq x \leq \mathcal{k}$, set \[\ell^{\circ}_x := \max \lbrace i' : (x,i') \in \mathcal{P}(\alpha, \iota) \lbrace 1, \ldots, \ell \rbrace \rbrace. \]
Set \[\mathcal{a}^{(x)} := \left[\alpha_{\sigma^{-1}(p_{(x,1)})}, \ldots, \alpha_{\sigma^{-1}(p_{(x,\ell^{\circ}_x)})}\right] \quad \text{and} \quad \mathcal{n}^{(x)} := \left[\nu_{\sigma^{-1}(p_{(x,1)})}, \ldots, \nu_{\sigma^{-1}(p_{(x,\ell^{\circ}_x)})}\right].\]
For all $1 \leq i' \leq \ell^{\circ}_x$, set \[\hat{\mathcal{n}}^{(x)}_{i'} := \mathcal{n}^{(x)}_{i'} - \sum_{x'=1}^{x-1} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \mathcal{a}^{(x)}_{i'}, \mathcal{a}^{(x')}_{i_0} \right \rbrace + \sum_{x'=x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \mathcal{a}^{(x)}_{i'}, \mathcal{a}^{(x')}_{i_0} \right \rbrace.\]
Then set $\hat{\mathcal{n}}^{(x)} := \left[ \hat{\mathcal{n}}^{(x)}_{1}, \ldots, \hat{\mathcal{n}}^{(x)}_{\ell^{\circ}_x} \right]$ and $\left (\mathcal{X}^{(x)}, \mathcal{Y}^{(x)} \right) := \mathcal{A} \left (\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, \epsilon \right)$.
\begin{lem} \label{collapse}
Set $(X,Y) := \mathcal{A}(\alpha, \nu, \epsilon)$. Then $Y = \operatorname{Cat}(\mathcal{Y}^{(1)}, \ldots, \mathcal{Y}^{(\mathcal{k})})$.
\end{lem}
\begin{proof}
We show the claim for $\epsilon = -1$ only. By Proposition~\ref{permute}, we may assume $\sigma = \operatorname{id}$ without loss of generality.
Fix $1 \leq x \leq \mathcal{k}$. Set $\mathcal{s}_{(x)} := \mathcal{R}_{-1}(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)})$ and $\mathcal{m}_{(x)} := \mathcal{U}_{-1}(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, \mathcal{s}_{(x)})$. We claim that $\mathcal{s}_{(x)} = \operatorname{id}$, which is to say that $\mathcal{s}_{(x)}(i') = i'$ for all $1 \leq i' \leq \ell^{\circ}_x$. The proof is by induction on $i'$; we show the inductive step.
Set $J := \lbrace 1, \ldots, i'-1 \rbrace$ and $J' := \lbrace 1, \ldots, \ell^{\circ}_x \rbrace \setminus J$. For all $I \subset \lbrace 1, \ldots, \ell^{\circ}_x \rbrace$, set $p^{(x)}_I := \lbrace p_{(x, i_0)} : i_0 \in I \rbrace$. Also set $\mathcal{Q}_{<x} := \lbrace p_{(x', i_0)} : x' < x \rbrace$ and $\mathcal{Q}_{>x} := \lbrace p_{(x', i_0)} : x' > x \rbrace$.
For all $j \in J'$,
\begin{align*}
& \mathcal{n}^{(x)}_{j} - \sum_{x'=1}^{x-1} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \mathcal{a}^{(x)}_{j}, \mathcal{a}^{(x')}_{i_0} \right \rbrace + \sum_{x'=x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \mathcal{a}^{(x)}_{j}, \mathcal{a}^{(x')}_{i_0} \right \rbrace \\ & - \sum_{i_0 \in J} \min \left \lbrace \mathcal{a}^{(x)}_{j}, \mathcal{a}^{(x)}_{i_0} \right \rbrace + \sum_{i_0 \in J' \setminus \lbrace j \rbrace} \min \left \lbrace \mathcal{a}^{(x)}_{j}, \mathcal{a}^{(x)}_{i_0} \right \rbrace \\ & = \nu_{p_{(x,j)}} - \sum_{i \in \mathcal{Q}_{<x} \cup p^{(x)}_J} \min \lbrace \alpha_{p_{(x,j)}}, \alpha_i \rbrace + \sum_{i \in \mathcal{Q}_{>x} \cup p^{(x)}_{J' \setminus \lbrace j \rbrace}} \min \lbrace \alpha_{p_{(x,j)}}, \alpha_i \rbrace.
\end{align*}
Therefore,
\begin{align} \label{pro}
\mathcal{C}_{-1}\big(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, j, J, J' \setminus \lbrace j \rbrace\big) = \mathcal{C}_{-1}\big(\alpha, \nu, p_{(x,j)}, \mathcal{Q}_{<x} \cup p^{(x)}_J, \mathcal{Q}_{>x} \cup p^{(x)}_{J' \setminus \lbrace j \rbrace}\big).
\end{align}
Note that $\mathcal{Q}_{<x} \cup p^{(x)}_J = \lbrace 1, \ldots, p_{(x,i')} - 1 \rbrace$. Since $\sigma(p_{(x,i')}) = p_{(x,i')}$, it follows that the lexicographically maximal value of the function \[j \mapsto \left(\mathcal{C}_{-1}\big(\alpha, \nu, p_{(x,j)}, \mathcal{Q}_{<x} \cup p^{(x)}_J, \mathcal{Q}_{>x} \cup p^{(x)}_{J' \setminus \lbrace j \rbrace}\big), \alpha_{p_{(x,j)}}, \nu_{p_{(x,j)}} \right)\] over the domain $J'$ is attained at $j = i'$.
Thus, the lexicographically maximal value of the function \[j \mapsto \left(\mathcal{C}_{-1}\big(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, j, J, J' \setminus \lbrace j \rbrace\big), \mathcal{a}^{(x)}_{j}, \mathcal{n}^{(x)}_{j} \right)\] over the domain $J'$ is attained at $j = i'$.
Since $\hat{\mathcal{n}}^{(x)}_j - \mathcal{n}^{(x)}_j$ as a function of $j$ depends only on $\mathcal{a}^{(x)}_j$, the lexicographically maximal value of the function \[j \mapsto \left(\mathcal{C}_{-1}\big(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, j, J, J' \setminus \lbrace j \rbrace\big), \mathcal{a}^{(x)}_{j}, \hat{\mathcal{n}}^{(x)}_{j} \right)\] over the domain $J'$ is also attained at $j = i'$.
Since $i' \in J'$ is numerically minimal, it follows that $\mathcal{s}^{(x)}(i') = i'$, as desired.
We next claim that
\begin{align} \label{art}
X_{p_{(x,i')}, 1} = \mathcal{X}^{(x)}_{i', 1} + \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} - \sum_{x'=x+1}^{\mathcal{k}} \ell^{\circ}_{x'}.
\end{align}
Again the proof is by induction on $i'$. For all $1 \leq i' \leq \ell^{\circ}_x$, we see from Equation~\ref{pro} that
\begin{align*}
& \mathcal{C}_{-1}(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, i', \lbrace 1, \ldots, i'-1 \rbrace, \lbrace i'+1, \ldots, \ell^{\circ}_x \rbrace) \\ & = \mathcal{C}_{-1}(\alpha, \nu, p_{(x,i')}, \lbrace 1, \ldots, p_{(x,i')} -1 \rbrace, \lbrace p_{(x,i')} + 1, \ldots, \ell \rbrace).
\end{align*}
Denote the value $\iota_{p_{(x,1)}} = \cdots = \iota_{p_{(x, \ell^{\circ}_x)}}$ by $\iota^{\circ}_x$.
Note that \[\iota^{\circ}_x = \mathcal{C}_{-1}(\alpha, \nu, p_{(x,1)}, \lbrace 1, \ldots, p_{(x,1)} -1 \rbrace, \lbrace p_{(x,1)} + 1, \ldots, \ell \rbrace) - \ell + 2p_{(x,1)} - 1,\] for otherwise $\iota^{\circ}_{x} = \iota^{\circ}_{x-1}$, which contradicts the definition of $\mathcal{P}(\alpha, \iota)$.
Hence
\begin{align*}
\mathcal{X}_{1,1}^{(x)} & = \mathcal{C}_{-1}(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, 1, \varnothing, \lbrace 2, \ldots, \ell^{\circ}_x \rbrace) - \ell^{\circ}_x + 1 \\ & = \mathcal{C}_{-1}(\alpha, \nu, p_{(x,1)}, \lbrace 1, \ldots, p_{(x,1)} -1 \rbrace, \lbrace p_{(x,1)} + 1, \ldots, \ell \rbrace) - \ell^{\circ}_x + 1 \\ & = \iota^{\circ}_x + \ell - 2(p_{(x,1)} - 1) - \ell^{\circ}_x \\ & = X_{p_{(x,1)}, 1} - \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} + \sum_{x' = x+1}^{\mathcal{k}} \ell^{\circ}_{x'},
\end{align*}
which proves the base case.
For the inductive step, note that \[\iota^{\circ}_x \leq \mathcal{C}_{-1}(\alpha, \nu, p_{(x,i')}, \lbrace 1, \ldots, p_{(x,i')} -1 \rbrace, \lbrace p_{(x,i')} + 1, \ldots, \ell \rbrace) - \ell + 2p_{(x,i')} - 1.\]
Thus,
\begin{align*}
& \mathcal{C}_{-1}(\mathcal{a}^{(x)}, \hat{\mathcal{n}}^{(x)}, i', \lbrace 1, \ldots, i' -1 \rbrace, \lbrace i'+1, \ldots, \ell^{\circ}_x \rbrace) - \ell^{\circ}_x + 2i' - 1 \\ & = \mathcal{C}_{-1}(\alpha, \nu, p_{(x,i')}, \lbrace 1, \ldots, p_{(x,i')} -1 \rbrace, \lbrace p_{(x,i')} + 1, \ldots, \ell \rbrace) - \ell^{\circ}_x + 2i' - 1 \\ & \geq \iota^{\circ}_x + \ell - 2(p_{(x,i')} -i') - \ell^{\circ}_x \\ & = X_{p_{(x,i'-1)}, 1} - \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} + \sum_{x' = x+1}^{\mathcal{k}} \ell^{\circ}_{x'} \\ & = \mathcal{X}^{(x)}_{i'-1, 1},
\end{align*}
where the last equality follows from the inductive hypothesis.
We conclude that
\begin{align*}
\mathcal{X}^{(x)}_{i', 1} = \mathcal{X}^{(x)}_{i'-1, 1} & = X_{p_{(x,i'-1)}, 1} - \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} + \sum_{x' = x+1}^{\mathcal{k}} \ell^{\circ}_{x'} \\ & = X_{p_{(x,i')}, 1} - \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} + \sum_{x' = x+1}^{\mathcal{k}} \ell^{\circ}_{x'}.
\end{align*}
This establishes Equation~\ref{art}, which implies $Y_{p_{(x,i')}, 1} = \mathcal{Y}^{(x)}_{i', 1}$, proving the result for the first column of $Y$.
We turn now to the successive columns. By Equation~\ref{art}, \[\mathcal{m}^{(x)}_1 = \cdots = \mathcal{m}^{(x)}_{\ell^{\circ}_x} = \iota^{\circ}_x - \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} + \sum_{x' = x+1}^{\mathcal{k}} \ell^{\circ}_{x'}.\]
Set $\mathcal{f}_x := \mathcal{S}(\mathcal{a}^{(x)}, \operatorname{id}, \mathcal{m}^{(x)})$ and $\mathcal{f} := \mathcal{S}(\alpha, \operatorname{id}, \iota)$. Suppose that $\ell_x > 0$. Note that \[p_{(x, \mathcal{f}_x^{-1}(1,i'))} = \mathcal{f}^{-1}(x, i')\] for all $1 \leq i' \leq \ell_x$.
Set \[({\mathcal{a}^{(x)}})' := \left [\mathcal{a}^{(x)}_{\mathcal{f}_x^{-1}(1,1)} - 1, \ldots, \mathcal{a}^{(x)}_{\mathcal{f}_x^{-1}(1, \ell_x)} - 1 \right ]\] and \[({\hat{\mathcal{n}}^{(x)}})' := \left [\hat{\mathcal{n}}^{(x)}_{\mathcal{f}_x^{-1}(1,1)} - \mathcal{m}^{(x)}_{\mathcal{f}_x^{-1}(1,1)}, \ldots, \hat{\mathcal{n}}^{(x)}_{\mathcal{f}_x^{-1}(1,\ell_x)} - \mathcal{m}^{(x)}_{\mathcal{f}_x^{-1}(1,\ell_x)}\right].\]
Then set \[\left((\mathcal{X}^{(x)})', (\mathcal{Y}^{(x)})'\right) := \mathcal{A} \left(({\mathcal{a}^{(x)}})', ({\hat{\mathcal{n}}^{(x)}})', 1 \right).\]
Since \[({\mathcal{a}^{(x)}})'_{i'} = \mathcal{a}^{(x)}_{\mathcal{f}_x^{-1}(1,i')} - 1 = \alpha_{\mathcal{f}^{-1}(x,i')} - 1 = \alpha^{(x)}_{i'}\] and
\begin{align*}
& ({\hat{\mathcal{n}}^{(x)}})'_{i'} = \hat{\mathcal{n}}^{(x)}_{\mathcal{f}_x^{-1}(1,i')} - \mathcal{m}^{(x)}_{\mathcal{f}_x^{-1}(1,i')} \\ & = \nu_{\mathcal{f}^{-1}(x,i')} - \sum_{x'=1}^{x-1} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \alpha_{\mathcal{f}^{-1}(x,i')}, \alpha_{p_{(x',i_0)}} \right \rbrace + \sum_{x'=x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \alpha_{\mathcal{f}^{-1}(x,i')}, \alpha_{p_{(x',i_0)}} \right \rbrace \\ & - \iota^{\circ}_x + \sum_{x'=1}^{x-1} \ell^{\circ}_{x'} - \sum_{x'=x+1}^{\mathcal{k}} \ell^{\circ}_{x'} \\ & = \nu^{(x)}_{i'} - \sum_{x'=1}^{x-1} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \alpha_{\mathcal{f}^{-1}(x,i')} - 1, \alpha_{p_{(x',i_0)}} - 1 \right \rbrace + \sum_{x'=x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell^{\circ}_{x'}} \min \left \lbrace \alpha_{\mathcal{f}^{-1}(x,i')} - 1, \alpha_{p_{(x',i_0)}} - 1 \right \rbrace \\ & = \nu^{(x)}_{i'} - \sum_{x'=1}^{x-1} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha_{\mathcal{f}^{-1}(x', i_0)} - 1 \right \rbrace + \sum_{x'=x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha_{\mathcal{f}^{-1}(x', i_0)} - 1\right \rbrace \\ & = \nu^{(x)}_{i'} - \sum_{x'=1}^{x-1} \sum_{i_0 =1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace + \sum_{x'=x+1}^{\mathcal{k}} \sum_{i_0 = 1}^{\ell_{x'}} \min \left \lbrace \alpha^{(x)}_{i'}, \alpha^{(x')}_{i_0} \right \rbrace = \hat{\nu}^{(x)}_{i'},
\end{align*}
it follows that $((\mathcal{X}^{(x)})', (\mathcal{Y}^{(x)})') = (X^{(x)}, Y^{(x)})$.
Thus, $Y_{p_{(x, \mathcal{f}_x^{-1}(1,i'))}, j' + 1} = Y_{\mathcal{f}^{-1}(x,i'), j' + 1} = Y^{(x)}_{i',j'} = (\mathcal{Y}^{(x)})'_{i',j'} = \mathcal{Y}^{(x)}_{\mathcal{f}_x^{-1}(1, i'), j' + 1}$ for all $j' \geq 1$. The result follows.
\end{proof}
\begin{thm} \label{differences}
Let $(\alpha, \nu, \epsilon) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell} \times \lbrace \pm 1 \rbrace$. Set $(X,Y) := \mathcal{A}(\alpha, \nu, \epsilon)$. Then $Y_{i, j+1} - Y_{i,j} \in \lbrace 0, \epsilon (-1)^{j+1} \rbrace$.
\end{thm}
\begin{proof}
The proof is by induction on $\max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace$. We show the inductive step for $\epsilon = -1$ only. Thus, we set $(X,Y) := \mathcal{A}(\alpha, \nu, -1)$ and prove $Y_{i, 2} - Y_{i, 1} \in \lbrace 0, -1 \rbrace$. The rest follows from the inductive hypothesis. To see this, set $\sigma := \mathcal{R}_{-1}(\alpha, \nu)$ and $\mu := \mathcal{U}_{-1}(\alpha, \nu, \sigma)$. Then note that \[Y_{i_{(x,i')}, j'+2} - Y_{i_{(x,i')},j'+1} = Y^{(x)}_{i', j'+1} - Y^{(x)}_{i', j'} \in \lbrace 0, (-1)^{j'+1} \rbrace\] for all $(x,i')$ in the image of $\mathcal{S}(\alpha, \sigma, \mu)$ such that $i' > 0$.
By Lemma~\ref{collapse}, we may assume $\mu_1 = \cdots = \mu_{\ell}$. Denote the common value by $\mu^{\circ}$. Furthermore, by Proposition~\ref{permute}, we may assume without loss of generality that $\sigma = \operatorname{id}$. Hence
\begin{align*}
\mu^{\circ} & = \mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2, \ldots, \ell \rbrace) - \ell + 1 \\ & = \left \lceil \frac{\nu_1 + \sum_{i = 2}^{\ell} \min \lbrace \alpha_1, \alpha_i \rbrace}{\alpha_1} \right \rceil - \ell + 1 \\ & = \left \lceil \frac{\nu_1 + \alpha^*_1 + \cdots + \alpha^*_{\alpha_1}}{\alpha_1} \right \rceil - \ell.
\end{align*}
It follows that \[Y_{i,1} = X_{i,1} + \ell - 2i + 1 = \left \lceil \frac{\nu_1 + \alpha^*_1 + \cdots + \alpha^*_{\alpha_1}}{\alpha_1} \right \rceil - 2i + 1.\]
Set $\mathcal{f} := \mathcal{S}(\alpha, \operatorname{id}, \mu)$. Set $\ell' := \alpha^*_2$. For all $1 \leq i' \leq \ell'$, set $i_{i'} := \mathcal{f}^{-1}(1, i')$. Set \[\alpha' := \left [\alpha_{i_1} - 1, \ldots, \alpha_{i_{\ell'}} - 1 \right] \quad \text{and} \quad \nu' := \left [\nu_{i_1} - \mu^{\circ}, \ldots, \nu_{i_{\ell'}} - \mu^{\circ} \right].\]
Set $\tau := \mathcal{R}_1 (\alpha', \nu')$. Set $\mu' := \mathcal{U}_1(\alpha', \nu', \tau)$. Additionally, set $(X', Y') := \mathcal{A}(\alpha', \nu', 1)$. Note that \[Y_{i_{i'},2} = Y'_{i',1} = X'_{i',1} + \ell' - 2i' + 1 = \mu'_{i'} + \ell' - 2i' + 1.\]
We claim that $Y_{i_{i'}, 2} - Y_{i_{i'}, 1} \in \lbrace 0, -1 \rbrace$ for all $1 \leq i' \leq \ell'$. The proof is by (backwards) induction on $i'$. For the inductive step, assume that the claim holds for all $i' + 1 \leq i_0 \leq \ell'$. Set $I_b := \tau^{-1} \lbrace i' + 1, \ldots, \ell' \rbrace$ and $I'_b := \lbrace 1, \ldots, \ell' \rbrace \setminus I_b$. To show the claim holds for $i'$, we split into two cases.
\begin{enumerate}
\item If $i' = \ell'$, or $i' < \ell'$ and $\alpha_{i_{i'} + 1} = 1$, we show:
\begin{enumerate}
\item For all $\mathcal{i} \in I'_b$, \[\mathcal{C}_1 \left (\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b \right) \geq \left \lceil \frac{\nu_1 + \alpha^*_1 + \cdots + \alpha^*_{\alpha_1}}{\alpha_1} \right \rceil - 2i_{i'}.\]
\item There exists $\mathcal{i} \in I'_b$ such that \[\mathcal{C}_1 \left (\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b \right) \leq \left \lceil \frac{\nu_1 + \alpha^*_1 + \cdots + \alpha^*_{\alpha_1}}{\alpha_1} \right \rceil - 2i_{i'} + 1.\]
\end{enumerate}
\item If $i' < \ell'$ and $\alpha_{i_{i'} + 1} \geq 2$, we show (b) only.
\end{enumerate}
We first prove that the properties indicated are sufficient to obtain the desired; then we show that they indeed hold.
\begin{enumerate}
\item Suppose $i' = \ell'$, or $i' < \ell'$ and $\alpha_{i_{i'}+1} = 1$, and suppose (a) and (b) hold. We first claim that
\begin{align} \label{echo}
\mu'_{i'} = \mathcal{C}_1(\alpha', \nu', \tau^{-1}(i'), I'_b \setminus \lbrace \tau^{-1}(i') \rbrace, I_b) -\ell' + 2i' - 1.
\end{align}
If $i' = \ell'$, the claim follows immediately. If $i' < \ell'$ and $\alpha_{i_{i'}+1} = 1$, it suffices to show \[\mathcal{C}_1(\alpha', \nu', \tau^{-1}(i'), I'_b \setminus \lbrace \tau^{-1}(i') \rbrace, I_b) -\ell' + 2i' - 1 \geq \mu'_{i'+1}.\] Applying (a) and the inductive hypothesis, we find
\begin{align*}
\mathcal{C}_1(\alpha', \nu', \tau^{-1}(i'), I'_b \setminus \lbrace \tau^{-1}(i') \rbrace, I_b) & \geq \left \lceil \frac{\nu_1 + \alpha^*_1 + \cdots + \alpha^*_{\alpha_1}}{\alpha_1} \right \rceil - 2i_{i'} \\ & = \mu^{\circ} + \ell - 2i_{i'} \\ & = X_{i_{i' + 1}, 1} + \ell - 2i_{i'} \\ & = Y_{i_{i'+1},1} + 2i_{i'+1} - 2i_{i'}- 1 \\ & \geq Y_{i_{i'+1}, 1} + 3 \\ & \geq Y_{i_{i'+1}, 2} + 3 \\ & = \mu'_{i'+1} + \ell' - 2i' + 2.
\end{align*}
From Equation~\ref{echo}, invoking (a) yields
\begin{align*}
Y_{i_{i'}, 2} & = \mu'_{i'} + \ell' - 2i' + 1 \\ & = \mathcal{C}_1(\alpha', \nu', \tau^{-1}(i'), I'_b \setminus \lbrace \tau^{-1}(i') \rbrace, I_b) \\ & \geq \mu^{\circ} + \ell - 2i_{i'} \\ & = X_{i_{i'}, 1} + \ell - 2i_{i'} \\ & = Y_{i_{i'}, 1} - 1.
\end{align*}
Since the minimum value of the function given by \[\mathcal{i} \mapsto \mathcal{C}_1(\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b) \] is attained at $\mathcal{i} = \tau^{-1}(i')$, invoking (b) yields
\begin{align*}
Y_{i_{i'}, 2} & = \mathcal{C}_1(\alpha', \nu', \tau^{-1}(i'), I'_b \setminus \lbrace \tau^{-1}(i') \rbrace, I_b) \\ & \leq \mu^{\circ} + \ell - 2{i_{i'}} + 1 \\ & = X_{i_{i'}, 1} + \ell -2i_{i'} + 1 \\ & = Y_{i_{i'}, 1}.
\end{align*}
It follows that $Y_{i_{i'}, 2} - Y_{i_{i'}, 1} \in \lbrace 0, -1 \rbrace$.
\item Suppose $i' < \ell'$ and $\alpha_{i_{i'} + 1} \geq 2$, and suppose (b) holds. Note that $i_{i'+1} = i_{i'} + 1$. Thus,
\begin{align*}
Y_{i_{i'}, 2} & = \mu'_{i'} + \ell' - 2i' + 1 \\ & \geq \mu'_{i'+1} + \ell' - 2i' + 1 \\ & = Y_{i_{i'+1}, 2} + 2 \\ & = Y_{i_{i'} +1, 2} + 2 \\ & \geq Y_{i_{i'} + 1, 1} + 1 \\ & = X_{i_{i'} + 1, 1} + \ell - 2i_{i'}\\ & = X_{i_{i'}, 1} + \ell - 2i_{i'} \\ & = Y_{i_{i'}, 1} - 1.
\end{align*}
If Equation~\ref{echo} holds, then $Y_{i_{i'}, 2} \leq Y_{i_{i'}, 1}$ follows from invoking (b) as above. Otherwise, $\mu'_{i'} = \mu'_{i' + 1}$, and
\begin{align*}
Y_{i_{i'}, 2} & = \mu'_{i'} + \ell' - 2i' + 1 \\ & = \mu'_{i'+1} + \ell' - 2i' + 1 \\ & = Y_{i_{i'+1}, 2} + 2 \\ & = Y_{i_{i'} + 1, 2} + 2 \\ & \leq Y_{i_{i'} + 1, 1} + 2 \\ & = X_{i_{i'} +1, 1} + \ell - 2i_{i'} + 1 \\ & = X_{i_{i'}, 1} + \ell - 2i_{i'} + 1 \\ & = Y_{i_{i'}, 1}.
\end{align*}
\end{enumerate}
Therefore, it suffices to show (i) that (a) holds if $i' = \ell'$, or $i' < \ell'$ and $\alpha_{i_{i'} + 1} = 1$, and (ii) that (b) holds always. We prove these claims subject to the following assumption: For all $1 \leq i \leq \ell$ such that $\alpha_i = 1$, the set $I_i := \lbrace i_0 \in \lbrace 1, \ldots, \ell' \rbrace : i_{i_0} > i \rbrace$ is preserved under $\tau$. Finally, we justify the assumption.
\begin{enumerate}
\item[(i)] Suppose $i' = \ell'$, or $i' < \ell'$ and $\alpha_{i_{i'} + 1} = 1$. Assume for the sake of contradiction that there exists $\mathcal{i} \in I'_b$ such that \[\mathcal{C}_1 \left (\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b \right) < \mu^{\circ} + \ell - 2i_{i'}.\]
For all $1 \leq i, j \leq \ell$, set $m_{i,j} := \min \lbrace \alpha_i, \alpha_j \rbrace$.
Note that
\begin{align*}
\mathcal{C} \left (\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b \right) & = \nu'_{\mathcal{i}} - \sum_{i_0 \in I'_b \setminus \lbrace \mathcal{i} \rbrace} \min \left \lbrace \alpha'_{\mathcal{i}}, \alpha'_{i_0} \right \rbrace + \sum_{i_0 \in I_b} \min \left \lbrace \alpha'_{\mathcal{i}}, \alpha'_{i_0} \right \rbrace \\ & = \nu_{i_{\mathcal{i}}} - \mu^{\circ} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) + \alpha_{i_{\mathcal{i}}} - 1 - 2 (\ell' - i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}}.
\end{align*}
Hence \[\mathcal{C}_1(\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b) = \left \lfloor \frac{ \nu_{i_{\mathcal{i}}} - \mu^{\circ} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}}}{\alpha_{i_{\mathcal{i}}} - 1} \right \rfloor + 1.\]
Thus, \[\left \lfloor \frac{ \nu_{i_{\mathcal{i}}} - \mu^{\circ} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}}}{\alpha_{i_{\mathcal{i}}} - 1} \right \rfloor < \mu^{\circ} + \ell - 2i_{i'} - 1.\]
Since the right-hand side is an integer, it follows that
\begin{align*}
& \frac{ \nu_{i_{\mathcal{i}}} - \mu^{\circ} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}}}{\alpha_{i_{\mathcal{i}}} - 1} < \mu^{\circ} + \ell - 2i_{i'} - 1 \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} - (\ell - 2i_{i'} - 1)(\alpha_{i_{\mathcal{i}}} - 1)}{\alpha_{i_{\mathcal{i}}} - 1} \\ & < \mu^{\circ} \left( 1 + \frac{1}{\alpha_{i_{\mathcal{i}}} - 1} \right) \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} - (\ell - 2i_{i'} - 1)(\alpha_{i_{\mathcal{i}}} - 1)}{\alpha_{i_{\mathcal{i}}}} < \mu^{\circ} \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} - (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2 \ell' + 2 i' - 2i_{i'} - 1}{\alpha_{i_{\mathcal{i}}}} - \ell + 2i_{i'} + 1 < \mu^{\circ} \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} + (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - \alpha_{i_{\mathcal{i}}} - 2 \sum_{i_0 = 1}^{i_{\mathcal{i}} - 1} m_{i_{\mathcal{i}}, i_0} - 1}{\alpha_{i_{\mathcal{i}}}} \\ & + \frac{- 2 \sum_{i_0 = i_{\mathcal{i}} + 1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} - \ell + 2i_{i'} < \mu^{\circ} \\ & \Longleftrightarrow \frac{\mathcal{C}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, i_{\mathcal{i}}-1 \rbrace, \lbrace i_{\mathcal{i}} +1, \ldots, \ell \rbrace) -1}{\alpha_{i_{\mathcal{i}}}} \\ & + \frac{- 2 \sum_{i_0 = i_{\mathcal{i}} + 1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 
2i_{\mathcal{i}} < \mu^{\circ} + \ell - 2i_{\mathcal{i}}.
\end{align*}
We observe that $I_b = \lbrace i'+1, \ldots, \ell' \rbrace$. If $i' = \ell'$, this holds vacuously; otherwise, it follows from the assumption indicated above, for $\alpha_{i_{i'} + 1} =1$ implies $\lbrace i'+1, \ldots, \ell' \rbrace = I_{i_{i'} +1}$ is preserved under $\tau$. Since $\mathcal{i} \in I'_b$, we see also that $\mathcal{i} \leq i'$.
Thus,
\begin{align*}
& \frac{- 2 \sum_{i_0 = i_{\mathcal{i}} + 1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 2i_{\mathcal{i}} \\ & = \frac{-2 \sum_{i_0 = i_{\mathcal{i}} + 1}^{i_{i'}} m_{i_{\mathcal{i}}, i_0} - 2 \sum_{i_0 = i_{i'} + 1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2 \sum_{i_0 = i' +1}^{\ell'} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 2i_{\mathcal{i}} \\ & = \frac{-2 \sum_{i_0=i_{\mathcal{i}} + 1}^{i_{i'}} m_{i_{\mathcal{i}}, i_0}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 2i_{\mathcal{i}} \geq 0.
\end{align*}
Furthermore, \[\mu^{\circ} = \mu_{i_{\mathcal{i}}} \leq \mathcal{C}_{-1}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, i_{\mathcal{i}} - 1 \rbrace, \lbrace i_{\mathcal{i}} + 1, \ldots, \ell \rbrace) - \ell + 2i_{\mathcal{i}} - 1.\]
Set \[\mathcal{c} := \mathcal{C}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, i_{\mathcal{i}} - 1 \rbrace, \lbrace i_{\mathcal{i}} + 1, \ldots, \ell \rbrace).\]
Then
\begin{align*}
& \frac{\mathcal{c}-1}{\alpha_{i_{\mathcal{i}}}} < \left \lceil \frac{\mathcal{c}}{\alpha_{i_{\mathcal{i}}}} \right \rceil - 1 \\ & \Longleftrightarrow \mathcal{c} -1 < \alpha_{i_{\mathcal{i}}} \left \lceil \frac{\mathcal{c}}{\alpha_{i_{\mathcal{i}}}} \right \rceil - \alpha_{i_{\mathcal{i}}} \\ & \Longleftrightarrow \mathcal{c} \leq \alpha_{i_{\mathcal{i}}} \left \lceil \frac{\mathcal{c}}{\alpha_{i_{\mathcal{i}}}} \right \rceil - \alpha_{i_{\mathcal{i}}} \\ & \Longrightarrow \mathcal{c} < \alpha_{i_{\mathcal{i}}} \left( \frac{\mathcal{c}}{\alpha_{i_{\mathcal{i}}}} + 1 \right) - \alpha_{i_{\mathcal{i}}} = \mathcal{c},
\end{align*}
which is a contradiction.
\item[(ii)] If $\alpha_j > 1$ for all $1 \leq j < i_{i'}$, set $j_0 := 0$. Otherwise, let $j_0 < i_{i'}$ be maximal such that $\alpha_{j_0} = 1$. Analogously, if $\alpha_j > 1$ for all $i_{i'} < j \leq \ell$, set $j_1 := \ell + 1$. Otherwise, let $j_1 > i_{i'}$ be minimal such that $\alpha_{j_1} = 1$. Set $I_c := I_{j_0} \setminus I_{j_1}$. By assumption, $I_c$ is preserved under $\tau$. Hence $I_c \cap I'_b \neq \varnothing$, else $I_c \subset I_b$, meaning $I_c \subset \lbrace i' + 1, \ldots, \ell' \rbrace$, which is impossible because $i' \in I_c$.
Let $\mathcal{i} \in I_c \cap I'_b$ be chosen so that $\alpha_{i_{\mathcal{i}}}$ is minimal. We claim that \[\mathcal{C}_1 \left (\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b \right) \leq \mu^{\circ} + \ell - 2i_{i'} + 1.\]
Assume for the sake of contradiction that \[\mathcal{C}_1 \left (\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b \right) \geq \mu^{\circ} + \ell - 2i_{i'} + 2.\]
As above, \[\mathcal{C}_1(\alpha', \nu', \mathcal{i}, I'_b \setminus \lbrace \mathcal{i} \rbrace, I_b) = \left \lfloor \frac{ \nu_{i_{\mathcal{i}}} - \mu^{\circ} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}}}{\alpha_{i_{\mathcal{i}}} - 1} \right \rfloor + 1.\]
Thus,
\begin{align*}
& \frac{ \nu_{i_{\mathcal{i}}} - \mu^{\circ} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}}}{\alpha_{i_{\mathcal{i}}} - 1} \geq \mu^{\circ} + \ell - 2i_{i'} + 1 \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} - (\ell - 2i_{i'} + 1)(\alpha_{i_{\mathcal{i}}} - 1)}{\alpha_{i_{\mathcal{i}}} - 1} \\ & \geq \mu^{\circ} \left( 1 + \frac{1}{\alpha_{i_{\mathcal{i}}} - 1} \right) \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} - (\alpha^*_2 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - 2(\ell'-i') + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} - (\ell - 2i_{i'} + 1)(\alpha_{i_{\mathcal{i}}} - 1)}{\alpha_{i_{\mathcal{i}}}} \geq \mu^{\circ} \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} - (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) + 2 \sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2 \ell - 2 \ell' + 2 i' - 2i_{i'}+1}{\alpha_{i_{\mathcal{i}}}} - \ell + 2i_{i'} - 1 \geq \mu^{\circ} \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} + (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - \alpha_{i_{\mathcal{i}}} - 2 \sum_{i_0 = 1}^{j_0} m_{i_{\mathcal{i}}, i_0} + 1}{\alpha_{i_{\mathcal{i}}}} \\ & + \frac{- 2 \sum_{i_0 = j_0+1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} - \ell + 2i_{i'} \geq \mu^{\circ} \\ & \Longleftrightarrow \frac{\nu_{i_{\mathcal{i}}} + (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - \alpha_{i_{\mathcal{i}}} - 2 \sum_{i_0 = 1}^{j_0} m_{i_{\mathcal{i}}, i_0} + 1}{\alpha_{i_{\mathcal{i}}}} \\ & + \frac{- 2 \sum_{i_0 = j_0 + 1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 
2j_0 \geq \mu^{\circ} + \ell - 2 j_0.
\end{align*}
From the inclusions $I_{j_1} \subset \lbrace i'+1, \ldots, \ell' \rbrace \subset I_{j_0}$, we see that $I_{j_1} \subset I_b \subset I_{j_0}$, so $I_{j_1} = I_b \setminus I_c$. Furthermore, $|I_c \cap I'_b| = |I_c \cap \lbrace 1, \ldots, i' \rbrace| = i_{i'} - j_0$. Thus,
\begin{align*}
& \frac{- 2 \sum_{i_0 = j_0 + 1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_b} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 2j_0 \\ & = \frac{- 2 \sum_{i_0 = j_0 + 1}^{j_1 - 1} m_{i_{\mathcal{i}}, i_0} + 2\sum_{i_0 \in I_c \cap I_b} m_{i_{\mathcal{i}}, i_{i_0}} }{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 2j_0 \\ & + \frac{- 2 \sum_{i_0 = j_1}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2 \sum_{i_0 \in I_{j_1}} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} \\ & = \frac{- 2\sum_{i_0 \in I_c \cap I'_b} m_{i_{\mathcal{i}}, i_{i_0}}}{\alpha_{i_{\mathcal{i}}}} + 2i_{i'} - 2j_0 \\ & + \frac{-2 \sum_{i_0 = i_{i'}}^{\ell} m_{i_{\mathcal{i}}, i_0} + 2 \sum_{i_0 = i'}^{\ell'} m_{i_{\mathcal{i}}, i_{i_0}} + 2\ell - 2\ell' + 2i' - 2i_{i'}}{\alpha_{i_{\mathcal{i}}}} \\ & = 0 + 0 = 0.
\end{align*}
Hence \[ \frac{\nu_{i_{\mathcal{i}}} + (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - \alpha_{i_{\mathcal{i}}} - 2 \sum_{i_0 = 1}^{j_0} m_{i_{\mathcal{i}}, i_0} + 1}{\alpha_{i_{\mathcal{i}}}} \geq \mu^{\circ} + \ell - 2j_0.\]
If $j_0 = 0$, then
\begin{align*}
\frac{\mathcal{C}(\alpha, \nu, i_{\mathcal{i}}, \varnothing, \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace) + 1}{\alpha_{i_{\mathcal{i}}}} & = \frac{\nu_{i_{\mathcal{i}}} + (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - \alpha_{i_{\mathcal{i}}} + 1}{\alpha_{i_{\mathcal{i}}}} \\ & \geq \mu^{\circ} + \ell \\ & = \mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2, \ldots, \ell \rbrace) + 1 \\ & \geq \mathcal{C}_{-1}(\alpha, \nu, i_{\mathcal{i}}, \varnothing, \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace) + 1 \\ & \geq \frac{\mathcal{C}(\alpha, \nu, i_{\mathcal{i}}, \varnothing, \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace)}{\alpha_{i_{\mathcal{i}}}} + 1,
\end{align*}
which is impossible because $\alpha_{i_{\mathcal{i}}} > 1$.
Thus, $j_0 \geq 1$. From Proposition~\ref{multi}, it follows that $\nu_{j_0} = \mu^{\circ}$. Hence \[\mathcal{C}_{-1}(\alpha, \nu, j_0, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0 + 1, \ldots, \ell \rbrace) = \mu^{\circ} + \ell - 2j_0 + 1.\]
Since $j_0 < i_{\mathcal{i}}$ and $\alpha_{j_0} < \alpha_{i_{\mathcal{i}}}$, it follows that \[\mathcal{C}_{-1}(\alpha, \nu, j_0, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0 + 1, \ldots, \ell \rbrace) > \mathcal{C}_{-1}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace).\]
Therefore, \[\mu^{\circ} + \ell - 2j_0 \geq \mathcal{C}_{-1}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace).\]
Then
\begin{align*}
& \frac{\mathcal{C}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace) - 1}{\alpha_{i_{\mathcal{i}}}} \\ & = \frac{\nu_{i_{\mathcal{i}}} + (\alpha^*_1 + \cdots + \alpha^*_{\alpha_{i_{\mathcal{i}}}}) - \alpha_{i_{\mathcal{i}}} - 2 \sum_{i_0 = 1}^{j_0} m_{i_{\mathcal{i}}, i_0} + 1}{\alpha_{i_{\mathcal{i}}}} \\ & \geq \mu^{\circ} + \ell - 2j_0 \\ & \geq \mathcal{C}_{-1}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace) \\ & \geq \frac{\mathcal{C}(\alpha, \nu, i_{\mathcal{i}}, \lbrace 1, \ldots, j_0 - 1 \rbrace, \lbrace j_0, \ldots, \ell \rbrace \setminus \lbrace i_{\mathcal{i}} \rbrace)}{\alpha_{i_{\mathcal{i}}}},
\end{align*}
which is a contradiction.
\end{enumerate}
It remains to justify the assumption that $I_i$ is preserved under $\tau$ for all $1 \leq i \leq \ell$ such that $\alpha_i = 1$. Given a subset $J \subset \lbrace 1, \ldots, \ell' \rbrace$, set $i_J := \lbrace i_j : j \in J \rbrace$. Given a subset $I \subset \lbrace 1, \ldots, \ell \rbrace$, let $m(I)$ be its minimal element, and let $M(I)$ be its maximal element. Say that $I$ is \textit{consecutive} if $M(I) - m(I) + 1 = |I|$. Partition $\lbrace 1, \ldots, \ell' \rbrace$ into disjoint blocks $J_1, \ldots, J_k$ such that $i_{J_r}$ is consecutive for all $1 \leq r \leq k$ and $m(i_{J_{r+1}}) - M(i_{J_r}) > 1$ for all $1 \leq r \leq k-1$.
We claim that $J_r$ is preserved under $\tau$ for all $1 \leq r \leq k$. The proof is by (backwards) induction on $r$. For the inductive step, suppose the claim holds for all $r + 1 \leq r_0 \leq k$. Let $c$ be the cardinality of $J_r$, and let $j^r_1, \ldots, j^r_c$ be the elements of $J_r$, arranged in increasing order.
The claim for $r$ is then that $\tau^{-1}(j^r_{w}) \in J_r$ for all $1 \leq w \leq c$. If $r=1$, this follows immediately from the inductive hypothesis, so we may assume $r \geq 2$. Set $q := i_{j^r_1} - 1$. Then $\alpha_q = 1$ and $i_{j^r_w} = q + w$. We prove the claim by (backwards) induction on $w$.
Suppose $\tau^{-1}(j^r_{w_0}) \in J_r$ for all $w + 1 \leq w_0 \leq c$. Set $j_0 := \tau^{-1}(j^r_{w})$. Assume for the sake of contradiction that $j_0 \notin J_r$. By the inductive hypothesis (on $r$), we see that $j_0 \in J_1 \cup \cdots \cup J_{r-1}$. Thus, $i_{j_0} < q$.
Note that
\begin{align*}
\mu^{\circ} = \mu_{i_{j_0}} & \leq \mathcal{C}_{-1}(\alpha, \nu, i_{j_0}, \lbrace 1, \ldots, i_{j_0} - 1 \rbrace, \lbrace i_{j_0} + 1, \ldots, \ell \rbrace) - \ell + 2i_{j_0} - 1 \\ & = \left \lceil \frac{\nu_{i_{j_0}} - \sum_{i_0=1}^{i_{j_0}-1} m_{i_{j_0}, i_0} + \sum_{i_0 = i_{j_0} + 1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0}}{\alpha_{i_{j_0}}} \right \rceil - \ell + 2i_{j_0} - 1 \\ & \leq \left \lceil \frac{\nu_{i_{j_0}} - \sum_{i_0=1}^{i_{j_0}-1} m_{i_{j_0}, i_0} - \sum_{i_0 = i_{j_0} + 1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0}}{\alpha_{i_{j_0}}}\right \rceil - \ell + 2q - 3 \\ & = \left \lceil \frac{\nu_{i_{j_0}} - \sum_{i_0=1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0}}{\alpha_{i_{j_0}}}\right \rceil - \ell + 2q - 2.
\end{align*}
Thus,
\begin{align}
& \left \lfloor \frac{\nu_{i_{j_0}} - \mu^{\circ} - \sum_{i_0 = 1}^{q-1} m_{i_{j_0}, i_0} + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0} - \ell + 2q - 1}{\alpha_{i_{j_0}} - 1}\right \rfloor \\ & \geq \left \lfloor \frac{\nu_{i_{j_0}} - \sum_{i_0 = 1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0} - \left \lceil \frac{\nu_{i_{j_0}} - \sum_{i_0=1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0}}{\alpha_{i_{j_0}}}\right \rceil}{\alpha_{i_{j_0}} -1} \right \rfloor \\ & = \left \lfloor \frac{\left \lfloor \left(\nu_{i_{j_0}} - \sum_{i_0=1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0} \right) \left( \frac{\alpha_{i_{j_0}} - 1}{\alpha_{i_{j_0}}} \right)\right \rfloor}{\alpha_{i_{j_0}} - 1}\right \rfloor \\ & = \left \lfloor \frac{\nu_{i_{j_0}} - \sum_{i_0=1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0}}{\alpha_{i_{j_0}}}\right \rfloor \\ & \geq \left \lceil \frac{\nu_{i_{j_0}} - \sum_{i_0=1}^{q-1} m_{i_{j_0}, i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{i_{j_0}, i_0}}{\alpha_{i_{j_0}}}\right \rceil - 1 \\ & \geq \mu^{\circ} + \ell - 2q + 1.
\end{align}
Set \[J_r^w := \left \lbrace w_0 \in \lbrace 1, \ldots, c \rbrace : j^r_{w_0} \in J_r \setminus \tau^{-1} \lbrace j^r_{w+1}, \ldots, j^r_{c} \rbrace \right \rbrace.\] Let $w' \in J_r^w$ be chosen so that $\alpha_{i_{j^r_{w'}}}$ is minimal.
Since $q < q + w'$ and $\alpha_q = 1 < \alpha_{q+w'}$, it follows that
\begin{align*}
&\mathcal{C}_{-1}(\alpha, \nu, q + w', \lbrace 1, \ldots, q-1 \rbrace, \lbrace q, \ldots, \ell \rbrace \setminus \lbrace q+w' \rbrace) \\ & < \mathcal{C}_{-1}(\alpha, \nu, q, \lbrace 1, \ldots, q-1 \rbrace, \lbrace q+1, \ldots, \ell \rbrace) \\ & = \nu_q + \ell - 2q + 1 \\ & = \mu^{\circ} + \ell - 2q + 1,
\end{align*}
where the last equality follows from Proposition~\ref{multi}.
Hence
\begin{align*}
\mu^{\circ} + \ell - 2q + 1 & \geq \mathcal{C}_{-1}(\alpha, \nu, q + w', \lbrace 1, \ldots, q-1 \rbrace, \lbrace q, \ldots, \ell \rbrace \setminus \lbrace q+w' \rbrace) + 1 \\ & = \left \lceil \frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}}{\alpha_{q + w'}} \right \rceil.
\end{align*}
Therefore,
\begin{align}
& \left \lfloor \frac{\nu_{q+w'} - \mu^{\circ} - \sum_{i_0 = 1}^{q-1} m_{q+w', i_0} + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0} - \ell + 2q - 1}{\alpha_{q+w'}-1} \right \rfloor \\ & \leq \left \lfloor \frac{\nu_{q+w'} - \sum_{i_0 = 1}^{q-1} m_{q+w', i_0} + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0} - \left \lceil \frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}}{\alpha_{q + w'}} \right \rceil}{\alpha_{q+w'}-1} \right \rfloor \\ & \leq \left \lfloor \frac{\nu_{q+w'} - \sum_{i_0 = 1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0} - \left \lceil \frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}}{\alpha_{q + w'}} \right \rceil}{\alpha_{q+w'}-1} \right \rfloor \\ & = \left \lfloor \frac{\left \lfloor \left(\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0} \right) \left(\frac{\alpha_{q+w'}-1}{\alpha_{q+w'}} \right) \right \rfloor}{\alpha_{q+w'}-1} \right \rfloor \\ & = \left \lfloor \frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}}{\alpha_{q + w'}} \right \rfloor \\ & \leq \left \lceil \frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}}{\alpha_{q + w'}} \right \rceil \\ & \leq \mu^{\circ} + \ell - 2q + 1.
\end{align}
Combining (5.4) -- (5.9) and (5.10) -- (5.16), we see that $(5.4) \geq (5.10)$, with equality if and only if all the inequalities are in fact equalities. However, if $(5.14) = (5.15)$, then \[z:= \frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}}{\alpha_{q + w'}} \in \mathbb{Z},\] in which case \[\frac{\nu_{q+w'} - \sum_{i_0=1}^{q-1} m_{q+w', i_0} + 1 + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0}-z}{\alpha_{q + w'} -1} = z \in \mathbb{Z},\] so $(5.12) = z$ and $(5.11) = \left \lfloor z - \frac{1}{\alpha_{q+w'} - 1} \right \rfloor = z-1$. It follows that $(5.4) > (5.10)$.
For all $1 \leq i, j \leq \ell$, set $m'_{i,j} := m_{i,j} - 1$. Note that $m'_{i,j} = 0$ unless $\alpha_i, \alpha_j > 1$. Additionally, \[\frac{-2\sum_{w_0 \in J_r^w} m'_{i_{j_0}, q+w_0}}{\alpha_{i_{j_0}} - 1} \geq -2w = \frac{-2\sum_{w_0 \in J_r^w} m'_{q+w', q+w_0}}{\alpha_{q+w'} - 1}.\]
Thus,
\begin{align*}
& \mathcal{C}_1(\alpha', \nu', j_0, (J_1 \cup \cdots \cup J_r) \setminus \tau^{-1} \lbrace j^r_{w}, \ldots, j^r_c \rbrace, \tau^{-1} \lbrace j^r_{w+1}, \ldots, j^r_c \rbrace \cup J_{r+1} \cup \cdots \cup J_k) \\ & = \left \lfloor \frac{\nu'_{j_0} - \sum_{i_0 = 1}^{q-1} m'_{i_{j_0}, i_0} - \sum_{w_0 \in J_r^w} m'_{i_{j_0}, q + w_0} + \sum_{w_0 \in \lbrace 1, \ldots, c \rbrace \setminus J_r^w} m'_{i_{j_0}, q + w_0} + \sum_{i_0 = q + c + 1}^{\ell} m'_{i_{j_0}, i_0}}{\alpha_{i_{j_0}} -1} \right \rfloor + 1 \\ & = \left \lfloor \frac{\nu_{i_{j_0}} - \mu^{\circ} - \sum_{i_0 = 1}^{q-1} m'_{i_{j_0}, i_0} + \sum_{i_0 = q + 1}^{\ell} m'_{i_{j_0}, i_0}}{\alpha_{i_{j_0}} -1} + \frac{-2\sum_{w_0 \in J_r^w} m'_{i_{j_0}, q + w_0}}{\alpha_{i_{j_0}} - 1} \right \rfloor + 1 \\ & \geq \left \lfloor \frac{\nu_{i_{j_0}} - \mu^{\circ} - \sum_{i_0 = 1}^{q-1} m_{i_{j_0}, i_0} + \sum_{i_0 = q + 1}^{\ell} m_{i_{j_0}, i_0} - \ell + 2q - 1}{\alpha_{i_{j_0}} -1} \right \rfloor - 2w + 1 \\ & > \left \lfloor \frac{\nu_{q+w'} - \mu^{\circ} - \sum_{i_0 = 1}^{q-1} m_{q+w', i_0} + \sum_{i_0 = q+1}^{\ell} m_{q+w', i_0} - \ell + 2q - 1}{\alpha_{q+w'}-1} \right \rfloor - 2w + 1 \\ & = \left \lfloor \frac{\nu_{q+w'} - \mu^{\circ} - \sum_{i_0 = 1}^{q-1} m'_{q+w', i_0} + \sum_{i_0 = q+1}^{\ell} m'_{q+w', i_0}}{\alpha_{q+w'}-1} + \frac{-2 \sum_{w_0 \in J_r^w} m'_{q+w', q+ w_0}}{\alpha_{q+w'}-1} \right \rfloor + 1 \\ & = \left \lfloor \frac{\nu'_{j^r_{w'}} - \sum_{i_0=1}^{q-1} m'_{i_{j^r_{w'}}, i_0} - \sum_{w_0 \in J_r^w} m'_{i_{j^r_{w'}}, q + w_0} + \sum_{w_0 \in \lbrace 1, \ldots, c \rbrace \setminus J_r^w} m'_{i_{j^r_{w'}}, q + w_0} + \sum_{i_0 = q+c+1}^{\ell} m'_{i_{j^r_{w'}}, i_0}}{\alpha_{q+w'}-1} \right \rfloor + 1 \\ & = \mathcal{C}_1(\alpha', \nu', j^r_{w'}, (J_1 \cup \cdots \cup J_r) \setminus (\lbrace j^r_{w'} \rbrace \cup \tau^{-1} \lbrace j^r_{w+1}, \ldots, j^r_{c} \rbrace), \tau^{-1} \lbrace j^r_{w+1}, \ldots, j^r_c \rbrace \cup J_{r+1} \cup \cdots \cup J_k).
\end{align*}
Set $J := \tau^{-1} \lbrace j^r_{w+1}, \ldots, j^r_c \rbrace \cup J_{r+1} \cup \cdots \cup J_k$ and $J' := \lbrace 1, \ldots, \ell' \rbrace \setminus J$. By the inductive hypothesis, \[J = \tau^{-1} \lbrace j \in \lbrace 1, \ldots, \ell' \rbrace : j > j^r_w \rbrace.\]
From our work above, we see that \[\mathcal{C}_1(\alpha', \nu', j_0, J' \setminus \lbrace j_0 \rbrace, J) > \mathcal{C}_1(\alpha', \nu', j^r_{w'}, J' \setminus \lbrace j^r_{w'} \rbrace, J),\] which means that the function given by \[j \mapsto \mathcal{C}_1(\alpha', \nu', j, J' \setminus \lbrace j \rbrace, J)\] does not attain its minimal value over the domain $j \in J'$ at $j = j_0 = \tau^{-1}(j^r_w)$. This contradicts the definition of $\tau$.
\end{proof}
\begin{df}
Given a diagram $X$ and a positive integer $j$, the diagram $\mathcal{T}_j(X)$ is obtained from $X$ by removing the leftmost $j-1$ columns of $X$, and then removing the empty rows from the remaining diagram.
\end{df}
\begin{rem}
We refer to $\mathcal{T}_j$ as the \textit{column-reduction} function. Inductively, we see that $\mathcal{T}_j \mathcal{T}_{j'}(X) = \mathcal{T}_{j+j'-1}(X)$ for all $j, j' \in \mathbb{N}$.
\end{rem}
\begin{lem} \label{bigentry}
Let $(\alpha, \nu) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell}$. Set $(X,Y) := \mathcal{A}(\alpha, \nu, -1)$. Suppose $X_{1,1} = \cdots = X_{\ell, 1}$. Then $Y_{1,1} \geq Y_{i,j}$ for all $(i, j) \in \mathbb{N} \times \mathbb{N}$ such that $Y$ has an entry in the $i^{\text{th}}$ row and $j^{\text{th}}$ column.
\end{lem}
\begin{proof}
The proof is by induction on $M := \max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace$. Clearly, $Y_{1, 1} \geq Y_{i, 1}$ for all $i$, and it follows from Theorem~\ref{differences} that $Y_{i, 1} \geq Y_{i, 2}$ for all $i$, so $Y_{1,1} \geq Y_{i,2}$. Thus, we may assume $M \geq 3$.
Maintain the notation from the proof of Theorem~\ref{differences} (continue to assume without loss of generality that $\sigma = \operatorname{id}$). Set $\mathcal{f}' := \mathcal{S}(\alpha', \tau, \mu')$. For all $(x, i_0)$ in the image of $\mathcal{f}'$ such that $i_0 > 0$, set $i'_{(x,i_0)} := {\mathcal{f}'}^{-1}(x,i_0)$. Also set $\mathcal{k}' := |\lbrace \mu'_1, \ldots, \mu'_{\ell'} \rbrace|$. For all $1 \leq x \leq \mathcal{k}'$, set \[\ell'_x := \max \lbrace i_0 : (x, i_0) \in \mathcal{f}' \lbrace 1, \ldots, \ell' \rbrace \rbrace.\]
If $\ell'_x > 0$, then set \[{\alpha'}^{(x)} := \left [\alpha'_{{\tau}^{-1}\left(i'_{(x, 1)}\right)} - 1, \ldots, \alpha'_{{\tau}^{-1}\left(i'_{(x, \ell'_x)}\right)} - 1 \right] \] and \[{\nu'}^{(x)} = \left [\nu'_{{\tau}^{-1}\left(i'_{(x, 1)}\right)} - \mu'_{i'_{(x, 1)}}, \ldots, \nu'_{{\tau}^{-1}\left(i'_{(x, \ell'_x)}\right)} - \mu'_{i'_{(x, \ell'_x)}} \right].\]
For all $1 \leq i_0 \leq \ell'_x$, set \[\widehat{\nu'}^{(x)}_{i_0} := {\nu'}^{(x)}_{i_0} - \sum_{x' = 1}^{x-1} \sum_{i_1 = 1}^{\ell'_{x'}} \min \left\lbrace {\alpha'}^{(x)}_{i_0}, {\alpha'}^{(x')}_{i_1} \right\rbrace + \sum_{x' = x+1}^{\mathcal{k}'} \sum_{i_1 = 1}^{\ell'_{x'}} \min \left\lbrace {\alpha'}^{(x)}_{i_0}, {\alpha'}^{(x')}_{i_1} \right\rbrace.\]
Then set $\widehat{\nu'}^{(x)} := \left[\widehat{\nu'}^{(x)}_1, \ldots, \widehat{\nu'}^{(x)}_{\ell'_x}\right]$ and $\left({X'}^{(x)}, {Y'}^{(x)} \right) := \mathcal{A}\left({\alpha'}^{(x)}, \widehat{\nu'}^{(x)}, -1 \right)$.
By construction $\mathcal{T}_2(Y) = Y'$ and \[\mathcal{T}_3(Y) = \mathcal{T}_2 (Y') = \operatorname{Cat}\left({Y'}^{(1)}, \ldots, {Y'}^{(\mathcal{k}')}\right).\]
Note that $\mathcal{T}_2(Y)_{i, 1} \geq \mathcal{T}_2(Y)_{i+1, 1} + 1$. To see this, suppose the $i^{\text{th}}$ row of $\mathcal{T}_2(Y)$ is contained within the $j^{\text{th}}$ row of $Y$, and the $(i+1)^{\text{th}}$ row of $\mathcal{T}_2(Y)$ is contained within the $k^{\text{th}}$ row of $Y$. Then $k - j \geq 1$, so $Y_{j, 1} \geq Y_{k, 1} + 2$, and it follows from Theorem~\ref{differences} that $Y_{j, 2} \geq Y_{k, 2} + 1$.
Applying exactly the same reasoning, we find $\mathcal{T}_2(Y')_{i, 1} \geq \mathcal{T}_2(Y')_{i+1, 1}$, so the entries of $\mathcal{T}_3(Y) = \mathcal{T}_2 (Y')$ down the first column are weakly decreasing. In particular, ${Y'}^{(1)}_{1,1} \geq {Y'}^{(x)}_{1, 1}$ for all $1 \leq x \leq \mathcal{k}'$. By the inductive hypothesis, we see that ${Y'}^{(1)}_{1,1} \geq \mathcal{T}_3(Y)_{i,j}$ for all $(i, j)$.
Thus, it suffices to show $Y_{1,1} \geq {Y'}^{(1)}_{1,1}$. Assume for the sake of contradiction that ${Y'}^{(1)}_{1,1} > Y_{1,1}$. Set $\phi := \mathcal{R}_{-1}({\alpha'}^{(1)}, \widehat{\nu'}^{(1)})$. Set $i_0 := \phi^{-1}(1)$. Also set $i'_0 := \tau^{-1}\left(i'_{(1,i_0)}\right)$. Note that
\begin{align*}
{Y'}^{(1)}_{1,1} & = {X'}^{(1)}_{1,1} + \ell'_1 - 1 \\ & = \mathcal{C}_{-1}({\alpha'}^{(1)}, \widehat{\nu'}^{(1)}, i_0, \varnothing, \lbrace 1, \ldots, \ell'_1 \rbrace \setminus \lbrace i_0 \rbrace) \\ & = \left \lceil \frac{\widehat{\nu'}^{(1)}_{i_0} + \sum_{i_1 = 1}^{\ell'_1} \min \left \lbrace {\alpha'}^{(1)}_{i_0}, {\alpha'}^{(1)}_{i_1} \right \rbrace}{{\alpha'}^{(1)}_{i_0}} \right \rceil - 1 \\ & = \left \lceil \frac{{\nu'}^{(1)}_{i_0} + \sum_{i_1 = 1}^{\ell'_1} \min \left \lbrace {\alpha'}^{(1)}_{i_0}, {\alpha'}^{(1)}_{i_1} \right \rbrace + \sum_{x' = 2}^{\mathcal{k}'} \sum_{i_1=1}^{\ell'_{x'}} \min \left \lbrace {\alpha'}^{(1)}_{i_0},{\alpha'}^{(x')}_{i_1} \right \rbrace}{{\alpha'}^{(1)}_{i_0}} \right \rceil - 1 \\ & = \left \lceil \frac{\nu'_{i'_0} - \mu'_{i'_{(1,i_0)}} + \sum_{x = 1}^{\mathcal{k}'} \sum_{i_1=1}^{\ell'_{x}} \min \left \lbrace {\alpha'}_{i'_0} - 1, {\alpha'}_{\tau^{-1}(i'_{(x, i_1)})} - 1 \right \rbrace}{{\alpha'}_{i'_0} - 1} \right \rceil - 1 \\ & = \left \lceil \frac{\nu'_{i'_0} - \mu'_{i'_{(1,i_0)}} + \sum_{i' = 1}^{\ell'} \min \left \lbrace {\alpha'}_{i'_0} - 1, {\alpha'}_{i'} - 1 \right \rbrace}{{\alpha'}_{i'_0} - 1} \right \rceil - 1 \\ & = \left \lceil \frac{\nu'_{i'_0} - \mu'_{i'_{(1,i_0)}} + \left({\alpha'}^*_1 + \cdots + {\alpha'}^*_{{\alpha'}_{i'_0}} \right) - \ell'}{{\alpha'}_{i'_0} - 1} \right \rceil - 1 \\ & = \left \lceil \frac{\nu_{i_{i'_0}} - \mu^{\circ} - \mu'_{i'_{(1,i_0)}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right) - \ell - \ell'}{{\alpha}_{i_{i'_0}} - 2} \right \rceil - 1.
\end{align*}
Suppose that the topmost row of $\mathcal{T}_3(Y)$ is contained within the $p^{\text{th}}$ row of $Y$. If $p > 1$, then \[\mathcal{T}_3(Y)_{1,1} = Y_{p, 3} \leq Y_{p, 2} + 1 \leq Y_{p, 1} + 1 \leq Y_{1,1} - 1.\] It follows that $p = 1$, so there are at least three boxes in the first row of $Y$. Thus, the first row of $Y'$ is contained within the first row of $Y$, and, furthermore, there are at least two boxes in the first row of $Y'$. Hence $\alpha'_{\tau^{-1}(1)} > 1$, whence $\mathcal{f}'(1) = (1, 1)$.
Since $\mathcal{T}_3(Y)_{1,1} = Y_{1, 3}$ and $Y_{1, 3} \leq Y_{1, 2} + 1 \leq Y_{1, 1} + 1$, the assumption $Y_{1,3} > Y_{1,1}$ entails $Y_{1,2} = Y_{1,1}$. Note that $Y_{1,1} = \mu^{\circ} + \ell - 1$ and $Y_{1, 2} = Y'_{1,1} = \mu'_1 + \ell' - 1$, so $\mu^{\circ} + \ell = \mu'_1 + \ell'$. Then $\mu^{\circ} + \ell = \mu'_{i'_{(1,i_0)}} + \ell'$ because $\mu'_{i'_{(1,i_0)}} = \mu'_{i'_{(1,1)}} = \mu'_1$.
Thus, \[{Y'}^{(1)}_{1,1} = \left \lceil \frac{\nu_{i_{i'_0}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right) - 2(\mu^{\circ} + \ell)}{{\alpha}_{i_{i'_0}} - 2} \right \rceil - 1.\]
From ${Y'}^{(1)}_{1,1} > Y_{1,1} = \mu^{\circ} + \ell - 1$, we obtain
\begin{align*}
& \left \lceil \frac{\nu_{i_{i'_0}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right) - 2(\mu^{\circ} + \ell)}{{\alpha}_{i_{i'_0}} - 2} \right \rceil > \mu^{\circ} + \ell \\ & \Longleftrightarrow \frac{\nu_{i_{i'_0}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right) - 2(\mu^{\circ} + \ell)}{{\alpha}_{i_{i'_0}} - 2} > \mu^{\circ} + \ell \\ & \Longleftrightarrow \frac{\nu_{i_{i'_0}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right)}{{\alpha}_{i_{i'_0}} - 2} > (\mu^{\circ} + \ell) \left(1 + \frac{2}{\alpha_{i_{i'_0}} - 2} \right) \\ & \Longleftrightarrow \frac{\nu_{i_{i'_0}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right)}{{\alpha}_{i_{i'_0}}} > \mu^{\circ} + \ell \\ & \Longleftrightarrow \left \lceil \frac{\nu_{i_{i'_0}} + \left({\alpha}^*_1 + \cdots + {\alpha}^*_{{\alpha}_{i_{i'_0}}} \right)}{{\alpha}_{i_{i'_0}}} \right \rceil - 1 > \mu^{\circ} + \ell - 1 \\ & \Longleftrightarrow \mathcal{C}_{-1}(\alpha, \nu, i_{i'_0}, \varnothing, \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i_{i'_0} \rbrace) > \mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2, \ldots, \ell \rbrace).
\end{align*}
However, the function given by \[i \mapsto \mathcal{C}_{-1}(\alpha, \nu, i, \varnothing, \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i \rbrace)\] attains its maximal value over the domain $i \in \lbrace 1, \ldots, \ell \rbrace$ at $i = \sigma^{-1}(1) = 1$.
Therefore, \[\mathcal{C}_{-1}(\alpha, \nu, i_{i'_0}, \varnothing, \lbrace 1, \ldots, \ell \rbrace \setminus \lbrace i_{i'_0} \rbrace) \leq \mathcal{C}_{-1}(\alpha, \nu, 1, \varnothing, \lbrace 2, \ldots, \ell \rbrace).\] This is a contradiction.
\end{proof}
\begin{lem} \label{smallentry}
Let $(\alpha, \nu) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell}$. Set $(X,Y) := \mathcal{A}(\alpha, \nu, 1)$. Suppose $X_{1,1} = \cdots = X_{\ell, 1}$. Then $Y_{\ell,1} \leq Y_{i,j}$ for all $(i, j) \in \mathbb{N} \times \mathbb{N}$ such that $Y$ has an entry in the $i^{\text{th}}$ row and $j^{\text{th}}$ column.
\end{lem}
\begin{proof}
The proof is analogous to that of Lemma~\ref{bigentry}.
\end{proof}
\begin{thm} \label{puttog}
Let $(\alpha, \nu) \in \mathbb{N}^{\ell} \times \mathbb{Z}^{\ell}$. Then $\mathcal{A}(\alpha, \nu, -1)$ is odd-distinguished, and $\mathcal{A}(\alpha, \nu, 1)$ is even-distinguished.
\end{thm}
\begin{proof}
The proof is by induction on $\max \lbrace \alpha_1, \ldots, \alpha_{\ell} \rbrace$. We show the inductive step for the former statement only.
Maintain the notation following the definitions of the row-survival and row-partition functions. Recall that \[Y = \operatorname{Cat}(\mathcal{Y}^{(1)}, \ldots, \mathcal{Y}^{(\mathcal{k})}) \quad \text{and} \quad \mathcal{T}_2(Y) = \operatorname{Cat}(Y^{(1)}, \ldots, Y^{(\mathcal{k})}).\] By the inductive hypothesis, $Y^{(x)}$ is even-distinguished for all $1 \leq x \leq \mathcal{k}$. To see that $Y$ is odd-distinguished, we prove that $Y$ satisfies the four conditions delineated in Definition~\ref{dis}.
\begin{enumerate}
\item This follows immediately from Theorem~\ref{differences}.
\item \begin{enumerate}
\item Suppose $j < j'$ are odd and $Y_{i,j} \leq Y_{i, j'} - 1$. We split into two cases.
If $j > 1$, note that there are at least two boxes in the $i^{\text{th}}$ row of $Y$. Setting $(x, i') := \mathcal{S}(\alpha, \sigma, \iota)(i)$, we obtain $i' > 0$, so $i = i_{(x,i')}$. Thus, \[Y^{(x)}_{i', j-1} = Y_{i,j} \leq Y_{i, j'} - 1 = Y^{(x)}_{i', j' - 1} - 1.\] Since $Y^{(x)}$ is even-distinguished, it follows that $Y^{(x)}_{i', j-1}$ is not $E$-raisable in $Y^{(x)}$, so $Y_{i,j}$ is not $E$-raisable in $Y$.
If $j = 1$, assume for the sake of contradiction that $Y_{i,j}$ is $E$-raisable. Setting $(x, i') := \mathcal{P}(\alpha, \iota)(i)$, we obtain $i' = 1$, so $i = p_{(x,1)}$. Then \[\mathcal{Y}^{(x)}_{1, 1} = Y_{i, 1} \leq Y_{i, j'} - 1 = \mathcal{Y}^{(x)}_{1, j'} - 1,\] which contradicts Lemma~\ref{bigentry}.
\item Suppose $j < j'$ are even and $Y_{i, j} \geq Y_{i, j'} + 1$. As above, note that there are at least two boxes in the $i^{\text{th}}$ row of $Y$. Setting $(x, i') := \mathcal{S}(\alpha, \sigma, \iota)(i)$, we again obtain $i' > 0$, so $i = i_{(x,i')}$. Thus, \[Y^{(x)}_{i', j-1} = Y_{i,j} \geq Y_{i, j'} + 1 = Y^{(x)}_{i', j'-1} + 1.\] Since $Y^{(x)}$ is even-distinguished, it follows that $Y^{(x)}_{i', j-1}$ is not $E$-lowerable in $Y^{(x)}$, so $Y_{i,j}$ is not $E$-lowerable in $Y$.
\end{enumerate}
\item For both parts of this condition, it suffices to address the case $j = 1$; the condition holds for $j > 1$ by the inductive hypothesis (as in the proof of condition (2)).
\begin{enumerate}
\item Suppose $Y_{i,1} \leq Y_{i,j'} - 2$. If $j'$ is odd, then $Y_{i,1}$ is not $E$-raisable by condition (2). Otherwise, $j'$ is even, and we see from Theorem~\ref{differences} that \[Y_{i, 1} \leq Y_{i, j'} - 2 \leq Y_{i, j'-1} - 2.\] Thus, $j' -1 > 1$, and, again invoking condition (2), we find $Y_{i,1}$ is not $E$-raisable.
\item Suppose $Y_{i,1} \geq Y_{i,j'} + 2$, and assume for the sake of contradiction that $Y_{i,1}$ is $E$-lowerable. Setting $(x,i') := \mathcal{P}(\alpha, \iota)(i)$, we obtain $i' = \ell^{\circ}_x$, so $i = p_{(x, \ell^{\circ}_x)}$. By Theorem~\ref{differences},
\begin{align} \label{compare}
\mathcal{Y}^{(x)}_{\ell^{\circ}_x, 2} = Y_{i, 2} \geq Y_{i, 1} - 1 \geq Y_{i, j'} + 1 = \mathcal{Y}^{(x)}_{\ell^{\circ}_x, j'} + 1.
\end{align}
Note that $\mathcal{Y}^{(x)}_{\ell^{\circ}_x, 2}$ is $E$-lowerable in $\mathcal{Y}^{(x)}$ (\textit{even if} $Y_{i,2}$ is \textit{not} $E$-lowerable in $Y$). Thus, if $j'$ is even, then $j' > 2$, and Equation~\ref{compare} contradicts condition (2). Otherwise, $j'$ is odd, and \[\mathcal{Y}^{(x)}_{\ell^{\circ}_x, 2} \geq \mathcal{Y}^{(x)}_{\ell^{\circ}_x,j'} + 1 \geq \mathcal{Y}^{(x)}_{\ell^{\circ}_x,j'-1} + 1,\] which means $j' - 1 > 2$, and again yields a contradiction with condition (2).
\end{enumerate}
\item Clearly this condition holds for $j=1$. Therefore, since $Y^{(x)}$ is even-distinguished for all $1 \leq x \leq \mathcal{k}$, it suffices to show $x < x'$ implies $Y^{(x)}_{i, j} \geq Y^{(x')}_{i', j} + 2$.
We claim that $Y^{(x)}_{i, j} \geq Y^{(x)}_{\ell'_x, 1}$. To see this, note that Lemma~\ref{collapse} tells us that there exists a positive integer $\mathcal{k}_x$ and pairs of integer sequences \[\left(\alpha^{(x;1)}, \nu^{(x;1)}\right), \ldots, \left(\alpha^{(x;\mathcal{k}_x)}, \nu^{(x; \mathcal{k}_x)}\right)\] such that the diagram pairs $\left(X^{(x;y)}, Y^{(x;y)}\right) := \mathcal{A}_1 \left(\alpha^{(x;y)}, \nu^{(x;y)} \right)$ satisfy the following conditions: For all $1 \leq y \leq \mathcal{k}_x$, the entries in the first column of $X^{(x;y)}$ are all equal, and \[Y^{(x)} = \operatorname{Cat}\left(Y^{(x;1)}, \ldots, Y^{(x; \mathcal{k}_x)}\right).\]
Let $y$ be chosen so that the $i^{\text{th}}$ row of $Y^{(x)}$ is contained in $Y^{(x;y)}$. By Lemma~\ref{smallentry}, $Y^{(x)}_{i,j}$ is greater than or equal to the bottommost entry in the first column of $Y^{(x;y)}$. This entry belongs to the first column of $Y^{(x)}$, so it is itself greater than or equal to $Y^{(x)}_{\ell'_x,1}$, which proves the claim.
By Theorem~\ref{differences}, \[Y^{(x)}_{\ell'_x, 1} = Y_{i_{(x,\ell'_x)}, 2} \geq Y_{i_{(x,\ell'_x)},1} - 1 \geq Y_{p_{(x,\ell^{\circ}_x)}, 1} - 1.\]
Furthermore, \[Y_{p_{(x', 1)}, 1} + 2 = \mathcal{Y}^{(x')}_{1,1} + 2 \geq Y^{(x')}_{i',j} + 2,\] where the inequality follows from Lemma~\ref{bigentry} because $Y^{(x')}$ is contained in $\mathcal{Y}^{(x')}$, as shown in Lemma~\ref{collapse}.
Thus, it suffices to show $Y_{p_{(x,\ell^{\circ}_x)}, 1} - 1 \geq Y_{p_{(x',1)}, 1} + 2$. By definition, \[\mathcal{P}(\alpha, \iota)(p_{(x,\ell^{\circ}_x)}) = (x, \ell^{\circ}_x) \quad \text{and} \quad \mathcal{P}(\alpha, \iota)(p_{(x',1)}) = (x', 1).\] Hence $x < x'$ implies $X_{p_{(x,\ell^{\circ}_x)}, 1} > X_{p_{(x',1)}, 1}$, so $X_{p_{(x,\ell^{\circ}_x)}, 1} - 1 \geq X_{p_{(x',1)}, 1}$, whence the result follows.
\end{enumerate}
\end{proof}
\begin{cor} \label{dist}
Let $\alpha \vdash n$, and let $\nu \in \Omega_{\alpha}$. Then $\mathcal{A}(\alpha, \nu)$ is distinguished.
\end{cor}
Corollary~\ref{dist}, in view of Proposition~\ref{kap}, suffices to prove Theorem~\ref{mata} --- thanks to the following theorem.
\begin{thm}[Achar \cite{Acharj}, Theorem 8.8] \label{achar}
Let $\alpha \vdash n$, and let $\nu \in \Omega_{\alpha}$. Then $\mathsf{A}(\alpha, \nu)$ is the unique distinguished diagram of shape-class $\alpha$ in $\kappa^{-1}(\nu)$.
\end{thm}
\begin{rem}
Again (cf. Remark~\ref{weak}), Achar's definition of distinguished is weaker than ours, but it doesn't matter: $p_1 \mathcal{A}(\alpha, \nu)$ is distinguished by our definition, so it is distinguished by Achar's definition, so $p_1\mathcal{A}(\alpha, \nu) = \mathsf{A}(\alpha, \nu)$.
\end{rem}
This completes the proof of Theorem~\ref{mata}. \qed
\eject
\appendix
\section{Afterword}
Achar's algorithm $\mathsf{A}$ computes a map \[\Omega \rightarrow \bigcup_{\ell = 1}^n D_{\ell}.\] On input $(\alpha, \nu) \in \Omega$, the corresponding dominant weight $\gamma(\alpha, \nu) \in \Lambda^+$ is obtained from the output by taking $\eta E \mathsf{A}(\alpha, \nu)$.
Achar's algorithm for $\gamma^{-1}$, which we denote by $\mathsf{B}$, computes a map \[\Lambda^+ \rightarrow \bigcup_{\ell = 1}^n D_{\ell}.\] On input $\lambda \in \Lambda^+$, the corresponding pair $\gamma^{-1}(\lambda) \in \Omega$ is obtained from the output by taking $(\delta E^{-1} \mathsf{B}(\lambda), \kappa E^{-1} \mathsf{B}(\lambda))$, where the map $\delta$ sends a diagram to its shape-class.
Consider the following diagram.
\[\Omega \xleftarrow{(\delta p_1, \kappa p_1)} \bigcup_{\ell=1}^n D_{\ell} \times D_{\ell} \xrightarrow{\eta p_2} \Lambda^+\]
The algorithms $\mathsf{A}$ and $\mathsf{B}$ yield sections $(\mathsf{A}, E \mathsf{A})$ and $(E^{-1}\mathsf{B}, \mathsf{B})$ of the projections $(\delta p_1, \kappa p_1)$ and $\eta p_2$ onto $\Omega$ and $\Lambda^+$, respectively, for which \[\eta p_2 \circ (\mathsf{A}, E \mathsf{A}) = \gamma \quad \text{and} \quad (\delta p_1, \kappa p_1) \circ (E^{-1} \mathsf{B}, \mathsf{B}) = \gamma^{-1}.\] That the maps computed by $\mathsf{A}$ and $\mathsf{B}$ play symmetric roles suggests that the algorithms themselves should exhibit structural symmetry. Unfortunately, they do not.
We address this incongruity by introducing the algorithm $\mathcal{A}$, which computes the section $(\mathsf{A}, E \mathsf{A})$, yet has the same recursive structure as $\mathsf{B}$: Both $\mathcal{A}$ and $\mathsf{B}$ recur after determining the entries in the first column of their output diagram(s).\footnote{Achar \cite{Acharj} phrases the instructions for $\mathsf{B}$ to use iteration rather than recursion, but they amount to carrying out the same computations. } Thus, the weight-diagrams version of our algorithm achieves structural parity with $\mathsf{B}$; the integer-sequences version $\mathfrak{A}$ is a singly recursive simplification that sidesteps weight diagrams altogether.
Having established that our algorithm is correct, we are mindful that Einstein's admonition, ``Everything should be made as simple as possible, but not simpler,'' will have the last word. We believe the appeal of our weight-by-weight, column-by-column approach is underscored by its consonance with Achar's algorithm $\mathsf{B}$, which has stood the test of time. To demonstrate the complementarity between $\mathcal{A}$ and $\mathsf{B}$, we offer the following description of $\mathsf{B}$. More details can be found in Achar \cite{Acharj}, section 6.
\subsubsection*{The algorithm}
We define a recursive algorithm $\mathsf{B}$ that computes a map \[\mathbb{Z}^n_{\operatorname{dom}} \times \lbrace \pm 1 \rbrace \rightarrow \bigcup_{\ell = 1}^n D_{\ell}\] by filling in the entries in the first column of its output diagram and using recursion to fill in the entries in the remaining columns. Whenever we write $\mathsf{B}(\lambda)$, we refer to $\mathsf{B}(\lambda, -1)$.
The algorithm $\mathsf{B}$ is multiply recursive and begins by dividing a weakly decreasing integer sequence into \textit{clumps}. From each clump, it builds a diagram by first extracting a maximal-length \textit{majuscule} sequence to comprise the entries of the first column, and then calling itself on the clump's remains. To obtain the output, it concatenates the diagrams constructed from all the clumps.
\begin{df}
A subsequence of a weakly decreasing integer sequence is \textit{clumped} if no two of its consecutive entries differ by more than $1$. A clumped subsequence is a \textit{clump} if it is not contained in a longer clumped subsequence.
\end{df}
\begin{df}
An integer sequence $\iota = [\iota_1, \ldots, \iota_{\ell}]$ is \textit{majuscule} if $\iota_i - \iota_{i+1} \geq 2$ for all $1 \leq i \leq \ell - 1$.
\end{df}
We are ready to describe $\mathsf{B}$.
On input $(\lambda, \epsilon)$, the algorithm designates $\mathsf{c}$ the number of distinct clumps in $\lambda$. For all $1 \leq x \leq \mathsf{c}$, it designates $n_x$ the number of entries in the $x^{\text{th}}$ clump.
Let $\lambda^{(x)}$ denote the $x^{\text{th}}$ clump, and $\mathcal{Y}^{(x)}$ the diagram to be built from $\lambda^{(x)}$. For all $1 \leq x \leq \mathsf{c}$, the algorithm obtains a majuscule sequence $\iota^{(x)}$ from $\lambda^{(x)}$ as follows:
\begin{itemize}
\item If $\epsilon = -1$, then $\iota^{(x)}$ is the maximal-length majuscule sequence contained in $\lambda^{(x)}$ that begins with $\lambda^{(x)}_1$;
\item If $\epsilon = 1$, then $\iota^{(x)}$ is the maximal-length majuscule sequence contained in $\lambda^{(x)}$ that ends with $\lambda^{(x)}_{n_x}$.
\end{itemize}
Then it sets \[\mathcal{Y}^{(x)}_{i, 1} := \iota^{(x)}_i\] for all $1 \leq i \leq \ell_x$, where $\ell_x$ is the length of $\iota^{(x)}$. This determines the entries in the first column of $\mathcal{Y}^{(x)}$.
If $\iota^{(x)} = \lambda^{(x)}$, the diagram $\mathcal{Y}^{(x)}$ is complete. Otherwise, the algorithm arranges the elements of the (multiset) difference $\lambda^{(x)} \setminus \iota^{(x)}$ in weakly decreasing order, leaving a weakly decreasing integer sequence $\bar{\lambda}^{(x)}$, and it sets \[Y^{(x)} := \mathsf{B}(\bar{\lambda}^{(x)}, -\epsilon).\]
It proceeds to attach $Y^{(x)}$ to the first column of $\mathcal{Y}^{(x)}$. For all $i'$ such that $Y^{(x)}$ has an $i'^{\text{th}}$ row, the algorithm finds the unique $i \in \lbrace 1, \ldots, \ell_x \rbrace$ such that $Y^{(x)}_{i',1} - \mathcal{Y}^{(x)}_{i,1} \in \lbrace 0, \epsilon \rbrace$. Then, for all $j'$ such that $Y^{(x)}$ has an entry in the $i'^{\text{th}}$ row and $j'^{\text{th}}$ column, it sets \[\mathcal{Y}^{(x)}_{i,j'+1} := Y^{(x)}_{i',j'}.\]
Finally, it sets \[Y := \operatorname{Cat}\left (\mathcal{Y}^{(1)}, \ldots, \mathcal{Y}^{(\mathsf{c})} \right )\] and returns $Y$.
\begin{exam}
In Example~\ref{acharexam}, we found \[\gamma([4,3,2,1,1], [15,14,9,4,4]) = [8,7,6,6,5,4,3,3,2,2,0].\]
Here we set $\lambda := [8,7,6,6,5,4,3,3,2,2,0]$ and compute $\mathsf{B}(\lambda)$.
We start by observing that $\lambda$ has $2$ clumps, \[\lambda^{(1)} = [8,7,6,6,5,4,3,3,2,2] \quad \text{and} \quad
\lambda^{(2)} = [0].\]
The maximal-length majuscule sequence contained in $\lambda^{(1)}$ that begins with $\lambda^{(1)}_1 = 8$ is $\iota^{(1)} = [8,6,4,2]$. Hence \[\big[\mathcal{Y}^{(1)}_{1,1}, \mathcal{Y}^{(1)}_{2,1}, \mathcal{Y}^{(1)}_{3,1}, \mathcal{Y}^{(1)}_{4,1} \big] = [8,6,4,2].\]
Upon removing $\iota^{(1)}$ from $\lambda^{(1)}$, we see that \[\bar{\lambda}^{(1)} = [7,6,5,3,3,2].\]
As it happens, $\mathsf{B}(\bar{\lambda}^{(1)}, 1)$ looks as follows.
\begin{figure}
\caption{The diagram obtained from the remains of the first clump}
\end{figure}
We complete $\mathcal{Y}^{(1)}$ by attaching $Y^{(1)}$ to the first column of $\mathcal{Y}^{(1)}$.
\begin{figure}
\caption{The diagram obtained from the first clump}
\end{figure}
Since $\lambda^{(2)}$ consists of a single entry, it follows that $\iota^{(2)} = \lambda^{(2)}$, so $\mathcal{Y}^{(2)}$ consists of a single box.
\begin{figure}
\caption{The diagram obtained from the second clump}
\end{figure}
Concatenating $\mathcal{Y}^{(1)}$ and $\mathcal{Y}^{(2)}$, we arrive at $Y$.
\begin{figure}
\caption{The diagram obtained from the recursion}
\end{figure}
\end{exam}
Comparing our result with that of Example~\ref{cont}, we see d\'ej\`a vu --- $\mathcal{A}(\alpha, \nu) = (E^{-1}Y, Y)$. This corroborates that the sections $\mathcal{A}$ and $(E^{-1} \mathsf{B}, \mathsf{B})$ send $(\alpha, \nu)$ and $\lambda$, respectively, to the same diagram pair whenever $(\alpha, \nu)$ and $\lambda$ correspond under the Lusztig--Vogan bijection, as in this case they do.
\eject
\section*{Acknowledgments}
This research is based on the author's doctoral dissertation at the Massachusetts Institute of Technology. His gratitude to his advisor David A. Vogan Jr. --- for suggesting the problem and offering invaluable help and encouragement along the way to a solution --- cannot be overstated. Thanks are also due to Roman Bezrukavnikov and George Lusztig for serving on the author's thesis committee and offering thoughtful remarks.
John W. Chun, the author's late grandfather, emigrated from Korea and fell in love with the author's grandmother and American literature. He earned a doctorate in English at the Ohio State University, becoming the first member of the author's family to be awarded a Ph.D. This article is dedicated to his memory, fondly and with reverence.
Throughout his graduate studies, the author was supported by the US National Science Foundation Graduate Research Fellowship Program.
\end{document} |
\begin{document}
\doublespacing
\title{High-dimensional peaks-over-threshold inference}
\begin{abstract}
Max-stable processes are increasingly widely used for modelling complex extreme events, but existing fitting methods are computationally demanding, limiting applications to a few dozen variables. $r$-Pareto processes are mathematically simpler and have the potential advantage of incorporating all relevant extreme events, by generalizing the notion of a univariate exceedance. In this paper we investigate score matching for performing high-dimensional peaks-over-threshold inference, focusing on extreme value processes associated to log-Gaussian random functions, and discuss the behaviour of the proposed estimators for regularly varying distributions with normalized marginals. Their performance is assessed on grids with several hundred locations, simulating from both the true model and from its domain of attraction. We illustrate the potential and flexibility of our methods by modelling extreme rainfall on a grid with $3600$ locations, based on risks for exceedances over local quantiles and for large spatially accumulated rainfall, and briefly discuss diagnostics of model fit.
The differences between the two fitted models highlight the importance of the choice of risk and its impact on the dependence structure.
\end{abstract}
\begin{keywords}
Functional regular variation; Gradient score; Pareto process; Peaks over threshold analysis; Quasi-Monte Carlo method; Statistics of extremes
\end{keywords}
\section{Introduction}
Recent contributions in extreme value theory describe models capable of handling spatio-temporal phenomena \citep[e.g.,][]{Kabluchko2009} and provide a flexible framework for modelling rare events, but their complexity makes inference difficult, if not intractable, for high-dimensional data. For instance, the number of terms in the block maximum likelihood for a Brown--Resnick process grows with dimension like the Bell numbers \citep{Huser2013a}, so less efficient but computationally cheaper methods like composite likelihood \citep{Padoan2010} or the inclusion of partition information \citep{Stephenson2005a} have been advocated. The first is slow, however, and the second is liable to bias if the partition is incorrect \citep{Wadsworth2015}.
An attractive alternative to use of block maxima is peaks over threshold analysis, which includes more information by focusing on single extreme events. In the multivariate case, specific definitions of exceedances have been used \citep[e.g.,][]{Ferreira2014,Engelke2012b}, which can be unified within the framework of $r$-Pareto processes \citep{Dombry2013}. For this approach, a full likelihood is often available in closed form, thus increasing the maximum number of variables that can be jointly modelled from a handful to a few dozen, but non-extreme values may be used, leading to biased estimation. Censored likelihood, proposed in this context by \citet{Wadsworth2014}, is more robust with regard to non-extreme observations, but it involves multivariate normal distribution functions, which can be computationally expensive. Nevertheless, inference is feasible in $30$ or so dimensions.
Nonparametric alternatives to full likelihood inference developed using the tail dependence coefficient \citep{Davis2009,Davis} or the stable tail dependence function \citep{Einmahl2016} rely on pairwise estimators and allow peaks-over-threshold inference in about a hundred dimensions, but are limited by combinatorial considerations.
Applications of max-stable processes \citep[e.g.,][]{Asadi2015} or Pareto processes \citep{Thibaud2013} have focused on small regions and have used at most a few dozen locations with specific notions of exceedance, but exploitation of much larger gridded datasets from global and regional climatological models along with complex definitions of risk is needed for a better understanding of extreme events and to reduce model uncertainties. The goals of this paper are to highlight the advantages of generalized peaks-over-threshold modelling using $r$-Pareto processes, to show the feasibility of high-dimensional inference for the Brown--Resnick {model} with hundreds of locations, and to compare the robustness of different procedures with regard to finite thresholds. We develop an estimation method based on the gradient score \citep{Hyvarinen2005} for a generalized notion of exceedances, for which computational complexity is driven by matrix inversion, similarly to classical Gaussian likelihood inference. This method focuses on single extreme events and a general notion of exceedance, modelled by Pareto processes, instead of the max-stable approach.
Section~\ref{sec: theory} reviews recent results on regular variation for continuous processes and {generalized} peaks over threshold theory, with a focus on extreme-value processes associated to log-Gaussian random vectors. In Section~\ref{sec: HD inference}, classical inference schemes are summarised, an efficient parallel algorithm for maximum likelihood is developed, and a faster alternative based on the gradient score \citep{Hyvarinen2005} is considered. Section~\ref{sec: simulations} describes simulations that establish the computational tractability of these procedures and investigate their robustness. In Section~\ref{sec: case study} we apply our methods to estimate the dependence structure of extreme rainfall in Florida for two types of risk, using a grid with $3600$ cells.
\section{Modelling exceedances over a high threshold}\label{sec: theory}
\subsection{Univariate model}
The statistical analysis of extremes was first developed for block maxima \citep[Section 5.1]{Gumbel1958}.
This approach is widely used and can give good results, but the reduction of a complex dataset to maxima can lead to significant loss of information \citep{Madsen1997}, so the modelling of exceedances over a threshold is often preferred in applications \citep{Davison1990}. Let $X$ be a random variable with distribution function $F$ satisfying Theorem~3.1.1 in \citet[][Section 3.1, p.~48]{Coles2001}. Then for a large enough threshold $u > 0$,
\begin{equation}\label{eq: gpd}
\mathbb{P}\left( X-u > x \mid X>u\right) \approx H_{(\xi,\sigma)}(x) = \left\{
\begin{array}{ll}
\left(1+\xi x/\sigma\right)_+^{-1/ \xi}, & \xi \neq 0, \\
\exp\left(- x/\sigma\right), & \xi = 0,
\end{array}
\right.
\end{equation}
where $\sigma > 0$ and $a_+ = \max(a, 0)$.
If the shape parameter $\xi$ is negative, then the excess $X-u$ must lie in the interval $[0,-\sigma/\xi]$, whereas it can take any positive value when $\xi$ is positive or zero.
The implication is that the distribution over a high threshold $u$ of any random variable $X$ satisfying conditions for equation~(\ref{eq: gpd}) can be approximated by
\begin{equation}
\label{eq:GPD}
G_{(\xi,\sigma, u)}(x) = 1 - \zeta_u H_{(\xi,\sigma)}(x-u), \quad x > u,
\end{equation}
where $\zeta_u$, the probability that $X$ exceeds the threshold $u$, is determined by $u$.
In its simplest form this model for univariate exceedances applies to independent and identically-distributed variables, but it has been used for time series, non-stationary and spatial data.
Modelling exceedances can be generalized to a multivariate setting \citep{Rootzen2006} and to continuous processes \citep{Ferreira2014, Dombry2013} within the functional regular variation framework.
\subsection{Functional regular variation}
Let $S$ be a compact metric space, such as $[0,1]^2$ for spatial applications. We write $\mathcal{F} = C\{S,[0,\infty)\}$ for the Banach space of continuous functions $x: S \rightarrow [0,\infty)$ endowed with the uniform norm $\|x\|_\infty = \sup_{s \in S} |x(s)|$ and $\mathcal{B}(\Xi)$ for the Borel $\sigma$-algebra associated to a metric space $\Xi$.
A measurable closed subset $\mathcal{C}$ of $\mathcal{F}$ is called a cone if $tx \in \mathcal{C}$ for any $x \in \mathcal{C}$ and $t >0$.
For the study of extremes, the cones $\mathcal{C} = \{0\}$ or $\mathcal{C} = \{x \in \mathcal{F} : \inf_{s \in S} x(s) \leqslant 0\}$ are often excluded from $\mathcal{F} $ to avoid the appearance of limiting measures with infinite masses at the origin or on the coordinate axes, so let $M_{\mathcal{F} \setminus \mathcal{C}}$ denote the class of Borel measures on $\mathcal{B}(\mathcal{F} \setminus \mathcal{C})$ for any cone $\mathcal{C}$, and say that a set $A \in \mathcal{B}(\mathcal{F} \setminus \mathcal{C})$ is bounded away from $\mathcal{C}$ if $d(A,\mathcal{C}) = \inf_{x\in A, y\in \mathcal{C}} d(x,y) > 0$.
A sequence of measures $\{\nu_n\}\subset M_{\mathcal{F} \setminus \mathcal{C}}$ is said to converge to a limit $\nu\in M_{\mathcal{F} \setminus \mathcal{C}}$, written $\nu_n \xrightarrow{\hat{w}} \nu$ \citep{Hult2005}, if
$\lim_{n \rightarrow \infty} \nu_n(A) = \nu(A)$,
for all $A \in \mathcal{B}(\mathcal{F} \setminus \mathcal{C})$ bounded away from $\mathcal{C}$ with $\nu(\partial A) = 0$, where $\partial A$ denotes the boundary of $A$.
For equivalent definitions of this so-called $\hat{w}$-convergence, see \citet[Theorem 2.1]{Lindskog2014}.
Regular variation provides a flexible mathematical setting in which to characterize the tail behaviour of random processes in terms of $\hat{w}$-convergence of measures.
A stochastic process $X$ with sample paths in $\mathcal{F} \setminus \mathcal{C}$ is regularly varying \citep{Hult2005} if there exists a sequence of positive real numbers $a_1,a_2,\ldots$ with $\lim_{n \rightarrow \infty} a_n = \infty$, and a measure $\nu \in M_{\mathcal{F} \setminus \mathcal{C}}$ such that
\begin{equation}\label{eq: rv}
n\mathbb P\left(a_n^{-1}X \in \cdot\right) \xrightarrow{\hat{w}} \nu(\cdot), \quad n \rightarrow \infty;
\end{equation}
then we write $X \in {\rm RV}\left(\mathcal{F} \setminus \mathcal{C}, a_n, \nu\right)$.
For a normalized process $X^*$, obtained by standardizing marginals of $X$ to unit Fr\'echet \citep[e.g.,][Section 5]{Coles1991} or unit Pareto \citep{Kluppelberg2008}, for instance, regular variation is equivalent to the convergence of the renormalised pointwise maximum $n^{-1} \max_{i = 1,\dots,n} X_i^*$ of independent replicates of $X^*$ to a non-degenerate process $Z^*$, with unit Fr\'echet margins and exponent measure $\nu^*$ \citep{DeHaan2001}.
The process $Z^*$ is called simple max-stable, and $X^*$ is said to lie in the max-domain of attraction of $Z^*$.
Regular variation also impacts the properties of exceedances over high thresholds.
For any nonnegative measurable functional $r : \mathcal{F} \rightarrow [0, +\infty)$ and stochastic process $\{X(s)\}_{s \in S}$, an $r$-exceedance is defined to be an event $\{ r(X) > u_n \}$ where the threshold $u_n$ is such that
$\mathbb P \{r(X) > u_n\} \to 0$ as $n \rightarrow \infty$.
We further require that $r$ satisfies a homogeneity property, i.e., there exists $\alpha > 0$ such that $r(ax) = a^\alpha r(x)$, for $a> 0$ and $x \in \mathcal{F}$.
\citet{Dombry2013} called $r$ a `cost functional' and \cite{Opitz2013} called it a `radial aggregation function', but we prefer the term `risk functional' because $r$ determines the type of extreme event whose risk is to be studied.
A natural formulation of subsequent results on $r$-exceedances uses a pseudo-polar decomposition.
For a norm $\|\cdot\|_{\rm ang}$ on $\mathcal{F}$, called the angular norm, and a risk functional $r$, a pseudo-polar transformation $T$ is a map such that
$$
T: \mathcal{F} \setminus \mathcal{C} \rightarrow [0,\infty) \times \mathcal{S}_{\rm ang} \setminus T(\mathcal{C}), \quad T(x) = \left\{r = r(x), w = \frac{x}{\|x\|_{\rm ang}}\right\},
$$
where $ \mathcal{S}_{\rm ang} $ is the unit sphere $\{ x \in \mathcal{F} \setminus \mathcal{C} : \|x\|_{\rm ang} = 1\}$. If $r$ is continuous and $T$ is restricted to $\{x \in \mathcal{F} \setminus \mathcal{C} : r(x) >0\}$, then $T$ is a homeomorphism with inverse $T^{-1}(r,w) = r\times w/r(w)$.
Theorem 2.1 in \cite{Lindskog2014} provides an equivalent pseudo-polar formulation of equation~(\ref{eq: rv}). For any $X \in {\rm RV}\left(\mathcal{F} \setminus \mathcal{C}, a_n, \nu\right)$ and any uniformly continuous risk functional $r$ such that $T(\mathcal{C})$ is closed and $r$ does not vanish $\nu$-almost everywhere, there exist $\beta > 0$ and a measure $\sigma_{r}$ on $\mathcal{B}(\mathcal{S}_{\rm ang})$ such that
\begin{equation}\label{eq: polar rv}
n\mathbb P\left\{T^{-1}\left(a_n^{-1}r, w\right) \in \cdot\right\} \xrightarrow{\hat{w}} \nu \circ T^{-1}(\cdot) = \nu_{\beta} \times \sigma_{r} (\cdot), \quad n \rightarrow \infty,
\end{equation}
where $\nu_{\beta} [r,\infty) = r^{-\beta}$ and the angular measure $\sigma_{r} (\cdot)$ equals $\nu\left\{x \in \mathcal{F} \setminus \mathcal{C} : r(x) > 1, \: x/\|x\|_{\rm ang} \in (\cdot) \right\}$.
The converse holds if $\{x \in \mathcal{F} \setminus \mathcal{C} : r(x) = 0\} = \emptyset$ and $\mathcal{C}$ is compact \citep[Corollary~4.4]{Lindskog2014}.
The functional $r(x) = \sup_{s \in S}\{x(s)\}$, used by \citet{Rootzen2006} in a multivariate setting and by \citet{Ferreira2014} for continuous processes, implies that realisations of $X(s)$ exceeding the threshold at any location $s \in S$ are labelled extreme, but this functional can only be used in applications where $X(s)$ is observed throughout $S$. Thus it may be preferable to use functions such as $\max_{s\in S'} X(s)$ or $\max_{s\in S'} X(s)/u(s)$, where $S'\subset S$ is a finite set of gauged sites.
Other suggested risk functionals include $\int_{S} X(s) ds$ for the study of areal rainfall \citep{Tawn1996}, $\min_{s \in S'} X(s)/u(s)$, or $X(s_0)$ for risks impacting a specific location $s_0$.
Although the choice of risk functional allows a focus on particular types of extreme event, the choice of the angular norm $\|\cdot\|_{\rm ang}$ has no impact and is usually made for convenience.
Finally, for a common angular norm $\|\cdot\|_{\rm ang}$, the angular measures of two risk functionals $r_1$ and $r_2$ that are strictly positive $\nu$-almost everywhere are linked by the expression
\begin{equation}\label{eq: ang measure link}
\sigma_{r_1} (dw) = \left\{\frac{r_1(dw)}{r_2(dw)}\right\}^\beta\sigma_{r_2} (dw), \quad dw \in \mathcal{B}(\mathcal{S}_{\rm ang}).
\end{equation}
Equation~(\ref{eq: ang measure link}) is useful when we are interested in $r_2$-exceedances but inference has been performed based on $r_1$.
All the previous definitions and results also hold for finite dimensions, i.e., for $I$-dimensional random vectors, by replacing $\hat{w}$-convergence by vague convergence \citep[Section 3.3.5]{Resnick2007} on $M_{\mathbb{R}^I \setminus \mathcal{C}^I}$, the class of Borel measures on $\mathcal{B}(\mathbb{R}^I \setminus \mathcal{C}^I)$ endowed with the $\|\cdot\|_\infty$ norm, where $\mathcal{C}^I$ denotes a cone in $\mathbb{R}^I$ \citep{Opitz2013}.
\subsection{$r$-Pareto processes}
In this section, $r$ denotes a functional that is nonnegative and homogeneous of order $\alpha = 1$, $\mathcal{F}^+$ denotes the restriction of $\mathcal{F}$ to nonnegative functions and $\mathcal{C}$ is the closed cone $\{0\}$.
The $r$-Pareto processes \citep{Dombry2013} are important for modelling exceedances, and may be constructed as
\begin{equation}
P = U\frac{Q}{r(Q)},
\end{equation}
where $U$ is a univariate Pareto random variable with $\mathbb P(U > r) = 1/r^\beta$ $(r\geq 1)$ and $Q$ is a random process with sample paths in $\mathcal{S}_{\rm ang}^+ = \{x \in \mathcal{F}^+ \setminus \mathcal{C}: \|x\|_{\rm ang} = 1\}$ and probability measure $\sigma_{\rm ang}$; then $P$ is called an $r$-Pareto process with tail index $\beta > 0$ and angular measure $\sigma_{\rm ang}$, and we write $P\sim P_{\beta,\sigma_{\rm ang}}^r$.
An important property of this class of processes is threshold-invariance: for all $A \in \mathcal{B}(\mathcal{F}^+)$ and all $u \geqslant 1$ such that $\mathbb P\{r(P) > u\} > 0$,
\begin{equation}\label{eq: pot stability}
\mathbb P\{u^{-1}P \in A \mid r(P) > u\} = \mathbb{P}(P \in A).
\end{equation}
Furthermore, for $X\in {\rm RV}\left(\mathcal{F}^+ \setminus \mathcal{C}, a_n, \nu\right)$ with index $\beta >0$ and for a risk functional $r$ that is continuous at the origin and does not vanish $\nu$-almost everywhere, the distribution of the $r$-exceedances converges weakly to that of a Pareto process, i.e.,
\begin{equation}\label{eq: weak conv Pareto}
\mathbb P\left\{u^{-1}X \in (\cdot) \mid r(X) > u\right\} \xrightarrow{w} P_{\beta,\sigma_{r}}^r, \quad u \rightarrow \infty,
\end{equation}
with tail index $\beta$ and probability measure $ \sigma_{r}$ as defined in equation~(\ref{eq: polar rv}) \citep[Theorem~2]{Dombry2013}.
When working with a normalized process $X^*$, the exponent measure $\nu^*$ of the limiting max-stable process $Z^*$ and the measure $\nu_1 \times \sigma_{r}$ of the Pareto process are equal up to a coordinate transform, as suggested by equation~(\ref{eq: polar rv}). \citet{Opitz2013} derived these results in a multivariate setting.
\subsection{Extreme value processes associated to log-Gaussian random functions}\label{sec: BR}
We focus on a class of generalized Pareto processes based on log-Gaussian stochastic processes, whose max-stable counterparts are Brown--Resnick processes.
This class is particularly useful, not only for its flexibility but also because it is based on classical Gaussian models widely used in applications; \citet[][p.~84--108]{Chiles2012a} review existing models.
Let $Z$ be a zero-mean Gaussian process with stationary increments, i.e., the semi-variogram $\gamma(s,s') = \mathbb E[\{Z(s) - Z(s')\}^2]/2$, $(s,s' \in S)$ depends only on the difference $s-s'$ \citep[][p.~30]{Chiles2012a}.
If $Z_1, Z_2,\ldots$ are independent copies of a zero-mean Gaussian process with semi-variogram $\gamma$ and $\{U_i : i \in \mathbb{N} \}$ is a Poisson process on $(0, + \infty)$ with intensity $u^{-2}du$, then
\begin{equation}\label{eq: BR}
M(s) = \max_{ i \in \mathbb{N}} U_i \exp\{Z_i(s) - \gamma(0,s)\}, \quad s \in S,
\end{equation}
is a stationary max-stable Brown--Resnick process with standard Fr\'echet margins, whose distribution depends only on $\gamma$ \citep{Kabluchko2009}.
Let $s_1,\ldots, s_I$ be locations of interest in $S$.
In the rest of the paper, $x$ denotes an element of $\mathbb{R}^I_+$ and $x_i \equiv x(s_i)$ $(i = 1,\dots, I)$ denote its components.
The finite-dimensional exponent measure $\Lambda_{\theta}(\cdot)$ of a simple Brown--Resnick process with $I > 1$ variables is
\begin{equation}\label{eq: exp measure max}
\Lambda_{\theta} (x)= \mathbb E\left[ \max_{i = 1, \dots, I} \frac{\exp\{Z(s_i) - \gamma(0,s_i)\}}{x_i} \right] = \nu_\theta\left\{A_{\max}(x)\right\},
\end{equation}
where $\nu_\theta(\cdot)$ is the finite-dimensional equivalent of the measure defined in Equation~(\ref{eq: rv}), $\theta$ is an element of the compact set $\Theta$ of the parameters of the semi-variogram $\gamma_\theta$ and $A_{\max}(x) = \left\{y \in \mathbb{R}^I : \max(y_1/x_1, \dots, y_I/x_I) > 1\right\}$.
A closed form for $\Lambda_{\theta} (x)$ is \citep{Huser2013a}
\begin{equation}\label{eq: brown resnick}
\Lambda_{\theta} (x)= \sum_{i=1}^I \frac{1}{x_i} \Phi\{\eta_i(x), R_i\},
\end{equation}
where $\eta_i$ is the $(I - 1)$-dimensional vector with $j$th component $\eta_{ij} = \sqrt{\gamma_{i,j}/2} + \log(x_j/x_i)/ \sqrt{2 \gamma_{i,j}} $, $\gamma_{j,k}$ denotes $\gamma(s_j,s_k)$ $(s_j,s_k \in S)$, and $\Phi( \cdot , R_i)$ is the multivariate normal cumulative distribution function with zero mean and covariance matrix $R_i$ whose $(j,k)$ entry is $(\gamma_{i,j} +\gamma_{i,k} - \gamma_{j,k})/\{2(\gamma_{i,j}\gamma_{i,k} )^{1/2}\}$.
{$r$-Pareto processes associated to log-Gaussian random functions are closely related to the intensity function $\lambda_{\theta}$ corresponding to the measure $\nu_\theta$, which can be found by taking partial derivatives of $\Lambda_{\theta} (x)$ with respect to $x_1, \dots, x_I$, yielding \citep{Engelke2012b}
\begin{equation} \label{eq: cdfBR Engelke}
\lambda_{\theta}(x) = \frac{| \Sigma_{\theta}|^{-1/2}}{x_1^2x_2 \cdots x_I (2\pi)^{(I-1)/2}} \exp \left( -\frac{1}{2} \widetilde{x}^T \Sigma_{\theta}^{-1} \widetilde{x}\right), \quad x \in \mathbb{R}^I_+,
\end{equation}
where $\widetilde{x} $ is the $(I-1)$-dimensional vector with components $\{\log(x_j/x_1) + \gamma_{j,1} : j = 2, \dots, I \}$ and $\Sigma_{\theta} $ is the $(I-1)\times(I-1)$ matrix with elements $ \{ \gamma_{i,1} + \gamma_{j,1} - \gamma_{i,j} \}_{i,j \in \{2, \dots, I\} }$.
\cite{Wadsworth2014} derive an alternative symmetric expression for (\ref{eq: cdfBR Engelke}) which will be useful in Section~\ref{sec: grad score}, but Equation~(\ref{eq: cdfBR Engelke}) is more readily interpreted.
Similar expressions exist for extremal-$t$ processes \citep{Thibaud2013}.}
\section{Inference for $r$-Pareto processes}\label{sec: HD inference}
\subsection{Generalities}
In this section, $x^1,\ldots, x^N$ are independent replicates of an $I$-dimensional $r$-Pareto random vector $P$ with tail index $\beta =1$ and
$y^1,\ldots, y^N$ are independent replicates from a regularly-varying $I$-dimensional random vector $Y^*$ with normalized margins.
As in the univariate setting, statistical inference based on block maxima and the max-stable framework discards information by focusing on maxima instead of single events.
These models are difficult to fit not only due to the small number of replicates, but also because the likelihood is usually too complex to compute in high dimensions \citep{Castruccio2014}.
For the Brown--Resnick process, the full likelihood cannot be computed for more than ten variables \citep{Huser2013a}, except in special cases.
When the occurrence times of maxima are available, inference is typically possible up to a few dozen variables \citep{Stephenson2005a}.
Estimation based on threshold exceedances and the Pareto process has the advantages that individual events are used, the likelihood function is usually simpler, and the choice of the risk functional can tailor the definition of an exceedance to the application. Equation~(\ref{eq: polar rv}) suggests that the choice of risk functional should not affect the estimates, but this is not entirely true, because the threshold cannot be taken arbitrarily high and the events selected depend on the risk functional $r$, the choice of which enables the detection of mixtures in the extremes and can improve sub-asymptotic behaviour by fitting the model using only those observations closest to the chosen type of extreme event. For example, we might expect the extremal dependence of intense local rainfall events to differ from that of heavy large-scale precipitation, even in the same geographical region.
The probability density function of a Pareto process for $r$-exceedances over the threshold vector $u \in \mathbb{R}_+^I$ can be found by rescaling the intensity function $\lambda_\theta$ by $\nu_{\theta}\{A_{r}(u)\}$, yielding
\begin{equation} \label{eq: pareto density}
\lambda_{\theta, u}^{r}(x) = \frac{\lambda_\theta(x)}{\nu_{\theta}\{A_{r}(u)\}}, \quad x \in A_{r}(u),
\end{equation}
where $\nu_{\theta}\{A_{r}(u)\} = \int_{A_{r}(u)} \lambda_\theta(x) dx$ and $A_{r}(u)$ is the exceedance region $\left\{x \in \mathbb{R}^I_+ : r(x/u) > 1 \right\}$.
Equation (\ref{eq: pareto density}) for $r$-Pareto process inference yields the log-likelihood
\begin{equation}\label{eq: likelihood}
\ell(\theta; x^1,\ldots, x^N) = \sum_{n = 1}^{N} \mathbb{1}\left\{r\left(\frac{x^n}{u}\right) > 1\right\}\log\left[ \frac{\lambda_{\theta}(x^n)}{\nu_{\theta}\{A_{r}(u)\}} \right],
\end{equation}
where division of vectors is component-wise and $\mathbb{1}$ denotes the indicator function. Maximization of $\ell$ gives an estimator $\widehat{\theta}_{r}(x^1,\ldots, x^N)$ that is consistent, asymptotically normal and efficient.
Numerical evaluation of the $I$-dimensional integral $\nu_{\theta}\{A_{r}(u)\}$ is generally intractable for high $I$, though it simplifies for some special risk functionals, such as $r(x) = \max_{i = 1,\dots,I} x_i$, for which the integral is a sum of multivariate probability functions; see Equation~(\ref{eq: brown resnick}).
Similarly, \citet{Coles1991} pointed out that $\nu_{\theta}\{A_{r}(u)\}$ is constant and independent of $\theta$ when the risk functional is $r(x) = I^{-1} \sum_{i = 1,\dots,I} x_i$; \citet{Engelke2012b} called the resulting quantity (\ref{eq: likelihood}) the spectral likelihood.
In practice observations cannot be assumed to be exactly Pareto distributed; it is usually more plausible that they lie in the domain of attraction of some extremal process.
As a consequence of Theorem~3.1 in \citet{DeHaan1993}, asymptotic properties of $\widehat{\theta}_{r}(x^1,\ldots, x^N)$ hold for $\widehat{\theta}_{r}(y^1,\ldots, y^N)$ as $N \rightarrow \infty$ and $u \rightarrow \infty$ with the number of exceedances $N_u = o(N)$; see Section~\ref{sec: grad score}.
However, the threshold $u$ is finite and thus low components of $y^i \in A_{r}(u)$ may lead to biased estimation.
As it is due to model mis-specification, this bias is unavoidable, and moreover, it grows with $I$, so these methods can perform poorly, especially if the extremal dependence is weak, as it is then more likely that at least one component of $x^i$ will be small \citep{Engelke2012b,Thibaud2013,Huser}.
The bias can be reduced by a form of censored likelihood proposed in the multivariate setting by \citet{Joe.Smith.Weissman:1992}, and used for the Brown--Resnick model by \citet{Wadsworth2014}, and for the extremal-$t$ process by \citet{Thibaud2013}.
This method works well in practice but typically requires the computation of multivariate normal and $t$ probabilities, which can be challenging in realistic cases if standard code is used. Some relatively modest changes to the code to perform quasi-Monte Carlo maximum likelihood estimation with hundreds of locations are described in Section~\ref{sec: censored likelihood}.
For spatio-temporal applications, inference for $r$-Pareto processes must be performed using data from thousands of locations, and in Section~\ref{sec: grad score} we discuss an approach that applies to a wide range of risk functionals, is computationally fast and statistically efficient, and is robust with regard to finite thresholds.
\subsection{Efficient censored likelihood inference}\label{sec: censored likelihood}
\subsubsection{Definition and properties}
Censored likelihood estimation for extreme value processes associated to log-Gaussian random functions was developed by \citet{Wadsworth2014} and is based on equation~(\ref{eq: likelihood}) with $\max_{i = 1,\dots,I}\{x_i/u_i\}$ as risk functional and where any component lying below the threshold vector $(u_1,\dots,u_I) > 0 $ is treated as censored.
This estimator has increased variance but reduced bias compared to the spectral estimator.
For the Brown--Resnick process, the censored likelihood density function, in \citet{Engelke2012b}'s notation, is
\begin{equation} \label{eq: brown resnick cens}
\lambda_{\theta, u}^{\text{cens}}(x) = \frac{1}{\nu_{\theta}\{A_{\max}(u)\}}\frac{1}{x_1^2x_2 \cdots x_k} \phi_{k-1}(\widetilde{x}_{2:k}; \Sigma_{2:k}) \Phi_{I-k}\{\mu_{\text{cens}}(x_{1:k}),\Sigma_{\text{cens}}(x_{1:k})\}, \quad x \in A_{\max}(u),
\end{equation}
where $A_{\max}(u) = \{x \in \mathbb{R}^I : \max_{i = 1, \dots, I} (x_i/u_i) > 1 \}$, $k$ components exceed their thresholds, $\widetilde{x}_{2:k}$ and $\Sigma_{2:k}$ are subsets of the variables $\widetilde{x}$ and $\Sigma_\theta$ in equation~(\ref{eq: cdfBR Engelke}), and $\phi_{k-1}$ and $\Phi_{I - k}$ are the multivariate Gaussian density and distribution functions. The mean and covariance matrix for $\Phi_{I-k}$ are
\begin{eqnarray*}
\mu_{\text{cens}}(x_{1:k}) &=& \{\log(u_j/x_1) + \gamma_{j,1} \}_{j = k+1, \dots, I} - \Sigma_{(k+1):I, 2:k}\Sigma_{2:k,2:k}^{-1}\widetilde{x}_{2:k}, \\
\Sigma_{\text{cens}}(x_{1:k}) &=& \Sigma_{(k+1):I, (k+1):I} - \Sigma_{(k+1):I, 2:k}\Sigma_{2:k,2:k}^{-1} \Sigma_{ 2:k,(k+1):I}.
\end{eqnarray*}
\cite{Wadsworth2014} derived similar expressions based on equation~(\ref{eq: cdfBR Wadsworth}).
The estimator
\begin{equation}\label{eq: censored estimator}
\widehat{\theta}_{\text{cens}}(y^1,\ldots, y^N) = \text{arg} \max_{\theta \in \Theta} \sum_{n = 1, \dots, N} \mathbb{1}\left\{\max_{i = 1,\dots,I}\left(\frac{y^n_i}{u_i}\right) > 1\right\}\log \lambda_{\theta, u}^{\text{cens}}(y^n),
\end{equation}
is also consistent and asymptotically normal as $u \rightarrow \infty$, $N \rightarrow \infty$, $N_u \rightarrow \infty$ with $N_u = o(N)$.
For finite thresholds, $\widehat{\theta}_{\text{cens}}$ has been found to be more robust with regard to low components \citep{Engelke2012b,Huser}, but it is awkward due to the potentially large number of multivariate normal integrals involved, thus far limiting its application to $I\lesssim 30$ \citep{Wadsworth2014,Thibaud.etal:2016}.
A useful alternative is composite likelihood inference \citep{Padoan2010,Varin.Reid.Firth:2011} based on subsets of observations of sizes smaller than $I$, which trades off a gain in computational efficiency against a loss of statistical efficiency. The number of possible subsets increases very rapidly with $I$, and their selection can be vexed, though some statistical efficiency can be retrieved by taking higher-dimensional subsets. \citet{Castruccio2014} found higher-order composite likelihoods to be more robust than spectral likelihood, but in realistic cases they are limited to fairly small dimensions. Even with $I=9$ they required days of computation.
\subsubsection{Quasi-Monte Carlo maximum likelihood}\label{sec:QMC}
When maximizing the right-hand side of equation~(\ref{eq: censored estimator}), the normalizing constant $\nu_{\theta}\{A_{\max}(u)\}$, described in equation~(\ref{eq: exp measure max}), and the multivariate normal distribution functions require the computation of multidimensional integrals.
Theorem~7 of \citet{Geyer1994} suggests that we approximate $\widehat{\theta}_{\text{cens}}$ by maximizing
\begin{equation}\label{eq: monte carlo log like}
\ell^p_{\text{cens}}(\theta) = \sum_{n = 1}^{N} \mathbb{1}\left\{\max\left(\frac{x^n}{u}\right) > 1\right\} \left[ \log \left\{ \frac{\phi_{k-1}(\widetilde{x}_{2:k}; \Sigma_{2:k})}{(x_1^n)^2x^n_2 \cdots x^n_k} \right\} + \log\frac{\Phi^p_{I-k}\{\mu_{\text{cens}}(x^n_{1:k}),\Sigma_{\text{cens}}(x^n_{1:k})\}}{\Lambda_{\theta}^p (u) } \right],
\end{equation}
where $\Phi^p_{I-k}$ and $\Lambda^p_\theta$ are Monte Carlo estimates of the corresponding integrals based on $p$ simulated samples, yielding a maximizer $\widehat{\theta}^p_{\text{cens}}$ that converges almost surely to $\widehat{\theta}_{\text{cens}}$
as $p \rightarrow \infty$.
Classical Monte Carlo estimation for multivariate integrals yields a probabilistic error bound that is $O(\omega p^{-1/2})$, where $\omega = \omega(\phi)$ is the square root of the variance of the integrand $\phi$.
Quasi-Monte Carlo methods can achieve higher rates of convergence and thus improve computational efficiency while preserving the consistency of $\widehat{\theta}^p_{\text{cens}}$.
For estimation of multivariate normal distribution functions, \citet[][Section 4.2.2]{Genz2009} advocate the use of randomly-shifted deterministic lattice rules, which can achieve a convergence rate of order $O(p^{-2 + \epsilon})$ for some $\epsilon > 0$.
Lattice rules rely on regular sampling of the hypercube $[0,1]^I$, taking
\begin{equation}\label{eq: lattice rule}
\mathrm{z}_q = |2 \times \overline{( qv + \Delta)} - 1|, \quad q = 1, \dots, p,
\end{equation}
where $\overline{(\mathrm{z})}$ denotes the component-wise fractional part of $\mathrm{z} \in \mathbb{R}^I$, $p$ is a prime number of samples in the hypercube $[0,1]^I$, $v \in \{1, \dots, p\}^I$ is a carefully-chosen generating vector and $\Delta \in [0,1]^I$ is a uniform random shift.
Fast construction rules exist to find an optimal $v$ for given numbers of dimensions $I$ and samples $p$ \citep{Nuyens2006}.
The existence of generating vectors achieving a nearly optimal convergence rate, with integration error independent of the dimension, has been proved and methods for their construction exist \citep{Dick2010}.
Our implementation of this approach applied to equation~(\ref{eq: censored estimator}) and coupled with parallel computing is tractable for $I$ of the order of a few hundred; see Appendix \ref{app: censored likelihood} for details.
\subsection{Score matching}\label{sec: grad score}
Classical likelihood inference methods require either evaluation or simplification of the scaling constant $\nu_{\theta}\{A_{r}(u)\}$, whose complexity increases with the number of dimensions.
Hence we seek alternatives that do not require its computation.
Let $\mathcal{A}$ be a sample space such as $\mathbb{R}_+^I$, and let $\mathcal{P}$ be a convex class of probability measures on $\mathcal{A}$.
A proper scoring rule \citep{Gneiting2007b} is a functional $\delta: \mathcal{P}\times \mathcal{A} \rightarrow \mathbb{R}$ such that
\begin{equation}\label{eq: properness}
\int_\mathcal{A} \delta(g,x)g(x)dx \geqslant \int_\mathcal{A} \delta(h,x)g(x)dx, \quad h,g \in \mathcal{P}.
\end{equation}
The scoring rule is said to be strictly proper if equality in (\ref{eq: properness}) holds only when $g =h $.
A proper scoring rule is a consistent estimator of a divergence measure between two distributions \citep{Thorarinsdottir2013} and can be used for inference.
For a risk functional $r$, the estimator
\begin{equation}\label{eq: maximum score estimator}
\widehat{\theta}_{\delta, u}^r(x^1,\ldots, x^N) = \text{arg} \max_{\theta \in \Theta} \sum_{n = 1}^N \mathbb{1}\left\{r\left(\frac{x^n}{u}\right) > 1\right\} \delta(\lambda_{\theta, u}^{r}, x^n),
\end{equation}
where $x^1,\ldots, x^N$ were defined at the beginning of Section~\ref{sec: HD inference}, is a consistent and asymptotically normal estimator under suitable regularity conditions \citep[Theorem~4.1]{Dawid2014}.
As a consequence of \citet[Propositions~3.1, 3.2]{DeHaan1993}, these asymptotic properties can be generalized to samples from a regularly-varying random vector with normalized marginals; see Appendix \ref{app: mda score normality}.
\begin{prop*}
Let $1 \leqslant N_u \leqslant N$. Let $y^1,\ldots, y^N$ be independent replicates of a regularly-varying random vector $Y^*$ with normalized marginals and limiting measure $\nu_{\theta_0}$ and let $\delta$ be a strictly proper scoring rule satisfying the conditions of Theorem~4.1 of \citet{Dawid2014}. If $N \rightarrow \infty$ and $N_u \rightarrow \infty$ such that $N_u = o(N)$, then
$$
\sqrt{N_u}\left\{\widehat{\theta}_{\delta, N/N_u}^r\left(y^1,\ldots, y^N\right) - \theta_0\right\} \rightarrow \mathcal{N}\left\{0, K^{-1}J(K^{-1})^T\right\}
$$
in distribution, where
\begin{equation}
J = \mathbb E_P \left\{\frac{\partial\delta}{\partial \theta}(\theta_0)\frac{\partial\delta}{\partial \theta}(\theta_0)^T\right\}, \quad
K= \mathbb E_P\left\{\frac{\partial^2\delta}{\partial \theta^2}(\theta_0) \right\}.
\end{equation}
\end{prop*}
Estimates of the Godambe information matrix $G = \left\{K^{-1}J(K^{-1})^T\right\}^{-1}$ can be used for inference, and the scoring-rule ratio statistic
$$
W^\delta = 2\left\{\delta\left(\widehat{\theta}_{\delta, N/N_u}^r\right) - \delta\left(\theta_0\right) \right\},
$$
properly calibrated, can be used to compare models \citep[Section~4.1]{Dawid2014}.
The log-likelihood function is a proper scoring rule associated to the Kullback--Leibler divergence. Although efficient, it is not robust, which is problematic for fitting asymptotic models like Pareto processes, and the normalizing coefficient $\nu_{\theta}\{A_{r}(u)\}$ is obtainable only in special cases.
The gradient score \citep{Hyvarinen2005} uses the derivative $\nabla_{x} \log g$, and so does not require computation of scaling constants such as $\nu_{\theta}\{A_{r}(u)\}$.
\citet{Hyvarinen2007} adapted this scoring rule for strictly positive variables, and we propose to extend it to any domain of the form $A_{r}(u) = \{x \in \mathbb{R}_+^I : r(x / u) > 1\}$, using the divergence measure
\begin{equation}\label{eq: gradient divergence}
\int_{A_{r}(u)} \| \nabla_{x} \log g(x) \otimes {w}(x) - \nabla_{x} \log h(x) \otimes {w}(x) \|^2_2 \:g(x)dx,
\end{equation}
where $g$ and $h$ are multivariate density functions differentiable on $A_{r}(u) \setminus \partial A_{r}(u)$, where $\partial A$ denotes the boundary of $A$, $\nabla_x$ is the gradient operator, ${w}: A_{r}(u) \rightarrow \mathbb{R}_+^I$ is a positive weight function, and $\otimes$ denotes the Hadamard product.
If ${w}(\cdot)$ is differentiable on $A_{r}(u)$, and if for every $i \in \{1, \dots, I\}$, we have
\begin{equation} \label{eq: consitence grad score}
\lim_{x_i \rightarrow a_i(x_1, \dots, x_{i-1}, x_{i+1}, \dots , x_I)} w_i(x)^2 \frac{\partial \log h(x)}{\partial x_i} g(x) - \lim_{x_i \rightarrow b_i(x_1, \dots, x_{i-1}, x_{i+1}, \dots , x_I)} w_i(x)^2 \frac{\partial \log h(x)}{\partial x_i} g(x) = 0,
\end{equation}
where $a_i(x_1, \dots, x_{i-1}, x_{i+1}, \dots , x_I)$ and $b_i(x_1, \dots, x_{i-1}, x_{i+1}, \dots , x_I)$ are respectively the lower and upper bounds of the variable $x_i$ on $A_{r}(u)$ for fixed $(x_1, \dots, x_{i-1}, x_{i+1}, \dots , x_I)$, then the scoring rule
\begin{equation}\label{eq: grad score new}
\delta_{{w}}(h,x) = \sum_{i =1}^I \left(2w_i(x)\frac{\partial w_i(x)}{\partial x_i} \frac{\partial \log h(x)}{\partial x_i} + w_i(x)^2 \left[ \frac{\partial^2 \log h(x)}{\partial x_i^2} + \frac{1}{2} \left\{\frac{\partial \log h(x)}{\partial x_i} \right\}^2 \right] \right), \quad x \in A_{r}(u),
\end{equation}
is strictly proper, as is easily seen by modification of \citet{Hyvarinen2007}. The gradient score for a Pareto process satisfies the regularity conditions of Theorem~4.1 in \citet{Dawid2014}, so the resulting estimator $\widehat{\theta}_w$ is asymptotically normal.
Two possible weight functions for inference on the Pareto process are
\begin{equation}\label{eq: grad score weights}
\left.
\begin{array}{ll}
w^1_i(x) = & x_i\left[1 - e^{-\{r(x/u) - 1\}} \right], \\
w^2_i(x) = & \left[1 - e^{-3\frac{x_i - u_i}{u_i}} \right]\left[1 - e^{-\{r(x/u) - 1\}} \right],
\end{array}\right\}
\quad i \in \{1, \dots, I\},
\end{equation}
where $r$ is a risk functional differentiable on $\mathbb{R}_+^I$ and the threshold vector $u$ lies in $\mathbb{R}^I_+$.
The weights ${w}^1$ are derived from \citet{Hyvarinen2007}, whereas the weights ${w}^2$ are designed to approximate the effect of censoring by down-weighting components of $x$ near the threshold.
These weighting functions are particularly well suited for extremes: a vector $x \in A_{r}(u)$ is penalized if $r(x / u)$ is close to $1$, and low components of $x$ induce low weights for the associated partial derivatives.
For these reasons, inference using $\delta_{w}$ with the weighting functions in equation~(\ref{eq: grad score weights}) can be expected to be more robust to low components than is the spectral log-likelihood.
The estimator $\widehat\theta_w$ can be much cheaper to compute than $\widehat\theta_{\text{cens}}$ and can be obtained for any risk functional differentiable on $\mathbb{R}_+^I$.
The gradient score can be applied to any extremal model with a multivariate density function {whose logarithm} is twice differentiable away from the boundaries of its support, and if these display discontinuities on this support then the weighting function ${w}$, chosen such that~\eqref{eq: consitence grad score} is fulfilled, ensures the existence and the consistency of the score.
Expressions for scores for the Brown--Resnick model can be found in Appendix~\ref{app: BR score}, and the performances of these inference procedures are compared in Section~\ref{sec: simulations}.
\section{Simulation study}\label{sec: simulations}
\subsection{Exact simulation}\label{sec: simul Pareto}
The inference procedures and simulation algorithms described below have been wrapped in an R package, \verb+mvPot+, available on \verb+CRAN+.
We first illustrate the feasibility of high-dimensional inference by simulating generalized Pareto processes associated to log-normal random functions at $I$ locations.
Details of the algorithm can be found in Appendix~\ref{app: pareto sims}.
We use an isotropic power semi-variogram, $\gamma(s,s') =\left(\|s - s'\|/\tau\right)^\kappa/2$, shape parameters $\kappa= 0.5,1,1.3$, and scale parameter $\tau = 2.5$. In spatial extremes, it is common to compare models by plotting the extremal coefficient \citep{Schlather2003} against distance between locations, as in Figure~\ref{fig: extremal models}. The extremal coefficient measures the strength of dependence, has a lower bound equal to $1$, which is achieved in case of perfect dependence, and an upper bound $2$ corresponding to independence. For this simulation, dependence models with $\kappa > 1.3$ could not be tested because the Pareto process drifts below the smallest representable number and thus rounding produces exact zeros, which are incompatible with the Brown--Resnick model. For each simulation, $N=10,000$ Pareto processes were simulated on regular $10 \times 10$, $20 \times 10$ and $20 \times 15$ grids. The grid size was restricted to a maximum of $300$ locations for ease of comparison with the second simulation study. For the gradient score, we use $r(x) = \sum_{i = 1}^I x(s_i)$. The threshold $u$ is taken equal to the empirical $0.99$ quantile of $r(x^1),\ldots, r(x^N)$, giving $N_u=100$. For censored likelihood inference, we use the approach described in Appendix \ref{app: parallel censored likelihood} with $\bar{p} = 10$. One hundred replicates are used in each case.
\begin{figure}
\caption{Pairwise extremal coefficient for a Brown--Resnick process with semi-variogram $\gamma(s,s') = \left(\|s - s'\|/\tau\right)^\kappa$ as a function of distance for $\kappa = 1.8$ (solid), $\kappa = 1.3$ (dashes), $\kappa = 1$ (dots), $\kappa = 0.5$ (dot-dash) and $\tau = 2.5$. The extremal dependence is perfect for an extremal coefficient of $1$ and independence is reached when it equals $2$.}
\label{fig: extremal models}
\end{figure}
Table~\ref{tab: mvn est Pareto} gives the relative root mean square error for estimation based on censored log-likelihood and the gradient score with weights $w^1$ and $w^2$, relative to that based on the spectral log-likelihood. For all the methods and parameter combinations, bias is negligible and performance is mainly driven by the variance.
As expected, efficiency is lower than $100\%$ because when simulating and fitting from the true model, the spectral likelihood performs best. The gradient score and the censored likelihood estimators deteriorate as the extremal dependence weakens and the number of low components in the simulated vectors increases.
The gradient score outperforms the censored likelihood except when censoring is low, i.e., when $\kappa = 0.5$.
The performance of the censored likelihood estimators deteriorates when the dimensionality increases, suggesting that the gradient score will be preferable in high dimensions. These results, however, are not realistic since the data are simulated from the fitted model, whereas in practice the model is used as a high-threshold approximation to the data distribution.
\begin{table}[!t]
\begin{center}
\begin{tabular}{c}
\begin{tabular}{c c c c c}
Grid size & $\kappa = 0.5$ & $\kappa = 1$ & $\kappa = 1.3$ \\ \hline
$10 \times 10$ & $\textbf{53.3}/44.8/42.4$ & $10.3/\textbf{30.8}/12.4$ & $4.7/\textbf{36.5}/12.3$ \\
$20 \times 10$ & $\textbf{66.8}/48.9/49.0$ & $10.1/\textbf{23.8}/14.1$ & $5.4/\textbf{32.6}/12.4$ \\
$20 \times 15$ & $\textbf{66.9}/44.0/43.9$ & $10.6/\textbf{28.8}/16.9$ & $4.1/\textbf{23.4}/9.3$ \\
\end{tabular} \\ Shape $\kappa$\\ \\
\begin{tabular}{c c c c c}
Grid size & $\kappa = 0.5$ & $\kappa = 1$ & $\kappa = 1.3$ \\ \hline
$10 \times 10$ & $52.4/\textbf{54.9}/54.2$ & $18.8/\textbf{57.9}/40.1$ & $10.1/\textbf{58.0}/36.1$ \\
$20 \times 10$ & $40.6/\textbf{77.6}/76.2$ & $16.6/\textbf{69.1}/61.2$ & $9.2/\textbf{64.2}/37.2$ \\
$20 \times 15$ & $37.9/\textbf{65.6}/67.0$ & $16.5/\textbf{77.6}/64.9$ & $7.1/\textbf{58.2}/29.2$ \\
\end{tabular} \\ Scale $\tau$\\
\end{tabular}
\end{center}
\caption{Relative root mean square error (\%) for comparison of estimates based on censored log-likelihood (left) and the gradient score with weights $w^1$ (middle) and $w^2$ (right) relative to those based on the spectral log-likelihood, for the parameters $\kappa$ and $\tau$. Efficiency of $100\%$ corresponds to the performance of the optimal maximum spectral log-likelihood estimator, and smaller values show less efficient estimators. Inference is performed using the top 1\% of $10000$ simulated Pareto processes with semi-variogram $\gamma(s,s') =\left(\|s - s'\|/\tau\right)^\kappa/2$. The scale parameter is $\tau = 2.5$ and grids are regular of sizes $10 \times 10$, $20 \times 10$ and $20 \times 15$ on $[0,100]^2$.}
\label{tab: mvn est Pareto}
\end{table}
The optimization of the spectral likelihood and gradient score functions takes only a dozen seconds even for the finest grid. The same random starting point is used for each optimization to ensure fair comparison. Estimation using the censored approach takes several minutes and slows greatly as the dimension increases; see Appendix~\ref{app: computation time}.
\subsection{Domain of attraction}\label{sec: simul BR}
As in practice the asymptotic regime is never reached, we now compare the robustness of each inference procedure for finite thresholds. The Brown--Resnick process belongs to its own max-domain of attraction, so its peaks-over-threshold distribution converges to a generalized Pareto process with log-Gaussian random function. We repeat the simulation study of Section~\ref{sec: simul Pareto} with $10,000$ Brown--Resnick processes and the same parameter values, adding $\kappa = 1.8$. Simulation of the max-stable processes uses the algorithm of \citet{Dombry2016} and is computationally expensive, so we restricted the simulation to $300$ variables. It takes around $3$ hours using $16$ cores to generate $N=10,000$ samples on the finest grid.
\begin{table}
\begin{center}
\begin{tabular}{c}
\begin{tabular}{c c c c c}
Grid size & $\kappa = 0.5$ & $\kappa = 1$ & $\kappa = 1.3$ & $\kappa = 1.8$ \\ \hline
$10 \times 10$ & $\textbf{153.8}/111.3/80.6$ & $\textbf{472.9}/183.1/107.9$ & $\textbf{196.1}/169.5/105.3$ & NC\\
$20 \times 10$ & $\textbf{171.7}/121.8/95.4$ & $\textbf{413.4}/149.6/113.9$ & $\textbf{308.9}/181.2/136.8$ & $144.4/\textbf{167.8}/121.8$ \\
$20 \times 15$ & $\textbf{142.4}/119.4/99.4$ & $\textbf{369.2}/133.3/109.6$ & $\textbf{313.7}/170.1/139.5$ & $163.2/\textbf{173.1}/136.6$ \\
\end{tabular} \\ Shape $\kappa$\\ \\
\begin{tabular}{c c c c c}
Grid size & $\kappa = 0.5$ & $\kappa = 1$ & $\kappa = 1.3$ & $\kappa = 1.8$ \\ \hline
$10 \times 10$ & $106.7/\textbf{126.5}/115.6$ & $\textbf{262.49}/38.2/34.6$ & $109.0/231.4/\textbf{451.5}$ & NC \\
$20 \times 10$ & $105.3/\textbf{133.3}/119.2$ & $\textbf{205.7}/94.2/79.7$ & $\textbf{314.8}/65.7/53.2$ & $104.5/\textbf{335.5}/261.2$ \\
$20 \times 15$ & $103.8/\textbf{138.1}/125.9$ & $\textbf{173.4}/101.9/89.7$ & $\textbf{289.5}/91.5/45.8$ & $102.8/\textbf{211.1}/144.3$ \\
\end{tabular} \\ Scale $\tau$\\
\end{tabular}
\end{center}
\caption{Relative root mean square error (\%) for the censored log-likelihood (left) and the gradient score with weights $w^1$ (middle) and $w^2$ (right) relative to those based on the spectral log-likelihood for the parameters $\kappa$ and $\tau$. An efficiency of $100\%$ corresponds to the performance of the maximum spectral log-likelihood estimator, and larger values show more efficient estimators. Inference is based on the top 1\% of $10000$ simulated Brown--Resnick processes with semi-variogram $\gamma(s,s') = \left(\|s - s'\|/\tau\right)^\kappa/2$. In each case the scale parameter equals $\tau=2.5$ and grids are regular of sizes $10 \times 10$, $20 \times 10$ and $20 \times 15$. ``NC'' means that optimization does not converge.}
\label{tab: br est efficiency}
\end{table}
Table~\ref{tab: br est efficiency} shows the results.
As expected when the model is misspecified, the relative root mean square error is mainly driven by bias, which increases with the shape $\kappa$ and the dimension $I$.
Spectral likelihood estimation is least robust overall, and for this reason it is outperformed by both other methods.
For $\kappa = 0.5$, the three methods show fairly similar performance, with the censored likelihood better capturing the shape parameter, whereas the gradient score does better for the scale. The moderate extremal dependence cases, with $\kappa = 1$ and 1.3, are dominated by the censored likelihood, whereas for the weak extremal dependence, $\kappa = 1.8$, the gradient score performs best, because too much information is lost by censoring. For the 100-point grid, the optimization procedures do not converge when the extremal dependence is too weak. Comparison of the weighting functions $w^1$ and $w^2$ reveals that the choice of the weighting function $w$ affects the robustness of the gradient score. Further simulations, not shown in this paper, show that $w$ tailored to specific types of misspecification can produce very robust estimates. Computation times are similar to those in Section~\ref{sec: simul Pareto}.
Quantile-quantile plots show that the score-matching estimators are very close to normally distributed, but censored likelihood estimates can deviate somewhat from normality due to the quasi-Monte Carlo approximation; this can be remedied by increasing the value of $p$.
To summarise: for weak extremal dependence, the three types of estimator are roughly equivalent. For moderate extremal dependence, we recommend using the censored likelihood if the number of variables permits ($I \lesssim 500$ with our computational capabilities), though if extremal independence is reached at far distances and the grid is dense, the gradient score is a very good substitute. For gridded applications with fine resolution, the gradient score appears to be the best choice for its robustness and because it does not suffer from dimensionality limitations.
\section{Extreme rainfall over Florida}\label{sec: case study}
\subsection{General}
We fit a $r$-Pareto process based on the Brown--Resnick model to radar measurements of rainfall taken every $15$ minutes during the wet season, June--September, from $1999$ to $2004$ on a regular 2~km grid in a 120~km$\times$120~km region of east Florida; see Figure~\ref{fig: florida}. There are 3,600 spatial observations in each radar image, and $58,560$ images in all. The region was chosen to repeat the application of \citet{Buhl2015}, but in a spatial setting only; a spatio-temporal model is outside the scope of the present paper.
\citet{Buhl2015} analysed daily maxima for 10~km$\times$10~km squares, but we use non-aggregated data to fit a non-separable parametric model for spatial extremal dependence, using single extreme events instead of daily maxima.
The marginal distributions for each grid cell were first locally transformed to unit Pareto using their empirical distribution function. For general application, where we wish to extrapolate the distribution above observed intensities, a model for the marginal distributions of exceedances is needed, but since our goal here is to illustrate the feasibility of dependence model estimation on dense grids, we treat marginal modelling as outside the scope of this study.
\begin{figure}
\caption{Radar rainfall measurement grid ($2\text{ km} \times 2\text{ km}$ resolution) over a 120~km$\times$120~km region of east Florida.}
\label{fig: florida}
\end{figure}
\subsection{Multivariate extremal dependence model}\label{sec: model fitting}
The spatial model of \citet{Buhl2015} is fully separable, i.e., it is a sum of two separate semi-variograms. This has the advantage that inference for each direction can be performed separately, but it cannot capture any anisotropy that does not follow the axis of the grid, i.e., is not in the South-North or East-West directions. Furthermore their pairwise likelihood approach focuses on short-distance pairs, and so might mis-estimate dependence at longer distances.
To better capture possible anisotropy, we use the non-separable semi-variogram model
\begin{equation}\label{eq: model}
\gamma(s_i,s_j) = \left\| \frac{\Omega (s_i - s_j)}{\tau} \right\|^\kappa, \quad s_i,s_j \in [0,120]^2, \quad i,j \in \{1, \dots ,3600\},\quad 0 < \kappa \leqslant 2,\tau>0,
\end{equation}
and anisotropy matrix
\begin{equation}\label{eq: anisotropic matrix}
\Omega =
\left[\begin{array}{cc}\cos \eta & -\sin \eta \\ a \sin \eta & a \cos \eta\end{array} \right], \quad \eta \in \left(-\frac{\pi}{2}; \frac{\pi}{2}\right], \quad a \geqslant 1.
\end{equation}
The semi-variogram $\gamma$ achieves asymptotic extremal independence as the distance between sites tends to infinity, i.e., the pairwise extremal index $\theta \rightarrow 2$ as $\| s - s'\| \rightarrow \infty$.
To apply the peaks-over-threshold methodology, we must define exceedances by choosing risk functionals.
We focus on two types of extremes: local very intense rainfall at any point of the region, and high cumulative rainfall over the whole region, both of which can severely damage infrastructure.
We therefore take the risk functionals
\begin{equation}
r_{\max}(X^\ast) = \left[ \sum_{i = 1}^I \left\{X^\ast(s_i)\right\}^{20}\right]^{1/20}, \quad
r_{\text{sum}}(X^\ast) = \left[ \sum_{i = 1}^I \left\{X^\ast(s_i)\right\}^{\xi_0}\right]^{1/ \xi_0} .
\end{equation}
The function $r_{\text{max}}$ is a differentiable approximation to $\max_{i = 1,\dots,I} X(s_i)$, which cannot be used with the gradient score because of its non-differentiability. Censored likelihood is computationally out of reach with so many locations.
Directly summing normalized observations $X^*$ makes no physical sense, so we use a modified $r_{\text{sum}}$, using $\xi_0 = 0.114$, chosen as the mean of independent local estimates of a generalized Pareto distribution; this can be seen as a transformation back to the original data scale.
The function $r_{\text{sum}}$ selects extreme events with large spatial extent.
We fitted univariate generalized Pareto distributions to $r_{\text{sum}}(x_m^\ast)$ and $r_{\max}(x_m^\ast)$ ($m =1, \dots, 58560$) with increasing thresholds.
The estimated shape parameters are stable around the $99.9$ percentile, which we used for event selection, giving $59$ exceedances; 2 events were found to be extreme relative to both risk functionals. Here we merely illustrate the feasibility of high-dimensional inference, so we treat them as independent, but in practice temporal declustering should be considered.
Optimization of the gradient score with the ${w}^1$ weighting function on a $16$-core cluster took from $1$ to $6$ hours, depending on the initial point.
Different initial points must be considered because of the possibility of local maxima.
Results are shown in Table~\ref{tab: model estimates}, where
standard deviations are obtained using a jackknife procedure with $20$ blocks. Both the estimated bias and variance are fairly low. {For $r_{\text{sum}}(x_m^\ast)$,} we obtain a model similar to that of \citet{Buhl2015}.
The estimated parameters differ appreciably for the two risk functionals, suggesting the presence of a mixture of types of extreme events.
The structure for $r_{\max}$ is consistent with the database, in which the most intense events tend to be spatially concentrated.
Our model suggests higher dependence for middle distances than was found by \citet{Buhl2015}, but they note that their model underestimates dependence, especially for high quantiles.
The estimated smoothness parameters are very close.
For $r_{\text{sum}}$, the estimated parameters shows strong extremal dependence even at long distances, corresponding to exceedances of cumulated rainfall with large spatial cover.
Depending on the risk functional, the model represents either local rainfall, using $r_{\max}$, or events with wide coverage, using $r_{\text{sum}}$.
Anisotropy was introduced as in \citet{Buhl2015}, but as $\widehat a\approx 1$, it does not seem necessary.
\begin{table}
\begin{center}
\begin{tabular}{ c c c c c }
Risk functional & $\kappa$ & $\tau$ & $\eta$ & $a$ \\ \hline
$r_{\text{sum}}$ & $0.814~(0.036)$ &$25.63~(4.70)$ & $-0.009~(0.458)$ & $1.059~(0.031)$ \\
$r_{\max}$& $0.955~(0.048)$ &$3.54~(0.67)$ & $ -0.316~(0.410)$ & $0.94~(0.029)$ \\
\end{tabular}
\end{center}
\caption{Parameter estimates (standard errors) for a Brown--Resnick process with the semi-variogram $\gamma(s,s') = \left\{\|\Omega(s - s')\|/\tau\right\}^\kappa$ obtained by maximization of the gradient score for events corresponding to $60$ highest exceedances of the risk functionals $r_{\text{sum}}$ and $r_{\max}$ for the Florida radar rainfall data. Standard errors are obtained using a jackknife with $20$ blocks.}
\label{tab: model estimates}
\end{table}
\subsection{Model checking and simulation}
For model checking, we propose to use the conditional probability of exceedances,
\begin{equation}\label{eq: cond prob}
\pi_{ij} = \Pr\left[X^\ast(s_j) > u_j \mid \{X^\ast(s_i ) > u_i\} \cap \{r(X^\ast/ u)>1\} \right] = 2 \left\{1 - \Phi\left(\sqrt{\frac{\gamma_{ij}}{2}}\right)\right\},
\end{equation}
where $\gamma_{ij}$ is the semi-variogram for sites $s_i$ and $s_j$ ($i,j = 1,\dots,3600$), as defined in~(\ref{eq: brown resnick}).
A natural estimator for $\pi_{ij}$ is
\begin{equation}\label{eq: cond prob estimator}
\widehat{\pi}_{ij} = \frac{\sum_{n=1}^N \mathbb{1}\left[\left\{r\left({x^{\ast n}}/{u}\right) > 1\right\} \cap \left\{ x^{\ast n}_i > u_i \right\}\cap \left\{x^{\ast n}_j > u_j\right\}\right]}{\sum_{n = 1}^N \mathbb{1}\left[\left\{r\left({x^{\ast n}}/{u}\right) > 1 \right\} \cap \left\{ x^{\ast n}_i > u_i\right\}\right]},
\end{equation}
whose asymptotic behaviour can easily be adapted from \citet{Davis2009}.
For both risk functionals, the fitted model, represented by the solid black lines in Figure~\ref{fig: model checking}, follows the cloud of estimated conditional exceedance probabilities reasonably well and captures the general trend, but fails to represent some local variation, perhaps owing to a lack of flexibility of the power model; a more complex dependence model might be considered.
\begin{figure}
\caption{Estimated conditional probability of exceedance $\pi_{ij}$ against the distance between sites; the solid black lines show the fitted model.}
\label{fig: model checking}
\end{figure}
Finally, we use the models fitted in Section~\ref{sec: model fitting} to simulate events with intensities equivalent to the $60$ most intense events found by our risk functionals. Simulation is performed by generating a Pareto process with the fitted dependence structure, as in Section~\ref{sec: simul Pareto}. Figure~\ref{fig: rainfall simulations sum} shows results for $r_{\text{sum}}$ and $r_{\max}$; its upper row contains observations from the database, and the second row shows representative simulations.
The simulations seem reasonable for both risk functionals; they successfully reproduce both the spatial dependence and the intensity of the selected observations. A closer examination suggests that in both cases the models produce over-smooth rainfall fields. This could be addressed by improving event selection using risk functionals $r$ that characterize special spatial structures or physical processes. Also, as we fail to detect anisotropy, more complex models for dependence that integrate possible stochasticity of the spatial patterns might be worthwhile.
\begin{figure}
\caption{Fifteen-minute cumulated rainfall (inches), observed (first row) and simulated (second row) for the risk functionals $r_{\text{sum}}$ and $r_{\max}$.}
\label{fig: rainfall simulations sum}
\end{figure}
\section{Discussion}
In this paper high-dimensional inference methods for $r$-Pareto processes associated to log-Gaussian random vectors were developed, implemented and compared. When simulating from the true model, spectral likelihood estimation performs best, closely followed by gradient score estimation, but censored likelihood estimation was found to perform better with simulations from the domain of attraction, except in cases of weak extremal dependence, where it is outperformed by the gradient score. Even with computational improvements, use of the censored likelihood is limited to a few hundred variables at most. The gradient score is a good compromise, attractive for its robustness and because it allows a range of risk functionals while remaining cheap to compute. Empirical work suggests room for improvement of the robustness of the gradient score.
We used these inference methods to study extreme spatial rainfall over Florida. The resulting models can reproduce both spatial patterns and extreme intensity for spatially accumulated and local heavy rainfall. In both cases the fitted model provides a reasonable fit and simulations seem broadly consistent with observations. However, the presence of two very different dependence structures highlights the complexity of extreme rainfall and suggests that a mixture model might be considered. Our model is only a first step towards a spatio-temporal rainfall generator: more complex risk functionals should be considered that take temporal dependence into account.
This paper opens the development of spatio-temporal models for extremes using large climatological datasets, with a view toward a better understanding and estimation of risks associated with natural hazards.
\appendix
\section{High-dimensional censored likelihood}\label{app: censored likelihood}
\subsection{Computational considerations}\label{app: parallel censored likelihood}
The algorithm due to \citet{Genz2009} and implemented in the R package \verb+mvtnorm+ \citep{Genz2014} provides an unbiased estimate of multivariate normal probabilities, with an indication of its largest probable error. An improved Matlab implementation \citep{Genz2013} makes better use of quasi-Monte Carlo methods. We translated this code into \texttt{C++} to speed it up; see Appendix~\ref{app: mvn estim}.
Function evaluation is independent for each sample, so we also adapted the algorithm for GPU computing and compared different implementations.
Our \texttt{C++} implementation is about $4$ times faster than the \verb+mvtnorm+ implementation for a probable worst-case error of order $10^{-3}$.
GPU computing provides a slight improvement in speed compared to C++ for reasonably low error, but shows a significant speed-up for higher accuracies ($\lesssim 10^{-4}$).
A computation time of 1~s for estimation of one integral seems reasonable for censored likelihood, and is achievable for $I\approx 500$ for probable worst-case errors of order $10^{-3}$ without GPU computing.
Although Jensen's inequality implies that estimation of the log-likelihood function is biased for finite $p$, quasi-Monte Carlo estimation of an integral is unbiased, so for a sufficiently high $p$,
\begin{equation}\label{eq: estim approx}
\log \Phi^p = \log (\Phi + \epsilon^p) = \log (\Phi) + \frac{\epsilon^p}{\Phi} + o_p\left(\frac{\epsilon^p}{\Phi}\right),
\end{equation}
where $\epsilon^p$ is a random error with zero mean and bounded variance.
Using equation~(\ref{eq: estim approx}) with a small $\epsilon^p$, we have $\widehat{\theta}_{\text{cens}} \approx \mathbb E\left(\widehat{\theta}^p_{\text{cens}}\right)$.
On a multi-node cluster, for scalability purposes, it is more efficient to combine independent estimates $\widehat{\theta}^p_{\text{cens},q}$ ($q = 1,\dots, \bar{p}$) into $\widetilde{\theta}^{\bar{p}} = \bar{p}^{-1} \sum_{q = 1}^{\bar{p}} \widehat{\theta}^p_{\text{cens},q}$ than to compute a single estimate $\widehat{\theta}^{p \times \bar{p}}_{\text{cens}}$ with $p \times \bar{p}$ samples in the quasi-Monte Carlo procedures.
Indeed, maximization of $\ell^p_{\text{cens}}(\theta)$ requires a reduction step, in which the computations performed on each node are assembled, for every evaluation of the objective function.
Hence for a cluster with several nodes, where communication is usually slow and reduction steps expensive, $\widetilde{\theta}^{\bar{p}} $ is more efficient because the computation of several $\widehat{\theta}^p_{\text{cens},q}$ can be done independently on different nodes.
Moreover, use of $\widetilde{\theta}^{\bar{p}}$ allows $\text{var}(\widehat{\theta}^p_{\text{cens}})$ to be estimated.
We parallelized the above inference procedure on a cluster with $12$ nodes each of $16$ cores.
First, computation of $\ell^p_{\text{cens}}(\theta)$ was parallelized within each node using the R package \verb+parallel+.
The time needed to compute the censored likelihood for a $300$-dimensional vector for a generalized Pareto process associated to a log-Gaussian random function with $p = 499$ and different dependence strengths dropped from minutes to a dozen seconds.
Each node performs an independent maximization using the R routine \verb+optim+. Even if slightly biased, this approach is computationally efficient for our cluster infrastructure.
If the empirical variance of $\widehat{\theta}^p_{\text{cens}}$ is too high then the number of samples $p$ should be increased.
For high accuracy and/or complex models, GPU computing may be relevant.
Lastly, the tolerance of the optimization algorithm must be reduced for low $p$ to ensure its convergence if the quasi-Monte Carlo estimates vary substantially.
\subsection{Algorithm for multivariate normal distribution function estimation} \label{app: mvn estim}
This algorithm is a simplified version of that of \citet{Genz2009}. To estimate the $I$-dimensional multivariate normal distribution $\Phi_I(x, \Sigma)$:
\begin{enumerate}
\item input covariance matrix $\Sigma$, upper bound $x$, number of deterministic samples $p$, number of random shifts $p'$ and generating vector $v$;
\item compute lower triangular Cholesky factor $\textbf{L}$ for $\Sigma$, permuting $x$, and rows and columns of $\Sigma$ for variable prioritisation;
\item initialize $\Phi = 0$, $\delta = 0$ and $V = 0$;
\item for $q'$ in $1,\ldots,p'$:
\begin{enumerate}
\item set $I_{q'} = 0$ and generate uniform random shift $\Delta \in [0,1]^I$;
\item for $q$ in $1,\ldots, p$:
\begin{enumerate}[(i)]
\item set $z_q = |2 \times \overline{( qv + \Delta)} - 1|$
$e_1 = \Phi(x_1/l_{1,1})$
$f_1 = e_1$;
\item for $i$ in $2,\ldots, I$
set $y_{i-1} = \Phi^{-1}(z_{q,i-1} e_{i-1})$
$e_i = \Phi\left(\frac{x_i -\sum_{j = 1}^{i-1}l_{i,j}y_j}{l_{i,i}}\right)$
$f_i = e_i f_{i-1}$
End $i$ loop;
\item set $I_{q'} = I_{q'}+ (f_I-I_{q'})/q$;
\end{enumerate}
End $q$ loop;
\item Set $\delta = (I_{q'} - \Phi)/q'$, $\Phi = \Phi + \delta$, $V = (q' -2)V/q' + \delta^2$ and ${\rm ERR} = \alpha\sqrt{V}$;
\end{enumerate}
end $q'$ loop;
\item output $\Phi \approx \Phi_I(x, \Sigma)$ with error estimate ${\rm ERR}$.
\end{enumerate}
\section{Gradient score for Brown--Resnick processes} \label{app: BR score}
\cite{Wadsworth2014} derive an alternative expression for the intensity function (\ref{eq: cdfBR Engelke}):
\begin{equation}\label{eq: cdfBR Wadsworth}
\begin{aligned}
\lambda_{\theta}(x) = & \frac{|\det \Sigma_{\theta}^*|^{-1/2}(1^T_I \rho)^{-1/2}}{(2\pi)^{(I-1)/2} x_1 \cdots x_I} \exp \left(-\frac{1}{2} \left[ \log x^T \hat{\Gamma} \log x + \log x^T \left\{ \frac{2\rho}{1^T_I \rho} + \left(\Sigma^*_{\theta}\right)^{-1} \sigma - \frac{\rho\rho^T\sigma}{1^T_I \rho}\right\}\right]\right) \\
& \times \exp \left[-\frac{1}{2} \left\{\frac{1}{4} \sigma^T\left(\Sigma^*_{\theta}\right)^{-1}\sigma - \frac{1}{4} \frac{\sigma^T \rho\rho^T \sigma}{1^T_I \rho} + \frac{\sigma^T \rho}{1^T_I \rho} - \frac{1}{1^T_I \rho}\right\} \right] , \quad x \in A_{r}(u),
\end{aligned}
\end{equation}
where $\Sigma^*_\theta$ is the $I$-dimensional covariance matrix of a non-stationary Gaussian process with semi-variogram $\gamma$, $\rho = \left(\Sigma^*_{\theta}\right)^{-1}1_I$, $\hat{\Gamma} = \left(\Sigma^*_{\theta}\right)^{-1} - \rho\rho^T/1^T_I \rho$ and $\sigma = \text{diag}(\Sigma^*_{\theta})$.
This expression is symmetric and thus it is more convenient to compute its gradient and Laplacian.
The gradient of the density function $\lambda_{\theta,u}^{r}$ with respect to $x$ and with the notation of equation (\ref{eq: cdfBR Wadsworth}) is
\begin{equation}
\nabla_{x} \log \lambda_{\theta,u}^{r} (x) = - \hat{\Gamma} \log x \otimes \frac{1}{x} - \frac{1}{2x} \otimes \left(\frac{2\rho}{1^T_I \rho} + 2 + \left(\Sigma^*_\theta\right)^{-1}\sigma - \frac{\rho\rho^T \sigma}{1^T_I \rho}\right), \quad x \in A_{r}(u), \quad u >0,
\end{equation}
where $\otimes$ is the Hadamard product, $1_I$ is an $I$-dimensional vector with unit components, $\Sigma^*_\theta$ is the covariance matrix of the non-stationary Gaussian process with semi-variogram $\gamma_\theta$, $\rho = \left(\Sigma^*_\theta\right)^{-1}1_I$, $\hat{\Gamma} = \left(\Sigma^*_\theta\right)^{-1} - \rho\rho^T/1^T_I\rho$ and $\sigma = \text{diag}(\Sigma_\theta^*)$.
The Laplacian of this density function, $\triangle_{x} \log \lambda_{\theta,u}^{r}(x)$, equals
\begin{equation}
-\text{diag}(\hat{\Gamma})^T\left[ \frac{1-\log x}{x^2}\right] + \left\|(\hat{\Gamma} - \text{diag}(\hat{\Gamma})) \log x \otimes \frac{1}{x^2}\right\|_1 + \frac{1}{(2x^2)^T} \left\{\frac{2\rho}{1^T_I \rho} + 2 + \left(\Sigma^*_\theta\right)^{-1}\sigma - \frac{\rho\rho^T \sigma}{1^T_I \rho}\right\},
\end{equation}
where $x \in A_{r}(u)$, $u >0$ and $\| \cdot \|_1$ denotes the $L_1$ norm.
\section{Average computation times of the fitting procedures} \label{app: computation time}
\begin{table}[H]
\begin{center}
\begin{tabular}{ c c c c c }
Grid size & $\kappa$ & Spectral likelihood & Censored log-likelihood & Gradient score \\ \hline
\multirow{3}{*}{$10 \times 10$} & 0.5 &4 & 135 & 6 \\% \cline{2-5}
& 1 &4 & 140 & 4.9 \\
& 1.3 &4.5 & 129 & 4.8 \\
\\
\multirow{3}{*}{$20 \times 10$} & 0.5 &14.3 &486& 10 \\
& 1 &6 & 492 & 9.7 \\
& 1.3 &6.7 & 483 & 9.8 \\
\\
\multirow{3}{*}{$20 \times 15$} & 0.5 &14 & 1190 & 18 \\
& 1 &14 & 1217 & 16.4 \\
& 1.3 &14.6 & 1236 & 18.8 \\
\end{tabular}
\end{center}
\caption{Average times (s) of the optimization for the different objective functions, when fitting a Brown--Resnick process applied to the three different semi-variogram models $\gamma$ with $\kappa= \{0.5,1,1.3\}$ and the three grids $10 \times 10$, $20 \times 10$ and $20 \times 15$. Random starting points are used for fair comparison.}
\label{tab: time est Pareto}
\end{table}
\section{Proof of the Proposition} \label{app: mda score normality}
Let $(\mathrm{y}^m)_{m = 1, \dots, n}$ be independent replicates of a regularly-varying random vector $Y$ with normalized marginals and measure $\nu_{\theta_0}$.
Let $k_u = k_u(n)$ be a sequence of integers, where $n$ is the sample size, satisfying $ k_u(n) \rightarrow \infty$ and $ k_u(n) = o(n)$ as $n \rightarrow \infty$ and suppose we only keep vectors such that $\left\{r(\mathrm{y}^m)\right\}_{m = 1, \dots, n}$ exceeds the threshold $n/k_u$, i.e., we retain the set
$$
A_{r}\left(\frac{n}{k_u}\right) = \left\{ \tilde{\mathrm{y}} : r\left(\tilde{\mathrm{y}}\right) = r\left(\frac{k_u}{n}\mathrm{y}\right) > 1 \right\}.
$$
For any $A \in \mathbb{R}^I_+$, we first need the asymptotic normality of the empirical measure
$$
\tilde{\nu}_{k_u}\left(A\right) = \frac{1}{k_u} \sum_{m = 1}^n \mathbb{1}\left(\tilde{\mathrm{y}}^m \in A \right).
$$
Since $G$ is in the max--domain of attraction of $P$, Proposition~2.1 in \citet{DeHaan1993} gives
\begin{equation}\label{eq: convergence of measure}
\tilde{\nu}_{k_u}\left(A\right) \xrightarrow{\text{Pr}} \nu\left(A\right), \quad A \in \mathbb{R}^I_+, \quad n \rightarrow \infty,
\end{equation}
where $\nu$ is the exponent measure associated to the multivariate extreme value distribution $P$ and $\xrightarrow{\text{Pr}}$ denotes convergence in probability.
Moreover, following Propositions~3.1 and~3.2 in \citet{DeHaan1993}, define the random field $Z_n(x)$, $x \in (0, \infty]^I$, by
$$
Z_n(x) = \sqrt{k_u} \left\{ \tilde{\nu}_{k_u}\left((0, x]^c\right) - \nu\left((0, x]^c\right) \right\} , \quad x \in (0, \infty]^I.
$$
There exists a zero-mean Gaussian random field $Z(x)$, $x \in (0, \infty]^I$, with continuous sample paths and covariance function
$$
\text{Cov}\left\{Z(x^1), Z(x^2) \right\} = \nu \left\{ (0, x^1]^c \cap (0, x^2]^c \right\}, \quad x^1,x^2 \in (0, \infty]^I,
$$
such that $Z_n(x)$ converges weakly to $Z(x)$ in the space of c\`adl\`ag functions defined on $ (0, \infty]^I$ equipped with the Skorohod topology.
Now let $\delta$ be a proper scoring rule satisfying the regularity conditions of Theorem 4.1~of \citet{Dawid2014}.
The maximum scoring rule estimator $\widehat{\theta}_{\delta,k_u}^{r}$ is defined by
$$
\sum_{\left\{m, \mathrm{y}^m \in A_{r}\left(n/k_u\right)\right\}} \nabla_\theta \delta\left(\widehat{\theta}_{\delta,k_u}^{r}, \mathrm{y}^m \right) = 0,
$$
which is equivalent to
$$
\frac{1}{k_u} \int_{A_{r}\left(n/k_u\right)} \nabla_\theta \delta\left(\widehat{\theta}_{\delta,k_u}^{r}, \mathrm{y} \right) \tilde{\nu}_{k_u}\left(\text{d}\mathrm{y}\right)= 0.
$$
The second-order condition in the hypothesis of Theorem 4.1 in \citet{Dawid2014} allows us to use a Taylor expansion around $\theta_0$, yielding
$$
\frac{1}{k_u}\int_{A_{r}\left(n/k_u\right)} \nabla_\theta \delta\left(\theta_0, \mathrm{y} \right) \tilde{\nu}_{k_u}\left(\text{d}\mathrm{y}\right) + \left(\widehat{\theta}_{\delta,k_u}^{r} - \theta_0\right) \frac{1}{k_u}\int_{A_{r}\left(n/k_u\right)} \nabla_\theta^2 \delta\left(\theta_0, \mathrm{y} \right) \tilde{\nu}_{k_u}\left(\text{d}\mathrm{y}\right) + o\left\{\left(\widehat{\theta}_{\delta,k_u}^{r} - \theta_0\right)\right\} = 0.
$$
Also equation~(\ref{eq: convergence of measure}) ensures that
$$
\frac{1}{k_u}\int_{A_{r}\left(n/k_u\right)} \nabla_\theta^2 \delta\left(\theta_0, \mathrm{y} \right) \tilde{\nu}_{k_u}\left(\text{d}\mathrm{y}\right) \xrightarrow{\text{Pr}} \text{E}_P\left\{\frac{\partial^2\delta}{\partial \theta^2}(\theta_0) \right\} = K,
$$
and using the convergence of $Z_n$, we get
$$
\frac{1}{k_u}\int_{A_{r}\left(n/k_u\right)} \nabla_\theta \delta\left(\theta_0, \mathrm{y} \right) \tilde{\nu}_{k_u}\left(\text{d}\mathrm{y}\right) \xrightarrow{D} \mathcal{N}\left[0, \mathbb E_P \left\{\frac{\partial\delta}{\partial \theta}(\theta_0)\frac{\partial\delta}{\partial \theta}(\theta_0)^T\right\}\right], \quad n \rightarrow \infty.
$$
Then it is straightforward to see that
$$
\sqrt{k_u}\left(\widehat{\theta}_{\delta,k_u}^{r} - \theta_0\right) \xrightarrow{D} \mathcal{N}\left\{0, K^{-1}J\left(K^{-1}\right)^T\right\}, \quad n \rightarrow \infty,
$$
with $J = \mathbb E_P \left\{\partial\delta/\partial \theta(\theta_0)\partial\delta/\partial \theta(\theta_0)^T\right\}$.
\section{Pareto process simulation}\label{app: pareto sims}
To compare the performance of our estimators in Section \ref{sec: simul Pareto}, the simulation of a Pareto $P$ process for $I > 0 $ locations over $[0,100]^2$ with semi-variogram $\gamma$ is done as follows:
\begin{itemize}
\item for regularly spaced locations $\{s_1,\ldots, s_I\}\in [0,100]^2$, choose $i\in\{1,\ldots, I\}$ uniformly at random;
\item for a given semi-variogram $\gamma(s,s')$, $s,s' \in [0,100]^2$, generate an $(I-1)$-dimensional Gaussian vector $Z$ with covariance matrix $\Sigma = \{ \gamma(s_j,s_i) + \gamma(s_k,s_i) - \gamma(s_j,s_k) \}_{ j,k \in \left\{1, \dots, I\right\} \setminus \{i\}}$ and mean $\mu = \{- \gamma(s_j,s_i)\}_{ j \in \left\{1, \dots, I\right\} \setminus \{i\}}$, i.e., conditional on the value at $s_i$;
\item set $Q_i = 1$ and $Q_1=\exp(Z_1), \dots, Q_{i-1}=\exp(Z_{i-1}), Q_{i+1}=\exp(Z_i),\dots, Q_{I} = \exp( Z_{I-1})$;
\item generate a Pareto random variable $U$ with distribution function $1 - 1/x$ ($x > 1$) and set $P = U Q/ \|Q\|_1$;
\item return $P$.
\end{itemize}
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
In the present paper, we consider the Cauchy problem of fourth order nonlinear
Schr\"odinger type equations with a derivative nonlinearity.
In the one dimensional case, we prove that the fourth order nonlinear Schr\"odinger equation with the derivative quartic nonlinearity $\partial _x (\overline{u}^4)$ is small data globally well-posed in time, and the solution scatters to a free solution.
Furthermore, we show that the same result holds for $d \ge 2$ and derivative polynomial type nonlinearities, for example $|\nabla | (u^m)$ with $(m-1)d \ge 4$.
\\
\noindent {\it Key Words and Phrases.} Schr\"odinger equation, well-posedness, Cauchy problem, scaling critical, multilinear estimate, bounded $p$-variation.\\
2010 {\it Mathematics Subject Classification.} 35Q55, 35B65.
\end{abstract}
\section{Introduction\label{intro}}
We consider the Cauchy problem of the fourth order nonlinear Schr\"odinger type equations:
\begin{equation}\label{D4NLS}
\begin{cases}
\displaystyle (i\partial_{t}+\Delta ^2)u=\partial P_{m}(u,\overline{u}),\hspace{2ex}(t,x)\in (0,\infty )\times {\BBB R}^{d} \\
u(0,x)=u_{0}(x),\hspace{2ex}x\in {\BBB R}^{d}
\end{cases}
\end{equation}
where $m\in {\BBB N}$, $m\geq 2$, $P_{m}$ is a polynomial which is written by
\[
P_{m}(f,g)=\sum_{\substack{\alpha ,\beta \in {\BBB Z}_{\geq 0}\\ \alpha +\beta=m}}f^{\alpha}g^{\beta},
\]
$\partial$ is a first order derivative with respect to the spatial variable, for example a linear combination of
$\frac{\partial}{\partial x_1} , \, \dots , \, \frac{\partial}{\partial x_d}$ or $|\nabla |= \mathcal{F}^{-1}[|\xi | \mathcal{F}]$
and the unknown function $u$ is ${\BBB C}$-valued.
The fourth order Schr\"{o}dinger equation with $P_{m}(u,\overline{u})=|u|^{m-1}u$ appears in the study of deep water wave dynamics \cite{Dysthe}, solitary waves \cite{Karpman}, \cite{KS}, vortex filaments \cite{Fukumoto}, and so on.
The equation (\ref{D4NLS}) is invariant under the following scaling transformation:
\[
u_{\lambda}(t,x)=\lambda^{-3/(m-1)}u(\lambda^{-4}t,\lambda^{-1}x),
\]
and the scaling critical regularity is $s_{c}=d/2-3/(m-1)$.
The aim of this paper is to prove the well-posedness and the scattering for the solution of (\ref{D4NLS})
in the scaling critical Sobolev space.
There are many results for the fourth order nonlinear Schr\"{o}dinger equation
with derivative nonlinearities (see \cite{S1}, \cite{S2}, \cite{HJ1}, \cite{HHW}, \cite{HHW2}, \cite{HJ3}, \cite{S3}, \cite{HJ2}, \cite{Y12}, \cite{HN15_1}, \cite{HN15_2}, and references cited therein).
Especially, the one dimensional case is well studied.
Wang (\cite{Y12}) considered (\ref{D4NLS}) for the case $d=1$, $m=2l+1$, $l\ge 2$, $P_{2l+1}(u,\overline{u})=|u|^{2l}u$
and proved the small data global in time well-posedness for $s=s_{c}$ by using Kato type smoothing effect.
But he did not treat the cubic case.
Actually, a technical difficulty appears in this case (see Theorem \ref{notC3} below).
Hayashi and Naumkin (\cite{HN15_1}) considered (\ref{D4NLS}) for $d=1$ with the power type nonlinearity $\partial_{x}(|u|^{\rho -1}u)$ ($\rho >4$)
and proved the global existence of the solution and the scattering in the weighted Sobolev space.
Moreover, they (\cite{HN15_2}) also proved that the large time asymptotics is determined by the self similar solution in the case $\rho =4$.
Therefore, the derivative quartic nonlinearity in one spatial dimension is critical in the sense of the asymptotic behavior of the solution.
We firstly focus on the quartic nonlinearity $\partial _x (\overline{u}^4)$ in one space dimension.
Since this nonlinearity has some good structure, the global solution scatters to a free solution in the scaling critical Sobolev space.
Our argument does not apply to \eqref{D4NLS} with $P (u,\overline{u}) = |u|^3 u$ because we rely on the Fourier restriction norm method.
Now, we give the first results in this paper.
For a Banach space $H$ and $r>0$, we define $B_r(H):=\{ f\in H \,|\, \|f\|_H \le r \}$.
\begin{thm}\label{wellposed_1}
Let $d=1$, $m=4$ and $P_{4}(u,\overline{u})=\overline{u}^{4}$. Then the equation {\rm (\ref{D4NLS})} is globally well-posed for small data in $\dot{H}^{-1/2}$.
More precisely, there exists $r>0$ such that for any $T>0$ and all initial data $u_{0}\in B_{r}(\dot{H}^{-1/2})$, there exists a solution
\[
u\in \dot{Z}_{r}^{-1/2}([0,T))\subset C([0,T );\dot{H}^{-1/2})
\]
of {\rm (\ref{D4NLS})} on $(0, T )$.
Such solution is unique in $\dot{Z}_{r}^{-1/2}([0,T))$ which is a closed subset of $\dot{Z}^{-1/2}([0,T))$ {\rm (see Definition~\ref{YZ_space} and (\ref{Zr_norm}))}.
Moreover, the flow map
\[
S^{+}_{T}:B_{r}(\dot{H}^{-1/2})\ni u_{0}\mapsto u\in \dot{Z}^{-1/2}([0,T))
\]
is Lipschitz continuous.
\end{thm}
\begin{rem}
We note that $s=-1/2$ is the scaling critical exponent of (\ref{D4NLS}) for $d=1$, $m=4$.
\end{rem}
\begin{cor}\label{sccat}
Let $r>0$ be as in Theorem~\ref{wellposed_1}.
For all $u_{0}\in B_{r}(\dot{H}^{-1/2})$, there exists a solution
$u\in C([0,\infty );\dot{H}^{s_{c}})$ of (\ref{D4NLS}) on $(0,\infty )$ and the solution scatters in $\dot{H}^{-1/2}$.
More precisely, there exists
$u^{+}\in \dot{H}^{-1/2}$
such that
\[
u(t)-e^{it\Delta^2}u^{+}
\rightarrow 0
\ {\rm in}\ \dot{H}^{-1/2}\ {\rm as}\ t\rightarrow + \infty.
\]
\end{cor}
Moreover, we obtain the large data local in time well-posedness in the scaling critical Sobolev space.
To state the result, we put
\[
B_{\delta ,R} (H^s) := \{ u_0 \in H^s | \ u_0=v_0+w_0 , \, \| v_0 \| _{\dot{H}^{-1/2}} < \delta, \, \| w_0 \| _{L^2} <R \}
\]
for $s<0$.
\begin{thm} \label{large-wp}
Let $d=1$, $m=4$ and $P_{4}(u,\overline{u})=\overline{u}^{4}$.
Then the equation {\rm (\ref{D4NLS})} is locally in time well-posed in $H^{-1/2}$.
More precisely,
there exists $\delta >0$ such that for all $R \ge \delta$ and $u_0 \in B_{\delta ,R} (H^{-1/2})$ there exists a solution
\[
u \in Z^{-1/2}([0,T]) \subset C([0,T); H^{-1/2})
\]
for $T=\delta ^{8} R^{-8}$ of \eqref{D4NLS}.
Furthermore, the same statement remains valid if we replace $H^{-1/2}$ by $\dot{H}^{-1/2}$ as well as $Z^{-1/2}([0,T])$ by $\dot{Z}^{-1/2}([0,T])$.
\end{thm}
\begin{rem}
For $s>-1/2$, the local in time well-posedness in $H^s$ follows from the usual Fourier restriction norm method, which covers for all initial data in $H^s$.
It however is not of very much interest.
On the other hand, since we focus on the scaling critical cases, which involve negative regularity, we have to impose that the $\dot{H}^{-1/2}$ part of the initial data is small.
But, Theorem \ref{large-wp} is a large data result because the $L^2$ part is not restricted.
\end{rem}
The main tools of the proof are the $U^{p}$ space and $V^{p}$ space which are applied to prove
the well-posedness and the scattering for KP-II equation at the scaling critical regularity by Hadac, Herr and Koch (\cite{HHK09}, \cite{HHK10}).
We also consider the one dimensional cubic case and the high dimensional cases.
The second result in this paper is as follows.
\begin{thm}\label{wellposed_2}
{\rm (i)}\ Let $d=1$ and $m=3$. Then the equation {\rm (\ref{D4NLS})} is locally well-posed in $H^{s}$ for $s\ge 0$. \\
{\rm (ii)}\ Let $d\geq 2$ and $(m-1)d\geq 4$. Then the equation {\rm (\ref{D4NLS})}
is globally well-posed for small data in $\dot{H}^{s_{c}}$ (or $H^{s}$ for $s\ge s_{c}$)
and the solution scatters in $\dot{H}^{s_{c}}$ (or $H^{s}$ for $s\ge s_{c}$).
\end{thm}
The smoothing effect of the linear part recovers derivative in higher dimensional case.
Therefore, we do not use the $U^p$ and $V^p$ type spaces.
More precisely, to establish Theorem \ref{wellposed_2}, we only use the Strichartz estimates
and get the solution in $C([0,T);H^{s_c})\cap L^{p_m}([0,T); W^{q_m,s_{c}+1/(m-1)})$
with $p_m =2(m-1)$, $q_m =2(m-1)d/\{ (m-1)d-2\}$.
Accordingly, the scattering follows from a standard argument.
Since the condition $(m-1)d\geq 4$ is equivalent to $s_{c}+1/(m-1)\ge 0$,
the solution space $L^{p_m}([0,T); W^{q_m,s_{c}+1/(m-1)})$ has nonnegative regularity even if the data belongs to $H^{s_{c}}$ with $-1/(m-1)\le s_c <0$.
Our proof of Theorem~\ref{wellposed_2} {\rm (ii)} cannot be applied for $d=1$
since the Schr\"odinger admissible $(a,b)$ in {\rm (\ref{admissible_ab})} does not exist.
\begin{rem}
For the case $d=1$, $m=4$ and $P_{4}(u,\overline{u})\ne \overline{u}^{4}$,
we can obtain the local in time well-posedness of {\rm (\ref{D4NLS})} in $H^{s}$ for $s\ge 0$
by the same way of the proof of Theorem~\ref{wellposed_2}.
Actually, we can get the solution in $C([0,T];H^s)\cap L^4 ([0,T];W^{s+1/2,\infty })$
for $s\ge 0$ by using the iteration argument
since the fractional Leibniz rule (see \cite{CW91}) and the H\"older inequality imply
\[
\left\| |\nabla |^{s+\frac{1}{2}}\prod_{j=1}^{4}u_j \right\|_{L^{4/3}_{t}([0,T);L_{x}^{1})}
\lesssim T^{1/4}\| |\nabla |^{s+\frac{1}{2}}u_1 \|_{L^{4}_{t}L_{x}^{\infty}}\| u_2 \|_{L^{4}_{t}L_{x}^{\infty}}
\| u_3 \|_{L^{\infty}_{t}L_{x}^{2}}\| u_4 \|_{L^{\infty}_{t}L_{x}^{2}}.
\]
\end{rem}
We give a remark on our problem, which shows that the standard iteration argument does not work.
\begin{thm}\label{notC3}
{\rm (i)}\ Let $d=1$, $m=3$, $s<0$ and $P_{3}(u,\overline{u})=|u|^{2}u$. Then the flow map of {\rm (\ref{D4NLS})} from $H^s$ to $C({\BBB R} ; H^s)$ is not smooth. \\
{\rm (ii)}\ Let $m\ge 2$, $s<s_c$ and $\partial =|\nabla |$ or $\frac{\partial}{\partial x_k}$ for some $1\le k \le d$.
Then the flow map of {\rm (\ref{D4NLS})} from $H^s$ to $C({\BBB R} ; H^s)$ is not smooth.
\end{thm}
More precisely, we prove that the flow map is not $C^3$ if $d=1$, $m=3$, $s<0$ and $P_{3}(u,\overline{u})=|u|^{2}u$ or $C^m$ if $d \ge 1$, $m \ge 2$, and $s<s_c$.
This implies that the standard iteration argument fails, because the flow map would be smooth if the argument worked.
Of course, there is a gap between ill-posedness and absence of a smooth flow map.
Since the resonance appears in the case $d=1$, $m=3$ and $P_{3}(u,\overline{u})=|u|^{2}u$, there exists an irregular flow map even for the subcritical Sobolev regularity.
\text{} \\
\noindent {\bf Notation.}
We denote the spatial Fourier transform by\ \ $\widehat{\cdot}$\ \ or $\mathcal{F}_{x}$,
the Fourier transform in time by $\mathcal{F}_{t}$ and the Fourier transform in all variables by\ \ $\widetilde{\cdot}$\ \ or $\mathcal{F}_{tx}$.
The free evolution $S(t):=e^{it\Delta^{2}}$ is given as a Fourier multiplier
\[
\mathcal{F}_{x}[S(t)f](\xi )=e^{-it|\xi |^{4}}\widehat{f}(\xi ).
\]
We will use $A\lesssim B$ to denote an estimate of the form $A \le CB$ for some constant $C$ and write $A \sim B$ to mean $A \lesssim B$ and $B \lesssim A$.
We will use the convention that capital letters denote dyadic numbers, e.g. $N=2^{n}$ for $n\in {\BBB Z}$ and for a dyadic summation we write
$\sum_{N}a_{N}:=\sum_{n\in {\BBB Z}}a_{2^{n}}$ and $\sum_{N\geq M}a_{N}:=\sum_{n\in {\BBB Z}, 2^{n}\geq M}a_{2^{n}}$ for brevity.
Let $\chi \in C^{\infty}_{0}((-2,2))$ be an even, non-negative function such that $\chi (t)=1$ for $|t|\leq 1$.
We define $\psi (t):=\chi (t)-\chi (2t)$ and $\psi_{N}(t):=\psi (N^{-1}t)$. Then, $\sum_{N}\psi_{N}(t)=1$ whenever $t\neq 0$.
We define frequency and modulation projections
\[
\widehat{P_{N}u}(\xi ):=\psi_{N}(\xi )\widehat{u}(\xi ),\
\widetilde{Q_{M}^{S}u}(\tau ,\xi ):=\psi_{M}(\tau -|\xi|^{4})\widetilde{u}(\tau ,\xi ).
\]
Furthermore, we define $Q_{\geq M}^{S}:=\sum_{N\geq M}Q_{N}^{S}$ and $Q_{<M}^{S}:=Id -Q_{\geq M}^{S}$.
The rest of this paper is planned as follows.
In Section 2, we will give the definition and properties of the $U^{p}$ space and $V^{p}$ space.
In Section 3, we will give the multilinear estimates which are main estimates to prove Theorems~\ref{wellposed_1} and \ref{large-wp}.
In Section 4, we will give the proof of the well-posedness and the scattering (Theorem~\ref{wellposed_1}, Corollary~\ref{sccat}, and Theorem \ref{large-wp}).
In Section 5, we will give the proof of Theorem~\ref{wellposed_2}.
In Section 6, we will give the proof of Theorem~\ref{notC3}.
\section{The $U^{p}$, $V^{p}$ spaces and their properties \label{func_sp}}
In this section, we define the $U^{p}$ space and the $V^{p}$ space,
and introduce the properties of these spaces which are proved by Hadac, Herr and Koch (\cite{HHK09}, \cite{HHK10}).
We define the set of finite partitions ${\BBB Z}Z$ as
\[
{\BBB Z}Z :=\left\{ \{t_{k}\}_{k=0}^{K}|K\in {\BBB N} , -\infty <t_{0}<t_{1}<\cdots <t_{K}\leq \infty \right\}
\]
and if $t_{K}=\infty$, we put $v(t_{K}):=0$ for all functions $v:{\BBB R} \rightarrow L^{2}$.
\begin{defn}\label{upsp}
Let $1\leq p <\infty$. For $\{t_{k}\}_{k=0}^{K}\in {\BBB Z}Z$ and $\{\phi_{k}\}_{k=0}^{K-1}\subset L^{2}$ with
$\sum_{k=0}^{K-1} \| \phi_{k} \| _{L^{2}}^{p}=1$ we call the function $a:{\BBB R}\rightarrow L^{2}$
given by
\[
a(t)=\sum_{k=1}^{K}\mbox{\boldmath $1$}_{[t_{k-1},t_{k})}(t)\phi_{k-1}
\]
a ``$U^{p}${\rm -atom}''.
Furthermore, we define the atomic space
\[
U^{p}:=\left\{ \left. u=\sum_{j=1}^{\infty}\lambda_{j}a_{j}
\right| a_{j}:U^{p}{\rm -atom},\ \lambda_{j}\in {\BBB C} \ {\rm such\ that}\ \sum_{j=1}^{\infty}|\lambda_{j}|<\infty \right\}
\]
with the norm
\[
\| u \| _{U^{p}}:=\inf \left\{\sum_{j=1}^{\infty}|\lambda_{j}|\left|u=\sum_{j=1}^{\infty}\lambda_{j}a_{j},\
a_{j}:U^{p}{\rm -atom},\ \lambda_{j}\in {\BBB C}\right.\right\}.
\]
\end{defn}
\begin{defn}\label{vpsp}
Let $1\leq p <\infty$. We define the space of the bounded $p$-variation
\[
V^{p}:=\{ v:{\BBB R}\rightarrow L^{2}|\ \| v \| _{V^{p}}<\infty \}
\]
with the norm
\[
\| v \| _{V^{p}}:=\sup_{\{t_{k}\}_{k=0}^{K}\in {\BBB Z}Z}\left(\sum_{k=1}^{K} \| v(t_{k})-v(t_{k-1}) \| _{L^{2}}^{p}\right)^{1/p}.
\]
Likewise, let $V^{p}_{-, rc}$ denote the closed subspace of all right-continuous functions $v\in V^{p}$ with
$\lim_{t\rightarrow -\infty}v(t)=0$, endowed with the same norm $ \| \cdot \| _{V^{p}}$.
\end{defn}
\begin{prop}[\cite{HHK09} Proposition\ 2.2,\ 2.4,\ Corollary\ 2.6]\label{upvpprop}
Let $1\leq p<q<\infty$. \\
{\rm (i)} $U^{p}$, $V^{p}$ and $V^{p}_{-, rc}$ are Banach spaces. \\
{\rm (ii)} For every $v\in V^{p}$, $\lim_{t\rightarrow -\infty}v(t)$ and $\lim_{t\rightarrow \infty}v(t)$ exist in $L^{2}$. \\
{\rm (iii)} The embeddings $U^{p}\hookrightarrow V^{p}_{-,rc}\hookrightarrow U^{q}\hookrightarrow L^{\infty}_{t}({\BBB R} ;L^{2}_{x}({\BBB R}^{d}))$ are continuous.
\end{prop}
\begin{thm}[\cite{HHK09} Proposition\ 2.10,\ Remark\ 2.12]\label{duality}
Let $1<p<\infty$ and $1/p+1/p'=1$.
If $u\in V^{1}_{-,rc}$ is absolutely continuous on every compact interval, then
\[
\| u \| _{U^{p}}=\sup_{v\in V^{p'}, \| v \| _{V^{p'}}=1}\left|\int_{-\infty}^{\infty}(u'(t),v(t))_{L^{2}({\BBB R}^{d})}dt\right|.
\]
\end{thm}
\begin{defn}
Let $1\leq p<\infty$. We define
\[
U^{p}_{S}:=\{ u:{\BBB R}\rightarrow L^{2}|\ S(-\cdot )u\in U^{p}\}
\]
with the norm $ \| u \| _{U^{p}_{S}}:= \| S(-\cdot )u \| _{U^{p}}$,
\[
V^{p}_{S}:=\{ v:{\BBB R}\rightarrow L^{2}|\ S(-\cdot )v\in V^{p}_{-,rc}\}
\]
with the norm $ \| v \| _{V^{p}_{S}}:= \| S(-\cdot )v \| _{V^{p}}$.
\end{defn}
\begin{rem}
The embeddings $U^{p}_{S}\hookrightarrow V^{p}_{S}\hookrightarrow U^{q}_{S}\hookrightarrow L^{\infty}({\BBB R};L^{2})$ hold for $1\leq p<q<\infty$
by {\rm Proposition~\ref{upvpprop}}.
\end{rem}
\begin{prop}[\cite{HHK09} Corollary\ 2.18]\label{projest}
Let $1< p<\infty$. We have
\begin{align}
& \| Q_{\geq M}^{S}u \| _{L_{tx}^{2}}\lesssim M^{-1/2} \| u \| _{V^{2}_{S}},\label{highMproj}\\
& \| Q_{<M}^{S}u \| _{V^{p}_{S}}\lesssim \| u \| _{V^{p}_{S}},\ \ \| Q_{\geq M}^{S}u \| _{V^{p}_{S}}\lesssim \| u \| _{V^{p}_{S}},\label{Vproj}
\end{align}
\end{prop}
\begin{prop}[\cite{HHK09} Proposition\ 2.19]\label{multiest}
Let
\[
T_{0}:L^{2}({\BBB R}^{d})\times \cdots \times L^{2}({\BBB R}^{d})\rightarrow L^{1}_{loc}({\BBB R}^{d})
\]
be a $m$-linear operator. Assume that for some $1\leq p, q< \infty$
\[
\| T_{0}(S(\cdot )\phi_{1},\cdots ,S(\cdot )\phi_{m}) \| _{L^{p}_{t}({\BBB R} :L^{q}_{x}({\BBB R}^{d}))}\lesssim \prod_{i=1}^{m} \| \phi_{i} \| _{L^{2}({\BBB R}^{d})}.
\]
Then, there exists $T:U^{p}_{S}\times \cdots \times U^{p}_{S}\rightarrow L^{p}_{t}({\BBB R} ;L^{q}_{x}({\BBB R}^{d}))$ satisfying
\[
\| T(u_{1},\cdots ,u_{m}) \| _{L^{p}_{t}({\BBB R} ;L^{q}_{x}({\BBB R}^{d}))}\lesssim \prod_{i=1}^{m} \| u_{i} \| _{U^{p}_{S}}
\]
such that $T(u_{1},\cdots ,u_{m})(t)(x)=T_{0}(u_{1}(t),\cdots ,u_{m}(t))(x)$ a.e.
\end{prop}
Now we recall the Strichartz estimate for the fourth order Schr\"odinger equation proved by Pausader.
We say that a pair $(p,q)$ is admissible if $2 \le p,q \le \infty$, $(p,q,d) \neq (2, \infty ,2)$, and
\[
\frac{2}{p} + \frac{d}{q} = \frac{d}{2}.
\]
\begin{prop}[\cite{P07} Proposition\ 3.1]\label{Stri_est}
Let $(p,q)$ and $(a,b)$ be admissible pairs.
Then, we have
\[
\begin{split}
\| S(\cdot )\varphi \| _{L_{t}^{p}L_{x}^{q}}&\lesssim \| |\nabla|^{-2/p}\varphi \| _{L^{2}_{x}},\\
\left\| \int_{0}^{t}S(t-t' )F(t')dt' \right\| _{L_{t}^{p}L_{x}^{q}}&\lesssim \| |\nabla|^{-2/p-2/a}F \| _{L^{a'}_{t}L^{b'}_{x}},
\end{split}
\]
where $a'$ and $b'$ are conjugate exponents of $a$ and $b$ respectively.
\end{prop}
Propositions~\ref{multiest} and~\ref{Stri_est} imply the following.
\begin{cor}\label{Up_Stri}
Let $(p,q)$ be an admissible pair.
\begin{equation}\label{U_Stri}
\| u \| _{L_{t}^{p}L_{x}^{q}}\lesssim \| |\nabla|^{-2/p}u \| _{U_{S}^{p}},\ \ u\in U^{p}_{S}.
\end{equation}
\end{cor}
Next, we define the function spaces which will be used to construct the solution.
We define the projections $P_{>1}$ and $P_{<1}$ as
\[
P_{>1}:=\sum_{N\ge 1}P_N,\ P_{<1}:=Id-P_{>1}.
\]
\begin{defn}\label{YZ_space}
Let $s <0$.\\
{\rm (i)} We define $\dot{Z}^{s}:=\{u\in C({\BBB R} ; \dot{H}^{s}({\BBB R}^{d}))\cap U^{2}_{S}|\ \| u \| _{\dot{Z}^{s}}<\infty\}$ with the norm
\[
\| u \| _{\dot{Z}^{s}}:=\left(\sum_{N}N^{2s} \| P_{N}u \| ^{2}_{U^{2}_{S}}\right)^{1/2}.
\]
{\rm (ii)} We define $Z^{s}:=\{u\in C({\BBB R} ; H^{s}({\BBB R}^{d})) |\ \| u \| _{Z^{s}}<\infty\}$ with the norm
\[
\| u \| _{Z^{s}}:= \| P_{<1} u \| _{\dot{Z}^{0}}+ \| P_{>1} u \| _{\dot{Z}^{s}}.
\]
{\rm (iii)} We define $\dot{Y}^{s}:=\{u\in C({\BBB R} ; \dot{H}^{s}({\BBB R}^{d}))\cap V^{2}_{S}|\ \| u \| _{\dot{Y}^{s}}<\infty\}$ with the norm
\[
\| u \| _{\dot{Y}^{s}}:=\left(\sum_{N}N^{2s} \| P_{N}u \| ^{2}_{V^{2}_{S}}\right)^{1/2}.
\]
{\rm (iv)} We define $Y^{s}:=\{u\in C({\BBB R} ; H^{s}({\BBB R}^{d})) |\ \| u \| _{Y^{s}}<\infty\}$ with the norm
\[
\| u \| _{Y^{s}}:= \| P_{<1} u \| _{\dot{Y}^{0}}+ \| P_{>1 }u \| _{\dot{Y}^{s}}.
\]
\end{defn}
\section{Multilinear estimate for $P_{4}(u,\overline{u})=\overline{u}^{4}$ in $1d$ \label{Multi_est}}
In this section, we prove multilinear estimates for the nonlinearity $\partial_{x}(\overline{u}^{4})$ in $1d$, which plays a crucial role in the proof of Theorem \ref{wellposed_1}.
\begin{lemm}\label{modul_est}
We assume that $(\tau_{0},\xi_{0})$, $(\tau_{1}, \xi_{1})$, $\cdots$, $(\tau_{4}, \xi_{4})\in {\BBB R}\times {\BBB R}^{d}$ satisfy
$\sum_{j=0}^{4}\tau_{j}=0$ and $\sum_{j=0}^{4}\xi_{j}=0$. Then, we have
\begin{equation}\label{modulation_est}
\max_{0\leq j\leq 4}|\tau_{j}-|\xi_{j}|^{4}|
\geq \frac{1}{5}\max_{0\leq j\leq 4}|\xi_{j}|^{4}.
\end{equation}
\end{lemm}
\begin{proof}
By the triangle inequality, we obtain (\ref{modulation_est}).
\end{proof}
\subsection{The homogeneous case}
\begin{prop}\label{HL_est_n}
Let $d=1$ and $0<T\leq \infty$.
For a dyadic number $N_{1}\in 2^{{\BBB Z}}$, we define the set $A_{1}(N_{1})$ as
\[
A_{1}(N_{1}):=\{ (N_{2},N_{3},N_{4})\in (2^{{\BBB Z}})^{3}|N_{1}\gg N_{2}\geq N_{3} \geq N_{4}\}.
\]
If $N_{0}\sim N_{1}$, then we have
\begin{equation}\label{hl}
\begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\\
&\lesssim
\| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}.
\end{split}
\end{equation}
\end{prop}
\begin{proof}
We define $u_{j,N_{j},T}:=\mbox{\boldmath $1$}_{[0,T)}P_{N_{j}}u_{j}$\ $(j=1,\cdots ,4)$ and put
$M:=N_{0}^{4}/5$. We decompose $Id=Q^{S}_{<M}+Q^{S}_{\geq M}$.
We divide the integrals on the left-hand side of (\ref{hl}) into $10$ pieces of the form
\begin{equation}\label{piece_form_hl}
\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}Q_{j}^{S}u_{j,N_{j},T}\right) dxdt
\end{equation}
with $Q_{j}^{S}\in \{Q_{\geq M}^{S}, Q_{<M}^{S}\}$\ $(j=0,\cdots ,4)$.
By Plancherel's theorem, we have
\[
(\ref{piece_form_hl})
= c\int_{\sum_{j=0}^{4}\tau_{j}=0}\int_{\sum_{j=0}^{4}\xi_{j}=0}N_{0}\prod_{j=0}^{4}\mathcal{F}[Q_{j}^{S}u_{j,N_{j},T}](\tau_{j},\xi_{j}),
\]
where $c$ is a constant. Therefore, Lemma~\ref{modul_est} implies that
\[
\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}Q_{<M}^{S}u_{j,N_{j},T}\right) dxdt=0.
\]
So, let us now consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$.
First, we consider the case $Q_{0}^{S}=Q_{\geq M}^{S}$.
By the Cauchy--Schwarz inequality, we have
\[
\begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}\prod_{j=2}^{4}\left\|\sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}}.
\end{split}
\]
Furthermore by (\ref{highMproj}) and $M\sim N_{0}^{4}$, we have
\[
\| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}}
\lesssim N_{0}^{-2} \| u_{0,N_{0},T} \| _{V^{2}_{S}}
\]
and by (\ref{U_Stri}) and $V^{2}_{S}\hookrightarrow U^{4}_{S}$,
we have
\[
\begin{split}
\| Q_{1}^{S}u_{1,N_{1},T} \| _{L_{t}^{4}L_{x}^{\infty}}
&\lesssim N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1},T} \| _{U^{4}_{S}}
\lesssim N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1},T} \| _{V^{2}_{S}}.
\end{split}
\]
While by the Sobolev inequality, (\ref{U_Stri}), $V^{2}_{S}\hookrightarrow U^{12}_{S}$ and the Cauchy--Schwarz inequality for the dyadic sum, we have
\begin{equation}\label{L12L6_est}
\begin{split}
\left\|\sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}}
&\lesssim \left\| |\nabla |^{1/6}\sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\| _{L^{12}_{t}L^{3}_{x}}
\lesssim \left\| \sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\| _{V^{2}_{S}}\\
& \lesssim N_1^{1/2} \left( \sum _{N_j \lesssim N_1} N_j^{-1} \| u_{j,N_j,T} \|_{V^2_S}^2 \right) ^{1/2}
\lesssim N_{1}^{1/2} \| \mbox{\boldmath $1$}_{[0,T)}u_{j} \| _{\dot{Y}^{-1/2}}
\end{split}
\end{equation}
for $2\leq j\leq 4$. Therefore, we obtain
\[
\begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\lesssim
\| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}
\end{split}
\]
by (\ref{Vproj}) since $ \| \mbox{\boldmath $1$}_{[0,T)}u \| _{V^{2}_{S}}\lesssim \| u \| _{V^{2}_{S}}$ for any $T\in (0,\infty]$.
The case $Q_{1}^{S}=Q_{\geq M}^{S}$ is proved in the same way.
Next, we consider the case $Q_{i}^{S}=Q_{\geq M}^{S}$ for some $2\le i \le 4$.
By the H\"older inequality, we have
\[
\begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{i,N_{i},T}\prod_{\substack{0\le j\le 4\\ j\neq i}}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\lesssim N_{0} \| Q_{0}^{S}u_{0,N_{0},T} \| _{L_{t}^{12}L_{x}^{6}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L_{t}^{4}L_{x}^{\infty}}\\
&\ \ \ \ \times \left\| \sum_{N_{i}\lesssim N_{1}}Q_{\geq M}^{S}u_{i,N_{i},T} \right\|_{L_{tx}^{2}}
\prod_{\substack{2\le j\le 4 \\ j\neq i}}\left\| \sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\| _{L_{t}^{12}L_{x}^{6}}.
\end{split}
\]
By $L^{2}$ orthogonality and (\ref{highMproj}), we have
\begin{equation}\label{hi_mod_234}
\begin{split}
\left\| \sum_{N_{i}\lesssim N_{1}}Q_{\geq M}^{S}u_{i,N_{i},T}\right\| _{L_{tx}^{2}}
&\lesssim \left(\sum_{N_{i}} \| Q_{\geq M}^{S}u_{i,N_{i},T} \| _{L_{tx}^{2}}^{2}\right)^{1/2}\\
&\lesssim N_{1}^{-3/2} \| \mbox{\boldmath $1$}_{[0,T)}u_{i} \| _{\dot{Y}^{-1/2}}
\end{split}
\end{equation}
since $M\sim N_{0}^{4}$. Meanwhile, by the same calculation as in the case $Q_{0}^{S}=Q_{\geq M}^{S}$, we have
\[
\| Q_{0}^{S}u_{0,N_{0},T} \| _{L_{t}^{12}L_{x}^{6}}\lesssim \| Q_{0}^{S}u_{0,N_{0},T} \| _{V^{2}_{S}},
\]
\[
\| Q_{1}^{S}u_{1,N_{1},T} \| _{L_{t}^{4}L_{x}^{\infty}}\lesssim N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1},T} \| _{V^{2}_{S}}
\]
and
\[
\left\| \sum_{N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T} \right\|_{L_{t}^{12}L_{x}^{6}}
\lesssim N_{1}^{1/2} \| \mbox{\boldmath $1$}_{[0,T)}u_{j} \| _{\dot{Y}^{-1/2}}.
\]
Therefore, we obtain
\[
\begin{split}
&\left|\sum_{A_{1}(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{i,N_{i},T}\prod_{\substack{0\le j\le 4\\ j\neq i}}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\lesssim
\| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}
\end{split}
\]
by (\ref{Vproj}) since $ \| \mbox{\boldmath $1$}_{[0,T)}u \| _{V^{2}_{S}}\lesssim \| u \| _{V^{2}_{S}}$ for any $T\in (0,\infty]$.
\end{proof}
\begin{prop}\label{HH_est}
Let $d=1$ and $0<T\leq \infty$.
For a dyadic number $N_{2}\in 2^{{\BBB Z}}$, we define the set $A_{2}(N_{2})$ as
\[
A_{2}(N_{2}):=\{ (N_{3}, N_{4})\in (2^{{\BBB Z}})^{2}|N_{2}\geq N_{3}\geq N_{4}\}.
\]
If $N_{0}\lesssim N_{1}\sim N_{2}$, then we have
\begin{equation}\label{hh}
\begin{split}
&\left|\sum_{A_{2}(N_{2})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\\
&\lesssim
\frac{N_{0}}{N_{1}} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}N_{2}^{-1/2} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}} \| u_{3} \| _{\dot{Y}^{-1/2}} \| u_{4} \| _{\dot{Y}^{-1/2}}.
\end{split}
\end{equation}
\end{prop}
The proof of Proposition~\ref{HH_est} is quite similar to the proof of Proposition~\ref{HL_est_n}.
\subsection{The inhomogeneous case}
\begin{prop}\label{HL_est_n-inh}
Let $d=1$ and $0<T\leq 1$.
For a dyadic number $N_{1}\in 2^{{\BBB Z}}$, we define the set $A_{1}'(N_{1})$ as
\[
A_{1}'(N_{1}):=\{ (N_{2},N_{3},N_{4})\in (2^{{\BBB Z}})^{3}|N_{1}\gg N_{2}\geq N_{3} \ge N_{4}, \, N_4 \le 1 \}.
\]
If $N_{0}\sim N_{1}$, then we have
\begin{equation}\label{hl-inh}
\begin{split}
\left|\sum_{A_{1}'(N_{1})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|
\lesssim T^{\frac{1}{6}} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}} \prod_{j=2}^{4} \| u_{j} \| _{Y^{-1/2}}.
\end{split}
\end{equation}
\end{prop}
\begin{proof}
We further divide $A_1'(N_1)$ into three pieces:
\begin{align*}
A_1'(N_1) & = \bigcup _{j=1}^3 A_{1,j}'(N_1), \\
A_{1,1}'(N_1) &:= \{ (N_{2},N_{3},N_{4}) \in A_1'(N_1) : N_3 \ge 1 \} ,\\
A_{1,2}'(N_1) &:= \{ (N_{2},N_{3},N_{4}) \in A_1'(N_1) : N_2 \ge 1 \ge N_3 \} ,\\
A_{1,3}'(N_1) &:= \{ (N_{2},N_{3},N_{4}) \in A_1'(N_1) : 1 \ge N_2 \} .
\end{align*}
We define $u_{j,N_{j}}:=P_{N_{j}}u_{j}$, $u_{j,T}:=\mbox{\boldmath $1$}_{[0,T)}u_{j}$ and $u_{j,N_{j},T}:=\mbox{\boldmath $1$}_{[0,T)}P_{N_{j}}u_{j}$\ $(j=1,\cdots ,4)$.
We firstly consider the case $A_{1,1}'(N_1)$.
In the case $T \le N_0^{-3}$, the H\"older inequality implies
\begin{align*}
& \left|\sum_{A_{1,1}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
& \le N_0 \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}
\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}}
\prod _{j=2}^3 \left\| \sum_{1\le N_j \le N_{1}} u_{j,N_j} \right\| _{L_t^{\infty} L_x^2} \| P_{<1} u_{4} \| _{L_t^{\infty} L_x^{\infty}}
\end{align*}
Furthermore by (\ref{U_Stri}) and $V^{2}_{S}\hookrightarrow U^{4}_{S}$,
we have
\[
\begin{split}
\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}}
&\lesssim N_{0}^{-1/2}\| u_{0,N_0} \| _{U^{4}_{S}}N_{1}^{-1/2} \| Q_{1}^{S}u_{1,N_{1}} \| _{U^{4}_{S}}\\
&\lesssim N_{0}^{-1} \| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}
\end{split}
\]
and by the Sobolev inequality, $V^{2}_{S}\hookrightarrow L^{\infty}_{t}L^{2}_{x}$ and the Cauchy-Schwarz inequality, we have
\[
\| P_{<1} u_{4} \| _{L_t^{\infty} L_x^{\infty}}\lesssim \| P_{<1} u_{4} \| _{L_t^{\infty} L_x^{2}}
\lesssim \left(\sum_{N\le 2}\|P_{N}P_{<1}u_{4}\|_{V^{2}_{S}}^{2}\right)^{1/2}
\le \|P_{<1}u_4\|_{\dot{Y}^{0}}
\]
While by $L^{2}$ orthogonality and $V^{2}_{S}\hookrightarrow L^{\infty}_{t}L^{2}_{x}$, we have
\[
\begin{split}
\left\| \sum_{1\le N_j \le N_{1}} u_{j,N_j} \right\| _{L_t^{\infty} L_x^2}
&\lesssim \left(\sum_{1\le N_j \le N_{1}} \| u_{j,N_{j}} \| _{V^{2}_{S}}^{2}\right)^{1/2}
\lesssim N_{0}^{1/2} \| P_{>1}u_{j} \| _{\dot{Y}^{-1/2}}
\end{split}
\]
Therefore, we obtain
\[
\begin{split}
&\left|\sum_{A_{1,1}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
&\lesssim T^{1/2}N_0 \| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}\prod_{j=2}^{3}\| P_{>1}u_{j} \| _{\dot{Y}^{-1/2}}\|P_{<1}u_4\|_{\dot{Y}^{0}}
\end{split}
\]
and note that $T^{1/2}N_0\le T^{1/6}$.
In the case $T \ge N_0^{-3}$, we divide the integrals on the left-hand side of (\ref{hl-inh}) into $10$ pieces of the form \eqref{piece_form_hl} in the proof of Proposition \ref{HL_est_n}.
Thanks to Lemma~\ref{modul_est}, let us consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$.
First, we consider the case $Q_{0}^{S}=Q_{\geq M}^{S}$.
By the same way as in the proof of Proposition \ref{HL_est_n} and using
\[
\|Q_{4}^{S}P_{<1}u_{4,T}\|_{L^{12}_{t}L^{6}_{x}}\lesssim \|Q_{4}^{S}P_{<1}u_{4,T}\|_{V^{2}_{S}}\lesssim \|P_{<1}u_{4,T}\|_{\dot{Y}^{0}}
\]
instead of (\ref{L12L6_est}), we obtain
\[
\begin{split}
&\left|\sum_{A_{1,1}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}
\prod_{j=2}^{3} \left\|\sum_{1 \le N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}} \|Q_{4}^{S}P_{<1}u_{4,T}\|_{L^{12}_{t}L^{6}_{x}}\\
& \lesssim N_0^{-\frac{1}{2}} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \prod_{j=2}^{3} \left\| P_{>1} u_j \right\| _{\dot{Y}^{-1/2}} \| P_{<1} u_{4} \| _{\dot{Y}^0}
\end{split}
\]
and note that $N_0^{-1/2}\le T^{1/6}$.
Since the cases $Q_j^S = Q_{\ge M}^S$ ($j=1,2,3$) are similarly handled, we omit the details here.
We focus on the case $Q_4^S = Q_{\ge M}^S$.
By the same way as in the proof of Proposition \ref{HL_est_n} and using
\[
\|Q_{\ge M}^{S}P_{<1}u_{4,T}\|_{L^{2}_{tx}}\lesssim N_{0}^{-2} \|P_{<1}u_{4,T}\|_{V^{2}_{S}}\lesssim N_{0}^{-2}\|P_{<1}u_{4,T}\|_{\dot{Y}^{0}}
\]
instead of (\ref{hi_mod_234}) with $j=4$, we obtain
\[
\begin{split}
&\left|\sum_{A_{1,1}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{4,N_{4},T}\prod_{j=0}^{3}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| u_{0,N_{0},T} \| _{L^{12}_{t}L_x^6} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}
\prod_{j=2}^{3} \left\|\sum_{1 \le N_{j}\lesssim N_{1}}Q_{j}^{S}u_{j,N_{j},T}\right\|_{L^{12}_{t}L^{6}_{x}}
\|Q_{\geq M}^{S} P_{<1}u_{4,T}\|_{L^{2}_{tx}}\\
& \lesssim N_{0}^{-1/2}\| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \prod_{j=2}^{3} \left\| P_{>1} u_j \right\| _{\dot{Y}^{-1/2}} \| P_{<1} u_4 \| _{\dot{Y}^0}
\end{split}
\]
and note that $N_0^{-1/2}\le T^{1/6}$.
We secondly consider the case $A_{1,2}'(N_1)$.
In the case $T \le N_0^{-3}$, the H\"older inequality implies
\[
\begin{split}
& \left|\sum_{A_{1,2}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
& \le N_0 \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}
\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}} \left\| \sum _{1 \le N_2 \lesssim N_1} u_{2,N_2} \right\| _{L_t^{\infty} L_x^2}
\prod_{j=3}^{4}\| P_{<1} u_{j} \| _{L_t^{\infty} L_x^4} .
\end{split}
\]
By the same estimates as in the proof for the case $A_{1,1}'(N_1)$ and
\[
\| P_{<1} u_{j} \| _{L_t^{\infty} L_x^4}\lesssim \| P_{<1} u_{j} \| _{L_t^{\infty} L_x^{2}}
\lesssim \left(\sum_{N\le 2}\|P_{N}P_{<1}u_{j}\|_{V^{2}_{S}}^{2}\right)^{1/2}
\le \|P_{<1}u_j\|_{\dot{Y}^{0}}
\]
for $j=3,4$, we obtain
\[
\begin{split}
&\left|\sum_{A_{1,2}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
&\lesssim T^{1/2}N_0^{1/2} \| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}\| P_{>1}u_{2} \| _{\dot{Y}^{-1/2}}\prod_{j=3}^{4}\|P_{<1}u_j\|_{\dot{Y}^{0}}
\end{split}
\]
and note that $T^{1/2}N_0^{1/2}\le T^{1/3}$.
In the case $T \ge N_0^{-3}$, we divide the integrals on the left-hand side of (\ref{hl-inh}) into $10$ pieces of the form \eqref{piece_form_hl} in the proof of Proposition \ref{HL_est_n}.
Thanks to Lemma~\ref{modul_est}, let us consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$.
By the same argument as in the proof for the case $A_{1,1}'(N_1)$, we obtain
\[
\begin{split}
&\left|\sum_{A_{1,2}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}} \left\|\sum_{1 \le N_{2}\lesssim N_{1}}Q_{2}^{S}u_{2,N_{2},T}\right\|_{L^{12}_{t}L^{6}_{x}} \prod_{j=3}^{4} \| Q_{j}^{S}P_{<1}u_{j,T}\|_{L^{12}_{t}L^{6}_{x}}\\
& \lesssim N_0^{-1} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \left\| P_{>1} u_2 \right\| _{\dot{Y}^{-1/2}} \prod _{j=3}^4 \| P_{<1} u_j \| _{\dot{Y}^0}
\end{split}
\]
if $Q_0 = Q_{\ge M}^S$ and
\[
\begin{split}
&\left|\sum_{A_{1,2}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{4,N_{4},T}\prod_{j=0}^{3}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| u_{0,N_{0},T} \| _{L^{12}_{t}L_x^6} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}} \left\|\sum_{1 \le N_{2}\lesssim N_{1}}Q_{2}^{S}u_{2,N_{2},T}\right\|_{L^{12}_{t}L^{6}_{x}} \\
&\hspace{21ex}\times \|Q_{3}^{S} P_{<1}u_{3,T}\|_{L^{12}_{t}L^{6}_{x}} \| Q_{\geq M}^{S} P_{<1}u_{4,T}\|_{L^{2}_{tx}}\\
& \lesssim N_0^{-1} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \left\| P_{>1} u_2 \right\| _{\dot{Y}^{-1/2}} \prod_{j=3}^{4}\| P_{<1} u_j \| _{\dot{Y}^0}
\end{split}
\]
if $Q_4 = Q_{\ge M}^S$.
Note that $N_0^{-1}\le T^{1/3}$.
The remaining cases follow from the same argument as above.
We thirdly consider the case $A_{1,3}'(N_1)$.
In the case $T \le N_0^{-3}$, the H\"older inequality implies
\[
\begin{split}
& \left|\sum_{A_{1,3}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right| \\
& \le N_0 \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}\| u_{0,N_0} \| _{L_t^4 L_x^{\infty}} \| u_{1,N_1} \| _{L_t^4 L_x^{\infty}}
\prod_{j=2}^{4} \| P_{<1}u_{j} \| _{L_t^{\infty} L_x^3}.
\end{split}
\]
By the same estimates as in the proof for the case $A_{1,1}'(N_1)$ and
\[
\| P_{<1} u_{j} \| _{L_t^{\infty} L_x^3}\lesssim \| P_{<1} u_{j} \| _{L_t^{\infty} L_x^{2}}
\lesssim \left(\sum_{N\le 2}\|P_{N}P_{<1}u_{j}\|_{V^{2}_{S}}^{2}\right)^{1/2}
\le \|P_{<1}u_j\|_{\dot{Y}^{0}}
\]
for $j=2, 3,4$, we obtain
\[
\begin{split}
&\left|\sum_{A_{1,3}'(N_{1})} \int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|
\lesssim T^{1/2}\| u_{0,N_{0}} \| _{V^{2}_{S}} \| u_{1,N_{1}} \| _{V^{2}_{S}}\prod_{j=2}^{4}\| P_{<1}u_{j} \| _{\dot{Y}^{0}}.
\end{split}
\]
In the case $T \ge N_0^{-3}$, we divide the integrals on the left-hand side of (\ref{hl-inh}) into $10$ pieces of the form \eqref{piece_form_hl} in the proof of Proposition \ref{HL_est_n}.
Thanks to Lemma~\ref{modul_est}, let us consider the case that $Q_{j}^{S}=Q_{\geq M}^{S}$ for some $0\leq j\leq 4$.
By the same argument as in the proof for the case $A_{1,1}'(N_1)$, we obtain
\[
\begin{split}
&\left|\sum_{A_{1,3}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{0,N_{0},T}\prod_{j=1}^{4}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| Q_{\geq M}^{S}u_{0,N_{0},T} \| _{L^{2}_{tx}} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}}
\prod_{j=2}^{4} \|Q_{j}^{S}P_{<1}u_{j,T}\|_{L^{12}_{t}L^{6}_{x}}\\
& \lesssim N_0^{-3/2} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \left\| P_{<1} u_2 \right\| _{Y^{-1/2}} \prod _{j=3}^4 \| P_{<1} u_j \| _{\dot{Y}^0}
\end{split}
\]
if $Q_0 = Q_{\ge M}^S$ and
\[
\begin{split}
&\left|\sum_{A_{1,3}'(N_{1})}\int_{{\BBB R}}\int_{{\BBB R}}\left(N_{0}Q_{\geq M}^{S}u_{4,N_{4},T}\prod_{j=0}^{3}Q_{j}^{S}u_{j,N_{j},T}\right)dxdt\right|\\
&\leq N_{0} \| u_{0,N_{0},T} \| _{L^{12}_{t}L_x^6} \| Q_{1}^{S}u_{1,N_{1},T} \| _{L^{4}_{t}L^{\infty}_{x}} \prod _{j=2}^3 \|Q_{j}^{S} P_{<1}u_{j,T}\|_{L^{12}_{t}L^{6}_{x}}
\|Q_{\geq M}^{S} P_{<1}u_{4,T}\|_{L^2_{tx}}\\
& \lesssim N_0^{-3/2} \| P_{N_0} u_0 \| _{V^2_S} \| P_{N_1} u_1 \| _{V^2_S} \prod _{j=2}^4 \left\| P_{<1} u_j \right\| _{Y^{0}}
\end{split}
\]
if $Q_4 = Q_{\ge M}^S$.
Note that $N_0^{-3/2}\le T^{1/2}$.
The cases $Q_j^S = Q_{\ge M}^S$ ($j=1,2,3$) follow from the same argument as above.
\end{proof}
Furthermore, we obtain the following estimate.
\begin{prop}\label{HH_est-inh}
Let $d=1$ and $0<T\leq 1$.
For a dyadic number $N_{2}\in 2^{{\BBB Z}}$, we define the set $A_{2}'(N_{2})$ as
\[
A_{2}'(N_{2}):=\{ (N_{3}, N_{4})\in (2^{{\BBB Z}})^{2}|N_{2}\geq N_{3}\ge N_{4} , \, N_4 \le 1 \}.
\]
If $N_{0}\lesssim N_{1}\sim N_{2}$, then we have
\begin{equation}\label{hh-inh}
\begin{split}
&\left|\sum_{A_{2}'(N_{2})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\\
&\lesssim T^{\frac{1}{6}} \frac{N_{0}}{N_{1}} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}N_{2}^{-1/2} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}} \| u_{3} \| _{Y^{-1/2}} \| u_{4} \| _{Y^{-1/2}}.
\end{split}
\end{equation}
\end{prop}
Because the proof is similar to those above, we skip it.
\section{Proof of well-posedness \label{pf_wellposed_1}}
\subsection{The small data case}
In this section, we prove Theorem~\ref{wellposed_1} and Corollary~\ref{sccat}.
We define the map $\Phi_{T, \varphi}$ as
\[
\Phi_{T, \varphi}(u)(t):=S(t)\varphi -iI_{T}(u,\cdots, u)(t),
\]
where
\[
I_{T}(u_{1},\cdots u_{4})(t):=\int_{0}^{t}\mbox{\boldmath $1$}_{[0,T)}(t')S(t-t')\partial_{x}\left(\prod_{j=1}^{4}\overline{u_{j}(t')}\right)dt'.
\]
To prove the well-posedness of (\ref{D4NLS}) in $\dot{H}^{-1/2}$, we prove that $\Phi_{T, \varphi}$ is a contraction map
on a closed subset of $\dot{Z}^{-1/2}([0,T))$.
Key estimate is the following:
\begin{prop}\label{Duam_est}
Let $d=1$. For any $0<T<\infty$, we have
\begin{equation}\label{Duam_est_1}
\| I_{T}(u_{1},\cdots u_{4}) \| _{\dot{Z}^{-1/2}}\lesssim \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}.
\end{equation}
\end{prop}
\begin{proof}
We decompose
\[
I_{T}(u_{1},\cdots u_{4})=\sum_{N_{1},\cdots ,N_{4}}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4}).
\]
By symmetry, it is enough to consider the summation for $N_{1}\geq N_{2}\geq N_{3} \geq N_{4}$. We put
\[
\begin{split}
S_{1}&:=\{ (N_{1},\cdots ,N_{4})\in (2^{{\BBB Z}})^{4}|N_{1}\gg N_{2}\geq N_{3} \geq N_{4}\}\\
S_{2}&:=\{ (N_{1},\cdots ,N_{4})\in (2^{{\BBB Z}})^{4}|N_{1}\sim N_{2}\geq N_{3} \geq N_{4}\}
\end{split}
\]
and
\[
J_{k}:=\left\| \sum_{S_{k}}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4}) \right\| _{\dot{Z}^{-1/2}}\ (k=1,2).
\]
First, we prove the estimate for $J_{1}$. By Theorem~\ref{duality} and Plancherel's theorem, we have
\[
\begin{split}
J_{1}&\leq \left\{ \sum_{N_{0}}N_{0}^{-1}\left\| S(-\cdot )P_{N_{0}}\sum_{S_{1}}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4})\right\|_{U^{2}}^{2}\right\}^{1/2}\\
&\lesssim \left\{\sum_{N_{0}}N_{0}^{-1}\sum_{N_{1}\sim N_{0}}
\left( \sup_{ \| u_{0} \| _{V^{2}_{S}}=1}\left|\sum_{A_{1}(N_{1})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|\right)^{2}\right\}^{1/2},
\end{split}
\]
where $A_{1}(N_{1})$ is defined in Proposition~\ref{HL_est_n}.
Therefore by Proposition~\ref{HL_est_n}, we have
\[
\begin{split}
J_{1}&\lesssim \left\{\sum_{N_{0}}N_{0}^{-1}\sum_{N_{1}\sim N_{0}}
\left( \sup_{ \| u_{0} \| _{V^{2}_{S}}=1} \| P_{N_{0}}u_{0} \| _{V^{2}_{S}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}\right)^{2}\right\}^{1/2}\\
&\lesssim
\left(\sum_{N_{1}}N_{1}^{-1} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}^{2}\right)^{1/2}
\prod_{j=2}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}\\
&=\prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}.
\end{split}
\]
Next, we prove the estimate for $J_{2}$. By Theorem~\ref{duality} and Plancherel's theorem, we have
\[
\begin{split}
J_{2}&\leq
\sum_{N_{1}}\sum_{N_{2}\sim N_{1}}\left(\sum_{N_{0}}N_{0}^{-1}\left\|S(-\cdot )P_{N_{0}}\sum_{A_{2}(N_{2})}I_{T}(P_{N_{1}}u_{1},\cdots P_{N_{4}}u_{4})\right\|_{U^{2}}^{2}\right)^{1/2}\\
&=\sum_{N_{1}}\sum_{N_{2}\sim N_{1}}\left(\sum_{N_{0}\lesssim N_{1}}N_{0}^{-1}
\sup_{ \| u_{0} \| _{V^{2}_{S}}=1}\left| \sum_{A_{2}(N_{2})}\int_{0}^{T}\int_{{\BBB R}}\left(N_{0}\prod_{j=0}^{4}P_{N_{j}}u_{j}\right)dxdt\right|^{2}\right)^{1/2},
\end{split}
\]
where $A_{2}(N_{2})$ is defined in Proposition~\ref{HH_est}.
Therefore by {\rm Proposition~\ref{HH_est}} and the Cauchy-Schwarz inequality for the dyadic sum, we have
\[
\begin{split}
J_{2}&\lesssim
\sum_{N_{1}}\sum_{N_{2}\sim N_{1}}\left(\sum_{N_{0}\lesssim N_{1}}N_{0}^{-1}
\left(\frac{N_{0}}{N_{1}} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}N_{2}^{-1/2} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}} \| u_{3} \| _{\dot{Y}^{-1/2}} \| u_{4} \| _{\dot{Y}^{-1/2}}\right)^{2}\right)^{1/2}\\
&\lesssim \left(\sum_{N_{1}}N_{1}^{-1} \| P_{N_{1}}u_{1} \| _{V^{2}_{S}}^{2}\right)^{1/2}
\left(\sum_{N_{2}}N_{2}^{-1} \| P_{N_{2}}u_{2} \| _{V^{2}_{S}}^{2}\right)^{1/2} \| u_{3} \| _{\dot{Y}^{-1/2}} \| u_{4} \| _{\dot{Y}^{-1/2}}\\
&= \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{-1/2}}.
\end{split}
\]
\end{proof}
\begin{proof}[\rm{\bf{Proof of Theorem~\ref{wellposed_1}.}}]
For $r>0$, we define
\begin{equation}\label{Zr_norm}
\dot{Z}^{s}_{r}(I)
:=\left\{u\in \dot{Z}^{s}(I)\left|\ \| u \| _{\dot{Z}^{s}(I)}\leq 2r \right.\right\}
\end{equation}
which is a closed subset of $\dot{Z}^{s}(I)$.
Let $T>0$ and $u_{0}\in B_{r}(\dot{H}^{-1/2})$ are given. For $u\in \dot{Z}^{-1/2}_{r}([0,T))$,
we have
\[
\| \Phi_{T,u_{0}}(u) \| _{\dot{Z}^{-1/2}([0,T))}\leq \| u_{0} \| _{\dot{H}^{-1/2}} +C \| u \| _{\dot{Z}^{-1/2}([0,T))}^{4}\leq r(1+ 16 Cr^{3})
\]
and
\[
\begin{split}
\| \Phi_{T,u_{0}}(u)-\Phi_{T,u_{0}}(v) \| _{\dot{Z}^{-1/2}([0,T))}
&\leq C( \| u \| _{\dot{Z}^{-1/2}([0,T))}+ \| v \| _{\dot{Z}^{-1/2}([0,T))})^{3} \| u-v \| _{\dot{Z}^{-1/2}([0,T))}\\
&\leq 64Cr^{3} \| u-v \| _{\dot{Z}^{-1/2}([0,T))}
\end{split}
\]
by Proposition~\ref{Duam_est} and
\[
\| S(\cdot )u_{0} \| _{\dot{Z}^{-1/2}([0,T))}\leq \| \mbox{\boldmath $1$}_{[0,T)}S(\cdot )u_{0} \| _{\dot{Z}^{-1/2}}\leq \| u_{0} \| _{\dot{H}^{-1/2}},
\]
where $C$ is an implicit constant in (\ref{Duam_est_1}). Therefore if we choose $r$ satisfying
\[
r <(64C)^{-1/3},
\]
then $\Phi_{T,u_{0}}$ is a contraction map on $\dot{Z}^{-1/2}_{r}([0,T))$.
This implies the existence of the solution of (\ref{D4NLS}) and the uniqueness in the ball $\dot{Z}^{-1/2}_{r}([0,T))$.
The Lipschitz continuity of the flow map is also proved by a similar argument.
\end{proof}
Corollary~\ref{sccat} is obtained in the same way as the proof of Corollary~1.2 in \cite{Hi}.
\subsection{The large data case}
In this subsection, we prove Theorem \ref{large-wp}.
The following is the key estimate.
\begin{prop}\label{Duam_est-inh}
Let $d=1$. We have
\begin{equation}\label{Duam_est_1-inh}
\| I_{1}(u_{1},\cdots u_{4}) \| _{\dot{Z}^{-1/2}} \lesssim \prod_{j=1}^{4} \| u_{j} \| _{Y^{-1/2}}.
\end{equation}
\end{prop}
\begin{proof}
We decompose $u_j = v_j +w_j$ with $v_j = P_{>1}u_j \in \dot{Y}^{-1/2}$ and $w_j = P_{<1} u_j \in \dot{Y}^0$.
From Propositions \ref{HL_est_n-inh}, \ref{HH_est-inh}, and the same way as in the proof of Proposition~\ref{Duam_est},
it remains to prove that
\[
\| I_{1}(w_{1},w_2,w_3,w_{4}) \| _{\dot{Z}^{-1/2}} \lesssim \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^0}.
\]
By Theorem \ref{duality}, the Cauchy-Schwarz inequality, the H\"older inequality and the Sobolev inequality, we have
\[
\| I_{1}(w_{1},w_2,w_3,w_{4}) \| _{\dot{Z}^{-1/2}}
\lesssim \left\| \prod_{j=1}^{4}\overline{w_{j}} \right\|_{L^1([0,1];L^2)}
\lesssim \prod _{j=1}^4 \| w_j \| _{L_t^{\infty} L_x^2}
\lesssim \prod_{j=1}^{4} \| u_{j} \| _{\dot{Y}^{0}},
\]
which completes the proof.
\end{proof}
\begin{proof}[\rm{\bf{Proof of Theorem \ref{large-wp}}}]
Let $u_0 \in B_{\delta ,R}(H^{-1/2})$ with $u_0=v_0+w_0$, $v_0 \in \dot{H}^{-1/2}$, $w_0 \in L^2$.
A direct calculation yields
\[
\| S(t) u_0 \| _{Z^{-1/2}([0,1))} \le \delta +R.
\]
We start with the case $R=\delta = (4C+4)^{-4}$, where $C$ is the implicit constant in \eqref{Duam_est_1-inh}.
Proposition \ref{Duam_est-inh} implies that for $u \in Z^{-1/2}_r([0,1])$ with $r=1/(4C+4)$
\begin{align*}
\| \Phi_{1,u_{0}}(u) \| _{Z^{-1/2}([0,1))} & \leq \| S(t) u_0 \| _{Z^{-1/2}([0,1))} +C \| u \| _{Z^{-1/2}([0,1))}^{4} \\
& \leq 2r^4 + 16C r^4
= r^4 (16C+2)
\le r
\end{align*}
and
\begin{align*}
\| \Phi_{1,u_{0}}(u)-\Phi_{1,u_{0}}(v) \| _{Z^{-1/2}([0,1))}
&\leq C( \| u \| _{Z^{-1/2}([0,1))}+ \| v \| _{Z^{-1/2}([0,1))})^{3} \| u-v \| _{Z^{-1/2}([0,1))}\\
&\leq 64Cr^{3} \| u-v \| _{Z^{-1/2}([0,1))}
< \| u-v \| _{Z^{-1/2}([0,1))}
\end{align*}
if we choose $C$ large enough (namely, $r$ is small enough).
Accordingly, $\Phi_{1,u_{0}}$ is a contraction map on $Z^{-1/2}_{r}([0,1))$.
We note that
all of the above remains valid if we exchange $Z^{-1/2}([0,1))$ by the smaller space $\dot{Z}^{-1/2}([0,1))$ since $\dot{Z}^{-1/2}([0,1)) \hookrightarrow Z^{-1/2}([0,1))$ and the left hand side of \eqref{Duam_est_1-inh} is the homogeneous norm.
We now assume that $u_0 \in B_{\delta ,R}(H^{-1/2})$ for $R \ge \delta = (4C+4)^{-4}$.
We define $u_{0, \lambda}(x) = \lambda ^{-1} u_0 (\lambda ^{-1}x)$.
For $\lambda = \delta ^{-2} R^{2}$, we observe that $u_{0,\lambda} \in B_{\delta ,\delta}(H^{-1/2})$.
We therefore find a solution $u_{\lambda} \in Z^{-1/2}([0,1))$ with $u_{\lambda}(0,x) = u_{0,\lambda}(x)$.
By the scaling, we find a solution $u \in Z^{-1/2}([0, \delta ^8 R^{-8}))$.
Thanks to Propositions \ref{HL_est_n-inh} and \ref{HH_est-inh}, the uniqueness follows from the same argument as in \cite{HHK10}.
\end{proof}
\section{Proof of Theorem~\ref{wellposed_2}}\label{pf_wellposed_2}\text{}
In this section, we prove Theorem~\ref{wellposed_2}.
We only prove for the homogeneous case since the proof for the inhomogeneous case is similar.
We define the map $\Phi_{T, \varphi}^{m}$ as
\[
\Phi_{T, \varphi}^{m}(u)(t):=S(t)\varphi -iI_{T}^{m}(u,\cdots, u)(t),
\]
where
\[
I_{T}^{m}(u_{1},\cdots u_{m})(t):=\int_{0}^{t}\mbox{\boldmath $1$}_{[0,T)}(t')S(t-t')\partial \left(\prod_{j=1}^{m}u_{j}(t')\right)dt'
\]
and the solution space $\dot{X}^{s}$ as
\[
\dot{X}^{s}:=C({\BBB R};\dot{H}^{s})\cap L^{p_{m}}({\BBB R};\dot{W}^{s+1/(m-1),q_{m}}),
\]
where $p_{m}=2(m-1)$, $q_{m}=2(m-1)d/\{(m-1)d-2\}$ for $d \ge 2$ and $p_3=4$, $q_3=\infty$ for $d=1$.
To prove the well-posedness of (\ref{D4NLS}) in $L^{2}({\BBB R} )$ or $H^{s_{c}}({\BBB R}^{d})$, we prove that $\Phi_{T, \varphi}$ is a contraction map
on a closed subset of $\dot{X}^{s}$.
The key estimate is the following:
\begin{prop}\label{Duam_est_g}
{\rm (i)}\ Let $d=1$ and $m=3$. For any $0<T<\infty$, we have
\begin{equation}\label{Duam_est_1d}
\| I_{T}^{3}(u_{1},u_{2}, u_{3}) \| _{\dot{X}^{0}}\lesssim T^{1/2}\prod_{j=1}^{3} \| u_{j} \| _{\dot{X}^{0}}.
\end{equation}
{\rm (ii)}\ Let $d\ge 2$, $(m-1)d\ge 4$ and $s_c=d/2-3/(m-1)$. For any $0<T\le \infty$, we have
\begin{equation}\label{Duam_est_2}
\| I_{T}^{m}(u_{1},\cdots, u_{m}) \| _{\dot{X}^{s_c}}\lesssim \prod_{j=1}^{m} \| u_{j} \| _{\dot{X}^{s_c}}.
\end{equation}
\end{prop}
\begin{proof}
{\rm (i)}\ By Proposition~\ref{Stri_est} with $(a,b)=\left( 4, \infty \right)$,
we get
\[
\| I_{T}^{3}(u_{1},u_{2}, u_{3}) \| _{L^{\infty}_{t}L^{2}_{x}}
\lesssim \left\|\mbox{\boldmath $1$}_{[0,T)} |\nabla |^{-1/2}\partial \left(\prod_{j=1}^{3}u_{j}\right)\right\|_{L^{4/3}_{t}L^{1}_{x}}
\]
and
\[
\| |\nabla |^{1/2}I_{T}^{3}(u_{1},u_{2}, u_{3}) \| _{L^{4}_{t}L^{\infty}_{x}}
\lesssim \left\| \mbox{\boldmath $1$}_{[0,T)}|\nabla |^{1/2-1/2-1/2}\partial \left(\prod_{j=1}^{3}u_{j}\right)\right\|_{L^{4/3}_{t}L^{1}_{x}}.
\]
Therefore, thanks to the fractional Leibniz rule (see \cite{CW91}), we have
\[
\begin{split}
\| I_{T}^{3}(u_{1},\cdots, u_{3}) \| _{\dot{X}^{0}}
& \lesssim \left\| \mbox{\boldmath $1$}_{[0,T)}|\nabla |^{1/2}\prod_{j=1}^{3}u_{j}\right\|_{L^{4/3}_{t}L^{1}_{x}} \\
& \lesssim \| \mbox{\boldmath $1$}_{[0,T)}\|_{L^{2}_{t}}\sum_{i=1}^{3}\| |\nabla |^{1/2}u_{i} \| _{L^{4}_{t}L^{\infty}_{x}}\prod_{\substack{1\le j\le 3\\ j\neq i}} \| u_{j} \| _{L^{\infty}_{t}L^{2}_{x}}\\
&\lesssim T^{1/2}\prod_{j=1}^{3} \| u_{j} \| _{\dot{X}^{0}}
\end{split}
\]
by the H\"older inequality.
\\
{\rm (ii)}\ By Proposition~\ref{Stri_est} with
\begin{equation}\label{admissible_ab}
(a,b)=\left( \frac{2(m-1)}{m-2}, \frac{2(m-1)d}{(m-1)d-2(m-2)}\right),
\end{equation}
we get
\[
\| |\nabla |^{s_c}I_{T}^{m}(u_{1},\cdots u_{m}) \| _{L^{\infty}_{t}L^{2}_{x}}
\lesssim \left\| |\nabla |^{s_c-2/a}\partial \left(\prod_{j=1}^{m}u_{j}\right)\right\|_{L^{a'}_{t}L^{b'}_{x}}
\]
and
\[
\| |\nabla |^{s_c+1/(m-1)}I_{T}^{m}(u_{1},\cdots u_{m}) \| _{L^{p_m}_{t}L^{q_m}_{x}}
\lesssim \left\| |\nabla |^{s_c+1/(m-1)-2/p_m-2/a}\partial \left(\prod_{j=1}^{m}u_{j}\right)\right\|_{L^{a'}_{t}L^{b'}_{x}}.
\]
Therefore, thanks to the fractional Leibniz rule (see \cite{CW91}), we have
\[
\begin{split}
\| I_{T}^{m}(u_{1},\cdots u_{m}) \| _{\dot{X}^{s_c}}
& \lesssim \left\| |\nabla |^{s_c+1/(m-1)}\prod_{j=1}^{m}u_{j}\right\|_{L^{a'}_{t}L^{b'}_{x}} \\
& \lesssim \sum_{i=1}^{m} \| |\nabla |^{s_c+1/(m-1)}u_{i} \| _{L^{p_{m}}_{t}L^{q_{m}}_{x}}\prod_{\substack{1\le j\le m\\ j\neq i}} \| u_{j} \| _{L^{p_{m}}_{t}L^{(m-1)d}_{x}}\\
&\lesssim \sum_{i=1}^{m} \| |\nabla |^{s_c+1/(m-1)}u_{i} \| _{L^{p_{m}}_{t}L^{q_{m}}_{x}}\prod_{\substack{1\le j\le m\\ j\neq i}} \| |\nabla |^{s_{c}+1/(m-1)}u_{j} \| _{L^{p_{m}}_{t}L^{q_{m}}_{x}}\\
&\lesssim \prod_{j=1}^{m} \| u_{j} \| _{\dot{X}^{s_c}}
\end{split}
\]
by the H\"older inequality and the Sobolev inequality, where we used the condition $(m-1)d\ge 4$ which is equivalent to $s_{c}+1/(m-1)\ge 0$.
\end{proof}
The well-posedness can be proved by the same way as the proof of Theorem~\ref{wellposed_1} and the scattering follows from
that the Strichartz estimate because the $\dot{X}^{s_c}$ norm of the nonlinear part is bounded by the norm of the $L^{p_m}L^{q_m}$ space (see for example \cite[Section 9]{P07}).
\section{Proof of Theorem~\ref{notC3}}\label{pf_notC3}
In this section we prove the flow of (\ref{D4NLS}) is not smooth.
Let $u^{(m)}[u_0]$ be the $m$-th iteration of \eqref{D4NLS} with initial data $u_0$:
\[
u^{(m)}[u_0] (t,x) := -i \int _0^t e^{i(t-t') \Delta ^2} \partial P_m( S(t') u_0, S(-t') \overline{u_0}) dt' .
\]
Firstly we consider the case $d=1$, $m=3$, $P_{3}(u,\overline{u})=|u|^{2}u$.
For $N\gg 1$, we put
\[
f_{N} = N^{-s+1/2} \mathcal{F}^{-1}[ \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]}]
\]
Let $u^{(3)}_{N}$ be the third iteration of (\ref{D4NLS}) for the data $f_{N}$.
Namely,
\[
u^{(3)}_{N}(t,x) = u^{(3)}[f_N] (t,x)= -i \int _0^t e^{i(t-t') \partial _x ^4} \partial _x \left( |e^{it' \partial _x^4} f_{N}| ^2 e^{it' \partial _x^4} f_{N} \right)(x) dt'.
\]
Note that $ \| f_{N} \| _{H^s} \sim 1$.
Theorem~\ref{notC3} is implied by the following propositions.
\begin{prop}
If $s<0$, then for any $N\gg 1$, we have
\[
\| u^{(3)}_{N} \| _{L^{\infty}([0,1]; H^s)} \rightarrow \infty
\]
as $N\rightarrow \infty$.
\end{prop}
\begin{proof}
A direct calculation implies
\[
\widehat{u^{(3)}_{N}} (t, \xi ) = e^{it \xi ^4} \xi \int _{\xi _1-\xi _2+\xi _3 =\xi} \int _0^t e^{it'(-\xi ^4 +\xi _1^4-\xi _2^4+\xi _3^4)} d t' \widehat{f_{N}}(\xi _1) \overline{\widehat{f_{N}}}(\xi _2) \widehat{f_{N}}(\xi _3)
\]
and
\begin{equation} \label{modulation}
\begin{split}
&-(\xi _1-\xi _2+\xi _3)^4+\xi _1^4-\xi _2^4+\xi _3^4\\
&= 2 (\xi _1- \xi _2)(\xi _2-\xi _3) ( 2 \xi _1^2 +\xi _2^2+2\xi _3^2 -\xi _1 \xi _2 -\xi _2\xi _3 +3 \xi _3 \xi _1) .
\end{split}
\end{equation}
From $\xi _j \in [N-N^{-1}, N+N^{-1}]$ for $j=1,2,3$, we get
\[
|-(\xi _1-\xi _2+\xi _3)^4+\xi _1^4-\xi _2^4+\xi _3^4|
\lesssim 1.
\]
We therefore obtain for sufficiently small $t>0$
\begin{align*}
|\widehat{u^{(3)}_{N}} (t,\xi ) |
& \gtrsim t N^{-3s+5/2} \left| \int _{\xi _1-\xi _2+\xi _3 =\xi} \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]} (\xi _1) \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]} (\xi _2) \mbox{\boldmath $1$} _{[N-N^{-1}, N+N^{-1}]} (\xi _3) \right| \\
& \gtrsim t N^{-3s+1/2} \mbox{\boldmath $1$} _{[N-N^{-1},N+N^{-1} ]} (\xi ) .
\end{align*}
Hence,
\[
\| u^{(3)}_{N} \| _{L^{\infty}([0,1]; H^s)} \gtrsim N^{-2s}.
\]
This lower bound goes to infinity as $N$ tends to infinity if $s<0$, which concludes the proof.
\end{proof}
Secondly, we show the absence of a smooth flow map for $d \ge 1$ and $m \ge 2$.
Putting
\[
g_N := N^{-s-d/2} \mathcal{F}^{-1}[ \mbox{\boldmath $1$} _{[-N,N]^d}] ,
\]
we set $u_N^{(m)} := u^{(m)} [g_N]$.
Note that $\| g_N \| _{H^s} \sim 1$.
As above, we show the following.
\begin{prop}
If $s<s_c := d/2-3/(m-1)$ and $\partial =|\nabla |$ or $\frac{\partial}{\partial x_k}$ for some $1\le k\le d$, then for any $N \gg 1$, we have
\[
\| u_N^{(m)} \| _{L^{\infty}([0,1];H^s)} \rightarrow \infty
\]
as $N \rightarrow \infty$.
\end{prop}
\begin{proof}
We only prove for the case $\partial =|\nabla |$ since the proof for the case $\frac{\partial}{\partial x_k}$ is same.
Let
\[
\mathcal{A} := \{ (\pm _1, \dots , \pm _m) : \pm _j \in \{ +, - \} \, (j=1, \dots ,m) \} .
\]
Since $\mathcal{A}$ consists of $2^m$ elements, we write
\[
\mathcal{A} = \bigcup _{\alpha =1}^{2^m} \{ \pm ^{(\alpha )} \} ,
\]
where $\pm ^{( \alpha )}$ is an $m$-tuple of signs $+$ and $-$.
We denote by $\pm _{j}^{(\alpha )}$ the $j$-th component of $\pm ^{(\alpha )}$.
A simple calculation shows that
\[
\widehat{u_N^{(m)}} (t,\xi) = |\xi | \sum _{\alpha =1}^{2^m} e^{it |\xi |^4} \int _{\xi = \sum _{j=1}^m \pm _j^{(\alpha)} \xi _j} \int _0^t e^{it' (-|\xi|^4 + \sum _{j=1}^m \pm _j^{(\alpha )} |\xi _j|^4)} dt' \prod _{j=1}^m \widehat{g_N} (\xi _j) .
\]
From
\[
\left| -|\xi|^4 + \sum _{j=1}^m \pm _j^{(\alpha )} |\xi _j|^4 \right| \lesssim N^4
\]
for $|\xi _j| \le N$ ($j= 1, \dots , m$), we have
\[
|\widehat{u_N^{(m)}} (t, \xi )|
\gtrsim |\xi | N^{-4} N^{-m(s+d/2)} N^{(m-1)d} \mbox{\boldmath $1$} _{[-N,N]^d} (\xi )
\gtrsim N^{-3} N^{-m(s+d/2)} N^{(m-1)d} \mbox{\boldmath $1$} _{[N/2,N]^d} (\xi )
\]
provided that $t \sim N^{-4}$.
Accordingly, we obtain
\[
\| u_N^{(m)} (N^{-4}) \| _{H^s} \gtrsim N^{-3} N^{-m(s+d/2)} N^{(m-1)d} N^{s+d/2}
\sim N^{-(m-1)s+(m-1)d/2-3} ,
\]
which implies that $\limsup _{t \rightarrow 0} \| u^{(m)}_N(t) \| _{H^s} = \infty$ if $s<s_c$.
\end{proof}
\section*{Acknowledgment}
The work of the second author was partially supported by JSPS KAKENHI Grant number 26887017.
\end{document} |
\begin{document}
\title{Two Algorithms for Additive and \\ Fair Division of Mixed Manna}
\titlerunning{Two Algorithms for Additive and Fair Division of Mixed Manna}
\author{Martin Aleksandrov \and Toby Walsh}
\authorrunning{M. Aleksandrov, T. Walsh}
\institute{Technical University Berlin, Germany \\
\email{\{martin.aleksandrov,toby.walsh\}@tu-berlin.de}}
\maketitle
\begin{abstract}
We consider a fair division model in which agents have positive, zero and negative utilities for items. For this model, we analyse one existing fairness property---EFX---and three new and related properties---EFX$_0$, EFX$^3$ and EF1$^3$---in combination with Pareto-optimality. With general utilities, we give a modified version of an existing algorithm for computing an EF1$^3$ allocation. With $-\alpha/0/\alpha$ utilities, this algorithm returns an EFX$^3$ and PO allocation. With absolute identical utilities, we give a new algorithm for an EFX and PO allocation. With $-\alpha/0/\beta$ utilities, this algorithm also returns such an allocation. We report some new impossibility results as well.
\keywords{Additive Fair division \and Envy-freeness \and Pareto-optimality}
\end{abstract}
\section{Introduction}\label{sec:intro}
Fair division of indivisible items lies on the intersection of fields such as social choice, computer science and algorithmic economics \cite{chevaleyre2006}. Though a large body of work is devoted to the case when the items are goods (e.g.\ \cite{brams1996,steinhaus1948,moulin2003,young1995}), there is a rapidly growing interest in the case of mixed manna (e.g.\ \cite{aziz2019popropone,caragiannis2012,sandomirskiy2019minimal}). In a mixed manna, each item can be classified as \emph{mixed} (i.e.\ some agents strictly like it and other agents strictly dislike it), \emph{good} (i.e.\ all agents weakly like it and some agents strictly like it), \emph{bad} (i.e.\ all agents weakly dislike it and some agents strictly dislike it) or \emph{dummy} (i.e.\ all agents are indifferent to it).
An active line of fair division research currently focuses on approximations of envy-freeness (i.e.\ no agent envies another one) \cite{foley1967}. For example, Aziz et al.\ \cite{aziz2019gc} proposed two such approximations for mixed manna: EF1 and EFX. EF1 requires that an agent's envy for another agent's bundle is eliminated by removing some item from these agents' bundles. EFX strengthens EF1 to any non-zero valued item in these bundles, increasing the agent's utility or decreasing the other agent's utility. However, they study only EF1 and identify improving our understanding of EFX as an important open problem for mixed manna:
\begin{quote}
{\em ``Our work paves the way for detailed examination of allocation of goods/ chores, and opens up an interesting line of research, with many problems left open to explore. In particular, there are further fairness concepts that could be studied from both existence and complexity issues, most notably envy-freeness up to the least valued item (EFX) \cite{caragiannis2016}.''}
\end{quote}
We make in this paper a step forward in this direction. In particular, we study not only EFX but also {\em new} properties, all stronger than EF1. For example, one such property is {\em envy-freeness by parts up to some item}: EF1$^3$. This ensures EF1 independently for the set of all items, the set of goods and the set of bads (i.e.\ the different parts). Another such property is {\em envy-freeness by parts up to any item}: EFX$^3$. This requires EFX for each of the different parts of the set of items. Yet a third such property is EFX$_0$. This one extends the existing envy-freeness up to any (possibly zero valued) good from \cite{plaut2018} to any (possibly zero valued) bad by relaxing the non-zero marginal requirements in the definition of EFX. We will shortly observe the following relations between these properties.
\begin{center}
EFX$_0$\hspace{0.15cm}$\Rightarrow$\hspace{0.15cm}EFX\hspace{0.5cm}
EFX$^3$\hspace{0.15cm}$\Rightarrow$\hspace{0.15cm}EFX\hspace{0.5cm} EF1$^3$\hspace{0.15cm}$\Rightarrow$\hspace{0.15cm}EF1 \hspace{0.5cm} EFX$^3$\hspace{0.15cm}$\Rightarrow$\hspace{0.15cm}EF1$^3$
\end{center}
We analyse these properties in isolation and also in combination with an efficiency criterion such as Pareto-optimality (PO). PO ensures that we cannot make an agent happier without making another one unhappier. More precisely, we ask in our work whether combinations of these properties can be guaranteed, and also how to do this when it is possible. Our analysis covers three common domains for \emph{additive} (i.e.\ an agent's utility for a set of items is a sum of their utilities for the items in the set) utility functions: \emph{general} (i.e.\ each utility is real-valued), \emph{absolute identical} (i.e.\ for each item, the agents' utilities have identical magnitudes but may have different signs) as well as \emph{ternary} (i.e.\ each utility is $-\alpha$, $0$ or $\beta$ for some $\alpha,\beta\in\mathbb{R}_{>0}$).
Each of these domains can be observed in practice. For instance, if a machine can perform a certain task faster than some pre-specified amount of time, then its utility for the task is positive and, otherwise, it is negative. Thus, multiple machines can have mixed utilities for tasks. Further, consider a market where items have prices and agents sell or buy items. In this context, the agents' utilities for an item have identical magnitudes but different signs. Finally, a special case of ternary utilities is when each agent has utility $-1$, $0$, or $1$ for every item. This is practical because we need simply to elicit whether agents like, dislike or are indifferent to each item. A real-world setting with such utilities is the food bank problem studied in \cite{aleksandrov2015ijcai}.
We give some related work, formal preliminaries and motivation in Sections~\ref{sec:work},~\ref{sec:pre} and~\ref{sec:mot}, respectively. In Section~\ref{sec:gen}, we give a polynomial-time algorithm (i.e.\ Algorithm~\ref{alg:mdrr}) for computing an EF1$^3$ allocation with general utilities. We also prove that an EFX$^3$ allocation, or an EFX$_0$ allocation might not exist even with ternary identical utilities. In Section~\ref{sec:ident}, we give a polynomial-time algorithm (i.e.\ Algorithm~\ref{alg:minimax}) for computing an EFX and PO allocation with absolute identical utilities, and show that Algorithm~\ref{alg:mdrr} returns an EF1$^3$ and PO allocation. In Section~\ref{sec:ter}, we show that Algorithm~\ref{alg:mdrr} returns an EF1$^3$ and PO allocation with ternary utilities, whereas Algorithm~\ref{alg:minimax} returns an EFX and PO allocation. Finally, we give a summary in Section~\ref{sec:con}.
\section{Related work}\label{sec:work}
For indivisible goods, EF1 was defined by Budish \cite{budish2011}. Caragiannis et al. \cite{caragiannis2016} proposed EFX. It remains an open question of whether EFX allocations exist in problems with general utilities. Recently, Amanatidis et al. \cite{amanatidis2020maximum} proved that EFX allocations exist in \emph{2-value} (i.e.\ each utility takes one of two values) problems. In contrast, we show that EFX and PO allocations exist in problems with ternary (i.e.\ $-\alpha/0/\beta$) utilities, which are special cases of 3-value problems. Barman, Murthy and Vaish \cite{barman2018fe} presented a pseudo-polynomial time algorithm for EF1 and PO allocations. Barman et al. \cite{barman2018} gave an algorithm for EFX and PO allocations in problems with identical utilities. Plaut and Roughgarden \cite{plaut2018} proved that the {\em leximin} solution from \cite{dubins1961} is also EFX and PO in this domain. Although this solution maximizes the minimum agent's utility (i.e.\ the egalitarian welfare), it is intractable to find in general \cite{dobzinski2013}. In our work, we give a polynomial-time algorithm for EFX and PO allocations in problems with absolute identical utilities, and show that this welfare and EFX$^3$ are incompatible.
For mixed manna, Aziz et al.\ \cite{aziz2019gc} proposed EF1 and EFX. They gave the double round-robin algorithm that returns EF1 allocations. Unfortunately, these are not guaranteed to satisfy PO. They also gave a polynomial-time algorithm that returns allocations which are EF1 and PO in the case of \num{2} agents. Aziz and Rey \cite{aziz2019group} gave a ``ternary flow'' algorithm for leximin, EFX and PO allocations with $-\alpha/0/\alpha$ utilities. With $-\alpha/0/\beta$ utilities, we discuss that these might sadly violate EFX$^3$ even when $\alpha=1,\beta=1$, or EFX when $\alpha=2,\beta=1$. By comparison, we give a modified version of the double round-robin algorithm that returns EF1$^3$ allocations in problems with general utilities, EF1$^3$ and PO allocations in problems with absolute identical utilities and EFX$^3$ and PO allocations in problems with $-\alpha/0/\alpha$ utilities. Other works of divisible manna are \cite{bogomolnaia2019div,bogomolnaia2016gab,bogomolnaia2017comp}, and approximations of envy-freeness for indivisible goods are \cite{amanatidis2018,caragiannis2016,lipton2004}. In contrast, we study some new approximations and the case of indivisible manna.
\section{Formal preliminaries}\label{sec:pre}
We consider a set $[n]=\lbrace 1,\ldots, n\rbrace$ of $n\in\mathbb{N}_{\geq 2}$ agents and a set $[m]=\lbrace 1,\ldots,m\rbrace$ of $m\in\mathbb{N}_{\geq 1}$ indivisible items. We assume that each agent $a\in [n]$ has some \emph{utility} function $u_a:2^{[m]}\rightarrow\mathbb{R}$. Thus, they assign some utility $u_a(M)$ to each bundle $M\subseteq [m]$. We write $u_a(o)$ for $u_a(\lbrace o\rbrace)$. We say that $u_a$ is \emph{additive} if, for each $M\subseteq [m]$, $u_a(M)=\sum_{o\in M} u_a(o)$. We also write $u(M)$ if, for each other agent $b\in [n]$, $u_a(M)=u_b(M)$.
With additive utility functions, the set of items $[m]$ can be partitioned into \emph{mixed items}, \emph{goods}, \emph{bads} and \emph{dummies}. Respectively, we write $[m]^{\pm}=\lbrace o\in [m]|\exists a\in [n]: u_a(o)>0,\exists b\in [n]:u_b(o)<0\rbrace$, $[m]^+=\lbrace o\in [m]|\forall a\in [n]: u_a(o)\geq 0,\exists b\in [n]:u_b(o)>0\rbrace$, $[m]^-=\lbrace o\in [m]|\forall a\in [n]:u_a(o)\leq 0,\exists b\in [n]:u_b(o)<0\rbrace$ and $[m]^0=\lbrace o\in [m]|\forall a\in [n]:u_a(o)=0\rbrace$ for the sets of these items. We refer to an item $o$ from $[m]^+$ as a \emph{pure good} if $\forall a\in [n]: u_a(o)>0$. Also, we refer to an item $o$ from $[m]^-$ as a \emph{pure bad} if $\forall a\in [n]: u_a(o)<0$.
We say that agents have \emph{general} additive utilities if, for each $a\in [n]$ and each $o\in [m]$, $u_a(o)$ could be any number from $\mathbb{R}$. Further, we say that they have \emph{absolute identical} additive utilities if, for each $o\in [m]$, $|u_a(o)|=|u_b(o)|$ where $a,b\in [n]$, or \emph{identical} additive utilities if, for each $o\in [m]$, $u_a(o)=u_b(o)$ where $a,b\in [n]$. Finally, we say that agents have \emph{ternary} additive utilities if, for each $a\in [n]$ and each $o\in [m]$, $u_a(o)\in \lbrace -\alpha,0,\beta\rbrace$ for some $\alpha,\beta\in\mathbb{R}_{>0}$.
An \emph{(complete) allocation} $A=(A_1,\ldots,A_n)$ is such that (1) $A_a$ is the set of items allocated to agent $a\in [n]$, (2) $\bigcup_{a\in [n]} A_a=[m]$ and (3) $A_a\cap A_b=\emptyset$ for each $a,b\in[n]$ with $a\neq b$. We consider several properties for allocations.
\paragraph{\bf Envy-freeness up to one item}\label{par:ef}
Envy-freeness up to one item requires that an agent's envy for another's bundle is eliminated by removing an item from the bundles of these agents. Two notions for our model that are based on this idea are EF1 and EFX \cite{aziz2019gc}.
\begin{mydefinition} $(${\em EF1}$)$
An allocation $A$ is \emph{envy-free up to some item} if, for each $a,b\in [n]$, $u_a(A_a)\geq u_a(A_b)$ or $\exists o\in A_a\cup A_b$ such that $u_a(A_a\setminus\lbrace o\rbrace)\geq u_a(A_b\setminus\lbrace o\rbrace)$.
\end{mydefinition}
\begin{mydefinition}$(${\em EFX}$)$
An allocation $A$ is \emph{envy-free up to any non-zero valued item} if, for each $a, b\in [n]$, (1) $\forall o\in A_a$ such that $u_a(A_a)$ $<$ $u_a(A_a\setminus\lbrace o\rbrace)$: $u_a(A_a\setminus\lbrace o\rbrace)\geq u_a(A_b)$ and (2) $\forall o\in A_b$ such that $u_a(A_b)>u_a(A_b\setminus\lbrace o\rbrace)$: $u_a(A_a)\geq u_a(A_b\setminus\lbrace o\rbrace)$.
\end{mydefinition}
Plaut and Roughgarden \cite{plaut2018} considered a variant of EFX for goods where, for any given pair of agents, the removed item may be valued with zero utility by the envy agent. Kyropoulou et al.\ \cite{kyropoulou2019} referred to this one as EFX$_0$. We adapt this property to our model.
\begin{mydefinition}$(${\em EFX$_0$}$)$
An allocation $A$ is \emph{envy-free up to any item} if, for each $a, b\in [n]$, (1) $\forall o\in A_a$ such that $u_a(A_a)$ $\leq$ $u_a(A_a\setminus\lbrace o\rbrace)$: $u_a(A_a\setminus\lbrace o\rbrace)\geq u_a(A_b)$ and (2) $\forall o\in A_b$ such that $u_a(A_b)\geq u_a(A_b\setminus\lbrace o\rbrace)$: $u_a(A_a)\geq u_a(A_b\setminus\lbrace o\rbrace)$.
\end{mydefinition}
An allocation that is EFX$_0$ further satisfies EFX. Also, EFX is stronger than EF1. It is well-known that the opposite relations might not hold.
\paragraph{\bf Envy-freeness by parts}\label{par:exfthree}
Let $A=(A_1,\ldots,A_n)$ be a given allocation. We let $A^+_a=\lbrace o\in A_a|u_a(o)>0\rbrace$ and $A^-_a=\lbrace o\in A_a|u_a(o)<0\rbrace$ for each $a\in [n]$. Envy-freeness by parts up to one item ensures that EF1 (or EFX) is satisfied in each of the allocations $A$, $A^+=(A^+_1,\ldots,A^+_n)$ and $A^-=(A^-_1,\ldots,A^-_n)$.
\begin{mydefinition}$(${\em EF1$^3$}$)$
An allocation $A$ is \emph{envy-free by parts up to some item} $($\emph{EF1-EF1-EF1 or EF1$^3$}$)$ if the following conditions hold: (1) $A$ is EF1, (2) $A^+$ is EF1 and (3) $A^-$ is EF1.
\end{mydefinition}
\begin{mydefinition}$(${\em EFX$^3$}$)$
An allocation $A$ is \emph{envy-free by parts up to any item} $($\emph{EFX-EFX-EFX or EFX$^3$}$)$ if the following conditions hold: (1) $A$ is EFX, (2) $A^+$ is EFX and (3) $A^-$ is EFX.
\end{mydefinition}
With just goods (bads), EF1$^3$ (EFX$^3$) is EF1 (EFX). With mixed manna, an allocation that is EF1$^3$ also satisfies EF1, one that is EFX$^3$ satisfies EFX, and one that is EFX$^3$ satisfies EF1$^3$. The reverse implications might not be true.
\paragraph{\bf Pareto-optimality}\label{par:po}
We also study each of these fairness properties in combination with an efficiency criterion such as Pareto-optimality (PO), proposed a long time ago by Vilfredo Pareto \cite{pareto1896}.
\begin{mydefinition}$(${\em PO}$)$
An allocation $A$ is \emph{Pareto-optimal} if there is no allocation $B$ that \emph{Pareto-improves} $A$, i.e.\ $\forall a\in [n]$: $u_a(B_a)\geq u_a(A_a)$ and $\exists b\in [n]$: $u_b(B_b)> u_b(A_b)$.
\end{mydefinition}
\section{Further motivation}\label{sec:mot}
\opencutright
\renewcommand\windowpagestuff{
\hspace{0.075\columnwidth}
\includegraphics[height=4.25cm,width=0.85\columnwidth]{anime-new.png}
}
\begin{cutout}{9}{0.5\columnwidth}{0pt}{10}
We next further motivate the new properties EF1$^3$ and EFX$^3$ by means of a simple example. Consider a birthday party where Bob invites his new friends Alice and Mary. Bob has \num{3} pieces of his favourite \emph{strawberry cake} (value is $1$) and \num{2} pieces of the less favorable to him \emph{chocolate cake} (value is $0$). Bob also hopes that some of his guests would be willing to help him \emph{washing up the dishes} and \emph{throwing away the garbage} after the party. Alice and Mary arrive and it turns out that both like only chocolate cake (value is $1$), and dislike any of the household chores (value is $-1$) as does Bob. How shall we allocate the \num{5} goods (i.e.\ all pieces of cake) and the \num{2} chores? \newline\indent For EF1 (EFX) and PO, we shall give the strawberry cake to Bob and one piece of the chocolate cake to each of Alice and Mary. As a result, Bob gets utility $3$ whereas Alice and Mary get each utility $1$. If we want to maximize the egalitarian welfare, we should assign both chores to Bob. Doing so preserves EF1 (EFX) and PO for all items. However, it violates EF1$^3$ (EFX$^3$). Indeed, Bob might be unhappy simply because they have to do both chores instead of sharing them with Alice and Mary. This means that an EF1 (EFX) allocation might not satisfy EF1$^3$ (EFX$^3$). In contrast, achieving EF1$^3$ (EFX$^3$) avoids assigning both chores to Bob. For example, asking Bob to wash up the dishes and Alice to throw away the garbage, or vice versa is EF1$^3$ (EFX$^3$). Other such options share the chores between Bob and Mary, and Alice and Mary. However, none of these maximizes the egalitarian welfare. This means that EF1$^3$ (EFX$^3$) is incompatible with this objective.
\end{cutout}
\section{General additive utilities}\label{sec:gen}
We begin with general utilities. An EF1 allocation in this domain can be computed in $O(\max\lbrace m^2,mn\rbrace)$ time. For this purpose, we can use the existing \emph{double round-robin} algorithm from \cite{aziz2019gc}. However, this algorithm may fail to guarantee PO because an agent might pick a bad for which some other agent has zero utility.
\begin{myexample}\label{exp:gen}
Consider \num{2} agents and \num{2} items, say $a$ and $b$. Define the utilities as follows: $u_1(a)=-1$, $u_1(b)=-1$ and $u_2(a)=-1$, $u_2(b)=0$. In this problem, the double round-robin algorithm is simply a round-robin rule with some strict priority ordering of the agents. Wlog, let agent 1 pick before agent 2. Wlog, let agent 1 pick $b$. Now, agent 2 can only pick $a$. The returned allocation gives utility $-1$ to agent 1 and utility $-1$ to agent 2. By swapping these items, agent 1 receives utility $-1$ and agent 2 receives utility $0$. Clearly, this is a Pareto-improvement.
\mbox{$\square$}
\end{myexample}
In response, we modify slightly the double round-robin algorithm by adding an extra preliminary phase where each dummy item/non-pure bad is allocated to an agent who has zero utility for it: Algorithm~\ref{alg:mdrr}. As we show, this modified version gives us an EF1$^3$ allocation that is PO not only with $-1/0/1$ utilities but also with any ternary utilities, as well as with absolute identical utilities.
\begin{mytheorem}\label{thm:efonethreegen}
With general utilities, Algorithm~\ref{alg:mdrr} returns an EF1$^3$ allocation in $O(\max\lbrace m^2,mn\rbrace)$ time.
\end{mytheorem}
\begin{algorithm}
\caption{An EF1$^3$ allocation (see the Appendix for a complete version).}\label{alg:mdrr}
\begin{algorithmic}[1]
\Procedure{Modified Double Round-Robin}{$[n],[m],(u_1,\ldots,u_n)$}
\State $M^0\gets\lbrace o\in [m]|\forall b\in [n]:u_b(o)\leq 0,\exists c\in [n]:u_c(o)=0\rbrace$
\State $\forall a\in [n]: A_a\gets\emptyset$
\For{$t\in M^0$} \Comment{allocate all dummies/non-pure bads}
\State pick $a\in\lbrace b\in [n]|u_b(t)=0\rbrace$
\State $A_a\gets A_a\cup\lbrace t\rbrace$
\EndFor
\State $B\gets ${\sc Double Round-Robin}($[n],[m]\setminus M^0,(u_1,\ldots,u_n)$)
\State \Return $(A_1\cup B_1,\ldots,A_n\cup B_n)$
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{myproof}
The double round-robin algorithm returns an EF1 allocation, and so $B$ is EF1. Consider $B^+$ and $B^-$. Let there be $qn-p$ pure bads for some $q,p\in\mathbb{N}$ with $p<n$. The algorithm creates $p$ ``fake'' dummy items for which each agent has utility $0$, and adds them to the set of pure bads. Hence, the number of items in this set becomes $qn$. Thus, the agents come in a round-robin fashion according to some ordering of the agents, say $(1,\ldots,n-1,n)$, and pick their most preferred item in this set (i.e.\ all pure bads and ``fake'' dummies) until all of them are allocated. This is EF1 for the pure bads. Hence, $B^-$ is EF1.
Further, the agents come in a round-robin fashion by following the reversed ordering, i.e.\ $(n,n-1,\ldots,1)$, and pick their most preferred good until all mixed items and goods are allocated. If an agent has no available item which gives them strictly positive utility, they pretend to pick a new ``fake'' dummy item for which they have utility $0$. This is EF1 for the mixed items and goods. Hence, $B^+$ is also EF1 which implies that $B$ is EF1$^3$. Finally, extending $B$ to all items, by allocating each dummy item/non-pure bad to someone who holds zero utility, preserves EF1$^3$. This means that the returned allocation is EF1$^3$.
\mbox{$\square$}
\end{myproof}
We move to stronger properties. For example, EFX$^3$ allocations in our setting might not exist. The rationale behind this is that an agent may get their least valued bad in an attempt to achieve EFX for the bads. As a result, removing this bad from their bundle might not be sufficient to eliminate their envy of some other agent who receives positive utility for a good and a bad.
\begin{myproposition}\label{pro:impefxthree}
There are problems with \num{2} agents and ternary identical utilities for \num{1} pure good and \num{2} pure bads, in which \emph{no} allocation is EFX$^3$.
\end{myproposition}
\begin{myproof}
Suppose that there are \num{2} agents and \num{3} items. We define the utilities as follows: $u(a)=-1$, $u(b)=-1$, $u(c)=2$. We note that one EFX allocation gives items $a$, $b$ and $c$ to agent 1 and no items to agent 2. However, there is no allocation that satisfies EFX$^3$.
We observe that there are two EFX allocations of the pure bads, i.e.\ $A=(\lbrace a\rbrace,\lbrace b\rbrace)$ and $B=(\lbrace b\rbrace,\lbrace a\rbrace)$. Further, we observe that there are two EFX allocations of the pure good, i.e.\ $C=(\lbrace c\rbrace,\emptyset)$ and $D=(\emptyset,\lbrace c\rbrace)$. By the symmetry of the utilities, we consider only $A$, $C$ and $D$.
If we unite (``agent-wise'') $A$ and $C$, then $u(A_2\cup C_2\setminus\lbrace b\rbrace)=0<1=u(A_1\cup C_1)$. Therefore, the union of $A$ and $C$ is not EFX and, therefore, not EFX$^3$. If we unite $A$ and $D$, then $u(A_1\cup D_1\setminus\lbrace a\rbrace)=0<1=u(A_2\cup D_2)$. Again, the union of $A$ and $D$ violates EFX$^3$. Similarly, for $B$, $C$ and $D$.
\mbox{$\square$}
\end{myproof}
By comparison, EFX allocations exist in 2-value problems with goods \cite{amanatidis2020maximum}. It follows immediately that EFX$^3$ allocations exist in such problems. From this perspective, we feel that our impossibility result compares favorably to this possibility result because such allocations may not exist in 2-value problems with goods and bads.
Even more, this result also implies that no EFX allocation satisfies EF1$^3$ and no EF1$^3$ allocation satisfies EFX in some problems with identical and ternary utilities. As a consequence, any allocation that could be returned by Algorithm~\ref{alg:mdrr} might violate EFX. These implications are also true for the stronger version EFX$_0$ in problems where such allocations exist.
However, EFX$_0$ allocations might also not always exist. The reason for this might be the presence of dummies. One may argue that such items could be removed. However, some web-applications on Spliddit for example ask agents to announce items (e.g.\ inherited items) and utilities but the system has no access to the actual items and, therefore, cannot remove the dummies \cite{caragiannis2016}.
\begin{myproposition}\label{pro:impefxzero}
There are problems with \num{2} agents and ternary identical utilities for \num{1} pure good and \num{1} dummy, in which \emph{no} allocation is EFX$_0$.
\end{myproposition}
\begin{myproof}
Suppose that there are \num{2} agents and \num{2} items, say $a$ and $b$. We define the utilities as follows: $u(a)=1$ and $u(b)=0$. We argue that there is no EFX$_0$ allocation in this problem. To see this, we make two observations. Firstly, with the given set of items, it is impossible that both agents obtain the same utility, as the individual utilities are integers and their sum is odd. Secondly, EFX$_0$ for the agents in this problem where a dummy item is present requires that both agents have the same utility. This follows by the definition of EFX$_0$. \mbox{$\square$}
\end{myproof}
This result is perhaps interesting because EFX$_0$ allocations exist in problems with \num{2} agents and general utilities for goods \cite{plaut2018}, or \emph{any} number of agents and $0/1$ utilities \cite{amanatidis2020maximum}. By Propositions~\ref{pro:impefxthree} and~\ref{pro:impefxzero}, it follows that neither EFX$^3$ nor EFX$_0$ can be achieved in combination with PO, or even a weaker efficiency notion such as \emph{completeness} (i.e.\ all items are allocated), in general.
\section{Absolute identical additive utilities}\label{sec:ident}
We continue with absolute identical utilities. Requiring such utilities is not as strong as requiring just identical utilities. To see this, consider agents 1, 2 and items $a$, $b$. Define the utilities as $u_1(a)=3$, $u_1(b)=2$ and $u_2(a)=3$, $u_2(b)=-2$. The absolute values of these utilities are identical but their cardinal values are not, e.g.\ $|u_1(b)|=|u_2(b)|=2$ but $u_1(b)=2, u_2(b)=-2$.
By Proposition~\ref{pro:impefxthree}, EF1$^3$ and EFX are incompatible in this domain. Nevertheless, we can combine each of them with PO. For example, Algorithm~\ref{alg:mdrr} returns an allocation that satisfies PO besides EF1$^3$. The key reason for this result is that in such problems there are no items that are bads (goods) for some agents and dummy for other agents.
\begin{mytheorem}\label{thm:efonethreepoid}
With absolute identical utilities, Algorithm~\ref{alg:mdrr} returns an EF1$^3$ and PO allocation.
\end{mytheorem}
\begin{myproof}
EF1$^3$ follows by Theorem~\ref{thm:efonethreegen}. We note that each allocation that gives at least one mixed item to an agent who values it strictly negatively can be Pareto-improved by moving this item to an agent who values it strictly positively. Therefore, such an allocation is not Pareto-optimal. We also note that each other allocation, including the returned one, maximizes the sum of agents' utilities because it achieves the maximum utility for each individual item. Such an allocation is always Pareto-optimal.
\mbox{$\square$}
\end{myproof}
At the same time, we can compute an EFX and PO allocation in polynomial time. For this task, we propose a \emph{new} algorithm: Algorithm~\ref{alg:minimax}. We let $M(o)=\max_{a\in [n]} u_a(o)$ denote the maximum utility that an agent derives from item $o$. Further, let us arrange the items in non-increasing absolute maximum utility order by using the following tie-breaking rule.
{\em Ordering} $\sigma_m$: Wlog, $|M(1)|\geq \ldots\geq |M(m)|$. Initialize $\sigma_m$ to $(1,\ldots,m)$. While there are two items $s$ and $t$ from $[m]$ such that $|M(s)|=|M(t)|$, $M(s)>0$, $M(t)<0$ and $t$ is right before $s$ in $\sigma_m$, do move $s$ right before $t$ in $\sigma_m$. Thus, within items with the same maximum absolute utility, $\sigma_m$ gives higher priority to the mixed items/goods than to the pure bads.
Algorithm~\ref{alg:minimax} allocates the items one-by-one in such an ordering $\sigma_m$. If the current item $t$ is mixed or pure good, then Algorithm~\ref{alg:minimax} gives it to an agent who has currently the minimum utility among the agents who like the item. If item $t$ is pure bad, then Algorithm~\ref{alg:minimax} gives it to an agent who has currently the maximum utility. Otherwise, it gives item $t$ to an agent with zero utility.
\begin{mytheorem}\label{thm:efxpoid}
With absolute identical utilities, Algorithm~\ref{alg:minimax} returns an EFX and PO allocation in $O(\max\lbrace m\log m,mn\rbrace)$ time.
\end{mytheorem}
\begin{algorithm}
\caption{An EFX and PO allocation.}\label{alg:minimax}
\begin{algorithmic}[1]
\Procedure{Minimax}{$[n],[m],(u_1,\ldots,u_n)$}
\State compute the ordering $\sigma_m$ \Comment{see right above}
\State $\forall a\in [n]: A_a\gets\emptyset$
\For{$t\in \sigma_m$}
\If {$t$ is mixed item or good}
\State $N\gets \lbrace b\in [n]|u_b(t)>0\rbrace$
\State $\mbox{MinUtil}(A)\gets \lbrace b\in N|u_b(A_b)=\min_{c\in N} u_c(A_c)\rbrace$
\State pick $a\in\mbox{MinUtil}(A)$
\ElsIf {$t$ is pure bad}
\State $\mbox{MaxUtil}(A)\gets \lbrace b\in [n]|u_b(A_b)=\max_{c\in [n]} u_c(A_c)\rbrace$
\State pick $a\in\mbox{MaxUtil}(A)$
\Else \Comment{$t$ is dummy item or non-pure bad}
\State pick $a\in\lbrace b\in [n]| u_b(t)=0\rbrace$
\EndIf
\State $A_a\gets A_a\cup\lbrace t\rbrace$
\EndFor
\State \Return $(A_1,\ldots,A_n)$
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{myproof}
For $t\in [m]$, we let $A^t$ denote the partially constructed allocation of items $1$ to $t$. Pareto-optimality of $A^t$ follows by the same arguments as in Theorem~\ref{thm:efonethreepoid}, but now applied to the sub-problem of the first $t$ items. As a result, $A^m$ (i.e.\ the returned allocation) satisfies also PO.
We next prove that $A^t$ is EFX by induction on $t\in [m]$. This will imply the result for EFX of $A^m$ (i.e.\ the returned allocation). In the base case, let $t$ be $1$. The allocation of item $1$ is trivially EFX. In the hypothesis, let $t>1$ and assume that the allocation $A^{t-1}$ is EFX. In the step case, let us consider round $t$.
Wlog, let the algorithm give item $t$ to agent 1. That is, $A^t_1=A^{t-1}_1\cup\lbrace t\rbrace$ and $A^t_a=A^{t-1}_a$ for each $a\in [n]\setminus\lbrace 1\rbrace$. It follows immediately by the hypothesis that each pair of different agents from $[n]\setminus\lbrace 1\rbrace$ is EFX of each other in $A^t$. We note that $t$ gives positive, negative or zero utility to agent 1. For this reason, we consider three remaining cases for agent $a\in [n]\setminus\lbrace 1\rbrace$ and agent 1.
\emph{Case 1}: Let $u_1(t)>0$. In this case, $t$ is mixed item or pure good (good) and $u_1(A^t_1)>u_1(A^{t-1}_1)$ holds. Hence, agent 1 remains EFX of agent $a$ by the hypothesis. For this reason, we next show that agent $a$ is EFX of agent 1. We consider two sub-cases depending on whether agent $a$ belongs to $N=\lbrace b\in [n]|u_b(t)>0\rbrace$ or not. We note that $1\in N$ holds because of $u_1(t)>0$.
\emph{Sub-case 1 for $a\rightarrow 1$}: Let $a\not\in N$. Hence, $u_a(t)\leq 0$. As a result, $u_a(A^{t-1}_1)\geq u_a(A^t_1)$ holds. Thus, as $A^{t-1}$ is EFX, we derive that $u_a(A^t_a)=u_a(A^{t-1}_a)\geq u_a(A^{t-1}_1\setminus\lbrace o\rbrace)\geq u_a(A^t_1\setminus\lbrace o\rbrace)$ holds for each $o\in A^{t-1}_1$ with $u_a(o)>0$. We also derive $u_a(A^t_a\setminus\lbrace o\rbrace)=u_a(A^{t-1}_a\setminus\lbrace o\rbrace)\geq u_a(A^{t-1}_1)\geq u_a(A^t_1)$ for each $o\in A^t_a$ with $u_a(o)<0$. Hence, agent $a$ is EFX of agent 1.
\emph{Sub-case 2 for $a\rightarrow 1$}: Let $a\in N$. Hence, $u_a(t)>0$. Moreover, $u_a(A^{t-1}_a)\geq u_1(A^{t-1}_1)$ by the selection rule of the algorithm. For each item $o\in A^{t-1}_1$, we have $u_1(o)=u_a(o)$ if $o$ is pure good, pure bad or dummy item, and $u_1(o)\geq u_a(o)$ if $o$ is mixed item. Therefore, $u_1(A^{t-1}_1)\geq u_a(A^{t-1}_1)$ or agent $a$ is envy-free of agent 1 in $A^{t-1}$.
We derive $u_a(A^t_a)=u_a(A^{t-1}_a)\geq u_a(A^{t-1}_1)=u_a(A^t_1\setminus\lbrace t\rbrace)$ because $A^t_a=A^{t-1}_a$ and $A^t_1=A^{t-1}_1\cup\lbrace t\rbrace$. Furthermore, $u_a(A^t_1\setminus\lbrace t\rbrace)\geq u_a(A^t_1\setminus\lbrace o\rbrace)$ for each $o\in A^{t-1}_1$ with $u_a(o)>0$ because $u_a(o)\geq u_a(t)$ holds due to the ordering of items used by the algorithm.
We now show EFX of the bads. We have $u_a(A^t_a\setminus\lbrace o\rbrace)=u_a(A^{t-1}_a\setminus\lbrace o\rbrace)\geq u_a(A^{t-1}_1)+u_a(t)=u_a(A^t_1)$ for each $o\in A^{t-1}_a$ with $u_a(o)<0$ because $|u_a(o)|\geq u_a(t)$ holds due to the ordering of items used by the algorithm. Hence, agent $a$ is EFX of agent 1.
\emph{Case 2}: Let $u_1(t)<0$. In this case, $t$ is pure bad and $u_a(A^t_1)<u_a(A^{t-1}_1)$ holds. That is, agent 1's utility decreases. By the hypothesis, it follows that agent $a$ remains EFX of agent 1 in $A^t$. For this reason, we only show that agent 1 remains EFX of agent $a$.
\emph{$1\rightarrow a$}: We have $u_1(A^{t-1}_1)\geq u_a(A^{t-1}_a)$ by the selection rule of the algorithm. For each item $o\in A^{t-1}_a$, we have $u_a(o)=u_1(o)$ if $o$ is pure good, pure bad or dummy item, and $u_a(o)\geq u_1(o)$ if $o$ is mixed item. We conclude $u_a(A^{t-1}_a)\geq u_1(A^{t-1}_a)$ and, therefore, $u_1(A^{t-1}_1)\geq u_1(A^{t-1}_a)$. Hence, agent $1$ is envy-free of agent $a$ in $A^{t-1}$.
Additionally, it follows that $u_1(A^t_1\setminus\lbrace t\rbrace)\geq u_1(A^t_a)$ holds because $A^t_1\setminus\lbrace t\rbrace=A^{t-1}_1$ and $A^t_a=A^{t-1}_a$. Due to the order of the items, we have $|u_1(b)|\geq |u_1(t)|$ for each $b\in A^t_1$ with $u_1(b)<0$. Hence, $u_1(A^t_1\setminus\lbrace b\rbrace)\geq u_1(A^t_1\setminus\lbrace t\rbrace)\geq u_1(A^t_a)$ for each $b\in A^t_1$ with $u_1(b)<0$.
At the same time, $u_1(A^{t-1}_1)\geq u_1(A^{t-1}_a\setminus\lbrace g\rbrace)$ for each $g\in A^{t-1}_a$ with $u_1(g)>0$. Again, due to the order of the items, $u_1(g)\geq |u_1(t)|$. Therefore, $u_1(A^t_a\setminus\lbrace g\rbrace)\leq u_1(A^t_a)-|u_1(t)|=u_1(A^{t-1}_a)-|u_1(t)|\leq u_1(A^{t-1}_1)-|u_1(t)|=u_1(A^t_1)$. Consequently, $u_1(A^t_1)\geq u_1(A^t_a\setminus\lbrace g\rbrace)$ for each $g\in A^t_a$ with $u_1(g)>0$.
\emph{Case 3}: Let $u_1(t)=0$. In this case, $t$ is dummy item or non-pure bad. Hence, $u_a(A^{t-1}_1)\geq u_a(A^t_1)$ and $u_1(A^t_1)=u_1(A^{t-1}_1)$ hold. That is, agent 1's utility does not change. By the hypothesis, this means that they remain EFX of each agent $a$ and also each agent $a$ remains EFX of them in $A^t$.
Finally, computing maximum values takes $O(mn)$ time and sorting items takes $O(m\log m)$ time. The loop of the algorithm takes $O(mn)$ time.
\mbox{$\square$}
\end{myproof}
For problems with identical utilities, Aziz and Rey \cite{aziz2019group} proposed the ``egal-sequential'' algorithm for computing EFX and PO allocations. By Theorem~\ref{thm:efxpoid}, Algorithm~\ref{alg:minimax} also does that. However, we feel that such problems are very restrictive as they do not have mixed items unlike many practical problems.
\begin{mycorollary}\label{cor:identone}
With identical utilities, Algorithm~\ref{alg:minimax} returns an EFX and PO allocation.
\end{mycorollary}
Algorithm~\ref{alg:minimax} allocates each mixed item/good to an agent who likes it, and each dummy item/non-pure bad to an agent who is indifferent to it. As a consequence, the result in Theorem~\ref{thm:efxpoid} extends to problems where, for each mixed item/good, the likes are identical and, for each pure bad, the dislikes are identical.
\begin{mycorollary}\label{cor:identtwo}
With identical likes (i.e.\ strictly positive utilities) for each mixed item, identical likes for each good and identical dislikes (i.e.\ strictly negative utilities) for each pure bad, Algorithm~\ref{alg:minimax} returns an EFX and PO allocation.
\end{mycorollary}
\section{Ternary additive utilities}\label{sec:ter}
We end with ternary utilities. That is, each agent's utility for each item is from $\lbrace -\alpha, 0,\beta\rbrace$ where $\alpha,\beta\in\mathbb{R}_{>0}$. We consider two cases for such utilities.
\subsection{Case for any $\alpha,\beta$}
By Proposition~\ref{pro:impefxthree}, it follows that an EFX$^3$ allocation might not exist in some problems even when $\alpha=1$ and $\beta=2$. However, we can compute an EF1$^3$ (notably, also EF1-EFX-EFX) and PO allocation with Algorithm~\ref{alg:mdrr}.
\begin{mytheorem}\label{thm:efonethreepoter}
With ternary utilities from $\lbrace -\alpha,0,\beta \rbrace$ where $\alpha,\beta \in\mathbb{R}_{>0}$, Algorithm~\ref{alg:mdrr} returns an EF1$^3$ and PO allocation.
\end{mytheorem}
\begin{myproof}
The returned allocation is EF1$^3$ by Theorem~\ref{thm:efonethreegen}. This one achieves the maximum utility for each individual item. Hence, the sum of agents' utilities in it is maximized and equal to $\beta$ multiplied by the number of goods plus $\beta$ multiplied by the number of mixed items minus $\alpha$ multiplied by the number of pure bads. In fact, this holds for each allocation that gives each mixed item/good to an agent who has utility $\beta$, and each dummy/non-pure bad to an agent who has utility $0$. Each other allocation achieves a strictly smaller sum of agents' utilities and, therefore, does not Pareto-dominate the returned allocation. Hence, the returned one is PO. \mbox{$\square$}
\end{myproof}
On the other hand, we already mentioned after Proposition~\ref{pro:impefxthree} that each allocation returned by Algorithm~\ref{alg:mdrr} in such problems may violate EFX. However, Algorithm~\ref{alg:minimax} returns an EFX and PO allocation in this case.
\begin{mytheorem}\label{thm:efxpoter}
With ternary utilities from $\lbrace -\alpha,0,\beta \rbrace$ where $\alpha,\beta \in\mathbb{R}_{>0}$, Algorithm~\ref{alg:minimax} returns an EFX and PO allocation.
\end{mytheorem}
\begin{myproof} This is where the ordering used by the algorithm plays a crucial role. If $\beta\geq\alpha$, we note that all mixed items and goods are allocated before all pure bads and all of these are allocated before the remaining items (i.e.\ dummy items and non-pure bads). If $\beta<\alpha$, we note that all pure bads are allocated before all mixed items and goods and all of these are allocated before the remaining items. Further, we observe that agents have identical likes for each mixed item or each good (i.e.\ $\beta$), and identical dislikes for each pure bad (i.e.\ $-\alpha$). Therefore, the result follows by Corollary~\ref{cor:identtwo}.
\mbox{$\square$}
\end{myproof}
By comparison, the ``ternary flow'' algorithm of Aziz and Rey \cite{aziz2019group} may fail to return an EFX allocation even with $-2/1$ utilities. To see this, simply negate the utilities in the problem from Proposition~\ref{pro:impefxthree}. This algorithm allocates firstly one good to each agent and secondly the bad to one of the agents. This outcome violates EFX.
\subsection{Case for $\alpha=\beta$}
In this case, we can compute an EFX$^3$ and PO allocation with Algorithm~\ref{alg:mdrr}. Although we consider this a minor result, we find it important because it is the only one in our analysis when EFX$^3$ and PO allocations exist.
\begin{mytheorem}\label{thm:efxthreepoter}
With ternary utilities from $\lbrace -\alpha,0,\alpha \rbrace$ where $\alpha \in\mathbb{R}_{>0}$, Algorithm~\ref{alg:mdrr} returns an EFX$^3$ and PO allocation.
\end{mytheorem}
\begin{myproof}
The returned allocation is EF1$^3$ and PO by Theorem~\ref{thm:efonethreepoter}. With general (and, therefore, ternary) utilities, an allocation that is EFX$^3$ also satisfies EF1$^3$ because EFX is a stronger property than EF1, but the opposite implication might not be true. However, with utilities from $\lbrace -\alpha,0,\alpha \rbrace$, the opposite implication also holds. Indeed, if an allocation is EF1 for a given pair of agents, then removing some good from the envied agent's bundle or removing some bad from the envious agent's bundle eliminates the envy of the envious agent. But, the envious agent likes each such good with $\alpha$ and each such bad with $-\alpha$. Hence, such an allocation is EFX. This implies that an EF1$^3$ allocation is also EFX$^3$ in this domain. \mbox{$\square$}
\end{myproof}
By Theorem~\ref{thm:efxpoter}, Algorithm~\ref{alg:minimax} returns an EFX and PO allocation in this case. However, this one might falsify EFX$^3$ even when $\alpha=1$ (see motivating example). The same holds for the ``ternary flow'' algorithm of Aziz and Rey \cite{aziz2019group} because it maximizes the egalitarian welfare when $\alpha=1$ (see motivating example).
\section{Conclusions}\label{sec:con}
We considered additive and fair division of mixed manna. For this model, we analysed axiomatic properties of allocations such as EFX$_0$, EFX$^3$, EFX, EF1$^3$, EF1 and PO in three utility domains. With general utilities, we showed that an EF1$^3$ allocation exists and gave Algorithm~\ref{alg:mdrr} for computing such an allocation (Theorem~\ref{thm:efonethreegen}). With absolute identical or $-\alpha/0/\beta$ utilities, this algorithm returns an EF1$^3$ and PO allocation (Theorems~\ref{thm:efonethreepoid} and~\ref{thm:efonethreepoter}). With $-\alpha/0/\alpha$ utilities, it returns an EFX$^3$ and PO allocation (Theorem~\ref{thm:efxthreepoter}).
With absolute identical utilities, we gave Algorithm~\ref{alg:minimax} for computing an EFX and PO allocation (Theorem~\ref{thm:efxpoid}). With ternary utilities, this algorithm also returns such an allocation (Theorem~\ref{thm:efxpoter}). We further proved two impossibility results (Propositions~\ref{pro:impefxthree} and~\ref{pro:impefxzero}). In particular, with ternary identical utilities, an EFX$_0$ allocation, or an EFX$^3$ allocation might not exist. We leave for future work two very interesting open questions with general utilities. Table~\ref{tab:results} contains our results.
\begin{table*}[b]
\centering
\caption{Key: $\checkmark$-possible, $\times$-not possible, $\P$-polynomial time, $\alpha,\beta\in\mathbb{R}_{>0}:\alpha\neq\beta$.}
\label{tab:results}
\begin{tabular}{|c|C|C|C|C|C|C|C|C|C|C|C|C|}
\hline
\multirow{2}{*}{property} & \multicolumn{3}{C|}{general} & \multicolumn{3}{C|}{ident. \& abs.} & \multicolumn{3}{C|}{$-\alpha/0/\beta$} & \multicolumn{3}{C|}{$-\alpha/0/\alpha$} \\
& \multicolumn{3}{C|}{utilities} & \multicolumn{3}{C|}{utilities} & \multicolumn{3}{C|}{utilities} & \multicolumn{3}{C|}{utilities} \\ \hline
EF1$^3$ & \multicolumn{3}{c}{$\checkmark$, $\P$ (Thm~\ref{thm:efonethreegen})} & \multicolumn{3}{c}{} & \multicolumn{3}{c}{} & \multicolumn{3}{c|}{} \\ \cline{1-10}
EF1$^3$ \& PO & \multicolumn{3}{c|}{open} & \multicolumn{3}{c|}{$\checkmark$, $\P$ (Thm~\ref{thm:efonethreepoid})} & \multicolumn{3}{c|}{$\checkmark$, $\P$ (Thm~\ref{thm:efonethreepoter})} & \multicolumn{3}{c|}{} \\ \cline{1-10}
EFX \& PO & \multicolumn{3}{c|}{open} & \multicolumn{3}{c|}{$\checkmark$, $\P$ (Thm~\ref{thm:efxpoid})} & \multicolumn{3}{c|}{$\checkmark$, $\P$ (Thm~\ref{thm:efxpoter})} & \multicolumn{3}{c|}{} \\ \cline{1-10}
EFX$^3$ & \multicolumn{3}{c}{} & \multicolumn{3}{c}{$\times$ (Prop~\ref{pro:impefxthree})} & \multicolumn{3}{c|}{} & \multicolumn{3}{c|}{} \\ \cline{1-2}
EFX$^3$ \& PO & \multicolumn{3}{c}{} & \multicolumn{3}{c}{} & \multicolumn{3}{c|}{} & \multicolumn{3}{c|}{$\checkmark$, $\P$ (Thm~\ref{thm:efxthreepoter})} \\ \hline
EFX$_0$ & \multicolumn{3}{c}{} & \multicolumn{3}{c}{$\times$ (Prop~\ref{pro:impefxzero})} & \multicolumn{3}{c}{} & \multicolumn{3}{c|}{} \\ \hline
\end{tabular}
\end{table*}
\appendix
\section{A complete version of Algorithm~\ref{alg:mdrr}}\label{sec:alg}
For reasons of space, we presented a short version of Algorithm~\ref{alg:mdrr} in the main text. We present in here a complete version of it.
\setcounter{algorithm}{0}
\begin{algorithm}
\caption{An EF1$^3$ allocation.}
\begin{algorithmic}[1]
\Procedure{Modified Double Round-Robin}{$[n],[m],(u_1,\ldots,u_n)$}
\State $M^0\gets\lbrace o\in [m]|\forall b\in [n]:u_b(o)\leq 0,\exists c\in [n]:u_c(o)=0\rbrace$
\State Allocate each item from $M^0$ to an agent who has utility $0$ for it. We let $A$ denote this allocation.
\State $M^-\gets\lbrace o\in [m]\setminus M^0|\forall a\in [n]: u_a(o)<0\rbrace$
\State Suppose $|M^-|=qn-p$ for some $q,p\in\mathbb{N}$ with $p<n$. Create $p$ ``fake'' dummy items for which each agent has utility $0$, and add them to $M^-$. Hence, $|M^-|=qn$.
\State Let the agents come in a round-robin sequence $(1,\ldots,n-1,n)$ and pick their most preferred item in $M^-$ until all items in it are allocated.
\State $M^+\gets\lbrace o\in [m]\setminus M^0|\exists a\in [n]: u_a(o)>0\rbrace$
\State Let the agents come in a round-robin sequence $(n,n-1,\ldots,1)$ and pick their most preferred item in $M^+$ until all items in it are allocated. If an agent has no available item which gives them strictly positive utility, they pretend to pick a ``fake'' dummy item for which they have utility $0$.
\State Remove the ``fake'' dummy items from the current allocation and return the resulting allocation. We let $B$ denote this allocation.
\State \Return $(A_1\cup B_1,\ldots,A_n\cup B_n)$
\EndProcedure
\end{algorithmic}
\end{algorithm}
\end{document} |
\begin{document}
\begin{abstract}
We define the $i$-restriction and $i$-induction functors on the
category $\mathcal{O}$ of the cyclotomic rational double affine Hecke
algebras. This yields a crystal on the set of isomorphism classes of
simple modules, which is isomorphic to the crystal of a Fock space.
\\
\\
\noindent\textsc{R\'esum\'e.} On d\'efinit les foncteurs de
$i$-restriction et $i$-induction sur la cat\'egorie $\mathcal{O}$ des
alg\`ebres de Hecke doublement affine rationnelles cyclotomiques.
Ceci donne lieu \`a un cristal sur l'ensemble des classes
d'isomorphismes de modules simples, qui est isomorphe au cristal
d'un espace de Fock.
\end{abstract}
\maketitle
\section*{Introduction}
In \cite{A1}, S. Ariki defined the $i$-restriction and $i$-induction
functors for cyclotomic Hecke algebras. He showed that the
Grothendieck group of the category of finitely generated projective
modules of these algebras admits a module structure over the affine
Lie algebra of type $A^{(1)}$, with the action of Chevalley
generators given by the $i$-restriction and $i$-induction functors.
The restriction and induction functors for rational DAHA's (=double
affine Hecke algebras) were recently defined by R. Bezrukavnikov and
P. Etingof. With these functors, we give an analogue of Ariki's
construction for the category $\mathcal{O}$ of cyclotomic rational DAHA's:
we show that as a module over the type $A^{(1)}$ affine Lie algebra,
the Grothendieck group of this category is isomorphic to a Fock
space. We also construct a crystal on the set of isomorphism classes
of simple modules in the category $\mathcal{O}$. It is isomorphic to the
crystal of the Fock space. Recall that this Fock space also enters
in some conjectural description of the decomposition numbers for the
category $\mathcal{O}$ considered here. See \cite{U}, \cite{Y}, \cite{R}
for related works.
\section*{Notation}
For $A$ an algebra, we will write $A\modu$ for the category of
finitely generated $A$-modules. For $f: A\rightarrow B$ an algebra
homomorphism from $A$ to another algebra $B$ such that $B$ is
finitely generated over $A$, we will write
$$f_\ast: B\modu\rightarrow A\modu$$ for the restriction functor and we write $$f^\ast: A\modu\rightarrow B\modu,\quad M\mapsto B\otimes_AM.$$
A $\mathbb{C}$-linear category $\mathcal{A}$ is called artinian if the Hom sets
are finite dimensional $\mathbb{C}$-vector spaces and every object has a
finite length. Given an object $M$ in $\mathcal{A}$, we denote by
$\soc(M)$ (resp. $\head(M)$) the socle (resp. the head) of $M$,
which is the largest semi-simple subobject (quotient) of $M$.
Let $\mathcal{C}$ be an abelian category. The Grothendieck group of
$\mathcal{C}$ is the quotient of the free abelian group generated by
objects in $\mathcal{C}$ modulo the relations $M=M'+M''$ for all
objects $M,M',M''$ in $\mathcal{C}$ such that there is an exact
sequence $0\rightarrow M'\rightarrow M\rightarrow M''\rightarrow 0$. Let $K(\mathcal{C})$ denote the
complexified Grothendieck group, a $\mathbb{C}$-vector space. For each
object $M$ in $\mathcal{C}$, let $[M]$ be its class in
$K(\mathcal{C})$. Any exact functor $F: \mathcal{C}\rightarrow\mathcal{C}'$
between two abelian categories induces a vector space homomorphism
$K(\mathcal{C})\rightarrow K(\mathcal{C}')$, which we will denote by $F$
again. Given an algebra $A$ we will abbreviate $K(A)=K(A\modu)$.
Denote by $\Fct(\mathcal{C},\mathcal{C}')$ the category of functors
from a category $\mathcal{C}$ to a category $\mathcal{C}'$. For
$F\in\Fct(\mathcal{C},\mathcal{C}')$ write $\End(F)$ for the ring of
endomorphisms of the functor $F$. We denote by $1_F: F\rightarrow F$ the
identity element in $\End(F)$. Let
$G\in\Fct(\mathcal{C'},\mathcal{C''})$ be a functor from
$\mathcal{C}'$ to another category $\mathcal{C}''$. For any
$X\in\End(F)$ and any $X'\in\End(G)$ we write $X'X:G\circ F\rightarrow
G\circ F$ for the morphism of functors given by
$X'X(M)=X'(F(M))\circ G(X(M))$ for any $M\in\mathcal{C}$.
Let $e\geqslant 2$ be an integer and $z$ be a formal parameter. Denote
by $\mathfrak{sl}_e$ the Lie algebra of traceless $e\times e$ complex
matrices. Write $E_{ij}$ for the elementary matrix with $1$ in the
position $(i,j)$ and $0$ elsewhere. The type $A^{(1)}$ affine Lie
algebra $\widehat{\mathfrak{sl}}_e$ is
$\mathfrak{sl}_e\otimes\mathbb{C}[z,z^{-1}]\oplus\mathbb{C} c$ with $c$ a central
element. The Lie bracket is the usual one. We will denote the
Chevalley generators of $\widehat{\mathfrak{sl}}_e$ as follows:
\begin{eqnarray*}
&e_i=E_{i,i+1}\otimes 1,\quad &f_i=E_{i+1,i}\otimes 1,\quad
h_i=(E_{ii}-E_{i+1,i+1})\otimes 1, \quad 1\leqslant i\leqslant e-1,\\
&e_0=E_{e1}\otimes z,\quad &f_0=E_{1e}\otimes z^{-1},\quad
h_0=(E_{ee}-E_{11})\otimes 1+c.
\end{eqnarray*}
For $i\in\mathbb{Z}/e\mathbb{Z}$ we will denote the simple root (resp. coroot)
corresponding to $e_i$ by $\alpha_i$ (resp. $\alpha\spcheck_i$). The
fundamental weights are $\{\Lambda_i: i\in\mathbb{Z}/e\mathbb{Z}\}$ with
$\alpha\spcheck_i(\Lambda_j)=\delta_{ij}$ for any $i,j\in\mathbb{Z}/e\mathbb{Z}$. We
will write $P$ for the weight lattice, the free abelian group
generated by the fundamental weights.
\section{Reminders on Hecke algebras, rational DAHA's and restriction
functors}\label{s:reminder}
\iffalse We give some reminders on the restriction and induction
functors of Hecke algebras, those of rational DAHA's and the
Knizhnik-Zamolodchikov functor. The reminders on their definitions
mainly serve the proof of Theorem \ref{iso}, while those on their
properties will be more frequently used. This section contains no
new result except in Proposition \ref{standard} and Proposition
\ref{indstandard}.\fi
\subsection{Hecke algebras.}\label{ss:Hecke}
Let $\mathfrak{h}$ be a finite dimensional vector space over $\mathbb{C}$. Recall that
a pseudo-reflection is a non trivial element $s$ of $GL(\mathfrak{h})$ which
acts trivially on a hyperplane, called the reflecting hyperplane of
$s$. Let $W\subset GL(\mathfrak{h})$ be a finite subgroup generated by
pseudo-reflections. Let $\mathcal{S}$ be the set of
pseudo-reflections in $W$ and $\mathcal{A}$ be the set of reflecting
hyperplanes. We set $\mathfrak{h}_{reg}=\mathfrak{h}-\bigcup_{H\in\mathcal{A}}H$, it is
stable under the action of $W$. Fix $x_0\in \mathfrak{h}_{reg}$ and identify
it with its image in $\mathfrak{h}_{reg}/W$. By definition the braid group
attached to $(W,\mathfrak{h})$, denoted by $B(W,\mathfrak{h})$, is the fundamental group
$\pi_1(\mathfrak{h}_{reg}/W, x_0).$
For any $H\in\mathcal{A}$, let $W_H$ be the pointwise stabilizer of
$H$. This is a cyclic group. Write $e_H$ for the order of $W_H$. Let
$s_H$ be the unique element in $W_H$ whose determinant is
$\exp(\frac{2\pi\sqrt{-1}}{e_H})$.
Let $q$ be a map from $\mathcal{S}$ to $\mathbb{C}^\ast$ that is constant on
the $W$-conjugacy classes. Following \cite[Definition 4.21]{BMR} the
Hecke algebra $\mathscr{H}_q(W,\mathfrak{h})$ attached to $(W,\mathfrak{h})$ with parameter
$q$ is the quotient of the group algebra $\mathbb{C} B(W,\mathfrak{h})$ by the
relations:
\begin{equation}\label{heckerelation}
(T_{s_H}-1)\prod_{t\in W_H\cap\mathcal{S}}(T_{s_H}-q(t))=0,\quad
H\in\mathcal{A}.
\end{equation}
Here $T_{s_H}$ is a generator of the monodromy around $H$ in
$\mathfrak{h}_{reg}/W$ such that the lift of $T_{s_H}$ in $\mathfrak{h}_{reg}$
via the map $\mathfrak{h}_{reg}\rightarrow\mathfrak{h}_{reg}/W$ is represented by a path from
$x_0$ to $s_H(x_0)$. See \cite[Section 2B]{BMR} for a precise
definition. When the subspace $\mathfrak{h}^W$ of fixed points of $W$ in $\mathfrak{h}$
is trivial, we abbreviate
$$B_W=B(W,\mathfrak{h}), \quad \mathscr{H}_q(W)=\mathscr{H}_q(W,\mathfrak{h}).$$
\subsection{Parabolic restriction and induction for Hecke algebras.}\label{ss:resHecke}
In this section we will assume that $\mathfrak{h}^W=\{0\}$. A parabolic subgroup
$W'$ of $W$ is by definition the stabilizer of a point $b\in\mathfrak{h}$. By
a theorem of Steinberg, the group $W'$ is also generated by
pseudo-reflections. Let $q'$ be the restriction of $q$ to
$\mathcal{S'}=W'\cap \mathcal{S}$. There is an explicit inclusion
$\imath_q: \mathscr{H}_{q'}(W')\hookrightarrow \mathscr{H}_q(W)$ given by
\cite[Section 2D]{BMR}. The restriction functor
\begin{equation*}
\Resh:\mathscr{H}_q(W)\modu\rightarrow\mathscr{H}_{q'}(W')\modu
\end{equation*} is the functor
$(\imath_q)_\ast$. The induction functor
$$\Indh=\mathscr{H}_q(W)\otimes_{\mathscr{H}_{q'}(W')}-$$
is left adjoint to $\Resh$. The coinduction functor
$$\coIndh=\Hom_{\mathscr{H}_{q'}(W')}(\mathscr{H}_q(W),-)$$ is right adjoint to
$\Resh$. The three functors above are all exact.
Let us recall the definition of $\imath_q$. It is induced from an
inclusion $\imath: B_{W'}\hookrightarrow B_{W}$, which is in turn
the composition of three morphisms $\ell$, $\kappa$, $\jmath$
defined as follows. First, let $\mathcal{A}'\subset\mathcal{A}$ be
the set of reflecting hyperplanes of $W'$. Write
$$\overline{\mathfrak{h}}=\mathfrak{h}/\mathfrak{h}^{W'},\quad\overline{\mathcal{A}}=\{\overline{H}=H/\mathfrak{h}^{W'}:\,H\in
\mathcal{A}'\}, \quad
\overline{\mathfrak{h}}_{reg}=\overline{\mathfrak{h}}-\bigcup_{\overline{H}\in\overline{\mathcal{A}}}\overline{H},
\quad\mathfrak{h}'_{reg}=\mathfrak{h}-\bigcup_{H\in\mathcal{A}'}H.$$ The canonical
epimorphism $p: \mathfrak{h}\rightarrow\overline{\mathfrak{h}}$ induces a trivial
$W'$-equivariant fibration $p: \mathfrak{h}'_{reg}\rightarrow \overline{\mathfrak{h}}_{reg}$,
which yields an isomorphism
\begin{equation}\label{heckeres1}
\ell: B_{W'}=\pi_1(\overline{\mathfrak{h}}_{reg}/{W'},
p(x_0))\overset{\sim}\rightarrow \pi_1(\mathfrak{h}'_{reg}/W',x_0).
\end{equation}
Endow $\mathfrak{h}$ with a $W$-invariant hermitian scalar product. Let
$||\cdot||$ be the associated norm. Set
\begin{equation}\label{eq:omega}
\Omega=\{x\in\mathfrak{h}:\,||x-b||< \varepsilon\},
\end{equation}
where $\varepsilon$ is a positive real number such that the closure
of $\Omega$ does not intersect any hyperplane that is in the
complement of $\mathcal{A}'$ in $\mathcal{A}$. Let $\gamma: [0,1]\rightarrow
\mathfrak{h}$ be a path such that $\gamma(0)=x_0$, $\gamma(1)=b$ and
$\gamma(t)\in\mathfrak{h}_{reg}$ for $0<t<1$. Let $u\in[0,1[$ such that
$x_1=\gamma(u)$ belongs to $\Omega$, write $\gamma_u$ for the
restriction of $\gamma$ to $[0,u]$. Consider the homomorphism
\begin{equation*}
\sigma: \pi_1(\Omega\cap\mathfrak{h}_{reg},x_1)\rightarrow\pi_1(\mathfrak{h}_{reg}, x_0),
\quad\lambda\mapsto \gamma^{-1}_u\cdot\lambda\cdot\gamma_u.
\end{equation*} The canonical
inclusion $\mathfrak{h}_{reg}\hookrightarrow\mathfrak{h}'_{reg}$ induces a homomorphism
$\pi_1(\mathfrak{h}_{reg}, x_0)\rightarrow \pi_1(\mathfrak{h}'_{reg}, x_0)$. Composing it with
$\sigma$ gives an invertible homomorphism
$$\pi_1(\Omega\cap\mathfrak{h}_{reg},x_1)\rightarrow\pi_1(\mathfrak{h}'_{reg}, x_0).$$ Since
$\Omega$ is $W'$-invariant, its inverse gives an isomorphism
\begin{equation}\label{heckeres2}
\kappa:\pi_1(\mathfrak{h}'_{reg}/W',
x_0)\overset{\sim}\rightarrow\pi_1((\Omega\cap\mathfrak{h}_{reg})/W',x_1).
\end{equation}
Finally, we see from above that $\sigma$ is injective. So it induces
an inclusion
$$\pi_1((\Omega\cap\mathfrak{h}_{reg})/W',x_1)\hookrightarrow\pi_1(\mathfrak{h}_{reg}/W', x_0).$$
Composing it with the canonical inclusion $\pi_1(\mathfrak{h}_{reg}/W',
x_0)\hookrightarrow \pi_1(\mathfrak{h}_{reg}/W, x_0)$ gives an injective
homomorphism
\begin{equation}\label{heckeres3}
\jmath:\pi_1((\Omega\cap\mathfrak{h}_{reg})/W',x_1)\hookrightarrow
\pi_1(\mathfrak{h}_{reg}/W, x_0)=B_W.
\end{equation}
By composing $\ell$, $\kappa$, $\jmath$ we get the inclusion
\begin{equation}\label{heckeres4}
\imath=\jmath\circ\kappa\circ\ell: B_{W'}\hookrightarrow B_W.
\end{equation}
It is proved in \cite[Section 4C]{BMR} that $\imath$ preserves the
relations in (\ref{heckerelation}). So it induces an inclusion of
Hecke algebras which is the desired inclusion
\begin{equation*}
\imath_q: \mathscr{H}_{q'}(W')\hookrightarrow \mathscr{H}_q(W).
\end{equation*}
For $\imath$, $\imath': B_{W'}\hookrightarrow B_W$ two inclusions
defined as above via different choices of the path $\mathfrak{g}amma$, there
exists an element $\rho\in P_W=\pi_1(\mathfrak{h}_{reg},x_0)$ such that for
any $a\in B_{W'}$ we have $\imath(a)=\rho\imath'(a)\rho^{-1}$. In
particular, the functors $\imath_\ast$ and $(\imath')_\ast$ from
$B_W\modu$ to $B_{W'}\modu$ are isomorphic. Also, we have
$(\imath_q)_\ast\cong(\imath_q')_\ast.$ So there is a unique
restriction functor $\Resh$ up to isomorphisms.
\subsection{Rational DAHA's.}\label{ss:DAHA}
Let $c$ be a map from $\mathcal{S}$ to $\mathbb{C}$ that is constant on the
$W$-conjugacy classes. The rational DAHA attached to $W$ with
parameter $c$ is the quotient $H_c(W,\mathfrak{h})$ of the smash product of
$\mathbb{C} W$ and the tensor algebra of $\mathfrak{h}\oplus\mathfrak{h}^\ast$ by the relations
\begin{equation*}
[x,x']=0,\quad[y,y']=0,\quad
[y,x]=\pair{x,y}-\sum_{s\in\mathcal{S}}c_s\pair{\alpha_s,
y}\pair{x,\alpha_s\spcheck}s,
\end{equation*}
for all $x,x'\in\mathfrak{h}^\ast$, $y,y'\in\mathfrak{h}$. Here $\pair{\cdot,\cdot}$ is
the canonical pairing between $\mathfrak{h}^\ast$ and $\mathfrak{h}$, the element
$\alpha_s$ is a generator of $\mathrm{Im}(s|_{\mathfrak{h}^\ast}-1)$ and
$\alpha_s\spcheck$ is the generator of $\mathrm{Im}(s|_{\mathfrak{h}}-1)$ such
that $\pair{\alpha_s, \alpha_s\spcheck}=2$.
For $s\in\mathcal{S}$ write $\lambda_s$ for the non trivial eigenvalue
of $s$ in $\mathfrak{h}^\ast$. Let $\{x_i\}$ be a basis of $\mathfrak{h}^\ast$ and let
$\{y_i\}$ be the dual basis. Let
\begin{equation}\label{euler1}
\mathbf{eu}=\sum_{i}x_iy_i+\frac{\dim(\mathfrak{h})}{2}-\sum_{s\in\mathcal{S}}\frac{2c_s}{1-\lambda_s}s
\end{equation}
be the Euler element in $H_c(W,\mathfrak{h})$. Its definition is independent
of the choice of the basis $\{x_i\}$. We have
\begin{equation}\label{euler2}
[\mathbf{eu},x_i]=x_i,\quad [\mathbf{eu},y_i]=-y_i,\quad
[\mathbf{eu},s]=0.
\end{equation}
\subsection{}\label{ss:catO}
The category $\mathcal{O}$ of $H_c(W,\mathfrak{h})$ is the full subcategory
$\mathcal{O}_c(W,\mathfrak{h})$ of the category of $H_c(W,\mathfrak{h})$-modules consisting of
objects that are finitely generated as $\mathbb{C}[\mathfrak{h}]$-modules and
$\mathfrak{h}$-locally nilpotent. We recall from \cite[Section 3]{GGOR} the
following properties of $\mathcal{O}_c(W,\mathfrak{h})$.
The action of the Euler element $\mathbf{eu}$ on a module in
$\mathcal{O}_c(W,\mathfrak{h})$ is locally finite. The category $\mathcal{O}_c(W,\mathfrak{h})$ is a
highest weight category. In particular, it is artinian. Write
$\Irr(W)$ for the set of isomorphism classes of irreducible
representations of $W$. The poset of standard modules in
$\mathcal{O}_c(W,\mathfrak{h})$ is indexed by $\Irr(W)$ with the partial order given
by \cite[Theorem 2.19]{GGOR}. More precisely, for $\xi\in\Irr(W)$,
equip it with a $\mathbb{C} W\ltimes\mathbb{C}[\mathfrak{h}^\ast]$-module structure by letting
the elements in $\mathfrak{h}\subset\mathbb{C}[\mathfrak{h}^\ast]$ act by zero, the standard
module corresponding to $\xi$ is
$$\Delta(\xi)=H_c(W,\mathfrak{h})\otimes_{\mathbb{C} W\ltimes\mathbb{C}[\mathfrak{h}^\ast]}\xi.$$
It is an indecomposable module with a simple head $L(\xi)$. The set
of isomorphism classes of simple modules in $\mathcal{O}_c(W,\mathfrak{h})$ is
$$\{[L(\xi)]:\xi\in\Irr(W)\}.$$
It is a basis of the $\mathbb{C}$-vector space $K(\mathcal{O}_c(W,\mathfrak{h}))$. The set
$\{[\Delta(\xi)]:\xi\in\Irr(W)\}$ gives another basis of
$K(\mathcal{O}_c(W,\mathfrak{h}))$.
We say a module $N$ in $\mathcal{O}_c(W,\mathfrak{h})$ has a standard filtration if
it admits a filtration
$$0=N_0\subset N_1\subset\ldots\subset N_n=N$$ such that each
quotient $N_i/N_{i-1}$ is isomorphic to a standard module. We denote
by $\mathcal{O}^\Delta_c(W,\mathfrak{h})$ the full subcategory of $\mathcal{O}_c(W,\mathfrak{h})$
consisting of such modules.
\begin{lemme}\label{standfilt}
(1) Any projective object in $\mathcal{O}_c(W,\mathfrak{h})$ has a standard
filtration.
(2) A module in $\mathcal{O}_c(W,\mathfrak{h})$ has a standard filtration if and only
if it is free as a $\mathbb{C}[\mathfrak{h}]$-module.
\end{lemme}
Both (1) and (2) are given by \cite[Proposition 2.21]{GGOR}.
The category $\mathcal{O}_c(W,\mathfrak{h})$ has enough projective objects and has
finite homological dimension \cite[Section 4.3.1]{GGOR}. In
particular, any module in $\mathcal{O}_c(W,\mathfrak{h})$ has a finite projective
resolution. Write $\Proj_c(W,\mathfrak{h})$ for the full subcategory of
projective modules in $\mathcal{O}_c(W,\mathfrak{h})$. Let
\begin{equation*}
I: \Proj_c(W,\mathfrak{h})\rightarrow \mathcal{O}_c(W,\mathfrak{h})
\end{equation*}
be the canonical embedding functor. We have the following lemma.
\begin{lemme}\label{projiso}
For any abelian category $\mathcal{A}$ and any right exact
functors $F_1$, $F_2$ from $\mathcal{O}_c(W,\mathfrak{h})$ to $\mathcal{A}$, the
homomorphism of vector spaces
\begin{equation*}
r_I:\Hom(F_1, F_2)\rightarrow \Hom(F_1\circ I, F_2\circ I),
\quad\gamma\mapsto \gamma 1_I
\end{equation*}
is an isomorphism.
\end{lemme}
In particular, if the functor $F_1\circ I$ is isomorphic to
$F_2\circ I$, then we have $F_1\cong F_2$.
\begin{proof}
We need to show that for any morphism of functors $\nu: F_1\circ
I\rightarrow F_2\circ I$ there is a unique morphism $\tilde{\nu}: F_1\rightarrow
F_2$ such that $\tilde{\nu}1_{I}=\nu$. Since $\mathcal{O}_c(W,\mathfrak{h})$ has enough projectives, for any $M\in \mathcal{O}_c(W,\mathfrak{h})$ there exist $P_0$, $P_1$ in $\Proj_c(W,\mathfrak{h})$ and an exact sequence in $\mathcal{O}_c(W,\mathfrak{h})$
\begin{equation}\label{eq:projresolution}
P_1\overset{d_1}\longrightarrow P_0\overset{d_0}\longrightarrow M\longrightarrow 0.
\end{equation}
Applying the right exact functors $F_1$, $F_2$ to this sequence we get the two exact sequences in the diagram below. The morphism of functors $\nu:F_1\circ I\rightarrow F_2\circ I$ yields well defined morphisms $\nu(P_1)$, $\nu(P_0)$ such that the square commutes
$$\xymatrix{F_1(P_1)\ar[r]^{F_1(d_1)}\ar[d]^{\nu(P_1)} & F_1(P_0)\ar[r]^{F_1(d_0)}\ar[d]^{\nu(P_0)} &F_1(M)\ar[r] \ar@{}[d] &0\ar@{}[d]\\F_2(P_1)\ar[r]^{F_2(d_1)} & F_2(P_0)\ar[r]^{F_2(d_0)} &F_2(M)\ar[r] &0.}$$
Define $\tilde{\nu}(M)$ to be the unique morphism $F_1(M)\rightarrow F_2(M)$ that makes the diagram commute. Its definition is independent of the choice of $P_0$, $P_1$, and it is independent of the choice of the exact sequence (\ref{eq:projresolution}). The assignment $M\mapsto \tilde{\nu}(M)$ gives a morphism of functors $\tilde{\nu}: F_1\rightarrow F_2$ such that $\tilde{\nu}1_{I}=\nu$. It is unique by the uniqueness of the morphism $\tilde{\nu}(M)$.
\end{proof}
\subsection{KZ functor.}\label{ss:KZ}
The Knizhnik-Zamolodchikov functor is an exact functor from the
category $\mathcal{O}_c(W,\mathfrak{h})$ to the category $\mathscr{H}_q(W,\mathfrak{h})\modu$,
where $q$ is a certain parameter associated with $c$. Let us recall
its definition from \cite[Section 5.3]{GGOR}.
Let $\mathcal{D}(\mathfrak{h}_{reg})$ be the algebra of differential operators
on $\mathfrak{h}_{reg}$. Write
$$H_c(W,\mathfrak{h}_{reg})=H_c(W,\mathfrak{h})\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}_{reg}].$$ We consider the Dunkl isomorphism, which is an
isomorphism of algebras
\begin{equation*}
H_c(W,\mathfrak{h}_{reg})\overset{\sim}\rightarrow \mathcal{D}(\mathfrak{h}_{reg})\rtimes\mathbb{C} W
\end{equation*}
given by $x\mapsto x$, $w\mapsto w$ for $x\in\mathfrak{h}^\ast$, $w\in W$, and
\begin{equation*}
y\mapsto
\partial_y+\sum_{s\in\mathcal{S}}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(y)}{\alpha_s}(s-1),\quad\text{for }y\in\mathfrak{h}.
\end{equation*}
For any $M\in \mathcal{O}_c(W,\mathfrak{h})$, write
$$M_{\mathfrak{h}_{reg}}=M\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}_{reg}].$$
It identifies via the Dunkl isomorphism with a
$\mathcal{D}(\mathfrak{h}_{reg})\rtimes W$-module which is finitely generated
over $\mathbb{C}[\mathfrak{h}_{reg}]$. Hence $M_{\mathfrak{h}_{reg}}$ is a $W$-equivariant
vector bundle on $\mathfrak{h}_{reg}$ with an integrable connection $\nabla$
given by $\nabla_y(m)=\partial_ym$ for $m\in M$, $y\in\mathfrak{h}$. It is
proved in \cite[Proposition 5.7]{GGOR} that the connection $\nabla$
has regular singularities. Now, regard $\mathfrak{h}_{reg}$ as a complex
manifold endowed with the transcendental topology. Denote by
$\mathcal{O}^{an}_{\mathfrak{h}_{reg}}$ the sheaf of holomorphic functions on
$\mathfrak{h}_{reg}$. For any free $\mathbb{C}[\mathfrak{h}_{reg}]$-module $N$ of finite rank,
we consider
$$N^{an}=N\otimes_{\mathbb{C}[\mathfrak{h}_{reg}]}\mathcal{O}^{an}_{\mathfrak{h}_{reg}}.$$ It is an
analytic locally free sheaf on $\mathfrak{h}_{reg}$. For $\nabla$ an
integrable connection on $N$, the sheaf of holomorphic horizontal
sections
\begin{equation*}
N^{\nabla}=\{n\in N^{an}:\,\nabla_y(n)=0\text{ for all }y\in\mathfrak{h}\}
\end{equation*}
is a $W$-equivariant local system on $\mathfrak{h}_{reg}$. Hence it identifies
with a local system on $\mathfrak{h}_{reg}/W$. So it yields a finite
dimensional representation of $\mathbb{C} B(W,\mathfrak{h})$. For $M\in \mathcal{O}_c(W,\mathfrak{h})$
it is proved in \cite[Theorem 5.13]{GGOR} that the action of $\mathbb{C}
B(W,\mathfrak{h})$ on $(M_{\mathfrak{h}_{reg}})^{\nabla}$ factors through the Hecke
algebra $\mathscr{H}_q(W,\mathfrak{h})$. The formula for the parameter $q$ is given
in \cite[Section 5.2]{GGOR}.
The Knizhnik-Zamolodchikov functor is the functor
$$\KZ(W,\mathfrak{h}): \mathcal{O}_c(W,\mathfrak{h})\rightarrow\mathscr{H}_q(W,\mathfrak{h})\modu,\quad M\mapsto
(M_{\mathfrak{h}_{reg}})^{\nabla}.$$ By definition it is exact. Let us recall
some of its properties following \cite{GGOR}. Assume in the rest of
this subsection that \emph{the algebras $\mathscr{H}_q(W,\mathfrak{h})$ and $\mathbb{C} W$
have the same dimension over $\mathbb{C}$}. We abbreviate $\KZ=\KZ(W,\mathfrak{h})$. The functor $\KZ$ is represented by a projective object $P_{\KZ}$ in $\mathcal{O}_c(W,\mathfrak{h})$. More precisely, there is an algebra homomorphism
\begin{equation*}
\rho:\mathscr{H}_q(W,\mathfrak{h})\rightarrow\End_{\mathcal{O}_c(W,\mathfrak{h})}(P_{\KZ})^{\op}
\end{equation*}
such that $\KZ$ is isomorphic to the functor $\Hom_{\mathcal{O}_c(W,\mathfrak{h})}(P_{\KZ},-)$. By \cite[Theorem 5.15]{GGOR} the homomorphism $\rho$ is an isomorphism. In particular $\KZ(P_{\KZ})$ is isomorphic to $\mathscr{H}_q(W,\mathfrak{h})$ as $\mathscr{H}_q(W,\mathfrak{h})$-modules.
Now, recall that the center of a category $\mathcal{C}$ is the
algebra $Z(\mathcal{C})$ of endomorphisms of the identity functor
$Id_{\mathcal{C}}$. So there is a canonical map
$$Z(\mathcal{O}_c(W,\mathfrak{h}))\rightarrow\End_{\mathcal{O}_c(W,\mathfrak{h})}(P_{\KZ}).$$
The composition of this map with $\rho^{-1}$ yields an algebra
homomorphism
\begin{equation*}
\gamma: Z(\mathcal{O}_c(W,\mathfrak{h}))\rightarrow Z(\mathscr{H}_q(W,\mathfrak{h})),
\end{equation*}
where $Z(\mathscr{H}_q(W,\mathfrak{h}))$ denotes the center of $\mathscr{H}_q(W,\mathfrak{h})$.
\begin{lemme}\label{lem:center}
(1) The homomorphism $\gamma$ is an isomorphism.
(2) For a module $M$ in $\mathcal{O}_c(W,\mathfrak{h})$ and an element $f$ in
$Z(\mathcal{O}_c(W,\mathfrak{h}))$ the morphism
$$\KZ(f(M)): \KZ(M)\rightarrow\KZ(M)$$ is the multiplication by $\gamma(f)$.
\end{lemme}
See \cite[Corollary 5.18]{GGOR} for (1). Part (2) follows from the
construction of $\gamma$.
The functor $\KZ$ is a quotient functor, see \cite[Theorem 5.14]{GGOR}. Therefore it has a right adjoint $S:\mathscr{H}_q(W,\mathfrak{h})\rightarrow\mathcal{O}_c(W,\mathfrak{h})$ such that the canonical adjunction map $\KZ\circ S\rightarrow\Id_{\mathscr{H}_q(W,\mathfrak{h})}$ is an isomorphism of functors. We have the following proposition.
\begin{prop}\label{KZ}
Let $Q$ be a projective object in $\mathcal{O}_c(W,\mathfrak{h})$.
(1) For any object $M\in\mathcal{O}_c(W,\mathfrak{h})$, the following morphism of $\mathbb{C}$-vector spaces is an isomorphism
$$\Hom_{\mathcal{O}_c(W,\mathfrak{h})}(M,Q)\overset{\sim}\lra \Hom_{\mathscr{H}_q(W,\mathfrak{h})}(\KZ(M),\KZ(Q)),\quad f\mapsto\KZ(f).$$
In particular, the functor $\KZ$ is fully faithful over $\Proj_c(W,\mathfrak{h})$.
(2) The canonical adjunction map gives an isomorphism $Q\overset{\sim}\ra S\circ \KZ (Q)$.
\end{prop}
See \cite[Theorems 5.3, 5.16]{GGOR}.
\subsection{Parabolic restriction and induction for rational
DAHA's.}\label{ss:resDAHA}
From now on we will always assume that $\mathfrak{h}^W=1$. Recall from Section
\ref{ss:resHecke} that $W'\subset W$ is the stabilizer of a point
$b\in\mathfrak{h}$ and that $\overline{\mathfrak{h}}=\mathfrak{h}/\mathfrak{h}^{W'}$. Let us recall from
\cite{BE} the definition of the parabolic restriction and induction
functors
$$\Res_b:\mathcal{O}_c(W,\mathfrak{h})\rightarrow\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})\,,\quad
\Ind_b:\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})\rightarrow\mathcal{O}_c(W,\mathfrak{h}).$$ First we need some notation. For any point $p\in\mathfrak{h}$ we write
$\mathbb{C}[[\mathfrak{h}]]_p$ for the completion of $\mathbb{C}[\mathfrak{h}]$ at $p$, and we write $\widehat{\mathbb{C}[\mathfrak{h}]}_p$ for the completion of $\mathbb{C}[\mathfrak{h}]$ at the $W$-orbit of $p$ in $\mathfrak{h}$. Note that we have $\mathbb{C}[[\mathfrak{h}]]_0=\widehat{\mathbb{C}[\mathfrak{h}]}_0$. For any
$\mathbb{C}[\mathfrak{h}]$-module $M$ let $$\widehat{M}_p=\widehat{\mathbb{C}[\mathfrak{h}]}_p\otimes_{\mathbb{C}[\mathfrak{h}]}M.$$ The completions $\widehat{H}_{c}(W,\mathfrak{h})_b$, $\widehat{H}_{c'}(W',\mathfrak{h})_0$ are well defined algebras.
We denote by $\widehat{\mathcal{O}}_c(W,\mathfrak{h})_b$ the
category of $\widehat{H}_{c}(W,\mathfrak{h})_b$-modules that are finitely
generated over $\widehat{\mathbb{C}[\mathfrak{h}]}_b$, and we denote by $\widehat{\mathcal{O}}_{c'}(W',\mathfrak{h})_0$ the
category of $\widehat{H}_{c'}(W',\mathfrak{h})_0$-modules that are finitely
generated over $\widehat{\mathbb{C}[\mathfrak{h}]}_0$. Let
$P=\mathrm{Fun}_{W'}(W,\widehat{H}_{c'}(W',\mathfrak{h})_0)$ be the set of
$W'$-invariant maps from $W$ to $\widehat{H}_{c'}(W',\mathfrak{h})_0$. Let
$Z(W,W',\widehat{H}_{c'}(W',\mathfrak{h})_0)$ be the ring of endomorphisms of the
right $\widehat{H}_{c'}(W',\mathfrak{h})_0$-module $P$.
We have the following proposition given by \cite[Theorem 3.2]{BE}.
\begin{prop}\label{BEiso}
There is an isomorphism of algebras $$\Theta:
\widehat{H}_{c}(W,\mathfrak{h})_b\longrightarrow Z(W,W', \widehat{H}_{c'}(W',\mathfrak{h})_0)$$
defined as follows: for $f\in P$, $\alpha\in\mathfrak{h}^\ast$, $a\in\mathfrak{h}$, $u\in
W$,
\begin{eqnarray*}
(\Theta(u)f)(w)&=&f(w u),\\
(\Theta(x_{\alpha})f)(w)&=&(x^{(b)}_{w\alpha}+\alpha(w^{-1}b))f(w),\\
(\Theta(y_a)f)(w)&=&y^{(b)}_{wa}f(w)+\sum_{s\in\mathcal{S}, s\notin
W'}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(wa)}{x^{(b)}_{\alpha_s}+\alpha_s
(b)}(f(sw)-f(w)),
\end{eqnarray*}
where $x_\alpha\in\mathfrak{h}^\ast\subset H_{c}(W,\mathfrak{h})$,
$x^{(b)}_{\alpha}\in\mathfrak{h}^\ast\subset H_{c'}(W',\mathfrak{h})$, $y_a\in\mathfrak{h}\subset
H_{c}(W,\mathfrak{h})$, $y_a^{(b)}\in\mathfrak{h}\subset H_{c'}(W',\mathfrak{h})$.
\end{prop}
Using $\Theta$ we will identify $\widehat{H}_{c}(W,\mathfrak{h})_b$-modules with $Z(W,W', \widehat{H}_{c'}(W',\mathfrak{h})_0)$-modules. So the module $P=\mathrm{Fun}_{W'}(W,\widehat{H}_{c'}(W',\mathfrak{h})_0)$ becomes an
$(\widehat{H}_{c}(W,\mathfrak{h})_b,\widehat{H}_{c'}(W',\mathfrak{h})_0)$-bimodule. Hence
for any $N\in \widehat{\mathcal{O}}_{c'}(W',\mathfrak{h})_0$ the module
$P\otimes_{\widehat{H}_{c'}(W',\mathfrak{h})_0}N$ lives in
$\widehat{\mathcal{O}}_c(W,\mathfrak{h})_b$. It is naturally identified with
$\mathrm{Fun}_{W'}(W,N)$, the set of $W'$-invariant maps from $W$ to
$N$. For any $\mathbb{C}[\mathfrak{h}^\ast]$-module $M$ write $E(M)\subset M$ for the
locally nilpotent part of $M$ under the action of $\mathfrak{h}$.
The ingredients for defining the functors $\Res_b$ and $\Ind_b$
consist of:
\begin{itemize}
\item the adjoint pair of functors $(\widehat{\quad}_b, E^b)$ with
$$\widehat{\quad}_b:\mathcal{O}_{c}(W,\mathfrak{h})\rightarrow\widehat{\mathcal{O}}_c(W,\mathfrak{h})_b,\quad
M\mapsto\widehat{M}_b,$$
$$E^b: \widehat{\mathcal{O}}_c(W,\mathfrak{h})_b\rightarrow \mathcal{O}_{c}(W,\mathfrak{h}),\quad N\mapsto
E(N),$$
\item the Morita equivalence
$$J:\widehat{\mathcal{O}}_{c'}(W',\mathfrak{h})_0\rightarrow\widehat{\mathcal{O}}_c(W,\mathfrak{h})_b,\quad
N\mapsto \mathrm{Fun}_{W'}(W,N),$$ and its quasi-inverse
$R$ given in Section \ref{ss:BEiso} below,
\item the equivalence of categories
$$E: \widehat{\mathcal{O}}_{c'}(W',\mathfrak{h})_0\rightarrow\mathcal{O}_{c'}(W',\mathfrak{h}), \quad M\mapsto E(M)$$
and its quasi-inverse given by $N\mapsto\widehat{N}_0$,
\item the equivalence of categories
\begin{equation}\label{zeta}
\zeta: \mathcal{O}_{c'}(W',\mathfrak{h})\rightarrow \mathcal{O}_{c'}(W',\overline{\mathfrak{h}}),\quad
M\mapsto\{v\in M:\,yv=0,\,\text{ for all }y\in \mathfrak{h}^{W'}\}
\end{equation}
and its quasi-inverse $\zeta^{-1}$ given in Section
\ref{rmq:resDAHA} below.
\end{itemize}
For $M\in\mathcal{O}_c(W,\mathfrak{h})$ and $N\in\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$ the
functors $\Res_b$ and $\Ind_b$ are defined by
\begin{eqnarray}
\Res_b(M)=\zeta\circ E\circ R(\widehat{M}_b),\label{Resb}\\
\Ind_b(N)=E^b\circ J(\widehat{\zeta^{-1}(N)}_0).\nonumber
\end{eqnarray}
We refer to \cite[Section 2,3]{BE} for details.
\subsection{The idempotent $x_{\pr}$ and the functor $R$.}\label{ss:BEiso}
We give some details on the isomorphism $\Theta$ for a future use. Fix elements
$1=u_1, u_2,\ldots, u_r$ in $W$ such that $W=\bigsqcup_{i=1}^r
W'u_i$. Let $\mathrm{Mat}_r(\widehat{H}_{c'}(W',\mathfrak{h})_0)$ be the
algebra of $r\times r$ matrices with coefficients in
$\widehat{H}_{c'}(W',\mathfrak{h})_0$. We have an algebra isomorphism
\begin{eqnarray}
\Phi:Z(W,W', \widehat{H}_{c'}(W',\mathfrak{h})_0)&\rightarrow&
\mathrm{Mat}_r(\widehat{H}_{c'}(W',\mathfrak{h})_0),\label{Phi}\\
A&\mapsto& (\Phi(A)_{ij})_{1\leqslant i,j\leqslant r}\nonumber
\end{eqnarray}
such that
\begin{equation*}
(Af)(u_i)=\sum_{j=1}^r\Phi(A)_{ij}f(u_j), \quad \text{ for all }f\in
P, \,1\leqslant i\leqslant r.\end{equation*}
Denote by $E_{ij}$, $1\leqslant i,j\leqslant r$, the elementary matrix in $\mathrm{Mat}_r(\widehat{H}_{c'}(W',\mathfrak{h})_0)$ with coefficient $1$ in the position $(i,j)$ and zero elsewhere. Note that the algebra isomorphism
$$\Phi\circ\Theta: \widehat{H}_{c}(W,\mathfrak{h})_b\overset{\sim}\lra \mathrm{Mat}_r(\widehat{H}_{c'}(W',\mathfrak{h})_0)$$
restricts to an isomorphism of subalgebras
\begin{equation}\label{eq:phithetax}
\widehat{\mathbb{C}[\mathfrak{h}]}_b\cong\bigoplus_{i=1}^r \mathbb{C}[[\mathfrak{h}]]_0E_{ii}.
\end{equation}
Indeed, there is a unique isomorphism of algebras
\begin{equation}\label{eq:varpi}
\varpi:\widehat{\mathbb{C}[\mathfrak{h}]}_b\cong\bigoplus_{i=1}^r\mathbb{C}[[\mathfrak{h}]]_{u_i^{-1}b}
\end{equation}
extending the algebra homomorphism
$$\mathbb{C}[\mathfrak{h}]\rightarrow\bigoplus_{i=1}^r\mathbb{C}[\mathfrak{h}],\quad x\mapsto (x,x,\ldots, x),\quad \forall\ x\in\mathfrak{h}^\ast.$$
For each $i$ consider the isomorphism of algebras
$$\phi_i: \mathbb{C}[[\mathfrak{h}]]_{u_i^{-1}b}\rightarrow\mathbb{C}[[\mathfrak{h}]]_0,\quad x\mapsto u_ix+x(u_i^{-1}b),\quad\forall\ x\in\mathfrak{h}^\ast.$$
The isomorphism (\ref{eq:phithetax}) is exactly the composition of $\varpi$ with the direct sum $\oplus_{i=1}^r\phi_i.$ Here $E_{ii}$ is the image of the idempotent in $\widehat{\mathbb{C}[\mathfrak{h}]}_b$ corresponding to the component $\mathbb{C}[[\mathfrak{h}]]_{u_i^{-1}b}$. We will denote by $x_{\pr}$ the idempotent in $\widehat{\mathbb{C}[\mathfrak{h}]}_b$ corresponding to $\mathbb{C}[[\mathfrak{h}]]_b$, i.e., $\Phi\circ\Theta(x_{\pr})=E_{11}$. Then the following functor
$$R:\widehat{\mathcal{O}}_c(W,\mathfrak{h})_b\rightarrow \widehat{\mathcal{O}}_{c'}(W',\mathfrak{h})_0,\quad M\mapsto x_{\pr}M$$
is a quasi-inverse of $J$. Here, the action of $\widehat{H}_{c'}(W',\mathfrak{h})_0$ on $R(M)=x_{\pr}M$ is given by the following formulas deduced from Proposition \ref{BEiso}. For any $\alpha\in\mathfrak{h}^\ast$, $w\in W'$,
$a\in\mathfrak{h}$, $m\in M$ we have
\begin{eqnarray}
x_\alpha^{(b)}x_{\pr}(m)&=&x_{\pr}((x_{\alpha}-\alpha(b))m), \label{xform}\\
wx_{\pr}(m)&=&x_{\pr}(wm), \label{wform}\\
y_a^{(b)}x_{\pr}(m)&=&x_{\pr}((y_a+\sum_{s\in\mathcal{S},\,s\notin
W'}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(a)}{x_{\alpha_s}})m). \label{yform}
\end{eqnarray}
In particular, we have
\begin{equation}\label{eq:R(M)}
R(M)=\phi_1^\ast(x_{\pr}(M))
\end{equation}
as $\mathbb{C}[[\mathfrak{h}]]_0\rtimes W'$-modules. Finally, note that the following equality holds in $\widehat{H}_c(W,\mathfrak{h})_b$
\begin{equation}\label{killwform}
x_{\pr}ux_{\pr}=0, \quad \forall\ u\in W-W'.
\end{equation}
\subsection{A quasi-inverse of $\zeta$.}\label{rmq:resDAHA}
Let us recall from \cite[Section 2.3]{BE} the following facts. Let
$\mathfrak{h}^{\ast W'}$ be the subspace of $\mathfrak{h}^\ast$ consisting of fixed
points of $W'$. Set $$(\mathfrak{h}^{\ast W'})^\bot=\{v\in\mathfrak{h}: f(v)=0\text{ for
all } f\in\mathfrak{h}^{\ast W'}\}.$$ We have a $W'$-invariant decomposition
$$\mathfrak{h}=(\mathfrak{h}^{\ast W'})^\bot\oplus\mathfrak{h}^{W'}.$$ The $W'$-space $(\mathfrak{h}^{\ast
W'})^\bot$ is canonically identified with $\overline{\mathfrak{h}}$. Since the
action of $W'$ on $\mathfrak{h}^{W'}$ is trivial, we have an obvious algebra
isomorphism
\begin{equation}\label{isobete}
H_{c'}(W',\mathfrak{h})\cong H_{c'}(W',\overline{\mathfrak{h}})\otimes
\mathcal{D}(\mathfrak{h}^{W'}).
\end{equation}
It maps an element $y$ in the subset $\mathfrak{h}^{W'}$ of $H_{c'}(W',\mathfrak{h})$ to
the operator $\partial_y$ in $\mathcal{D}(\mathfrak{h}^{W'})$. Write
$\mathcal{O}(1,\mathfrak{h}^{W'})$ for the category of finitely generated
$\mathcal{D}(\mathfrak{h}^{W'})$-modules that are $\partial_y$-locally nilpotent for
all $y\in\mathfrak{h}^{W'}$. The algebra isomorphism above yields an
equivalence of categories
\begin{equation*}
\mathcal{O}_{c'}(W',\mathfrak{h})\cong\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})\otimes\mathcal{O}(1,\mathfrak{h}^{W'}).
\end{equation*}
The functor $\zeta$ in (\ref{zeta}) is an equivalence, because it is
induced by the functor
$$\mathcal{O}(1,\mathfrak{h}^{W'})\overset{\sim}\ra\mathbb{C}\modu,\quad M\rightarrow\{m\in M, \partial_y(m)=0\text{ for all }y\in\mathfrak{h}^{W'}\},$$
which is an equivalence by Kashiwara's lemma upon taking Fourier
transforms. In particular, a quasi-inverse of $\zeta$ is given by
\begin{equation}\label{eq:zetainverse}
\zeta^{-1}: \mathcal{O}_{c'}(W',\overline{\mathfrak{h}})\rightarrow \mathcal{O}_{c'}(W',\mathfrak{h}),\quad N\mapsto
N\otimes\mathbb{C}[\mathfrak{h}^{W'}],\end{equation}
where $\mathbb{C}[\mathfrak{h}^{W'}]\in \mathcal{O}(1,\mathfrak{h}^{W'})$ is
the polynomial representation of $\mathcal{D}(\mathfrak{h}^{W'})$.
Moreover, the functor $\zeta$ maps a standard module in
$\mathcal{O}_{c'}(W',\mathfrak{h})$ to a standard module in
$\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$. Indeed, for any $\xi\in\Irr(W')$, we
have an isomorphism of $H_{c'}(W',\mathfrak{h})$-modules
\begin{equation*}
H_{c'}(W',\mathfrak{h})\otimes_{\mathbb{C}[\mathfrak{h}^\ast]\rtimes
W'}\xi=(H_{c'}(W',\overline{\mathfrak{h}})\otimes_{\mathbb{C}[(\overline{\mathfrak{h}})^\ast]\rtimes
W'}\xi)\otimes(\mathcal{D}(\mathfrak{h}^{W'})\otimes_{\mathbb{C}[(\mathfrak{h}^{W'})^\ast]}\mathbb{C}).
\end{equation*}
On the right hand side $\mathbb{C}$ denotes the trivial module of
$\mathbb{C}[(\mathfrak{h}^{W'})^\ast]$, and the latter is identified with the
subalgebra of $\mathcal{D}(\mathfrak{h}^{W'})$ generated by $\partial_y$ for all
$y\in\mathfrak{h}^{W'}$. We have
$$\mathcal{D}(\mathfrak{h}^{W'})\otimes_{\mathbb{C}[(\mathfrak{h}^{W'})^\ast]}\mathbb{C}\cong\mathbb{C}[\mathfrak{h}^{W'}]$$ as
$\mathcal{D}(\mathfrak{h}^{W'})$-modules. So $\zeta$ maps the standard module
$\Delta(\xi)$ for $H_{c'}(W',\mathfrak{h})$ to the standard module
$\Delta(\xi)$ for $H_{c'}(W',\overline{\mathfrak{h}})$.
\subsection{}\label{ss:resprop}
Here are some properties of $\Res_b$ and $\Ind_b$.
\begin{prop}\label{Res}
\begin{itemize}
\item[(1)] Both functors $\Res_b$ and $\Ind_b$ are exact. The functor $\Res_b$ is left
adjoint to $\Ind_b$. In particular the functor $\Res_b$ preserves
projective objects and $\Ind_b$ preserves injective objects.
\iffalse \item[(2)] If $W'=1$ then for each $M\in \mathcal{O}_c(W,\mathfrak{h})$ the
vector space $\Res_b(M)$ is the fiber of the $\mathbb{C}[\mathfrak{h}]$-module $M$ at
$b$, and $\Ind_b(\mathbb{C})=P_{\KZ}.$\fi
\item[(2)] Let $\Res^W_{W'}$
and $\Ind^W_{W'}$ be respectively the restriction and induction
functors of groups. We have the following commutative diagram
\begin{equation*}
\xymatrix{K(\mathcal{O}_c(W,\mathfrak{h}))\ar[r]_{\sim}^{\omega}\ar@<1ex>[d]^{\Res_b} & K(\mathbb{C} W)\ar@<1ex>[d]^{\Res^W_{W'}}\\
K(\mathcal{O}_{c'}(W',\overline{\mathfrak{h}}))\ar[r]_{\sim}^{\omega'}\ar@<1ex>[u]^{\Ind_b} & K(\mathbb{C}
W')\ar@<1ex>[u]^{\Ind^W_{W'}}.}
\end{equation*}
Here the isomorphism $\omega$ (resp. $\omega'$) is given by mapping
$[\Delta(\xi)]$ to $[\xi]$ for any $\xi\in\Irr(W)$ (resp.
$\xi\in\Irr(W')$).
\end{itemize}
\end{prop}
See \cite[Proposition $3.9$, Theorem $3.10$]{BE} for (1),
\cite[Proposition $3.14$]{BE} for (2).
\subsection{Restriction of modules having a standard filtration}\label{ss:standardres}
In the rest of Section 1, we study the actions of the restriction functors on modules having a standard filtration in
$\mathcal{O}_c(W,\mathfrak{h})$ (Proposition \ref{standard}). We will need the following lemmas.
\begin{lemme}\label{lem:MV}
Let $M$ be a module in $\mathcal{O}^\Delta_c(W,\mathfrak{h})$.
(1) There is a finite dimensional subspace $V$ of $M$ such that $V$
is stable under the action of $\mathbb{C} W$ and the map
\begin{equation*}
\mathbb{C}[\mathfrak{h}]\otimes V\rightarrow M,\quad p\otimes v\mapsto pv
\end{equation*}
is an isomorphism of $\mathbb{C}[\mathfrak{h}]\rtimes W$-modules.
(2) The map $\omega:K(\mathcal{O}_c(W,\mathfrak{h}))\rightarrow K(\mathbb{C} W)$ in Proposition
\ref{Res}(2) satisfies
\begin{equation}\label{eq:omegakgp}
\omega([M])=[V].
\end{equation}
\end{lemme}
\begin{proof}
Let $$0=M_0\subset M_1\subset \ldots\subset M_l=M$$ be a filtration
of $M$ such that for any $1\leqslant i\leqslant l$ we have
$M_i/M_{i-1}\cong\Delta(\xi_i)$ for some $\xi_i\in\Irr(W)$. We
prove (1) and (2) by recurrence on $l$. If $l=1$, then $M$ is a
standard module. Both (1) and (2) hold by definition. For $l>1$, by
induction we may suppose that there is a subspace $V'$ of $M_{l-1}$
such that the properties in (1) and (2) are satisfied for $M_{l-1}$
and $V'$. Now, consider the exact sequence
$$0\longrightarrow M_{l-1}\longrightarrow M\overset{j}\longrightarrow \Delta(\xi_l)\longrightarrow 0.$$
From the isomorphism of $\mathbb{C}[\mathfrak{h}]\rtimes W$-modules
$\Delta(\xi_l)\cong\mathbb{C}[\mathfrak{h}]\otimes \xi_l$ we see that $\Delta(\xi_l)$
is a projective $\mathbb{C}[\mathfrak{h}]\rtimes W$-module. Hence there exists a
morphism of $\mathbb{C}[\mathfrak{h}]\rtimes W$-modules $s: \Delta(\xi_l)\rightarrow M$ that
provides a section of $j$. Let $V=V'\oplus s(\xi_l)\subset M$. It is
stable under the action of $\mathbb{C} W$. The map $\mathbb{C}[\mathfrak{h}]\otimes V\rightarrow M$ in
(1) is an injective morphism of $\mathbb{C}[\mathfrak{h}]\rtimes W$-modules. Its image
is $M_{l-1}\oplus s(\Delta(\xi_l))$, which is equal to $M$. So it is
an isomorphism. We have
$$\omega([M])=\omega([M_{l-1}])+\omega([\Delta(\xi_l)]),$$ by
assumption $\omega([M_{l-1}])=[V']$, so
$\omega([M])=[V']+[\xi_l]=[V]$.
\end{proof}
\begin{lemme}\label{lem:eufinite}
(1) Let $M$ be a
$\widehat{H}_{c}(W,\mathfrak{h})_0$-module free over $\mathbb{C}[[\mathfrak{h}]]_0$. If there
exist generalized eigenvectors $v_1,\ldots,v_n$ of $\eu$ which form
a basis of $M$ over $\mathbb{C}[[\mathfrak{h}]]_0$, then for $f_1,\ldots,
f_n\in\mathbb{C}[[\mathfrak{h}]]_0$ the element $m=\sum_{i=1}^nf_iv_i$ is $\eu$-finite
if and only if $f_1,\ldots,f_n$ all belong to $\mathbb{C}[\mathfrak{h}]$.
(2) Let $N$ be an object in $\mathcal{O}_c(W,\mathfrak{h})$. If $\widehat{N}_0$ is a
free $\mathbb{C}[[\mathfrak{h}]]_0$-module, then $N$ is a free $\mathbb{C}[\mathfrak{h}]$-module. It
admits a basis consisting of generalized eigenvectors
$v_1,\ldots,v_n$ of $\eu$.
\end{lemme}
\begin{proof}
(1) It follows from the proof of \cite[Theorem 2.3]{BE}.
(2) Since $N$ belongs to $\mathcal{O}_c(W,\mathfrak{h})$, it is finitely generated
over $\mathbb{C}[\mathfrak{h}]$. Denote by $\mathfrak{m}$ the maximal ideal of $\mathbb{C}[[\mathfrak{h}]]_0$.
The canonical map $N\rightarrow\widehat{N}_0/\mathfrak{m}\widehat{N}_0$ is
surjective. So there exist $v_1,\ldots,v_n$
in $N$ such that their images form a basis of
$\widehat{N}_0/\mathfrak{m}\widehat{N}_0$ over $\mathbb{C}$. Moreover, we may choose $v_1,\ldots,v_n$ to be generalized eigenvectors of $\eu$, because the $\eu$-action on $N$ is locally finite. Since
$\widehat{N}_0$ is free over $\mathbb{C}[[\mathfrak{h}]]_0$, Nakayama's lemma yields
that $v_1,\ldots,v_n$ form a basis of $\widehat{N}_0$ over
$\mathbb{C}[[\mathfrak{h}]]_0$. By part (1) the set $N'$ of $\eu$-finite elements in
$\widehat{N}_0$ is the free $\mathbb{C}[\mathfrak{h}]$-submodule generated by
$v_1,\ldots, v_n$. On the other hand, since $\widehat{N}_0$ belongs
to $\widehat{\mathcal{O}}_c(W,\mathfrak{h})_0$, by \cite[Proposition 2.4]{BE} an
element in $\widehat{N}_0$ is $\mathfrak{h}$-nilpotent if and only if it is
$\eu$-finite. So $N'=E(\widehat{N}_0).$ Moreover, the canonical inclusion $N\subset E(\widehat{N}_0)$ is an equality by \cite[Theorem 3.2]{BE}. Hence $N=N'$. This implies that $N$ is free over
$\mathbb{C}[\mathfrak{h}]$, with a basis given by $v_1,\ldots,v_n$, which are
generalized eigenvectors of $\eu$.
\end{proof}
\begin{prop}\label{standard}
Let $M$ be an object in $\mathcal{O}^\Delta_c(W,\mathfrak{h})$.
(1) The object $\Res_b(M)$ has a standard filtration.
(2) Let $V$ be a subspace of $M$ that has the properties of Lemma
\ref{lem:MV}(1). Then there is an isomorphism of
$\mathbb{C}[\overline{\mathfrak{h}}]\rtimes W'$-modules
\begin{equation*}
\Res_b(M)\cong \mathbb{C}[\overline{\mathfrak{h}}]\otimes\Res^{W}_{W'}(V).
\end{equation*}
\end{prop}
\begin{proof}
(1) By the end of Section \ref{rmq:resDAHA} the equivalence
$\zeta$ maps a standard module in $\mathcal{O}_{c'}(W',\mathfrak{h})$ to a standard
one in $\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$. Hence to prove that
$\Res_b(M)=\zeta\circ E\circ R(\widehat{M}_b)$ has a standard
filtration, it is enough to show that $N=E\circ R(\widehat{M}_b)$
has one. We claim that the module $N$ is free over $\mathbb{C}[\mathfrak{h}]$. So the
result follows from Lemma \ref{standfilt}(2).
Let us prove the claim. Recall from (\ref{eq:R(M)}) that we have $R(\widehat{M}_b)=\phi_1^\ast(x_{\pr}\widehat{M}_b)$ as $\mathbb{C}[[\mathfrak{h}]]_0\rtimes W'$-modules. Using the isomorphism of $\mathbb{C}[\mathfrak{h}]\rtimes W$-modules
$M\cong\mathbb{C}[\mathfrak{h}]\otimes V$ given in Lemma \ref{lem:MV}(1), we deduce an isomorphism of $\mathbb{C}[[\mathfrak{h}]]_0\rtimes W'$-modules
\begin{eqnarray*}
R(\widehat{M}_b)&\cong&\phi_1^\ast(x_{\pr}(\widehat{\mathbb{C}[\mathfrak{h}]}_b\otimes V))\nonumber\\
&\cong & \mathbb{C}[[\mathfrak{h}]]_0\otimes V.\label{completeiso}
\end{eqnarray*}
So the module $R(\widehat{M}_b)$ is free over $\mathbb{C}[[\mathfrak{h}]]_0$. The completion
of the module $N$ at $0$ is isomorphic to $R(\widehat{M}_b)$. By
Lemma \ref{lem:eufinite}(2) the module $N$ is free over $\mathbb{C}[\mathfrak{h}]$.
The claim is proved.
(2) Since $\Res_b(M)$ has a standard filtration, by Lemma
\ref{lem:MV} there exists a finite dimensional vector space
$V'\subset \Res_b(M)$ such that $V'$ is stable under the action of
$\mathbb{C} W'$ and we have an isomorphism of $\mathbb{C}[\overline{\mathfrak{h}}]\rtimes
W'$-modules
\begin{equation*}
\Res_b(M)\cong \mathbb{C}[\overline{\mathfrak{h}}]\otimes V'.
\end{equation*}
Moreover, we have $\omega'([\Res_b(M)])=[V']$ where $\omega'$ is the
map in Proposition \ref{Res}(2). The same proposition yields that
$\Res^W_{W'}(\omega[M])=\omega'([\Res_b(M)])$. Since
$\omega([M])=[V]$ by (\ref{eq:omegakgp}), the $\mathbb{C} W'$-module $V'$ is
isomorphic to $\Res^W_{W'}(V)$. So we have an isomorphism of
$\mathbb{C}[\overline{\mathfrak{h}}]\rtimes W'$-modules
$$\Res_b(M)\cong \mathbb{C}[\overline{\mathfrak{h}}]\otimes\Res^{W}_{W'}(V).$$
\end{proof}
\section{KZ commutes with restriction functors}\label{s:KZcommute}
In this section, we relate the restriction and induction functors
for rational DAHA's to the corresponding functors for Hecke algebras
via the functor $\KZ$. We will always assume that the Hecke algebras
have the same dimension as the corresponding group algebras. Thus
the Knizhnik-Zamolodchikov functors admit the properties recalled in
Section \ref{ss:KZ}.
\subsection{}\label{ss: thmiso}
Let $W$ be a complex reflection group acting on $\mathfrak{h}$. Let $b$ be a
point in $\mathfrak{h}$ and let $W'$ be its stabilizer in $W$. We will
abbreviate $\KZ=\KZ(W,\mathfrak{h})$, $\KZ'=\KZ(W',\overline{\mathfrak{h}})$.
\begin{thm}\label{iso}
There is an isomorphism of functors
\begin{equation*}
\KZ'\circ\Res_b\cong\Resh\circ\KZ.
\end{equation*}
\end{thm}
\begin{proof}
We will regard $\KZ: \mathcal{O}_c(W,\mathfrak{h})\rightarrow \mathscr{H}_q(W)\modu$ as a functor
from $\mathcal{O}_c(W,\mathfrak{h})$ to $B_W\modu$ in the obvious way. Similarly we
will regard $\KZ'$ as a functor to $B_{W'}\modu$. Recall the
inclusion $\imath: B_{W'}\hookrightarrow B_W$ from
(\ref{heckeres4}). The theorem amounts to proving that for any $M\in
\mathcal{O}_c(W,\mathfrak{h})$ there is a natural isomorphism of $B_{W'}$-modules
\begin{equation}\label{eq:thm}
\KZ'\circ\Res_b(M)\cong\imath_\ast\circ\KZ(M).\end{equation}
\emph{Step 1.} Recall the functor $\zeta:
\mathcal{O}_{c'}(W',\mathfrak{h})\rightarrow\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$ from (\ref{zeta}) and its quasi-inverse $\zeta^{-1}$ in (\ref{eq:zetainverse}).
Let $$N=\zeta^{-1}(\Res_b(M)).$$ We have
$N\cong\Res_b(M)\otimes\mathbb{C}[\mathfrak{h}^{W'}].$ Since the canonical
epimorphism $\mathfrak{h}\rightarrow\overline{\mathfrak{h}}$ induces a fibration
$\mathfrak{h}'_{reg}\rightarrow\overline{\mathfrak{h}}_{reg}$, see Section \ref{ss:resHecke}, we have
\begin{equation}\label{h}
N_{\mathfrak{h}'_{reg}}\cong
\Res_b(M)_{\overline{\mathfrak{h}}_{reg}}\otimes\mathbb{C}[\mathfrak{h}^{W'}].\end{equation} By
Dunkl isomorphisms, the left hand side is a $\mathcal{D}(\mathfrak{h}'_{reg})\rtimes
W'$-module while the right hand side is a
$(\mathcal{D}(\overline{\mathfrak{h}}_{reg})\rtimes
W')\otimes\mathcal{D}(\mathfrak{h}^{W'})$-module. Identify these two algebras
in the obvious way. The isomorphism (\ref{h}) is compatible with the
$W'$-equivariant $\mathcal{D}$-module structures. Hence we have
$$(N_{\mathfrak{h}'_{reg}})^\nabla\cong(\Res_b(M)_{\overline{\mathfrak{h}}_{reg}})^\nabla\otimes
\mathbb{C}[\mathfrak{h}^{W'}]^\nabla.$$ Since $\mathbb{C}[\mathfrak{h}^{W'}]^\nabla=\mathbb{C}$, this yields a
natural isomorphism
\begin{equation*}
\ell_\ast\circ\KZ(W',\mathfrak{h})(N)\cong\KZ'\circ\Res_b(M),
\end{equation*}
where $\ell$ is the homomorphism defined in (\ref{heckeres1}).
\emph{Step 2.} Consider the $W'$-equivariant algebra isomorphism
$$\phi: \mathbb{C}[\mathfrak{h}]\rightarrow\mathbb{C}[\mathfrak{h}],\quad x\mapsto x+x(b)\text{ for } x\in\mathfrak{h}^\ast.$$
It induces an isomorphism
$\hat{\phi}:\mathbb{C}[[\mathfrak{h}]]_b\overset{\sim}\rightarrow\mathbb{C}[[\mathfrak{h}]]_0$. The latter
yields an algebra isomorphism
$$\mathbb{C}[[\mathfrak{h}]]_b\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}_{reg}]\simeq
\mathbb{C}[[\mathfrak{h}]]_0\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}'_{reg}].$$ To see this note first
that by definition, the left hand side is $\mathbb{C}[[\mathfrak{h}]]_b[\alpha_{s}^{-1},
s\in\mathcal{S}]$. For $s\in\mathcal{S}$, $s\notin W'$ the element
$\alpha_{s}$ is invertible in $\mathbb{C}[[\mathfrak{h}]]_b$, so we have
$$\mathbb{C}[[\mathfrak{h}]]_b\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}_{reg}]=\mathbb{C}[[\mathfrak{h}]]_b[\alpha_{s}^{-1},
s\in\mathcal{S}\cap W'].$$ For $s\in \mathcal{S}\cap W'$ we have
$\alpha_{s}(b)=0$, so $\hat{\phi}(\alpha_s)=\alpha_s$. Hence
\begin{eqnarray*}
\hat{\phi}(\mathbb{C}[[\mathfrak{h}]]_b)[\hat{\phi}(\alpha_{s})^{-1},
s\in\mathcal{S}\cap W']&=&\mathbb{C}[[\mathfrak{h}]]_0[\alpha_{s}^{-1},
s\in\mathcal{S}\cap
W']\\
&=&\mathbb{C}[[\mathfrak{h}]]_0\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}'_{reg}].
\end{eqnarray*}
\emph{Step 3.} We will assume in Steps 3, 4, 5 that $M$ is a module
in $\mathcal{O}^\Delta_c(W,\mathfrak{h})$. In this step we prove that $N$ is
isomorphic to $\phi^\ast(M)$ as $\mathbb{C}[\mathfrak{h}]\rtimes W'$-modules. Let
$V$ be a subspace of $M$ as in Lemma \ref{lem:MV}(1). So we have an
isomorphism of $\mathbb{C}[\mathfrak{h}]\rtimes W$-modules
\begin{equation}\label{inducefilt}
M\cong \mathbb{C}[\mathfrak{h}]\otimes V.
\end{equation}
Also, by Proposition \ref{standard}(2) there is an isomorphism of
$\mathbb{C}[\mathfrak{h}]\rtimes W'$-modules
\begin{eqnarray*}
N&\cong&\mathbb{C}[\mathfrak{h}]\otimes\Res^W_{W'}(V).
\end{eqnarray*}
So $N$ is isomorphic to $\phi^\ast(M)$ as $\mathbb{C}[\mathfrak{h}]\rtimes
W'$-modules.
\emph{Step 4.} In this step we compare
$(\widehat{(\phi^\ast(M))}_0)_{\mathfrak{h}'_{reg}}$ and
$(\widehat{N}_0)_{\mathfrak{h}'_{reg}}$ as
$\widehat{\mathcal{D}(\mathfrak{h}'_{reg})}_0$-modules. The definition of these
$\widehat{\mathcal{D}(\mathfrak{h}'_{reg})}_0$-module structures will be given below
in terms of connections. By (\ref{Resb}) we have $N=E\circ
R(\widehat{M}_b)$, so we have $\widehat{N}_0\cong
R(\widehat{M}_b).$ Next, by (\ref{eq:R(M)}) we have an isomorphism of $\mathbb{C}[[\mathfrak{h}]]_0\rtimes W'$-modules
\begin{eqnarray*}
R(\widehat{M}_b)&=&\mathfrak{h}at{\phi}^\ast (x_{\pr}(\widehat{M}_b))\\
&=&\widehat{(\phi^\ast(M))}_0.
\end{eqnarray*}
So we get an isomorphism of $\mathbb{C}[[\mathfrak{h}]]_0\rtimes W'$-modules
$$\hat\Psi: \widehat{(\phi^\ast(M))}_0\rightarrow\widehat{N}_0.$$
Now, let us consider connections on these modules. Note that by Step $2$ we have
$$(\widehat{(\phi^\ast(M))}_0)_{\mathfrak{h}'_{reg}}
=\hat{\phi}^\ast(x_{\pr}(\widehat{M}_b)_{\mathfrak{h}_{reg}}).$$
Write $\nabla$ for the connection on $M_{\mathfrak{h}_{reg}}$ given by the Dunkl isomorphism for $H_c(W,\mathfrak{h}_{reg})$. We equip
$(\widehat{(\phi^\ast(M))}_0)_{\mathfrak{h}'_{reg}}$ with the connection $\tilde{\nabla}$ given by
$$\tilde{\nabla}_a(x_{\pr}m)=x_{\pr}(\nabla_a(m)),\quad\forall\ m\in(\widehat{M}_b)_{\mathfrak{h}_{reg}},\ a\in\mathfrak{h}.$$ Let $\nabla^{(b)}$ be the connection on $N_{\mathfrak{h}'_{reg}}$ given by the Dunkl isomorphism for $H_{c'}(W',\mathfrak{h}'_{reg})$. This restricts to a connection on $(\widehat{N}_0)_{\mathfrak{h}'_{reg}}$. We claim that $\hat\Psi$ is compatible with these connections, i.e., we have
\begin{equation}\label{but}
\nabla_a^{(b)}(x_{\pr} m)=x_{\pr}\nabla_a(m),\quad\forall\ m\in (\widehat{M}_b)_{\mathfrak{h}_{reg}}.
\end{equation} Recall the subspace $V$ of $M$ from Step 3. By
Lemma \ref{lem:MV}(1) the map
$$(\widehat{\mathbb{C}[\mathfrak{h}]}_b\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}_{reg}])\otimes V\rightarrow
(\widehat{M_b})_{\mathfrak{h}_{reg}},\quad p\otimes v\mapsto pv$$ is a
bijection. So it is enough to prove (\ref{but}) for $m=pv$ with
$p\in\widehat{\mathbb{C}[\mathfrak{h}]}_b\otimes_{\mathbb{C}[\mathfrak{h}]}\mathbb{C}[\mathfrak{h}_{reg}]$, $v\in V$. We have
\begin{eqnarray}\label{conncal}
\nabla^{(b)}_a(x_{\pr}p v)&=&(y^{(b)}_a-\sum_{s\in\mathcal{S}\cap
W'}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(a)}
{x_{\alpha_s}^{(b)}}(s-1))(x_{\pr}p v)\nonumber\\
&=&x_{\pr}(y_a+\sum_{s\in\mathcal{S},s\notin
W'}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(a)}{x_{\alpha_s}}-\nonumber\\
&&-\sum_{s\in\mathcal{S}\cap W'}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(a)}{x_{\alpha_s}}(s-1))(x_{\pr}p v)\nonumber\\
&=&x_{\pr}(\nabla_a+\sum_{s\in\mathcal{S},s\notin W'}\frac{2c_s}{1-\lambda_s}\frac{\alpha_s(a)}{x_{\alpha_s}}s)(x_{\pr}p v)\nonumber\\
&=&x_{\pr}\nabla_a(x_{\pr}p v) .
\end{eqnarray}
Here the first equality is by the Dunkl isomorphism for
$H_{c'}(W',\mathfrak{h}'_{reg})$. The second is by (\ref{xform}),
(\ref{wform}), (\ref{yform}) and the fact that $x_{\pr}^2=x_{\pr}$.
The third is by the Dunkl isomorphism for $H_{c}(W,\mathfrak{h}_{reg})$. The last is by (\ref{killwform}). Next, since $x_{\pr}$ is the idempotent in $\widehat{\mathbb{C}[\mathfrak{h}]}_b$ corresponding to the component $\mathbb{C}[[\mathfrak{h}]]_b$ in the decomposition (\ref{eq:varpi}), we have
\begin{eqnarray*}
\nabla_a(x_{\pr}p
v)&=&(\partial_a(x_{\pr}p))v+x_{\pr}p\,(\nabla_av)\\
&=&x_{\pr}(\partial_a(p))v+x_{\pr}p\,(\nabla_av)\\
&=&x_{\pr}\nabla_a(p v).
\end{eqnarray*}
Together with (\ref{conncal}) this implies that
$$\nabla^{(b)}_a(x_{\pr}p v)=x_{\pr}\nabla_a(p v).$$
So (\ref{but}) is proved.
\emph{Step 5.} In this step we prove isomorphism (\ref{eq:thm}) for
$M\in\mathcal{O}^\Delta_c(W,\mathfrak{h})$. Here we need some more notation. For
$X=\mathfrak{h}$ or $\mathfrak{h}'_{reg}$, let $U$ be an open analytic subvariety of $X$, write
$i:U\hookrightarrow X$ for the canonical embedding. For $F$ an analytic coherent sheaf
on $X$ we write $i^\ast (F)$ for the restriction of $F$ to $U$. If
$U$ contains $0$, for an analytic locally free sheaf $E$ over $U$,
we write $\widehat{E}$ for the restriction of $E$ to the formal disc
at $0$.
Let $\Omega\subset \mathfrak{h}$ be the open ball defined in (\ref{eq:omega}).
Let $f:\mathfrak{h}\rightarrow\mathfrak{h}$ be the morphism defined by $\phi$. It maps
$\Omega$ to an open ball $\Omega_0$ centered at $0$. We have
$$f(\Omega\cap\mathfrak{h}_{reg})=\Omega_0\cap\mathfrak{h}'_{reg}.$$
Let $u:\Omega_0\cap\mathfrak{h}'_{reg}\hookrightarrow\mathfrak{h}$ and $v:
\Omega\cap\mathfrak{h}_{reg}\hookrightarrow\mathfrak{h}$ be the canonical embeddings. By
Step 3 there is an isomorphism of $W'$-equivariant analytic locally
free sheaves over $\Omega_0\cap\mathfrak{h}'_{reg}$
$$u^\ast (N^{an})\cong
\phi^\ast(v^\ast (M^{an})).$$ By Step 4 there is an isomorphism
$$\widehat{u^\ast (N^{an})}\overset{\sim}\ra\widehat{\phi^\ast(v^\ast
(M^{an}))}$$ which is compatible with their connections. It follows
from Lemma \ref{monodromie} below that there is an isomorphism
$$(u^\ast (N^{an}))^{\nabla^{(b)}}
\cong \phi^\ast((v^\ast (M^{an}))^{\nabla}).$$ Since
$\Omega_0\cap\mathfrak{h}'_{reg}$ is homotopy equivalent to $\mathfrak{h}'_{reg}$ via
$u$, the left hand side is isomorphic to $(N_{\mathfrak{h}'_{reg}})^{\nabla^{(b)}}$. So
we have
\begin{equation*}
\kappa_\ast\circ\jmath_\ast\circ\KZ(M)\cong\KZ(W',\mathfrak{h})(N),
\end{equation*}
where $\kappa$, $\jmath$ are as in (\ref{heckeres2}), (\ref{heckeres3}). Combined with Step 1 we have the following isomorphisms
\begin{eqnarray}\label{i}
\KZ'\circ\Res_b(M)&\cong&\ell_\ast\circ\KZ(W',\mathfrak{h})(N)\nonumber\\
&\cong&\ell_\ast\circ\kappa_\ast\circ\jmath_\ast\circ\KZ(M)\\
&=&\imath_\ast\circ\KZ(M).\nonumber
\end{eqnarray}
These isomorphisms are functorial in $M$.
\begin{lemme}\label{monodromie}
Let $E$ be an analytic locally free sheaf over the complex manifold
$\mathfrak{h}'_{reg}$. Let $\nabla_1$, $\nabla_2$ be two integrable
connections on $E$ with regular singularities. If there exists an
isomorphism $\hat{\psi}:(\widehat{E},\nabla_1)\rightarrow
(\widehat{E},\nabla_2)$, then the local systems $E^{\nabla_1}$ and
$E^{\nabla_2}$ are isomorphic.
\end{lemme}
\begin{proof}
Write $\End(E)$ for the sheaf of endomorphisms of $E$. Then
$\End(E)$ is a locally free sheaf over $\mathfrak{h}'_{reg}$. The connections
$\nabla_1$, $\nabla_2$ define a connection $\nabla$ on $\End(E)$ as follows,
$$\nabla: \End(E)\rightarrow\End(E),\quad f\mapsto \nabla_2\circ f-f\circ\nabla_1.$$
So the isomorphism $\hat{\psi}$ is a horizontal section of
$(\widehat{\End(E)},\nabla)$. Let $(\End(E)^\nabla)_0$ be the set of
germs of horizontal sections of $(\End(E),\nabla)$ on zero. By the
Comparison theorem \cite[Theorem 6.3.1]{KK} the canonical map
$(\End(E)^\nabla)_0\rightarrow (\widehat{\End(E)})^\nabla$ is bijective.
Hence there exists a holomorphic isomorphism $\psi: (E,\nabla_1)\rightarrow
(E,\nabla_2)$ which maps to $\hat{\psi}$. Now, let $U$ be an open
ball in $\mathfrak{h}'_{reg}$ centered at $0$ with radius $\varepsilon$ small
enough such that the holomorphic isomorphism $\psi$ converges in
$U$. Write $E_U$ for the restriction of $E$ to $U$. Then $\psi$
induces an isomorphism of local systems $(E_U)^{\nabla_1}\cong
(E_U)^{\nabla_2}$. Since $\mathfrak{h}'_{reg}$ is homotopy equivalent to $U$,
we have $$E^{\nabla_1}\cong E^{\nabla_2}.$$
\end{proof}
\emph{Step 6.} Finally, write $I$ for the inclusion of
$\Proj_c(W,\mathfrak{h})$ into $\mathcal{O}_c(W,\mathfrak{h})$. By Lemma \ref{standfilt}(1)
any projective object in $\mathcal{O}_c(W,\mathfrak{h})$ has a standard filtration,
so (\ref{i}) yields an isomorphism of functors
$$\KZ'\circ\Res_b\circ I\rightarrow \imath_\ast\circ\KZ\circ I.$$ Applying Lemma \ref{projiso} to the exact functors $\KZ'\circ\Res_b$
and $\imath_\ast\circ\KZ$ yields that there is an isomorphism of
functors $$\KZ'\circ\Res_b\cong\imath_\ast\circ\KZ.$$
\end{proof}
\subsection{}\label{ss:coriso}
We give some corollaries of Theorem \ref{iso}.
\begin{cor}\label{indiso}
There is an isomorphism of functors
\begin{equation*}
\KZ\circ\Ind_b\cong\coIndh\circ\KZ'.
\end{equation*}
\end{cor}
\begin{proof}
To simplify notation let us write $$\mathcal{O}=\mathcal{O}_c(W,\mathfrak{h}),
\quad\mathcal{O}'=\mathcal{O}_{c'}(W',\overline{\mathfrak{h}}), \quad\mathscr{H}=\mathscr{H}_q(W),
\quad\mathscr{H}'=\mathscr{H}_{q'}(W').$$ Recall that the functor $\KZ$ is
represented by a projective object $P_{\KZ}$ in $\mathcal{O}$. So for any $N\in
\mathcal{O}'$ we have a morphism of $\mathscr{H}$-modules
\begin{eqnarray}
\KZ\circ\Ind_b(N)&\cong&\Hom_{\mathcal{O}}(P_{\KZ},\Ind_b(N))\nonumber\\
&\cong&\Hom_{\mathcal{O}'}(\Res_b(P_{\KZ}),N)\nonumber\\
\quad&\rightarrow&\Hom_{\mathscr{H}'}(\KZ'(\Res_b (P_{\KZ})), \KZ'(N)).\label{a}
\end{eqnarray}
By Theorem \ref{iso} we have
$$\KZ'\circ\Res_b(P_{\KZ})\cong
\sideset{^{\scriptscriptstyle\mathscr{H}}}{^W_{W'}}\Res\circ\KZ(P_{\KZ}).$$ Recall from Section \ref{ss:KZ} that the $\mathscr{H}$-module
$\KZ(P_{\KZ})$ is isomorphic to $\mathscr{H}$. So as $\mathscr{H}'$-modules $\KZ'(\Res_b(P_{\KZ}))$ is also isomorphic to $\mathscr{H}$. Therefore the morphism (\ref{a})
rewrites as
\begin{equation}\label{b}
\chi(N):\KZ\circ\Ind_b(N)\rightarrow\Hom_{\mathscr{H}'}(\mathscr{H},\KZ'(N)).
\end{equation}
It yields a morphism of functors
$$\chi: \KZ\circ\Ind_b\rightarrow \coIndh\circ
\KZ'.$$ Note that if $N$ is a projective object in $\mathcal{O}'$, then
$\chi(N)$ is an isomorphism by Proposition \ref{KZ}(1). So Lemma \ref{projiso} implies that $\chi$ is an isomorphism of
functors, because
both functors $\KZ\circ\Ind_b$ and $\coIndh\circ \KZ'$ are exact.
\end{proof}
\subsection{}
The following lemma will be useful to us.
\begin{lemme}\label{fullyfaithful}
Let $K$, $L$ be two right exact functors from $\mathcal{O}_1$ to $\mathcal{O}_2$,
where $\mathcal{O}_1$ and $\mathcal{O}_2$ can be either $\mathcal{O}_c(W,\mathfrak{h})$ or
$\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$. Suppose that $K$, $L$ map projective objects to projective ones. Then the vector space homomorphism
\begin{equation}\label{y}
\Hom(K,L)\rightarrow\Hom(\KZ_2\circ K, \KZ_2\circ L),\quad f\mapsto
1_{\KZ_2}f,
\end{equation}
is an isomorphism.
\end{lemme}
Notice that if $K=L$, this is even an isomorphism of rings.
\begin{proof}
Let $\Proj_1$, $\Proj_2$ be respectively the subcategory of projective objects in $\mathcal{O}_1$, $\mathcal{O}_2$. Write $\tilde{K}$, $\tilde{L}$ for the functors from $\Proj_1$ to $\Proj_2$ given by the restrictions of $K$, $L$, respectively.
Let $\mathscr{H}_2$ be the Hecke algebra corresponding to $\mathcal{O}_2$.
Since the functor $\KZ_2$ is fully faithful over $\Proj_2$ by Proposition \ref{KZ}(1), the following
functor
$$\Fct(\Proj_1,\Proj_2)\rightarrow\Fct(\Proj_1,
\mathscr{H}_2\modu)\,,\quad G\mapsto \KZ_2\circ G$$ is also fully
faithful. This yields an isomorphism
$$\Hom(\tilde{K},\tilde{L})\overset{\sim}\ra\Hom(\KZ_2\circ\tilde{K},\KZ_2\circ\tilde{L}),\quad f\mapsto 1_{\KZ_2}f.$$
Next, by Lemma \ref{projiso} the canonical morphisms
$$\Hom(K,L)\rightarrow\Hom(\tilde{K}, \tilde{L}),\quad\Hom(\KZ_2\circ
K,\KZ_2\circ L)\rightarrow \Hom(\KZ_2\circ\tilde{K},\KZ_2\circ\tilde{L})$$
are isomorphisms. So the map (\ref{y}) is also an isomorphism.
\end{proof}
Let $b(W,W'')$ be a point in $\mathfrak{h}$ whose stabilizer is $W''$. Let
$b(W',W'')$ be its image in $\overline{\mathfrak{h}}=\mathfrak{h}/\mathfrak{h}^{W'}$ via the
canonical projection. Write $b(W,W')=b$.
\begin{cor}\label{corcom}
There are isomorphisms of functors
\begin{eqnarray*}
\Res_{b(W',W'')}\circ\Res_{b(W,W')}&\cong&\Res_{b(W,W'')},\\
\Ind_{b(W,W')}\circ\Ind_{b(W',W'')}&\cong&\Ind_{b(W,W'')}.
\end{eqnarray*}
\end{cor}
\begin{proof}
Since the restriction functors map projective objects to projective ones by Proposition \ref{Res}(1), Lemma
\ref{fullyfaithful} applied to the categories
$\mathcal{O}_1=\mathcal{O}_c(W,\mathfrak{h})$, $\mathcal{O}_2=\mathcal{O}_{c''}(W'',\mathfrak{h}/\mathfrak{h}^{W''})$ yields an
isomorphism
\begin{eqnarray*}
&&\Hom(\Res_{b(W',W'')}\circ\Res_{b(W,W')},\Res_{b(W,W'')})\\
&&\cong\Hom(\KZ''\circ\Res_{b(W',W'')}\circ\Res_{b(W,W')},\KZ''\circ\Res_{b(W,W'')}).
\end{eqnarray*}
By Theorem \ref{iso} the set on the second row is
\begin{equation}\label{d}
\Hom(\sideset{^{\scriptscriptstyle\mathscr{H}}}{^{W'}_{W''}}\Res\circ\Resh\circ\KZ,
\sideset{^{\scriptscriptstyle\mathscr{H}}}{^W_{W''}}\Res\circ\KZ). \end{equation}
By the presentations of Hecke algebras in \cite[Proposition
4.22]{BMR}, there is an isomorphism
$$\sigma:\sideset{^{\scriptscriptstyle\mathscr{H}}}{^{W'}_{W''}}\Res\circ\Resh\overset{\sim}\ra\sideset{^{\scriptscriptstyle\mathscr{H}}}{^{W}_{W''}}\Res.$$
Hence the element $\sigma 1_{\KZ}$ in the set (\ref{d}) maps to an
isomorphism
$$\Res_{b(W',W'')}\circ\Res_{b(W,W')}\cong\Res_{b(W,W'')}.$$
This proves the first isomorphism in the corollary. The second one
follows from the uniqueness of right adjoint functor.
\end{proof}
\subsection{Biadjointness of $\Res_b$ and $\Ind_b$.}\label{ss:biadjoint}
Recall that a finite dimensional $\mathbb{C}$-algebra $A$ is
symmetric if $A$ is isomorphic to $A^\ast=\Hom_{\mathbb{C}}(A,\mathbb{C})$
as $(A,A)$-bimodules.
\begin{lemme}\label{heckeind}
Assume that $\mathscr{H}_{q}(W)$ and $\mathscr{H}_{q'}(W')$ are symmetric
algebras. Then the functors $\Indh$ and $\coIndh$ are isomorphic,
i.e., the functor $\Indh$ is biadjoint to $\Resh$.
\end{lemme}
\begin{proof}
We abbreviate $\mathscr{H}=\mathscr{H}_{q}(W)$ and $\mathscr{H}'=\mathscr{H}_{q'}(W')$. Since $\mathscr{H}$ is free as a left $\mathscr{H}'$-module,
for any $\mathscr{H}'$-module $M$ the map
\begin{equation}\label{eq:proj}
\Hom_{\mathscr{H}'}(\mathscr{H},\mathscr{H}')\otimes_{\mathscr{H}'}M\rightarrow
\Hom_{\mathscr{H}'}(\mathscr{H},M)
\end{equation} given by multiplication is an
isomorphism of $\mathscr{H}$-modules. By assumption $\mathscr{H}'$ is isomorphic to $(\mathscr{H}')^\ast$ as $(\mathscr{H}',\mathscr{H}')$-bimodules.
Thus we have the following $(\mathscr{H},\mathscr{H}')$-bimodule isomorphisms
\begin{eqnarray*}
\Hom_{\mathscr{H}'}(\mathscr{H},\mathscr{H}')&\cong&\Hom_{\mathscr{H}'}(\mathscr{H},(\mathscr{H}')^\ast)\\
&\cong&\Hom_{\mathbb{C}}(\mathscr{H}'\otimes_{\mathscr{H}'}\mathscr{H},\mathbb{C})\\
&\cong&\mathscr{H}^\ast\\
&\cong&\mathscr{H}.
\end{eqnarray*}
The last isomorphism follows from the fact that $\mathscr{H}$ is
symmetric. Thus, by (\ref{eq:proj}) the functors $\Indh$ and $\coIndh$ are isomorphic.
\end{proof}
\begin{rmq}\label{rmq:symmetric}
It is proved that $\mathscr{H}_{q}(W)$ is a symmetric algebra for every
irreducible complex reflection group $W$ except for some of the 34
exceptional groups in the Shephard-Todd classification. See
\cite[Section 2A]{BMM} for details.
\end{rmq}
The biadjointness of $\Res_b$ and $\Ind_b$ was conjectured in
\cite[Remark 3.18]{BE} and was announced by I. Gordon and M. Martino. We give a proof in Proposition \ref{leftadjunction} since it seems not yet to be available in the literature. Let us first consider the following lemma.
\begin{lemme}\label{lem:adjunction}
(1) Let $A$, $B$ be noetherian algebras and $T$ be a functor $$T:A\modu\rightarrow B\modu.$$ If $T$ is right exact and commutes with direct sums, then it has a right adjoint.
(2) The functor $$\Res_b:\mathcal{O}_{c}(W,\mathfrak{h})\rightarrow\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$$
has a left adjoint.
\end{lemme}
\begin{proof}
(1) Consider the $(B,A)$-bimodule $M=T(A)$. We claim that the functor $T$ is isomorphic to the functor $M\otimes_A-$. Indeed, by definition we have $T(A)\cong M\otimes_AA$ as $B$-modules. Now, for any $N\in A\modu$, since $N$ is finitely generated and $A$ is noetherian there exist $m$, $n\in\mathbb{N}$ and an exact sequence
$$A^{\oplus n}\longrightarrow A^{\oplus m}\longrightarrow N\longrightarrow 0.$$
Since both $T$ and $M\otimes_A-$ are right exact and they commute with direct sums, the fact that $T(A)\cong M\otimes_AA$ implies that $T(N)\cong M\otimes_AN$ as $B$-modules. This proves the claim. Now, the functor $M\otimes_A-$ has a right adjoint $\Hom_B(M,-)$, so $T$ also has a right adjoint.
(2) Recall that for any complex reflection group $W$, a contravariant duality functor
$$(-)\spcheck:\mathcal{O}_{c}(W,\mathfrak{h})\rightarrow\mathcal{O}_{c^\dag}(W,\mathfrak{h}^\ast)$$
was defined in \cite[Section 4.2]{GGOR}, here $c^\dag:\mathcal{S}\rightarrow\mathbb{C}$ is another parameter explicitly determined by $c$. Consider the functor $$\Res_b\spcheck=(-)\spcheck\circ\Res_b\circ(-)\spcheck: \mathcal{O}_{c^\dag}(W,\mathfrak{h}^\ast)\rightarrow \mathcal{O}_{{c'}^\dag}(W',(\overline{\mathfrak{h}})^\ast).$$
The category $\mathcal{O}_{c^\dag}(W,\mathfrak{h}^\ast)$ has a projective generator $P$. The algebra $\End_{\mathcal{O}_{c^\dag}(W,\mathfrak{h}^\ast)}(P)^{\op}$ is finite dimensional over $\mathbb{C}$ and by Morita theory we have an equivalence of categories $$\mathcal{O}_{c^\dag}(W,\mathfrak{h}^\ast)\cong
\End_{\mathcal{O}_{c^\dag}(W,\mathfrak{h}^\ast)}(P)^{\op}\modu.$$ Since the functor $\Res_b\spcheck$ is exact and obviously commutes with direct sums, by part (1) it has a right adjoint $\Psi$. Then it follows that $(-)\spcheck\circ\Psi\circ(-)\spcheck$ is left adjoint to $\Res_b$. The lemma is proved.
\end{proof}
\begin{prop}\label{leftadjunction}
Under the assumption of Lemma \ref{heckeind}, the functor
$\Ind_b$ is left adjoint to $\Res_b$.
\end{prop}
\begin{proof}
\emph{Step 1.} We abbreviate $\mathcal{O}=\mathcal{O}_c(W,\mathfrak{h})$, $\mathcal{O}'=\mathcal{O}_{c'}(W',\overline{\mathfrak{h}})$, $\mathscr{H}=\mathscr{H}_q(W)$, $\mathscr{H}'=\mathscr{H}_{q'}(W')$, and write $\Id_{\mathcal{O}}$, $\Id_{\mathcal{O}'}$,
$\Id_{\mathscr{H}}$, $\Id_{\mathscr{H}'}$ for the identity functor on the corresponding categories. We also abbreviate $E^{\scriptscriptstyle\mathscr{H}}=\Resh$,
$F^{\scriptscriptstyle\mathscr{H}}=\Indh$ and $E=\Res_b$. By Lemma \ref{lem:adjunction} the functor $E$ has a left adjoint. We denote it by
$F:\mathcal{O}'\rightarrow\mathcal{O}$. Recall the functors $$\KZ:\mathcal{O}\rightarrow\mathscr{H}\modu,\quad \KZ':\mathcal{O}'\rightarrow\mathscr{H}'\modu.$$
The goal of this step is to show that there exists an isomorphism of functors
\begin{equation*}
\KZ\circ F\cong F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'.
\end{equation*}
To this end, let $S$, $S'$ be respectively the right adjoints of $\KZ$, $\KZ'$, see Section \ref{ss:KZ}. We will first give an isomorphism of functors $$F^{\scriptscriptstyle\mathscr{H}}\cong\KZ\circ F\circ S'.$$ Let $M\in\mathscr{H}'\modu$ and $N\in\mathscr{H}\modu$. Consider the following equalities given by adjunctions
\begin{eqnarray*}
\Hom_{\mathscr{H}}(\KZ\circ F\circ S'(M), N)&=&\Hom_{\mathcal{O}}(F\circ S'(M), S(N))\\
&=&\Hom_{\mathcal{O}'}(S'(M), E\circ S(N)).
\end{eqnarray*}
The functor $\KZ'$ yields a map
\begin{equation}\label{eq:mapKZ'}
a(M,N): \Hom_{\mathcal{O}'}(S'(M), E\circ S(N))\rightarrow\Hom_{\mathscr{H}'}(\KZ'\circ S'(M), \KZ'\circ E\circ S(N)).
\end{equation}
Since the canonical adjunction maps $\KZ'\circ S'\rightarrow \Id_{\mathscr{H}'}$, $\KZ\circ S\rightarrow \Id_{\mathscr{H}}$ are isomorphisms (see Section \ref{ss:KZ}) and since we have an isomorphism of functors $\KZ'\circ E\cong E^{\scriptscriptstyle\mathscr{H}}\circ \KZ$ by Theorem \ref{iso}, we get the following equalities
\begin{eqnarray*}
\Hom_{\mathscr{H}'}(\KZ'\circ S'(M), \KZ'\circ E\circ S(N))&=&\Hom_{\mathscr{H}'}(M, E^{\scriptscriptstyle\mathscr{H}}\circ \KZ\circ S(N))\\
&=&\Hom_{\mathscr{H}'}(M, E^{\scriptscriptstyle\mathscr{H}}(N))\\
&=&\Hom_{\mathscr{H}}(F^{\scriptscriptstyle\mathscr{H}}(M), N).
\end{eqnarray*}
In the last equality we used that $F^{\scriptscriptstyle\mathscr{H}}$ is left adjoint to $E^{\scriptscriptstyle\mathscr{H}}$. So the map (\ref{eq:mapKZ'}) can be rewritten into the following form
\begin{equation*}
a(M,N): \Hom_{\mathscr{H}}(\KZ\circ F\circ S'(M), N)\rightarrow\Hom_{\mathscr{H}}(F^{\scriptscriptstyle\mathscr{H}}(M), N).
\end{equation*}
Now, take $N=\mathscr{H}$. Recall that $\mathscr{H}$ is isomorphic to $\KZ(P_{\KZ})$ as $\mathscr{H}$-modules. Since $P_{\KZ}$ is projective, by Proposition \ref{KZ}(2) we have a canonical isomorphism in $\mathcal{O}$ $$P_{\KZ}\cong S(\KZ(P_{\KZ}))=S(\mathscr{H}).$$ Further $E$ maps projectives to projectives by Proposition \ref{Res}(1), so $E\circ S(\mathscr{H})$ is also projective. Hence Proposition \ref{KZ}(1) implies that in this case (\ref{eq:mapKZ'}) is an isomorphism for any $M$, i.e., we get an isomorphism
\begin{equation*}
a(M,\mathscr{H}): \Hom_{\mathscr{H}}(\KZ\circ F\circ S'(M), \mathscr{H})\overset{\sim}\ra\Hom_{\mathscr{H}}(F^{\scriptscriptstyle\mathscr{H}}(M), \mathscr{H}).
\end{equation*}
Further this is an isomorphism of right $\mathscr{H}$-modules with respect to the $\mathscr{H}$-actions induced by the right action of $\mathscr{H}$ on itself. Now, the fact that $\mathscr{H}$ is a symmetric algebra yields that for any finite dimensional $\mathscr{H}$-module $N$ we have isomorphisms of right $\mathscr{H}$-modules
\begin{eqnarray*}
\Hom_{\mathscr{H}}(N,\mathscr{H})&\cong&\Hom_{\mathscr{H}}(N,\Hom_{\mathbb{C}}(\mathscr{H},\mathbb{C}))\\
&\cong&\Hom_\mathbb{C}(N,\mathbb{C}).
\end{eqnarray*}
Therefore $a(M,\mathscr{H})$ yields an isomorphism of right $\mathscr{H}$-modules
$$\Hom_{\mathbb{C}}(\KZ\circ F\circ S'(M), \mathbb{C})\rightarrow\Hom_{\mathbb{C}}(F^{\scriptscriptstyle\mathscr{H}}(M), \mathbb{C}).$$
We deduce a natural isomorphism of left $\mathscr{H}$-modules
$$\KZ\circ F\circ S'(M)\cong F^{\scriptscriptstyle\mathscr{H}}(M)$$
for any $\mathscr{H}'$-module $M$. This gives an isomorphism of functors $$\psi:\KZ\circ F\circ S'\overset{\sim}\ra F^{\scriptscriptstyle\mathscr{H}}.$$
Finally, consider the canonical adjunction map $\eta:\Id_{\mathcal{O}'}\rightarrow S'\circ\KZ'$. We have a morphism of functors
$$\phi=(1_{\KZ\circ F}\eta)\circ(\psi 1_{\KZ'}):\KZ\circ F\rightarrow F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'.$$
Note that $\psi 1_{\KZ'}$ is an isomorphism of functors. If $Q$ is a projective object in $\mathcal{O}'$, then by Proposition \ref{KZ}(2) the morphism $\eta(Q): Q\rightarrow S'\circ\KZ'(Q)$ is also an isomorphism, so $\phi(Q)$ is an isomorphism. This implies that $\phi$ is an isomorphism of functors by Lemma \ref{projiso}, because both $\KZ\circ F$ and $F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'$ are right exact functors. Here the right exactness of $F$ follows from that it is left adjoint to $E$. So we get the desired isomorphism of functors
$$\KZ\circ F\cong F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'.$$
\emph{Step 2.} Let us now prove that $F$ is right adjoint to $E$. By uniqueness of adjoint functors, this will imply that $F$ is isomorphic to $\Ind_b$. First, by Lemma \ref{heckeind} the functor $F^{\scriptscriptstyle\mathscr{H}}$ is isomorphic to
$\coIndh$. So $F^{\scriptscriptstyle\mathscr{H}}$ is right
adjoint to $E^{\scriptscriptstyle\mathscr{H}}$, i.e., we have morphisms of functors
$$\varepsilon^{\scriptscriptstyle\mathscr{H}}: E^{\scriptscriptstyle\mathscr{H}}\circ F^{\scriptscriptstyle\mathscr{H}}\rightarrow\Id_{\mathscr{H}'},\quad
\eta^{\scriptscriptstyle\mathscr{H}}: \Id_{\mathscr{H}}\rightarrow F^{\scriptscriptstyle\mathscr{H}}\circ E^{\scriptscriptstyle\mathscr{H}}$$ such that
$$(\varepsilon^{\scriptscriptstyle\mathscr{H}}
1_{E^{\scriptscriptstyle\mathscr{H}}})\circ(1_{E^{\scriptscriptstyle\mathscr{H}}}\eta^{\scriptscriptstyle\mathscr{H}})=1_{E^{\scriptscriptstyle\mathscr{H}}},\quad
(1_{F^{\scriptscriptstyle\mathscr{H}}}\varepsilon^{\scriptscriptstyle\mathscr{H}}
)\circ(\eta^{\scriptscriptstyle\mathscr{H}}1_{F^{\scriptscriptstyle\mathscr{H}}})=1_{F^{\scriptscriptstyle\mathscr{H}}}.$$ Next, both $F$ and $E$ have exact right adjoints, given respectively by $E$ and $\Ind_b$. Therefore $F$ and $E$ map projective objects to projective ones. Applying
Lemma \ref{fullyfaithful} to
$\mathcal{O}_1=\mathcal{O}_2=\mathcal{O}'$, $K=E\circ F$, $L=\Id_{\mathcal{O}'}$ yields that the following map is bijective
\begin{equation}\label{eq:isoFE}
\Hom(E\circ F,\Id_{\mathcal{O}'})\rightarrow\Hom(\KZ'\circ E\circ
F,\KZ'\circ\Id_{\mathcal{O}'}),\quad f\mapsto 1_{\KZ'}f. \end{equation} By Theorem \ref{iso} and Step $1$ there
exist isomorphisms of functors $$\phi_E:
E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\overset{\sim}\ra\KZ'\circ E,\quad \phi_F:
F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'\overset{\sim}\ra\KZ\circ F.$$ Let
\begin{eqnarray*}
\phi_{EF}=(\phi_E 1_F)\circ(1_{E^{\scriptscriptstyle\mathscr{H}}}\phi_F):
E^{\scriptscriptstyle\mathscr{H}}\circ F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'\overset{\sim}\ra\KZ'\circ E\circ F,\\
\phi_{FE}=(\phi_F 1_E)\circ(1_{F^{\scriptscriptstyle\mathscr{H}}}\phi_E):F^{\scriptscriptstyle\mathscr{H}}\circ
E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\overset{\sim}\ra\KZ\circ F\circ E.
\end{eqnarray*} Identify
$$\KZ\circ\Id_{\mathcal{O}}=\Id_{\mathscr{H}}\circ\KZ,\quad
\KZ'\circ\Id_{\mathcal{O}'}=\Id_{\mathscr{H}'}\circ\KZ'.$$ We have a bijective
map
$$\Hom(\KZ'\circ E\circ F,\KZ'\circ\Id_{\mathcal{O}'})\overset{\sim}\ra \Hom(E^{\scriptscriptstyle\mathscr{H}}\circ
F^{\scriptscriptstyle\mathscr{H}}\circ\KZ',\Id_{\mathscr{H}'}\circ\KZ'),\quad g\mapsto
g\circ\phi_{EF}.$$ Together with (\ref{eq:isoFE}), it implies that
there exists a unique morphism $\varepsilon: E\circ F\rightarrow\Id_{\mathcal{O}'}$ such that
$$(1_{\KZ'}\varepsilon)\circ\phi_{EF}=\varepsilon^{\scriptscriptstyle\mathscr{H}}1_{\KZ'}.$$ Similarly,
there exists a unique morphism $\eta: \Id_{\mathcal{O}}\rightarrow F\circ E$ such that
$$(\phi_{FE})^{-1}\circ(1_{\KZ}\eta)=\eta^{\scriptscriptstyle\mathscr{H}}1_{\KZ}.$$
Now, we have the following commutative diagram
$$\xymatrix{E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\ar@{=}[r]\ar[d]_{1_{E^{\scriptscriptstyle\mathscr{H}}}
\eta^{\scriptscriptstyle\mathscr{H}}1_{\KZ}}&E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\ar[r]^{\phi_E}
\ar[d]^{1_{E^{\scriptscriptstyle\mathscr{H}}}1_{\KZ}\eta}
&\KZ'\circ E\ar[d]^{1_{\KZ'}1_E\eta}\\
E^{\scriptscriptstyle\mathscr{H}}\circ F^{\scriptscriptstyle\mathscr{H}}\circ
E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\quad\ar[r]^{\,1_{E^{\scriptscriptstyle\mathscr{H}}}\phi_{FE}}\ar@{=}[d]
&\quad E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\circ F\circ E\quad\ar[r]^{\,\phi_E1_F1_E}
&\quad\KZ'\circ
E\circ F\circ E\ar@{=}[d]\\
E^{\scriptscriptstyle\mathscr{H}}\circ F^{\scriptscriptstyle\mathscr{H}}\circ
E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\quad\ar[r]^{\,1_{E^{\scriptscriptstyle\mathscr{H}}}1_{F^{\scriptscriptstyle\mathscr{H}}}\phi_E}
\ar[d]_{\varepsilon^{\scriptscriptstyle\mathscr{H}}1_{E^{\scriptscriptstyle\mathscr{H}}}1_{\KZ}}
&\quad E^{\scriptscriptstyle\mathscr{H}}\circ F^{\scriptscriptstyle\mathscr{H}}\circ\KZ'\circ
E\quad\ar[u]_{1_{E^{\scriptscriptstyle\mathscr{H}}}\phi_F1_E}
\ar[r]^{\,\phi_{EF}1_E}\ar[d]^{\varepsilon^{\scriptscriptstyle\mathscr{H}}1_{\KZ'}1_E}
&\quad\KZ'\circ E\circ F\circ E\ar[d]^{1_{\KZ'}\varepsilon 1_E}\\
E^{\scriptscriptstyle\mathscr{H}}\circ\KZ\ar[r]^{\phi_E} &\KZ'\circ E\ar@{=}[r]&\KZ'\circ
E.}$$ It yields that
$$(1_{\KZ'}\varepsilon 1_E)\circ(1_{\KZ'}1_E\eta)=
\phi_E\circ(\varepsilon^{\scriptscriptstyle\mathscr{H}}1_{E^{\scriptscriptstyle\mathscr{H}}}1_{\KZ})\circ(1_{E^{\scriptscriptstyle\mathscr{H}}}
\eta^{\scriptscriptstyle\mathscr{H}}1_{\KZ})\circ(\phi_E)^{-1}.$$
We deduce that
\begin{eqnarray}
1_{\KZ'}((\varepsilon 1_E)\circ(1_E\eta))&=&\phi_E\circ(1_{E^{\scriptscriptstyle\mathscr{H}}}1_{\KZ})
\circ(\phi_E)^{-1}\nonumber\\
&=&1_{\KZ'}1_E.\label{eq:unit}
\end{eqnarray}
By applying Lemma \ref{fullyfaithful} to
$\mathcal{O}_1=\mathcal{O}$, $\mathcal{O}_2=\mathcal{O}'$,
$K=L=E$, we deduce that the following map is bijective
$$\End(E)\rightarrow\End(\KZ'\circ E),\quad f\mapsto1_{\KZ'}f.$$
Hence (\ref{eq:unit}) implies that $$(\varepsilon 1_E)\circ(1_E\eta)=1_E.$$
Similarly, we have $(1_F\varepsilon)\circ (\eta 1_F)=1_F$. So $E$ is left adjoint to $F$. By uniqueness of adjoint functors this implies that $F$ is isomorphic to $\Ind_b$. Therefore $\Ind_b$ is biadjoint to $\Res_b$.
\end{proof}
\section{Reminders on the Cyclotomic case.}\label{s:cyclotomiccase}
From now on we will concentrate on the cyclotomic rational DAHA's.
We fix some notation in this section.
\subsection{}\label{ss:cyclot1}
Let $l,n$ be positive integers. Write $\varepsilon=\exp(\frac{2\pi
\sqrt{-1}}{l})$. Let $\mathfrak{h}=\mathbb{C}^n$, write $\{y_1,\ldots,y_n\}$ for its
standard basis. For $1\leqslant i,j,k\leqslant n$ with $i,j,k$ distinct, let
$\varepsilon_k$, $s_{ij}$ be the following elements of $GL(\mathfrak{h})$:
$$\varepsilon_k(y_k)=\varepsilon y_k,\quad
\varepsilon_k(y_j)=y_j,\quad s_{ij}(y_i)=y_j,\quad
s_{ij}(y_k)=y_k.$$ Let $B_n(l)$ be the subgroup of $GL(\mathfrak{h})$
generated by $\varepsilon_k$ and $s_{ij}$ for $1\leqslant k\leqslant n$ and
$1\leqslant i<j\leqslant n$. \iffalse We have
$B_n(l)\cong\mathfrak{S}_n\ltimes(\mu_l)^n$ where $\mathfrak{S}_n$
is the symmetric group on $n$ elements and $(\mu_l)^n$ is $n$ copies
of the cyclic group $\mu_l$ which is generated by
$\varepsilon=\exp(\frac{2\pi\sqrt{-1}}{l})$. \fi It is a complex
reflection group with the set of reflections
$$\mathcal{S}_n=\{\varepsilon_i^p:1\leqslant i\leqslant n, 1\leqslant p \leqslant l-1\}\bigsqcup
\{s_{ij}^{(p)}=s_{ij}\varepsilon_i^p\varepsilon_j^{-p}:1\leqslant
i<j\leqslant n, 1\leqslant p\leqslant l\}.$$
Note that there is an obvious inclusion $\mathcal{S}_{n-1}\hookrightarrow\mathcal{S}_{n}$. It yields an embedding
\begin{equation}\label{eq:inclusiongroup}
B_{n-1}(l)\hookrightarrow B_n(l).
\end{equation}
This embedding identifies $B_{n-1}(l)$ with the parabolic subgroup of $B_{n}(l)$ given by the stabilizer of the point $b_n=(0,\ldots,0,1)\in\mathbb{C}^n$.
The cyclotomic rational DAHA is the algebra $H_{c}(B_n(l),\mathfrak{h})$. We
will use another presentation in which we replace the parameter $c$
by an $l$-tuple $\mathbf{h}=(h,h_1,\ldots,h_{l-1})$ such that
\begin{equation*}
c_{s^{(p)}_{ij}}=-h, \quad
c_{\varepsilon_p}=\frac{-1}{2}\sum_{p'=1}^{l-1}(\varepsilon^{-pp'}-1)h_{p'}.
\end{equation*}
We will denote $H_c(B_n(l),\mathfrak{h})$ by $H_{\mathbf{h},n}$. The corresponding
category $\mathcal{O}$ will be denoted by $\mathcal{O}_{\mathbf{h},n}$. In the rest of
the paper, we will fix the positive integer $l$. We will also fix a
positive integer $e\geqslant 2$ and an $l$-tuple of integers
$\mathbf{s}=(s_1,\ldots,s_l)$. \emph{We will always assume that the
parameter $\mathbf{h}$ is given by the following formulas\,,}
\begin{equation}\label{assumptionh}
h=\frac{-1}{e},\quad h_p=\frac{s_{p+1}-s_p}{e}-\frac{1}{l},\quad
1\leqslant p\leqslant l-1\,.
\end{equation}
The functor $\KZ(B_n(l),\mathbb{C}^n)$ goes from $\mathcal{O}_{\mathbf{h},n}$ to the
category of finite dimensional modules of a certain Hecke algebra
$\mathscr{H}_{\mathbf{q},n}$ attached to the group $B_n(l)$. Here the
parameter is $\mathbf{q}=(q,q_1,\ldots, q_l)$ with
\begin{equation*}
q=\exp(2\pi\sqrt{-1}/e),\quad q_p=q^{s_p},\quad 1\leqslant p\leqslant l.
\end{equation*}
The algebra $\mathscr{H}_{\mathbf{q},n}$ has the following presentation:
\begin{itemize}
\item Generators: $T_0, T_1,\ldots, T_{n-1}$,
\item Relations: \begin{gather*}
(T_0-q_1)\cdots(T_0-q_l)=(T_i+1)(T_i-q)=0,\quad 1\leqslant i\leqslant n-1, \notag \\
T_0T_1T_0T_1=T_1T_0T_1T_0,\notag \\
T_iT_j=T_jT_i,\quad\text{if }|i-j|>1,\label{pres} \\
T_iT_{i+1}T_i=T_{i+1}T_iT_{i+1},\quad 1\leqslant i\leqslant n-2. \notag
\end{gather*}
\end{itemize}
The algebra $\mathscr{H}_{\mathbf{q},n}$ satisfies the assumption of
Section \ref{s:KZcommute}, i.e., it has the same dimension as $\mathbb{C}
B_n(l)$.
\subsection{}\label{ss:cyclot2}
For each positive integer $n$, the embedding (\ref{eq:inclusiongroup}) of $B_{n}(l)$ into $B_{n+1}(l)$ yields an embedding of Hecke algebras $$\imath_{\mathbf{q}}:
\mathscr{H}_{\mathbf{q},n}\hookrightarrow \mathscr{H}_{\mathbf{q},n+1},$$ see Section \ref{ss:resHecke}. Under
the presentation above this embedding is given by $$\imath_{\mathbf{q}}(T_i)=T_i,\quad\forall\ 0\leqslant i\leqslant n-1,$$
see \cite[Proposition 2.29]{BMR}.
We will consider the following restriction and induction functors:
\begin{eqnarray*}
E(n)=\Res_{b_n},\quad
E(n)^{\scriptscriptstyle\mathscr{H}}=\sideset{^{\scriptscriptstyle\mathscr{H}}}{^{B_{n}(l)}_{B_{n-1}(l)}}\Res,\\
F(n)=\Ind_{b_n},\quad
F(n)^{\scriptscriptstyle\mathscr{H}}=\sideset{^{\scriptscriptstyle\mathscr{H}}}{^{B_{n}(l)}_{B_{n-1}(l)}}\Ind.
\end{eqnarray*}
The algebra $\mathscr{H}_{\mathbf{q},n}$ is symmetric (see Remark
\ref{rmq:symmetric}). Hence by Lemma \ref{heckeind} we have
$$F(n)^{\scriptscriptstyle\mathscr{H}}\cong\sideset{^{\scriptscriptstyle\mathscr{H}}}{^{B_{n}(l)}_{B_{n-1}(l)}}\coInd.$$
We will abbreviate
$$\mathcal{O}_{\mathbf{h},\mathbb{N}}=\bigoplus_{n\in\mathbb{N}}\mathcal{O}_{\mathbf{h},n},\quad \KZ=\bigoplus_{n\in\mathbb{N}}\KZ(B_n(l),\mathbb{C}^n),\quad \mathscr{H}_{\mathbf{q},\mathbb{N}}\modu=\bigoplus_{n\in\mathbb{N}}\mathscr{H}_{\mathbf{q},n}\modu.$$
So $\KZ$ is the Knizhnik-Zamolodchikov functor from
$\mathcal{O}_{\mathbf{h},\mathbb{N}}$ to $\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu$. Let
\begin{eqnarray*}
E=\bigoplus_{n\geqslant 1}E(n),\quad E^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{n\geqslant
1}E(n)^{\scriptscriptstyle\mathscr{H}},\\ F=\bigoplus_{n\geqslant 1}F(n),\quad
F^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{n\geqslant 1}F(n)^{\scriptscriptstyle\mathscr{H}}.\end{eqnarray*} So
$(E^{\scriptscriptstyle\mathscr{H}},F^{\scriptscriptstyle\mathscr{H}})$ is a pair of biadjoint endo-functors of $\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu$, and $(E,F)$ is a pair of biadjoint endo-functors of $\mathcal{O}_{\mathbf{h},\mathbb{N}}$ by Proposition \ref{leftadjunction}.
\subsection{Fock spaces.}\label{ss: fock}
Recall that an $l$-partition is an $l$-tuple $\lambda=(\lambda^1,\cdots,
\lambda^l)$ with each $\lambda^j$ a partition, that is a sequence of
integers $(\lambda^j)_1\geqslant\cdots\geqslant(\lambda^j)_k>0$. To any
$l$-partition $\lambda=(\lambda^1,\ldots,\lambda^l)$ we attach the set
\begin{equation*}
\Upsilon_\lambda=\{(a,b,j)\in \mathbb{N}\times\mathbb{N}\times(\mathbb{Z}/l\mathbb{Z}):
0<b\leqslant(\lambda^j)_a\}.
\end{equation*}
Write $|\lambda|$ for the number of elements in this set; we say that
$\lambda$ is an $l$-partition of $|\lambda|$. For $n\in\mathbb{N}$ we denote by
$\mathcal{P}_{n,l}$ the set of $l$-partitions of $n$. For any
$l$-partition $\mu$ such that $\Upsilon_\mu$ contains
$\Upsilon_\lambda$, we write $\mu/\lambda$ for the complement of
$\Upsilon_\lambda$ in $\Upsilon_\mu$. Let $|\mu/\lambda|$ be the number of
elements in this set. To each element $(a,b,j)$ in $\Upsilon_\lambda$
we attach an element $$\res ((a,b,j))=b-a+s_j\in\mathbb{Z}/e\mathbb{Z},$$ called the
residue of $(a,b,j)$. Here $s_j$ is the $j$-th component of our
fixed $l$-tuple $\mathbf{s}$.
The Fock space with multi-charge $\mathbf{s}$ is the $\mathbb{C}$-vector
space $\mathcal{F}_\mathbf{s}$ spanned by the $l$-partitions, i.e.,
$$\mathcal{F}_\mathbf{s}=\bigoplus_{n\in\mathbb{N}}\bigoplus_{\lambda\in\mathcal{P}_{n,l}}\mathbb{C}\lambda.$$
It admits an integrable $\widehat{\mathfrak{sl}}_e$-module structure with
the Chevalley generators acting as follows (cf. \cite{JMMO}): for
any $i\in\mathbb{Z}/e\mathbb{Z}$,
\begin{equation}\label{fockei}
e_i(\lambda)=\sum_{|\lambda/\mu|=1,\res(\lambda/\mu)=i}\mu,\quad
f_i(\lambda)=\sum_{|\mu/\lambda|=1,\res(\mu/\lambda)=i}\mu.
\end{equation}
For each $n\in\mathbb{Z}$ set $\Lambda_n=\Lambda_{\underline{n}}$, where
$\underline{n}$ is the image of $n$ in $\mathbb{Z}/e\mathbb{Z}$ and
$\Lambda_{\underline{n}}$ is the corresponding fundamental weight of
$\widehat{\Lie{sl}}_e$. Set
$$\Lambda_{\mathbf{s}}=\Lambda_{\underline{s_1}}+\cdots+\Lambda_{\underline{s_l}}.$$
Each $l$-partition $\lambda$ is a weight vector of
$\mathcal{F}_\mathbf{s}$ with weight
\begin{equation}\label{wt}
\wt(\lambda)=\Lambda_{\mathbf{s}}-\sum_{i\in\mathbb{Z}/e\mathbb{Z}}n_i\alpha_i,
\end{equation}
where $n_i$ is the number of elements in the set $\{
(a,b,j)\in\Upsilon_\lambda: \res((a,b,j))=i\}$. We will call
$\wt(\lambda)$ the weight of $\lambda$.
In \cite[Section $6.1.1$]{R} an explicit bijection was given between
the sets $\Irr(B_n(l))$ and $\mathcal{P}_{n,l}$. Using this
bijection we identify these two sets and index the standard and
simple modules in $\mathcal{O}_{\mathbf{h},\mathbb{N}}$ by $l$-partitions. In
particular, we have an isomorphism of $\mathbb{C}$-vector spaces
\begin{equation}\label{Fock}
\theta:
K(\mathcal{O}_{\mathbf{h},\mathbb{N}})\overset{\sim}\rightarrow\mathcal{F}_{\mathbf{s}},\quad
[\Delta(\lambda)]\mapsto \lambda.
\end{equation}
\subsection{}\label{kzspecht}
We end this section by the following lemma. Recall that the functor
$\KZ$ gives a map $K(\mathcal{O}_{\mathbf{h},n})\rightarrow K(\mathscr{H}_{\mathbf{q},n})$.
For any $l$-partition $\lambda$ of $n$ let $S_\lambda$ be the
corresponding Specht module in $\mathscr{H}_{\mathbf{q},n}\modu$, see
\cite[Definition $13.22$]{A} for its definition.
\begin{lemme}\label{Specht}
In $K(\mathscr{H}_{\mathbf{q},n})$, we have
$\KZ([\Delta(\lambda)])=[S_\lambda]$.
\end{lemme}
\begin{proof}
Let $R$ be any commutative ring over $\mathbb{C}$. For any $l$-tuple
$\mathbf{z}=(z,z_1,\ldots,z_{l-1})$ of elements in $R$ one defines the
rational DAHA over $R$ attached to $B_n(l)$ with parameter $\mathbf{z}$ in
the same way as before. Denote it by $H_{R,\mathbf{z},n}$. The standard
modules $\Delta_R(\lambda)$ are also defined as before. For any
$(l+1)$-tuple $\mathbf{u}=(u,u_1,\ldots, u_l)$ of invertible elements in
$R$ the Hecke algebra $\mathscr{H}_{R,\mathbf{u},n}$ over $R$ attached to
$B_n(l)$ with parameter $\mathbf{u}$ is defined by the same presentation
as in Section \ref{ss:cyclot1}. The Specht modules $S_{R,\lambda}$ are
also well-defined (see \cite{A}). If $R$ is a field, we will write
$\Irr(\mathscr{H}_{R,\mathbf{u},n})$ for the set of isomorphism classes of
simple $\mathscr{H}_{R,\mathbf{u},n}$-modules.
Now, fix $R$ to be the ring of holomorphic functions of one variable
$\varpi$. We choose $\mathbf{z}=(z,z_1,\ldots,z_{l-1})$ to be given by
\begin{equation*}
z=l\varpi,\quad z_p=(s_{p+1}-s_p)l\varpi+e\varpi,\quad 1\leqslant p\leqslant
l-1.
\end{equation*}
Write $x=\exp(-2\pi\sqrt{-1}\varpi)$. Let $\mathbf{u}=(u,u_1,\ldots, u_l)$
be given by
\begin{equation*}
u=x^{l},\quad u_p=\varepsilon^{p-1}x^{s_pl-(p-1)e},\quad 1\leqslant p\leqslant
l.
\end{equation*}
By \cite[Theorem 4.12]{BMR} the same definition as in Section
\ref{ss:KZ} yields a well defined $\mathscr{H}_{R,\mathbf{u},n}$-module
$$T_{R}(\lambda)=\KZ_{R}(\Delta_{R}(\lambda)).$$
It is a free $R$-module of finite rank
\iffalse. Moreover, for any ring
homomorphism $a: R\rightarrow\mathbb{C}$, write $\mathbb{C}_a$ for the vector space $\mathbb{C}$
equipped with the $R$-module structure given by $a$. Let $a(\mathbf{z})$,
$a(\mathbf{u})$ denote the images of $\mathbf{z}$, $\mathbf{u}$ by $a$. Note that we
have $H_{a(\mathbf{z}),n}=H_{R,\mathbf{z},n}\otimes_R\mathbb{C}_a$ and
$\mathscr{H}_{a(\mathbf{u}),n}=\mathscr{H}_{R,\mathbf{u},n}\otimes_R\mathbb{C}_a$. Denote the
Knizhnik-Zamolodchikov functor of $H_{a(\mathbf{z}),n}$ by $\KZ_{a(\mathbf{z})}$
and the standard module corresponding to $\lambda$ by
$\Delta_{a(\mathbf{z})}(\lambda)$. Then by the existence and unicity theorem for linear
differential equations, there is a canonical isomorphism of $\mathscr{H}_{a(\mathbf{u}),n}$-modules
\begin{equation*}
T_{R}(\lambda)\otimes_{R}\mathbb{C}_{a}\cong\KZ_{a(\mathbf{z})}(\Delta_{a(\mathbf{z})}(\lambda)).
\end{equation*}
Similarly, for any ring $R'$ over $R$, the module $T_{R'}(\lambda)=\KZ_{R'}(\Delta_{R'}(\lambda))$ is well defined. It is free over $R'$. For any homomorphism $a':R'\rightarrow\mathbb{C}$ such that the restriction of $a'$ to $R$ is equal to $a$, we have
\begin{equation}\label{eq:basechange}
T_{R'}(\lambda)\otimes_{R'}\mathbb{C}_{a'}\cong\KZ_{a(\mathbf{z})}(\Delta_{a(\mathbf{z})}(\lambda)).
\end{equation}
We also have
\begin{equation*}
T_{R}(\lambda)\otimes_R{R'}\cong T_{R'}(\lambda).
\end{equation*}
\fi
and it commutes with the base
change functor by the existence and uniqueness theorem for linear
differential equations, i.e., for any ring homomorphism $R\rightarrow R'$
over $\mathbb{C}$, we have a canonical isomorphism of
$\mathscr{H}_{R',\mathbf{u},n}$-modules
\begin{equation}\label{eq:basechange}
T_{R'}(\lambda)=\KZ_{R'}(\Delta_{R'}(\lambda))\cong
T_{R}(\lambda)\otimes_RR'.
\end{equation} In particular, for any ring
homomorphism $a: R\rightarrow\mathbb{C}$, write $\mathbb{C}_a$ for the vector space $\mathbb{C}$
equipped with the $R$-module structure given by $a$. Let $a(\mathbf{z})$,
$a(\mathbf{u})$ denote the images of $\mathbf{z}$, $\mathbf{u}$ by $a$. Note that we
have $H_{a(\mathbf{z}),n}=H_{R,\mathbf{z},n}\otimes_R\mathbb{C}_a$ and
$\mathscr{H}_{a(\mathbf{u}),n}=\mathscr{H}_{R,\mathbf{u},n}\otimes_R\mathbb{C}_a$. Denote the
Knizhnik-Zamolodchikov functor of $H_{a(\mathbf{z}),n}$ by $\KZ_{a(\mathbf{z})}$
and the standard module corresponding to $\lambda$ by
$\Delta_{a(\mathbf{z})}(\lambda)$. Then we have an isomorphism of $\mathscr{H}_{a(\mathbf{u}),n}$-modules
\begin{equation*}
T_{R}(\lambda)\otimes_{R}\mathbb{C}_{a}\cong\KZ_{a(\mathbf{z})}(\Delta_{a(\mathbf{z})}(\lambda)).
\end{equation*}
Let $K$ be the fraction field of $R$. By \cite[Theorem 2.19]{GGOR} the category
$\mathcal{O}_{K,\mathbf{z},n}$ is split semisimple. In particular, the standard
modules are simple. We have
$$\{T_K(\lambda),\lambda\in\mathcal{P}_{n,l}\}=\Irr(\mathscr{H}_{K,\mathbf{u},n}).$$
The Hecke algebra
$\mathscr{H}_{K,\mathbf{u},n}$ is also split semisimple and we have
$$\{S_{K,\lambda},\lambda\in\mathcal{P}_{n,l}\}=\Irr(\mathscr{H}_{K,\mathbf{u},n}),$$
see for example \cite[Corollary 13.9]{A}. Thus there is a bijection
$\varphi: \mathcal{P}_{n,l}\rightarrow\mathcal{P}_{n,l}$ such that $T_K(\lambda)$ is
isomorphic to $S_{K,\varphi(\lambda)}$ for all $\lambda$. We claim that
$\varphi$ is identity. To see this, consider the algebra
homomorphism $a_0:R\rightarrow\mathbb{C}$ given by $\varpi\mapsto 0$. Then
$\mathscr{H}_{a_0(\mathbf{u}),n}$ is canonically isomorphic to the group
algebra $\mathbb{C} B_n(l)$, thus it is semi-simple. Let $\overline{K}$ be
the algebraic closure of $K$. Let $\overline{R}$ be the integral
closure of $R$ in $\overline{K}$ and fix an extension
$\overline{a}_0$ of $a_0$ to $\overline{R}$. By Tits' deformation
theorem (see for example \cite[Section 68A]{CuR}), there is a
bijection
$$\psi:\Irr(\mathscr{H}_{\overline{K},\mathbf{u},n})\overset{\sim}\rightarrow\Irr(\mathscr{H}_{a_0(\mathbf{u}),n})$$
such that
\begin{equation*}
\psi(T_{\overline{K}}(\lambda))=T_{\overline{R}}(\lambda)\otimes_{\overline{R}}\mathbb{C}_{\overline{a}_0},\quad
\psi(S_{\overline{K},\lambda})=S_{\overline{R},\lambda}\otimes_{\overline{R}}\mathbb{C}_{\overline{a}_0}.
\end{equation*}
By the definition of Specht modules we have
$S_{\overline{R},\lambda}\otimes_{\overline{R}}\mathbb{C}_{\overline{a}_0}\cong\lambda$
as $\mathbb{C} B_n(l)$-modules. On the other hand, since $a_0(\mathbf{z})=0$, by
(\ref{eq:basechange}) we have the following isomorphisms
\begin{eqnarray*}
T_{\overline{R}}(\lambda)\otimes_{\overline{R}}\mathbb{C}_{\overline{a}_0}&\cong&T_R(\lambda)\otimes_R\mathbb{C}_{a_0}\\
&\cong&\KZ_0(\Delta_0(\lambda))\\
&=&\lambda.
\end{eqnarray*}
So $\psi(T_{\overline{K}}(\lambda))=\psi(S_{\overline{K},\lambda})$.
Hence we have $T_{\overline{K}}(\lambda)\cong S_{\overline{K},\lambda}$.
Since $T_{\overline{K}}(\lambda)=T_K(\lambda)\otimes_K\overline{K}$ is
isomorphic to
$S_{\overline{K},\varphi(\lambda)}=S_{K,\varphi(\lambda)}\otimes_K\overline{K}$,
we deduce that $\varphi(\lambda)=\lambda$. The claim is proved.
Finally, let $\mathfrak{m}$ be the maximal ideal of $R$ consisting
of the functions vanishing at $\varpi=-1/el$. Let $\widehat R$ be
the completion of $R$ at $\mathfrak{m}$. It is a discrete valuation
ring with residue field $\mathbb{C}$. Let $a_1:\widehat R\rightarrow
\widehat{R}/\mathfrak{m}\widehat{R}=\mathbb{C}$ be the quotient map. We have
$a_1(\mathbf{z})=\mathbf{h}$ and $a_1(\mathbf{u})=\mathbf{q}$. Let $\widehat{K}$ be the
fraction field of $\widehat R$. Recall that the decomposition map is
given by
$$d: K(\mathscr{H}_{\widehat{K},\mathbf{u},n})\rightarrow K(\mathscr{H}_{\mathbf{q},n}),\quad [M]\mapsto [L\otimes_{\widehat{R}}\mathbb{C}_{a_1}].$$
Here $L$ is any free $\widehat{R}$-submodule of $M$ such that
$L\otimes_{\widehat{R}}\widehat{K}=M$. The choice of $L$ does not
affect the class $[L\otimes_{\widehat{R}}\mathbb{C}_{a_1}]$ in
$K(\mathscr{H}_{\mathbf{q},n})$. See \cite[Section 13.3]{A} for details on this
map. Now, observe that we have
\begin{eqnarray*}
&d([S_{\widehat{K},\lambda}])= [S_{\widehat{R},\lambda}\otimes_{\widehat{R}}\mathbb{C}_{a_1}]=[S_\lambda],\\
&d([T_{\widehat{K}}(\lambda)])= [T_{\widehat{R}}(\lambda)\otimes_{\widehat{R}}\mathbb{C}_{a_1}]=[\KZ(\Delta(\lambda))].
\end{eqnarray*}
Since $\widehat{K}$ is an extension of $K$, by the last paragraph we
have $[S_{\widehat{K},\lambda}]=[T_{\widehat{K}}(\lambda)]$. We deduce
that $[\KZ(\Delta(\lambda))]=[S_\lambda]$.
\end{proof}
\section{$i$-Restriction and $i$-Induction}\label{iresiind}
We define in this section the $i$-restriction and $i$-induction
functors for the cyclotomic rational DAHA's. This is done in
parallel with the Hecke algebra case.
\subsection{}\label{ss:ireshecke}
Let us recall the definition of the $i$-restriction and
$i$-induction functors for $\mathscr{H}_{\mathbf{q},n}$. First define the
Jucys--Murphy elements $J_0,\ldots, J_{n-1}$ in
$\mathscr{H}_{\mathbf{q},n}$ by
\begin{equation*}
J_0=T_0,\quad J_i=q^{-1}T_iJ_{i-1}T_i\quad\text{ for }1\leqslant i\leqslant
n-1.
\end{equation*}
Write $Z(\mathscr{H}_{\mathbf{q},n})$ for the center of
$\mathscr{H}_{\mathbf{q},n}$. For any symmetric polynomial $\sigma$ of
$n$ variables the element $\sigma(J_0,\ldots,J_{n-1})$ belongs to
$Z(\mathscr{H}_{\mathbf{q},n})$ (cf. \cite[Section $13.1$]{A}). In
particular, if $z$ is a formal variable the polynomial
$C_n(z)=\prod_{i=0}^{n-1}(z-J_i)$ in $\mathscr{H}_{\mathbf{q},n}[z]$ has
coefficients in $Z(\mathscr{H}_{\mathbf{q},n})$.
Now, for any $a(z)\in\mathbb{C}(z)$ let $P_{n,a(z)}$ be the exact
endo-functor of the category $\mathscr{H}_{\mathbf{q},n}\modu$ that maps an object $M$ to the generalized eigenspace of $C_n(z)$ in $M$
with the eigenvalue $a(z)$.
For any $i\in\mathbb{Z}/e\mathbb{Z}$ the $i$-restriction functor and $i$-induction
functor
$$E_i(n)^{\scriptscriptstyle\mathscr{H}}: \mathscr{H}_{\mathbf{q},n}\modu\rightarrow\mathscr{H}_{\mathbf{q},n-1}\modu,
\quad F_i(n)^{\scriptscriptstyle\mathscr{H}}:
\mathscr{H}_{\mathbf{q},n-1}\modu\rightarrow\mathscr{H}_{\mathbf{q},n}\modu$$ are defined as
follows (cf. \cite[Definition 13.33]{A}):
\begin{eqnarray*}
E_i(n)^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{a(z)\in\mathbb{C}(z)}P_{n-1,a(z)/(z-q^i)}\circ E(n)^{\scriptscriptstyle\mathscr{H}}\circ P_{n,a(z)},\label{e}\\
F_i(n)^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{a(z)\in\mathbb{C}(z)}P_{n,a(z)(z-q^i)}\circ
F(n)^{\scriptscriptstyle\mathscr{H}}\circ P_{n-1,a(z)}.\label{f}
\end{eqnarray*}
We will write
\begin{equation*}
E^{\scriptscriptstyle\mathscr{H}}_i=\bigoplus_{n\geqslant 1}E_i(n)^{\scriptscriptstyle\mathscr{H}},\quad
F^{\scriptscriptstyle\mathscr{H}}_i=\bigoplus_{n\geqslant 1}F_i(n)^{\scriptscriptstyle\mathscr{H}}.
\end{equation*}
They are endo-functors of $\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu$. For each $\lambda\in\mathcal{P}_{n,l}$ set $$a_\lambda(z)=\prod_{v
\in\Upsilon_\lambda}(z-q^{\res(v)}).$$ We recall some properties of
these functors in the following proposition.
\begin{prop}\label{hv}
(1) The functors $E_i(n)^{\scriptscriptstyle\mathscr{H}}$, $F_i(n)^{\scriptscriptstyle\mathscr{H}}$ are exact. The
functor $E_i(n)^{\scriptscriptstyle\mathscr{H}}$ is biadjoint to
$F_i(n)^{\scriptscriptstyle\mathscr{H}}$.
(2) For any $\lambda\in\mathcal{P}_{n,l}$ the element $C_n(z)$ has a
unique eigenvalue on the Specht module $S_\lambda$. It is equal to
$a_\lambda(z)$.
(3) We have
\begin{equation*}
E_i(n)^{\scriptscriptstyle\mathscr{H}}([S_\lambda])=\sum_{\res(\lambda/\mu)=i}[S_\mu],\qquad
F_i(n)^{\scriptscriptstyle\mathscr{H}}([S_\lambda])=\sum_{\res(\mu/\lambda)=i}[S_\mu].
\end{equation*}
(4) We have
\begin{equation*}
E(n)^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}E_i(n)^{\scriptscriptstyle\mathscr{H}},\quad
F(n)^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}F_i(n)^{\scriptscriptstyle\mathscr{H}}.
\end{equation*}
\end{prop}
\begin{proof}
Part $(1)$ is obvious. See \cite[Theorem $13.21$(2)]{A} for $(2)$ and \cite[Lemma $13.37$]{A} for $(3)$. Part $(4)$ follows from (3) and \cite[Lemma $13.32$]{A}.
\end{proof}
\subsection{}\label{iresdaha}
By Lemma \ref{lem:center}(1) we have an algebra isomorphism
$$\gamma: Z(\mathcal{O}_{\mathbf{h},n})\overset{\sim}\rightarrow
Z(\mathscr{H}_{\mathbf{q},n}).$$ So there are unique elements
$K_1,\ldots,K_n\in Z(\mathcal{O}_{\mathbf{h},n})$ such that the polynomial
$$D_n(z)=z^n+K_1z^{n-1}+\cdots+K_n$$ maps to $C_n(z)$ by $\gamma$. Since the
elements $K_1,\ldots,K_n$ act on simple modules by scalars and the
category $\mathcal{O}_{\mathbf{h},n}$ is artinian, every module $M$ in
$\mathcal{O}_{\mathbf{h},n}$ is a direct sum of generalized eigenspaces of
$D_n(z)$. For $a(z)\in\mathbb{C}(z)$ let $Q_{n,a(z)}$ be the exact
endo-functor of $\mathcal{O}_{\mathbf{h},n}$ which maps an object $M$ to the generalized eigenspace of $D_n(z)$ in $M$ with the eigenvalue
$a(z)$.
\begin{df}\label{def}
The \emph{$i$-restriction} functor and the \emph{$i$-induction}
functor
$$E_i(n): \mathcal{O}_{\mathbf{h},n}\rightarrow\mathcal{O}_{\mathbf{h},n-1},\quad F_i(n): \mathcal{O}_{\mathbf{h},n-1}\rightarrow\mathcal{O}_{\mathbf{h},n}$$
are given by
\begin{eqnarray*}
E_i(n)=\bigoplus_{a(z)\in\mathbb{C}(z)}Q_{n-1,a(z)/(z-q^i)}\circ E(n)\circ Q_{n,a(z)},\\
F_i(n)=\bigoplus_{a(z)\in\mathbb{C}(z)}Q_{n,a(z)(z-q^i)}\circ F(n)\circ
Q_{n-1,a(z)}.
\end{eqnarray*}
\end{df}
We will write
\begin{equation}\label{ireso}
E_i=\bigoplus_{n\geqslant 1}E_i(n),\quad F_i=\bigoplus_{n\geqslant 1}F_i(n).
\end{equation}
We have the following proposition.
\begin{prop}\label{isoi}
For any $i\in\mathbb{Z}/e\mathbb{Z}$ there are isomorphisms of functors
\begin{eqnarray*}
\KZ\circ E_i(n)\cong E^{\scriptscriptstyle\mathscr{H}}_i(n)\circ\KZ, \quad \KZ\circ
F_i(n)\cong F^{\scriptscriptstyle\mathscr{H}}_i(n)\circ\KZ.
\end{eqnarray*}
\end{prop}
\begin{proof}
Since $\gamma(D_n(z))=C_n(z)$, by Lemma \ref{lem:center}(2) for any
$a(z)\in\mathbb{C}(z)$ we have $$\KZ\circ Q_{n,a(z)}\cong P_{n,
a(z)}\circ\KZ.$$ So the proposition follows from Theorem \ref{iso}
and Corollary \ref{indiso}.
\end{proof}
The next proposition is the DAHA version of Proposition \ref{hv}.
\begin{prop}\label{dv}
(1) The functors $E_i(n)$, $F_i(n)$ are exact. The functor $E_i(n)$
is biadjoint to $F_i(n)$.
(2) For any $\lambda\in\mathcal{P}_{n,l}$ the unique eigenvalue of
$D_n(z)$ on the standard module $\Delta(\lambda)$ is $a_\lambda(z)$.
(3) We have the following equalities
\begin{equation}\label{Pierii}
E_i(n)([\Delta(\lambda)])=\sum_{\res(\lambda/\mu)=i}[\Delta(\mu)],\qquad
F_i(n)([\Delta(\lambda)])=\sum_{\res(\mu/\lambda)=i}[\Delta(\mu)].
\end{equation}
(4) We have
\begin{equation*}
E(n)=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}E_i(n),\quad
F(n)=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}F_i(n).
\end{equation*}
\end{prop}
\begin{proof}
(1) This is by construction and by Proposition \ref{leftadjunction}.
(2) Since a standard module is indecomposable, the element $D_n(z)$
has a unique eigenvalue on $\Delta(\lambda)$. By Lemma \ref{Specht}
this eigenvalue is the same as the eigenvalue of $C_n(z)$ on
$S_\lambda$.
(3) Let us prove the equality for $E_i(n)$. The Pieri rule for the
group $B_n(l)$ together with Proposition \ref{Res}(2) yields
\begin{equation}\label{Pieri}
E(n)([\Delta(\lambda)])=\sum_{|\lambda/\mu|=1}[\Delta(\mu)],\quad
F(n)([\Delta(\lambda)])=\sum_{|\mu/\lambda|=1}[\Delta(\mu)].
\end{equation}
So we have
\begin{eqnarray*}
E_i(n)([\Delta(\lambda)])&=&\bigoplus_{a(z)\in\mathbb{C}(z)}Q_{n-1,a(z)/(z-q^i)}(E(n)(Q_{n,a(z)}([\Delta(\lambda)])))\\
&=&Q_{n-1,a_\lambda(z)/(z-q^i)}(E(n)(Q_{n,a_\lambda(z)}([\Delta(\lambda)])))\\
&=&Q_{n-1,a_\lambda(z)/(z-q^i)}(E(n)([\Delta(\lambda)]))\\
&=&Q_{n-1,a_\lambda(z)/(z-q^i)}(\sum_{|\lambda/\mu|=1}[\Delta(\mu)])\\
&=&\sum_{\res(\lambda/\mu)=i}[\Delta(\mu)].
\end{eqnarray*}
The last equality follows from the fact that for any $l$-partition
$\mu$ such that $|\lambda/\mu|=1$ we have
$a_\lambda(z)=a_\mu(z)(z-q^{\res(\lambda/\mu)})$. The proof for $F_i(n)$
is similar.
(4) It follows from part (3) and (\ref{Pieri}).
\end{proof}
\begin{cor}\label{rep}
Under the isomorphism $\theta$ in (\ref{Fock}) the operators $E_i$
and $F_i$ on $K(\mathcal{O}_{\mathbf{h},\mathbb{N}})$ go respectively to the operators
$e_i$ and $f_i$ on $\mathcal{F}_\mathbf{s}$. When $i$ runs over
$\mathbb{Z}/e\mathbb{Z}$ they yield an action of $\widehat{\mathfrak{sl}}_e$ on
$K(\mathcal{O}_{\mathbf{h},\mathbb{N}})$ such that $\theta$ is an isomorphism of
$\widehat{\mathfrak{sl}}_e$-modules.
\end{cor}
\begin{proof}
This is clear from Proposition \ref{dv}$(3)$ and from
(\ref{fockei}).
\end{proof}
\section{$\widehat{\mathfrak{sl}}_e$-categorification}\label{categorification}
In this section, we construct an
$\widehat{\mathfrak{sl}}_e$-categorification on the category
$\mathcal{O}_{\mathbf{h},\mathbb{N}}$ under some mild assumption on the parameter $\mathbf{h}$
(Theorem \ref{thm:categorification}).
\subsection{}\label{defcategorification}
Recall that we put $q=\exp(\frac{2\pi\sqrt{-1}}{e})$ and $P$ denotes
the weight lattice. Let $\mathcal{C}$ be a $\mathbb{C}$-linear artinian
abelian category. For any functor $F:\mathcal{C}\rightarrow\mathcal{C}$ and
any $X\in\End(F)$, the generalized eigenspace of $X$ acting on $F$
with eigenvalue $a\in\mathbb{C}$ will be called the $a$-eigenspace of $X$ in
$F$. By \cite[Definition 5.29]{R2} an
\emph{$\widehat{\mathfrak{sl}}_e$-categorification} on $\mathcal{C}$ is
the data of
\begin{itemize}
\item[(a)] an adjoint pair $(U,V)$ of exact functors
$\mathcal{C}\rightarrow\mathcal{C}$,
\item[(b)] $X\in\End(U)$ and $T\in\End(U^2)$,
\item[(c)] a decomposition $\mathcal{C}=\bigoplus_{\tau\in
P}\mathcal{C}_\tau$,
\end{itemize}
such that, setting $U_i$ (resp. $V_i$) to be the $q^i$-eigenspace of $X$
in $U$ (resp. in $V$)\footnote{Here $X$ acts on $V$ via the
isomorphism $\End(U)\cong\End(V)^{op}$ given by adjunction, see
\cite[Section 4.1.2]{CR} for the precise definition.} for $i\in\mathbb{Z}/e\mathbb{Z}$, we have
\begin{itemize}
\item[(1)] $U=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}U_i$,
\item[(2)] the endomorphisms $X$ and $T$ satisfy
\begin{eqnarray}
&(1_{U}T)\circ(T1_{U})\circ(1_{U}T)=(T1_{U})\circ(1_{U}T)\circ(T1_{U}),\nonumber\\
&(T+1_{U^2})\circ(T-q1_{U^2})=0,\label{affineHeckerelation}\\
&T\circ(1_{U}X)\circ T=qX1_{U},\nonumber
\end{eqnarray}
\item[(3)] the action of $e_i=U_i$, $f_i=V_i$ on $K(\mathcal{C})$
with $i$ running over $\mathbb{Z}/e\mathbb{Z}$ gives an integrable representation of
$\widehat{\mathfrak{sl}}_e$.
\item[(4)] $U_i(\mathcal{C}_\tau)\subset \mathcal{C}_{\tau+\alpha_i}$ and $V_i(\mathcal{C}_\tau)\subset
\mathcal{C}_{\tau-\alpha_i}$,
\item[(5)] $V$ is isomorphic to a left adjoint of $U$.
\end{itemize}
\subsection{}\label{ss:XT}
We construct a $\widehat{\Lie{sl}}_e$-categorification on $\mathcal{O}_{\mathbf{h},\mathbb{N}}$ in the
following way. The adjoint pair will be given by $(E,F)$. To
construct the part (b) of the data we need to go back to Hecke
algebras. Following \cite[Section $7.2.2$]{CR} let $X^{\scriptscriptstyle\mathscr{H}}$ be the
endomorphism of $E^{\scriptscriptstyle\mathscr{H}}$ given on $E^{\scriptscriptstyle\mathscr{H}}(n)$ as the
multiplication by the Jucys--Murphy element $J_{n-1}$. Let $T^{\scriptscriptstyle\mathscr{H}}
be the endomorphism of $(E^{\scriptscriptstyle\mathscr{H}})^2$ given on $E^{\scriptscriptstyle\mathscr{H}}(n)\circ
E^{\scriptscriptstyle\mathscr{H}}(n-1)$ as the multiplication by the element $T_{n-1}$ in
$\mathscr{H}_{\mathbf{q},n}$. The endomorphisms $X^{\scriptscriptstyle\mathscr{H}}$ and $T^{\scriptscriptstyle\mathscr{H}}$
satisfy the relations (\ref{affineHeckerelation}). Moreover the
$q^i$-eigenspace of $X^{\scriptscriptstyle\mathscr{H}}$ in $E^{\scriptscriptstyle\mathscr{H}}$ and $F^{\scriptscriptstyle\mathscr{H}}$ gives
respectively the $i$-restriction functor $E^{\scriptscriptstyle\mathscr{H}}_i$ and the
$i$-induction functor $F^{\scriptscriptstyle\mathscr{H}}_i$ for any $i\in\mathbb{Z}/e\mathbb{Z}$.
By Theorem \ref{iso} we have an isomorphism $\KZ\circ E\cong
E^{\scriptscriptstyle\mathscr{H}}\circ\KZ$. This yields an isomorphism
$$\End(\KZ\circ E)\cong \End(E^{\scriptscriptstyle\mathscr{H}}\circ\KZ).$$ By Proposition \ref{standard}(1) the functor $E$ maps projective objects to projective ones, so Lemma
\ref{fullyfaithful} applied to
$\mathcal{O}_1=\mathcal{O}_2=\mathcal{O}_{\mathbf{h},\mathbb{N}}$ and $K=L=E$ yields an isomorphism
$$\End(E)\cong\End(\KZ\circ E).$$ Composing it with the isomorphism
above gives a ring isomorphism
\begin{equation}\label{sigmae}
\sigma_{E}:\End(E)\overset{\sim}\rightarrow\End(E^{\scriptscriptstyle\mathscr{H}}\circ\KZ).
\end{equation}
Replacing $E$ by $E^2$ we get another isomorphism
$$\sigma_{E^2}:\End(E^2)\overset{\sim}\rightarrow\End((E^{\scriptscriptstyle\mathscr{H}})^2\circ\KZ).$$
The data of $X\in\End(E)$ and $T\in\End(E^2)$ in our
$\widehat{\mathfrak{sl}}_e$-categorification on $\mathcal{O}_{\mathbf{h},\mathbb{N}}$ will be
provided by
\begin{equation*}
X=\sigma^{-1}_{E}(X^{\scriptscriptstyle\mathscr{H}} 1_{\KZ}),\quad
T=\sigma^{-1}_{E^2}(T^{\scriptscriptstyle\mathscr{H}} 1_{\KZ}).
\end{equation*}
Finally, the part (c) of the data will be given by the block
decomposition of the category $\mathcal{O}_{\mathbf{h},\mathbb{N}}$. Recall from
\cite[Theorem 2.11]{LM} that the block decomposition of the category
$\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu$ yields
$$\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu=\bigoplus_{\tau\in P}(\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu)_\tau,$$
where $(\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu)_\tau$ is the subcategory
generated by the composition factors of the Specht modules $S_\lambda$
with $\lambda$ running over $l$-partitions of weight $\tau$. By
convention $(\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu)_\tau$ is zero if such
$\lambda$ does not exist. By Lemma \ref{lem:center} the functor $\KZ$
induces a bijection between the blocks of the category
$\mathcal{O}_{\mathbf{h},\mathbb{N}}$ and the blocks of $\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu$.
So the block decomposition of $\mathcal{O}_{\mathbf{h},\mathbb{N}}$ is
\begin{equation*}
\mathcal{O}_{\mathbf{h},\mathbb{N}}=\bigoplus_{\tau\in P}(\mathcal{O}_{\mathbf{h},\mathbb{N}})_\tau,
\end{equation*}
where $(\mathcal{O}_{\mathbf{h},\mathbb{N}})_\tau$ is the block corresponding to
$(\mathscr{H}_{\mathbf{q},\mathbb{N}}\modu)_\tau$ via $\KZ$.
\subsection{}\label{ss:categorification}
Now we prove the following theorem.
\begin{thm}\label{thm:categorification}
The data of
\begin{itemize}
\item[(a)] the adjoint pair $(E,F)$,
\item[(b)] the endomorphisms $X\in\End(E)$, $T\in\End(E^2)$,
\item[(c)] the decomposition $\mathcal{O}_{\mathbf{h},\mathbb{N}}=\bigoplus_{\tau\in P}(\mathcal{O}_{\mathbf{h},\mathbb{N}})_\tau$
\end{itemize}
is a $\widehat{\Lie{sl}}_e$-categorification on $\mathcal{O}_{\mathbf{h},\mathbb{N}}$.
\end{thm}
\begin{proof}
First, we prove that for any $i\in\mathbb{Z}/e\mathbb{Z}$ the $q^i$-generalized
eigenspaces of $X$ in $E$ and $F$ are respectively the
$i$-restriction functor $E_i$ and the $i$-induction functor $F_i$ as
defined in (\ref{ireso}).
Recall from Proposition \ref{hv}(4) and Proposition \ref{dv}(4) that
we have $$E=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}E_i\quad\text{ and }\quad
E^{\scriptscriptstyle\mathscr{H}}=\bigoplus_{i\in\mathbb{Z}/e\mathbb{Z}}E^{\scriptscriptstyle\mathscr{H}}_i.$$ By the proof of
Proposition \ref{isoi} we see that any isomorphism $$\KZ\circ
E\cong E^{\scriptscriptstyle\mathscr{H}}\circ\KZ$$ restricts to an isomorphism $\KZ\circ
E_i\cong E^{\scriptscriptstyle\mathscr{H}}_i\circ\KZ$ for each $i\in\mathbb{Z}/e\mathbb{Z}$. So the
isomorphism $\sigma_E$ in (\ref{sigmae}) maps $\Hom(E_i,E_j)$ to
$\Hom(E_i^{\scriptscriptstyle\mathscr{H}}\circ\KZ,E_j^{\scriptscriptstyle\mathscr{H}}\circ\KZ)$. Write
$$X=\sum_{i,j\in\mathbb{Z}/e\mathbb{Z}}X_{ij},\quad X^{\scriptscriptstyle\mathscr{H}}
1_{\KZ}=\sum_{i,j\in\mathbb{Z}/e\mathbb{Z}}(X^{\scriptscriptstyle\mathscr{H}} 1_{\KZ})_{ij}$$ with
$X_{ij}\in\Hom(E_i, E_j)$ and $(X^{\scriptscriptstyle\mathscr{H}}
1_{\KZ})_{ij}\in\Hom(E_i^{\scriptscriptstyle\mathscr{H}}\circ\KZ,E_j^{\scriptscriptstyle\mathscr{H}}\circ\KZ)$. We
have $$\sigma_E(X_{ij})=(X^{\scriptscriptstyle\mathscr{H}} 1_{\KZ})_{ij}.$$ Since
$E^{\scriptscriptstyle\mathscr{H}}_i$ is the $q^i$-eigenspace of $X^{\scriptscriptstyle\mathscr{H}}$ in $E^{\scriptscriptstyle\mathscr{H}}$, we
have $(X^{\scriptscriptstyle\mathscr{H}} 1_{\KZ})_{ij}=0$ for $i\neq j$ and $(X^{\scriptscriptstyle\mathscr{H}}
1_{\KZ})_{ii}-q^i$ is nilpotent for any $i\in\mathbb{Z}/e\mathbb{Z}$. Since
$\sigma_{E}$ is an isomorphism of rings, this implies that
$X_{ij}=0$ and $X_{ii}-q^i$ is nilpotent in $\End(E)$. So $E_i$ is
the $q^i$-eigenspace of $X$ in $E$. The fact that $F_i$ is the
$q^i$-eigenspace of $X$ in $F$ follows from adjunction.
Now, let us check the conditions (1)--(5):
(1) It is given by Proposition \ref{dv}(4).
(2) Since $X^{\scriptscriptstyle\mathscr{H}}$ and $T^{\scriptscriptstyle\mathscr{H}}$ satisfy the relations in
(\ref{affineHeckerelation}), the endomorphisms $X$ and $T$ also
satisfy them, because these relations are preserved by ring
homomorphisms.
(3) It follows from Corollary \ref{rep}.
(4) By the definition of $(\mathcal{O}_{\mathbf{h},\mathbb{N}})_\tau$ and Lemma
\ref{Specht}, the standard modules in $(\mathcal{O}_{\mathbf{h},\mathbb{N}})_\tau$ are
all the $\Delta(\lambda)$ such that $\wt(\lambda)=\tau$. By $(\ref{wt})$
if $\mu$ is an $l$-partition such that $\res(\lambda/\mu)=i$ then
$\wt(\mu)=\wt(\lambda)+\alpha_i.$ Now, the result follows from
(\ref{Pierii}).
(5) This is Proposition \ref{leftadjunction}.
\end{proof}
\section{Crystals}\label{s:crystal}
Using the $\widehat{\Lie{sl}}_e$-categorification in Theorem
\ref{thm:categorification} we construct a crystal on
$\mathcal{O}_{\mathbf{h},\mathbb{N}}$ and prove that it coincides with the crystal of
the Fock space $\mathcal{F}_\mathbf{s}$ (Theorem \ref{thm:main}).
\subsection{}\label{defcrystal}
A \emph{crystal} (or more precisely, an
$\widehat{\mathfrak{sl}}_e$-crystal) is a set $B$ together with maps
$$\wt: B\rightarrow P,\quad \tilde{e}_i, \tilde{f}_i: B\rightarrow B\sqcup
\{0\},\quad \varepsilon_i,\varphi_i: B\rightarrow
\mathbb{Z}\sqcup\{-\infty\},$$ such that
\begin{itemize}
\item $\varphi_i(b)=\varepsilon_{i}(b)+\pair{\alpha\spcheck_i,\wt(b)}$,
\item if $\tilde{e}_ib\in B$, then
$\wt(\tilde{e}_ib)=\wt(b)+\alpha_i,\quad
\varepsilon_i(\tilde{e}_ib)=\varepsilon_i(b)-1,\quad
\varphi_i(\tilde{e}_ib)=\varphi_i(b)+1$,
\item if $\tilde{f}_ib\in B$, then
$\wt(\tilde{f}_ib)=\wt(b)-\alpha_i,\quad
\varepsilon_i(\tilde{f}_ib)=\varepsilon_i(b)+1,\quad
\varphi_i(\tilde{f}_ib)=\varphi_i(b)-1$,
\item let $b, b'\in B$, then $\tilde{f}_ib=b'$ if and only if
$\tilde{e}_ib'=b$,
\item if $\varphi_i(b)=-\infty$, then $\tilde{e}_ib=0$ and
$\tilde{f}_ib=0$.
\end{itemize}
Let $V$ be an integrable $\widehat{\mathfrak{sl}}_e$-module. For any
nonzero $v\in V$ and any $i\in\mathbb{Z}/e\mathbb{Z}$ we set
$$l_i(v)=\max\{l\in\mathbb{N}:\,e_i^{l}v\neq 0\}.$$ Write $l_i(0)=-\infty$.
For $l\geqslant 0$ let $$V_i^{<l}=\{v\in V:\,l_i(v) < l\}.$$ A weight
basis of $V$ is a basis $B$ of $V$ such that each element of $B$ is
a weight vector. Following A. Berenstein and D. Kazhdan (cf.
\cite[Definition 5.30]{BK}), a \emph{perfect basis} of $V$ is a
weight basis $B$ together with maps $\tilde{e}_i,\tilde{f}_i: B\rightarrow
B\sqcup\{0\}$ for $i\in \mathbb{Z}/e\mathbb{Z}$ such that
\begin{itemize}
\item for $b, b'\in B$ we have
$\tilde{f}_ib=b'$ if and only if $\tilde{e}_ib'=b,$
\item we have $\tilde{e}_i(b)\neq 0$ if and only if $e_i(b)\neq 0$,
\item if $e_i(b)\neq 0$ then we have
\begin{equation}\label{perf}
e_i(b)\in\mathbb{C}^\ast\tilde{e}_i(b)+V_i^{<l_i(b)-1}.
\end{equation}
\end{itemize}
We denote it by $(B,\tilde{e}_i, \tilde{f}_i)$. For such a basis let
$\mathrm{wt}(b)$ be the weight of $b$, let $\varepsilon_i(b)=l_i(b)$
and let
$$\varphi_i(b)=\varepsilon_i(b)+\pair{\alpha_i\spcheck, \mathrm{wt}(b)}$$
for all $b\in B$. The data
\begin{equation}\label{crystaldata}
(B,\wt,\tilde{e}_i,\tilde{f}_i,\varepsilon_i,\varphi_i)
\end{equation} is a crystal. We will
always attach this crystal structure to
$(B,\tilde{e}_i,\tilde{f}_i)$. We call $b\in B$ a primitive element
if $e_i(b)=0$ for all $i\in\mathbb{Z}/e\mathbb{Z}$. Let $B^+$ be the set of
primitive elements in $B$. Let $V^+$ be the vector space spanned by
all the primitive vectors in $V$. The following lemma is \cite[Claim
$5.32$]{BK}.
\begin{lemme}\label{basis}
For any perfect basis $(B,\tilde{e}_i,\tilde{f}_i)$ the set $B^+$ is
a basis of $V^+$.
\end{lemme}
\begin{proof}
By definition we have $B^+\subset V^+$. Given a vector $v\in V^+$,
there exist $\zeta_1,\ldots, \zeta_r\in\mathbb{C}^\ast$ and distinct
elements $b_1,\ldots,b_r\in B$ such that $v=\sum_{j=1}^r\zeta_jb_j$.
For any $i\in\mathbb{Z}/e\mathbb{Z}$ let $l_i=\max\{l_i(b_j):\,1\leqslant j\leqslant r\}$
and $J=\{j:\,l_i(b_j)=l_i,\,1\leqslant j\leqslant r\}$. Then by the third
property of perfect basis there exist $\eta_j\in\mathbb{C}^\ast$ for $j\in
J$ and a vector $w\in V_i^{<l_i-1}$ such that $0=e_i(v)=\sum_{j\in
J}\zeta_j\eta_j\tilde{e}_i(b_j)+w$. For distinct $j, j'\in J$, we
have $b_j\neq b_{j'}$, so $\tilde{e}_i(b_j)$ and
$\tilde{e}_i(b_{j'})$ are different unless they are zero. Moreover,
since $l_i(\tilde{e}_i(b_j))=l_i-1$, the equality yields that
$\tilde{e}_i(b_j)=0$ for all $j\in J$. So $l_i=0$. Hence $b_j\in
B^+$ for $j=1,\ldots,r$.
\end{proof}
\subsection{}\label{ss:perfectbasis}
Given an $\widehat{\Lie{sl}}_e$-categorification on a $\mathbb{C}$-linear artinian abelian
category $\mathcal{C}$ with the adjoint pair of endo-functors $(U,V)$,
$X\in\End(U)$ and $T\in\End(U^2)$, one can construct a perfect basis
of $K(\mathcal{C})$ as follows. For $i\in\mathbb{Z}/e\mathbb{Z}$ let $U_i$, $V_i$ be the
$q^i$-eigenspaces of $X$ in $U$ and $V$. By definition, the action
of $X$ restricts to each $U_i$. One can prove that $T$ also
restricts to endomorphism of $(U_i)^2$, see for example the
beginning of Section 7 in \cite{CR}. It follows that the data $(U_i,
V_i, X, T)$ gives an $\mathfrak{sl}_2$-categorification on $\mathcal{C}$ in the
sense of \cite[Section 5.21]{CR}. By \cite[Proposition 5.20]{CR}
this implies that for any simple object $L$ in $\mathcal{C}$, the object
$\mathrm{head}(U_i(L))$ (resp. $\mathrm{soc}(V_iL)$) is simple
unless it is zero.
Let $B_{\mathcal{C}}$ be the set of isomorphism classes of simple objects
in $\mathcal{C}$. As part of the data of the $\widehat{\Lie{sl}}_e$-categorification, we
have a decomposition $\mathcal{C}=\oplus_{\tau\in P}\mathcal{C}_\tau$. For a
simple module $L\in\mathcal{C}_\tau$, the weight of $[L]$ in $K(\mathcal{C})$ is
$\tau$. Hence $B_{\mathcal{C}}$ is a weight basis of $K(\mathcal{C})$. Now for
$i\in\mathbb{Z}/e\mathbb{Z}$ define the maps
\begin{eqnarray*}
\tilde{e}_i:& B_{\mathcal{C}}\rightarrow B_{\mathcal{C}}\sqcup\{0\},\quad &[L]\mapsto [\mathrm{head} (U_iL)],\\
\tilde{f}_i:& B_{\mathcal{C}}\rightarrow B_{\mathcal{C}}\sqcup\{0\},\quad &[L]\mapsto
[\soc (V_iL)].
\end{eqnarray*}
\begin{prop}\label{perfectbasis}
The data $(B_\mathcal{C},\tilde{e}_i,\tilde{f}_i)$ is a perfect basis of
$K(\mathcal{C})$.
\end{prop}
\begin{proof}
Fix $i\in \mathbb{Z}/e\mathbb{Z}$. Let us check the conditions in the definition in
order:
\begin{itemize}
\item for two simple modules $L$, $L'\in\mathcal{C}$, we have
$\tilde{e}_i([L])=[L']$ if and only if
$0\neq\Hom(U_iL,L')=\Hom(L,V_iL'),$ if and only if
$\tilde{f}_i([L'])=[L]$.
\item it follows from the fact that any non trivial module has a non
trivial head.
\item this is \cite[Proposition 5.20(d)]{CR}.
\end{itemize}
\end{proof}
\subsection{}\label{ss:mainresult}
Let $B_{\mathcal{F}_\mathbf{s}}$ be the set of $l$-partitions. In
\cite{JMMO} this set is given a crystal structure. We will call it
the crystal of the Fock space $\mathcal{F}_\mathbf{s}$.
\begin{thm}\label{thm:main}
(1) The set
$$B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}=\{[L(\lambda)]\in K(\mathcal{O}_{\mathbf{h},\mathbb{N}}): \lambda\in\mathcal{P}_{n,l}, n\in\mathbb{N}\}$$
and the maps
\begin{eqnarray*}
\tilde{e}_i:& B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}\rightarrow B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}\sqcup\{0\},\quad &[L]\mapsto [\mathrm{head} (E_iL)],\\
\tilde{f}_i:& B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}\rightarrow
B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}\sqcup\{0\},\quad &[L]\mapsto [\soc (F_iL)].
\end{eqnarray*}
define a crystal structure on $B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}$.
(2) The crystal $B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}$ given by (1) is isomorphic to
the crystal $B_{\mathcal{F}_\mathbf{s}}$.
\end{thm}
\begin{proof}
(1) Applying Proposition \ref{perfectbasis} to the
$\widehat{\Lie{sl}}_e$-categorification in Theorem \ref{thm:categorification} yields
that $(B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}},\tilde{e}_i,\tilde{f}_i)$ is a perfect
basis. So it defines a crystal structure on $B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}$ by
(\ref{crystaldata}).
(2) It is known that $B_{\mathcal{F}_\mathbf{s}}$ is a perfect basis
of $\mathcal{F}_\mathbf{s}$. Identify the $\widehat{\Lie{sl}}_e$-modules
$\mathcal{F}_\mathbf{s}$ and $K(\mathcal{O}_{\mathbf{h},\mathbb{N}})$. By Lemma \ref{basis}
the set $B_{\mathcal{F}_\mathbf{s}}^+$ and $B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}^+$
are two weight bases of $\mathcal{F}_\mathbf{s}^+$. So there is a
bijection $\psi: B_{\mathcal{F}_\mathbf{s}}^+\rightarrow
B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}^+$ such that $\wt(b)=\wt(\psi(b))$. Since
$\mathcal{F}_\mathbf{s}$ is a direct sum of highest weight simple
$\widehat{\mathfrak{sl}}_e$-modules, this bijection extends to an
automorphism $\psi$ of the $\widehat{\mathfrak{sl}}_e$-module
$\mathcal{F}_\mathbf{s}$. By \cite[Main Theorem $5.37$]{BK} any
automorphism of $\mathcal{F}_\mathbf{s}$ which maps
$B_{\mathcal{F}_\mathbf{s}}^+$ to $B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}^+$ induces an
isomorphism of crystals $B_{\mathcal{F}_\mathbf{s}}\cong
B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}$.
\end{proof}
\begin{rmq}
One can prove that if $n < e$ then a simple module
$L\in\mathcal{O}_{\mathbf{h},n}$ has finite dimension over $\mathbb{C}$ if and only if
the class $[L]$ is a primitive element in $B_{\mathcal{O}_{\mathbf{h},\mathbb{N}}}$. In
the case $n=1$, we have $B_n(l)=\mu_l$, the cyclic group, and the
primitive elements in the crystal $B_{\mathcal{F}_\mathbf{s}}$ have
explicit combinatorial descriptions. This yields another proof of
the classification of finite dimensional simple modules of
$H_\mathbf{h}(\mu_l)$, which was first given by W. Crawley-Boevey and M. P. Holland. See type $A$ case of \cite[Theorem $7.4$]{CB}.
\end{rmq}
\end{document} |
\begin{document}
\begin{abstract} We propose and analyze a mathematical model of Modulated Luminescent Tomography. We show that when single X-rays or focused X-rays are used as an excitation, the problem is similar to the inversion of weighted X-ray transforms. In particular, we give an explicit inversion in the case of Dual Cone X-ray excitation.
\end{abstract}
\maketitle
\section{Introduction: A brief description of the method and the model}
\subsection{ A brief description of the modalities} \
The purpose of this work is to build and analyze the mathematical model of several medical multiwave imaging techniques that we call Modulated Luminescent Tomography. Certain phosphorescent contrast agents (for example, nanophosphors) are delivered to the cells and then illuminated with X-rays. The illuminated particles emit photons which scatter through the tissue and are detected outside the body, see, e.g., \cite{Liu2013}. The goal is to recover the concentration of the contrast agents. The resolution is expected to come from the X-ray excitation. In this respect, those modalities behave differently than Photoacoustic and Thermoacoustic Tomography, where the excitation is highly diffusive but the emitted ultrasound signal allows for high resolution, see, e.g., \cite{Wang2012}.
Two such techniques, X-ray luminescence computed tomography (XLCT), \cite{Pratx10} and X-ray micro-modulated luminescence tomography (XMLT) \cite{Cong14, St-double_cone_eng}, described below, have been proposed recently.
Microscopy is the principal observational tool that has made fundamental contributions to our understanding of biological systems and engineered tissues. Popular microscopy techniques use visible light and electrons. Sample preparation and imaging with these techniques are relatively simple, being good for in situ or in vivo studies of cultured cell/tissue samples. Inherently, the resolution of optical microscopy is limited by diffraction. With additional sample preparation, stochastic information and innovative interference techniques $\sim$100\,nm resolution is achievable. Three-dimensional images can be obtained with optical sectioning. Ultimately, multiple scattering prevents these techniques from imaging thick samples. Photoacoustic tomography permits scalable resolution with a depth of up to $\sim$7\,cm with a depth-to-resolution ratio $\sim$200. Photoacoustic microscopy aims at millimeter depth and micron-scale resolution based on absorption contrast \cite{Wang2013}, which can be used to characterize the structure of a scaffold but does not provide the sensitivity of fluorescence imaging. All these methods are bounded by 1\,mm imaging depth.
In the in vivo imaging field, fluorescence molecular tomography (FMT) and bioluminescence tomography (BLT) are capable of visualizing biological processes at molecular and cellular levels deep inside living tissue \cite{Ntziachristos2005}. Such optical molecular imaging tools have major advantages in terms of high sensitivity, biological specificity, non-invasiveness, and cost-effectiveness, which are widely used in preclinical investigations and a limited number of clinical applications \cite{Wang2006}. However, resolution and stability of FMT and BLT still remain unsatisfactory. Recently, X-ray luminescence computed tomography (XLCT) was developed \cite{Pratx10} with nanophosphors (NPs) as imaging probes. Excited with a pencil beam of X-rays, NPs simultaneously give rise to luminescence emission whose photons can be efficiently collected with a CCD camera. This mechanism allows tomographic reconstruction of an NP distribution, similar to the case of X-ray CT. X-ray excitation not only confines luminescence sources within the active beam but also eliminates auto-fluorescence related artifacts in FMT images. However, due to several technical and physical limitations, the lower bound of XLCT resolution is about one millimeter and seems infeasible to break through.
Recently, a unique imaging approach called ``X-ray micro-modulated luminescence tomography (XMLT)'' was proposed \cite{Cong14, St-double_cone_eng}, which combines X-ray focusing, nanophosphor excitation, optical sensing, and image reconstruction in a synergistic fashion, and promises significant imaging performance gains in both microscopic and preclinical studies. The XMLT approach is
an exogenous reporter-based imaging system in which spatial resolution is determined by the excitation of the luminescence from nanophosphors with a micro-focused X-ray beam such as a zone-plane or a micro-focus X-ray source coupled with a polycapillary lens. High in vivo spatial resolution can be achieved due to the short wavelength and small spot size of the X-rays. When X-rays are focused in this way, double cones of X-rays are formed with their shared vertex point inside a sample or a subject.
Our main results state that we can use boundary averaged measurements only and the problem is reduced to an inversion of a weighted linear transform determined by the excitation. For example, if the excitation consists of single X-rays, in Section~\ref{sec5}, we get a weighted X-ray transform with a weight depending on the diffusion and the absorption coefficients $D$ and $\mu_a$ of the medium with respect to the emitted photons, see \r{M1} below.
In the case of double cone excitation (XMLT) with a constant aperture, or in XLCT, we have an explicit and stable inversion under an explicit if and only if condition \r{stab_cond}, see Theorem~\ref{thm4.1} and Section~\ref{sec4}. We show that the local problem in a region of interest behaves similarly to the corresponding X-ray problems, and we have explicit microlocal inversions of the visible singularities.
\subsection{The model}
Let $\Omega\subset {\bf R}^n$ be a smooth domain.
We illuminate the medium in different ways. Each illumination excites the particles at a rate $I_\alpha(x)$, which can be a distribution, where $\alpha$ belongs to some index set $\mathcal{A}$, discrete or not. In the X-ray case, $I_\alpha(x)$ is just a superposition of delta functions along straight lines, which also allows for attenuated X-rays by introducing an appropriate weight. The structure of $I_\alpha$, XLCT or XMLT or something else, see Section~\ref{sec4} and Section~\ref{sec5}, is not important at the moment.
The modulated source then is
\be{S}
s_\alpha(x) = I_\alpha(x)f(x),
\end{equation}
where $f$ is the concentration of the active particles, which we want to reconstruct. We assume that the light from the source propagates according to the diffusion model \cite{Welch-book}
\be{M1}
\left(-\nabla\cdot D\nabla +\mu_a\right) u_\alpha= s_\alpha (x) \quad \text{in $\Omega$}.
\end{equation}
Here, $\mu_a(x)\ge0$ is the absorption, $D(x)>0$ is the diffusion coefficient,
and $u_\alpha$ is the photon density. The coefficients $\mu_a$ and $D$ are given. In the diffusion approximation regime, $D(x)= [3(\mu_a+\mu'_s)]^{-1}>0$, where $\mu_s'=(1-g)\mu_s$ is the reduced scattering coefficient, $\mu_s$ is the scattering coefficient and $g$ is the scattering anisotropy.
The boundary conditions are of semi-transparent type due to the different index of refraction of the tissue and the air around it. They are of Robin type and have the form
\be{M2}
u_\alpha+2AD\partial_\nu u_\alpha \big|_{{\partial \Omega}}=0,
\end{equation}
where $A>0$ is a given coefficient, well approximated by $A=(1+R)/(1-R)$ with $R$ closely approximated by $R= -1.4399m^{-2} + 0.7099m^{-1}+ 0.6681+0.063m$ \cite{Schweiger95}, where $m$ is the refractive index of the tissue.
Let $G$ be the solution operator (the Green's function) of \r{M1}, \r{M2}, i.e., $u_\alpha = Gs_\alpha = GI_\alpha f$.
What we measure is the outgoing photon density
\be{M3}
Q_\alpha f= -D\partial_\nu u_\alpha \big|_{{\partial \Omega}} =\frac1{2A} u_\alpha |_{{\partial \Omega}} =\frac1{2A} (GI_\alpha f)|_{{\partial \Omega}}.
\end{equation}
The data therefore is
\[
\{Q_\alpha f\}_{\alpha\in\mathcal{A}},
\]
and we want to reconstruct $f$.
For each illumination choice, $\alpha$, the data $Q_\alpha$ is a function of $n-1$ variables. When $\alpha$ runs over a continuous space with dimension $m$, we get $m+n-1$ variables. This could make the problem formally overdetermined. On the other hand, because of the diffusion nature of the data for a fixed $\alpha$, we cannot have much resolution hidden in that $Q_\alpha$. The only way to get resolution (i.e., stability) is to have a well chosen set of illuminations $I_\alpha$, $\alpha\in\mathcal{A}$, which have enough singularities. We show below that we do not actually need to know or measure $Q_\alpha$ pointwise; some average over ${\partial \Omega}$ is enough for a stable recovery. This removes $n-1$ variables from the data.
\section{The first phase: recovery of an averaged intensity.}
The operator $L:= -\nabla\cdot D\nabla +\mu_a$ is symmetric and positive on smooth functions satisfying the Robin boundary condition \r{M2} because
\be{L}
\begin{split}
\int_\Omega (Lu)\bar u\,\mathrm{d} x &= \int_\Omega\left( D|\nabla u|^2+\mu_a|u|^2 \right)\mathrm{d} x - \int_{\partial \Omega} D(\partial_\nu u)\bar u\,\mathrm{d}\sigma\\
&= \int_\Omega\left( D|\nabla u|^2+\mu_a|u|^2 \right)\mathrm{d} x +\int_{\partial \Omega} \frac{1}{2A} |u|^2\,\mathrm{d}\sigma,
\end{split}
\end{equation}
where $\mathrm{d} \sigma$ is the surface measure. By
Green's formula, for such $u$ and $v$,
\be{M4}
\begin{split}
\int_\Omega \left( vLu -uLv\right)\,\mathrm{d} x &= -\int_{{\partial \Omega}} \left( v\partial_\nu u - u\partial_\nu v \right)D\,\mathrm{d} \sigma\\
&= -\int_{{\partial \Omega}} \big( (v +2AD\partial_\nu v) \partial_\nu u - (u +2AD\partial_\nu u )\partial_\nu v \big)D\,\mathrm{d} \sigma =0.
\end{split}
\end{equation}
Therefore, $L$ with the Robin boundary conditions is symmetric and positive by \r{L}.
It is well known that $L$ extends to a self-adjoint operator on $L^2(\Omega)$ with a compact resolvent. In particular, $0$ is in the resolvent set by \r{L}. This shows that the Green's function $G$ is well-defined, and self-adjoint on $L^2(\Omega)$. Moreover, the boundary-value problem \r{M6} below is well posed.
If $Lv_\alpha=0$, and $u_\alpha$ solves \r{M1}, \r{M2}, we get (we drop the index $\alpha$ in this formula)
\be{M5}
\begin{split}
\int_\Omega vs\,\mathrm{d} x &= -\int_{{\partial \Omega}} \left( v\partial_\nu u - u\partial_\nu v \right)D\,\mathrm{d} \sigma= -\int_{\partial \Omega} \left( v +2AD \partial_\nu v \right)D\partial_\nu u \,\mathrm{d} \sigma \\
&= \int_{{\partial \Omega}} \left( v +2AD \partial_\nu v \right)Qf \,\mathrm{d} \sigma.
\end{split}
\end{equation}
This suggests the following. Choose $h_\alpha$ and let $v_\alpha:=Vh_\alpha$ solve
\be{M6}
\begin{split}
\left(-\nabla\cdot D\nabla +\mu_a\right) v_\alpha&= 0 \quad \text{in $\Omega$},\\
v_\alpha+2AD\partial_\nu v_\alpha\big|_{{\partial \Omega}}&=h_\alpha.
\end{split}
\end{equation}
Then, by \r{M5},
\be{M7}
\int_\Omega v_\alpha I_\alpha f \,\mathrm{d} x = \int_{{\partial \Omega}}h_\alpha Q_\alpha f\,\mathrm{d} \sigma.
\end{equation}
In other words, choose $h_\alpha$ somehow; then we can recover $\int_\Omega\! (Vh_\alpha)I_\alpha f\,\mathrm{d} x$ from the data. This requires us to solve \r{M6} first, but this can be done numerically in a very efficient way. If $D$ and $\mu_a$ are constants, and $\Omega$ is a circle or a rectangle, it can be done explicitly, as we show below. This gives us a way to compute an averaged value of $I_\alpha f$, with a weight $v_\alpha$ depending on the choice of $h_\alpha$. If $\mu_a=0$, one can take $h_\alpha=1$ and then $v_\alpha =1$.
We would want to have positive solutions $v>0$, and we do not want $v\ll1$ because we will divide by it eventually. Positivity can be guaranteed by the maximum principle if $h>0$, since $0<v<\max_{{\partial \Omega}}h$.
The lower bound of $v$ could be very small if the attenuation $\mu_a$ is large, and that would create stability problems; but this is natural to expect.
Note that we do not need to solve the boundary value problem \r{M6} to find $v$. We could just take any solution $v$ of $(-\nabla\cdot D\nabla +\mu_a)v= 0$ and compute $h= (v+2AD\partial_\nu v)\big|_{{\partial \Omega}}$ next. If $D$ and $\mu_a$ are constants, we can generate many explicit solutions, regardless of the shape of the domain.
Finally, we want to emphasize that we cannot recover $s_\alpha= I_\alpha f$ from \r{M7} known for all $h_\alpha$; for example, for any $\phi\in C_0^\infty(\Omega)$, adding $L\phi$ to $I_\alpha f$ would not change the left-hand side of \r{M7}.
We consider the constant coefficient cases below and continue with the general case in the next section.
\subsection{Partial cases: $D$ and $\mu_a$ constants}
Assume now that $D>0$ and $\mu_a\ge0$ are constants.
\subsection{$n=3$, $\Omega$ a ball}
Let $n=3$. The Green's function of $L=-D\Delta+\mu_a$ is then given by
\be{MG}
G_k(x,y) = \frac{e^{-k|x-y|}}{4\pi D|x-y|},
\end{equation}
with $k=\sqrt{\mu_a/D}$, i.e., $(-D\Delta_x+\mu_a) G_k(x,y)=\delta(x-y)$.
Then $G_{-k}$ is a Green's function as well, and the difference is in the kernel of $L$. Therefore,
\[
(-D\Delta_x+\mu_a)\frac{\sinh\left(k|x-y|\right)}{|x-y|}=0.
\]
Therefore, we can choose
\be{Mv}
v(x) = \frac{\sinh\left(k|x-y|\right)}{|x-y|}
\end{equation}
with $y$ arbitrary but fixed. Then compute $h$ from \r{M6}.
If $\Omega$ is the ball $\Omega= \{x|\;|x|\le a\}$, then we can choose $y=0$ to get
\be{M8a}
v(x) = \frac{\sinh{(k|x|)}}{|x|}, \quad
h= a^{-1}\sinh(ka)+ 2AD\left(ka^{-1}\cosh(ka)- a^{-2}\sinh(ka)\right).
\end{equation}
Note that $h$ is just a constant on the boundary $|x|=a$.
Then by \r{M7}, we can recover
\[
\int_{|x|\le a}I_\alpha(x)f(x)\frac{\sinh{(k|x|)}}{|x|}\, \mathrm{d} x.
\]
When $ka\gg1$, the function $v$ is very small near $0$ compared to its boundary values, which means poor recovery of $s_\alpha$ there (we do not actually recover $s_\alpha$ here, just a weighted integral of it). That can be expected though --- signals coming from the center would be attenuated the most.
\subsection{$n=3$, $\Omega$ arbitrary} One can still use $v$ as in \r{M8a} (or, as in \r{Mv}, see the remark above); then $h$ needs to be calculated by the second equation in \r{M6}.
\subsection{$n=2$, $\Omega$ a disk} In this case, the fundamental solution involves the Bessel function $K_0$ and is $K_0(k|x-y|)$. Note that there is a weak (logarithmic) singularity at $x=y$. This is equivalent to \r{MG} in the 3D case. One can then take
\[
v(x) = I_0(k|x|),
\]
where $I_0$ is a Bessel function again, and there is no singularity. This is equivalent to taking $\sinh(k|x|)/|x|$ in 3D. For $h$ on the circle $|x|=a$ (the boundary of the disk), we get
\[
h= I_0(ak)+ 2ADkI_1(ak),
\]
which is a constant again.
\section{Reducing the problem to the invertibility of a weighted version of the illumination transform}\label{sec3}
As we showed above, we cannot recover the source $s_\alpha =I_\alpha f$ from \r{M7}. We are not trying to reconstruct $s_\alpha$ however.
We view \r{M7} as a linear integral transform
\be{M8}
R : f \longmapsto Rf(\alpha) := \int_\Omega v_\alpha I_\alpha f\, \mathrm{d} x, \quad \alpha\in \mathcal{A}.
\end{equation}
We can write equation \r{M7} then as
\be{Q1}
Rf(\alpha) = \int_{\partial \Omega} h_\alpha Q_\alpha f\,\mathrm{d}\sigma.
\end{equation}
The right-hand side is determined by the data.
The goal then is to invert $R$. Recall that we have some freedom to choose $v_\alpha=Vh_\alpha$, and in particular, we can choose them independently of $\alpha$. Then $v$ just multiplies $f$, and we need to invert the transform with kernel $I_\alpha(x)$; and then divide by $v$.
We consider two main examples below: when $R$ is a dual cone transform modeling the XMLT; and when $R$ is a weighted X-ray transform, which models the XLCT.
\section{Double cone excitation (XMLT)}\label{sec4}
\subsection{Formulation}
We work in ${\bf R}^n$ but the interesting case is ${\bf R}^3$.
The idea of the XMLT is to focus X-rays, thus forming a double cone,
at each point $x$ in some region of interest (ROI) at $N$ different directions $\theta_j$, $j=1,\dots, N$. Then
\[
\alpha=(x,j)\in \Omega\times \{1,2,\dots,N\}.
\]
We model the cones with their aperture functions $a_{x,j}(\theta)$, $\theta\in S^{n-1}$; in other words, $I_\alpha(x)$ is a superposition of X-rays with density $a_{x,j}$.
Then the corresponding intensity $I_{x,j}(y)$ is given by
\[
I_{x,j}(y) = \frac{ a_{x,j}\big(\frac{x-y}{|x-y|}\big)}{|x-y|^{n-1}}.
\]
For example, if we assume uniform angular density, then $a_{x,j}$ would be constant on the intersection of the interior of the cone with the unit sphere, and zero otherwise. We assume that the cones are double; then $a_{x,j}$ are even functions on the sphere. The simplest case is when $a_{x,j}$ is independent of $x$, and $a_{x_1,j_1}$ is obtained from $a_{x_2,j_2}$ by a translation in the $x$ variable and a rotation in the angular one.
By \r{S}, the transform $R$ becomes
\be{X2a}
Rf(x,j) =\int_\Omega\frac{ a_{x,j}\big(\frac{x-y}{|x-y|}\big)}{|x-y|^{n-1}}v_{j,x}(y)f(y)\,\mathrm{d} y.
\end{equation}
If $a_{x,j}$ are smooth functions, then $R$ is a $\Psi$DO\ of order $-1$ with principal symbol
\[
r_j(x,\xi) :=
\pi \int_{S^{n-1}}a_{x,j}(\theta) v_{j,x}(x) \delta (\xi\cdot\theta)\,\mathrm{d}\theta,
\]
see, e.g., \cite{FSU}. Here, $\delta$ is the Dirac delta ``function''; therefore, the integral is taken over the sphere of co-dimension two (the great circle if $n=3$, two points if $n=2$) of the unit sphere intersected with the plane normal to $\xi$.
Note that the symbol above is homogeneous of order $-1$. This shows that the ``visible'' set of singularities $\mathcal{U}\subset T^*\Omega\setminus 0 $ is where the system $\{r_j\}_{j=1}^N$ is elliptic, i.e.,
\be{U}
\mathcal{U} = \{ (x,\xi)\in T^*\Omega\setminus 0;\; \exists j, \theta\perp\xi\; \text{so that}\; a_{x,j}(\theta)\not=0 \}.
\end{equation}
\subsection{Recovery of the singularities in a region of interest} If $\Omega_0\subset\Omega$ is an open subset (a region of interest), and if $ a_{x,j}$ are smooth, we can recover the singularities of $f$ there, if the condition for $\mathcal{U}$ in \r{U} is satisfied for any $x\in\Omega_0$, i.e., if
\be{stab_cond}
T^*\Omega_0\setminus 0\subset\mathcal{U}.
\end{equation}
This can be done constructively as follows.
\begin{itemize}
\item For any $j$ and $x\in \Omega_0$, choose a smooth function $0<h_{j,x}$ on ${\partial \Omega}$ and compute $v_{j,x}=Vh_{j,x}$ by solving \r{M6} with $h_\alpha =h_{j,x}$. This determines the transform $R$.
\item Compute the right-hand side of \r{Q1}
for $x\in\Omega_0$.
\item In \r{Q1}, apply a left parametrix for the matrix operator $f\mapsto Rf(j,\cdot)$ in $\Omega_0$.
\end{itemize}
One way to construct a parametrix of order $1$ is to construct a $\Psi$DO\ with the following principal symbol
\[
q_j := \Big (\sum_kr_k^2\Big)^{-1} r_j,
\]
and then $\sum_j q_j(x,D)Rf(j,x)=f+Kf$, where $K$ is of order $-1$. If $a_{x,j}\ge0$, one can use instead
\[
q := \Big (\sum_k r_k\Big)^{-1}
\]
and then $ q(x,D)\sum_j Rf(j,x)=f+Kf$ with $K$ as above.
Note that we can simply choose $h_{j,x}$ independent of ${j,x}$.
The aperture functions $a_{x,j}$ only need to be smooth of finite order to recover the leading singularities, like jumps across surfaces. In practical implementations, this condition can be satisfied approximately by introducing vignetting near the edges of the cones. Even without applying the operator $q(x,D)$, the sum $\sum_j Rf(j,x)$ would recover all singularities in the correct places but with varying amplitudes.
When the stability condition \r{stab_cond} does not hold, we can still reconstruct the visible singularities in $\mathcal{U}$ stably by a microlocal inversion, if $a_{x,j}$ are smooth but not the ones in the complement of $\bar{\mathcal{U}}$, see, e.g., \cite{SU-JFA09}.
\subsection{Explicit global inversion when the apertures are translation-independent}
Assume now that the aperture functions are non-negative and independent of $x$, i.e., they are given by $a_j(\theta)$. The stability condition \r{stab_cond} then takes the following form, see \r{U}:
\be{U2}
\text{Any plane through the origin intersects at least one of the cones $\{\theta;\; a_j(\theta)>0\}$.}
\end{equation}
Assume also that $h_{j,x}$ are independent of $j$ and $x$.
The construction above then simplifies as follows. We have
\[
r_j(x,\xi) := r^0_j(\xi)v(x), \quad r^0_j(\xi):= \pi
\int_{S^{n-1}}a_j(\theta) \delta (\xi\cdot\theta)\,\mathrm{d}\theta,
\]
and $r_j(\xi)v(y)$ is actually the full (not just the principal symbol) amplitude of $f\mapsto Rf(j,\cdot)$. Symbols independent of $x$ are Fourier multipliers, i.e., $r_j(D)= \mathcal{F}^{-1}r_j\mathcal{F}$, where $\mathcal{F}$ is the Fourier transform.
Then $R_j= r^0_j(D)v$ with $v$ regarded as a multiplication, where we changed the notation for $R$ since $\alpha=j$ now. In integral form,
\[
R_jf(x) = \int_\Omega\frac{ a_j\big(\frac{x-y}{|x-y|}\big)}{|x-y|^{n-1}}v(y)f(y)\,\mathrm{d} y.
\]
Note that $r_j^0(D)$ makes sense even if $a_j$ is $L^\infty$ only.
Equation \r{Q1} takes the form
\[
R_jf (x)= \int_{\partial \Omega} h Q_{j,x}\,\mathrm{d}\sigma.
\]
When the stability condition \r{U2} holds, this equation can be solved explicitly as follows
\be{f}
f = \frac1v \Big(\sum_j r_j(D) \Big)^{-1}\sum_j \int_{\partial \Omega} h Q_{j,x}\,\mathrm{d}\sigma.
\end{equation}
We summarize this in the following:
\begin{theorem}\label{thm4.1}
Let the aperture functions $0 \le a_j(\theta)\in L^\infty$ be independent of the point $x$ where we focus. Choose $0<h\in L^\infty({\partial \Omega})$ and let $v=Vh$ be the solution of \r{M6} corresponding to that $h$. If the stability condition \r{U2} holds, then $f$ can be explicitly computed by \r{f}.
Moreover,
\[
\|f\|_{L^2(\Omega)} \le C\frac{\sup_{{\partial \Omega}}h}{\inf_\Omega v}\Big\| \int_{\partial \Omega} \sum_j Q_{j,x}\,\mathrm{d}\sigma \Big\|_{H^1(\Omega_x)}.
\]
\end{theorem}
The subscript $x$ in $\Omega_x$ indicates that the $H^1$ norm is taken w.r.t.\ the variable $x$.
\begin{remark}\label{rem1}
One way to satisfy the stability condition \r{U2} is to make sure that the union of the open sets $\{a_j>0\}$ where the apertures do not vanish covers the equator of the unit sphere (or any great circle fixed in advance). This is the situation in the numerical example below. If the apertures $\{a_j>0\}$ are too small, then there will be a large variation between the minimum and the maximum of the symbol $q$ below, which may lead to a mild instability; the most stable singularities would be the vertical ones.
\end{remark}
\section{X-ray excitation (XLCT)} \label{sec5}
In the X-ray excitation case, we send individual X-rays through the medium. If the direction is $\theta\in S^{n-1}$, and we parameterize lines in this direction by initial points $z$ on $ \theta^\perp =\{x;\; x\cdot\theta=0\}$, we have
\[
\alpha = (\theta,z), \quad \theta\in S^{n-1}, \; z\in \theta^\perp.
\]
In other words, we identify the directed lines in $\mathbf{R}^n$ with such $\alpha$'s. Then $I_\alpha$ is just a delta function on the line parameterized by $\alpha$. Therefore, $R$ is just the weighted X-ray transform with weight $h_{\theta,z}$. Assume now that $h$ is chosen independently of the line parameterized by $(\theta,z)$. Then
\[
Rf= R_0vf, \quad R_0g(\theta,z) = \int g(z+t\theta)\,\mathrm{d} t.
\]
Equation \r{Q1} takes the form
\[
(R_0vf)(\theta,z)= \int_{\partial \Omega} hQ_{\theta,z}\,\mathrm{d}\sigma.
\]
In other words, each measurement gives us an integral of $vf$ over a single line.
The function $f$ can then be found by inverting the X-ray transform $R_0$ and then dividing by $v$. The problem with a subset of lines and microlocal recovery in a ROI is the same as with the X-ray transform.
Note that in practice, diffraction and other engineering challenges limit the ability to concentrate the radiation too closely along a single line thus limiting the resolution.
\section{Numerical simulations}
A spherical phantom of radius 10 mm was employed for the numerical simulation. The phantom is assigned with optical parameters: absorption coefficient $\mu_a=0.05$ mm$^{-1}$, scattering coefficient $\mu_s=15.0$ mm$^{-1}$, anisotropic coefficient $g=0.9$, and relative refractive index of $1.37$. The phantom was discretized into $65,775$ tetrahedral elements and $12,044$ nodes. A total of $2,108$ virtual detectors were distributed over the phantom surface to record the photon fluence rates. Then two light sources of radius $1.0$ mm were embedded into the phantom, and filled with nanophosphors of concentrations of $5$ $\mu$g/mL and $10$ $\mu$g/mL, respectively. The centers of the two light sources were at $(2.5, 2.5, 0.0)$ and $(3.5, 0.0, 0.0)$. Two spherical subregions are nearly connected to test the spatial resolution. Using a polycapillary lens, X-ray beams are reshaped to double cone with a cone angle of $19.2$ degrees. The focal point of the X-ray is scanned at grid points in a region of interest $\text{ROI} = \{ -2<x<5,\; -2<y<5,\; -2<z<2 \} $ along a scanning direction. Then, the phantom was rotated with $10$ equal angles over a $360$ degrees range to acquire sufficient information for an improved stability of the reconstruction.
The stability condition \r{U2} then holds, see Remark~\ref{rem1}.
At each scanning, the intensity of the photon luminescence on the surface of the phantom was acquired. The intensity describes the optical emission of nanophosphors with double cone excitation.
Poisson noise was added to the synthetic data for the simulation of measurements. After discretization, the LSQR method \cite{Paige:1982} was used to solve the resulting system, in order to reconstruct the nanophosphor distribution from simulation data. The reconstructed results are in excellent agreement with the true phantom, and the average relative error of the reconstructed nanophosphor concentration was less than $5.37\%$, which was defined as
\[
\text{Error} = \frac1{ \#\{i|\; \rho_i^T>\varepsilon \}} \sum_{\rho_i^T>\varepsilon} \frac{\rho_i^r-\rho_i^T }{ \rho_i^T} ,
\]
where $ \rho_i^T $ and $\rho_i^r$ are the true and reconstructed nanoparticle concentrations, respectively, and $\varepsilon$ is a background noise level. Figures~\ref{pic1}--\ref{pic2} present the comparison between the true and reconstructed nanophosphor distribution, showing the quantification accuracy of the image reconstructions.
\begin{figure}
\caption{Modulated luminescence tomography simulation. Left: the true nanophosphor distribution in the phantom. Right: the reconstructed nanophosphor distribution.
The different colors here do not represent different values. }
\label{pic1}
\end{figure}
\begin{figure}
\caption{Modulated luminescence tomography simulation. Left: The true nanophosphor distribution on the 2D slice $z=0$. Right: the reconstructed nanophosphor distribution on the 2D slice $z=0$. }
\label{pic2}
\end{figure}
\end{document} |
\begin{document}
\hypersetup{linkcolor=blue}
\date{\today}
\author{Ray Bai\footremember{UofSC}{Department of Statistics, University of South Carolina, Columbia, SC 29208. Email: \href{mailto:RBAI@mailbox.sc.edu}{\tt RBAI@mailbox.sc.edu}},
Veronika Ro\v{c}kov\'{a}\footremember{Booth}{Booth School of Business, University of Chicago, Chicago, IL, 60637. E-mail: \href{mailto:Veronika.Rockova@chicagobooth.edu}{\tt Veronika.Rockova@chicagobooth.edu}}, Edward I. George\footremember{Wharton}{Department of Statistics, The Wharton School, University of Pennsylvania, Philadelphia, PA, 19104. E-mail: \href{mailto:edgeorge@wharton.upenn.edu}{\tt edgeorge@wharton.upenn.edu} } }
\title{Spike-and-Slab Meets LASSO: A Review of the Spike-and-Slab LASSO \thanks{Keywords and phrases:
{high-dimensional data},
{sparsity},
{spike-and-slab},
{spike-and-slab LASSO},
{variable selection}
}
}
\maketitle
\begin{abstract}
\noindent High-dimensional data sets have become ubiquitous in the past few decades, often with many more covariates than observations. In the frequentist setting, penalized likelihood methods are the most popular approach for variable selection and estimation in high-dimensional data. In the Bayesian framework, spike-and-slab methods are commonly used as probabilistic constructs for high-dimensional modeling. Within the context of linear regression, \cite{RockovaGeorge2018} introduced the spike-and-slab LASSO (SSL), { an approach based on a prior which provides a continuum between the penalized likelihood LASSO and the Bayesian point-mass spike-and-slab formulations}. Since its inception, the spike-and-slab LASSO has been extended to a variety of contexts, including generalized linear models, factor analysis, graphical models, and nonparametric regression. The goal of this paper is to survey the landscape surrounding spike-and-slab LASSO methodology. First we elucidate the attractive properties and the computational tractability of SSL priors in high dimensions. We then review methodological developments of the SSL and outline several theoretical developments. We illustrate the methodology on both simulated and real datasets.
\end{abstract}
\section{Introduction} \label{Intro}
High-dimensional data are now routinely analyzed. In these settings, one often wants to impose a low-dimensional structure such as sparsity. For example, in astronomy and other image processing contexts, there may be thousands of noisy observations of image pixels, but only a small number of these pixels are typically needed to recover the objects of interest \cite{JohnstoneSilverman2004,JohnstoneSilverman2005}. In genetic studies, scientists routinely observe tens of thousands of gene expression data points, but only a few genes may be significantly associated with a phenotype of interest. For example, \cite{WellcomeTrust2007} has confirmed that only seven genes have a non-negligible association with Type I diabetes. Among practitioners, the main objectives in these scenarios are typically: a) identification (or \textit{variable selection}) of the non-negligible variables, and b) \textit{estimation} of their effects.
A well-studied model for sparse recovery in the high-dimensional statistics literature is the normal linear regression model,
\begin{equation} \label{linearregression}
\boldsymbol{y} = \boldsymbol{X} \boldsymbol{\beta} + \boldsymbol{\varepsilon}, \hspace{.5cm} \boldsymbol{\varepsilon} \sim \mathcal{N}_n (\boldsymbol{0}, \sigma^2 \boldsymbol{I}_n ),
\end{equation}
where $\boldsymbol{y} \in \mathbb{R}^{n}$ is a vector of $n$ responses, $\boldsymbol{X} = [ \boldsymbol{x}_1, \ldots, \boldsymbol{x}_p] \in \mathbb{R}^{n \times p}$ is a design matrix of $p$ potential covariates, $\boldsymbol{\beta} = (\beta_1, \ldots, \beta_p)^T$ is a $p$-dimensional vector of unknown regression coefficients, and $\boldsymbol{\varepsilon}$ is the noise vector. When $p > n$, we often assume that most of the elements in $\boldsymbol{\beta}$ are zero or negligible. Under this setup, there have been a large number of methods proposed for selecting and estimating the active coefficients in $\boldsymbol{\beta}$. In the frequentist framework, penalized likelihood approaches such as the least absolute shrinkage and selection operator (LASSO) \cite{Tibshirani1996} are typically used to achieve sparse recovery for $\boldsymbol{\beta}$. In the Bayesian framework, spike-and-slab priors are a popular approach for sparse modeling of $\boldsymbol{\beta}$.
The spike-and-slab LASSO (SSL), introduced by \cite{RockovaGeorge2018}, forms a continuum between these penalized likelihood and spike-and-slab constructs. The spike-and-slab LASSO methodology has experienced rapid development in recent years, and its scope now extends well beyond the normal linear regression model \eqref{linearregression}. The purpose of this paper is to offer a timely review of the SSL and its many variants. We first provide a basic review of frequentist and Bayesian approaches to high-dimensional variable selection and estimation under the model \eqref{linearregression}. We then review the spike-and-slab LASSO and provide an overview of its attractive properties and techniques to implement it within the context of normal linear regression. Next, we review the methodological developments of the SSL and some of its theoretical developments.
\section{Variable selection in high dimensions: Frequentist and Bayesian strategies} \label{FrequentistVsBayes}
We first review the frequentist penalized regression and the Bayesian spike-and-slab frameworks before showing how the spike-and-slab LASSO bridges the gap between them.
\subsection{Penalized likelihood approaches} \label{PenalizedLikelihood}
In the frequentist high-dimensional literature, there have been a variety of penalized likelihood approaches proposed to estimate $\boldsymbol{\beta}$ in \eqref{linearregression}. A variant of the penalized likelihood approach estimates $\boldsymbol{\beta}$ with
\begin{equation} \label{penalizedlikelihood}
\widehat{\boldsymbol{\beta}} = \argmax_{\boldsymbol{\beta} \in \mathbb{R}^{p}} - \frac{1}{2} \lVert \boldsymbol{y} - \boldsymbol{X} \boldsymbol{\beta} \rVert_2^2 + \textrm{pen}_{\lambda} (\boldsymbol{\beta}),
\end{equation}
where $\textrm{pen}_{\lambda} (\boldsymbol{\beta})$ is a penalty function indexed by penalty parameter $\lambda$. Most of the literature has focused on penalty functions which are separable, i.e. $\textrm{pen}_{\lambda} (\boldsymbol{\beta}) = \sum_{j=1}^{p} \rho_{\lambda} (\beta_j)$. In particular, the popular least absolute shrinkage and selection operator (LASSO) penalty of \cite{Tibshirani1996} uses the function $\rho_{\lambda} (\beta_j) = - \lambda \lvert \beta_j \rvert$. Besides the LASSO and its many variants \cite{BelloniChernozhukovWang2011,SunZhang2012,ZhangZhang2014,Zou2006,ZouHastie2005}, other popular choices for $\rho_{\lambda} (\cdot)$ include non-concave penalty functions, such as the smoothly clipped absolute deviation (SCAD) penalty \cite{FanLi2001} and the minimax concave penalty (MCP) \cite{Zhang2010}. All of the aforementioned penalties threshold some coefficients to zero, thus enabling them to perform variable selection and estimation simultaneously. In addition, SCAD and MCP also mitigate the well-known estimation bias of the LASSO.
Any penalized likelihood estimator \eqref{penalizedlikelihood} also has a Bayesian interpretation in that it can be seen as a posterior mode under an independent product prior $p(\boldsymbol{\beta} \,|\:\lambda) = \prod_{j=1}^{p} p(\beta_j \,|\:\lambda)$, where $\textrm{pen}_{\lambda} (\boldsymbol{\beta}) = \log p (\boldsymbol{\beta} \,|\:\lambda) = \sum_{j=1}^{p} \log p (\beta_j \,|\:\lambda)$. In particular, the solution to the LASSO is equivalent to the posterior mode under a product of Laplace densities indexed by hyperparameter, $\lambda$:
\begin{align} \label{bayesianLASSO}
p ( \boldsymbol{\beta} \,|\:\lambda) = \prod_{j=1}^{p} \frac{\lambda}{2} e^{- \lambda \lvert \beta_j \rvert }.
\end{align}
This prior, known as the Bayesian LASSO, was first introduced by \cite{ParkCasella2008}. In \cite{ParkCasella2008}, both fully Bayes and empirical Bayes procedures were developed to tune the hyperparameter $\lambda$ in \eqref{bayesianLASSO}. The fully Bayes approach of placing a prior on $\lambda$, in particular, renders the Bayesian LASSO penalty \textit{non}-separable. Thus, the fully Bayesian LASSO has the added advantage of being able to share information across different coordinates. Despite this benefit, \cite{RockovaGeorge2016Abel} showed that the fully Bayesian LASSO cannot simultaneously adapt to sparsity \textit{and} avoid the estimation bias issue of the original LASSO. In addition, \cite{GhoshTangGhoshChakrabarti2016} showed that the univariate Bayesian LASSO often undershrinks negligible coefficients, while overshrinking large coefficients. Finally, \cite{CastilloSchmidtHieberVanDerVaart2015} also proved that the posterior under the Bayesian LASSO contracts at a suboptimal rate. In Sections \ref{SpikeAndSlabLASSO}-\ref{Illustration}, we will illustrate how the \textit{spike-and-slab LASSO} mitigates these issues.
In addition to the spike-and-slab LASSO, other alternative priors have also been proposed to overcome the limitations of the Bayesian LASSO \eqref{bayesianLASSO}. These priors, known as global-local shrinkage (GL) priors, place greater mass around zero and have heavier tails than the Bayesian LASSO. Thus, GL priors shrink small coefficients more aggressively towards zero, while their heavy tails prevent overshrinkage of large coefficients. Some examples include the normal-gamma prior \cite{GriffinBrown2010}, the horseshoe prior \cite{CarvalhoPolsonScott2010}, the generalized double Pareto prior \cite{ArmaganDunsonLee2013}, the Dirichlet-Laplace prior \cite{BhattacharyaPatiPillaiDunson2015}, and the normal-beta prime prior \cite{BaiGhosh2021}. We refer the reader to \cite{BhadraDattaPolsonWillard2019} for a detailed review of GL priors.
\subsection{Spike-and-slab priors} \label{sspriors}
In the Bayesian framework, variable selection under the linear model \eqref{linearregression} arises directly from probabilistic considerations and has frequently been carried out through placing spike-and-slab priors on the coefficients of interest. The spike-and-slab prior was first introduced by \cite{MitchellBeauchamp1988} and typically has the following form,
\begin{equation} \label{pointmassspikeandslab}
\begin{array}{rl}
p (\boldsymbol{\beta} \,|\:\boldsymbol{\gamma}, \sigma^2 ) = & \displaystyle \prod_{j=1}^{p} \left[ (1-\gamma_j) \delta_0 (\beta_j) + \gamma_j p (\beta_j \,|\:\sigma^2 ) \right], \\
p (\boldsymbol{\gamma} \,|\:\theta) = & \displaystyle \prod_{j=1}^{p} \theta^{\gamma_j} (1-\theta)^{1-\gamma_j}, \hspace{.5cm} \theta \sim p(\theta), \\
\sigma^2 \sim & p(\sigma^2),
\end{array}
\end{equation}
where $\delta_0$ is a point mass at zero used to model the negligible entries (the ``spike''), $p( \beta_j \,|\:\sigma^2)$ is a diffuse and/or heavy-tailed density (rescaled by the variance $\sigma^2$) to model the non-negligible entries (the ``slab''), $\boldsymbol{\gamma}$ is a binary vector that indexes the $2^p$ possible models, and $\theta \in (0,1)$ is a mixing proportion. The error variance $\sigma^2$ is typically endowed with a conjugate inverse gamma prior or an improper Jeffreys prior, $p (\sigma^2) \propto \sigma^{-2}$. With a well-chosen prior on $\theta$, this prior \eqref{pointmassspikeandslab} also automatically favors parsimonious models in high dimensions, thus avoiding the curse of dimensionality.
The point-mass spike-and-slab prior \eqref{pointmassspikeandslab} is often considered ``theoretically ideal,'' or a ``gold standard'' for sparse Bayesian problems \cite{CarvalhoPolsonScott2009,PolsonSun2019,Rockova2018}. In high dimensions, however, exploring the full posterior over the entire model space using point-mass spike-and-slab priors \eqref{pointmassspikeandslab} can be computationally prohibitive, in large part because of the combinatorial complexity of updating the discrete indicators $\boldsymbol{\gamma}$. There has been some work to mitigate this issue by using either shotgun stochastic search (SSS) \cite{BottoloRichardson2010,HansDobraWest2007} or variational inference (VI) \cite{RaySzabo2020} to quickly identify regions of high posterior probability.
As an alternative to the point-mass spike-and-slab prior, fully continuous spike-and-slab models have been developed. In these continuous variants, the point-mass $\delta_0$ in \eqref{pointmassspikeandslab} is replaced by a continuous density that is heavily concentrated about zero. The first such continuous relaxation was made by \cite{GeorgeMcCulloch1993}, who used a normal density with very small variance for the spike and a normal density with very large variance for the slab. Specifically, the prior for $\boldsymbol{\beta}$ in \cite{GeorgeMcCulloch1993} is
\begin{equation} \label{spikeandslabnormals}
p(\boldsymbol{\beta} \,|\:\boldsymbol{\gamma}, \sigma^2) = \prod_{j=1}^{p} \left[ (1-\gamma_j) \mathcal{N}(0, \sigma^2 \tau_0^2) + \gamma_j \mathcal{N}(0, \sigma^2 \tau_1^2) \right],
\end{equation}
where $0 < \tau_0^2 \ll \tau_1^2$. \cite{GeorgeMcCulloch1993} developed a stochastic search variable selection (SSVS) procedure based on posterior sampling with Markov chain Monte Carlo (MCMC) and thresholding the posterior inclusion probabilities, $\Pr (\gamma_j = 1 \,|\:\boldsymbol{y}), j=1, \ldots, p$. In practice, the ``median thresholding'' rule \cite{BarbieriBerger2004}, i.e. $\Pr (\gamma_j = 1 \,|\:\boldsymbol{y}) > 0.5, j=1, \ldots, p$, is often used to perform variable selection. \cite{IshwaranRao2005, NarisettyHe2014} further extended the model \eqref{spikeandslabnormals} by rescaling the variances $\tau_0^2$ and $\tau_1^2$ with sample size $n$ in order to better control the amount of shrinkage for each individual coefficient.
To further reduce the computational intensiveness of SSVS, a deterministic optimization procedure called EM variable selection (EMVS) was developed by \cite{RockovaGeorge2014}. The EMVS procedure employs \eqref{spikeandslabnormals} as the prior for $\boldsymbol{\beta}$ and uses an EM algorithm to target the posterior mode for $(\boldsymbol{\beta}, \theta, \sigma )$. (\cite{RockovaGeorge2014} also consider continuous spike-and-slab models where the slab, $\mathcal{N}(0, \sigma^2 \tau_1^2)$, is replaced with a polynomial-tailed density, such as a Student's $t$ or a Cauchy distribution, to prevent overshrinkage of the non-negligible entries in $\boldsymbol{\beta}$). Compared to SSS and SSVS, EMVS has been shown to more rapidly and reliably identify those sets of higher probability submodels which may be of most interest \cite{RockovaGeorge2014}. Recently, \cite{KimGao2019} proposed a general algorithmic framework for Bayesian variable selection with graph-structured sparsity which subsumes the EMVS algorithm as a special case. Like SSVS, these algorithms also require thresholding the posterior inclusion probabilities to perform variable selection. Letting $(\boldsymbol{\widehat{\beta}}, \widehat{\theta}, \widehat{\sigma})$ denote the posterior mode for $(\boldsymbol{\beta}, \theta, \sigma)$, \cite{RockovaGeorge2014} recommend using median thresholding, $\Pr(\gamma_j = 1 \,|\:\boldsymbol{\widehat{\beta}}, \widehat{\theta}, \widehat{\sigma}) > 0.5$, for selection.
\section{The spike-and-slab LASSO} \label{SpikeAndSlabLASSO}
Having reviewed the penalized likelihood and spike-and-slab paradigms for sparse modeling in the normal linear regression model \eqref{linearregression}, we are now in a position to review the spike-and-slab LASSO (SSL) of \cite{RockovaGeorge2018}. The SSL forms a bridge between these two parallel developments, thereby combining the strengths of both approaches into a single procedure. Throughout this section and Section \ref{Computing}, we assume that $\boldsymbol{y}$ has been centered at zero to avoid the need for an intercept and that the design matrix $\boldsymbol{X}$ has been centered and standardized so that $\lVert \boldsymbol{x}_j \rVert_2^2 = n$ for all $1 \leq j \leq p$.
\subsection{Prior specification}
The spike-and-slab LASSO prior is specified as
\begin{equation} \label{SSLprior}
\begin{array}{rl}
p(\boldsymbol{\beta} \,|\:\boldsymbol{\gamma} ) = & \displaystyle \prod_{j=1}^{p} \left[ (1-\gamma_j) \psi(\beta_j \,|\:\lambda_0) + \gamma_j \psi (\beta_j \,|\:\lambda_1) \right], \\
p(\boldsymbol{\gamma} \,|\:\theta) = & \displaystyle \prod_{j=1}^{p} \left[ \theta^{\gamma_j} (1-\theta)^{1-\gamma_j} \right], \\
\theta \sim & \mathcal{B}eta(a,b),
\end{array}
\end{equation}
where $\psi ( \beta \,|\:\lambda ) = (\lambda / 2) e^{-\lambda \lvert \beta \rvert}$ denotes the Laplace density with scale parameter $\lambda$. Figure \ref{fig:laplace} depicts the Laplace density for two different choices of scale parameter. We see that for large $\lambda$ ($\lambda=20$), the density is very peaked around zero, while for small $\lambda$ ($\lambda = 1$), it is diffuse. Therefore, in our prior \eqref{SSLprior}, we typically set $\lambda_0 \gg \lambda_1$, so that $\psi ( \cdot \,|\:\lambda_0)$ is the ``spike'' and $\psi ( \cdot \,|\:\lambda_1 )$ is the ``slab.''
\begin{figure}
\caption{Plot of the central region for the Laplace density with two different choices of scale parameter. }
\label{fig:laplace}
\end{figure}
The original SSL model of \cite{RockovaGeorge2018} assumed known variance $\sigma^2=1$. \cite{MoranRockovaGeorge2018} extended the SSL to the unknown variance case. As $\sigma^2$ is typically unknown, we consider the hierarchical formulation in \cite{MoranRockovaGeorge2018} in this paper and place an independent Jeffreys prior on $\sigma^2$,
\begin{align*}
p(\sigma^2) \propto \sigma^{-2}.
\end{align*}
Note that unlike the mixture of normals \eqref{spikeandslabnormals}, we do \textit{not} scale the Laplace priors in $p( \boldsymbol{\beta} \,|\:\gamma)$ by $\sigma^2$. \cite{MoranRockovaGeorge2018} showed that such scaling severely underestimates the variance $\sigma^2$ when $\boldsymbol{\beta}$ is sparse or when $p > n$, thus making the model prone to overfitting.
By choosing $\lambda_1 = \lambda_0$ in \eqref{SSLprior}, we obtain the familiar LASSO $\ell_1$ penalty. On the other hand, if $\lambda_0 \rightarrow \infty$, we obtain the ``theoretically ideal'' point-mass spike-and-slab \eqref{pointmassspikeandslab} as a limiting case. Thus, a feature of the SSL prior is its ability to induce a nonconcave continuum between the penalized likelihood and (point-mass) spike-and-slab constructs.
Since it is a mixture of two Laplace distributions, the SSL prior \eqref{SSLprior} can be seen as a two-group refinement of the LASSO's $\ell_1$ penalty on the coefficients. Thus, the posterior mode for $p(\boldsymbol{\beta} \,|\:\boldsymbol{y})$ under \eqref{SSLprior} is \textit{exactly} sparse and can be used to perform simultaneous variable selection and parameter estimation. This automatic modal thresholding property offers an advantage over previous spike-and-slab formulations \eqref{pointmassspikeandslab}-\eqref{spikeandslabnormals} which do not give exactly sparse estimates of the coefficients and which typically require \textit{post hoc} thresholding of the posterior inclusion probabilities for selection.
It is well-known that the original LASSO \cite{Tibshirani1996} suffers from estimation bias, wherein coefficients with large magnitude are overshrunk. One may wonder what advantages the SSL \eqref{SSLprior} confers over penalized likelihood approaches such as the adaptive LASSO, SCAD, or MCP penalties \cite{FanLi2001, Zhang2010,Zou2006} which are designed to mitigate the bias problem of the LASSO. In what follows, we discuss two major advantages of the SSL. First, we demonstrate that the SSL mixes two LASSO ``bias'' terms \textit{adaptively} in such a way that either a very large amount of shrinkage is applied if $\lvert \beta_j \rvert$ is small or a very small amount of shrinkage is applied if $\lvert \beta_j \rvert$ is large. This is in contrast to the adaptive LASSO \cite{Zou2006} and similar penalties which assign \textit{fixed} coefficient-specific penalties and thus do not gear the coefficient-specific shrinkage towards these extremes. Second, the prior on $\theta$ in \eqref{SSLprior} ultimately renders the coordinates in $\boldsymbol{\beta}$ \textit{dependent} in the marginal prior $p(\boldsymbol{\beta})$ and the SSL penalty \textit{non}-separable. This provides the SSL with the additional ability to borrow information across coordinates and adapt to ensemble information about sparsity.
\subsection{Selective shrinkage and self-adaptivity to sparsity}
As noted in Section \ref{PenalizedLikelihood}, any sparsity-inducing Bayesian prior can be recast { in the penalized likelihood framework by treating} the logarithm of the marginal prior $\log p(\boldsymbol{\beta})$ as a penalty function. The SSL penalty is defined as
\begin{equation} \label{SSLpenalty}
\textrm{pen} (\boldsymbol{\beta} ) = \log \left[ \frac{p(\boldsymbol{\beta})}{p(\boldsymbol{0}_p )} \right],
\end{equation}
where the penalty has been centered at $\boldsymbol{0}_p$, the $p$-dimensional zero vector, so that $\textrm{pen}({\boldsymbol{0}_p})= 0$ \cite{RockovaGeorge2018}. Using \eqref{SSLpenalty} and some algebra, the log posterior under the SSL prior (up to an additive constant) can be shown to be
\begin{equation} \label{logposteriorSSL}
L(\boldsymbol{\beta}, \sigma^2) = -\frac{1}{2 \sigma^2} \lVert \boldsymbol{y} - \boldsymbol{X} \boldsymbol{\beta} \rVert_2^2 - (n+2) \log \sigma + \sum_{j=1}^{p} \textrm{pen}(\beta_j \,|\:\theta_j),
\end{equation}
where for $j=1, \ldots, p$,
\begin{equation} \label{singletonpenalty}
\textrm{pen}(\beta_j \,|\:\theta_j) = - \lambda_1 \lvert \beta_j \rvert + \log[ p_{\theta_j}^{\star} (0 ) / p^{\star}_{\theta_j} (\beta_j ) ],
\end{equation}
with
\begin{equation} \label{conditionalinclusionprob}
p^{\star}_{\theta_j} ( \beta_j ) = \frac{\theta_j \psi (\beta_j \,|\:\lambda_1)}{\theta_j \psi (\beta_j \,|\:\lambda_1) + (1-\theta_j) \psi (\beta_j \,|\:\lambda_0)},
\end{equation}
and
\begin{equation} \label{thetaj}
\theta_j = E[\theta \,|\:\boldsymbol{\beta}_{\setminus j}] = \int \theta p (\theta \,|\:\boldsymbol{\beta}_{\setminus j}) d \theta.
\end{equation}
When $p$ is large, \cite{RockovaGeorge2018} noted that $\theta_j$ is very similar to $E [\theta\,|\:\boldsymbol{\beta}] = \int \theta p (\theta \,|\:\boldsymbol{\beta}) d \theta$ for every $j = 1, \ldots, p$. Thus, for practical purposes, we replace the individual $\theta_j$'s in \eqref{logposteriorSSL}-\eqref{thetaj} with a single $\widehat{\theta} = E [ \theta \,|\:\boldsymbol{\beta}] $ going forward.
The connection between the SSL and penalized likelihood methods is made clearer when considering the derivative of each singleton penalty $\textrm{pen}(\beta_j \,|\:\widehat{\theta })$ in \eqref{singletonpenalty}. This derivative corresponds to an implicit bias term \cite{RockovaGeorge2018} and is given by
\begin{equation} \label{penaltyderivative}
\frac{ \partial \textrm{pen} ( \beta_j \,|\:\widehat{\theta} )}{\partial \lvert \beta_j \rvert } = - \lambda^{\star}_{\hat{\theta}} ( \beta_j ),
\end{equation}
where
\begin{equation} \label{lambdastar}
\lambda^{\star}_{\hat{\theta}} (\beta_j) = \lambda_1 p^{\star}_{\hat{\theta}} ( \beta_j ) + \lambda_0 [1 - p^{\star}_{\hat{\theta}} (\beta_j)].
\end{equation}
The Karush-Kuhn-Tucker (KKT) conditions yield the following necessary condition for the global mode $\widehat{\boldsymbol{\beta}}$:
\begin{equation} \label{necessarycond}
\widehat{\beta}_j = \frac{1}{n} \left[ \lvert z_j \rvert - \sigma^2 \lambda^{\star}_{\hat{\theta}} (\widehat{\beta}_j) \right]_{+} \textrm{sign}(z_j), \hspace{.5cm} j=1, \ldots, p,
\end{equation}
where $z_j = \boldsymbol{x}_j^T (\boldsymbol{y} - \sum_{k \neq j} \boldsymbol{x}_k \widehat{\beta}_k )$. Notice that the condition \eqref{necessarycond} resembles the soft-thresholding operator for the LASSO, except that it contains an adaptive penalty term $\lambda_{\hat{\theta}}^{\star}$ for \textit{each} coefficient. In particular, the quantity \eqref{lambdastar} is a weighted average of the two regularization parameters, $\lambda_1$ and $\lambda_0$, and the weight $p_{\hat{\theta}}^{\star} (\beta_j)$. Thus, \eqref{lambdastar}-\eqref{necessarycond} show that the SSL penalty induces an \textit{adaptive} regularization parameter which applies a different amount of shrinkage to each coefficient, unlike the original LASSO which applies the same shrinkage to every coefficient.
It is worth looking at the term $p_{\hat{\theta}}^{\star} (\beta_j)$ more closely. In light of \eqref{conditionalinclusionprob}, this quantity can be viewed as a conditional probability that $\beta_j$ was drawn from the slab distribution rather than the spike distribution, having seen the regression coefficient $\beta_j$. We have $p_{\hat{\theta}}^{\star} (\beta_j) = \Pr (\gamma_j = 1 \,|\:\beta_j, \hat{\theta})$, where
\begin{equation} \label{pstarexpanded}
p_{\hat{\theta}}^{\star} (\beta_j) = \frac{1}{1 + \frac{(1-\hat{\theta})}{\hat{\theta}} \frac{\lambda_0}{\lambda_1} \exp \left[ - \lvert \beta_j \rvert ( \lambda_0 - \lambda_1 ) \right]}
\end{equation}
is an \textit{exponentially increasing} function in $\lvert \beta_j \rvert$. From \eqref{pstarexpanded}, we see that the functional $p_{\hat{\theta}}^{\star}$ has a sudden increase from near-zero to near-one. Therefore, $p_{\hat{\theta}}^{\star} (\beta_j)$ gears $\lambda_{\hat{\theta}}^{\star}$ in \eqref{lambdastar} towards the extreme values $\lambda_1$ and $\lambda_0$, depending on the size of $\lvert \beta_j \rvert$. Assuming that $\lambda_1$ is sufficiently small (and hence, the slab $\psi (\beta_j \,|\:\lambda_1)$ is sufficiently diffuse), this allows the large coefficients to escape the overall shrinkage effect, in sharp contrast to the single Laplace distribution \eqref{bayesianLASSO}, where the bias issue remains even if a prior is placed on $\lambda$ \cite{RockovaGeorge2016Abel}.
Apart from its selective shrinkage property, a second key benefit of the SSL model \eqref{SSLprior} is its \textit{self-adaptivity} to the sparsity pattern of the data through the prior on the mixing proportion $\theta$, $p(\theta) \sim \mathcal{B}eta(a,b)$. As mentioned previously, this prior ultimately renders the SSL penalty \textit{non}-separable. Fully separable penalty functions, such as those described in Section \ref{PenalizedLikelihood}, are limited by their inability to adapt to common features across model parameters because they treat these parameters independently. In contrast, treating $\theta$ (the expected proportion for non-negligible coefficients in $\boldsymbol{\beta}$) as random, allows for automatic adaptivity to different levels of sparsity. As shown in \eqref{conditionalinclusionprob}-\eqref{thetaj} (and replacing $\theta_j$ with $\hat{\theta}$ and $\boldsymbol{\beta}_{\setminus j}$ with $\boldsymbol{\beta}$), the mixing weight $p^{\star}_{\hat{\theta}}$ is obtained by averaging $p_{\theta}^{\star} (\cdot)$ over $p(\theta \,|\:\boldsymbol{\beta})$, i.e. $p_{\hat{\theta}}^{\star} (\beta) = \int_{0}^{1} p_{\theta}^{\star} (\beta_j) p(\theta \,|\:\boldsymbol{\beta}) d \theta$. It is through this averaging that the SSL penalty \eqref{SSLpenalty} is given an opportunity to borrow information across coordinates and learn about the underlying level of sparsity in $\boldsymbol{\beta}$.
For the hyperparameters $(a,b)$ in the beta prior on $\theta$, \cite{RockovaGeorge2018} recommended the default choice of $a=1, b=p$. By Lemma 4 of \cite{RockovaGeorge2018}, this choice ensures that $E[\theta \,|\:\widehat{\boldsymbol{\beta}}] \sim \widehat{p}_{\gamma}/p$, where $\widehat{p}_{\gamma}$ is the number of nonzero coefficients in $\boldsymbol{\widehat{\beta}}$. Further, this choice of hyperparameters results in an automatic multiplicity adjustment \cite{ScottBerger2010} and ensures that $\theta$ is small (or that most of the coefficients belong to the spike) with high probability. Thus, the SSL also favors parsimonious models in high dimensions and avoids the curse of dimensionality.
\subsection{The spike-and-slab LASSO in action}
Before delving into the implementation details of the SSL, we perform a small simulation study to illustrate the benefits of the adaptive shrinkage of SSL versus the non-adaptive shrinkage of the LASSO. We simulated data of $n=50$ observations with $p=12$ predictors generated as four independent blocks of highly correlated predictors. More precisely, $n$ rows of our design matrix $\boldsymbol{X}$ were generated independently from a $\mathcal{N}_p (\boldsymbol{0}, \boldsymbol{\Sigma})$ distribution with block diagonal covariance matrix $\boldsymbol{\Sigma} = \textrm{bdiag} ( \widetilde{\boldsymbol{\Sigma}}, \ldots, \widetilde{\boldsymbol{\Sigma}})$, where $\widetilde{\boldsymbol{\Sigma}} = \{\widetilde{\sigma}_{ij} \}_{i,j=1}^{3}, \widetilde{\sigma}_{ij} = 0.9$ if $i \neq j$ and $\widetilde{\sigma}_{ii} = 1$. The response was generated from $\boldsymbol{y} \sim \mathcal{N}_n ( \boldsymbol{X} \boldsymbol{\beta}_0, \boldsymbol{I} )$, with $\boldsymbol{\beta}_0 = (1.3, 0, 0, 1.3, 0, 0, 1.3, 0, 0, 1.3, 0, 0)'$. Note that only $x_1$, $x_4$, $x_7$, and $x_{10}$ are non-null in this true model.
\begin{figure}
\caption{The coefficient paths of $\widehat{\boldsymbol{\beta}}$ for the SSL (as the spike parameter $\lambda_0$ increases) and for the LASSO (as the regularization parameter $\lambda$ increases).}
\label{fig:coefficientpaths}
\end{figure}
We fit both the SSL and the LASSO of \cite{Tibshirani1996} to this model. Figure \ref{fig:coefficientpaths} displays the coefficient paths for both SSL and LASSO as the spike parameter $\lambda_0$ in the SSL and the regularization parameter $\lambda$ in the LASSO are increased. For the SSL, the slab parameter $\lambda_1 = 0.01$ is fixed throughout. Both the SSL and LASSO begin at $\lambda_0 = \lambda = 0$ with the same 12 (nonzero) ordinary least squares (OLS) estimates for $\boldsymbol{\beta}_0$. However, as $\lambda_0$ increases for the SSL, the eight smaller OLS estimates are gradually shrunk to zero by the SSL's spike. Meanwhile, the four large estimates are held steady by the SSL's slab, eventually stabilizing at values close to their OLS estimates. The SSL correctly selects the four nonzero coefficients in the true model, demonstrating its self-adaptivity to the true sparsity pattern of the data.
In contrast, Figure \ref{fig:coefficientpaths} also shows that as the LASSO's single penalty parameter $\lambda$ increases, \textit{all} twelve estimates are gradually shrunk to zero. This is because without a slab distribution to help hold the large values steady, the LASSO eventually shrinks all estimates to zero for a large enough $\lambda$. Additionally, due to the order in which the 12 estimates have been thresholded to zero, no value of $\lambda$ yields the correct subset selection $\{ x_1, x_4, x_7, x_{10} \}$. In particular, if $\lambda$ is chosen from cross-validation, the LASSO selects a subset of variables with four false positives.
Our small simulation study illustrates the advantage of the SSL over the LASSO. Specifically, because the LASSO applies the same amount of shrinkage to all regression coefficients, it may estimate a null model if its regularization parameter $\lambda$ is too large. The SSL's two-group refinement of the LASSO penalty helps to mitigate this problem by facilitating selective shrinkage. In Section \ref{Illustration}, we further illustrate the strong empirical performance of the SSL in high-dimensional settings when $p > n$.
\section{Computational details} \label{Computing}
We now turn our attention to implementation of the SSL model \eqref{SSLprior} under the normal linear regression model \eqref{linearregression}. The method described in Section \ref{MAPAlgorithm} is implemented in the publicly available \textsf{R} package \texttt{SSLASSO} \cite{SSLASSOpackage}. However, we also describe an alternative implementation approach in Section \ref{EMAlgorithm}, which is amenable to situations outside of the Gaussian likelihood.
\subsection{Coordinate-wise optimization} \label{MAPAlgorithm}
As mentioned in Section \ref{SpikeAndSlabLASSO} and shown in \eqref{necessarycond}, the (global) posterior mode under the SSL prior \eqref{SSLprior} is exactly sparse, while avoiding the excessive bias issue for large coefficients. Therefore, we can obtain estimates for $\boldsymbol{\beta}$ by targeting the posterior mode.
Marginalizing out $\boldsymbol{\gamma}$ in \eqref{SSLprior} gives the prior for $\boldsymbol{\beta}$ (conditional on $\theta$),
\begin{equation} \label{SSLconditionalontheta}
p(\boldsymbol{\beta} \,|\:\theta) = \prod_{j=1}^{p} \left[ (1-\theta) \psi (\beta_j \,|\:\lambda_0) + \theta \psi (\beta_j \,|\:\lambda_1) \right].
\end{equation}
Using this reparametrization, \cite{MoranRockovaGeorge2018,RockovaGeorge2018} developed a highly efficient coordinate ascent algorithm to quickly target the mode for $(\boldsymbol{\beta}, \sigma^2)$.
Since the SSL is a non-convex method, the KKT conditions only give a necessary condition \eqref{necessarycond} for $\widehat{\boldsymbol{\beta}}$ to be a global mode, but not a sufficient one. When $p > n$ and $\lambda_0 \gg \lambda_1$, the posterior will typically be multimodal. Nevertheless, it is still possible to obtain a refined characterization of the global mode. Building upon theory developed by \cite{ZhangZhang2012}, \cite{MoranRockovaGeorge2018,RockovaGeorge2018} gave necessary \textit{and} sufficient conditions for $\widehat{\boldsymbol{\beta}}$ to be a \textit{global} mode. By Theorems 3-4 in \cite{RockovaGeorge2018} and Propositions 4-5 in \cite{MoranRockovaGeorge2018}, the global mode under the SSL prior \eqref{SSLprior} is a blend of soft-thresholding \textit{and} hard-thresholding, namely
\begin{equation} \label{globalmode}
\widehat{\beta}_j = \frac{1}{n} \left[ | z_j | - \sigma^2 \lambda_{\hat{\theta}}^{\star} (\widehat{\beta}_j) \right]_{+} \textrm{sign}(z_j) \mathbb{I}( |z_j| > \Delta ),
\end{equation}
where $z_j = \boldsymbol{x}_j^T (\boldsymbol{y} - \sum_{k \neq j} \boldsymbol{x}_k \widehat{\beta}_k)$ and $\Delta \equiv \inf_{t >0} [nt/2 - \sigma^2 \textrm{pen}(t \,|\:\hat{\theta} ) / t]$. In \cite{MoranRockovaGeorge2018}, an approximation for $\Delta$ is given by
\begin{align*}
\Delta = \left\{
\begin{array}{ll}
\sqrt{2 n \sigma^2 \log [ 1 / p_{\hat{\theta}}^{\star} (0)]} + \sigma^2 \lambda_1\ & \textrm{if } g_{\hat{\theta}}(0) > 0, \\
\sigma^2 \lambda_{\hat{\theta}}^{\star} (0) & \textrm{otherwise},
\end{array}
\right.
\end{align*}
where $g_{\theta}(x) = [ \lambda_{\theta}^{\star}(x) - \lambda_1]^2 + (2n / \sigma^2) \log p_{\theta}^{\star} (x)$. The generalized thresholding operator \eqref{globalmode} allows us to eliminate many suboptimal local modes from consideration through the threshold $\Delta$. This refined characterization also facilitates a highly efficient coordinate ascent algorithm \cite{MazumderFriedmanHastie2011} to find the global mode, which we now detail.
After initializing $(\Delta^{(0)}, \boldsymbol{\beta}^{(0)}, \theta^{(0)}, \sigma^{2(0)})$, the coordinate ascent algorithm iteratively updates these parameters until convergence. The update for the threshold $\Delta$ at the $t^{th}$ iteration is
\begin{align*}
\Delta^{(t)} = \left\{
\begin{array}{ll}
\sqrt{2 n \sigma^{2(t-1)} \log[1 / p_{\hat{\theta}^{(t-1)}}^{\star}(0)]} + \sigma^{2(t-1)} \lambda_1 & \textrm{if } g_{\theta^{(t-1)}} (0) > 0, \\
\sigma^{2(t-1)} \lambda_{\theta^{(t-1)}}^{\star}(0) & \textrm{otherwise}.
\end{array}
\right.
\end{align*}
Next, $\boldsymbol{\beta}$ is updated as
\begin{align*}
\beta_j^{(t)} \leftarrow \frac{1}{n} \left( |z_j| - \lambda_{\hat{\theta}^{(t-1)}}^{\star} ( \widehat{\beta}_j^{(t-1)} ) \right)_{+} \textrm{sign}(z_j) \mathbb{I}( |z_j| > \Delta^{(t)}).
\end{align*}
Using the approximation for $E[\theta \,|\:\widehat{\boldsymbol{\beta}}]$ in Lemma 4 of \cite{RockovaGeorge2018}, the update for $\hat{\theta}$ is
\begin{align*}
\hat{\theta}^{(t)} \leftarrow \frac{a + \widehat{p}_{\gamma}^{(t)}}{a+b+p},
\end{align*}
where $\widehat{p}_{\gamma}^{(t)}$ is the number of nonzero entries in $\boldsymbol{\beta}^{(t)}$. Finally, the update for $\sigma^2$ is
\begin{align*}
\sigma^{2(t)} \leftarrow \frac{ \lVert \boldsymbol{y} - \boldsymbol{X} \boldsymbol{\beta}^{(t)} \rVert_2^2}{n+2}.
\end{align*}
\subsection{Dynamic posterior exploration} \label{DynamicPosteriorExploration}
The performance of the SSL model depends on good choices for the hyperparameters $(\lambda_0, \lambda_1)$ in \eqref{SSLprior}. To this end, \cite{RockovaGeorge2018} recommend a ``dynamic posterior exploration'' strategy in which the slab hyperparameter $\lambda_1$ is held fixed at a small value and the spike hyperparameter $\lambda_0$ is gradually increased along a ladder of increasing values, $\{ \lambda_0^{1}, \ldots, \lambda_0^{L} \}$. The algorithm is not very sensitive to the specific choice of $\lambda_1$, provided that the slab is sufficiently diffuse. For each $\lambda_0^{s}$ in the ladder for the spike parameters, we reinitialize $(\Delta^{(0)}, \boldsymbol{\beta}^{(0)}, \theta^{(0)}, \sigma^{2(0)})$ using the MAP estimates for these parameters from the previous spike parameter $\lambda_0^{s-1}$ as a ``warm start.''
This sequential reinitialization strategy allows the SSL to more easily find the global mode. In particular, when $(\lambda_1 - \lambda_0)^2 < 4$ and $\sigma^2$ is fixed, the objective \eqref{logposteriorSSL} is convex. The intuition here is to use the solution to the convex problem as a ``warm'' start for the non-convex problem (when $\lambda_0 \gg \lambda_1$). As we increase $\lambda_0$, the posterior becomes ``spikier,'' with the spikes absorbing more and more of the negligible parameters. Meanwhile, keeping $\lambda_1$ fixed at a small value allows the larger coefficients to escape the pull of the spike. For large enough $\lambda_0$, the algorithm will eventually stabilize so that further increases in $\lambda_0$ do not change the solution. In Section \ref{Illustration}, we illustrate this with plots of the SSL solution paths.
Additionally, as noted by \cite{MoranRockovaGeorge2018}, some care must also be taken when updating $\sigma^2$. When $p > n$ and $\lambda_0 \approx \lambda_1$, the model can become saturated, causing the residual variance to go to zero. To avoid this suboptimal mode at $\sigma^2 = 0$, \cite{MoranRockovaGeorge2018} recommend fixing $\sigma^2$ until the $\lambda_0$ value in the ladder at which the algorithm starts to converge in less than 100 iterations. Then, $\boldsymbol{\beta}$ and $\sigma^2$ are simultaneously updated for the next largest $\lambda_0$ in the sequence. The complete algorithm for coordinate-wise optimization with dynamic posterior exploration is given in Section 4 of the supplementary material in \cite{MoranRockovaGeorge2018}. This algorithm is implemented in the \textsf{R} package \texttt{SSLASSO}.
\subsection{EM implementation of the spike-and-slab LASSO} \label{EMAlgorithm}
The coordinate ascent algorithm of Section \ref{MAPAlgorithm} specifically appeals to the theoretical framework of \cite{ZhangZhang2012} to search for the global SSL mode $\boldsymbol{\widehat{\beta}}$. An alternative approach, also proposed by \cite{RockovaGeorge2018}, is to use an EM algorithm in the vein of EMVS \cite{RockovaGeorge2014}. Again treating the latent variables $\boldsymbol{\gamma}$ in \eqref{SSLprior} as ``missing'' data, this EM implementation of the SSL proceeds as follows.
In the E-step at the $t$th iteration, we compute $E[\gamma_j \,|\:\boldsymbol{y}, \boldsymbol{\beta}^{(t-1)}, \theta^{(t-1)}, \sigma^{2(t-1)}] = p_{\theta^{(t-1)}}^{\star} (\beta_j^{(t-1)})$, where $p_{\theta}^{\star}$ is as in \eqref{conditionalinclusionprob}. The M-step then iterates through the following updates:
\begin{align*}
& \boldsymbol{\beta}^{(t)} \leftarrow \displaystyle \argmax_{\boldsymbol{\beta} \in \mathbb{R}^{p}} \left\{ - \frac{1}{2} \lVert \boldsymbol{y} - \boldsymbol{X} \boldsymbol{\beta} \rVert_2^2 - \sum_{j=1}^{p} \sigma^{2(t-1)} \lambda_{\theta^{(t-1)}}^{\star}(\beta_j^{(t-1)}) \lvert \beta_j \rvert \right\}, \\
& \theta^{(t)} \leftarrow \frac{ \sum_{j=1}^{p} p_{\theta^{(t-1)}}^{\star} (\beta_j^{(t)}) + a - 1}{a+b+p - 2},
\\
& \sigma^{2(t)} \leftarrow \frac{ \lVert \boldsymbol{y} - \boldsymbol{X} \boldsymbol{\beta}^{(t)} \rVert_2^2}{n+2},
\end{align*}
where $\lambda_{\theta}^{\star} (\beta) = \lambda_1 p_{\theta}^{\star} (\beta) + \lambda_0 [ 1- p_{\theta}^{\star} (\beta) ]$. Note that the update for $\boldsymbol{\beta}^{(t)}$ is an adaptive LASSO regression with weights $\sigma^2 \lambda_{\theta}^{\star}$ and hence can be solved very efficiently using coordinate descent algorithms \cite{FriedmanHastieTibshirani2010}. Like EMVS \cite{RockovaGeorge2014}, the dynamic posterior exploration strategy detailed in Section \ref{DynamicPosteriorExploration} can be used to find a more optimal mode for $(\boldsymbol{\beta}, \sigma^2)$.
This EM approach can be straightforwardly adapted for other statistical models where the SSL prior \eqref{SSLprior} is used (such as the methods described in Section \ref{MethodologicalExtensions}) but where the likelihood function differs and the theory of \cite{ZhangZhang2012} is not applicable.
Similar to the coordinate ascent algorithm described in Section \ref{MAPAlgorithm}, this EM algorithm may be sensitive to the initialization of $(\boldsymbol{\beta}^{(0)}, \theta^{(0)}, \sigma^{2(0)} )$. The dynamic posterior exploration strategy described earlier can partly help to mitigate this issue, since the posterior starts out relatively flat when $\lambda_0 \approx \lambda_1$ but becomes ``spikier'' as $\lambda_0$ increases. By the time that the spikes have reappeared, the ``warm start'' solution from the previous $\lambda_0$ in the ladder should hopefully be in the basin of the dominant mode. Other strategies such as running the algorithm for a wide choice of starting values or deterministic annealing can also aid in adding robustness against poor initializations \cite{McLachlanBasford1988,RockovaGeorge2014,UedaNakano1998}.
\section{Uncertainty quantification} \label{UncertaintyQuantification}
While the algorithms described in Section \ref{Computing} can be used to rapidly target the modes of the SSL posterior, providing a measure of uncertainty for our estimates is a challenging task. In this section, we outline two possible strategies for the task of uncertainty quantification. The first is based on debiasing the posterior mode. The second involves posterior simulation.
\subsection{Debiasing the posterior mode}
One possible avenue for uncertainty quantification is to use debiasing \cite{BaiMoranAntonelliChenBoland2019, JavanmardMontanari2018, VanDeGeerBuhlmannRitovDezeure2014,ZhangZhang2014}. Let $\widehat{\boldsymbol{\Sigma}} = \boldsymbol{X}^T \boldsymbol{X} / n$ and let $\widehat{\boldsymbol{\Theta}}$ be an approximate inverse of $\widehat{\boldsymbol{\Sigma}}$. Note that when $p > n$, $\boldsymbol{X}$ is singular, so $\widehat{\boldsymbol{\Sigma}}^{-1}$ does not necessarily exist. However, we can still obtain a sparse estimate of the precision matrix $\widehat{\boldsymbol{\Theta}}$ for the rows of $\boldsymbol{X}$ by using techniques from the graphical models literature, e.g. the nodewise regression procedure in \cite{MeinshausenBuhlmann2006} or the graphical lasso \cite{FriedmanHastieTibshirani2007}. We define the quantity $\widehat{\boldsymbol{\beta}}_d$ as
\begin{equation}
\widehat{\boldsymbol{\beta}}_d = \widehat{\boldsymbol{\beta}} + \widehat{\boldsymbol{\Theta}} \boldsymbol{X}^T (\boldsymbol{y} - \boldsymbol{X} \widehat{\boldsymbol{\beta}})/n,
\end{equation}
where $\widehat{\boldsymbol{\beta}}$ is the MAP estimator of $\boldsymbol{\beta}$ under the SSL model. By \cite{VanDeGeerBuhlmannRitovDezeure2014}, this quantity $\widehat{\boldsymbol{\beta}}_d$ has the following asymptotic distribution:
\begin{equation} \label{asymptoticdist}
\sqrt{n}(\widehat{\boldsymbol{\beta}}_d - \boldsymbol{\beta}) \sim \mathcal{N}(\boldsymbol{0}, \sigma^2 \widehat{\boldsymbol{\Theta}} \widehat{\boldsymbol{\Sigma}} \widehat{\boldsymbol{\Theta}}^T).
\end{equation}
For inference, we replace the population variance $\sigma^2$ in \eqref{asymptoticdist} with the modal estimate $\widehat{\sigma}^2$ from the SSL model. Let $\widehat{\beta}_{dj}$ denote the $j$th coordinate of $\widehat{\boldsymbol{\beta}}_d$. We have from \eqref{asymptoticdist} that the $100(1-\alpha) \%$ asymptotic pointwise confidence intervals for $\beta_{j}, j = 1, \ldots, p$, are
\begin{align} \label{confidenceintervals}
[ \widehat{\beta}_{dj} - c(\alpha, n, \widehat{\sigma}^2), \widehat{\beta}_{dj} + c(\alpha, n, \widehat{\sigma}^2) ],
\end{align}
where $c(\alpha, n, \widehat{\sigma}^2) := \Phi^{-1} (1-\alpha/2) \sqrt{ \widehat{\sigma}^2 ( \widehat{\boldsymbol{\Theta}} \widehat{\boldsymbol{\Sigma}} \widehat{\boldsymbol{\Theta}}^T )_{jj} / n}$ and $\Phi(\cdot)$ denotes the cumulative distribution function of $\mathcal{N}(0,1)$.
Note that the posterior modal estimate $\widehat{\boldsymbol{\beta}}$ under the SSL prior already has much less bias than the LASSO estimator \cite{Tibshirani1996}. Therefore, the purpose of the debiasing procedure above is mainly to obtain an estimator with an asymptotically normal distribution from which we can construct asymptotic pointwise confidence intervals. While this procedure is asymptotically valid, \cite{AntonelliParmigianiDominici2019} showed through numerical studies that constructing confidence intervals based on asymptotic arguments may provide coverage below the nominal level in finite samples, especially small samples. Therefore, it may be more ideal to use the actual SSL posterior $p(\boldsymbol{\beta} \,|\:\boldsymbol{y})$ for inference.
\subsection{Posterior sampling for the spike-and-slab LASSO}
Fully Bayesian inference with the SSL can be carried out via posterior simulation.
However, posterior sampling under spike-and-slab priors has continued to pose challenges.
One immediate strategy for sampling from the SSL posterior is the SSVS algorithm of \cite{GeorgeMcCulloch1993}, described in Section \ref{sspriors}. One can regard the Laplace distribution as a scale mixture of Gaussians with an exponential mixing distribution \cite{ParkCasella2008} and perform a variant of SSVS. Recently, several clever computational tricks have been suggested that avoid costly matrix inversions needed by SSVS by using linear solvers \cite{BhattacharyaChakrabortyMallick2016}, low-rank approximations \cite{JohndrowOrensteinBhattacharya2020}, or by disregarding correlations between active and inactive coefficients \cite{NarisettyShenHe2019}. These techniques can be suitably adapted for fast posterior sampling of the SSL as well.
Intrigued by the speed of SSL mode detection, \cite{NieRockova2020} explored the possibility of turning SSL into approximate posterior sampling by performing MAP optimization on many independently perturbed datasets. Building on Bayesian bootstrap ideas, they introduced a method for approximate sampling called Bayesian bootstrap spike-and-slab LASSO (BB-SSL) which scales linearly with both $n$ and $p$. Beyond its scalability, they show that BB-SSL has strong theoretical support, matching the convergence rate of the original posterior in sparse normal-means and in high-dimensional regression.
\section{Illustrations} \label{Illustration}
In this section, we illustrate the SSL's potential for estimation, variable selection, and prediction on both simulated and real high-dimensional data sets.
\subsection{Example on synthetic data} \label{Simulations}
For our simulation study, we slightly modified the settings in \cite{MoranRockovaGeorge2018}. We set $n = 100$ and $p = 1000$ in \eqref{linearregression}. The design matrix $\boldsymbol{X}$ was generated from a multivariate Gaussian distribution with mean $\boldsymbol{0}_p$ and a block-diagonal covariance matrix $\boldsymbol{\Sigma} = \textrm{bdiag} ( \widetilde{\Sigma}, \ldots, \widetilde{\Sigma} )$, where $\widetilde{\Sigma} = \{ \widetilde{\sigma}_{ij} \}_{i,j=1}^{50}$, with $\widetilde{\sigma}_{ij} = 0.9$ if $i \neq j$ and $\widetilde{\sigma}_{ii} = 1$. The true vector of regression coefficients $\boldsymbol{\beta}_0$ was constructed by assigning regression coefficients $\{-3.5, -2.5, -1.5, 1.5, 2.5, 3.5 \}$ to 6 entries located at the indices $\{ 1, 51, 101, 151, 201, 251 \}$ and setting the remaining coefficients equal to zero. Hence, there were 20 independent blocks of 50 highly correlated predictors, where the first six blocks contained only one active predictor. We then generated the response $\boldsymbol{y}$ using \eqref{linearregression}, where the error variance was set as $\sigma^2 = 3$.
We compared the SSL with the LASSO \cite{Tibshirani1996}, SCAD \cite{FanLi2001}, and MCP \cite{Zhang2010}. The SSL method was applied using the \textsf{R} package \texttt{SSLASSO}. The competing methods were applied using the \textsf{R} package \texttt{ncvreg}. We repeated our experiment 500 times with new covariates and responses generated each time. For each experiment, we recorded the mean squared error (MSE) and mean prediction error (MPE), defined as
\begin{align*}
\textrm{MSE} = \frac{1}{p} \lVert \widehat{\boldsymbol{\beta}} - \boldsymbol{\beta}_0 \rVert_2^2 \hspace{.3cm} \textrm{ and } \hspace{.3cm}
\textrm{MPE} = \frac{1}{n} \lVert \boldsymbol{X} ( \widehat{\boldsymbol{\beta}} - \boldsymbol{\beta}_0 ) \rVert_2^2.
\end{align*}
We also kept track of $\widehat{p}_{\gamma}$, or the size of the model selected by each of these methods. Finally, we recorded the false discovery rate (FDR), the false negative rate (FNR), and the Matthews correlation coefficient (MCC) \cite{Matthews1975}, defined respectively as
\begin{align*}
\textrm{FDR} & = \frac{\textrm{FP}}{\textrm{TP}+\textrm{FP}}, \hspace{.5cm} \textrm{FNR} = \frac{\textrm{FN}}{\textrm{TP}+\textrm{FN}}, \\
\textrm{MCC} & = \frac{ \textrm{TP} \times \textrm{TN} - \textrm{FP} \times \textrm{FN}}{ \sqrt{ ( \textrm{TP} + \textrm{FP} )(\textrm{TP} + \textrm{FN})(\textrm{TN} + \textrm{FP})(\textrm{TN} + \textrm{FN})}},
\end{align*}
where TP, TN, FP, and FN denote the number of true positives, true negatives, false positives, and false negatives respectively. The MCC is a correlation coefficient between the predicted set of significant coefficients and the actual set of nonzero coefficients \cite{Matthews1975}. MCC has a range of -1 to 1, with -1 indicating completely incorrect selection (i.e. TP=TN=0) and 1 indicating completely correct variable selection (i.e. FP=FN=0). Models with MCC closer to 1 have higher selection accuracy. \textsf{R} code to reproduce these experiments is available in the online supplementary material.
\begin{table}[t!]
\centering
\begin{tabularx}{\textwidth}{@{}*7{>{\raggedright\arraybackslash}X}@{}}
\hline
& MSE & MPE & $\widehat{p}_{\gamma}$ & FDR & FNR & MCC \\
\hline
\hline
SSL & \textbf{0.0067} (0.0076) & \textbf{0.701} (0.542) & \textbf{6.05} (0.271) & \textbf{0.0012} (0.0010) & 0.187 (0.160) & \textbf{0.809} (0.162) \\
\hline
LASSO & 0.011 (0.0045) & 1.14 (0.303) & 33.38 (5.38) & 0.028 (0.0055) & \textbf{0.083} (0.109) & 0.387 (0.062) \\
\hline
SCAD & 0.011 (0.012) & 0.985 (0.691) & 12.74 (3.98) & 0.0081 (0.0043) & 0.225 (0.187) & 0.554 (0.178) \\
\hline
MCP & 0.020 (0.016) & 1.55 (0.849) & 11.31 (2.70) & 0.0077 (0.0031) & 0.395 (0.211) & 0.447 (0.173) \\
\hline
\end{tabularx}
\caption{MSE, MPE, estimated model size, FDR, FNR, and MCC for SSL, LASSO, SCAD, and MCP. The results are averaged across 500 replications. In parentheses, we report the empirical standard errors.}
\label{Table:1}
\end{table}
Table \ref{Table:1} reports our results averaged across the 500 replications. We see that the SSL had the lowest average MSE and MPE, in addition to selecting (on average) the most parsimonious model. The LASSO (along with SCAD) had the second lowest MSE, but it tended to select far more variables than the other methods, leading to the highest FDR. In contrast, SSL had the lowest FDR and the highest MCC, indicating that the SSL had the best overall variable selection performance of all the methods. Our simulation study demonstrates that SSL achieves both parsimony \textit{and} accuracy of estimation and selection.
Figure \ref{fig:dynamicposteriorexploration} illustrates the benefits of the dynamic posterior exploration approach outlined in Section \ref{DynamicPosteriorExploration}. Specifically, Figure \ref{fig:dynamicposteriorexploration} plots the solution paths for the regression coefficients from one of our experiments as the spike hyperparameter $\lambda_0$ increases. We see that the SSL solution stabilizes fairly quickly (when $\lambda_0$ is less than 20), so that further increases in $\lambda_0$ do not change the solution. This demonstrates that dynamic posterior exploration offers a viable alternative to cross-validation. The \textsf{R} package \texttt{SSLASSO} provides the functionality to generate plots of these solution paths.
\begin{figure}
\caption{The solution paths for the SSL along the ladder of spike parameters $\lambda_0$. We see that the solution stabilizes after a certain point, so that further increases in $\lambda_0$ do not change the solution. The points along the horizontal axis are the zero values where the negligible estimates disappear. }
\label{fig:dynamicposteriorexploration}
\end{figure}
\subsection{Bardet-Biedl syndrome gene expression study} \label{RealDataAnalysis}
We now analyze a microarray data set consisting of gene expression measurements from the eye tissue of 120 laboratory rats\footnote{Data accessed from the Gene Expression Omnibus \texttt{ www.ncbi.nlm.nih.gov/geo} (accession no. GSE5680).}. The data was originally studied by \cite{Scheetz06} to investigate mammalian eye disease. In this data, the goal is to identify genes which are associated with the gene TRIM32. TRIM32 has previously been shown to cause Bardet-Biedl syndrome \cite{chiang06}, a disease affecting multiple organs including the retina.
The original data consists of 31,099 probe sets. For our analysis, we included only the 10,000 probe sets with the largest variances in expression (on the log scale). This resulted in $n = 120$ and $p = 10,000$. We then fit the model \eqref{linearregression} with an SSL penalty. We compared the SSL approach to LASSO, SCAD, and MCP.
To assess predictive accuracy, we randomly split the data set into 90 training observations and 30 test observations. We then fit the models on the training set and used the estimated $\widehat{\boldsymbol{\beta}}_{\textrm{train}}$ to compute the mean squared prediction error (MSPE) on the left-out test set,
\begin{equation*}
\textrm{MSPE} = \frac{1}{30} \sum_{i=1}^{30} (y_{i, \textrm{test}} - \boldsymbol{x}_{i, \textrm{test}}^T \widehat{\boldsymbol{\beta}}_{\textrm{train}})^2,
\end{equation*}
where $(\boldsymbol{x}_{i, \textrm{test}}, y_{i, \textrm{test}}), i = 1, \ldots, 30,$ are the observations in the test set. We repeated this process 100 times and took the average MSPE.
Table \ref{Table:2} shows the results for our analysis, as well as the number of selected probe sets when we fit the different models to the complete data set. SSL had the lowest out-of-sample MSPE, indicating the highest predictive power. MCP selected the most parsimonious model, with only nine probe sets out of the 10,000 selected. However, MCP also had a much higher MSPE than the other methods. On the other hand, SSL selected 28 probe sets (compared to 32 and 44 for LASSO and SCAD respectively) and still achieved the lowest MSPE. Our analysis illustrates that on this particular data set, SSL achieved both the best predictive performance and parsimony.
\begin{table}[t!]
\centering
\begin{tabular}{l c c}
\hline
& MSPE & Number of selected probe sets \\
\hline
SSL & \textbf{0.011} & 28 \\
LASSO & 0.012 & 32 \\
SCAD & 0.015 & 44 \\
MCP & 3.699 & 9 \\
\hline
\end{tabular}
\caption{Average MSPE and the number of selected probe sets for the Bardet-Biedl syndrome data analysis.}
\label{Table:2}
\end{table}
Of the 28 probe sets selected by SSL as being significantly associated with TRIM32, 14 of them had identifiable gene symbols. These genes were SCGB1A1, CELF1, ASXL3, FGFR2, MOBP, TGM7, SLC39A6, DDX58, TFF2, CLOCK, DUS4L, HTR5B, BIK, and SLC16A6. In particular, according to \texttt{https://www.genecards.org} \footnote{Accessed from \texttt{https://www.genecards.org/cgi-bin/carddisp.pl?gene=TRIM32} on October 11, 2020.}, SCGB1A1 is known to be an interacting protein for the TRIM32 gene. The other associations that we found may be useful for researchers in studying the genetic factors contributing to Bardet-Biedl syndrome.
\section{Methodological extensions} \label{MethodologicalExtensions}
While we have focused on the normal linear regression model \eqref{linearregression} in Sections \ref{FrequentistVsBayes}-\ref{Illustration}, the spike-and-slab LASSO methodology has now been adopted in a variety of other statistical applications. In this section, we survey some of the extensions of the SSL to models beyond the normal linear regression framework.
\textbf{Generalized linear models (GLMs).} GLMs allow for a flexible generalization of the normal linear regression model \eqref{linearregression} which can accommodate categorical and count data, in addition to continuous variables. Letting $\boldsymbol{x}_i \in \mathbb{R}^{p}$ denote a vector of covariates for the $i$th observation, GLMs assume that the mean of the response variable is related to the linear predictor via a link function,
\begin{equation} \label{linkfunction}
E(y_i \,|\:\boldsymbol{x}_i) = h^{-1} ( \boldsymbol{x}_i^T \boldsymbol{\beta} ),
\end{equation}
and that the data distribution is expressed as
\begin{equation} \label{GLMlikelihood}
p( \boldsymbol{y} \,|\:\boldsymbol{X}, \boldsymbol{\beta}, \varphi ) = \prod_{i=1}^{n} p(y_i \,|\:\boldsymbol{x}_i, \boldsymbol{\beta}, \varphi),
\end{equation}
where $\varphi$ is a dispersion parameter and the distribution $p(y_i \,|\:\boldsymbol{x}_i, \boldsymbol{\beta}, \varphi )$ can take various forms, including normal, binomial and Poisson distributions. Obviously, the normal linear model \eqref{linearregression} is a special case of \eqref{linkfunction} with the identity link function $h(u)=u$.
\cite{TangShenZhangYi2017GLM} extended the SSL \eqref{SpikeAndSlabLASSO} to GLMs, including binary regression and Poisson regression, by placing the SSL prior \eqref{SSLprior} on the coefficients vector $\boldsymbol{\beta} \in \mathbb{R}^{p}$ in \eqref{linkfunction} and developing an EM algorithm to perform MAP estimation for $\boldsymbol{\beta}$. For inference with grouped variables in GLMs, \cite{TangShenLiZhangWenQianZhuangShiYi2018} further employed group-specific sparsity parameters $\theta_g$ for each group of variables, instead of a single $\theta$, as in \eqref{SpikeAndSlabLASSO}.
\textbf{Survival analysis.} The SSL has also proven to be useful for predicting censored survival outcomes and detecting and estimating the effects of relevant covariates. Cox proportional hazards models are the most widely used method for studying the relationship between a censored survival response and an explanatory variable $\boldsymbol{x}_i \in \mathbb{R}^{p}$ \cite{KleinMoeschberger2003}. This model assumes that the hazard function of survival time $t$ takes the form,
\begin{equation} \label{CoxRegression}
h(t \,|\:\boldsymbol{x}_i ) = h_0(t) \exp(\boldsymbol{x}_i^T \boldsymbol{\beta}).
\end{equation}
\cite{TangShenZhangYi2017Cox} introduced the spike-and-slab LASSO Cox model which endows the coefficients $\boldsymbol{\beta}$ in \eqref{CoxRegression} with the SSL prior \eqref{SSLprior}. They developed an EM coordinate ascent algorithm to fit SSL Cox models. \cite{TangLeiZhangYiGuoChenShenYi2019} further introduced the GssLASSO Cox model which incorporates grouping information by endowing each group of coefficients with a group-specific sparsity parameter $\theta_g$ instead of the single $\theta$ of \eqref{SSLprior}.
\textbf{Sparse factor analysis and biclustering.} Factor models aim to explain the dependence structure among high-dimensional observations through a sparse decomposition of a $p \times p$ covariance matrix $\boldsymbol{\Omega}$ as $\boldsymbol{B} \boldsymbol{B}^T + \boldsymbol{\Sigma}$ where $\boldsymbol{B}$ is a $p \times K$ factor loadings matrix with $K \ll p$ and $\boldsymbol{\Sigma} = \textrm{diag}(\sigma_1^2, \ldots, \sigma_p^2)$. A generic latent factor model is
\begin{equation} \label{factormodel}
\boldsymbol{y}_i = \boldsymbol{B} \boldsymbol{\eta}_{i} + \boldsymbol{\varepsilon}_i, \hspace{.5cm} \boldsymbol{\varepsilon}_i \sim \mathcal{N}_p (\boldsymbol{0}, \boldsymbol{\Sigma}),
\end{equation}
where $\boldsymbol{y}_i$ is a $p$-dimensional continuous response and $\boldsymbol{\eta}_i \sim \mathcal{N}_K (\boldsymbol{0}, \boldsymbol{I}_K)$ are unobserved latent factors. Many existing factor analysis approaches entail prespecification of the unknown factor cardinality $K$ and \textit{post hoc} rotations of the original solution to sparsity. For the factor model \eqref{factormodel}, \cite{RockovaGeorge2016} endowed the entries of the loading matrix $\boldsymbol{B}$ with independent SSL priors,
\begin{align*}
p(\beta_{jk} \,|\:\gamma_{jk}, \lambda_{0k}, \lambda_1) = (1-\gamma_{jk}) \psi(\beta_{jk} \,|\:\lambda_{0k} ) + \gamma_{jk}\psi(\beta_{jk}\,|\:\lambda_{1}).
\end{align*}
However, instead of endowing each of the indicators $\gamma_{jk}$ with the usual beta-Bernoulli prior as in \eqref{SSLprior}, \cite{RockovaGeorge2016} endowed these with the Indian buffet process (IBP) prior \cite{GriffithsGhahramani2011}, which avoids the need to prespecify $K$. Further, \cite{RockovaGeorge2016} developed a parameter-expanded EM (PXL-EM) algorithm which employs \textit{intermediate} orthogonal rotations rather than post hoc rotations. In addition to obtaining a sparse solution, the PXL-EM algorithm also converges much faster than the vanilla EM algorithm and offers robustness against poor initializations.
For the problem of biclustering, i.e. identifying clusters using only subsets of their associated features, \cite{MoranRockovaGeorge2019} utilized the factor model \eqref{factormodel} in which \textit{both} the factors $\boldsymbol{\eta} = [ \boldsymbol{\eta}_1^T, \ldots, \boldsymbol{\eta}_n^T ] \in \mathbb{R}^{n \times K}$ and the loadings are sparse. To achieve a doubly sparse representation, \cite{MoranRockovaGeorge2019} placed an SSL prior coupled with an IBP prior on the factors and an SSL prior coupled with a beta-Bernoulli prior on the loadings. An EM algorithm with a variational step was developed to implement spike-and-slab LASSO biclustering.
\textbf{Graphical models.} Suppose we are given data $\boldsymbol{Y} = (\boldsymbol{y}_1, \ldots, \boldsymbol{y}_n)^T$, where the $\boldsymbol{y}_i$'s are assumed to be iid $p$-variate random vectors distributed as $\mathcal{N}_p (\boldsymbol{0}, \boldsymbol{\Omega}^{-1})$ and $p>n$. In this setting, off-diagonal zero entries $\omega_{ij}$ encode conditional independence between variables $i$ and $j$. To obtain a sparse estimate of $\boldsymbol{\Omega} = ( \omega_{i,j} )_{i,j}$, \cite{GanNarisettyLiang2018} introduced the following prior on $\boldsymbol{\Omega}$:
\begin{align} \label{graphicalmodelprior}
p(\boldsymbol{\Omega}) = \prod_{i < j} \left[ (1-\theta) \psi(\omega_{ij}\,|\:\lambda_0 ) + \theta \psi(\omega_{ij} \,|\:\lambda_1 ) \right] \prod_{i=1}^{p} [ \tau e^{-\tau \omega_{ii}} ] \mathbb{I}( \Omega \succ 0 ) \mathbb{I}( \lVert \boldsymbol{\Omega} \rVert_2 \leq B ),
\end{align}
for some $\tau > 0, B > 0$. Here, $\boldsymbol{\Omega} \succ 0$ denotes that $\boldsymbol{\Omega}$ is positive-definite and $\lVert \boldsymbol{\Omega} \rVert_2$ denotes the spectral norm of $\boldsymbol{\Omega}$. The prior on $\boldsymbol{\Omega}$ \eqref{graphicalmodelprior} entails independent exponential priors on the diagonal entries and SSL priors on the off-diagonal entries. A similar prior formulation was considered in \cite{DeshpandeRockovaGeorge2019}, except \cite{DeshpandeRockovaGeorge2019} did not constrain $\boldsymbol{\Omega}$ to lie in the space of $p \times p$ matrices with uniformly bounded spectral norm. \cite{GanNarisettyLiang2018} showed that constraining the parameter space for $\boldsymbol{\Omega}$ in such a way ensures that a) the corresponding optimization problem for the posterior mode is \textit{strictly} convex, and b) the posterior mode is a symmetric positive definite matrix. \cite{GanNarisettyLiang2018} developed an EM algorithm to estimate the posterior mode of $p(\boldsymbol{\Omega} \,|\:\boldsymbol{Y})$.
\hspace{.2cm} The SSL prior was also extended to perform \textit{joint} estimation of multiple related Gaussian graphical models by \cite{LiMcCormickClark2019}. \cite{LiMcCormickClark2019}
leveraged similarities in the underlying sparse precision matrices and developed an EM algorithm to perform this joint estimation.
\textbf{Seemingly unrelated regression models.} In seemingly unrelated regression models, multiple correlated responses are regressed on multiple predictors. The multivariate linear regression is an important case. Letting $\boldsymbol{y}_i \in \mathbb{R}^{q}$ be the vector of $q$ responses and $\boldsymbol{x}_i \in \mathbb{R}^{p}$ be the vector of $p$ covariates, this model is
\begin{equation} \label{multivariateregression}
\boldsymbol{y}_i = \boldsymbol{x}_i^T \boldsymbol{B} + \boldsymbol{\varepsilon}_i, \hspace{.5cm} \boldsymbol{\varepsilon}_i \sim \mathcal{N}_q ( \boldsymbol{0}, \boldsymbol{\Omega}^{-1} ),
\end{equation}
\cite{DeshpandeRockovaGeorge2019} introduced the \textit{multivariate spike-and-slab LASSO} (mSSL) to perform joint selection and estimation from the $p \times q$ matrix of regressors $\boldsymbol{B}$ \textit{and} the precision matrix $\boldsymbol{\Omega}$. To obtain a sparse estimate of $(\boldsymbol{B}, \boldsymbol{\Omega})$, \cite{DeshpandeRockovaGeorge2019} placed the SSL prior \eqref{SSLprior} on the individual entries $\beta_{jk}, 1 \leq j \leq p, 1 \leq k \leq q$ in $\boldsymbol{B}$ and a product prior similar to \eqref{graphicalmodelprior} on $\boldsymbol{\Omega}$ (except \cite{DeshpandeRockovaGeorge2019} did not constrain $\boldsymbol{\Omega}$ to have bounded spectral norm). An expectation/conditional maximization (ECM) algorithm was developed to perform this joint estimation.
\textbf{Causal inference.} In observational studies, we are often interested in estimating the causal effect of a treatment $T$ on an outcome $y$, which requires proper adjustment of a set of potential confounders $\boldsymbol{x} \in \mathbb{R}^{p}$. When $p > n$, direct control for all potential confounders is infeasible and standard methods such as propensity scoring \cite{RosenbaumRubin1983} often fail. In this case, it is crucial to impose a low-dimensional structure on the confounder space. Given data $(y_i, T_i, \boldsymbol{x}_i), i=1, \ldots, n,$ where $T_i$ is the treatment effect, \cite{AntonelliParmigianiDominici2019} estimated the (homogeneous) average treatment effect (ATE) $\Delta(t_1, t_2 ) = E(Y(t_1)-Y(t_2))$ by utilizing the model,
\begin{equation} \label{homogeneoustreatmentefectmodel}
y_i \,|\:T_i, \boldsymbol{x}_i, \beta_0, \beta_t, \boldsymbol{\beta}, \sigma^2 \sim \mathcal{N}( \beta_0 + \beta_t T_i + \boldsymbol{x}_i^T \boldsymbol{\beta}, \sigma^2 ).
\end{equation}
Under \eqref{homogeneoustreatmentefectmodel}, the ATE is straightforwardly estimated as $\Delta (t_1, t_2) = (t_1 - t_2) \widehat{\beta}_t$. \cite{AntonelliParmigianiDominici2019} endowed the coefficients of the confounders $\boldsymbol{\beta}$ with the SSL prior \eqref{SSLprior}. In addition, \cite{AntonelliParmigianiDominici2019} also weighted the sparsity parameter $\theta$ in \eqref{SSLprior} by raising $\theta$ to a power $w_j, j=1, \ldots, p$, for each covariate, in order to better prioritize variables belonging to the slab (i.e. $\gamma_j = 1$) if they are also associated with the treatment. \cite{AntonelliParmigianiDominici2019} further extended the model \eqref{homogeneoustreatmentefectmodel} to the more general case of heterogeneous treatment effects.
\textbf{Regression with grouped variables.} Group structure arises in many statistical applications. For example, in genomics, genes within the same pathway may form a group and act in tandem to regulate a biological system. For regression with grouped variables, we can model the response $\boldsymbol{y}$ as
\begin{equation} \label{groupmodel}
\boldsymbol{y} = \displaystyle \sum_{g=1}^{G} \boldsymbol{X}_g \boldsymbol{\beta}_g + \boldsymbol{\varepsilon}, \hspace{.5cm} \boldsymbol{\varepsilon} \sim \mathcal{N}_n ( \boldsymbol{0}, \sigma^2 \boldsymbol{I}_n),
\end{equation}
where $\boldsymbol{\beta}_g \in \mathbb{R}^{m_g}$ is a coefficients \textit{vector} of length $m_g$, and $\boldsymbol{X}_g$ is an $n \times m_g$ covariate matrix corresponding to group $g = 1, \ldots, G$. Under model \eqref{groupmodel}, it is often of practical interest to select non-negligible groups and estimate their effects. To this end, \cite{BaiMoranAntonelliChenBoland2019} introduced the \textit{spike-and-slab group lasso} (SSGL). To regularize groups of coefficients, the SSGL replaces the univariate Laplace densities in the univariate SSL \eqref{SSLprior} with \textit{multivariate} Laplace densities. The SSGL prior is
\begin{equation} \label{ssgroupLASSO}
\begin{array}{rl}
p ( \boldsymbol{\beta} \,|\:\theta ) = & \displaystyle \prod_{g=1}^{G} \left[ (1- \theta) \boldsymbol{\Psi} ( \boldsymbol{\beta}_g \,|\:\lambda_0 ) + \theta \boldsymbol{\Psi} ( \boldsymbol{\beta}_g \,|\:\lambda_1 ) \right], \\
\theta \sim & \mathcal{B}eta(a,b),
\end{array}
\end{equation}
where $\boldsymbol{\Psi} (\boldsymbol{\beta}_g \,|\:\lambda ) \propto \lambda^{m_g} e^{- \lambda \lVert \boldsymbol{\beta}_g \rVert_2 }$ and $\lambda_0 \gg \lambda_1$. The SSGL \eqref{ssgroupLASSO} is a two-group refinement of an $\ell_2$ penalty on groups of coefficients. Accordingly, the posterior mode under the SSGL thresholds entire groups of coefficients to zero, while simultaneously estimating the effects of nonzero groups and circumventing the estimation bias of the original group lasso \cite{YuanLin2006}. \cite{BaiMoranAntonelliChenBoland2019} developed an efficient blockwise-coordinate ascent algorithm to implement the SSGL model.
\textbf{Nonparameteric additive regression.} The advent of the SSGL prior \eqref{ssgroupLASSO} paved the way for the spike-and-slab lasso methodology to be extended to nonparametric problems. \cite{BaiMoranAntonelliChenBoland2019} introduced the \textit{nonparametric spike-and-slab lasso} (NPSSL) for sparse generalized additive models (GAMs). Under this model, the response surface is decomposed into the sum of univariate functions,
\begin{align} \label{GAM}
y_i = \sum_{j=1}^{p} f_j(x_{ij}) + \varepsilon_i, \hspace{.5cm} \varepsilon_i \overset{iid}{\sim} \mathcal{N}(0, \sigma^2).
\end{align}
In \cite{BaiMoranAntonelliChenBoland2019}, each of the $f_j$'s is approximated using a basis expansion, or a linear combination of basis functions $\mathcal{B}_j = \{ g_{j1}, \ldots, g_{jd} \}$, i.e.
\begin{equation} \label{basisexpansion}
f_j(x_{ij}) \approx \sum_{k=1}^{d} g_{jk} ( x_{ij}) \beta_{jk}.
\end{equation}
Under sparsity, most of the $f_j$'s in \eqref{GAM} are assumed to be $f_j = 0$. This is equivalent to assuming that most of the weight vectors $\boldsymbol{\beta}_j = (\beta_{j1}, \ldots, \beta_{jd})^T$, $j=1, \ldots, p$, in \eqref{basisexpansion} are equal to $\boldsymbol{0}_d$. The NPSSL is implemented by endowing the basis coefficients $\boldsymbol{\beta} = (\boldsymbol{\beta}_1^T, \ldots, \boldsymbol{\beta}_p^T)^T$ with the SSGL prior \eqref{ssgroupLASSO} to simultaneously select and estimate nonzero functionals. In addition, \cite{BaiMoranAntonelliChenBoland2019} also extended the NPSSL to identify and estimate the effects of nonlinear interaction terms $f_{rs} (X_{ir}, X_{is}), r \neq s$.
\textbf{Functional regression.} The spike-and-slab lasso methodology has also been extended to functional regression, where the response $y(t)$ is a function that \textit{varies} over some continuum $T$ (often time). A very popular model in this framework is the nonparametric varying coefficient model,
\begin{equation} \label{vcmodel}
y_i(t) = \sum_{k=1}^{p} x_{ik}(t) \beta_k(t) + \varepsilon_i(t), \hspace{.5cm} t \in T,
\end{equation}
where $y_i(t)$ and $x_{ik}(t)$ are time-varying responses and covariates respectively and $\varepsilon_i(t)$ is a zero-mean stochastic process which captures the within-subject temporal correlations for the $i$th subject. Under \eqref{vcmodel}, the $\beta_k(t)$'s are smooth functions of time (possibly $\beta_k(t) = 0$ for all $t \in T$), and our primary interest is in estimation and variable selection from the $\beta_k(t)$'s, $k=1, \ldots, p$.
\hspace{.3cm} \cite{BaiBolandChen2019} introduced the nonparametric varying coefficient spike-and-slab lasso (NVC-SSL) to simultaneously select and estimate the smooth functions $\beta_k(t), k=1, \ldots, p$. Similarly as with the NPSSL, these functions are approximated using basis expansions of smoothing splines, and the basis coefficients are endowed with the SSGL prior \eqref{ssgroupLASSO}. Unlike GAMs, however, the NVC-SSL model does \textit{not} assume homoscedastic, independent error terms. Instead, the NVC-SSL model accounts for within-subject temporal correlations in its estimation procedure.
\textbf{False discovery rate control with missing data}. Sorted L-One Penalized Estimator (SLOPE) is an elaboration of the LASSO tailored to false discovery control by assigning more penalty to the larger coefficients \cite{BodganVanDenBergSabattiSuCandes2015}. SSL, on the other hand, penalizes large coefficients less and its false discovery rate is ultimately determined by a combination of the prior inclusion weight $\theta$ and penalties $\lambda_1$ and $\lambda_0$. \cite{JiangBogdanJosseMiasojedowRockovaTBG2019} propose a hybrid procedure called adaptive Bayesian SLOPE, which effectively combines the SLOPE method (sorted $l_1$ regularization) together with the SSL method in the context of variable selection with missing covariate values.
As with SSL, the coefficients are regarded as arising from a hierarchical model consisting of two groups: (1) the spike for the inactive and (2) the slab for the active. However, instead of assigning spike priors for each covariate, they propose a joint ``SLOPE'' spike prior which takes into account ordering of coefficient magnitudes in order to control for false discoveries.
\section{Theoretical properties} \label{SSLTheory}
In addition to its computational tractability and its excellent finite-sample performance, the spike-and-slab LASSO has also been shown to provide strong theoretical guarantees. Although this paper focuses mainly on methodology, we briefly outline a few of the major theoretical developments for spike-and-slab LASSO methods.
A common theme in Bayesian asymptotic theory is the study of the learning rate of posterior point estimates (such as the mean, median or mode) and/or of the full posterior. Working under the frequentist assumption of a ``true'' underlying model, the aim under the former is to study the \textit{estimation} rate of point estimators under a given risk function, such as expected squared error loss. From a fully Bayes perspective, one may also be interested in the \textit{posterior contraction rate}, or the speed at which the \textit{entire} posterior contracts around the truth. In both cases, { the frequentist minimax estimation rate is a useful benchmark, since the posterior cannot contract faster than this rate} \cite{GhosalGhoshVanDerVaart2000}.
In a variety of contexts, including the Gaussian sequence model, sparse linear regression, and graphical models, the SSL global posterior mode has been shown to achieve the { minimax estimation rate} \cite{GanNarisettyLiang2018,Rockova2018,RockovaGeorge2016Abel,RockovaGeorge2018}. From a fully Bayesian perspective, the \textit{entire} posterior under SSL or SSL-type priors has \textit{also} been shown to achieve (near) optimal posterior contraction rates in the contexts of the Gaussian sequence model, linear regression, regression with grouped variables, nonparametric additive regression, and functional regression \cite{BaiBolandChen2019, BaiMoranAntonelliChenBoland2019, NieRockova2020, Rockova2018, RockovaGeorge2016Abel,RockovaGeorge2018}. It is not necessarily the case that the posterior mode and the full posterior contract at the same rate \cite{CastilloMismer2018,CastilloSchmidtHieberVanDerVaart2015}. These theoretical results thus show that the SSL is optimal from \textit{both} penalized likelihood \textit{and} fully Bayesian perspectives.
\section{Discussion} \label{Discussion}
In this paper, we have reviewed the spike-and-slab LASSO \eqref{SSLprior}. The SSL forms a continuum between the { penalized likelihood LASSO and the Bayesian point-mass spike-and-slab frameworks}, borrowing strength from both constructs while addressing limitations of each. First, the SSL employs a \textit{non}-separable penalty that self-adapts to ensemble information about sparsity and that performs selective shrinkage. Second, the SSL is amenable to fast maximum \textit{a posteriori} finding algorithms which can be implemented in a highly efficient, scalable manner. Third, the posterior mode under the SSL prior can automatically perform both variable selection and estimation. Finally, uncertainty quantification for the SSL can be attained by either debiasing the posterior modal estimate or by utilizing efficient approaches to posterior sampling. Beyond linear regression, the spike-and-slab LASSO methodology is broadly applicable to a wide number of statistical problems, including generalized linear models, factor analysis, graphical models, and nonparametric regression.
{
\section*{Acknowledgments}
This work was supported by funding from the University of South Carolina College of Arts \& Sciences, the James S. Kemper Foundation Faculty Research Fund at the University of Chicago Booth School of Business, and NSF Grants DMS-1916245, DMS-1944740.
}
\end{document} |
\begin{document}
\title{The Master Equation in a Bounded Domain with Neumann conditions}
\author{Michele Ricciardi}\thanks{Dipartimento di Informatica, Universit\`{a} degli studi di Verona.
Via S. Francesco, 22, 37129 Verona (VR), Italy.
\texttt{michele.ricciardi@univr.it}}
\date{\today}
\maketitle
\begin{abstract}
In this article we study the well-posedness of the Master Equation of Mean Field Games in a framework of Neumann boundary condition. The definition of solution is closely related to the classical one of the Mean Field Games system, but the boundary condition here leads to two Neumann conditions in the Master Equation formulation, for both space and measure. The global regularity of the linearized system, which is crucial in order to prove the existence of solutions, is obtained with a deep study of the boundary conditions and the global regularity at the boundary of a suitable class of parabolic equations.
\end{abstract}
\section{Introduction}
Mean Field Games theory is devoted to the study of differential games with a large number $N$ of small and indistinguishable agents. The theory was initially introduced by J.-M. Lasry and P.-L. Lions in 2006 (\cite{LL1, LL2, LL3, LL-japan}), using tools from mean-field theories, and in the same years by P. Caines, M. Huang and R. Malham\'{e} \cite{HCM}.
The macroscopic description used in mean field game theory leads to study coupled systems of PDEs, where the Hamilton-Jacobi-Bellman equation satisfied by the single agent's value function $u$ is coupled with the Kolmogorov Fokker-Planck equation satisfied by the distribution law of the population $m$. The simplest form of this system is the following
\begin{equation}\label{meanfieldgames}
\begin{cases}
-\partial_t u - \mathrm{tr}(a(x)D^2u) +H(x,Du)=F(x,m)\,,\\
\partial_t m - \sum\limits_{i,j} \partial_{ij}^2 (a_{ij}(x)m) -\mathrm{div}(mH_p(x,Du))=0\,,\\
m(0)=m_0\,, \hspace{2cm} u(T)=G(x,m(T))\,.
\end{cases}
\end{equation}
Here, $H$ is called the \emph{Hamiltonian} of the system, whereas $a$ is a uniformly elliptic matrix, representing the square of the diffusion term in the stochastic dynamic of the generic player, and $F$ and $G$ are the running cost and the final cost related to the generic player.\\
In his lectures at Coll\`{e}ge de France \cite{prontoprontopronto}, P.-L. Lions proved that the solutions $(u,m)$ of \eqref{meanfieldgames} are the trajectories of a new infinite dimensional partial differential equation.
This $PDE$ is called \textbf{\emph{Master Equation}} and summarizes the informations contained in \eqref{meanfieldgames} in a unique equation.
The definition of the Master Equation is related to its characteristics, which are solution of the MFG system. To be more precise, considering the solution $(u,m)$ of the system \eqref{meanfieldgames} with initial condition $m(t_0)=m_0$, one defines the function
\begin{equation}\label{defu}
U:[0,T]\times\Omega\times\mathcal{P}(\Omega)\to\R\,,\qquad U(t_0,x,m_0)=u(t_0,x)\,,
\end{equation}
where $\Omega\subseteq\R^d$ and $\mathcal{P}(\Omega)$ is the set of Borel probability measures on $\Omega$.
In order to make sense of this definition, the equation \eqref{meanfieldgames} must have a unique solution defined in $[0,T]\times\Omega$, for all $(t_0,m_0)$. So, we assume that $F$ and $G$ are \emph{monotone} functions with respect to the measure variable, a structure condition which ensures the existence of a unique solution for large time intervals.
If we compute, at least formally, the equation satisfied by $U$, we obtain a Hamilton-Jacobi equation in the space of measures, called Master Equation.\\
The relevance of the Master Equation was recognized in different papers, and most important topics like existence, uniqueness and regularity results have been developed. For example, in \cite{14} and \cite{15} Bensoussan, Frehse and Yam reformulated this equation as a PDE set on an $L^2$ space, and in \cite{24} Carmona and Delarue interpreted it as a decoupling field of forward-backward stochastic differential equations in infinite dimension. A very general result of well-posedness of the Master Equation was given by Cardaliaguet, Delarue, Lasry and Lions in \cite{card}.
So far, most of the literature, especially in the Master Equation's papers, considers the case where the state variable $x$ belongs to the flat torus (i.e. periodic solutions, $\Omega=\mathbb{T}^d$), or, especially in the probabilistic literature, in the whole space $\R^d$.
But in many economic and financial applications it is useful to work with a process that remains in a certain domain of existence; thus, some conditions at the boundary need to be prescribed. See for instance the models analyzed by Achdou, Buera et al. in \cite{gol}.
In this paper we want to analyze this situation, by studying the well-posedness of the Master Equation in a framework of Neumann condition at the boundary.
In this case the MFG system \eqref{meanfieldgames} is constrained with the following boundary conditions: for all $x\in\partial\Omega$
\begin{equation}\label{fame}
a(x)D_x u(t,x)\cdot\nu(x)=0\,,\qquad \mathlarger{[}a(x) Dm(t,x)+m(H_p(x,Du)+\tilde{b}(x))\mathlarger{]}\cdot\nu(x)=0\,,
\end{equation}
where $\nu(\cdot)$ is the outward normal at $\partial\Omega$ and $\tilde{b}$ is a vector field defined as follows:
$$
\tilde{b}_i(x)=\mathlarger{\sum}\limits_{j=1}^d\frac{\partial a_{ji}}{\partial x_j}(x)\hspace{0.08cm},\hspace{2cm}i=1,\dots,d\hspace{0.08cm}.
$$
The Master Equation, in this case, takes the following form
\begin{equation}\begin{split}
\label{Master}
\left\{
\begin{array}{rl}
&-\,\partial_t U(t,x,m)-\mathrm{tr}\left(a(x)D_x^2 U(t,x,m)\right)+H\left(x,D_x U(t,x,m)\right)\\&-\mathlarger{\ensuremath{\int_{\Omega}}}\mathrm{tr}\left(a(y)D_y D_m U(t,x,m,y)\right)dm(y)\\&+\mathlarger{\ensuremath{\int_{\Omega}}} D_m U(t,x,m,y)\cdot H_p(y,D_x U(t,y,m))dm(y)= F(x,m)\\&\mbox{in }(0,T)\times\Omega\times\mathcal{P}(\Omega)\hspace{0.08cm},
\\
&U(T,x,m)=G(x,m)\hspace{1cm}\mbox{in }\Omega\times\mathcal{P}(\Omega)\hspace{0.08cm},
\\
&a(x)D_x U(t,x,m)\cdot\nu(x)=0\hspace{1cm}\quad\,\mbox{for }(t,x,m)\in(0,T)\times\partial\Omega\times\mathcal{P}(\Omega)\,,\\
&a(y)D_m U(t,x,m,y)\cdot\nu(y)=0\hspace{1cm}\mbox{for }(t,x,m,y)\in(0,T)\times\Omega\times\mathcal{P}(\Omega)\times\partial\Omega\,,
\end{array}
\right.
\end{split}\end{equation}
where $D_mU$ is a derivation of $U$ with respect to the measure, whose precise definition will be given later. This definition, anyway, is strictly related to the one given by Ambrosio, Gigli and Savar\'{e} in \cite{ags} and by Lions in \cite{prontoprontopronto}.\\
We stress the fact that the last boundary condition, i.e.
$$
a(y)D_m U(t,x,m,y)\cdot\nu(y)=0\hspace{1cm}\mbox{for }(t,x,m,y)\in(0,T)\times\Omega\times\mathcal{P}(\Omega)\times\partial\Omega\,,
$$
is completely new in the literature. It relies on the fact that we have to pay attention to the space where $U$ is defined, i.e. $[0,T]\times\Omega\times\mathcal{P}(\Omega)$. Then, together with final data and Neumann condition with respect to $x$, there is another boundary condition caused by the boundary of $\mathcal{P}(\Omega)$.\\
The importance of this equation arises in the so-called \emph{convergence problem}. The Mean Field Games system approximates the $N$-player differential game, in the sense that the optimal strategies in the Mean Field Games system provide approximated Nash equilibria (called $\varepsilon$-Nash equilibria) in the $N$-player game. See, for instance, \cite{resultuno}, \cite{resultdue}, \cite{resulttre}.
Conversely, the convergence of the Nash Equilibria in the $N$-player game towards an optimal strategy in the Mean Field Games presents many difficulties, due to the lack of compactness properties of the problem. Hence, the Master Equation plays an instrumental role in order to study this problem. A convergence result in a framework of Neumann conditions at the boundary will be given in the forthcoming paper \cite{prossimamente}.\\
There are many papers about the well-posedness of the Master Equation. We point out here that these papers are studied in two different contexts: the first case is the so-called \emph{First order Master Equation}, studied in this article, where the Brownian motions in the dynamic of the agents in the $N$-player differential game are independent each other. The second case is the \emph{Second order Master Equation}, or \emph{Master Equation with common noise}. In this case, the dynamic has also
an additional Brownian term $dW_t$, which is common to all players. This leads to a different and more difficult type of Master Equation, with some additional terms depending also on the second derivative $D_{mm}U$. It is relevant to say that Mean Field Games with common noise were already studied by Carmona, Delarue and Lacker in \cite{loacker}.
Some preliminary results about the Master Equation were given by Lions in \cite{prontoprontopronto} and a first exhaustive result of existence and uniqueness of solutions was proved, with a probabilistic approach, by Chassagneux, Crisan and Delarue in \cite{28}, who worked in a framework with diffusion and without common noise. Buckhdan, Li, Peng and Rainer in \cite{bucchin} proved the existence of a classical solution using probabilistic arguments, when there is no coupling and no common noise. Furthermore, Gangbo and Swiech proved a short time existence for the Master Equation with common noise, see \cite{nuova16}.
But the most important result in this framework was achieved by Cardaliaguet, Delarue, Lasry and Lions in \cite{card}, who proved existence and uniqueness of solutions for the Master Equation with and without common noise, including applications to the convergence problem, in a periodic setting ($\Omega=\mathbb{T}^d$). Other recent results about the Master Equation and the convergence problem can be found in \cite{dybala, nuova1, gomez, nuova14, cicciocaputo, nuova11, ramadan, nuova4, fifa21, tonali, koulibaly}.\\
This article follows the main ideas of \cite{card}, but many issues appear, connected to the Neumann boundary condition, and more effort has to be done in order to gain the same results.
The function $U$ is defined as in \eqref{defu} and some estimates like global bounds and global Lipschitz regularity are proved.
The main issue in order to obtain that $U$ solves \eqref{Master} is to prove the $\mathcal{C}^1$ character of $U$ with respect to $m$. This step requires a careful analysis of the linearized mean field game system (see \cite{card}) in order to prove strong regularity of $U$ in the space and in the measure variable.
However, these estimates requires strong regularities of $U$, and so of the Mean Field Games system, in the space and in the measure variable.
The space regularity is obtained in \cite{card} by differentiating the equation with respect to $x$. But in the Neumann case, and in general in any case of boundary conditions, these methods obviously cannot be applied so straightly, and these bounds are obtained using different kind of space-time estimates, which must be handled with care.
Indeed, regularity estimates for Neumann parabolic equation require compatibility conditions between initial and boundary data. Unfortunately, these compatibility conditions will be not always guaranteed in this context. This forces us to generalize the estimates obtained in \cite{card}, by a deeper study of the regularity of solutions for the Fokker-Planck equation.\\
The article is divided as follows.
In section $2$ we define some useful tools and we state the main assumptions we will need in order to prove the next results.
In the rest of the article (section $3$ to $6$), we analyze the well-posedness of the Master Equation \eqref{Master}.
The idea is quite classical: for each $(t_0,m_0)$ we consider the $MFG$ system \eqref{meanfieldgames} in $[t_0,T]\times\Omega$, with conditions \eqref{fame}, and we define
\begin{equation}\label{U}
U(t_0,x,m_0)=u(t_0,x)\,.
\end{equation}
Then we prove that $U$ is a solution of the Master Equation.\\
Sections $3$--$5$ are completely devoted to proving technical results to ensure this kind of differentiability.
In section $3$ we prove a first estimate for a solution $(u,m)$ of the Mean Field Games system, namely
$$
\norm{u}\amd\le C\,,\qquad\mathbf{d}_1(m(t),m(s))\le C|t-s|^\miezz\,,
$$
where $\mathbf{d}_1$ is a distance between measures called the \emph{Wasserstein distance}, whose definition will be given in Section $3.2$.
In section $4$ we use the definition of $U$ from the Mean Field Games system in order to prove a Lipschitz character of $U$ with respect to $m$:
$$
\norm{U(t,\cdot,m_1)-U(t,\cdot,m_2)}_{2+\alpha}\le C\mathbf{d}_1(m_1,m_2)\,.
$$
In section $5$ we prove the $\mathcal{C}^1$ character of $U$ with respect to $m$. This goes through different estimates on linearized MFG systems.
Once the $\mathcal{C}^1$ character of $U$ is proved, we can prove that $U$ is actually the unique solution of the Master Equation \eqref{Master}. This will be done in Section $6$.\\
\section{Notation, Assumptions and Main Result}
Throughout this chapter, we fix a time $T>0$. $\Omega\subset\R^d$ will be the closure of an open bounded set, with boundary of class $\mathcal{C}^{2+\alpha}$, and we define $Q_T:=[0,T]\times\Omega$.
For $n\ge0$ and $\alpha\in(0,1)$ we denote by $\mathcal{C}^{n+\alpha}(\Omega)$, or simply $\mathcal{C}^{n+\alpha}$, the space of functions $\phi\in\mathcal{C}^n(\Omega)$ such that, for each $\ell\in\N^r$, $1\le r\le n$, the derivative $D^\ell\phi$ is H\"{o}lder continuous with H\"{o}lder exponent $\alpha$. The norm is defined in the following way:
\begin{align*}
\norm{\phi}_{n+\alpha}:=\sum\limits_{|\ell|\le n}\norminf{D^\ell\phi}+\sum\limits_{|\ell|= n}\sup\limits_{x\neq y}\frac{|D^\ell\phi(x)-D^\ell\phi(y)|}{|x-y|^\alpha}\,.
\end{align*}
Sometimes, in order to deal with Neumann boundary conditions, we will need to work with a suitable subspace of $\mathcal{C}^{n+\alpha}(\Omega)$.
So we will call $\mathcal{C}^{n+\alpha,N}(\Omega)$, or simply $\mathcal{C}^{n+\alpha,N}$, the set of functions $\phi\in\mathcal{C}^{n+\alpha}$ such that $aD\phi\cdot\nu_{|\partial\Omega}=0$, endowed with the same norm $\norm{\phi}_{n+\alpha}$.\\
Then, we define several parabolic spaces we will need to work with during the chapter, starting from $\mathcal{C}^{\frac{n+\alpha}{2},n+\alpha}([0,T]\times\Omega)$.
We say that $\phi:[0,T]\times\Omega\to\R$ is in $\mathcal{C}^{\frac{n+\alpha}{2},n+\alpha}([0,T]\times\Omega)$ if $\phi$ is continuous in both variables, together with all derivatives $D_t^rD_x^s\phi$, with $2r+s\le n$. Moreover, $\norm{\phi}_{\frac{n+\alpha}{2},n+\alpha}$ is bounded, where
\begin{align*}
\norm{\phi}_{\frac{n+\alpha}{2},n+\alpha}:=\sum\limits_{2r+s\le n}\norminf{D_t^rD^s_x\phi}&+\sum\limits_{2r+s=n}\sup\limits_t\norm{D_t^rD_x^s\phi(t,\cdot)}_{\alpha}\\&+\sum\limits_{0<n+\alpha-2r-s<2}\sup\limits_x\norm{D_t^rD_x^s\phi(\cdot,x)}_{\frac{n+\alpha-2r-s}{2}}\,.
\end{align*}
The space of continuous space-time functions which satisfy a H\"{o}lder condition in $x$ will be denoted by $\mathcal{C}^{0,\alpha}([0,T]\times\Omega)$. It is endowed with the norm
$$
\norm{\phi}_{0,\alpha}=\sup\limits_{t\in[0,T]}\norm{\phi(t,\cdot)}_\alpha\,.
$$
The same definition can be given for the space $\mathcal{C}^{\alpha,0}$. Finally, we define the space $\mathcal{C}^{1,2+\alpha}$ of functions differentiable in time and twice differentiable in space, with all derivatives in $\mathcal{C}^{0,\alpha}(\overline{Q_T})$. The natural norm for this space is
$$
\norm{\phi}_{1,2+\alpha}:=\norminf{\phi}+\norm{\phi_t}_{0,\alpha}+\norminf{D_x\phi}+\norm{D^2_x\phi}_{0,\alpha}\,.
$$
We note that, thanks to \emph{Lemma 5.1.1} of \cite{lunardi}, the first order derivatives of $\phi\in\mathcal{C}^{1,2}$ satisfy also a H\"{o}lder condition in time. Namely,
\begin{equation}\label{precisissimongulaeva}
\norm{D_x\phi}_{\miezz,\alpha}\le C\norm{\phi}_{1,2+\alpha}\,.
\end{equation}
In order to study distributional solutions of the Fokker--Planck equation, we also need to define a structure for the dual spaces of regular functions.
We define, for $n\ge0$ and $\alpha\in(0,1)$, the space $\mathcal{C}^{-(n+\alpha)}(\Omega)$, called for simplicity $\mathcal{C}^{-(n+\alpha)}$ in this article, as the dual space of $\mathcal{C}^{n+\alpha}$, endowed with the norm
$$
\norm{\rho}_{-(n+\alpha)}=\sup\limits_{\norm{\phi}_{n+\alpha}\le 1}\langle\rho,\phi\rangle\,.
$$
With the same notations we define the space $\mathcal{C}^{-(n+\alpha),N}$ as the dual space of $\mathcal{C}^{n+\alpha,N}$, endowed with the same norm:
$$
\norm{\rho}_{-(n+\alpha),N}=\sup\limits_{\substack{\norm{\phi}_{n+\alpha}\le 1\\aD\phi\cdot\nu_{|\partial\Omega}=0}}\langle \rho,\phi \rangle\,.
$$
Finally, for $k\ge1$ and $1\le p\le+\infty$, we can also define the space $W^{-k,p}(\Omega)$, called for simplicity $W^{-k,p}$, as the dual space of $W^{k,p}(\Omega)$, endowed with the norm
$$
\norm{\rho}_{W^{-k,p}}=\sup\limits_{\norm{\phi}_{W^{k,p}}\le 1}\langle\rho,\phi\rangle\,.
$$
\begin{defn}
Let $m_1,m_2\in\mathcal{P}(\Omega)$ be two Borel probability measures on $\Omega$.\\
We call the \emph{Wasserstein distance} between $m_1$ and $m_2$, and we write $\mathbf{d}_1(m_1,m_2)$, the quantity
\begin{equation}\label{wass1}
\mathbf{d}_1(m_1,m_2):=\sup\limits_{Lip(\phi)\le 1}\ensuremath{\int_{\Omega}} \phi(x)d(m_1-m_2)(x)\,.
\end{equation}
\end{defn}
We note that we can also write \eqref{wass1} as
\begin{align}\label{wass}
\mathbf{d}_1(m_1,m_2):=\sup\limits_{\substack{\norm{\phi}_{W^{1,\infty}}\le C\\Lip(\phi)\le 1}}\ensuremath{\int_{\Omega}} \phi(x)d(m_1-m_2)(x)\,,
\end{align}
for a certain $C>0$. Actually, for a fixed $x_0\in\Omega$, we can restrict ourselves to the functions $\phi$ such that $\phi(x_0)=0$, since
$$
\ensuremath{\int_{\Omega}} \phi(x)d(m_1-m_2)(x)=\ensuremath{\int_{\Omega}} (\phi(x)-\phi(x_0))d(m_1-m_2)(x)\,,
$$
and these functions obviously satisfy $\norm{\phi}_{W^{1,\infty}}\le C$ for a certain $C>0$.
We will always work with \eqref{wass}, where the restriction in $W^{1,\infty}$ allows us to obtain some desired estimates with respect to $\mathbf{d}_1$.
\begin{comment}
We will need also some different distances, that are a natural extension of the Wasserstein distance.
\begin{equation}gin{defn}
Let $m_1,m_2\in\noalign{
}athcal{P}(\Omega)$, and let $\alpha\in(0,1)$. We define a generalized Wasserstein distance in this way:
$$
\daw{\alpha}(m_1,m_2):=\sup\limits_{\norm{\phi}_{\alpha}\le 1}\ensuremath{\int_{\Omega}} \phi(x)d(m_1-m_2)(x)\,.
$$
\end{defn}
Actually, we can define this distance for all $\alpha>0$, but only in case of $\alpha\in(0,1)$ these distances are equivalent to the Wasserstein one, as stated in the following proposition.
\begin{equation}gin{prop}
Let $\alpha\in(0,1)$. Then the following inequalities hold true:
\begin{equation}gin{equation}\lambdabel{equivwass}
\noalign{
}athbf{d}_1(m_1,m_2)\le\daw{\alpha}(m_1,m_2)\le 3\,\noalign{
}athbf{d}_1(m_1,m_2)^\alpha\,.\\
\end{equation}
Hence, $\noalign{
}athbf{d}_1$ and $\daw{\alpha}$ are equivalent distances.
\begin{equation}gin{proof}
The inequality
$$
\noalign{
}athbf{d}_1(m_1,m_2)\le\daw{\alpha}(m_1,m_2)
$$
is obvious. In order to prove the other one, we take $\phi$ $\alpha$-H\"older, with H\"older constant smaller than $1$, and we consider, for $L>0$, the following function:
$$
\phi_L(x)=\sup\limits_{z\in\Omega}\left(\phi(z)-L|x-z|\right)\,.
$$
It is easy to prove that $\phi_L$ is an $L$-Lipschitz function and that $\phi_L(x)\ge\phi(x)$.
We call $z_x$ a point where the $\sup$ is attained. Then we have
$$
\phi(x)\le\phi(z_x)-L|x-z_x|\implies L|x-z_x|\le|x-z_x|^\alpha\implies |x-z_x|\le L^{-\frac{1}{1-\alpha}}\,.
$$
Using these computations, we find
$$
\norm{\phi-\phi_L}_{\infty}=\sup\limits_{x\in\Omega}(\phi(z_x)-L|x-z_x|-\phi(x))\le L^{-\frac{\alpha}{1-\alpha}}\,.
$$
Now we can directly show the last inequality of \eqref{equivwass}:
\begin{equation}gin{align*}
\daw{\alpha}(m_1,m_2)&=\sup\limits_{\norm{\phi}_{\alpha}\le 1}\left(\ensuremath{\int_{\Omega}}(\phi(x)-\phi_L(x))d(m_1-m_2)(x)+\ensuremath{\int_{\Omega}}\phi_L(x)d(m_1-m_2)(x)\right)\le\\
&\le 2L^{-\frac{\alpha}{1-\alpha}}+L\,\daw{1}(m_1,m_2)\,.
\end{align*}
Choosing $L=\noalign{
}athbf{d}_1(m_1,m_2)^{-(1-\alpha)}$, we finally obtain
$$
\daw{\alpha}(m_1,m_2)\le 3\,\noalign{
}athbf{d}_1(m_1,m_2)^\alpha\,.
$$
\end{proof}
\end{prop}
Finally, we define the distance $\daw{0}$ in this way:
$$
\daw{0}(m_1,m_2):=\sup\limits_{\norminf{\phi}\le 1}\ensuremath{\int_{\Omega}} \phi(x)d(m_1-m_2)(x)\,.
$$
We note that this distance is well defined only if $m_1-m_2$ has a positive density with respect to the Lebesgue measure.
We note that $\noalign{
}athcal{P}(\Omega)\subseteq W^{-1,\infty}(\Omega)$. Hence, the distance $\noalign{
}athbf{d}_1$ is just the restriction of the $\norm{\cdot}_{W^{-1,\infty}}$ to the set $\noalign{
}athcal{P}(\Omega)$.\\
\end{comment}
In order to give a sense to equation \eqref{Master}, we need to define a suitable notion of derivative of $U$ with respect to the measure $m$.
\begin{defn}\label{dmu}
Let $U:\mathcal{P}(\Omega)\to\R$. We say that $U$ is of class $\mathcal{C}^1$ if there exists a continuous map $K:\mathcal{P}(\Omega)\times\Omega\to\R$ such that, for all $m_1$, $m_2\in\mathcal{P}(\Omega)$ we have
\begin{equation}\label{deu}
\lim\limits_{s\to0}\frac{U(m_1+s(m_2-m_1))-U(m_1)}{s}=\ensuremath{\int_{\Omega}} K(m_1,x)(m_2(dx)-m_1(dx))\,.
\end{equation}
\end{defn}
Note that the definition of $K$ is up to additive constants. Then, we define the derivative $\dm{U}$ as the unique map $K$ satisfying \eqref{deu} and the normalization convention
$$
\ensuremath{\int_{\Omega}} K(m,x)dm(x)=0\,.
$$
As an immediate consequence, we obtain the following equality, which we will use very often in the rest of the chapter: for each $m_1$, $m_2\in\mathcal{P}(\Omega)$ we have
$$
U(m_2)-U(m_1)=\int_0^1\ensuremath{\int_{\Omega}}\dm{U}(m_1+s(m_2-m_1),x)(m_2(dx)-m_1(dx))\,ds\,.
$$
Finally, we can define the \emph{intrinsic derivative} of $U$ with respect to $m$.
\begin{defn}\label{Dmu}
Let $U:\mathcal{P}(\Omega)\to\R$. If $U$ is of class $\mathcal{C}^1$ and $\dm{U}$ is of class $\mathcal{C}^1$ with respect to the last variable, we define the intrinsic derivative $D_mU:\mathcal{P}(\Omega)\times\Omega\to\R^d$ as
$$
D_mU(m,x):=D_x\dm{U}(m,x)\,.
$$
\end{defn}
\begin{comment}
In order to work with Neumann conditions, we need to readapt these distances in this framework, in the following way:
\begin{equation}gin{defn}
Let $m_1, m_2\in\noalign{
}athcal{P}(\Omega)$, and let $\alpha\in(0,2)$. We define the following distance:
$$
\daw{\alpha,N}(m_1,m_2)=\sup\limits_{\substack{\norm{\phi}_{\alpha}\le 1\\aD\phi\cdot\nu_{|\partial\Omega}=0}}\ensuremath{\int_{\Omega}} \phi(x)d(m_1-m_2)(x)\,.
$$
\end{defn}
If $\alpha<1$, $\phi$ may not have a derivative. In this case, with the condition $aD\phi\cdot\nu_{|\partial\Omega}$, we mean that
$$
-\int_{\Omega}\phi\,\noalign{
}athrm{div}(aD\xi_\deltalta)\,dx+\int_{\partial\Omega}\phi\,aD\xi_\deltalta\cdot\nu\,dx\overset{\deltalta\to0}{\to}0\,,
$$
for each $\xi_\deltalta\ge0$ such that $\xi_\deltalta=0$ in $\Omega_{\deltalta}$, $\xi_\deltalta=1$ in $\partial\Omega$.
A very useful result is the following:
\begin{equation}gin{prop}
For each $\alpha\in(0,2)$ there exists a $C>0$ such that, $\forall m_1,m_2\in\noalign{
}athcal{P}(\Omega)$
$$
\daw{\alpha,N}(m_1,m_2)\le\daw{\alpha}(m_1,m_2)\le C\daw{\alpha,N}(m_1,m_2)\,.
$$
In particular, the distances $\daw{\alpha}$ and $\daw{\alpha,N}$ are equivalent.
\begin{equation}gin{proof}
The left inequality is obvious.\\
To prove the right inequality we take a function $\phi\in\noalign{
}athcal{C}^{\alpha}$ with $\norm{\phi}_\alpha\le 1$ and we make a suitable approximation of it.\\
In order to do that, we first need some useful tools.\\
For $\deltalta>0$ and $x\in\Omega\setminus\Omega_{\deltalta}$, we consider the following ODE in $\R^d$:
\begin{equation}gin{align}
\begin{equation}gin{cases}
\xi'(t;x)=-a(\xi(t;x))\nu(\xi(t;x))\,,\\
\xi(0;x)=x\,,
\end{cases}
\end{align}
and the corresponding hitting time with $\partial\Omega_\deltalta$:
$$
T(x):=\inf\left\{t\ge0 \,|\, \xi(t;x)\notin\Omega\setminus\Omega_\deltalta\right\}\,.
$$
We have that $T(x)<+\infty$ for each $x\in\Omega\setminus\Omega_{\deltalta}$. To prove that, we consider the auxiliary function
$$
\Phi(t,x)=\deltalta-d(\xi(t;x)).
$$
So, the function $T(x)$ can be rewritten as
$$
T(x)=\inf\left\{ t\ge0\,|\, \Phi(t,x)=0 \right\}\,,
$$
and his finiteness is an obvious consequence of the decreasing character of $\Phi$ in time:
$$
\partial_t\Phi(t,x)=-Dd(\xi(t;x))\cdot\xi'(t;x)=-\lambdangle a(\xi(t;x))\nu(\xi(t;x)),\nu(\xi(t;x))\rangle\le-\lambdambda<0\,.
$$
Moreover, thanks to Dini's theorem we obtain that $T(x)$ is a $\noalign{
}athcal{C}^1$ function and his gradient is given by
$$
\nabla T(x)=-\frac{\nabla_x\Phi(T(x),x)}{\partial_t\Phi(T(x),x)}=\frac{\nu(\xi(T(x);x))\noalign{
}athrm{Jac}_x\xi(T(x);x)}{\lambdangle a\nu,\nu\rangle (\xi(T(x);x))}\,.
$$
Thanks to the regularity of $a$ and $\Omega$, we can differentiate w.r.t. $x$ the ODE \eqref{ODE} and obtain that $\xi(t;\cdot)\in\noalign{
}athcal{C}^{1+\gammamma}$ for each $\gammamma<1$. This implies $T(\cdot)\in\noalign{
}athcal{C}^{\alpha}$ and
$$
\norm{T}_\alpha\le C\,,
$$
for a certain $C=C(a,\Omega)$.\\
Now we define, for $\phi\in\noalign{
}athcal{C}^{\alpha}$ and $\deltalta>0$, the approximating functions $\phi_\deltalta$ in the following way:
$$
\phi_\deltalta(x)=
\left\{
\begin{equation}gin{array}{lll}
\phi(x)\quad & \noalign{
}box{if } & x\in\Omega_\deltalta\,,\\
\phi(\xi(T(x);x))\quad & \noalign{
}box{if } & x\in\Omega\setminus\Omega_\deltalta\,,
\end{array}
\right.
$$
eventually considering a $\noalign{
}athcal{C}^{\alpha}$ regularization in $\Omega_{\deltalta}\setminus{\Omega_{2\deltalta}}\,$.
\end{proof}
\end{prop}
\end{comment}
We need the following assumptions:
\begin{hp}\label{ipotesi}
Assume that
\begin{itemize}
\item [(i)] (Uniform ellipticity) $\norm{a(\cdot)}_{1+\alpha}<\infty$ and $\exists\,\mu>\lambda>0$ s.t. $\forall\xi\in\mathbb{R}^d$ $$ \mu|\xi|^2\ge\langle a(x)\xi,\xi\rangle\ge\lambda|\xi|^2\,;$$
\item [(ii)]$H:\Omega\times\R^d\to\R$, $G:\Omega\times\mathcal{P}(\Omega)\to\R$ and $F:\Omega\times\mathcal{P}(\Omega)\to\R$ are smooth functions, with $H$ Lipschitz with respect to the last variable;
\item [(iii)]$\exists C>0$ s.t.
$$
0< H_{pp}(x,p)\le C I_{d\times d}\hspace{0.08cm};
$$
\item [(iv)]$F$ satisfies, for some $0<\alpha<1$ and $C_F>0$,
$$
\ensuremath{\int_{\Omega}} \left(F(x,m)-F(x,m')\right) d(m-m')(x)\ge0
$$
and
$$
\sup\limits_{m\in\mathcal{P}(\Omega)}\left(\norm{F(\cdot,m)}_{\alpha}+\norm{\frac{\delta F}{\delta m}(\cdot,m,\cdot)}_{\alpha,2+\alpha}\right)+\mathrm{Lip}\left(\dm{F}\right)\le C_F\hspace{0.08cm},
$$
with
$$
\mathrm{Lip}\left(\dm{F}\right):=\sup\limits_{m_1\neq m_2}\left(\mathbf{d}_1(m_1,m_2)^{-1}\norm{\dm{F}(\cdot,m_1,\cdot)-\dm{F}(\cdot,m_2,\cdot)}_{\alpha,1+\alpha}\right)\hspace{0.08cm};
$$
\item [(v)]$G$ satisfies the same estimates as $F$ with $\alpha$ and $1+\alpha$ replaced by $2+\alpha$, i.e.
$$
\sup\limits_{m\in\mathcal{P}(\Omega)}\left(\norm{G(\cdot,m)}_{2+\alpha}+\norm{\frac{\delta G}{\delta m}(\cdot,m,\cdot)}_{2+\alpha,2+\alpha}\right)+\mathrm{Lip}\left(\dm{G}\right)\le C_G\hspace{0.08cm},
$$
with
$$
\mathrm{Lip}\left(\dm{G}\right):=\sup\limits_{m_1\neq m_2}\left(\mathbf{d}_1(m_1,m_2)^{-1}\norm{\dm{G}(\cdot,m_1,\cdot)-\dm{G}(\cdot,m_2,\cdot)}_{2+\alpha,2+\alpha}\right)\hspace{0.08cm};
$$
\item [(vi)] The following Neumann boundary conditions are satisfied:
\begin{align*}
&\left\langle a(y)D_y\dm{F}(x,m,y), \nu(y)\right\rangle_{|\partial\Omega}=0\,,\qquad \left\langle a(y)D_y\dm{G}(x,m,y),\nu(y)\right\rangle_{|\partial\Omega}=0\,,\\
&\langle a(x)D_xG(x,m), \nu(x)\rangle_{|\partial\Omega}=0\,,
\end{align*}
for all $m\in\mathcal{P}(\Omega)$.
\end{itemize}
\end{hp}
Some comments about the previous hypotheses: the first five are standard hypotheses in order to obtain existence and uniqueness of solutions for the Mean Field Games system. The hypotheses on the derivatives of $F$ and $G$ with respect to the measure will be essential in order to obtain some estimates on a linearized MFG system.
As regards hypothesis $(vi)$, the second and the third boundary conditions are natural compatibility conditions, essential to obtain a classical solution for the $MFG$ and the linearized $MFG$ system. The first boundary condition will be essential in order to prove the Neumann boundary condition of $D_mU$, see Corollary \ref{delarue}.
With these hypotheses we are able to prove existence and uniqueness of a classical solution for the Master Equation \eqref{Master}. The main result of this paper is the following.
\begin{thm}\label{settepuntouno}
Suppose hypotheses \ref{ipotesi} are satisfied. Then there exists a unique classical solution $U$ of the Master Equation \eqref{Master}.
\end{thm}
But first, we have to prove some preliminary estimates on the Mean Field Games system and some other estimates on a linearized Mean Field Games system, which will be essential in order to ensure the $\mathcal{C}^1$ character of $U$ with respect to $m$.
\section{Preliminary estimates and Mean Field Games system}
In this section we start by giving some technical results for linear parabolic equations, which will be useful in the rest of the Chapter.
Then we will obtain some preliminary estimates for the Master Equation, obtained by a deep analysis of the related Mean Field Games system.
We start with this technical Lemma.
\begin{lem}\label{sonobravo}
Suppose $a$ satisfies $(i)$ of Hypotheses \ref{ipotesi}, and $b,f\in L^\infty(Q_T)$. Furthermore, let $\psi\in\mathcal{C}^{1+\alpha,N}(\Omega)$, with $0\le\alpha<1$. Then the unique solution $z$ of the problem
\begin{equation*}
\begin{cases}
-z_t-\mathrm{tr}(a(x)D^2z)+b(t,x)\cdot Dz=f(t,x)\,,\\
z(T)=\psi\,,\\
aDz\cdot\nu_{|\partial\Omega}=0
\end{cases}
\end{equation*}
satisfies
\begin{equation}\label{estensione}
\norm{z}\amu\le C\left(\norminf{f}+\norm{\psi}_{1+\alpha}\right)\,.
\end{equation}
\begin{proof}
Note that, if $f$ and $b$ are continuous bounded functions, with $b$ depending only on $x$, this result is simply \emph{Theorem 5.1.18} of \cite{lunardi}. In the general case, we argue as follows.
We can write $z=z_1+z_2$, where $z_1$ satisfies
\begin{equation}\label{problemadue}
\begin{cases}
-{(z_1)}_t-\mathrm{tr}(a(x)D^2z_1)=0\,,\\
z_1(T)=\psi\,,\\
aDz_1\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation}
and $z_2$ satisfies
\begin{equation}\label{problemauno}
\begin{cases}
-{(z_2)}_t-\mathrm{tr}(a(x)D^2z_2)+b(t,x)\cdot Dz_2=f(t,x)-b(t,x)\cdot Dz_1\,,\\
z_2(T)=0\,,\\
aDz_2\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
\end{equation}
Since in the equation \eqref{problemadue} for $z_1$ we do not have a drift term depending on time, we can apply \emph{Theorem 5.1.18} of \cite{lunardi} and obtain
$$
\norm{z_1}\amu\le C\norm{\psi}_{1+\alpha}\,.
$$
As regards \eqref{problemauno}, obviously $z_2(T)\in W^{2,p}(\Omega)$ $\forall p$, and from the estimate of $z_1$ we know that $f-bDz_1\in L^\infty$. So we can apply the Corollary of \emph{Theorem IV.9.1} of \cite{lsu} to obtain that, $\forall r\ge\frac{d+2}{2}\,,$
\begin{equation*}
\norm{z_2}_{1-\frac{d+2}{2r},2-\frac{d+2}{r}}\le C\norminf{f-bDz_1}\le C\left(\norminf{f}+\norm{\psi}_{1+\alpha}\right)\,.
\end{equation*}
Choosing $r=\frac{d+2}{1-\alpha}$, one has $2-\frac{d+2}{r}=1+\alpha$, and so \eqref{estensione} is satisfied for $z_2$.
Since $z=z_1+z_2$, estimate \eqref{estensione} holds also for $z$. This concludes the proof.
\end{proof}
\end{lem}
If the datum $f=0$, we can generalize the result of Lemma \ref{sonobravo} when $\psi$ is only a Lipschitz function.
This result is well-known if $a\in\mathcal{C}^2(\Omega)$, by applying a classical Bernstein method. In our framework, we have the following result.
\begin{lem}\label{davverotecnico}
Let $a$ and $b$ be bounded continuous functions, and $\psi\in W^{1,\infty}(\Omega)$. Then the unique solution $z$ of the problem
\begin{equation}\label{bohmovrim}
\begin{cases}
-z_t-\mathrm{tr}(a(x)D^2z)+b(t,x)\cdot Dz=0\,,\\
z(T)=\psi\,,\\
aDz\cdot\nu_{|\partial\Omega}=0
\end{cases}
\end{equation}
satisfies a H\"{o}lder condition in $t$ and a Lipschitz condition in $x$, namely $\exists C$ such that
\begin{align}\label{nnavonmmna}
|z(t,x)-z(s,x)|\le C\norm{\psi}_{W^{1,\infty}}|t-s|^\miezz\,,\qquad|z(t,x)-z(t,y)|\le C\norm{\psi}_{W^{1,\infty}}|x-y|\,.
\end{align}
\begin{proof}
If $\psi\in\mathcal{C}^{1,N}$, estimate \eqref{nnavonmmna} is guaranteed by \eqref{estensione} of Lemma \ref{sonobravo}.
In the general case, we take $\psi^n\in\mathcal{C}^{1}$ such that $\psi^n\to\psi$ in $\mathcal{C}([0,T]\times\Omega)$ and $\norm{\psi^n}_{1}\le C\norm{\psi}_{W^{1,\infty}}$, and we want to make a suitable approximation of it in order to obtain a function $\tilde{\psi}^n\in\mathcal{C}^{1,N}$, also converging to $\psi$.\\
In order to do that, we first need some useful tools.\\
For $\delta>0$, $d(\cdot)$ the distance function from $\partial\Omega$, $\Omega_\delta=\{ x\in\Omega\,|\,d(x)\ge\delta \}$ and $x\in\Omega\setminus\Omega_{\delta}$, we consider the following ODE in $\R^d$:
\begin{align}\label{ODE}
\begin{cases}
\xi'(t;x)=-a(\xi(t;x))\nu(\xi(t;x))\,,\\
\xi(0;x)=x\,,
\end{cases}
\end{align}
where $\nu$ is an extension of the outward unit normal in $\Omega\setminus\Omega_{\delta}$. Actually, we know from \cite{cingul} that $$Dd(x)_{|\partial\Omega}=-\nu(x)\,,$$
so a suitable extension can be $\nu(x)=-Dd(x)\,$.
Then we consider the corresponding hitting time of $\partial\Omega_\delta$:
$$
T(x):=\inf\left\{t\ge0 \,|\, \xi(t;x)\notin\Omega\setminus\Omega_\delta\right\}\,.
$$
We have that $T(x)<+\infty$ for each $x\in\Omega\setminus\Omega_{\delta}$. To prove that, we consider the auxiliary function
$$
\Phi(t,x)=\delta-d(\xi(t;x))\,.
$$
So, the function $T(x)$ can be rewritten as
$$
T(x)=\inf\left\{ t\ge0\,|\, \Phi(t,x)=0 \right\}\,,
$$
and its finiteness is an obvious consequence of the decreasing character of $\Phi$ in time:
$$
\partial_t\Phi(t,x)=-Dd(\xi(t;x))\cdot\xi'(t;x)=-\langle a(\xi(t;x))\nu(\xi(t;x)),\nu(\xi(t;x))\rangle\le-\lambda<0\,.
$$
Moreover, thanks to Dini's theorem we obtain that $T(x)$ is a $\mathcal{C}^1$ function and its gradient is given by
$$
\nabla T(x)=-\frac{\nabla_x\Phi(T(x),x)}{\partial_t\Phi(T(x),x)}=\frac{\nu(\xi(T(x);x))\mathrm{Jac}_x\xi(T(x);x)}{\langle a\nu,\nu\rangle (\xi(T(x);x))}\,.
$$
Actually, thanks to the regularity of $a$ and $\Omega$, we can differentiate the ODE \eqref{ODE} w.r.t. $x$ and obtain that $\xi(t;\cdot)\in\mathcal{C}^{1}$.
Now we define the approximating functions $\tilde{\psi}^n$ in the following way:
\begin{equation}\label{psitildan}
\tilde{\psi}^n(x)=
\left\{
\begin{array}{lll}
\psi^n(x)\quad & \mbox{if } & x\in\Omega_\delta\,,\\
\psi^n(\xi(T(x);x))\quad & \mbox{if } & x\in\Omega\setminus\Omega_\delta\,,
\end{array}
\right.
\end{equation}
eventually considering a $\mathcal{C}^{1}$ regularization in $\Omega_{\delta}\setminus{\Omega_{2\delta}}\,$.
From the definition of $\tilde{\psi}^n$ and the $\mathcal{C}^1$ regularity of $\xi$ and $T$ we have $\tilde{\psi}^n\in\mathcal{C}^1$ and
$$
\|{\tilde{\psi}^n}\|_{1}\le C\norm{\psi^n}_1\le C\norm{\psi}_{W^{1,\infty}}\,.
$$
Moreover, since near the boundary $\tilde{\psi}^n$ is constant along the trajectories of $a(\cdot)\nu(\cdot)$, we have that on $\partial\Omega$
$$
a(x)D{\tilde{\psi}^n}(x)\cdot\nu(x)_{|\partial\Omega}=\frac{\partial\tilde{\psi}^n}{\partial (a\nu(x))}(x)_{|\partial\Omega}=0\,,
$$
so $\tilde{\psi}^n\in\mathcal{C}^{1,N}$.
Now we consider $z^n$ as the solution of \eqref{bohmovrim} with $\psi$ replaced by $\tilde{\psi}^n$. Then Lemma \ref{sonobravo} implies that $z^n$ satisfies
$$
\norm{z^n}_{\miezz,1}\le C\|{\tilde{\psi}^n}\|_1\le C\norm{\psi}_{W^{1,\infty}}\,.
$$
Then, Ascoli-Arzel\`a's Theorem tells us that $\exists z$ such that $z^n\to z$ in $\mathcal{C}([0,T]\times\Omega)$. Passing to the limit in the weak formulation of $z^n$, we obtain that $z$ is the unique solution of \eqref{bohmovrim}.
Finally, since $z^n$ satisfies \eqref{nnavonmmna}, we can pass to the pointwise limit as $n\to+\infty$ and obtain the estimate \eqref{nnavonmmna} for $z$. This concludes the proof of the Lemma.
\end{proof}
\end{lem}
Now we start with the first estimates for the Master Equation.
The first result is obtained by the study of some regularity properties of the $MFG$ system, uniformly in $m_0$.
\begin{prop}
The system \eqref{meanfieldgames} with conditions \eqref{fame} has a unique classical solution $(u,m)\in \mathcal{C}^{1+\frac{\alpha}{2},2+\alpha}\times \mathcal{C}([0,T];\mathcal{P}(\Omega))$, and this solution satisfies
\begin{equation}\label{first}
\sup\limits_{t_1\neq t_2}\frac{\daw{1}(m(t_1),m(t_2))}{|t_1-t_2|^\frac 1 2}+\norm{u}_{1+\frac{\alpha}{2},2+\alpha}\le C\hspace{0.08cm},
\end{equation}
where $C$ does not depend on $(t_0,m_0)$.\\
Furthermore, $m(t)$ has a positive density for each $t>0$ and, if $m_0\in\mathcal{C}^{2+\alpha}$ and satisfies the Neumann boundary condition
\begin{equation}\label{neumannmzero}
\left(a(x)Dm_0+(\tilde{b}(0,x)+H_p(x,Du(0,x)))m_0\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{equation}
then $m\in\mathcal{C}^{1+\frac{\alpha}{2},2+\alpha}$.\\
Finally, the solution is stable: if $m_{0n}\to m_0$ in $\mathcal{P}(\Omega)$, then there is convergence of the corresponding solutions of \eqref{meanfieldgames}-\eqref{fame}: $(u_n,m_n)\to (u,m)$ in $\mathcal{C}^{1,2}\times\mathcal{C}([0,T];\mathcal{P}(\Omega))$.
\begin{proof}
We use a Schauder fixed point argument.\\
Let $X\subset\mathcal{C}([t_0,T];\mathcal{P}(\Omega))$ be the set
$$
X:=\left\{m\in\mathcal{C}([t_0,T];\mathcal{P}(\Omega))\mbox{ s.t. }\daw{1}(m(t),m(s))\le L|t-s|^\miezz\ \forall s,t\in[t_0,T] \right\}\hspace{0.08cm},
$$
where $L$ is a constant that will be chosen later.\\
It is easy to prove that $X$ is a convex compact set for the uniform distance.\\
We define a map $\Phi:X\to X$ as follows.\\
Given $\beta\in X$, we consider the solution of the following Hamilton-Jacobi equation
\begin{equation}\label{hj}
\begin{cases}
-u_t-\mathrm{tr}(a(x)D^2u)+H(x,Du)=F(x,\beta(t))\,,\\
u(T)=G(x,\beta(T))\,,\\
a(x)Du\cdot\nu(x)_{|\partial\Omega}=0\,.
\end{cases}
\end{equation}
Thanks to hypothesis $(iv)$ of \ref{ipotesi}, we have ${F(\cdot,\beta(\cdot))}\in\mathcal{C}^{\frac{\alpha}2,\alpha}$ and its norm is bounded by a constant independent of $\beta$. For the same reason $G(\cdot,\beta(T))\in\mathcal{C}^{2+\alpha}$.\\
It is well known that these hypotheses guarantee the existence and uniqueness of a classical solution. A proof can be found in \cite{lsu}, \textit{Theorem V.7.4}.\\
So, we can expand the gradient term with the Taylor formula and obtain a linear equation satisfied by $u$:
\begin{align*}
\begin{cases}
-u_t-\mathrm{tr}(a(x)D^2u)+H(x,0)+V(t,x)\cdot Du=F(x,\beta(t))\,,\\
u(T)=G(x,\beta(T))\,,\\
a(x)Du\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{align*}
with
$$
V(t,x):=\int_0^1 H_p(x,\lambda Du(t,x))\hspace{0.08cm} d\lambda\hspace{0.08cm}.
$$
Thanks to the Lipschitz hypothesis on $H$, $(ii)$ of \ref{ipotesi}, we know that $V\in L^\infty$. So, we can use the Corollary of \emph{Theorem IV.9.1} of \cite{lsu} to obtain
$$
Du\in\mathcal{C}^{\frac{\alpha}{2},\alpha}\implies V\in\mathcal{C}^{\frac{\alpha}2,\alpha}\hspace{0.08cm}.
$$
So, we can apply \emph{Theorem IV.5.3} of \cite{lsu} and get
\begin{align*}
\norm{u}_{1+\frac{\alpha}{2},2+\alpha}\le C\left(\norm{F}_{\frac{\alpha}{2},\alpha}+\norm{G}_{2+\alpha}\right)\hspace{0.08cm},
\end{align*}
where the constant $C$ does not depend on $\beta$, $t_0$, $m_0$.\\
Now, we define $\Phi(\beta)=m$, where $m\in\mathcal{C}([t_0,T];\mathcal{P}(\Omega))$ is the solution of the Fokker-Planck equation
\begin{equation}\label{fpk}
\begin{cases}
m_t-\mathrm{div}(a(x)Dm)-\mathrm{div}(m(\tilde{b}(x)+H_p(x,Du)))=0\,,\\
m(t_0)=m_0\,,\\
\left(a(x)Dm+(\tilde{b}+H_p(x,Du))m\right)\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
\end{equation}
It is easy to prove that the above equation has a unique solution in the sense of distribution. A proof in a more general case will be given in the next section, in Proposition \ref{peggiodellagerma}. We want to check that $m\in X$.\\
Thanks to the distributional formulation, we have
\begin{equation}gin{equation}\begin{equation}gin{split}\lambdabel{sotis}
&\ensuremath{\int_{\Omega}} \phi(t,x)m(t,dx)-\ensuremath{\int_{\Omega}}\phi(s,x)m(s,dx)\\\,+&\int_s^t\ensuremath{\int_{\Omega}}(-\phi_t-\noalign{
}athrm{tr}(a(x)D^2\phi)+H_p(x,Du)\cdot D\phi)m(r,dx)dr=0\hspace{0.08cm},
\end{split}\end{equation}
for each $\phi\in L^{\infty}$ satisfying in the weak sense
$$
\begin{equation}gin{cases}
-\phi_t-\noalign{
}athrm{tr}(a(x)D^2\phi)+H_p(x,Du)\cdot D\phi\in L^\infty(Q_T)\\
aD\phi\cdot\nu_{|\partial\Omega}=0
\end{cases}\hspace{0.08cm}.
$$
Take $\psi(\cdot)$ a $1$-Lipschitz function in $\Omega$. Then we choose $\phi$ in the weak formulation as the solution in $[t,T]$ of the following linear equation
\begin{equation}\label{coglia}\begin{cases}
-\phi_t-\mathrm{tr}(a(x)D^2\phi)+H_p(x,Du)\cdot D\phi=0\,,\\
\phi(t)=\psi\,,\\
a(x)D\phi\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
\end{equation}
Thanks to Lemma \ref{davverotecnico}, we know that $\phi(\cdot,x)\in\mathcal{C}^{\miezz}([0,T])$ and its H\"{o}lder norm in time is bounded uniformly if $\psi$ is $1$-Lipschitz.\\
Coming back to \eqref{sotis}, we obtain
\begin{align*}
\ensuremath{\int_{\Omega}}\psi(x)(m(t,dx)-m(s,dx))=\ensuremath{\int_{\Omega}}(\phi(t,x)-\phi(s,x))m(s,dx)\le C|t-s|^\miezz\hspace{0.08cm},
\end{align*}
and taking the $\sup$ over the $1$-Lipschitz functions $\psi$,
$$
\mathbf{d}_1(m(t),m(s))\le C|t-s|^\miezz\hspace{0.08cm}.
$$
Choosing $L=C$, we have proved that $m\in X$.\\
Since $X$ is convex and compact, to apply Schauder's theorem we only need to show the continuity of $\Phi$.\\
Let $\beta_n\to\beta$, and let $u_n$ and $m_n$ be the solutions of \eqref{hj} and \eqref{fpk} related to $\beta_n$. Since $\{u_n\}_n$ is uniformly bounded in $\mathcal{C}^{1+\frac{\alpha}{2},2+\alpha}$, from Ascoli-Arzel\`a's Theorem we have $u_n\to u$ in $\mathcal{C}^{1,2}$.\\
To prove the convergence of $\{m_n\}_n$, we take $\phi_n$ as the solution of \eqref{coglia} with $Du$ replaced by $Du_n$. Then, as before, $\{\phi_n\}_n$ is a Cauchy sequence in $\mathcal{C}^1$. Actually, the difference $\phi_{n,m}:=\phi_n-\phi_m$ satisfies
$$
\begin{cases}
-(\phi_{n,m})_t-\mathrm{tr}(a(x)D^2\phi_{n,m})+H_p(x,Du_n)\cdot D\phi_{n,m}=(H_p(x,Du_m)-H_p(x,Du_n))\cdot D\phi_m\,,\\
\phi_{n,m}(t)=0\,,\\
\bdone{\phi_{n,m}}\,,
\end{cases}
$$
and so Lemma \ref{sonobravo} implies
$$
\norm{\phi_{n,m}}\amu\le C\norminf{(H_p(x,Du_m)-H_p(x,Du_n))\cdot D\phi_m}\le C\norminf{Du_m-Du_n}\le \omega(n,m)\,,
$$
where $\omega(n,m)\to 0$ when $n,m\to\infty$, and where we use Lemma \ref{davverotecnico} in order to bound $D\phi_m$ in $L^\infty$, without compatibility conditions.\\
Using \eqref{sotis} with $(m_n,\phi_n)$ and $(m_k,\phi_k)$, for $n,k\in\mathbb{N}$, $s=0$, and subtracting the two equalities, we get
\begin{align*}
\ensuremath{\int_{\Omega}}\psi(x)(m_n(t,dx)-m_k(t,dx))=\ensuremath{\int_{\Omega}}(\phi_n(0,x)-\phi_k(0,x))m_0(dx)\le\omega(n,k)\,.
\end{align*}
Taking the sup over the $1$-Lipschitz functions $\psi$ and over $t\in[0,T]$, we obtain
\begin{align*}
\sup\limits_{t\in[0,T]}\mathbf{d}_1(m_n(t),m_k(t))\le\omega(n,k)\,,
\end{align*}
which proves that $\{m_n\}_n$ is a Cauchy sequence in $X$. Then, there exists $m$ such that $m_n\to m$ in $X$.\\
Passing to the limit in \eqref{fpk}, we immediately obtain $m=\Phi(\beta)$, which concludes the proof of continuity.\\
So we can apply Schauder's theorem and obtain a classical solution of the problem \eqref{meanfieldgames}-\eqref{fame}. The estimate \eqref{first} follows from the above estimates for \eqref{hj} and \eqref{fpk}.\\
To prove the uniqueness, let $(u_1,m_1)$, $(u_2,m_2)$ be two solutions of \eqref{meanfieldgames}-\eqref{fame}.\\
We use inequality \eqref{dopo}, whose proof will be given in the next lemma, with $m_{01}(t_0)=m_{02}(t_0)=m_0$:
\begin{align*}
&\intc{t_0}\mathlarger{(}H(x,Du_2)-H(x,Du_1)-H_p(x,Du_1)(Du_2-Du_1)\mathlarger{)}m_1(t,dx)dt\hspace{0.08cm}+\\
+&\intc{t_0}\mathlarger{(}H(x,Du_1)-H(x,Du_2)-H_p(x,Du_2)(Du_1-Du_2)\mathlarger{)}m_2(t,dx)dt\le 0\,.
\end{align*}
Since $H$ is strictly convex, the above inequality gives us $Du_1=Du_2$ in the set\\ $\{m_1>0\}\cup\{m_2>0\}$. Then $m_1$ and $m_2$ solve the same Fokker-Planck equation, and by uniqueness we have $m_1=m_2$.\\
So $F(x,m_1(t))=F(x,m_2(t))$, $G(x,m_1(T))=G(x,m_2(T))$ and $u_1$ and $u_2$ solve the same Hamilton-Jacobi equation, which implies $u_1=u_2$. The proof of uniqueness is complete.\\
Finally, if $m_0\in\mathcal{C}^{2+\alpha}$ satisfies \eqref{neumannmzero}, then, splitting the divergence terms in \eqref{fpk}, we have
\begin{equation*}
\begin{cases}
m_t-\mathrm{tr}(a(x)D^2m)-m\hspace{0.08cm}\mathrm{div}\left(\tilde{b}(x)+H_p(x,Du)\right)-\left(2\tilde{b}(x)+H_p(x,Du)\right)Dm=0\\
m(t_0)=m_0\\
\left(a(x)Dm+(\tilde{b}+H_p(x,Du))m\right)\cdot\nu_{|\partial\Omega}=0
\end{cases}\hspace{0.08cm}.
\end{equation*}
Then, thanks to \textit{Theorem IV.5.3} of \cite{lsu}, $m$ is of class $\mathcal{C}^{1+\frac{\alpha}{2},2+\alpha}$.\\
The stability of solutions is obtained in the same way we used for the continuity of $\Phi$. This concludes the proof.
\end{proof}
\end{prop}
With this proposition, we have obtained that
\begin{equation}\label{firstmaster}
\sup\limits_{t\in[0,T]}\sup\limits_{m\in\mathcal{P}(\Omega)}\norm{U(t,\cdot,m)}_{2+\alpha}\le C\hspace{0.08cm},
\end{equation}
which gives us an initial regularity result for the function $U$.\\
To complete the previous proposition, we need the following lemma, based on the so-called \textit{Lasry-Lions monotonicity argument}.
\begin{lem}
Let $(u_1,m_1)$ and $(u_2,m_2)$ be two solutions of System \eqref{meanfieldgames}-\eqref{fame}, with $m_1(t_0)=m_{01}$, $m_2(t_0)=m_{02}$. Then
\begin{equation}\begin{split}\label{dopo}
&\intc{t_0}\mathlarger{(}H(x,Du_2)-H(x,Du_1)-H_p(x,Du_1)(Du_2-Du_1)\mathlarger{)}m_1(t,dx)dt\\
+&\intc{t_0}\mathlarger{(}H(x,Du_1)-H(x,Du_2)-H_p(x,Du_2)(Du_1-Du_2)\mathlarger{)}m_2(t,dx)dt\\\le-&\ensuremath{\int_{\Omega}}(u_1(t_0,x)-u_2(t_0,x))(m_{01}(dx)-m_{02}(dx))\hspace{0.08cm}.
\end{split}
\end{equation}
\begin{proof}
See \emph{Lemma 3.1.2} of \cite{card}.
\end{proof}
\end{lem}
\section{Lipschitz continuity of $U$}
\begin{prop}\label{holder}
Let $(u_1,m_1)$ and $(u_2,m_2)$ be two solutions of system \eqref{meanfieldgames}-\eqref{fame}, with $m_1(t_0)=m_{01}$, $m_2(t_0)=m_{02}$. Then
\begin{equation}\begin{split}\label{lipsch}
\norm{u_1-u_2}_{1,2+\alpha}&\le C\mathbf{d}_1(m_{01},m_{02})\,,\\
\sup\limits_{t\in[t_0,T]}\mathbf{d}_1(m_1(t),m_2(t))& \le C\mathbf{d}_1(m_{01},m_{02})\,,
\end{split}\end{equation}
where $C$ does not depend on $t_0$, $m_{01}$, $m_{02}$. In particular
\begin{equation*}
\sup\limits_{t\in[0,T]}\sup_{m_1\neq m_2}\left[\left(\mathbf{d}_1(m_1,m_2)\right)^{-1}\norm{U(t,\cdot,m_1)-U(t,\cdot,m_2)}_{2+\alpha}\right]\le C\,.
\end{equation*}
\emph{
So, the solution of the Master Equation is Lipschitz continuous in the measure variable. This will be essential in order to prove the $\mathcal{C}^1$ character of $U$ with respect to $m$.
}
\begin{proof}
For simplicity, we show the result for $t_0=0$.\\
\textit{First step: An initial estimate.} Thanks to the hypotheses on $H$ and the Lipschitz bound of $u_1$ and $u_2$, \eqref{dopo} implies
\begin{align*}
&\ensuremath{\int_{0}^{t}\int_{\Omega}}|Du_1-Du_2|^2(m_1(t,dx)+m_2(t,dx))dt\le\\\le C&\ensuremath{\int_{\Omega}} (u_1(0,x)-u_2(0,x))(m_{01}(dx)-m_{02}(dx))\le C\norm{u_1-u_2}\amu\mathbf{d}_1(m_{01},m_{02}).
\end{align*}
\textit{Second step: An estimate on $m_1-m_2$}. We call $m:=m_1-m_2$. We take $\phi$ a sufficiently regular function satisfying $aD\phi\cdot\nu=0$, which will be chosen later. By subtracting the weak formulations \eqref{sotis} of $m_1$ and $m_2$ for $s=0$ and for $\phi$ as test function, we obtain
\begin{equation}\label{immigrato}
\begin{split}
&\ensuremath{\int_{\Omega}}\phi(t,x)m(t,dx)+\ensuremath{\int_{0}^{t}\int_{\Omega}}\left(-\phi_t-\mathrm{tr}(a(x)D^2\phi)+H_p(x,Du_1)D\phi\right)m(s,dx)ds+\\+&\ensuremath{\int_{0}^{t}\int_{\Omega}}(H_p(x,Du_1)-H_p(x,Du_2))D\phi\hspace{0.08cm} m_2(s,dx)ds=\ensuremath{\int_{\Omega}}\phi(0,x)(m_{01}(dx)-m_{02}(dx))\,.
\end{split}
\end{equation}
We choose $\phi$ as the solution of \eqref{coglia} related to $u_1$, with terminal condition $\psi\in W^{1,\infty}$. Using the Lipschitz continuity of $H_p$ with respect to $p$, we get
\begin{align*}
\ensuremath{\int_{\Omega}} \psi(x) m(t,dx)\le C\ensuremath{\int_{0}^{t}\int_{\Omega}} |Du_1-Du_2| m_2(s,dx)ds+C\mathbf{d}_1(m_{01},m_{02})\hspace{0.08cm},
\end{align*}
since, by Lemma \ref{davverotecnico}, $\phi$ is Lipschitz continuous with a constant bounded uniformly if $\psi$ is $1$-Lipschitz.\\
Now we use Young's inequality and the first step to obtain
\begin{align*}
\ensuremath{\int_{\Omega}} \psi(x) m(t,dx)\le\hspace{0.08cm}&C\left(\ensuremath{\int_{0}^{t}\int_{\Omega}} |Du_1-Du_2|^2 m_2(s,dx)\right)^\miezz+C\mathbf{d}_1(m_{01},m_{02})\le\\\le& \hspace{0.08cm} C\left(\norm{u_1-u_2}\amu^\miezz\mathbf{d}_1(m_{01},m_{02})^\miezz+\mathbf{d}_1(m_{01},m_{02})\right)\hspace{0.08cm},
\end{align*}
and finally, taking the sup over the $1$-Lipschitz functions $\psi$ and over $t\in[0,T]$,
\begin{equation}\label{secondstep}
\sup\limits_{t\in[0,T]}\mathbf{d}_1(m_1(t),m_2(t))\le C\left(\norm{u_1-u_2}\amu^\miezz\mathbf{d}_1(m_{01},m_{02})^\miezz+\mathbf{d}_1(m_{01},m_{02})\right)\hspace{0.08cm}.
\end{equation}
\textit{Third step: Estimate on $u_1-u_2$ and conclusion.} We call $u:=u_1-u_2$. Then $u$ solves the following equation
\begin{equation*}
\begin{cases}
-u_t-\mathrm{tr}(a(x)D^2 u)+V(t,x)Du=f(t,x)\\
u(T)=g(x)\\
a(x)Du\cdot\nu_{|\partial\Omega}=0
\end{cases}\hspace{0.08cm},
\end{equation*}
where
\begin{align*}
&V(t,x)=\int_0^1 H_p(x,\lambda Du_1(t,x)+(1-\lambda)Du_2(t,x))d\lambda\hspace{0.08cm};\\
&f(t,x)=\int_0^1\ensuremath{\int_{\Omega}}\dm{F}(x,\lambda m_1(t)+(1-\lambda)m_2(t),y)(m_1(t,dy)-m_2(t,dy))d\lambda\hspace{0.08cm};\\
&g(x)=\int_0^1\ensuremath{\int_{\Omega}}\dm{G}(x,\lambda m_1(T)+(1-\lambda)m_2(T),y)(m_1(T,dy)-m_2(T,dy))d\lambda\hspace{0.08cm}.
\end{align*}
From the regularity of $u_1$ and $u_2$, we have $V(t,\cdot)$ bounded in $\mathcal{C}^{\frac{\alpha}{2},\alpha}$. \\
We want to apply \emph{Theorem 5.1.21} of \cite{lunardi}. To do this, we have to estimate
$
\sup\limits_t\norm{f(t,\cdot)}_\alpha\,.
$
First, we call
$$
m_\lambda(\cdot):=\lambda m_1(\cdot)+(1-\lambda)m_2(\cdot)\hspace{0.08cm}.
$$
We get
\begin{align*}
\sup\limits_{t\in[0,T]}\norm{f(t,\cdot)}_{\alpha}&\le\sup\limits_{t\in[0,T]}\int_0^1\norm{D_y\dm{F}(\cdot,m_\lambda(t),\cdot)}_{\alpha,\infty}d\lambda\,\mathbf{d}_1(m_1(t),m_2(t))\\
&\le C\sup\limits_{t\in[0,T]}\mathbf{d}_1(m_1(t),m_2(t))\hspace{0.08cm},
\end{align*}
where $C$ depends on the constant $C_F$ in hypotheses \ref{ipotesi}.\\
\begin{comment}
As regards the time estimate, one has
\begin{equation}\begin{split}\label{nemequittepas}
&\left|f(t,x)-f(s,x)\right|\le\\\le&\left|\int_0^1\ensuremath{\int_{\Omega}}\left(\dm{F}(x,m_\lambda(t),y)-\dm{F}(x,m_\lambda(s),y)\right)\left(m_1(t,dy)-m_2(t,dy)\right)d\lambda\right|+\\+&\left|\int_0^1\ensuremath{\int_{\Omega}}\dm{F}(x,m_\lambda(s),y)(m_1(t,dy)-m_1(s,dy)+m_2(s,dy)-m_2(t,dy))d\lambda\right|\,.
\end{split}\end{equation}
Thanks to the hypotheses on $F$, the first integral is bounded above by
\begin{align*}
C\int_0^1\mathbf{d}_1(m_\lambda(t),m_\lambda(s))&\mathbf{d}_1(m_1(t),m_2(t))d\lambda\le\\&\le C\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\int_0^1\mathbf{d}_1(m_\lambda(t),m_\lambda(s))d\lambda\hspace{0.08cm}.
\end{align*}
We compute the Wasserstein distance between $m_\lambda(t)$ and $m_\lambda(s)$. For $\phi$ $1$-Lipschitz
\begin{align*}
&\ensuremath{\int_{\Omega}} \phi(x)(m_\lambda(t,dx)-m_\lambda(s,dx))=\\=\lambda&\ensuremath{\int_{\Omega}} \phi(x)(m_1(t,dx)-m_1(s,dx))\hspace{0.08cm}+\hspace{0.08cm}(1-\lambda)\ensuremath{\int_{\Omega}} \phi(x)(m_2(t,dx)-m_2(s,dx))\le\\\le \lambda\hspace{0.08cm}&\mathbf{d}_1(m_1(t),m_1(s))+(1-\lambda)\hspace{0.08cm}\mathbf{d}_1(m_2(t),m_2(s))\le C|t-s|^\miezz\le C|t-s|^{\frac{\alpha}{2}},
\end{align*}
where $C$ is changed in the last inequality.\\
So, the first integral in \eqref{nemequittepas} is bounded by
$$
C|t-s|^\frac{\alpha}{2}\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\hspace{0.08cm}.
$$
Now we estimate the second integral. We have
\begin{align*}
\left|\int_0^1\ensuremath{\int_{\Omega}}\dm{F}(x,m_\lambda(s),y)(m_1(t,dy)-m_1(s,dy)+m_2(s,dy)-m_2(t,dy))d\lambda\right|\le\\\le C\mathbf{d}_1(m_1(t),m_1(s))+C\mathbf{d}_1(m_2(t),m_2(s))\le C|t-s|^\miezz\hspace{0.08cm}.
\end{align*}
On the other hand, we have
\begin{align*}
\left|\int_0^1\ensuremath{\int_{\Omega}}\dm{F}(x,m_\lambda(s),y)(m_1(t,dy)-m_1(s,dy)+m_2(s,dy)-m_2(t,dy))d\lambda\right|\le\\\le C\mathbf{d}_1(m_1(t),m_2(t))+C\mathbf{d}_1(m_1(s),m_2(s))\le C\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\hspace{0.08cm}.
\end{align*}
Now, we use a very easy trick: if $A\le B$ and $A\le C$ it is obvious that
\begin{align*}
A=A^pA^{1-p}\le B^pC^{1-p}\hspace{0.08cm},\hspace{1cm}\forall\ 0\le p\le 1\hspace{0.08cm}.
\end{align*}
So, choosing $p=\alpha$, the second integral in \eqref{nemequittepas} is bounded by
$$
C|t-s|^{\frac{\alpha}2}\left(\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\right)^{1-\frac{\alpha}2}\hspace{0.08cm}.
$$
\end{comment}
In the same way
\begin{align}
\norm{g(\cdot)}_{2+\alpha}\le C\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\,.
\end{align}
So we can apply \emph{Theorem 5.1.21} of \cite{lunardi} and obtain
\begin{equation}\label{finalcountdown}
\begin{split}
\norm{u_1-u_2}_{1,2+\alpha}&\le C\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\,.
\end{split}
\end{equation}
\begin{comment}
Conversely, \emph{Theorem 5.1.18} of \cite{lunardi} tells us that
\begin{equation}\label{caniofatone}
\norm{u_1-u_2}\amu\le C\left( \norminf{f}+\norm{g}_{1+\alpha}\right)\le C\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\,.
\end{equation}
\end{comment}
Coming back to \eqref{secondstep}, this implies
\begin{align*}
\sup\limits_{t\in[0,T]}&\mathbf{d}_1(m_1(t),m_2(t))\le\\&\le C\left(\left(\sup\limits_{r\in[0,T]}\mathbf{d}_1(m_1(r),m_2(r))\right)^{\miezz}\mathbf{d}_1(m_{01},m_{02})^\miezz+\mathbf{d}_1(m_{01},m_{02})\right)\hspace{0.08cm},
\end{align*}
and, using a generalized Young's inequality, this allows us to conclude:
\begin{align}\label{oterz}
\sup\limits_{t\in[0,T]}&\mathbf{d}_1(m_1(t),m_2(t))\le C\mathbf{d}_1(m_{01},m_{02})\,.
\end{align}
Plugging this estimate in \eqref{finalcountdown}, we finally obtain
\begin{align*}
&\norm{u_1-u_2}_{1,2+\alpha}\le C\mathbf{d}_1(m_{01},m_{02})\label{osicond}\,.\\
\end{align*}
\end{proof}
\end{prop}
\section{Linearized system and differentiability of $U$ with respect to the measure}
The proof of existence and uniqueness of solutions for the Master Equation strongly relies on the $\mathcal{C}^1$ character of $U$ with respect to $m$.\\
The definition of the derivative $\frac{\delta U}{\delta m}$ is strictly related to the solution $(v,\mu)$ of the following \emph{linearized system}:
\begin{equation}\label{linDuDm}
\begin{cases}
-v_t-\mathrm{tr}(a(x)D^2v)+H_p(x,Du)\cdot Dv=\mathlarger{\frac{\delta F}{\delta m}}(x,m(t))(\mu(t))\,,\\
\mu_t-\mathrm{div}(a(x)D\mu)-\mathrm{div}(\mu(H_p(x,Du)+\tilde{b}))-\mathrm{div}(mH_{pp}(x,Du)Dv)=0\,,\\
v(T,x)=\mathlarger{\frac{\delta G}{\delta m}}(x,m(T))(\mu(T))\,,\qquad \mu(t_0)=\mu_0\,,\\
a(x)Dv\cdot\nu_{|\partial\Omega}=0\,,\hspace{1cm}\left(a(x)D\mu+\mu(H_p(x,Du)+\tilde{b})+mH_{pp}(x,Du)Dv\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation}
where we use the notation
$$
{\dm{F}}(x,m(t))(\rho(t)):=\left\langle{\dm{F}}(x,m(t),\cdot),\rho(t)\right\rangle
$$
and the same for $G$.\\
We want to prove that this system admits a solution and that the following equality holds:
\begin{equation}\label{reprform}
v(t_0,x)=\left\langle\frac{\delta U}{\delta m}(t_0,x,m_0,\cdot),\mu_0\right\rangle\,.
\end{equation}
First, we have to analyze separately the well-posedness of the Fokker-Planck equation in the distributional sense:
\begin{equation}\label{linfp}
\begin{cases}
\mu_t-\mathrm{div}(a(x)D\mu)-\mathrm{div}(\mu b)=f\,,\\
\mu(0)=\mu_0\,,\\
\left(a(x)D\mu+\mu b\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation}
where $f\in L^1(W^{-1,\infty})$, $\mu_0\in\mathcal{C}^{-(1+\alpha)}$, $b\in L^\infty$.
A suitable distributional definition of solution is the following:
\begin{defn}\label{canzonenuova}
Let $f\in L^1(W^{-1,\infty})$, $\mu_0\in\mathcal{C}^{-(1+\alpha)}$, $b\in L^\infty$. We say that a function $\mu\in\mathcal{C}([0,T];\mathcal{C}^{-(1+\alpha),N})\cap L^1(Q_T)$ is a weak solution of \eqref{linfp} if, for all $\psi\in L^\infty(\Omega)$, $\xi\in\mathcal{C}^{1+\alpha,N}$ and $\phi$ solution in $[0,t]\times\Omega$ of the following linear equation
\begin{equation}\label{hjbfp}
\begin{cases}
-\phi_t-\mathrm{div}(aD\phi)+bD\phi=\psi\,,\\
\phi(t)=\xi\,,\\
aD\phi\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation}
the following formulation holds:
\begin{equation}\label{weakmu}
\langle \mu(t),\xi\rangle+\ensuremath{\int_{0}^{t}\int_{\Omega}}\mu(s,x)\psi(s,x)\,dxds=\langle\mu_0,\phi(0,\cdot)\rangle+\int_0^t\langle f(s),\phi(s,\cdot) \rangle\,ds\,,
\end{equation}
where $\langle \cdot,\cdot\rangle$ denotes the duality between $\mathcal{C}^{-(1+\alpha),N}$ and $\mathcal{C}^{1+\alpha,N}$ in the first case, between $\mathcal{C}^{-(1+\alpha)}$ and $\mathcal{C}^{1+\alpha}$ in the second case and between $W^{-1,\infty}$ and $W^{1,\infty}$ in the last case.
\end{defn}
We note that the definition is well-posed. Actually, $\phi(s,\cdot)$ is in $\mathcal{C}^{1+\alpha}$ $\forall s$ thanks to Lemma \ref{sonobravo}, so $ \langle\mu_0,\phi(0,\cdot)\rangle$ and $\langle f(s),\phi(s,\cdot)\rangle$ are well defined. Moreover, we have
$$
\norm{\phi(s,\cdot)}_{W^{1,\infty}}\le C\,.
$$
Hence, since $f\in L^1(W^{-1,\infty})$, the last integral is well defined too.
\begin{rem} We are mainly interested in a particular case of distribution $f$. If there exists an integrable function $c:[0,T]\times\Omega\to\R^n$ such that $\forall\phi\in W^{1,\infty}$
$$
\langle f(t),\phi\rangle=\ensuremath{\int_{\Omega}} c(t,x)\cdot D\phi(x)\,dx\,,
$$
then we can write the problem \eqref{linfp} in this way:
\begin{equation*}
\begin{cases}
\mu_t-\mathrm{div}(a(x)D\mu)-\mathrm{div}(\mu b)=\mathrm{div}(c)\,,\\
\mu(0)=\mu_0\,,\\
\left(a(x)D\mu+\mu b+c\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation*}
writing $f$ like a divergence and adjusting the Neumann condition, in order to make sense of the integration by parts in the regular case.
In this case, in order to ensure the condition $f\in L^1(W^{-1,\infty})$, we can simply require $c\in L^1(Q_T)$. Actually we have, using Jensen's inequality,
\begin{align*}
\norm{f}_{L^1(W^{-1,\infty})}=\int_0^T\sup\limits_{\norm{\phi}_{W^{1,\infty}}\le 1}\left(\ensuremath{\int_{\Omega}} c(t,x)\cdot D\phi(x)\,dx\right)dt\le C\ensuremath{\int_{0}^{t}\int_{\Omega}}|c(t,x)|\,dxdt=\norm{c}_{L^1}\,,
\end{align*}
where $|\cdot|$ is any equivalent norm in $\R^d$.
\end{rem}
The next Proposition gives us an exhaustive existence and uniqueness result for \eqref{linfp}.
\begin{prop}\label{peggiodellagerma}
Let $f\in L^1(W^{-1,\infty})$, $\mu_0\in\mathcal{C}^{-(1+\alpha)}$, $b\in L^\infty$. Then there exists a unique solution of the Fokker-Planck equation \eqref{linfp}.
This solution satisfies
\begin{equation}\label{stimefokker}
\sup_t\norm{\mu(t)}_{-(1+\alpha),N}+\norm{\mu}_{L^p}\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,,
\end{equation}
where $p=\frac{d+2}{d+1+\alpha}\,$.
\begin{comment}
\begin{equation}\label{acciessmij}
\begin{split}
\sup_t\norm{\mu(t)}_{-(1+\alpha),N}+\norm{\mu}_{L^p}&\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,,\\
\norm{\mu}_{\gamma,-(1+\alpha),N}&\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^q(W^{-1,\infty})}\right)\,,
\end{split}
\end{equation}
where $\gamma=\min\left\{\frac{q-1}{q},\frac{\alpha}{2}\,\right\}\,.$
\end{comment}
Finally, the solution is stable: if $\mu^n_0\to\mu_0$ in $\mathcal{C}^{-(1+\alpha)}$, $\{b^n\}_n$ uniformly bounded and $b^n\to b$ in $L^p$ $\forall\,p$, $f^n\to f$ in $L^1(W^{-1,\infty})$, then, calling $\mu^n$ and $\mu$ the solutions related, respectively, to $(\mu^n_0,b^n,f^n)$ and $(\mu_0,b,f)$, we have $\mu^n\to\mu$ in $\mathcal{C}([0,T];\mathcal{C}^{-(1+\alpha),N})\cap L^p(Q_T)$.
\begin{proof}
For the existence part, we start assuming that $f$, $b$, $\mu_0$ are smooth functions, and that $\mu_0$ satisfies
\begin{equation}\label{neumannmu}
\left(a(x)D\mu_0+\mu_0 b\right)\cdot\nu_{|\partial\Omega}=0\,.
\end{equation}
In this case, we can split the divergence terms in \eqref{linfp} and obtain that $\mu$ is a solution of a linear equation with smooth coefficients. So the existence of solutions in this case is a straightforward consequence of the classical results in \cite{lsu}, \cite{lunardi}.
We consider the unique solution $\phi$ of \eqref{hjbfp} with $\psi=0$ and $\xi\in\mathcal{C}^{1+\alpha,N}$. Multiplying the equation of $\mu$ by $\phi$ and integrating by parts in $[0,t]\times\Omega$ we obtain
\begin{equation}\label{rhs}
\langle \mu(t),\xi\rangle=\langle\mu_0,\phi(0,\cdot)\rangle+\int_0^t\langle f(s),\phi(s,\cdot) \rangle\,ds\,.
\end{equation}
Thanks to Lemma \ref{sonobravo}, we know that
\begin{equation}\label{upa}
\norm{\phi}\amu\le C\norm{\xi}_{1+\alpha}\,.
\end{equation}
Then the right-hand side term of \eqref{rhs} is bounded in this way:
\begin{equation*}
\langle\mu_0,\phi(0,\cdot)\rangle+\int_0^t\langle f(s),\phi(s,\cdot) \rangle\,ds\le C\norm{\xi}_{1+\alpha}\left(\norm{\mu_0}_{-(1+\alpha)}+
\int_0^t\norm{f(s)}_{W^{-1,\infty}}\right)\,.
\end{equation*}
Coming back to \eqref{rhs} and passing to the $\sup$ over $\xi\in\mathcal{C}^{1+\alpha,N}$, $\norm{\xi}_{1+\alpha}\le 1$, we obtain
\begin{equation}
\sup\limits_t\norm{\mu(t)}_{-(1+\alpha),N}\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,.
\end{equation}
\begin{comment}
For the time estimate, we consider the same $\phi$ and, for $s<t$, we integrate the equation in $[s,t]\times\Omega$, obtaining
$$
\langle\mu(t)-\mu(s),\xi\rangle=\langle\mu(s),\phi(s)-\phi(t)\rangle+\int_s^t\langle f(r),\phi(r,\cdot)\rangle\,dr\,.
$$
The first term in the right-hand side is easily estimated, using \eqref{upa}:
$$
\langle\mu(s),\phi(s)-\phi(t)\rangle\le C\norm{\phi(s)-\phi(t)}_{1+\alpha}\norm{\mu_0}_{-(1+\alpha)}\le C|t-s|^{\frac{\alpha}2}\norm{\xi}_{1+\alpha}\norm{\mu_0}_{-(1+\alpha)}\,.
$$
As regards the last term, we use again \eqref{upa} and Jensen's inequality to obtain
\begin{align*}
&\int_s^t\langle f(r),\phi(r,\cdot)\rangle\,dr\le C\norm{\xi}_{1+\alpha}\int_s^t\norm{f(r)}_{W^{-1,\infty}}\,dr\\
\le C\norm{\xi}_{1+\alpha}(t-s)^{\frac{q-1}{q}}&\left(\int_s^t\norm{f(r)}^q_{W^{-1,\infty}}\right)^{\frac 1q}= C\norm{\xi}_{1+\alpha}(t-s)^{\frac{q-1}{q}}\norm{f}_{L^q(W^{-1,\infty})}\,.
\end{align*}
Putting together these estimates and passing to the $\sup$ again with $\xi\in\mathcal{C}^{1+\alpha,N}$ and $\norm{\xi}_{1+\alpha}\le1$, we obtain
$$
\norm{\mu}_{\gamma,-(1+\alpha),N}\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^q(W^{-1,\infty})}\right)\,.
$$
\end{comment}
Now we have to prove the $L^p$ estimate. We consider the solution of \eqref{hjbfp} with $t=T$, $\xi=0$ and $\psi\in L^r$, with $r>d+2$ (we recall that in this chapter we call $d$ the dimension of the space).
Then the Corollary of \emph{Theorem IV.9.1} of \cite{lsu} tells us that
\begin{equation}\label{napule}
\norm{\phi}_{1-\frac{d+2}{2r},2-\frac{d+2}{r}}\le C\norm{\psi}_{L^r}\,.
\end{equation}
Choosing $r=\frac{d+2}{1-\alpha}$, one has $2-\frac{d+2}{r}=1+\alpha$. Integrating the equation of $\mu$ in $[0,T]\times\Omega$, one has
$$
\ensuremath{\int_{0}^{t}\int_{\Omega}}\mu\psi\,dxds=\langle\mu_0,\phi(0,\cdot)\rangle+\int_0^T\langle f(s),\phi(s,\cdot)\rangle\,ds\,.
$$
Thanks to \eqref{napule} we can estimate the terms on the right-hand side and obtain
\begin{equation}\label{minecessita}
\ensuremath{\int_{0}^{t}\int_{\Omega}}\mu\psi\,dxds\le C\norm{\psi}_{L^r}\left( \norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})} \right)\,.
\end{equation}
Passing to the $\sup$ for $\norm{\psi}_{L^r}\le 1$, we finally get
$$
\norm{\mu}_{L^p}\le C\left( \norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})} \right)\,,
$$
with $p$ defined as the conjugate exponent of $r$, i.e. $p=\frac{d+2}{d+1+\alpha}$.
This proves estimates \eqref{stimefokker} in the regular case.\\
In the general case, we consider suitable smooth approximations $\mu_{0}^k$, $f^k$, $b^k$ converging to $\mu_0$, $f$, $b$ respectively in $\mathcal{C}^{-(1+\alpha),N}$, $L^1(W^{-1,\infty})$ and $L^q(Q_T)$ $\forall q\ge 1$, with $b^k$ bounded uniformly in $k$ and with $\mu_0^k$ satisfying \eqref{neumannmu}.
We call $\mu^k$ the related solution of \eqref{linfp}. The above convergences tell us that, for a certain $C$,
\begin{align*}
\|{\mu^k_0}\|_{-(1+\alpha)}\le C\norm{\mu_0}_{-(1+\alpha)}\,,\qquad&\norminf{b^k}\le C\norminf{b}\\
&\|{f^k}\|_{L^1(W^{-1,\infty})}\le C\norm{f}_{L^1(W^{-1,\infty})}\,.
\end{align*}
Then we apply \eqref{stimefokker}, to obtain, uniformly in $k$,
\begin{equation}\label{blaffoff}
\sup_t\|{\mu^k(t)}\|_{-(1+\alpha),N}+\|{\mu^k}\|_{L^p}\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,,
\end{equation}
\begin{comment}
\begin{equation*}
\begin{split}
\sup_t\|{\mu^k(t)}\|_{-(1+\alpha),N}+\|{\mu^k}\|_{L^p}&\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,,\\
\|{\mu^k}\|_{\gamma,-(1+\alpha),N}&\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^q(W^{-1,\infty})}\right)\,.
\end{split}
\end{equation*}
\end{comment}
where $C$ actually depends on $b^k$, but since $b^k\to b$ it is bounded uniformly in $k$.
Moreover, the function $\mu^{k,h}:=\mu^k-\mu^h$ also satisfies \eqref{linfp} with data $b=b^k$,\\$f=f^k-f^h+\mathrm{div}(\mu^h(b^k-b^h))$, $\mu^0=\mu^k_0-\mu^h_0$. Then estimates \eqref{stimefokker} tell us that
\begin{align*}
&\sup_t\|{\mu^{k,h}(t)}\|_{-(1+\alpha),N}\,+\,\|{\mu^{k,h}}\|_{L^p}\\\le C&\left(\|{\mu^k_0-\mu^h_0}\|_{-(1+\alpha)}+\|{f^k-f^h}\|_{L^1(W^{-1,\infty})}+\|{\mathrm{div}(\mu^h(b^k-b^h))}\|_{L^1(W^{-1,\infty})}\right)\,.
\end{align*}
The first two terms in the right-hand side easily go to $0$ when $h,k\to+\infty$, since $\mu^k_0$ and $f^k$ are Cauchy sequences. As regards the last term, calling $p'$ the conjugate exponent of $p$, we have
\begin{align}\label{luigicoibluejeans}
\|{\mathrm{div}(\mu^h(b^k-b^h))}\|_{L^1(W^{-1,\infty})}\le C\ensuremath{\int_{0}^{t}\int_{\Omega}}\left|\mu^h(b^k-b^h)\right|\,dxdt\le C\|{b^k-b^h}\|_{L^{p'}}\,,
\end{align}
since $\mu^k$ is bounded in $L^p$ by \eqref{blaffoff} (here $C$ depends also on $\mu_0$ and $f$). So, also the last term goes to $0$ since $b^k$ is a Cauchy sequence in $L^q$ $\forall q\ge1$.
Hence, $\{\noalign{
}u^k\}_k$ is a Cauchy sequence, and so there exists $\noalign{
}u\in \noalign{
}athcal{C}([0,T];\noalign{
}athcal{C}^{-(1+\alpha),N})\cap L^p(Q_T)$ such that
\begin{equation}gin{comment}
Then we can use Ascoli-Arzel\`a Theorem, the reflexivity of $L^p$ for $p>1$ and Banach-Alaoglu Theorem in order to obtain the existence of $\noalign{
}u\in\noalign{
}athcal{C}([0,T];\noalign{
}athcal{C}^{-(1+\alpha),N})\cap L^1(Q_T)$ such that
$$
\noalign{
}u^k\to\noalign{
}u\qquad\noalign{
}box{strongly in }\noalign{
}athcal{C}([0,T];\noalign{
}athcal{C}^{-(1+\alpha),N})\,,\noalign{
}box{ weakly in } L^p(Q_T)\,.
$$
\end{comment}
$$
\noalign{
}u^k\to\noalign{
}u\qquad\noalign{
}box{strongly in }\noalign{
}athcal{C}([0,T];\noalign{
}athcal{C}^{-(1+\alpha),N})\,,\noalign{
}box{ strongly in } L^p(Q_T)\,.
$$
Furthermore, $\noalign{
}u$ satisfies \eqref{stimefokker}.
To conclude, we have to prove that $\mu$ is actually a solution of \eqref{linfp} in the sense of Definition \ref{canzonenuova}.
We take $\phi$ and $\phi^k$ as the solutions of \eqref{hjbfp} related to $b$ and $b^k$. The weak formulation for $\mu^k$ implies that
\begin{equation*}
\langle \mu^k(t),\xi\rangle+\ensuremath{\int_{0}^{t}\int_{\Omega}}\mu^k(s,x)\psi(s,x)\,dxds=\langle\mu_0^k,\phi^k(0,\cdot)\rangle+\int_0^t\langle f^k(s),\phi^k(s,\cdot) \rangle\,ds\,.
\end{equation*}
We can immediately pass to the limit in the left-hand side, using the convergence of $\mu^k$ previously obtained.
For the right-hand side, we first need to prove the convergence of $\phi^k$ towards $\phi$. This is immediate: actually, the function $\tilde{\phi}^k:=\phi^k-\phi$ satisfies
\begin{equation*}
\begin{cases}
-\tilde{\phi}^k_t-\mathrm{div}(aD\tilde{\phi}^k)+b^kD\tilde{\phi}^k=(b^k-b)D\phi\,,\\
\tilde{\phi}^k(t)=0\,,\\
aD\tilde{\phi}^k\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
\end{equation*}
Then, the Corollary of \emph{Theorem IV.9.1} of \cite{lsu} implies, for a certain $q>d+2$ depending on $\alpha$,
$$
\|{\tilde{\phi}^k}\|\amu\le C\|{(b^k-b)D\phi}\|_{L^q}\to0\,,
$$
since $D\phi$ is bounded in $L^\infty$ using Lemma \ref{sonobravo}.
Hence, $\phi^k\to\phi$ in $\mathcal{C}^{\frac{1+\alpha} 2,1+\alpha}$. This allows us to pass to the limit in the right-hand side too and prove that \eqref{weakmu} holds true, and so that $\mu$ is a weak solution of \eqref{linfp}. This concludes the existence part.\\
For the uniqueness part, we consider $\mu_1$ and $\mu_2$ two weak solutions of the system. Then, by linearity, the function $\mu:=\mu_1-\mu_2$ is a weak solution of
$$
\begin{cases}
\mu_t-\mathrm{div}(a(x)D\mu)-\mathrm{div}(\mu b)=0\,,\\
\mu(0)=0\,,\\
\left(a(x)D\mu+\mu b\right)\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
$$
Hence, the weak formulation \eqref{weakmu} implies, $\forall \psi\in L^\infty$ and $\forall\xi\in\mathcal{C}^{1+\alpha,N}$,
$$
\langle \mu(t),\xi\rangle+\ensuremath{\int_{0}^{t}\int_{\Omega}}\mu(s,x)\psi(s,x)\,dxds=0\,,
$$
which implies $$\norm{\mu}_{L^1}=\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(1+\alpha),N}=0$$ and concludes the uniqueness part.\\
Finally, the stability part is an easy consequence of the estimates obtained previously. Let $f^n\to f$, $\mu^n_0\to\mu_0$ and $b^n\to b$. Then the function $\tilde{\mu}^n:=\mu^n-\mu$ satisfies \eqref{linfp} with $b\,,\mu_0$ and $f$ replaced by $b^n$, $\mu^n_0-\mu_0$, $f^n-f+\mathrm{div}(\mu(b^n-b))$. Then we use \eqref{stimefokker} to obtain
\begin{align*}
&\sup_t\|{\tilde{\mu}^n}\|_{-(1+\alpha),N}\,+\,\|{\tilde{\mu}^n}\|_{L^p}\\\le C&\left(\|{\mu^n_0-\mu_0}\|_{-(1+\alpha)}+\|{f^n-f}\|_{L^1(W^{-1,\infty})}+\|{\mathrm{div}(\mu(b^n-b))}\|_{L^1(W^{-1,\infty})}\right)\,,
\end{align*}
The first two terms in the right-hand side go to $0$. For the last term, the same computations of \eqref{luigicoibluejeans} imply
$$
\|{\mathrm{div}(\mu(b^n-b))}\|_{L^1(W^{-1,\infty})}\le C\|{b^n-b}\|_{L^{p'}}\to0\,.
$$
Then $\mu^n\to\mu$ in $\mathcal{C}([0,T];\mathcal{C}^{-(1+\alpha),N})\cap L^p(Q_T)$, which concludes the Proposition.
\end{proof}
\end{prop}
The last proposition allows us to get another regularity result of $\mu$, when the data $b$ is more regular. This result will be essential in order to improve the regularity of $\dm{U}$ with respect to $y$.
\begin{cor}
Let $\mu_0\in\mathcal{C}^{-(1+\alpha)}$, $f\in L^1(W^{-1,\infty})$, $b\in\mathcal{C}^{\frac\alpha 2,\alpha}$. Then the unique solution $\mu$ of \eqref{linfp} satisfies
\begin{equation}\label{forsemisalvo}
\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(2+\alpha),N}\le C\left(\norm{\mu_0}_{-(2+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,.
\end{equation}
\begin{proof}
We take $\phi$ as the solution of \eqref{hjbfp}, with $\xi\in C^{2+\alpha,N}(\Omega)$ and $\psi=0$. Then we know from the classical results of \cite{lsu}, \cite{lunardi} (it is important here that $b\in\mathcal{C}^{\frac\alpha2,\alpha}$), that
$$
\norm{\phi}\amd\le C\norm{\xi}_{2+\alpha}\,.
$$
The weak formulation of $\mu$ \eqref{weakmu} tells us that
$$
\langle\mu(t),\xi\rangle=\langle\mu_0,\phi(0,\cdot)\rangle+\int_0^T\langle f(s),\phi(s,\cdot)\rangle\,ds\le C\left(\norm{\mu_0}_{-(2+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\norm{\xi}_{2+\alpha}\,.
$$
Hence, we can pass to the $\sup$ for $\xi\in\mathcal{C}^{2+\alpha,N}$ with $\norm{\xi}_{2+\alpha}\le 1$ and obtain \eqref{forsemisalvo}.
\end{proof}
\begin{rem}
We stress the fact that \emph{we shall not formulate problem \eqref{linfp} directly with $\mu_0\in\mathcal{C}^{-(2+\alpha)}$.} Actually, the core of the existence theorem is the $L^p$ bound in space-time of $\mu$, and this is obtained by duality, considering test functions $\phi$ with data $\psi\in L^r$. For this function it is not guaranteed that $\phi(0,\cdot)\in\mathcal{C}^{2+\alpha}(\Omega)$, and an estimation like \eqref{minecessita} is no longer possible.
\end{rem}
\end{cor}
We can also obtain some useful estimates for the density function $m$, as stated in the next result.
\begin{cor}\label{samestrategies}
Let $(u,m)$ be the solution of the MFG system defined in \eqref{meanfieldgames}-\eqref{fame}. Then we have $m\in L^p(Q_T)$ for $p=\frac{d+2}{d+1+\alpha}$, with
\begin{equation}\label{mlp}
\norm{m}_{L^p}\le C\norm{m_0}_{-(1+\alpha)}\,.
\end{equation}
Furthermore, if $(u_1,m_1)$ and $(u_2,m_2)$ are two solutions of \eqref{meanfieldgames}-\eqref{fame} with initial conditions $m_{01}$ and $m_{02}$, then we have
\begin{equation}\label{m12p}
\norm{m_1-m_2}_{L^p(Q_T)}\le C\mathbf{d}_1(m_{01},m_{02})\,.
\end{equation}
\begin{proof}
Since $m$ satisfies \eqref{linfp} with $\mu_0=m_0\in\mathcal{P}(\Omega)\subset\mathcal{C}^{-(1+\alpha)}$, $b=H_p(x,Du)+\tilde{b}\in L^\infty$ and $f=0$, inequality \eqref{mlp} comes from Proposition \ref{peggiodellagerma}.
For the second inequality, we consider $m:=m_1-m_2$. Then $m$ solves the equation
\begin{equation*}
\begin{cases}
m_t-\mathrm{div}(aDm)-\mathrm{div}(m(H_p(x,Du_1)+\tilde{b}))=\mathrm{div}(m_2(H_p(x,Du_2)-H_p(x,Du_1)))\,,\\
m(t_0)=m_{01}-m_{02}\,,\\
\left[aDm+m\tilde{b}+m_1H_p(x,Du_1)-m_2H_p(x,Du_2)\right]\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation*}
i.e. $m$ is a solution of \eqref{linfp} with $f=\mathrm{div}(m_2(H_p(x,Du_2)-H_p(x,Du_1)))$, $\mu_0=m_{01}-m_{02}$, $b=H_p(x,Du_1)$. Then estimations \eqref{stimefokker} imply
$$
\norm{m_1-m_2}_{L^p(Q_T)}\le C\left(\norm{\mu_0}_{-(1+\alpha)}+\norm{f}_{L^1(W^{-1,\infty})}\right)\,.
$$
We estimate the right-hand side term. As regards $\mu_0$ we have
$$
\norm{\mu_0}_{-(1+\alpha)}=\sup\limits_{\norm{\phi}_{1+\alpha}\le 1}\ensuremath{\int_{\Omega}} \phi(x)(m_{01}-m_{02})(dx)\le C\mathbf{d}_1(m_{01},m_{02})\,.
$$
For the $f$ term we argue in the following way:
\begin{align*}
\norm{f}_{L^1(W^{-1,\infty})}&=\int_0^T\sup\limits_{\norm{\phi}_{W^{1,\infty}}\le 1}\left(\ensuremath{\int_{\Omega}} H_p(x,Du_2)-H_p(x,Du_1)D\phi\,m_2(t,dx)\right)\,dt\\&\le C\norm{u_1-u_2}\amu\le C\mathbf{d}_1(m_{01},m_{02})\,,
\end{align*}
which allows us to conclude.
\end{proof}
\end{cor}
In order to prove the representation formula \eqref{reprform}, we need to obtain some estimates for a more general linearized system of the form
\begin{equation}\label{linear}
\begin{cases}
-z_t-\mathrm{tr}(a(x)D^2z)+H_p(x,Du)Dz=\mathlarger{\dm{F}}(x,m(t))(\rho(t))+h(t,x)\,,\\
\rho_t-\mathrm{div}(a(x)D\rho)-\mathrm{div}(\rho(H_p(x,Du)+\tilde{b}))-\mathrm{div}(m H_{pp}(x,Du) Dz+c)=0\,,\\
z(T,x)=\mathlarger{\dm{G}}(x,m(T))(\rho(T))+z_T(x)\,,\qquad\rho(t_0)=\rho_0\,,\\
a(x)Dz\cdot\nu_{|\partial\Omega}=0\,,\quad\left(a(x)D\rho+\rho(H_p(x,Du)+\tilde{b})+mH_{pp}(x,Du) Dz+c\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation}
where we require
$$
z_T\in\mathcal{C}^{2+\alpha},\quad\rho_0\in\mathcal{C}^{-(1+\alpha)},\quad h\in \mathcal{C}^{0,\alpha}([t_0,T]\times\Omega),\quad c\in L^1([t_0,T]\times\Omega)\,.
$$
Moreover, $z_T$ satisfies
\begin{equation}\label{neumannzT}
aDz_T\cdot\nu_{|\partial\Omega}=0\,.
\end{equation}
A suitable definition of solution for this system is the following:
\begin{defn}\label{defn}
\begin{comment}
Let $\mathcal{C}^{-(2+\alpha),N}(\Omega))$ the dual space of $\{\phi\in\mathcal{C}^{2+\alpha}(\Omega)\mbox{ s.t. }a(x)D\phi(x)\cdot\nu(x)=0\ \forall x\in\partial\Omega\}$, endowed with the norm
$$
\norm{\rho}_{-(2+\alpha),N}=\sup\limits_{\substack{\norm{\phi}_{2+\alpha}\le 1\\aD\phi\cdot\nu_{|\partial\Omega}=0}}\langle \rho,\phi \rangle\,.
$$
\end{comment}
We say that a couple $(z,\rho)\in\mathcal{C}^{1,2+\alpha}\times\,\left(\mathcal{C}([0,T];\mathcal{C}^{-(1+\alpha),N}(\Omega))\cap L^1(Q_T)\right)$ is a solution of the equation \eqref{linear} if
\begin{itemize}
\item $z$ is a classical solution of the linear equation;
\item $\rho$ is a distributional solution of the Fokker-Planck equation in the sense of Definition \ref{canzonenuova}.
\end{itemize}
\end{defn}
We start with the following existence result.
\begin{prop}\label{linearD}
Let hypotheses \ref{ipotesi} hold for $0<\alpha<1$. Then there exists a unique solution $(z,\rho)\in\mathcal{C}^{1,2+\alpha}\times\,\left(\mathcal{C}([0,T];\mathcal{C}^{-(1+\alpha),N}(\Omega))\cap L^1(Q_T)\right)$ of system \eqref{linear}. This solution satisfies, for a certain $p>1$,
\begin{equation}
\begin{split}\label{stimelin}
\norm{z}_{1,2+\alpha}+\sup\limits_t\norm{\rho(t)}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le CM\hspace{0.08cm},
\end{split}
\end{equation}
where $C$ depends on $H$ and where $M$ is given by
\begin{equation}\label{emme}
M:=\norm{z_T}_{2+\alpha}+\norm{\rho_0}_{-(1+\alpha)}+\norm{h}_{0,\alpha}+\norm{c}_{L^1}\,.
\end{equation}
\begin{proof}
As always, we can assume $t_0=0$ without loss of generality.
The main idea is to apply Schaefer's Theorem.\\
\emph{Step 1: Definition of the map $\mathbf{\Phi}$ satisfying Schaefer's Theorem}.
We set $X:=\mathcal{C}([0,T];\mathcal{C}^{-(1+\alpha),N})$, endowed with the norm
\begin{equation*}
\norm{\phi}_X:=\sup\limits_{t\in[0,T]}\norm{\phi(t)}_{-(1+\alpha),N}\,.
\end{equation*}
For $\rho\in X$, we consider the classical solution $z$ of the following equation
\begin{equation}
\label{zlin}
\begin{cases}
-z_t-\tr{z}+H_p(x,Du)Dz=\mathlarger{\dm{F}}(x,m(t))(\rho(t))+h(t,x)\,,\\
z(T)=\mathlarger{\dm{G}}(x,m(T))(\rho(T))+z_T\,,\\
a(x)Dz\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
\end{equation}
We note that, from Hypotheses \ref{ipotesi}, we have
$$
\langle a(x)D_xG(x,m), \nu(x)\rangle_{|\partial\Omega}=0\quad\forall m\in\mathcal{P}(\Omega)\implies\left\langle a(x)D_x\dm{G}(x,m(T))(\mu(T)), \nu(x)\right\rangle_{|\partial\Omega}\!\!\!\!\!\!\!=0\,.
$$
Hence, compatibility conditions are satisfied for equation \eqref{zlin} and, from \emph{Theorem 5.1.21} of \cite{lunardi}, $z$ satisfies
\begin{equation}\label{stimz}
\begin{split}
\norm{z}_{1,2+\alpha}&\le C\left(\norm{z_T}_{2+\alpha}+\sup\limits_{t\in[0,T]}\norm{\rho(t)}_{-(2+\alpha),N}+\norm{h}_{0,\alpha}\right)\\&\le C\left(M+\sup\limits_{t\in[0,T]}\norm{\rho(t)}_{-(1+\alpha),N}\right)\,,
\end{split}
\end{equation}
where we also use hypothesis $(vi)$ of \ref{ipotesi}, for the boundary condition of $\dm{F}$.
Then we define $\mathbf{\Phi}(\rho):=\tilde{\rho}$, where $\tilde{\rho}$ is the solution in the sense of Definition \ref{canzonenuova} to:
\begin{equation}
\label{plin}
\begin{cases}
\tilde{\rho}_t-\mathrm{div}(a(x)D\tilde{\rho})-\mathrm{div}(\tilde{\rho} (H_p(x,Du)+\tilde{b}))-\mathrm{div}(mH_{pp}(x,Du) Dz+c)=0\\
\tilde{\rho}(0)=\rho_0\\
\left(a(x)D\tilde{\rho}+\tilde{\rho}(H_p(x,Du)+\tilde{b})+mH_{pp}(x,Du) Dz+c\right)\cdot\nu_{|\partial\Omega}=0
\end{cases}\hspace{0.08cm}.
\end{equation}
Thanks to Proposition \ref{peggiodellagerma}, we have $\tilde{\rho}\in X$. We want to prove that the map $\mathbf{\Phi}$ is continuous and compact.\\
For the compactness, let $\{\rho_n\}_n\subset X$ be a sequence with $\norm{\rho_n}_X\le{C}$ for a certain $C>0$. We consider for each $n$ the solutions $z_n$ and $\tilde{\rho}_n$ of \eqref{zlin} and \eqref{plin} associated to $\rho_n$.\\
Using \eqref{stimz}, we have $\norm{z_n}_{1,2+\alpha}\le C_1$, where $C_1$ depends on $C$. Then, thanks to Ascoli-Arzel\`a's Theorem, and using also \eqref{precisissimongulaeva}, $\exists z$ s.t. $z_n\to z$ up to subsequences at least in $\mathcal{C}([0,T];\mathcal{C}^1(\Omega))$.
Using the pointwise convergence of $Dz_n$ and the $L^p$ boundedness of $m$ stated in \eqref{mlp}, we immediately obtain
$$
mH_{pp}(x,Du)Dz_n\,+\,c\to mH_{pp}(x,Du)Dz\,+\,c\qquad\mbox{in }L^1(Q_T)\,,
$$
which immediately implies
$$
\mathrm{div}(mH_{pp}(x,Du)Dz_n\,+\,c)\to\mathrm{div}{(mH_{pp}(x,Du)Dz\,+\,c)}\qquad\mbox{in }L^1(W^{-1,\infty})\,.
$$
Hence, the stability results proved in Proposition \ref{peggiodellagerma} prove that $\tilde{\rho}_n\to\tilde{\rho}$ in $X$, where $\tilde{\rho}$ is the solution related to $Dz$. This proves the compactness result.
The continuity of $\Phi$ can be proved using the same computations as for the compactness.
Finally, in order to apply Schaefer's theorem, we have to prove that
$$
\exists M>0 \mbox{ s.t. } \rho=\sigma\mathbf{\Phi}(\rho)\ \mbox{ and }\sigma\in[0,1]\implies\norm{\rho}_X\le M\hspace{0.08cm}.
$$
We will prove in the next step that, if $\rho=\sigma\mathbf{\Phi}(\rho)$, then the couple $(z,\rho)$ satisfies \eqref{stimelin}. This allows us to apply Schaefer's theorem and also gives us the desired estimate \eqref{stimelin}, since each solution $(z,\rho)$ of the system satisfies $\rho=\sigma\mathbf{\Phi}(\rho)$ with $\sigma=1$.\\
\emph{Step 2: Estimate of $\rho$ and $z$}. Let $(\rho,\sigma)\in X\times[0,1]$ such that $\rho=\sigma\mathbf{\Phi}(\rho)$. Then the couple $(z,\rho)$ satisfies
\begin{equation*}
\begin{cases}
-z_t-\mathrm{tr}(a(x)D^2z)+H_p(x,Du)Dz=\mathlarger{\dm{F}}(x,m(t))(\rho(t))+h(t,x)\\
\rho_t-\mathrm{div}(a(x)D\rho)-\mathrm{div}(\rho(H_p(x,Du)+\tilde{b}))-\sigma\mathrm{div}(mH_{pp}(x,Du) Dz+c)=0\\
z(T,x)=\mathlarger{\dm{G}}(x,m(T))(\rho(T))+z_T(x)\hspace{2cm}\rho(0)=\sigma\rho_0\\
a(x)Dz\cdot\nu_{|\partial\Omega}=0\hspace{1cm}\left(a(x)D\rho+\rho(H_p(x,Du)+\tilde{b})+\sigma(mH_{pp}(x,Du) Dz+c)\right)\cdot\nu_{|\partial\Omega}=0
\end{cases}\hspace{0.08cm}.
\end{equation*}
We want to use $z$ as test function for the equation of $\rho$. This is allowed since $z$ satisfies \eqref{hjbfp} with
\begin{align*}
\psi=\dm{F}(x,m(t))(\rho(t))+h(t,x)\in L^\infty(\Omega)\,,\qquad\xi=\dm{G}(x,m(T))(\rho(T))+z_T(x)\in\mathcal{C}^{1+\alpha,N}
\end{align*}
We obtain from the weak formulation of $\rho$:
\begin{equation*}
\begin{split}
&\ensuremath{\int_{\Omega}} \left(\rho(T,x)z(T,x)-\sigma\rho_0(x)z(0,x)\right)dx=-\sigma\ensuremath{\int_{0}^{t}\int_{\Omega}}f\langle c,Dz\rangle dxdt+\\
-&\ensuremath{\int_{0}^{t}\int_{\Omega}}f\rho(t,x)\left(\dm{F}(x,m(t))(\rho(t))+h\right)dxdt-\sigma\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\langle H_{pp}(x,Du) Dz,Dz\rangle\hspace{0.08cm} dxdt\,.
\end{split}
\end{equation*}
Using the terminal condition of $z$ and the monotonicity of $F$ and $G$, we get a first estimate:
\begin{equation}\label{stimasigma}
\begin{split}
\sigma\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\langle H_{pp}(x,Du) Dz,Dz\rangle\hspace{0.08cm} dxdt
\le&\sup\limits_{t\in[0,T]}\norm{\rho(t)}_{-(2+\alpha),N}\norm{z_T}_{2+\alpha}+\norm{\rho}_{L^p}\norminf{h}\\
+&\norm{z}_{1,2+\alpha}\left(\norm{\rho_0}_{-(2+\alpha),N}+\norm{c}_{L^1}\right)\\\le\,&M \left(\sup\limits_{t\in[0,T]}\norm{\rho(t)}_{-(1+\alpha),N}+\norm{\rho}_{L^1}+\norm{z}_{1,2+\alpha}\right)\,.
\end{split}
\end{equation}
We already know an initial estimate on $z$ in \eqref{stimz}. Now we need to estimate $\rho$.
Using \eqref{stimefokker} we obtain
\begin{equation}\label{duality}
\sup\limits_{t\in[0,T]}\norm{\rho}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le C\left(\norm{\sigma mH_{pp}(x,Du)Dz}_{L^1}+\norm{c}_{L^1}+\norm{\rho_0}_{-(1+\alpha)}\right)
\end{equation}
As regards the first term in the right hand side, we can use H\"{o}lder's inequality and \eqref{stimasigma} to obtain
\begin{align*}
&\norm{mH_{pp}(x,Du)Dz}_{L^1}=\sigma\sup\limits_{\substack{\norminf{\phi}\le 1\\\phi\in L^\infty(Q_T;\R^d)}}\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\langle H_{pp}(x,Du)Dz,\phi\rangle\,dxdt\\
&\le\sigma\left(\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\langle H_{pp}(x,Du) Dz,Dz\rangle\hspace{0.08cm} dxdt\right)^\miezz\left(\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\langle H_{pp}(x,Du)\phi,\phi\rangle\hspace{0.08cm} dxdt\right)^\miezz\\
&\le
M^\miezz\left(\sup\limits_{t\in[0,T]}\norm{\rho(t)}^\miezz_{-(1+\alpha),N}+\norm{\rho}^\miezz_{L^1}+\norm{z}^\miezz_{1,2+\alpha}\right)\,
\end{align*}
Putting these estimates into \eqref{duality} we obtain
\begin{align*}
\sup\limits_{t\in[0,T]}\norm{\rho}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le C\left( M+M^\miezz\left(\sup\limits_{t\in[0,T]}\norm{\rho(t)}^\miezz_{-(1+\alpha),N}+\norm{\rho}^\miezz_{L^1}+\norm{z}^\miezz_{1,2+\alpha}\right)\right)\,.
\end{align*}
Using a generalized Young's inequality with suitable coefficients, we get
\begin{align}\label{stimarho}
\sup\limits_{t\in[0,T]}\norm{\rho}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le C\left(M+M^\miezz\norm{z}_{1,2+\alpha}^\miezz\right)\hspace{0.08cm}.
\end{align}
This gives us an initial estimate for $\rho$, depending on the estimate of $z$.
Coming back to \eqref{stimz}, \eqref{stimarho} implies
\begin{align*}
\norm{z}_{1,2+\alpha}\le C\left(M+M^\miezz\norm{z}_{1,2+\alpha}^\miezz\right)\hspace{0.08cm}.
\end{align*}
Using a generalized Young's inequality with suitable coefficients, this implies
$$
\norm{z}_{1,2+\alpha}\le CM\,.
$$
Plugging this estimate in \eqref{stimarho}, we finally obtain
\begin{align*}
\norm{z}_{1,2+\alpha}+\sup\limits_{t\in[0,T]}\norm{\rho}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le CM\hspace{0.08cm}.
\end{align*}
This concludes the existence result.\\\\
\emph{Step 3. Uniqueness}. Let $(z_1,\rho_1)$ and $(z_2,\rho_2)$ be two solutions of \eqref{linear}. Then the couple $(z,\rho):=(z_1-z_2,\rho_1-\rho_2)$ satisfies the following linear system:
\begin{equation*}
\begin{cases}
-z_t-\mathrm{tr}(a(x)D^2z)+H_p(x,Du)Dz=\mathlarger{\dm{F}}(x,m(t))(\rho(t))\,,\\
\rho_t-\mathrm{div}(a(x)D\rho)-\mathrm{div}(\rho(H_p(x,Du)+\tilde{b}))-\mathrm{div}(m H_{pp}(x,Du) Dz)=0\,,\\
z(T,x)=\mathlarger{\dm{G}}(x,m(T))(\rho(T))\,,\qquad\rho(t_0)=0\,,\\
a(x)Dz\cdot\nu_{|\partial\Omega}=0\,,\quad\left(a(x)D\rho+\rho(H_p(x,Du)+\tilde{b})+mH_{pp}(x,Du) Dz\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation*}
i.e., a system of the form \eqref{linear} with $h=c=z_T=\rho_0=0$. Then estimation \eqref{stimelin} tells us that
$$
\norm{z}_{1,2+\alpha}+\sup\limits_{t\in[0,T]}\norm{\rho}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le 0\hspace{0.08cm}.
$$
and so $z=0$, $\rho=0$. This concludes the Proposition.
\end{proof}
\end{prop}
\begin{comment}
Once proved the existence result of \eqref{linear}, and consequently of \eqref{linDuDm}, we can obtain a different kind of regularity, that will be essential in order to prove the Lipschitz estimate of $\dm{U}$.
\begin{equation}gin{cor}
Suppose hypotheses \eqref{ipotesi} be satisfied. Then, if $(z,\rho)$ is a solution of \eqref{linear} and $c\in L^1(Q_T)$, we have
\begin{equation}gin{equation}\lambdabel{stimauno}
\norm{z}\amu+\sup\limits_{t\in[t_0,T]}\norm{\rho(t)}_{-1}\le C\left(\norm{z_T}_{1+\alpha}+\sup\limits_{t\in[t_0,T]}\norm{b(t,\cdot)}_1+\norm{c}_{L^1}+\norm{\rho_0}_{-1}\right)\,,
\end{equation}
where $\norm{c}_{L^1}=\int_{t_0}^T\norm{c(t)}_0 dt$, and $\norm{c(t)}_0$ is defined in this way:
$$
\norm{c(t)}_{0}:=\sup\limits_{\norminf{\phi}\le 1}\lambdangle c(t),\phi\rangle\,.
$$
Moreover, if $(v,\noalign{
}u)$ is a solution of \eqref{linDuDm}, we have $\forall t\in(t_0,T]$
\begin{equation}gin{equation}\lambdabel{stimadue}
\norm{\noalign{
}u(t)}_{L^1(\Omega)}\le \frac{C}{\sqrt{t-t_0}}\norm{\noalign{
}u_0}_{-1}\,.
\end{equation}
\begin{equation}gin{proof}
We suppose, without loss of generality, $t_0=0$.
We start from \eqref{stimauno}.
From now on, we call for simplicity
$$
R:=\norm{z_T}_{1+\alpha}+\sup\limits_{t\in[t_0,T]}\norm{b(t,\cdot)}_1+\norm{c}_{L^1}+\norm{\rho_0}_{-1}\,.
$$
Thanks to \emph{Theorem 5.1.18} of \cite{lunardi}, we have
$$
\norm{z}\amu\le C\left( \norm{z_T}_{1+\alpha}+\sup\limits_t\norm{\rho(t)}_{-1}+\norminf{b} \right)\le C\left(R+\sup\limits_t\norm{\rho(t)}_{-1}\right)\,.
$$
We have to estimate $\sup\limits_{t}\norm{\rho(t)}_{-1}$. First, the duality computation \eqref{duality} with $\sigma=1$ implies in this case
$$
\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\lambdangle H_{pp}(x,Du) Dz,Dz\rangle\, dxdt\le R\left(\sup\limits_t\norm{\rho(t)}_{-1}+\norm{z}\amu\right)\,.
$$
In order to estimate $\sup\limits_{t}\norm{\rho(t)}_{-1}$, we consider the solution $w$ of \eqref{eqw} with $\xi$ a Lipschitz function.
Then $w$ is a Lipschitz function with respect to $x$ and we have
$$
\sup\limits_t\norm{w(t,\cdot)}_1\le C\norm{\xi}_1\,.
$$
Equation \eqref{sonno} with $\sigma=1$ implies here, with the same techniques of the previous Theorem,
$$
\ensuremath{\int_{\Omega}}\rho(t)\xi\,dx\le C\left(R+R^\noalign{
}iezz(\sup\limits_t\norm{\rho(t)}_{-1}^\noalign{
}iezz+\norm{z}\amu^\noalign{
}iezz)\right)\norm{\xi}_1\,.
$$
Then, passing to the sup when $\norm{\xi}_1\le 1$ and using the estimate on $\norm{z}\amu$ and a generalized Young's inequality, we easily obtain \eqref{stimauno}.
Now, we consider a solution $(v,\noalign{
}u)$ of \eqref{linDuDm}. Since here $b=c=z_T=0$, we have $R=\norm{\noalign{
}u_0}_{-1}$.
Thanks to \eqref{stimauno} we have
\begin{equation}gin{equation}\lambdabel{corri}
\norm{v}\amu\le C\norm{\noalign{
}u_0}_{-1}\,.
\end{equation}
Taking $w$ as solution of \eqref{eqw} with $\xi\in L^\infty(\Omega)$, we have
$$
\norm{w(s,\cdot)}_1\le C\sqrt{t-s}\norminf{\xi}
$$
Moreover, \eqref{porretta} and \eqref{corri} easily imply
$$
\intm{t}m|Dz|^2\,dxds\le C\norm{\noalign{
}u_0}_{-1}^2\,,\qquad\intm{t}m|Dw|^2\, dxds\le C\norminf{\xi}^2\,.
$$
Then, from \eqref{sonno} we easily obtain
$$
\ensuremath{\int_{\Omega}}\xi\noalign{
}u(t,dx)\le C\norminf{\xi}\norm{\noalign{
}u_0}_{-1}+\frac1{\sqrt{t}}\norminf{\xi}\,.
$$
Passing to the sup when $\norminf{\xi}\le 1$, we obtain \eqref{stimadue}.
\end{proof}
\end{cor}
Now we come back to system \eqref{linDuDm}.
Since this system is a particular case of \eqref{linear}, with $b=c=z_T=0$, existence and uniqueness of solutions $(v,\noalign{
}u)$ in the space $\noalign{
}athcal{C}([t_0,T];\noalign{
}athcal{C}^{2+\alpha}\times\noalign{
}athcal{C}^{-\alpha})$ is already proved in Proposition \ref{linearD}.
But, in order to work with the Master Equation, we need further estimates for this system in the space $\noalign{
}athcal{C}^{-(2+\alpha),N}$, i.e. the dual space of $\{\phi\in\noalign{
}athcal{C}^{2+\alpha}(\Omega)\noalign{
}box{ s.t. }a(x)D\phi(x)\cdot\nu(x)=0\ \forall x\in\partial\Omega\}$, endowed with the norm
The following result gives us these estimates.
\begin{equation}gin{prop}\lambdabel{linearM}
The unique solution $(v,\noalign{
}u)$ of the problem \eqref{linDuDm} satisfies
\begin{equation}gin{equation}\lambdabel{stimelin0}
\norm{v}\amd +\norm{\noalign{
}u}_{\frac{\alpha}{2},-(2+\alpha),N}\le C\norm{\noalign{
}u_0}_{-(2+\alpha),N}\,.
\end{equation}
\begin{equation}gin{proof}
As always, we can assume $t_0=0$ without loss of generality.
We take the couple $(v,\noalign{
}u)$ solution of \eqref{linDuDm} in $\noalign{
}athcal{C}([t_0,T];\noalign{
}athcal{C}^{2+\alpha}\times\noalign{
}athcal{C}^{-\alpha})$.
Since $\{\phi\in\noalign{
}athcal{C}^{2+\alpha}(\Omega)\noalign{
}box{ s.t. }a(x)D\phi(x)\cdot\nu(x)=0\ \forall x\in\partial\Omega\}\subsetneq\noalign{
}athcal{C}^\alpha$, passing to the dual spaces we have
$$
\noalign{
}athcal{C}^{-\alpha}\subset\noalign{
}athcal{C}^{-(2+\alpha),N}\,.
$$
Hence, $(v,\noalign{
}u)$ belongs to the space $\noalign{
}athcal{C}([t_0,T];\noalign{
}athcal{C}^{2+\alpha}\times\noalign{
}athcal{C}^{-(2+\alpha),N})$.\\
In order to obtain \eqref{stimelin0}, we have to readapt the computations developed in Proposition \ref{linearD} in this framework.
From \emph{Theorem IV.5.3} of \cite{lsu}, $v$ satisfies
$$
\norm{v}\amd\le C\left(\norm{v(T)}_{2+\alpha}+\norm{\dm{F}(x,m(t))(\noalign{
}u(t))}\am\right)\,,
$$
and so, using the regularity of $F$ and $G$,
\begin{equation}gin{equation}\lambdabel{stimv}
\norm{v}\amd\le C\norm{\noalign{
}u}_{\frac\alpha 2,-(2+\alpha),N}\,.
\end{equation}
We stress the fact that, in order to obtain \eqref{stimv}, we strongly need the boundary conditions $(vi)$ of Hypotheses \eqref{ipotesi}.
The duality argument between $v$ and $\noalign{
}u$ tells us that
\begin{equation}gin{equation*}
\begin{equation}gin{split}
&\ensuremath{\int_{\Omega}} \left(\noalign{
}u(T,x)v(T,x)-\noalign{
}u_0(x)v(0,x)\right)dx=
-\ensuremath{\int_{0}^{t}\int_{\Omega}}f\noalign{
}u(t,x)\dm{F}(x,m(t))(\noalign{
}u(t))\,dxdt\\-&\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\lambdangle H_{pp}(x,Dv) Dv,Dv\rangle\hspace{0.08cm} dxdt\hspace{0.08cm}.
\end{split}
\end{equation*}
Using the terminal condition of $v$ and the monotonicity of $F$ and $G$, we get this initial estimate:
\begin{equation}gin{equation}\lambdabel{stimasigma0}
\begin{equation}gin{split}
\ensuremath{\int_{0}^{t}\int_{\Omega}}f m\lambdangle H_{pp}(x,Du) Dv,Dv\rangle\hspace{0.08cm} dxdt\le \norm{\noalign{
}u_0}_{-(2+\alpha),N}\norm{v}\amd
\end{split}
\end{equation}
We already know an initial estimate on $v$ in \eqref{stimv}. Now we need to estimate $\noalign{
}u$, and we will do it by duality. Let $t\in(0,T]$, $\xi\in\noalign{
}athcal{C}^{2+\alpha}$ such that $a(x)D\xi\cdot\nu_{|\partial\Omega}=0$ and let $\phi$ be the solution to the backward equation
\begin{equation}gin{equation}\lambdabel{eqw0}
\begin{equation}gin{cases}
-w_t-\tr{w}+H_p(x,Du)Dw=0\,,\hspace{2cm}\noalign{
}box{ in }[0,t]\times\Omega\,,\\
w(t)=\xi\,,\\
\bdone{w}\,.
\end{cases}\hspace{0.08cm}.
\end{equation}
Thanks to \emph{Theorem 5.1.20} of \cite{lunardi}, we have
\begin{equation}gin{equation}\lambdabel{sphi}
\norm{w}\amd\le C\norm{\xi}_{2+\alpha}\,.
\end{equation}
By duality, we obtain
\begin{equation}gin{equation}\lambdabel{sonno0}
\begin{equation}gin{split}
\ensuremath{\int_{\Omega}} \noalign{
}u(t)\xi\hspace{0.08cm} dx-\ensuremath{\int_{\Omega}} \noalign{
}u_0 w(0)\hspace{0.08cm} dx=-\intm{t}m\lambdangle H_{pp}(x,Du) Dv,Dw\rangle\hspace{0.08cm} dxds\,.
\end{split}
\end{equation}
The last term in the left hand side is easily bounded by
$$
C\norm{\noalign{
}u_0}_{-(2+\alpha),N}\norm{w}\amd\le C\norm{\noalign{
}u_0}_{-(2+\alpha),N}\norm{\xi}_{2+\alpha}\,.
$$
As regards the term in the right hand side, we can use H\"{o}lder's inequality and \eqref{stimasigma0} to bound the integral by
\begin{equation}gin{align*}
&\left(\intm{t}m\lambdangle H_{pp}(x,Du) Dv,Dv\rangle\hspace{0.08cm} dxds\right)^\noalign{
}iezz\left(\intm{t}m\lambdangle H_{pp}(x,Du) Dw,Dw\rangle\hspace{0.08cm} dxds\right)^\noalign{
}iezz\hspace{0.08cm}\le\\\le\hspace{0.08cm}&C\norm{\xi}_{2+\alpha}\sigma\norm{\noalign{
}u_0}_{-(2+\alpha),N}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\,.
\end{align*}
Taking the $\sup$ with $\norm{\xi}_{2+\alpha}\le1$, we get
\begin{equation}gin{align*}
\sup\limits_{t\in[0,T]}\norm{\noalign{
}u(t)}_{-(2+\alpha),N}\le C\left(\norm{\noalign{
}u_0}_{-(2+\alpha),N}+\norm{\noalign{
}u_0}_{-(2+\alpha),N}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\right)\,.
\end{align*}
This gives us an initial space estimate for $\noalign{
}u$, depending on $v$.\\
As regards the time estimate, we argue in a similar way.
Given $t\in(0,T]$, we take $w$ as in \eqref{eqw0}. For $s<t$, we use $w$ in duality with $\noalign{
}u$ in $(s,t)\times\Omega$:
\begin{equation}gin{align*}
\ensuremath{\int_{\Omega}} (\noalign{
}u(t)-\noalign{
}u(s))\xi\hspace{0.08cm} dx=\ensuremath{\int_{\Omega}}\noalign{
}u(s)(w(s)-w(t))\,+&\int_{s}^{t}\ensuremath{\int_{\Omega}}(\noalign{
}u w)_t dxd\tau\le\\
\le\norm{w(s)-w(t)}_{2+\alpha}\norm{\noalign{
}u(s)}_{-(2+\alpha),N}-&\int_s^t\ensuremath{\int_{\Omega}} m H_{pp}(x,Du) DvDw\hspace{0.08cm} dxd\tau\,.
\end{align*}
The first term, thanks to \eqref{sphi}, is bounded by
\begin{equation}gin{align*}
C|t-s|^{\frac{\alpha}2}\norm{\xi}_{2+\alpha}\left(\norm{\noalign{
}u_0}_{-(2+\alpha),N}+\norm{\noalign{
}u_0}_{-(2+\alpha),N}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\right)\,.
\end{align*}
For the second term we argue as in the estimation of $sup\norm{\noalign{
}u(t)}$:
\begin{equation}gin{align*}
&\qquad\int_s^t\ensuremath{\int_{\Omega}} m H_{pp}(x,Du) DvDw\hspace{0.08cm} dxd\tau\le\\&\le \left(\ensuremath{\int_{0}^{t}\int_{\Omega}}f m H_{pp}(x,Du) DvDv\right)^\noalign{
}iezz\left(\int_s^t\ensuremath{\int_{\Omega}} m H_{pp}(x,Du) DwDw\right)^\noalign{
}iezz\le\\&\le\hspace{0.08cm} C\norm{\xi}_{2+\alpha}|t-s|^\noalign{
}iezz\norm{\noalign{
}u_0}_{-(2+\alpha)}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\,.
\end{align*}
Putting these inequalities together and passing to the $sup$ when $\norm{\xi}_{2+\alpha}\le 1$, we obtain
\begin{equation}gin{align*}
\norm{\noalign{
}u(t)-\noalign{
}u(s)}_{-(2+\alpha),N}\le C|t-s|^\noalign{
}iezz\left(\norm{\noalign{
}u_0}_{-(2+\alpha),N}+\norm{\noalign{
}u_0}_{-(2+\alpha)}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\right)\,,
\end{align*}
and so
\begin{equation}gin{align}
\lambdabel{stimamu}
\norm{\noalign{
}u}_{\frac\alpha 2,-(2+\alpha),N}\le C\left(\norm{\noalign{
}u_0}_{-(2+\alpha),N}+\norm{\noalign{
}u_0}_{-(2+\alpha),N}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\right)\,.
\end{align}
Coming back to \eqref{stimv}, \eqref{stimamu} implies
\begin{equation}gin{align*}
\norm{v}\amd\le C\left(\norm{\noalign{
}u_0}_{-(2+\alpha),N}+\norm{\noalign{
}u_0}_{-(2+\alpha),N}^\noalign{
}iezz\norm{v}\amd^\noalign{
}iezz\right)\,.
\end{align*}
Using a generalized Young's inequality with suitable coefficients, this implies
\begin{equation}gin{align}\lambdabel{finalmente0}
\norm{v}\amd+\norm{\noalign{
}u}_{\frac\alpha 2,-(2+\alpha),N}\le C\norm{\noalign{
}u_0}_{-(2+\alpha),N}\hspace{0.08cm}.
\end{align}
This concludes the proof.
\end{proof}
\end{prop}
\end{comment}
We are ready to prove that \eqref{linDuDm} has a fundamental solution. This solution will be the desired derivative $\dm{U}$.
\begin{prop}
Equation \eqref{linDuDm} has a fundamental solution, i.e. there exists a function $K:[0,T]\times\Omega\times\mathcal{P}(\Omega)\times\Omega\to\R$ such that, for any $(t_0,m_0,\mu_0)$ we have
\begin{equation}\label{repres}
v(t_0,x)=_{-(1+\alpha)}\!\!\langle \mu_0,K(t_0,x,m_0,\cdot)\rangle_{1+\alpha}
\end{equation}
Moreover, $K(t_0,\cdot,m_0,\cdot)\in\mathcal{C}^{2+\alpha}(\Omega)\times \mathcal{C}^{1+\alpha}(\Omega)$ with
\begin{equation}\label{kappa}
\sup\limits_{(t,m)\in[0,T]\times\mathcal{P}(\Omega)}\norm{K(t,\cdot,m,\cdot)}_{2+\alpha,1+\alpha}\le C\,,
\end{equation}
and the second derivatives w.r.t. $x$ and the first derivatives w.r.t. $y$ are continuous in all variables.
\begin{proof}
From now on, we indicate with $v(t,x;\mu_0)$ the solution of the first equation of \eqref{linDuDm} related to $\mu_0$.
We start considering, for $y\in\Omega$, $\mu_0=\delta_y$, the Dirac function at $y$. We define
$$
K(t_0,x,m_0,y)=v(t_0,x;\delta_y)
$$
Thanks to \eqref{stimelin}, one immediately knows that $K$ is twice differentiable w.r.t. $x$ and
$$
\norm{K(t_0,\cdot,m_0,y)}_{2+\alpha}\le C\norm{\delta_y}_{-(1+\alpha)}=C
$$
Moreover, we can use the linearity of the system \eqref{linear} to obtain
$$
\frac{K(t_0,x,m_0,y+he_j)-K(t_0,x,m_0,y)}h=v(t_0,x;\Delta_{h,j}\delta_{y})\,,
$$
where $\Delta_{h,j}\delta_{y}=\frac1h(\delta_{y+he_j}-\delta_y)$. Using stability results for \eqref{linDuDm}, proved previously, we can pass to the limit and find that
$$
\frac{\partial K}{\partial y_j}(t_0,x,m_0,y)=v(t_0,x;-\partial_{y_j}\delta_y)\,,
$$
where the derivative of the Dirac delta function is in the sense of distribution.
Since $\partial_{y_i}\delta_y$ is bounded in $\mathcal{C}^{-(1+\alpha)}$ for all $i,j$, from \eqref{stimelin} we deduce that the second derivatives of $K$ with respect to $x$ are well defined and bounded.
The representation formula \eqref{repres} is an immediate consequence of the linear character of the equation and of the density of the set generated by the Dirac functions. This concludes the proof.
\end{proof}
\end{prop}
Now we are ready to prove the differentiability of the function $U$ with respect to the measure $m$.
In particular, we want to prove that this fundamental solution $K$ is actually the derivative of $U$ with respect to the measure.
\begin{thm}
Let $(u_1,m_1)$ and $(u_2,m_2)$ be two solutions of the Mean Field Games system \eqref{meanfieldgames}-\eqref{fame}, associated with the starting initial conditions $(t_0,m_0^1)$ and $(t_0,m_0^2)$.
Let $(v,\mu)$ be the solution of the linearized system \eqref{linDuDm} related to $(u_2,m_2)$, with initial condition $(t_0,m_0^1-m_0^2)$. Then we have
\begin{equation}\label{boundmder}
\norm{u_1-u_2-v}_{1,2+\alpha}+\sup\limits_{t\in[0,T]}\norm{m_1(t)-m_2(t)-\mu(t)}_{-(1+\alpha),N}\le C\mathbf{d}_1(m_0^1,m_0^2)^{2}\,.
\end{equation}
Consequently, the function $U$ defined in \eqref{U} is differentiable with respect to $m$.
\begin{proof}
We call $(z,\rho)=(u_1-u_2-v,m_1-m_2-\mu)$. Then $(z,\rho)$ satisfies
\begin{equation*}
\begin{cases}
-z_t-\mathrm{tr}(a(x)D^2z)+H_p(x,Du_2)Dz=\mathlarger{\dm{F}}(x,m_2(t))(\rho(t))+h(t,x)\,,\\
\rho_t-\mathrm{div}(a(x)D\rho)-\mathrm{div}(\rho(H_p(x,Du_2)+\tilde{b}))-\mathrm{div}(m H_{pp}(x,Du_2) Dz+c)=0\,,\\
z(T,x)=\mathlarger{\dm{G}}(x,m_2(T))(\rho(T))+z_T(x)\,,\qquad\rho(t_0)=0\,,\\
a(x)Dz\cdot\nu_{|\partial\Omega}=0\,,\quad\left(a(x)D\rho+\rho(H_p(x,Du)+\tilde{b})+mH_{pp}(x,Du) Dz+c\right)\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation*}
where
\begin{align*}
&h(t,x)=h_1(t,x)+h_2(t,x)\,,\\
&h_1=-\int_0^1 (H_p(x,sDu_1+(1-s)Du_2)-H_p(x,Du_2))\cdot D(u_1-u_2)\,ds\,,\\
&h_2=\int_0^1\!\ensuremath{\int_{\Omega}}\left(\dm{F}(x,sm_1(t)+(1-s)m_2(t),y)-\dm{F}(x,m_2(t),y)\right)(m_1(t)-m_2(t))(dy)ds,\\
&c(t)=c_1(t)+c_2(t)\,,\\
&c_1(t)=(m_1(t)-m_2(t))H_{pp}(x,Du_2)(Du_1-Du_2)\,,\\
&c_2(t)=m_1\int_0^1\left(H_{pp}(x,sDu_1+(1-s)Du_2)-H_{pp}(x,Du_2)\right)(Du_1-Du_2)\,ds\,,\\
&z_T=\int_0^1\ensuremath{\int_{\Omega}}\left(\dm{G}(x,sm_1(T)+(1-s)m_2(T),y)\right.\\
&\left.\hspace{6cm}-\dm{G}(x,m_2(T),y)\right)(m_1(T)-m_2(T))(dy)ds\,.
\end{align*}
So, \eqref{stimelin} implies that
\begin{equation}\label{rogueuno}
\norm{u_1-u_2-v}_{1,2+\alpha}+\sup\limits_{t\in[0,T]}\norm{m_1(t)-m_2(t)-\mu(t)}_{-(1+\alpha),N}\le C\left(\norm{h}_{0,\alpha}+\norm{c}_{L^1}+\norm{z_T}_{2+\alpha}\right)\,.
\end{equation}
Now we bound the right-hand side term in order to obtain \eqref{boundmder}.
We start with the term $h=h_1+h_2$. We can write
$$
h_1=-\int_0^1\int_0^1 s\, \langle H_{pp}(x,rsDu_1+(1-rs)Du_2)\,(Du_1-Du_2)\,,\, (Du_1-Du_2)\rangle\,drds\,.
$$
Using the properties of the H\"{o}lder norm and \eqref{lipsch}, it is immediate to obtain
\begin{align*}
\norm{h_1}_{0,\alpha}\le C\norm{D(u_1-u_2)}_{0,\alpha}^2\le C\mathbf{d}_1(m_{0}^1,m_{0}^2)^2\,.
\end{align*}
As regards the $h_2$ term, we can immediately bound the quantity
$$
|h_2(t,x)-h_2(t,y)|
$$
by
\begin{align*}
|x-y|^\alpha\mathbf{d}_1(m_1(t),m_2(t))\int_0^1\norm{D_m F(\cdot,sm_1(t)+(1-s)m_2(t),\cdot)-D_m F(\cdot,m_2(t),\cdot)}_{\alpha,\infty}ds\,.
\end{align*}
Using the regularity of $F$ and \eqref{lipsch}, we get
$$
\norm{h_2}_{0,\alpha}=\sup\limits_{t\in[0,T]}\norm{h_2(t,\cdot)}_\alpha\le C\mathbf{d}_1(m_0^1,m_0^2)^2\,.
$$
\begin{comment}
For the time regularity, we use the notation $m_{1+s}(t)=sm_1(t)+(1-s)m_2(t)$. Then we have
\begin{align*}
|b(t,x)-&b(s,x)|\le\int_0^1\ensuremath{\int_{\Omega}}\left(\dm{F}(m_{1+s}(t))-\dm{F}(m_{1+s}(r))\right)(m_1(t)-m_2(t))(dy)ds\\
+&\int_0^1\ensuremath{\int_{\Omega}} \left(\dm{F}(m_2(r))-\dm{F}(m_2(t))\right)(m_1(t)-m_2(t))(dy)ds\\+&\int_0^1\ensuremath{\int_{\Omega}}\left(\dm{F}(m_s(r))-\dm{F}(m_2(r))\right)(m_1(t)-m_1(r)+m_2(r)-m_2(t))(dy)ds\,.
\end{align*}
Arguing as in the proof of the H\"{o}lder regularity of $U$, we obtain
$$
|b(t,x)-b(s,x)|\le C|t-s|^{\frac{\alpha}{2}}\mathbf{d}_1(m_{01},m_{02})^{2-2\alpha}
$$
This means that
$$
\norm{b_2}\am\le C\mathbf{d}_1(m_0^1,m_0^2)^{}\implies\norm{b}\am\le C\mathbf{d}_1(m_0^1,m_0^2)^{2-2\alpha}\,.
$$
\end{comment}
A similar estimate holds for the function $z_T$. As regards the function $c$, we have
\begin{align*}
\norm{c_1}_{L^1}=\ensuremath{\int_{0}^{t}\int_{\Omega}} H_{pp}(x,Du_2)(Du_1-Du_2)(m_1(t,dx)-m_2(t,dx))\,dt\\ \le C\norm{u_1-u_2}_{1,2+\alpha}\mathbf{d}_1(m_1(t),m_2(t))\le C\mathbf{d}_1(m_0^1,m_0^2)^{2}\,,
\end{align*}
and, using the notation $u_{1+s}:=sDu_1+(1-s)Du_2$,
\begin{align*}
\norm{c_2}_{L^1}=&\int_0^1\ensuremath{\int_{0}^{t}\int_{\Omega}} \left(H_{pp}(x,Du_{1+s})-H_{pp}(x,Du_2)\right)(Du_1-Du_2)m_1(t,dx)\,dtds\\
\le&\,C\norminf{Du_1-Du_2}^2\le C\mathbf{d}_1(m_0^1,m_0^2)^2\,.
\end{align*}
Substituting these estimates in \eqref{rogueuno}, we obtain \eqref{boundmder} and we conclude the proof.
\end{proof}
\end{thm}
Since
$$
v(t_0,x)=\ensuremath{\int_{\Omega}} K(t_0,x,m_{02},y)(m_{01}(dy)-m_{02}(dy))\,,
$$
equation \eqref{boundmder} implies
$$
\norminf{U(t_0,\cdot,m_{01})-U(t_0,\cdot,m_{02})-\ensuremath{\int_{\Omega}} K(t_0,\cdot,m_{02},y)(m_{01}-m_{02})(dy) }\le C\mathbf{d}_1(m_{01},m_{02})^{2}.
$$
As a straightforward consequence, we have that $U$ is differentiable with respect to $m$ and
$$
\dm{U}(t,x,m,y)=K(t,x,m,y)\,.
$$
Consequently, using \eqref{kappa} we obtain
\begin{equation}\label{regdu}
\sup\limits_t\norm{\dm{U}(t,\cdot,m,\cdot)}_{2+\alpha,1+\alpha}\le C\,.
\end{equation}
But, in order to make sense to equation \eqref{Master}, we need at least that $\dm{U}$ is almost everywhere twice differentiable with respect to $y$.
To do that, we need to improve the estimates \eqref{stimelin} for a couple $(v,\mu)$ solution of \eqref{linDuDm}.
\begin{prop}
Let $\mu_0\in\mathcal{C}^{-(1+\alpha)}$. Then the unique solution $(v,\mu)$ satisfies
\begin{equation}\label{sbrigati}
\norm{v}_{1,2+\alpha}+\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(2+\alpha),N}\le C\norm{\mu_0}_{-(2+\alpha)}\,.
\end{equation}
\begin{proof}
We consider the solution $(v,\mu)$ obtained in Proposition \ref{linearD}. Since $\mu$ satisfies $\mu=\sigma\Phi(\mu)$ with $\sigma=1$, we can use \eqref{stimz} with $z_T=h=0$ and obtain
\begin{equation}\label{cumnupnat}
\norm{v}_{1,2+\alpha}\le C\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(2+\alpha),N}\,.
\end{equation}
We want to estimate the right-hand side. Using \eqref{forsemisalvo} we have
\begin{equation}\label{ngroc}
\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(2+\alpha),N}\le C\left(\norm{\mu_0}_{-(2+\alpha)}+\norm{ mH_{pp}(x,Du)Dv}_{L^1}\right)\,.
\end{equation}
The last term is estimated, as in Proposition \ref{linearD}, by
\begin{equation}\label{mannaggia}
\norm{\sigma mH_{pp}(x,Du)Dv}_{L^1}\le C\left(\ensuremath{\int_{0}^{t}\int_{\Omega}} m\langle H_{pp}(x,Du)Dv,Dv\rangle\,dxdt\right)^\miezz\,.
\end{equation}
The right-hand side term can be bounded using \eqref{stimasigma} with $h=z_T=c=0$:
\begin{equation}\label{crist}
\begin{split}
\ensuremath{\int_{0}^{t}\int_{\Omega}} m\langle H_{pp}(x,Du) Dv,Dv\rangle\hspace{0.08cm} dxdt
\le\norm{v}_{1,2+\alpha}\norm{\mu_0}_{-(2+\alpha)}\,.
\end{split}
\end{equation}
Hence, plugging estimates \eqref{mannaggia} and \eqref{crist} into \eqref{ngroc} we obtain
\begin{equation}\label{probbiatottquanta}
\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(2+\alpha),N}\le C\left(\norm{\mu_0}_{-(2+\alpha)}+\norm{v}_{1,2+\alpha}^\miezz\norm{\mu_0}_{-(2+\alpha),N}^\miezz\right)\,.
\end{equation}
Coming back to \eqref{cumnupnat} and using a generalized Young's inequality, we get
$$
\norm{v}_{1,2+\alpha}\le C\norm{\mu_0}_{-(2+\alpha)}\,,
$$
and finally, substituting the last estimate into \eqref{probbiatottquanta}, we obtain \eqref{sbrigati} and we conclude.
\end{proof}
\end{prop}
As an immediate Corollary, we get the desired estimate for $\dm{U}$.
\begin{cor}
Suppose hypotheses \ref{ipotesi} are satisfied. Then the derivative $\dm{U}$ is twice differentiable with respect to $y$, together with its first and second derivatives with respect to $x$, and the following estimate holds:
\begin{equation}\label{lentezza}
\norm{\dm{U}(t,\cdot,m,\cdot)}_{2+\alpha,2+\alpha}\le C\,.
\end{equation}
\begin{proof}
We want to prove that, $\forall\,i,j$, the incremental ratio
\begin{equation}\label{alotteriarubabbeh}
R^h_{i,j}(x,y):=\frac{\partial_{y_i}\dm{U}(t_0,x,m_0,y+he_j)-\partial_{y_i}\dm{U}(t_0,x,m_0,y)}{h}
\end{equation}
is a Cauchy sequence for $h\to0\,$ together with its first and second derivatives with respect to $x$. Then we have to estimate, for $h,k>0$, the quantity $\left|D^l_xR^h_{i,j}(x,y)-D^l_xR^k_{i,j}(x,y)\right|\,,$ for $|l|\le 2$.
We already know that
$$
\partial_{y_i}\dm{U}(t_0,x,m_0,y)=v(t_0,x;-\partial_{y_i}\delta_y)\,.
$$
Using the linearity of the system \eqref{linDuDm}, we obtain that
$$
\left|D^l_xR^h_{i,j}(x,y)-D^l_xR^k_{i,j}(x,y)\right|=D^l_xv\left(t_0,x;\Delta_h^j(-\partial_{y_i}\delta_y)-\Delta_k^j(-\partial_{y_i}\delta_y)\right)\,,
$$
where $\Delta_h^j(-\partial_{y_i}\delta_y)=-\frac1h(\partial_{y_i}\delta_{y+he_j}-\partial_{y_i}\delta_y)\,.$
Hence, estimate \eqref{sbrigati} and Lagrange's Theorem imply
\begin{align*}
&\left|D^l_xR^h_{i,j}(x,y)-D^l_xR^k_{i,j}(x,y)\right|\le C\norm{\Delta_h^j(-\partial_{y_i}\delta_y)-\Delta_k^j(-\partial_{y_i}\delta_y)}_{-(2+\alpha)}\\&=\sup\limits_{\norm{\phi}_{2+\alpha}\le 1}\left(\frac{\partial_{y_i}\phi(y+he_j)-\partial_{y_i}\phi(y)}{h}-\frac{\partial_{y_i}\phi(y+ke_j)-\partial_{y_i}\phi(y)}{k}\right)\\&=\sup\limits_{\norm{\phi}_{2+\alpha}\le 1}\left(\partial^2_{y_iy_j}\phi(y_{\phi,h})-\partial^2_{y_iy_j}\phi(y_{\phi,k})\right)\le\sup\limits_{\norm{\phi}_{2+\alpha}\le 1}|y_{\phi,h}-y_{\phi,k}|^\alpha\le |h|^\alpha+|k|^\alpha\,,
\end{align*}
for a certain $y_{\phi,h}$ in the line segment between $y$ and $y+he_j$ and $y_{\phi,k}$ in the line segment between $y$ and $y+ke_j$.
Since the last term goes to $0$ when $h,k\to0$, we have proved that the incremental ratio \eqref{alotteriarubabbeh} and its first and second derivative w.r.t $x$ are Cauchy sequences in $h$, and so converging when $h\to0$. This proves that $D^l_x\dm{U}$ is twice differentiable with respect to $y$, for all $0\le|l|\le2\,$.
In order to show the H\"{o}lder bound for $\dm{U}$ w.r.t. $y$, we consider $y,y'\in\Omega$ and we consider the function
$$
R^h_{i,j}(x,y)-R^h_{i,j}(x,y')\,.
$$
Then we know from the linearity of \eqref{linDuDm}
$$
R^h_{i,j}(x,y)-R^h_{i,j}(x,y')=v(t_0,x;\Delta^j_h(-\partial_{y_i}\delta_y)-\Delta^j_h(-\partial_{y_i}\delta_{y'}))\,,
$$
and so, using \eqref{sbrigati},
$$
\norm{R^h_{i,j}(\cdot,y)-R^h_{i,j}(\cdot,y')}_{2+\alpha}\le C\norm{\Delta^j_h(-\partial_{y_i}\delta_y)-\Delta^j_h(-\partial_{y_i}\delta_{y'})}_{-(2+\alpha)}\,.
$$
Now we pass to the limit when $h\to0$.
It is immediate to prove that
$$
\Delta^j_h(-\partial_{y_i}\delta_y)-\Delta^j_h(-\partial_{y_i}\delta_{y'})\overset{h\to0}{\longrightarrow}\partial_{y_j}\partial_{y_i}\delta_y-\partial_{y_j}\partial_{y_i}\delta_{y'}\qquad\mbox{in }\mathcal{C}^{-(2+\alpha)}\,.
$$
Since $D^l_xR^h_{i,j}(x,y)\to \partial^2_{y_iy_j}D^l_x\dm{U}(x,y)$ for all $|l|\le2$, we can use Ascoli-Arzel\`a to obtain that
$$
\norm{\partial^2_{y_iy_j}\dm{U}(t,\cdot,m,y)-\partial^2_{y_iy_j}\dm{U}(t,\cdot,m,y')}_{2+\alpha}\le C\norm{\partial_{y_j}\partial_{y_i}\delta_y-\partial_{y_j}\partial_{y_i}\delta_{y'}}_{-(2+\alpha)}\le C|y-y'|^\alpha\,,
$$
which proves \eqref{lentezza} and concludes the proof.
\end{proof}
\end{cor}
We conclude this part with a last property on the derivative $D_mU$, which will be essential in order to prove the uniqueness of solutions for the Master Equation.
\begin{cor}\label{delarue}
The function $U$ satisfies the following Neumann boundary conditions:
\begin{equation*}
\begin{split}
&a(x)D_x\dm{U}(t,x,m,y)\cdot\nu(x)=0\,,\qquad\forall x\in\partial\Omega, y\in\Omega,t\in[0,T],m\in\mathcal{P}(\Omega)\,,\\
&a(y)D_mU(t,x,m,y)\,\,\,\,\cdot\nu(y)=0\,,\qquad\forall x\in\Omega, y\in\partial\Omega,t\in[0,T],m\in\mathcal{P}(\Omega)\,.
\end{split}
\end{equation*}
\begin{proof}
Since $\dm{U}(t_0,x,m_0,y)=v(t_0,x)$, where $(v,\mu)$ is the solution of \eqref{linDuDm} with $\mu_0=\delta_y$, the first condition is immediate because of the Neumann condition of \eqref{linDuDm}.
For the second condition, we consider $y\in\partial\Omega$ and we take
$$
\mu_0=-\partial_w(\delta_y)\,,\qquad\mbox{with }w=a(y)\nu(y)\,.
$$
\begin{comment}
Then we consider the unique solution $(v,\mu)$ of \eqref{linDuDm}. The estimate \eqref{stimelin} tells us that
$$
\norm{v}_{1,2+\alpha}+\sup\limits_{t\in[0,T]}\norm{\mu(t)}_{-(1+\alpha),N}+\norm{\rho}_{L^p}\le C\norm{-\partial_w\delta_y}_{-(1+\alpha)}.
$$
We compute the right-hand side term. For a function $\varphi\in \mathcal{C}^{2+\alpha}(\Omega)$ such that $a(x)D\varphi(x)\cdot\nu(x)_{|\partial\Omega}=0$ we have
$$
\langle -\partial_w\delta_y,\varphi\rangle=\langle\delta_y,\partial_w\varphi\rangle=a(y)D\varphi(y)\cdot\nu(y)=0\,.
$$
Hence $\norm{-\partial_w\delta_y}_{-(2+\alpha),N}=0$, which implies
\begin{equation}\label{bastavaquesto}
v=0\,,\qquad\langle\mu,\varphi\rangle=0\quad\forall\varphi\in\mathcal{C}^{2+\alpha} \mbox{ s.t. }aD\varphi\cdot\nu_{|\partial\Omega}=0\,.
\end{equation}
\end{comment}
We want to prove that $(v,\mu)=(0,\mu)$ is a solution of \eqref{linDuDm} with $\mu_0=-\partial_w\delta_y$, where $\mu$ is the unique solution in the sense of Definition \ref{canzonenuova} of
$$
\begin{cases}
\mu_t-\mathrm{div}(a(x)D\mu)-\mathrm{div}(\mu (H_p(x,Du)+\tilde{b}))=0\,,\\
\mu(t_0)=\mu_0\,,\\
\left(a(x)D\mu+\mu (H_p(x,Du)+\tilde{b})\right)\cdot\nu_{|\partial\Omega}=0\,.
\end{cases}
$$
$$
We only have to check that, if $\mu$ is a solution of this equation, then $v=0$ solves
\begin{equation}\label{muovt}
\begin{cases}
-v_t-\mathrm{tr}(a(x)D^2v)+H_p(x,Du)\cdot Dv=\mathlarger{\frac{\delta F}{\delta m}}(x,m(t))(\mu(t))\,,\\
v(T,x)=\mathlarger{\frac{\delta G}{\delta m}}(x,m(T))(\mu(T))\,,\\
a(x)Dv\cdot\nu_{|\partial\Omega}=0\,,
\end{cases}
\end{equation}
which reduces to prove that
$$
\dm{F}(x,m(t))(\mu(t))=\dm{G}(x,m(T))(\mu(T))=0\,.
$$
\begin{comment}
This is already proved in \eqref{bastavaquesto}, since $\dm{F}(x,m(t),\cdot)$ and $\dm{G}(x,m(T),\cdot)$ satisfy the condition $aD\varphi\cdot\nu_{|\partial\Omega}=0$ and are in $\mathcal{C}^{2+\alpha}$, thanks to hypotheses $(iv)$, $(v)$ and $(vi)$ of \ref{ipotesi}.
\end{comment}
We will give a direct proof.
Choosing a test function $\phi(t,y)$ satisfying \eqref{hjbfp}, with $\psi(t,y)=0$ and $\xi(y)=\dm{F}(x,m(t),y)$, we have from the boundary conditions of $\dm{F}$ that $\phi$ is a $\mathcal{C}^{\frac{1+\alpha}{2},1+\alpha}$ function satisfying Neumann boundary conditions.
It follows from the weak formulation of $\mu$ that
$$
\dm{F}(x,m(t))(\mu(t))=\langle \mu(t),\dm{F}(x,m(t),\cdot)\rangle=\langle\mu_0,\phi(0,\cdot)\rangle=0\,,
$$
since $aD\phi\cdot\nu_{|\partial\Omega}=0$ and
$$
\langle\mu_0,\phi(0,\cdot)\rangle=\langle -\partial_w\delta_y,\phi(0,\cdot)\rangle=a(y)D\phi(0,y)\cdot\nu(y)=0\,.
$$
Same computations hold for $\dm{G}$, proving that $v=0$ satisfies \eqref{muovt}.
Then we can easily conclude:
\begin{align*}
a(y)D_mU(t_0,x,m_0,y)\cdot\nu(y)&=D_y\dm{U}(t_0,x,m_0,y)\cdot w\\&=\left\langle\dm{U}(t_0,x,m_0,\cdot),\mu_0\right\rangle=v(t_0,x)=0\,.
\end{align*}
\end{proof}
\end{cor}
\section{Solvability of the first-order Master Equation}
The $\mathcal{C}^1$ character of $U$ with respect to $m$ is crucial in order to prove the main theorem of this chapter.
\begin{proof}[Proof of Theorem \ref{settepuntouno}]
We start from the existence part.\\
\emph{Existence}. We start assuming that $m_0$ is a smooth and positive function satisfying \eqref{neumannmzero}, and we consider $(u,m)$ the solution of $MFG$ system starting from $m_0$ at time $t_0$. Then
$$
\partial_t U(t_0,x,m_0)
$$
can be computed as the sum of the two limits:
$$
\lim\limits_{h\to0} \frac{U(t_0+h,x,m_0)-U(t_0+h,x,m(t_0+h))}{h}
$$
and
$$
\lim\limits_{h\to0} \frac{U(t_0+h,x,m(t_0+h))-U(t_0,x,m_0)}{h}\,.
$$
The second limit, using the very definition of $U$, is equal to
\begin{align*}
\lim\limits_{h\to0} \frac{u(t_0+h,x)-u(t_0,x)}{h}=u_t(t_0,x)=-\mathrm{tr}(a(x)D^2u(t_0,x))+H(x,Du(t_0,x))\\-F(x,m(t_0))=-\mathrm{tr}(a(x)D^2_xU(t_0,x,m_0))+H(x,D_xU(t_0,x,m_0))-F(x,m_0)\,.
\end{align*}
As regards the first limit, defining $m_s:=(1-s)m(t_0)+sm(t_0+h)$ and using the $\mathcal{C}^1$ regularity of $U$ with respect to $m$, we can write it as
\begin{align*}
-\lim\limits_{h\to 0}\int_0^1\ensuremath{\int_{\Omega}}\dm{U}(t_0+h,x,m_s,y)\frac{(m(t_0+h,y)-m(t_0,y))}h\,dyds\\
=-\int_0^1\ensuremath{\int_{\Omega}}\dm{U}(t_0,x,m_0,y)m_t(t_0,y)\,dyds=-\ensuremath{\int_{\Omega}}\dm{U}(t_0,x,m_0,y)m_t(t_0,y)\,dy\\=-\ensuremath{\int_{\Omega}}\dm{U}(t_0,x,m_0,y) \,\mathrm{div}\!\left(a(y)Dm(t_0,y) +m(t_0,y)(\tilde{b}+H_p(y,Du(t_0,y)))\right)dy\,.
\end{align*}
Taking into account the representation formula \eqref{reprform} for $\dm{U}$ , we integrate by parts and use the boundary condition of $\dm{U}$ and $m$ to obtain
\begin{align*}
\ensuremath{\int_{\Omega}}\left[H_p(y,D_xU(t_0,y,m_0))D_mU(t_0,x,m_0,y)-\mathrm{tr}\left(a(y)D_yD_mU(t_0,x,m_0,y)\right)\right]dm_0(y)
\end{align*}
So with the computation of the two limits we obtain
\begin{align*}
&\partial_t U(t,x,m)=-\mathrm{tr}\left(a(x)D_x^2 U(t,x,m)\right)+H\left(x,D_x U(t,x,m)\right)\\&-\mathlarger{\ensuremath{\int_{\Omega}}}\mathrm{tr}\left(a(y)D_y D_m U(t,x,m,y)\right)dm(y)+\\&\mathlarger{\ensuremath{\int_{\Omega}}} D_m U(t,x,m,y)\cdot H_p(y,D_x U(t,y,m))dm(y)- F(x,m)\,.
\end{align*}
So the equation is satisfied for all $m_0\in\mathcal{C}^\infty$ satisfying \eqref{neumannmzero}, and so, with a density argument, for all $m_0\in\mathcal{P}(\Omega)$.
The boundary conditions are easily verified thanks to Corollary \ref{delarue}. This concludes the existence part.\\
\emph{Uniqueness}. Let $V$ be another solution of the Master Equation \eqref{Master} with Neumann boundary conditions. We consider, for fixed $t_0$ and $m_0$, with $m_0$ smooth satisfying \eqref{neumannmzero}, the solution $\tilde{m}$ of the Fokker-Planck equation:
\begin{equation*}
\begin{cases}
\tilde{m}_t-\mathrm{div}(a(x)D\tilde{m})-\mathrm{div}\left(\tilde{m}\left(H_p(x,D_xV(t,x,\tilde{m}))+\tilde{b}\right)\right)=0\,,\\
\tilde{m}(t_0)=m_0\,,\\
\left[a(x)D\tilde{m}+(\tilde{b}+D_xV(t,x,\tilde{m}))\right]\cdot\nu(x)_{|\partial\Omega}=0\,.
\end{cases}
\end{equation*}
This solution is well defined since $D_xV$ is Lipschitz continuous with respect to the measure variable.
Then we define $\tilde{u}(t,x)=V(t,x,\tilde{m}(t))$. Using the equations of $V$ and $\tilde{m}$, we obtain
\begin{align*}
\tilde{u}_t(t,x)=&\,V_t(t,x,\tilde{m}(t))+\ensuremath{\int_{\Omega}}\dm{V}(t,x,\tilde{m}(t),y)\,\tilde{m}_t(t,y)\,dy\\
=&\,V_t(t,x,\tilde{m}(t))+\ensuremath{\int_{\Omega}}\dm{V}(t,x,\tilde{m}(t),y)\,\mathrm{div}\!\left(a(y)D\tilde{m}(t,y)\right)\,dy\\+&\,\ensuremath{\int_{\Omega}} \dm{V}(t,x,\tilde{m}(t),y)\,\mathrm{div}\!\left(\tilde{m}\left(H_p(x,D_xV(t,x,\tilde{m}))+\tilde{b}\right)\right)\,dy\,.
\end{align*}
We compute the two integrals by parts. As regards the first, we have
\begin{align*}
&\ensuremath{\int_{\Omega}}\dm{V}(t,x,\tilde{m}(t),y)\,\mathrm{div}\!\left(a(y)D\tilde{m}(t,y)\right)\,dy\\=-&\ensuremath{\int_{\Omega}} a(y)D\tilde{m}(t,y)\,D_mV(t,x,\tilde{m}(t),y)\,dy+\int_{\partial\Omega}\dm{V}(t,x,\tilde{m}(t),y)\,a(y)D\tilde{m}(t,y)\cdot\nu(t,y)\,dy\\
=&\ensuremath{\int_{\Omega}}\mathrm{div}(a(y)D_yD_mV(t,x,\tilde{m}(t),y))\,\tilde{m}(t,y)\,dy-\ensuremath{\int_{\Omega}} a(y)D_mV(t,x,\tilde{m}(t),y)\cdot\nu(y)\tilde{m}(t,y)dy\\+&\int_{\partial\Omega}\dm{V}(t,x,\tilde{m}(t),y)\,a(y)D\tilde{m}(t,y)\cdot\nu(t,y)\,dy\,,
\end{align*}
while for the second
\begin{align*}
&\ensuremath{\int_{\Omega}}\dm{V}(t,x,\tilde{m}(t),y)\,\mathrm{div}\!\left(\tilde{m}\left(H_p(x,D_xV(t,x,\tilde{m}))+\tilde{b}\right)\right)\,dy\\
=-&\ensuremath{\int_{\Omega}}\left(H_p(x,D_xV(t,x,\tilde{m}))+\tilde{b}\right)D_mV(t,x,\tilde{m},y)\tilde{m}(t,y)dy\\+&\int_{\partial\Omega}\dm{V}(t,x,\tilde{m}(t),y)\left(H_p(x,D_xV(t,x,\tilde{m}))+\tilde{b}\right)\cdot\nu(y)\,\tilde{m}(t,y)dy\,.
\end{align*}
Putting together these estimates and taking into account the boundary conditions on $V$ and $m$:
$$
\left[a(x)D\tilde{m}+(\tilde{b}+D_xV(t,x,\tilde{m}))\right]\cdot\nu(x)_{|x\in\partial\Omega}=0\,,\qquad a(y)D_mV(t,x,m,y)\cdot\nu(y)_{|y\in\partial\Omega}=0\,,
$$
and the relation between the divergence and the trace term
$$
\mathrm{div}(a(x)D\phi(x))=\mathrm{tr}(a(x)D^2\phi(x))+\tilde{b}(x)D\phi(x)\,,\qquad\forall\phi\in W^{2,\infty}(\Omega)\,,
$$
we find
\begin{align*}
\tilde{u}_t(t,x)=&\,V_t(t,x,\tilde{m}(t))+\ensuremath{\int_{\Omega}}\mathrm{tr}(a(y)D_yD_mV(t,x,\tilde{m},y))\,d\tilde{m}(y)\\-&\,\ensuremath{\int_{\Omega}} -H_p(y,D_xV(t,y,\tilde{m})) D_mV(t,x,\tilde{m},y)\,d\tilde{m}(y)\\=&\,-\mathrm{tr}(a(x)D^2_xV(t,x,\tilde{m}(t)))+H(x,D_xV(t,x,\tilde{m}(t)))-F(x,\tilde{m}(t))\\=&\,-\mathrm{tr}(a(x)D^2\tilde{u}(t,x))+H(x,D\tilde{u}(t,x))-F(x,\tilde{m}(t))\,.
\end{align*}
This means that $(\tilde{u},\tilde{m})$ is a solution of the MFG system \eqref{meanfieldgames}-\eqref{fame}. Since the solution of the Mean Field Games system is unique, we get $(\tilde{u},\tilde{m})=(u,m)$ and so $V(t_0,x,m_0)=U(t_0,x,m_0)$ whenever $m_0$ is smooth.\\
Then, using a density argument, the uniqueness is proved.
\end{proof}
\textbf{Acknowledgements.}
I wish to sincerely thank P. Cardaliaguet and A. Porretta for the help and the support during the preparation of this article. I wish to thank also F. Delarue for the enlightening ideas he gave to me.
\begin{thebibliography}{abc}
\bibitem{gol} Achdou, Y., Buera, F. J., Lasry, J.-M., Lions, P.-L., Moll, B. (2014). {\it Partial differential equation models in macroeconomics.} Phil. Trans. R Soc. A 372(2028):20130397. DOI: 10. 1098/rsta.2013.0397.
\bibitem{ags} Ambrosio, L., Gigli, N., Savar\'e, G. (2008). \textit{Gradient flows in metric spaces and in the space of probability measures. Second edition.} Lectures in Mathematics ETH Z\"{u}rich. Birkh\"{a}user Verlag, Basel
\bibitem{dybala} Bayraktar, E., Cecchin, A., Cohen, A., Delarue, F. (2019). {\it Finite state mean field games with wright-fisher common noise}. arXiv preprint arXiv:1912.06701.
\bibitem{nuova1} Bayraktar, E., Cohen, A. (2018). {\it Analysis of a finite state many player game using its master equation}. SIAM Journal on Control and Optimization, 56(5), 3538-3568.
\bibitem{14} Bensoussan, A., Frehse, J., Yam. S.C.P. (2015). {\it The Master Equation in mean field theory}. J. Math. Pures et Appliqu\'ees, 103, 1441-1474.
\bibitem{15} Bensoussan, A. Frehse, J., Yam, S.C.P. (2017). {\it On the interpretation of the Master Equation}. Stoc. Proc. App., 127, 2093-2137.
\bibitem{gomez} Bertucci, C. (2020). {\it Monotone solutions for mean field games master equations: finite state space and optimal stopping.} arXiv preprint arXiv:2007.11854.
\bibitem{bucchin} Buckdahn, R., Li, J., Peng, S., Rainer, C. (2017). {\it Mean-field stochastic differential equations and associated PDEs.} Ann. Probab., 45, 824-878.
\bibitem{28} Chassagneux, J.F., Crisan, D., Delarue, F. (2014). {\it Classical solutions to the Master Equation for large population equilibria}. arXiv preprint arXiv:1411.3009.
\bibitem{nuova14}
{\sc Cardaliaguet, P., Cirant, M., Porretta, A.} (2018). {\em Remarks on nash
equilibria in mean field game models with a major player}, arXiv preprint
arXiv:1811.02811.
\bibitem{card} Cardaliaguet, P., Delarue, F., Lasry, J.-M., Lions, P.-L. (2019). \textit{The Master Equation and the Convergence Problem in Mean Field Games}. Annals of Mathematics Studies, Vol. 2.
\bibitem{resultuno} Carmona, R., Delarue, F. (2013). \textit{Probabilist analysis of Mean Field Games}. SIAM Journal on Control and Optimization, 51(4), 2705-2734.
\bibitem{24} Carmona, R., Delarue, F. (2014). {\it The Master Equation for large population equilibriums}. Stochastic Analysis and Applications 2014, Editors: D. Crisan, B. Hambly, T. Zariphopoulou. Springer.
\bibitem{loacker} Carmona, R., Delarue, F., Lacker, D. (2016). {\it Probabilistic anal- ysis of mean field games with a common noise}. Ann. Probab, 44, 3740-3803.
\bibitem{cicciocaputo} Cecchin, A., Delarue, F. (2020). {\it Selection by vanishing common noise for potential finite state mean field games}. arXiv preprint arXiv:2005.12153.
\bibitem{nuova11} Cecchin, A., Pelino, G. (2019). {\it Convergence, fluctuations and large deviations for finite state mean field games via the master equation}. Stochastic Processes and their Applications, 129(11), 4510-4555.
\bibitem{ramadan} Delarue, F., Lacker, D., Ramanan, K. (2019). {\it From the master equation to mean field game limit theory: a central limit theorem}. Electron. J. Probab. 24, no. 51, 1-54.
\bibitem{nuova4} Delarue, F., Lacker, D., Ramanan, K. (2018). {\it From the master equation to mean field game limit theory: Large deviations and concentration of measure}. arXiv preprint arXiv:1804.08550.
\bibitem{cingul} Delfour, M.C., Zolesio, J.-P. (1994). {\it Shape analysis via oriented distance function}. J. Funct. Anal. 123, 129-201.
\bibitem{fifa21} Gangbo, W., Mészáros, A. R. (2020). \textit{Global well-posedness of Master Equations for deterministic displacement convex potential mean field games.} arXiv preprint arXiv:2004.01660.
\bibitem{tonali} Gangbo, W., Mészáros, A. R., Mou, C., Zhang, J. (2021). \textit{Mean Field Games Master Equations with Non-separable Hamiltonians and Displacement Monotonicity.} arXiv preprint arXiv:2101.12362.
\bibitem{nuova16}
{Gangbo, W., Swiech, A. (2015).}, {\em Existence of a solution to an equation
arising from the theory of mean field games}, Journal of Differential
Equations, 259, pp.~6573--6643.
\bibitem{HCM} Huang, M., Caines, P.E., Malham\'e, R.P. (2006). {\it Large population stochastic dynamic games: closed-loop McKean-Vlasov systems and the Nash certainty equivalence principle}, Comm. Inf. Syst. {\bf 6}, 221--251.
\bibitem{resultdue} Huang, M., Caines, P.E., Malham\'e, R.P. (2007). {\it Large population Cost-Coupled LQG Problems With Nonuniform Agents: Individual-Mass Behavior and Decentralized $\varepsilon$-Nash Equilibria}. IEEE Transactions on Automatic Control, 52(9), 1560-1571.
\bibitem{resulttre}Kolokoltsov, V.N., Li, J., Yang, W. (2011). \textit{Mean Field Games and nonlinear Markov Processes}. Preprint arXiv:1112.3744.
\bibitem{lsu} Lady\v{z}enskaja, O.A., Solonnikov, V.A., Ural'ceva, N.N. (1967). \textit{Linear and Quasi-linear Equations of Parabolic Type}. Translations of Mathematical Monographs, Vol. 23, American Mathematical Society, Providence R.I..
\bibitem{LL1} Lasry, J.-M., Lions, P.-L. (2006). {\it Jeux \`a champ moyen. I. Le cas stationnaire.}
C. R. Math. Acad. Sci. Paris 343, 619--625.
\bibitem{LL2} Lasry, J.-M., Lions, P.-L. (2006). {\it Jeux \`a champ moyen. II. Horizon fini et contr$\hat{o}$le optimal.}
C. R. Math. Acad. Sci. Paris 343, 679--684.
\bibitem{LL-japan} Lasry, J.-M., Lions, P.-L. (2007). {\it Mean field games.} Jpn. J. Math. 2 , no. 1, 229--260.
\bibitem{LL3} Lasry, J.-M., Lions, P.-L., Gu\`eant, O. (2011). {\it Application of Mean Field Games to Growth Theory.}
In: Paris-Princeton lectures on mathematical finance; Lecture notes in Mathematics. Springer, Berlin.
\bibitem{prontoprontopronto} Lions, P.-L. {\it Cours au Coll\`ege de France}. www.college-de-france.fr\,.
\bibitem{lunardi} Lunardi, A. (2012). \textit{Analytic Semigroups and Optimal Regularity in Parabolic Problems}. Modern Birkh\"{a}user Classics.
\bibitem{koulibaly} Mayorga, S. (2020). \textit{Short time solution to the master equation of a first order mean field game.} Journal of Differential Equations, 268(10), 6251-6318.
\bibitem{prossimamente} Ricciardi, M. (2021). \textit{The convergence problem in Mean Field Games with Neumann Conditions}. Technical report.
\end{thebibliography}
\end{document} |
\begin{document}
\begin{abstract} We obtain results on the condensation principle called local club condensation. We prove that in extender models an equivalence between the failure of local club condensation and subcompact cardinals holds. This gives a characterization of $\square_{\kappa}$ in terms of local club condensation in extender models. Assuming $\axiomfont{GCH}$, given an interval of ordinals $I$ we verify that iterating the forcing defined by Holy-Welch-Wu, we can preserve $\axiomfont{GCH}$, cardinals and cofinalities and obtain a model where local club condensation holds for every ordinal in $I$ modulo those ordinals whose cardinality is a singular cardinal.
We prove that if $\kappa$ is a regular cardinal in an interval $I$, the above iteration provides enough condensation for the combinatorial principle $\Dl_{S}^{*}(\Pi^{1}_{2})$, and in particular $\diamondsuit(S)$, to hold for any stationary $S \subseteq \kappa$.
\end{abstract}
\date{\today}
\title{On Local Club Condensation}
\section{Introduction}
\emph{Local club condensation} is a condensation principle that abstracts some of the condensation properties of $L$, G\"odel's constructible hierarchy. Local club condensation was first defined in \cite{FHl} and it is part of the outer model program, which searches for forcing models that have $L$-like features.
\begin{convention}
The class of ordinals is denoted by $\ord$.
The transitive closure of a set $X$ is denoted by $\trcl(X)$,
and the Mostowski collapse of a structure $\mathfrak B$ is denoted by $\clps(\mathfrak B)$.
\end{convention}
In order to define condensation principles we define filtrations, which are an abstraction of the stratification $\langle L_{\alpha} \mathrel{|}\allowbreak \alpha < \ord \rangle $
of $L$.
\begin{defn}
Given ordinals $\alpha < \beta$ we say that $\langle M_{\xi} \mathrel{|}\allowbreak \alpha < \xi < \beta \rangle $ is a \emph{filtration} iff
\begin{enumerate}
\item for all $\xi \in (\alpha,\beta)$, $M_{\xi}$ is transitive, $\xi \subsetequbseteq M_{\xi}$,
\item for all $\xi \in (\alpha,\beta)$, $M_{\xi} \cap \ord = \xi $,
\item for all $\xi \in (\alpha,\beta)$, $ |M_{\xi}| \leq \max \{\aleph_0,|\xi|\}$,
\item if $\xi < \zeta$, then $M_{\xi} \subseteq M_{\zeta}$,
\item if $\xi$ is a limit ordinal, then $M_{\xi}=\bigcup_{\alpha < \xi} M_{\alpha}$.
\end{enumerate}
\end{defn}
\begin{convention}\label{Union}
Given a filtration $\langle M_{\xi} \mathrel{|}\allowbreak \xi < \beta \rangle$, if $\beta$ is a limit ordinal we let $M_{\beta}:=\bigcup_{\gamma < \beta} M_{\gamma}$.
\end{convention}
The following is an abstract formulation of the Condensation lemma that holds for the constructible hierarchy $\langle L_{\alpha} \mathrel{|}\allowbreak \alpha \in \ord \rangle $:
\begin{defn} Suppose that $\kappa$ and $\lambda$ are regular cardinals and that $ \vec{M} = \langle M_{\alpha} \mathrel{|}\allowbreak \kappa < \alpha < \lambda \rangle $ is a filtration. We say that $\vec{M}$ satisfies \emph{strong condensation} iff for every $\alpha \in (\kappa,\lambda)$ and every $ (X,\in) \prec_{\Sigma_{1}} (M_{\alpha},\in)$ there exists $\bar{\alpha}$ such that $\text{clps}(X,\in) = (M_{\bar{\alpha}},\in)$.
\end{defn}
While strong condensation is not consistent with the existence of large cardinals, see \cite{FHl} and \cite{schvlck}, local club condensation, which we define below, is consistent with any large cardinal, see \cite[Theorem~1]{FHl}.
\begin{defn}[Holy,Welch,Wu,Friedman \cite{HWW},\cite{FHl}] \label{LCCupto}
Let $ \kappa $ be a cardinal of uncountable cofinality.
We say that $\vec{M}=\langle M_\beta \mathrel{|}\allowbreak \beta < \kappa \rangle $ is a witness to the fact that \emph{local club condensation holds in $(\eta,\zeta)$},
and denote this by $\langle H_{\kappa},{\in}, \vec M\rangle \models \axiomfont{LCC}(\eta,\zeta)$,
iff all of the following hold true:
\begin{enumerate}
\item $\eta < \zeta \leq \kappa+1$;
\item $\vec M$ is a \emph{ filtration} such that $M_{\kappa}= H_\kappa$ \footnote{See Convention \ref{Union}},
\item For every ordinal $\alpha$ in the interval $(\eta,\zeta)$ and every sequence $\mathcal{F} = \langle (F_{n},k_{n}) \mathrel{|}\allowbreak n \in \omega \rangle$ such that, for all $n \in \omega$, $k_{n} \in \omega$ and $F_{n} \subseteq (M_{\alpha})^{k_{n}}$, there is a sequence
$\vec{\mathfrak{B}} = \langle \mathcal{B}_{\beta} \mathrel{|}\allowbreak \beta < |\alpha| \rangle $ having the following properties:
\begin{enumerate}
\item for all $\beta<|\alpha|$, $\mathcal{B}_{\beta}$ is of the form $\langle B_{\beta},{\in}, \vec{M} \mathbin\upharpoonright (B_{\beta} \cap\ord), (F_n\cap(B_\beta)^{k_n})_{n\in\omega} \rangle$;
\item for all $\beta<|\alpha|$, $\mathcal{B}_{\beta} \prec \langle M_{\alpha},{\in}, \vec{M}\mathbin\upharpoonright \alpha, (F_n)_{n\in\omega} \rangle$;\footnote{Note that the case $ \alpha= \kappa $ uses Convention~\ref{Union}.}
\item for all $\beta<|\alpha|$, $\beta\subseteq B_\beta$ and $|B_{\beta}| < |\alpha|$;
\item for all $\beta < |\alpha|$, there exists $\bar{\beta}<\kappa$ such that
$$\clps(\langle B_{\beta},{\in}, \langle B_{\delta} \mathrel{|}\allowbreak \delta \in B_{\beta}\cap\ord \rangle \rangle) = \langle M_{\bar{\beta}},{\in}, \langle M_{\delta} \mathrel{|}\allowbreak \delta \in \bar{\beta} \rangle \rangle;$$
\item $\langle B_\beta\mathrel{|}\allowbreak\beta<|\alpha|\rangle$ is $\subseteq$-increasing, continuous and converging to $M_\alpha$.
\end{enumerate}
\end{enumerate}
For $\vec{\mathfrak{B}}$ as in Clause~(3) above we say that
\emph{$\vec{\mathfrak{B}}$ witnesses $\axiomfont{LCC}(\eta,\zeta)$ at $\alpha$ with respect to $\mathcal{F}$}.
We write $\axiomfont{LCC}(\eta,\zeta]$ for $\axiomfont{LCC}(\eta,\zeta+1)$.
\end{defn}
In Section 2 we present our results regarding local club condensation in extender models.
An \emph{extender model} is an inner model of the form $L[E]$; it is a generalization of $L$ that can accommodate large cardinals. An inner model of the form $L[E]$ is the smallest transitive proper class that is a model of $\axiomfont{ZF}$ and is closed under the operator $x \mapsto x \cap E$, where $E:\ord \rightarrow V$ and each $E_{\alpha} = \emptyset $ or $E_{\alpha}$ is a partial extender. $L[E]$ models can be stratified using the $L$-hierarchy and the $J$-hierarchy, for example:
\begin{itemize}
\item $J_{0}^{E}=\emptyset$,
\item $J_{\alpha+1}^{E}= rud_{E}(J_{\alpha}^{E}\cup\{J_{\alpha}^{E}\})$,
\item $J_{\gamma}^{E}=\bigcup_{\beta < \gamma}J_{\beta}^{E}$ if $\gamma$ is a limit ordinal.
\end{itemize}
and finally $$L[E] = \bigcup_{\alpha \in \ord}J_{\alpha}^{E}.$$
In \cite[Theorem~8]{FHl} it is shown that local club condensation holds in various extender models; we extend \cite[Theorem~8]{FHl} to an optimal result for extender models that are weakly iterable (see Definition \ref{weaklyit}). We characterize local club condensation in extender models in terms of subcompact cardinals\footnote{A subcompact cardinal is a large cardinal that is located in the consistency strength hierarchy below a supercompact cardinal and above a superstrong cardinal. See definition in \cite{SquareinK}}.
\begin{thma} \label{NoSubcompact}
If $L[E]$ is an extender model that is weakly iterable, then given an infinite cardinal $\kappa$ the following are equivalent:
\begin{itemize}
\item[(a)] $\langle L_{\kappa^{+}}[E],{\in},\langle L_\beta[E]\mathrel{|}\allowbreak\beta\in\kappa^{+} \rangle\rangle\models\axiomfont{LCC}(\kappa^{+},\kappa^{++}]$.
\item[(b)] $L[E] \models ( \kappa ~ \text{is not a subcompact cardinal})$.
\end{itemize}
In addition, for every limit cardinal $\kappa$ with $\cf(\kappa)>\omega$ we have \begin{center}$\langle L_{\kappa^{+}}[E],{\in},\langle L_\beta[E]\mathrel{|}\allowbreak\beta\in\kappa^{+} \rangle\rangle\models\axiomfont{LCC}(\kappa,\kappa^{+}].$\end{center}
\end{thma}
We warn the reader that it is not known how to construct an extender model that is weakly iterable and has a subcompact cardinal, but this is part of the aim of the inner model theory program and it is desirable to know what holds in such models.
Corollary~A provides an equivalence between $\square_{\kappa}$ and a condensation principle that holds in the interval $(\kappa^{+},\kappa^{++})$. Corollary~A is immediate from Theorem~A and the main result in \cite{MR2081183}:
\begin{cora} If $L[E]$ is an extender model with Jensen's $\lambda$-indexing that is weakly iterable, then given $\kappa$, an $L[E]$ cardinal, the following are equivalent:
\begin{itemize}
\item[(a)] $L[E]\models \square_{\kappa}$
\item[(b)] $\langle L_{\kappa}[E], \in, \langle L_{\beta}[E] \mathrel{|}\allowbreak \beta < \kappa^{+} \rangle \rangle \models \axiomfont{LCC}(\kappa^{+},\kappa^{++})$
\end{itemize} \end{cora}
We verify that a subcompact cardinal is an even more severe impediment for $\axiomfont{LCC}$ to hold:
\begin{thmb} Suppose $L[E]$ is an extender model with Jensen's $\lambda$-indexing such that every countable elementary submodel of $L[E]
$ is $(\omega_{1}+1,\omega_{1})$-iterable. In $L[E]$, if an ordinal $\kappa$ is a subcompact cardinal, then there is no $\vec{M}$ such that $\langle M_{\kappa^{++}}, \in , \vec{M} \rangle \models \axiomfont{LCC}(\kappa^{+},\kappa^{++})$ and $M_{\kappa^{++}}=H_{\kappa^{++}}^{L[E]}$ and $M_{\kappa^{+}}=H_{\kappa^{+}}^{L[E]}$.
\end{thmb}
In Section 3 we prove how to force local club condensation on a given interval of ordinals $I$ modulo ordinals with singular cardinality (Theorem~C). A model where local club condensation holds on arbitrary intervals $I$, including ordinals with singular cardinality, was already obtained in \cite{FHl}; this was done via class forcing. Although we do not obtain as much condensation as in \cite{FHl}, building on \cite{HWW} we define a set forcing $\mathbb{P}$ which is simpler than the forcing in \cite{FHl}, and which will force enough condensation for a few applications, see Section 4.
\begin{thmc}
If $\axiomfont{GCH}$ holds and $\kappa$ is a regular cardinal and $\alpha$ is an ordinal, then there is a set forcing $\mathbb{P}$ which is $<\kappa$-directed closed and $\kappa^{+\alpha+1}$-cc, $\axiomfont{GCH}$ preserving such that in $V^{\mathbb{P}}$ there is a filtration $\langle M_{\gamma} \mathrel{|}\allowbreak \gamma < \kappa^{+\alpha+1} \rangle $ such that for every regular cardinal $ \theta \in [\kappa,\kappa^{+\alpha+1}]$ we have $H_{\theta}=M_{\theta}$ and $\langle M_{\gamma}\mathrel{|}\allowbreak \gamma < \kappa^{+\alpha}\rangle \models \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\alpha}) $.
\end{thmc}
In Section 4 we show that the iteration of the forcing from \cite{HWW} implies $\Dl^{*}_{S}(\Pi^{1}_{2})$ (see definition in \cite{FMR}) which is a combinatorial principle defined in \cite{FMR} and is a variation of Devlin's $\diamondsuit^{\sharp}_{\kappa}$ (see \cite{Devlin}).
\begin{cord} Let $\kappa$ be an uncountable regular cardinal and let $\mu$ be a cardinal such that $\mu^{+} \leq \kappa$. If $\axiomfont{GCH}$ holds, then there is a set forcing $\mathbb{P}$ which is $<\mu^{+}$-directed closed and $\kappa^{+}$-cc, $\axiomfont{GCH}$ preserving, such that in $V^{\mathbb{P}}$ we have $\Dl^{*}_{S}(\Pi^{1}_{2})$, in particular $\diamondsuit(S)$, for any stationary $S \subseteq \kappa$.
\end{cord}
\section{Local club condensation in extender models}
The main result in this section is Theorem~A which extends \cite[Theorem~8]{FHl} and gives a characterization of local club condensation in terms of subcompact cardinals.
For the standard notation on inner model theory and fine structure like \textit{premouse, projectum, standard parameter} and etc. we refer the reader to \cite{MR1876087}.
\begin{defn}
Given a premouse $\mathcal{M}$, a parameter $p \in (\mathcal{M} \cap \ord^{<\omega})$ and $\xi \in \mathcal{M} \cap \ord$ and $\langle \varphi_{i} \mathrel{|}\allowbreak i \in \omega \rangle $ a primitive recursive enumeration of all $\Sigma_{1}$ formulas in the premice language we define $$T_{p}^{M}(\xi)=\{ (a,i) \in (\xi^{<\omega}\times \omega) \mathrel{|}\allowbreak M\models \varphi_{i}(a,p)\}.$$
\end{defn}
\begin{fact}\label{Phi} Given $\langle \varphi_{n} \mathrel{|}\allowbreak n \in \omega \rangle $ a primitive recursive enumeration of all $ \Sigma_1 $ formulas in the premice language, there exists a $\Sigma_{1}$-formula $\Phi(w,x,y)$ in the premice language such that for any premouse $\mathcal{M}$ the following hold:
\begin{itemize}
\item If $n \in \omega$ is such that $\varphi_{n}(x) = \exists y \phi_n(x,y)$ and $\phi_n$ is $\Sigma_0$, then for every $x \in \mathcal{M}$ there exists
$ y_0 \in \mathcal{M}$ such that $(\mathcal{M}\models \phi_n(x,y_0))$ iff there exists $y_1 \in \mathcal{M}$ such that $ (\mathcal{M} \models \Phi(n,x,y_1))$
\item For every $n \in \omega$ and for every $x \in \mathcal{M}$, if there are $y_{0},y_{1} \in \mathcal{M}$ such that $ (\mathcal{M} \models \Phi(n,x,y_0) \wedge \Phi(n,x,y_1) ))$ then $y_0 = y_1$
\end{itemize}
\end{fact}
\begin{defn}
Let $\mathcal{M}$ be a premouse. We denote by $h_1^{\mathcal{M}}$ the partial function from $\omega \times \mathcal{M} $ into $\mathcal{M}$ defined by the formula $\Phi$ from Fact \ref{Phi}. Given $ X \subseteq \mathcal{M}$ and $ p \in \mathcal{M}$ we denote by $h_{1}^{\mathcal{M}}[X,p]$ the set $h_{1}^{\mathcal{M}}[(X\times\{p\})^{<\omega}]$.
\end{defn}
\begin{fact} \label{Fact1}
\begin{enumerate}
\item Suppose $L[E]$ is an extender model. Let $\gamma$ be an ordinal such that $E_{\gamma}\neq\emptyset$, then there exists $g \in J_{\gamma+1}^{E}$ such that $g:\lambda(E_{\gamma})\rightarrow \gamma$ onto.
\item $\mathcal{P}(J_{\gamma}^{E})\cap J_{\gamma+1}^{E} = \Sigma_{\omega}(J_{\gamma}^{E}) $
\end{enumerate}
\end{fact}
\begin{lemma} \label{Collapse}
Suppose $L[E]$ is an extender model and $\gamma$ is such that $E_{\gamma} \neq \emptyset$ and $L_{\gamma}[E]=J_{\gamma}^{E}$. Then there exists $g \in L_{\gamma+1}[E]$ such that $g:\lambda(E_{\gamma})\rightarrow \gamma$ and $g$ is onto.
\end{lemma}
\begin{proof} It follows from Fact \ref{Fact1} and that $\Sigma_{\omega}(J_{\gamma}^{E}) = L_{\gamma+1}[E]$.
\end{proof}
\begin{remark}
Notice that in particular for any premouse $\mathcal{M}$, if $\gamma \in \mathcal{M} \cap \ord $ and $\mathcal{M}\models ``\gamma \text{ is a cardinal}"$ it follows from Fact \ref{Fact1} that $ E_{\gamma} = \emptyset$, as otherwise $$J_{\gamma+1}^{E} \models ``\gamma \text{ is not a cardinal},"$$ and hence $$\mathcal{M}\models ``\gamma \text{ is not a cardinal}."$$
\end{remark}
\begin{defn}\label{weaklyit}
We say that an extender model $L[E]$ is \emph{weakly iterable} iff for every $\alpha \in \ord$ if there exists an elementary embedding $\pi:\bar{\mathcal{M}} \rightarrow (J_{\alpha}^{E},\in,E|\alpha,E_{\alpha})$, then $\bar{\mathcal{M}}$ is $(\omega_{1}+1,\omega_{1})$-iterable.\footnote{See Definition 9.1.10 in \cite{MR1876087} for the definition of $(\omega_{1}+1,\omega_{1})$-iterable. }
\end{defn}
\begin{lemma} \label{Condensation} Let $L[E]$ be an extender model that is weakly iterable and let $\kappa$ be a cardinal in $L[E]$.
Suppose $i:\mathcal{N} \rightarrow \mathcal{M}$ is the inverse of the Mostowski collapse of $h_{1}^{\mathcal{M}}[\gamma \cup \{p_{1}^{\mathcal{M}}\}]$, $\rho_{1}(\mathcal{N})=\gamma$, $\crit(i)=\gamma$, $\gamma < \kappa$, $\mathcal{M} = \langle J_{\alpha}^{E}, \in, E\mathbin\upharpoonright \alpha, E_{\alpha} \rangle$ for some $\alpha \in (\kappa^{+},\kappa^{++})$. Then $\mathcal{N} \triangleleft \mathcal{M}$ if and only if $E_{\gamma}=\emptyset$.
\end{lemma}
\begin{proof} The proof is a special case of condensation lemma. Suppose that $E_{\gamma}= \emptyset$, we will verify that $\mathcal{N} \triangleleft \mathcal{M}$.
Let $H \prec_{\Sigma_{\omega}} V_{\Omega}$ for some $\Omega$ large enough, where $i \in H$ and $H$ is countable. Let $ \pi:\bar{H} \rightarrow V_{\Omega}$ be the inverse of the Mostowski collapse of $H$, let $\pi(\bar{\mathcal{N}}) = \mathcal{N}$, $\pi(\bar{\mathcal{M}})=\mathcal{M}$ and $\pi(\bar{i})=i$.
Let $e$ be an enumeration of $\bar{\mathcal{M}}$ and let $ \Sigma$ be an $e$-minimal $(\omega_{1},\omega_{1}+1)$-strategy for $\bar{\mathcal{M}}$ \footnote{The existence of an $e$-minimal iteration strategy follows from the hypothesis that $L[E]$ is weakly iterable and the Neeman-Steel lemma, see \cite[Theorem~9.2.11]{MR1876087}}. Since $\bar{i}$ embeds $\langle \bar{\mathcal{M}},\bar{\mathcal{N}},\bar{\gamma} \rangle$ into $\bar{\mathcal{M}}$, it follows from 9.2.12 in \cite{MR1876087} that we can compare $\langle \bar{\mathcal{M}},\bar{\mathcal{N}},\bar{\gamma}\rangle$ and $\bar{\mathcal{M}}$ and we have the following:
\begin{itemize}
\item $\bar{\mathcal{M}}$ wins the comparison,
\item the last model on the phalanx side is above $\bar{\mathcal{N}}$,
\item there is no drop on the branch of the phalanx side.
\end{itemize}
From the fact that $h_{1}^{\mathcal{N}}(\gamma \cup \bar{p}) = \mathcal{\mathcal{N}}$ it follows that $h_{1}^{\bar{\mathcal{N}}}(\bar{\gamma}\cup q)=\bar{\mathcal{N}}$ where $\pi(q)=\bar{p}$. This implies that $\bar{\mathcal{N}}$ can not move in the comparison, as otherwise it would drop and we already know that it is the $\bar{\mathcal{M}}$ side which wins the comparison. Let $\mathcal{T}$ be the iteration tree on $\bar{\mathcal{M}}$ and $\mathcal{U}$ the iteration tree on the phalanx $\langle \bar{M},\bar{N},\bar{\gamma} \rangle$.
\begin{claim}
$\bar{\mathcal{N}} \neq \mathcal{M}^{\mathcal{T}}_{\infty}$
\end{claim}
\begin{proof} We already know that $\bar{\mathcal{N}} \triangleleft \mathcal{M}^{\mathcal{T}}_{\infty}$. If $\bar{\mathcal{M}}$ does not move then $\bar{\mathcal{N}} \neq \mathcal{M}^{\mathcal{T}}_{\infty}=\mathcal{M}$ since they have different cardinality. Suppose $\mathcal{T}$ is non-trivial and $\mathcal{M}^{\mathcal{T}}_{\infty} = \bar{\mathcal{N}}$ let $b^{\mathcal{T}}$ be the main branch in $\mathcal{T}$. Let $\eta $ be the last drop in $b^{\mathcal{T}}$. In order to $\mathcal{M}^{\mathcal{T}}_{\infty}$ be 1-sound we need $\crit(E_{\eta}^{\mathcal{T}}) < \rho_{1}(\mathcal{M}_{\eta}^{\mathcal{T}})$ and since $\lambda(E_{0}^{\mathcal{T}}) > \gamma$ we have $\lambda(E_{\eta}^{\mathcal{T}}) > \gamma$. This implies that $\rho_{1}(\mathcal{M}_{\infty}^{\mathcal{T}}) \geq \rho_{1}(\mathcal{M}_{\eta+1}^{\mathcal{T}}) > \pi_{\eta^{*},\eta+1}^{\mathcal{T}}(\kappa_{\eta}) \geq \gamma = \rho_{1}(\mathcal{N})$, which is a contradiction since we are assuming that $\mathcal{M}^{\mathcal{T}}_{\infty} = \bar{\mathcal{N}}$.
\end{proof}
Since $\bar{\mathcal{N}} $ is a proper initial segment of $\mathcal{M}^{\mathcal{T}}_{\infty}$ it follows that $\bar{\mathcal{M}}$ does not move. For a contradiction, suppose $\bar{\mathcal{M}}$ moves then the index $\lambda(E_{0}^{\mathcal{U}}) $ of the first extender used on the $\bar{\mathcal{M}}$ side is greater than $\gamma$ since $\bar{\mathcal{N}}\mathbin\upharpoonright \gamma = \bar{\mathcal{M}}\mathbin\upharpoonright \gamma$ and, by our hypothesis, $E_{\gamma}= \emptyset$. Moreover the cardinal in $\mathcal{M}^{\mathcal{U}}_{lh(\mathcal{T}-1)}$, the last model in the iteration on the $\bar{\mathcal{M}}$ side of the comparison. We have the following: \begin{itemize}
\item $\bar{\mathcal{N}}$ is a proper initial segment of $\mathcal{M}^{\mathcal{T}}_{\infty}$,
\item $\lambda(E_{0}^{\mathcal{U}}) \leq (\bar{\mathcal{N}}\cap \ord)$,
\item $h_{1}^{\bar{\mathcal{N}}}(\bar{\gamma}\cup q)=\bar{\mathcal{N}}$,
\item $h_{1}^{\bar{\mathcal{N}}} \mathbin\upharpoonright (\bar{\gamma}\cup \{q\}) \in \mathcal{M}^{\mathcal{T}}_{\infty}$,
\end{itemize} then there exists a surjection from $\bar{\gamma}$ onto the index of $E_{0}^{\mathcal{T}}$ in $\mathcal{M}^{\mathcal{T}}_{\infty}$ which is a contradiction.
Thus we must have $\bar{\mathcal{N}} \triangleleft \bar{\mathcal{M}}$ and by elementarity of $\pi$ we have $\mathcal{N} \triangleleft \mathcal{M}$.
\end{proof}
\begin{lemma} \label{Club}
Let $L[E]$ be an extender model that is weakly iterable. In $L[E]$, let $\kappa$ be a cardinal which is not a subcompact cardinal. Let $ \beta \in (\kappa^{+},\kappa^{++})$ and $\mathcal{M}= (J_{\beta}^{E},\in,E\mathbin\upharpoonright \beta, E_{\beta})$ and suppose that $\rho_{1}(\mathcal{M})= \kappa^{+}$. Then there is a club $C \subseteq \kappa^{+}$ such that for all $\gamma \in C $ if $\mathcal{N} = \text{clps}(h_{1}^{\mathcal{M}}(\gamma \cup \{p_{1}^{\mathcal{M}}\}))$ then $\rho_{1}(\mathcal{N})= \gamma$.
\end{lemma}
\begin{proof}
Let $g$ be a function with domain $\kappa^{+}$ such that for each $\xi < \kappa^{+}$ we have that $g(\xi) = h_{1}^{\mathcal{M}}(\xi \cup \{p_{1}^{\mathcal{M}}\}) \cap \ord^{<\omega}$.
Let $f: \kappa^{+} \rightarrow \kappa^{+}$ where given $ \gamma < \kappa^{+} $, $f(\gamma) $ is the least ordinal such that for every $r \in g(\gamma)$ we have that $ T_{r}^{\mathcal{M}}(\gamma) \in J_{f(\gamma)}^{E}$. Notice that $T_{r}^{\mathcal{M}}(\gamma) \subseteq \bigcup_{n \in \omega} \mathcal{P}([\gamma]^{n})$, hence it can be coded as a subset of $\gamma$ and therefore, by acceptability, it follows that $f(\gamma) < \kappa^{+}$.
Let $ C$ be a club subset of the club of closure points of $f$ and such that $\gamma \in C $ implies $\gamma = h_{1}^{\mathcal{M}}(\gamma \cup \{p_{1}^{\mathcal{M}}\}) \cap \kappa^{+}$. We will verify that $C$ is the club we sought.
Let $ \gamma \in C $. Let $\pi:\mathcal{N}\rightarrow J_{\beta}^{E}$ be the inverse of the Mostowisk collapse of $h_{1}^{\mathcal{M}}(\gamma \cup \{p_{1}^{\mathcal{M}}\})$. Then for each $ \xi < \gamma $ we have $T_{r}^{\mathcal{N}}(\xi)=T_{\pi(r)}^{\mathcal{M}}(\xi) \in J_{\gamma}^{E} = \mathcal{N}\mathbin\upharpoonright \gamma$. Therefore $\rho_{1}(\mathcal{N}) \geq \gamma$.
Notice that by a standard diagonal argument $a = \{ \xi \in \gamma \mathrel{|}\allowbreak \xi \not\in h^{\mathcal{N}_{\gamma}}_{1}(\xi,p_{1})\} \not\in \mathcal{N}_{\gamma}$ since $\mathcal{N}_{\gamma}=h_{1}[\gamma \cup \{p_{1}\}]$, thus $\rho_{1}^{\mathcal{M}}\geq \gamma$.
\end{proof}
\begin{lemma}\label{NoSubcompact} Let $L[E]$ be an extender model that is weakly iterable. Given $\kappa \in \ord$ if $\kappa$ is a successor cardinal in $L[E]$ then the following are equivalent:
\begin{itemize}
\item[(a)] $\langle L_{\kappa^{+}}[E],{\in},\langle L_\beta[E]\mathrel{|}\allowbreak\beta\in\kappa^{+} \rangle\rangle\models\axiomfont{LCC}(\kappa^{+},\kappa^{++}]$.
\item[(b)] $L[E] \models ( \kappa ~ \text{is not a subcompact cardinal})$.
\end{itemize}
and if $\kappa$ is a limit cardinal of uncountable cofinality, then $${\langle L_{\kappa^{+}}[E],{\in},\langle L_\beta[E]\mathrel{|}\allowbreak\beta\in\kappa^{+} \rangle\rangle\models\axiomfont{LCC}(\kappa,\kappa^{+}]}.$$
\end{lemma}
\begin{proof} Let $\alpha \in (\kappa^{+},\kappa^{++})$. Let $\beta \geq \alpha$ such that $\beta \in (\kappa^{+},\kappa^{++})$ and $\rho_{1}((J_{\beta}^{E},\in,E\mathbin\upharpoonright\beta,E_{\beta}) ) = \kappa^{+}$.
Let $\mathcal{M} = (J_{\beta}^{E},\in,E\mathbin\upharpoonright\beta,E_{\beta})$, \[D= \{ \gamma <\kappa^{+} \mathrel{|}\allowbreak h_{1}(\gamma \cup \{p_{1}\})\cap \kappa^{+}=\gamma \}\] and for each $\gamma \in D$ let $\mathcal{N}_{\gamma} = \clps(h_{1}(\gamma \cup \{p_{1}\}))$. By Lemma \ref{Club} $D$ contains a club $ F \subseteq D$ such that $\gamma \in F$ implies that there is $\pi_{\gamma}:N_{\gamma} \rightarrow J_{\beta}^{E}$ where $\pi_{\gamma}$ is $\Sigma^{(1)}_{1}$, $\pi_{\gamma}\mathbin\upharpoonright \gamma = id \mathbin\upharpoonright \gamma$, $\pi_{\gamma}(\gamma) = \kappa^{+}$, $\rho_{1}(N_{\gamma}) = \gamma$. We can also assume that $\gamma \in F $ implies $L_{\gamma}[E]=J_{\gamma}^{E}$.
We verify first the implication $\neg(b) \rightarrow \neg(a)$.
Suppose that $\langle B_{\gamma} \mathrel{|}\allowbreak \gamma < |\alpha| \rangle $ is a continuous chain of elementary submodels of $\mathcal{N} = \langle J_{\alpha}^{E},\in,E|\alpha,E_{\alpha} \rangle$ such that for all $\gamma < |\alpha|$ we have $|B_{\gamma}| < |\alpha|$ and $\bigcup_{\gamma < |\alpha|} B_{\gamma} = \mathcal{M}$. We will verify that for stationary many $\gamma$'s we have that $\clps(B_{\gamma}) $ is not of the form $J_{\zeta}^{E}$ for any $\zeta$.
As $|\alpha|=\kappa$ is a regular cardinal it follows that for club many $\gamma$'s we have $B_{\gamma} = h_{1}^{\mathcal{M}}(\gamma\cup\{p_{1}\}) \cap \mathcal{N} = \pi^{-1}(L_{\alpha}[E])$
From $\neg(b)$, by the Schimmerling--Zeman characterization of $\square_{\kappa}$ (see \cite[Theorem~0.1]{SquareinK}), we can assume that for stationarily many $\gamma \in F$ we have $E_{\gamma}^{\mathcal{M}} \neq \emptyset$. Notice that $N_{\gamma} \models ``\gamma \text{ is a cardinal}"$ and therefore $E^{\mathcal{N}_{\gamma}}=\emptyset$ by Lemma \ref{Collapse}. On the other hand, from Lemma \ref{Collapse} we have $L_{\gamma+1}[E] \models ``\gamma \text{ is not a cardinal}"$. Since $L_{\gamma+1}[E] \subseteq J_{\gamma+1}^{E}$ it follows that $\mathcal{N}_{\gamma} = \clps(B_{\gamma})$ is different from $J_{\zeta}^{E}$ for every $\zeta > \gamma$. Therefore $\axiomfont{LCC}(\kappa^{+},\kappa^{++})$ does not hold.
Next we verify $(b) \rightarrow (a)$. Suppose $\mathfrak{N} = \langle L_{\alpha}^{E},\in, E | \alpha, E_{\alpha}, (\mathcal{F}_{n} \mathrel{|}\allowbreak n \in \omega ) \rangle $. We can assume without loss of generality that $\beta$ is large enough so that $\mathfrak{N} \in \mathcal{M}$.
We verify that $\langle L_{\tau}[E] \mathrel{|}\allowbreak \tau < \kappa^{++} \rangle $ witnesses $\axiomfont{LCC}(\kappa^{+},\kappa^{++}] $ at $\alpha$.
Let $ \vec{\mathcal{R}} := \{ h_{1}^{\mathcal{M}}[\gamma \cup \{p_{1}^{\mathcal{M}} \}\cup \{u^{\mathcal{M}}_{1}\}] \mathrel{|}\allowbreak \gamma < \kappa^{+} \}$ where for any given $X \subseteq \mathcal{M}$, $h_{1}^{\mathcal{M}}[X]$ denotes the $\Sigma_{1}$-Skolem hull of $X$ in $\mathcal{M}$ and $ p_{1}^{\mathcal{M}}$ is the first standard parameter.
It follows that
$$C= \{ \gamma < \kappa^{+} \mathrel{|}\allowbreak \crit(\clps(h_{1}^{\mathcal{M}}[\gamma \cup \{p_{1}^{\mathcal{M}}, u^{\mathcal{M}}_{1}\}])) = \gamma\} $$ is a club
and by lemma \ref{Club}
$$D=\{\gamma < \kappa^{+} \mathrel{|}\allowbreak \rho_{1}(\mathcal{N}_{\gamma})=\gamma\} $$ is also a club. From Theorem 1 in \cite{MR1860606} and $(b)$ it follows that there is a club $F \subseteq \{ \gamma < \kappa^{+} \mathrel{|}\allowbreak E_{\gamma}=\emptyset \} $.
By Lemma \ref{Condensation}, for every $\gamma \in D \cap F$ we have $\mathcal{N}_{\gamma}\triangleleft \mathcal{M}$. We have $L_{\alpha}[E] \triangleleft \mathcal{M}$, therefore $\clps(L_{\alpha}[E]\cap h_{1}^{\mathcal{M}}(\gamma \cup \{p_{1}\})) \triangleleft \mathcal{N}_{\gamma}$, hence $ \clps(L_{\alpha}[E]\cap h_{1}^{\mathcal{M}}(\gamma \cup \{p_{1}\}))=\clps(B_{\gamma}) \triangleleft \mathcal{M}$, which verifies the implication $(b) \rightarrow (a)$.
Now suppose $\kappa$ is a limit cardinal. The same argument used for the implication $(b) \rightarrow (a)$ follows with the difference that we do not use Theorem 1 of \cite{MR1860606}; instead we use that the cardinals below $\kappa$ form a club and that for every cardinal $\mu< \kappa$ we have $E_{\mu}=\emptyset$.
\end{proof}
\begin{defn}
Given two predicates $A$ and $E$ we say that $A$ is equivalent to $E$ iff $J_{\alpha}^{A}=J_{\alpha}^{E}$ for all $\alpha < \ord$.
\end{defn}
\begin{cor}\label{IncompatiblePredicates}
If $A \subseteq \ord$ is such that \begin{itemize}
\item $L[A] \models (\kappa$ is a subcompact cardinal $),$ and
\item $\langle L_{\kappa^{++}}[A],\in, \langle L_{\beta}[A] \mathrel{|}\allowbreak \beta < \kappa^{++} \rangle \rangle \models \axiomfont{LCC}(\kappa^{+},\kappa^{++}]$,
\end{itemize} then there is no extender sequence such that $L[E]$ is weakly iterable and $E$ is equivalent to $A$.
\end{cor}
\begin{remark} In \cite{FHl}, from the hypothesis that there is $\kappa$ a subcompact cardinal in $V$, an $A \subseteq \ord$ is obtained in a class generic extension which satisfies the hypothesis of Corollary \ref{IncompatiblePredicates}.
\end{remark}
\begin{cor}
\label{NoWitness} Suppose that $L[E]$ is an extender model with Jensen's $\lambda$-indexing and for every ordinal $\alpha$ the premouse $\mathcal{J}_{\alpha}^{E}$ is weakly iterable. If $\kappa$ is an ordinal such that $$L[E] \models \kappa \text{ is a subcompact cardinal,}$$ then there is no $\vec{M}=\langle M_{\alpha} \mathrel{|}\allowbreak \alpha < \kappa^{++} \rangle $ with $M_{\kappa^{+}}=H_{\kappa^{+}} $, $M_{\kappa^{++}}=H_{\kappa^{++}} $ and $\langle H_{\kappa^{++}},\in,\vec{M}\rangle \models \axiomfont{LCC}(\kappa^{+},\kappa^{++})$.
\end{cor}
\begin{defn} We say that a nice filtration $\vec{M}$ for $H_{\kappa^{+}}$ strongly fails to condensate iff there is a stationary set $S \subseteq \kappa^{+}$ such that for any $\beta \in S $ and any continuous chain $\vec{B}$ of elementary submodels of $M_{\beta}$ there are stationarily many points $\alpha$ where $B_{\alpha}$ does not condensate.
\end{defn}
\begin{lemma} If $\vec{M}$ is a filtration for $H_{\kappa^{+}}$ with $M_{\kappa} = H_{\kappa}$ that strongly fails to condensate, then there is no filtration $\vec{N}$ of $H_{\kappa^{+}}$ with $N_{\kappa}= H_{\kappa}$ that witnesses $\axiomfont{LCC}(\kappa,\kappa^{+})$.
\end{lemma}
\begin{proof} Let $\vec{N}$ be a filtration of $H_{\kappa^{+}}$ with $N_{\kappa}=H_{\kappa}$ and $N_{\kappa^{+}} = H_{\kappa^{+}}$. Then there is a club $D \subseteq \kappa^{+} $ where $ N_{\beta} = M_{\beta}$ for every $\beta \in D$. Let $ \beta\in S \cap D $, and let $\vec{\mathfrak{B}}=\langle \mathfrak{B}_{\tau} \mathrel{|}\allowbreak \tau < |\beta|=\kappa \rangle $ be any chain of elementary submodels of $M_{\beta}=N_{\beta}$.
\end{proof}
\begin{cor}
Suppose $V$ is an extender model which is weakly iterable. If there exists $\kappa$ such that $L[E] \models ``\kappa \text{ is a subcompact cardinal}"$, then there is no sequence $\vec{M} = \langle M_{\alpha} \mathrel{|}\allowbreak \alpha < \kappa^{+}\rangle$ in $L[E]$ such that $\langle M_{\kappa^{+}}, \in, \vec{M} \rangle \models \axiomfont{LCC}(\kappa^{+},\kappa^{++})$.
\end{cor}
\subseteqection{Forcing Local Club Condensation}
In \cite{FHl} it is shown, via class forcing, how to obtain a model of local club condensation for all ordinals above $\omega_{1}$. Later a simpler forcing was presented in \cite{HWW} which forces condensation on an interval of the form $(\kappa,\kappa^{+})$ where $\kappa$ is a regular cardinal. In this section we show that by iterating the forcing from \cite{HWW} we obtain a set forcing $\mathbb{P}$ which forces local club condensation on all ordinals of an interval $(\kappa,\kappa^{+\alpha})$ modulo ordinals with singular cardinality. We will denote by $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\alpha})$ (see Definition \ref{DefLCCReg}) the property that local club condensation holds for all ordinals in the interval $(\kappa,\kappa^{+\alpha})$ modulo those whose cardinality is a singular cardinal.
Iterating the forcing from \cite{HWW} gives us a set forcing which is relatively simpler than the class forcing from \cite{FHl} and $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\alpha})$ is enough condensation for applications where $\axiomfont{LCC}(\kappa,\kappa^{+\alpha})$ was used before, see Section 4 for applications.
\begin{defn} \label{Slow}
Let $\kappa$ be a regular cardinal and $\alpha$ an ordinal such that $\kappa^{+\alpha}$ is a regular cardinal. We say that $\Psi(\vec{M},\vec{\theta})$ holds iff $\vec{M}=\langle M_{\gamma} \mathrel{|}\allowbreak \gamma < \kappa^{+\alpha} \rangle$ is a filtration and for every regular cardinal $\theta \in (\kappa,\kappa^{+\alpha})$ we have $M_{\theta} = H_{\theta}$.\end{defn}
\begin{defn} \label{DefLCCReg}
Let $\kappa$ be a regular cardinal and $\alpha$ an ordinal. We say that $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\alpha})$ holds iff there is a filtration $\vec{M}=\langle M_{\gamma} \mathrel{|}\allowbreak \gamma < \kappa^{+\alpha} \rangle$ such that $\langle M_{
\kappa^{+\alpha}}, \langle M_{\gamma} \mathrel{|}\allowbreak \gamma < \kappa^{+\alpha} \rangle \rangle \models \axiomfont{LCC}(\alpha) $ for all $\alpha \in (\kappa,\kappa^{+\alpha})$ with $|\alpha|\in\reg$.
\end{defn}
The main result of this section is the following:
\begin{thmc}\label{ThmHWW}
Suppose $V$ models $\axiomfont{ZFC} + \axiomfont{GCH}$ and $\kappa$ is a regular cardinal and $\beta$ is an ordinal. Then there exists a set-sized forcing $\mathbb{P}$ which is cardinal preserving, cofinality preserving, $\axiomfont{GCH}$ preserving and forces the existence of a filtration $\vec{M}$ such that $\Psi(\vec{M},\kappa,\kappa^{+\beta})$ holds and $\langle M_{\kappa^{+\beta}},\in,\vec{M}\rangle \models \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta})$.
\end{thmc}
We start recalling the forcing from \cite{HWW} which we will iterate to obtain our model. We present the definitions of the forcing from \cite{HWW} for self containment, we will work mainly with abstract properties of the forcing from Theorem \ref{HWW} below.
\begin{convention}\begin{itemize}\item \label{IS} If $ a, b $ are sets of ordinals we write $ a\triangleleft b$ iff $ \sup(a) \cap b = a $.
\item If $ \mathbb{P} = \langle \langle \mathbb{P}_{\alpha} \mathrel{|}\allowbreak \alpha < \beta \rangle, \langle \mathbb{\dot{Q}}_{\alpha} \mathrel{|}\allowbreak \alpha + 1 < \beta \rangle \rangle $ is a forcing iteration, given $\zeta < \beta$ we denote by $\mathbb{\dot{R}}_{\zeta,\beta}$ a $\mathbb{P}_{\zeta}$-name such that $ \mathbb{P} = \mathbb{P}_{\zeta}*\mathbb{\dot{R}}_{\zeta,\beta}$ (For the existence of such name $\dot{\mathbb{R}}_{\zeta,\beta}$ see for example \cite[Section 5]{MR823775}).
\end{itemize}
\end{convention}
\begin{defn} Let $\kappa$ be a regular cardinal. Suppose $\kappa \leq \alpha < \kappa^{+}$, a \emph{condition at $\alpha$} is a pair $(f_{\alpha},c_{\alpha})$ which is either trivial, i.e. $(f_{\alpha},c_{\alpha}) = (\emptyset,\emptyset)$, or there is $\gamma_{\alpha} < \kappa$ such that
\begin{enumerate}
\item $c_{\alpha}:\gamma_{\alpha} \rightarrow 2$ is such that $C_{\alpha}(f_{\alpha},c_{\alpha}):=\{ \delta < \gamma_{\alpha} \mathrel{|}\allowbreak c_{\alpha}(\delta)=1 \} = c_{\alpha}^{-1}\{1\}$.
\item $f:\max(C_{\alpha}) \rightarrow \alpha$ is an injection and
\item $f_{\alpha}[\max(C_{\alpha})] \subsetequbseteq \max(C_{\alpha})$
\end{enumerate}
\end{defn}
\begin{defn}
Let $\kappa$ be a regular cardinal and $\alpha \in (\kappa,\kappa^{+})$ we also define a function $A$ with domain $[\kappa,\kappa^{+})$ such that for every $\alpha$, $A(\alpha)$ is a $\mathbb{H}_{\alpha}$-name for either $0$ or $1$. We fix a wellorder $\mathcal{W}$ of $H_{\kappa^{+}}$ of order-type $\kappa^{+}$. Let $\beta \in [\kappa,\kappa^{+})$ and assume that $A \mathbin\upharpoonright \beta$ and $\mathbb{H}_{\beta}$ have been defined.
Let $A(\beta)$ be the canonical $\mathbb{H}_{\beta}$-name for either $0$ or $1$ such that for any $\mathbb{H}_{\beta}$-generic $G_{\beta}$, $A(\beta) = 1$ iff $ \beta = {\prec} \gamma, {\prec} \delta, \varepsilon {\succ} {\succ}$ \footnote{${\prec} \gamma, {\prec} \delta, \varepsilon {\succ} {\succ}$ denotes the G\"odel pairing.}, $\dot{x}$ is the $\gamma^{\text{th}}$ (in the sense of $\mathcal{W}$) $\mathbb{H}_{\delta}$-nice name for a subset of $\kappa$, $\varepsilon < \kappa$ and $\varepsilon \in \dot{x}^{G_{\beta}}$ \footnote{As $\delta < \beta$, we identify $\dot{x}$ with a $\mathbb{H}_{\beta}$-name using the induction hypothesis that $\mathbb{H}_{\delta} \prec \mathbb{H}_{\beta}$.}
Suppose that $ A\mathbin\upharpoonright \beta $ is defined, we proceed to define $\mathbb{H}_{\beta}$. Suppose $p$ is an $\beta$-sequence such that for each $\alpha < \beta$ we have $p(\alpha) \in \mathbb{H}_{\alpha}$ and suppose that $|\supp(p)| = | \{\tau < \beta \mathrel{|}\allowbreak p(\tau) \neq 1_{\mathbb{H}_{\tau}}\}| < \kappa$. If $\beta = \alpha +1$ for some $\alpha$, then we require that
\begin{itemize}
\item $p\mathbin\upharpoonright \beta \in \mathbb{H}_{\beta}$ for every $
\beta < \alpha$ and if $\alpha = \beta + 1$, the following holds:
\item $p(\beta)=(f_{\beta},c_{\beta})$ is a condition at $\beta$,
\item if $C_{\beta}\neq \emptyset$, then $p\mathbin\upharpoonright \beta$ decides $A(\beta)=a_{\beta}$,
\item for all $\delta \in C_{\beta}$, $p(\otp(f_\beta[\delta]))=a_\beta$,
\item $\gamma^{p} = \supp(p)\cap \kappa = \gamma_\beta = \dom(c_\beta)$ for any $\beta \in C\text{-}\supp(p)$, where $C\text{-}\supp(p):=\{\gamma < \beta \mathrel{|}\allowbreak C_\gamma(p(\gamma))\neq \emptyset \}$
\item $\exists \delta^{p}$ such that for all $\beta \in C\text{-}\supp(p), \max(C_\beta)=\delta^{p}$,
\item if $\beta_0 <\beta_1 $ are both in $C\text{-}\supp(p)$, then $$f_{\beta_{0}}[\delta^{p}] \triangleleft f_{\beta_1}[\delta^{p}]$$
\footnote{See Convention \ref{IS}} and
$$f_{\beta_1}[\delta^{p}] \setminus \beta_0 \neq \emptyset$$
For $p$ and $q$ in $\mathbb{H}_{\alpha}$ we let $q\leq p $ iff $q \mathbin\upharpoonright \kappa \leq p \mathbin\upharpoonright \kappa$ and for every $\beta \in [\kappa,\alpha)$, $q(\beta) \leq p(\beta)$.
\end{itemize}
\end{defn}
We will work with a forcing that is equivalent to $\mathbb{H}_{\beta}$ and is a subset of $ H_{\kappa^{+}}$.
\begin{defn}\label{Forcingdefn}
If $\kappa$ is a regular cardinal and $\pi:\mathbb{H}_{\kappa,\kappa^{+}} \rightarrow H_{\kappa^{+}}$ is such that $\pi(p) = p \mathbin\upharpoonright \supp(p)$, then we define $\mathbb{P}_{\kappa,\kappa^{+}}:= \rng(\pi)$ and given $s,t \in \mathbb{P}_{\kappa,\kappa^{+}}$ we let $s \leq_{\mathbb{P}_{\kappa,\kappa^{+}}} t $ iff $\pi^{-1}(s) \leq_{\mathbb{H}_{\kappa,\kappa^{+}}} \pi^{-1}(t)$.
\end{defn}
Next we describe how we will iterate the forcing from Definition \ref{Forcingdefn}.
\begin{defn}
Let $\alpha$ be an ordinal and $\kappa$ a regular cardinal. We define $\mathbb{P}_{\kappa,\kappa^{+\alpha}} $ as the iteration $ \langle \langle \mathbb{P}_{\kappa,\kappa^{+\tau}} \mathrel{|}\allowbreak \tau \leq \alpha \rangle , \langle \dot{\mathbb{Q}}_{\tau} \mathrel{|}\allowbreak \tau < \alpha \rangle \rangle $ as follows:
\begin{enumerate}
\item Suppose $\tau = \beta+1$ for some $\beta < \tau$ and $\kappa^{+\beta}$ is a regular cardinal. If there exists $\dot{\mathbb{Q}}_{\beta}\subseteq H_{\kappa^{+\beta+1}}$ such that $\mathbb{P}_{\kappa,\kappa^{+\beta}} \Vdash \dot{\mathbb{Q}}_{\beta}=\mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}}$ we let $\mathbb{P}_{\kappa,\kappa^{+\beta+1}} = \mathbb{P}_{\kappa,\kappa^{+\beta}}*\dot{\mathbb{Q}}_{\beta}$, otherwise we stop the iteration.
\item If $\tau = \beta+1$ for some $\beta < \tau$ and $\kappa^{+\beta}$ is a singular cardinal, we let $\dot{\mathbb{Q}}_{\beta} = \check{1}$.
\item If $\tau$ is a limit ordinal and $\kappa^{+\tau}$ is a regular cardinal, then $\mathbb{P}_{\kappa,\kappa^{+\tau}}$ is the direct limit of $\langle \mathbb{P}_{\kappa,\kappa^{+\theta}} , \dot{\mathbb{Q}}_{\theta}
\mathrel{|}\allowbreak \theta < \tau \rangle $.
\item If $\tau$ is a limit ordinal and $\kappa^{+\tau}$ is singular, then $\mathbb{P}_{\kappa,\kappa^{+\tau}}$ is the inverse limit of $\langle \mathbb{P}_{\kappa,\kappa^{+\theta}} , \dot{\mathbb{Q}}_{\theta}
\mathrel{|}\allowbreak \theta < \tau \rangle $.
\end{enumerate}
\end{defn}
\begin{remark}
Given an ordinal $\alpha$ and a regular cardinal $\kappa$ the forcing $\mathbb{P}_{\kappa,\kappa^{+\alpha}}$ is obtained by forcing with $\mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}} $ for each successor ordinal $\beta < \alpha$ such that $\kappa^{+\beta}$ is a regular cardinal. If $\beta$ is a limit ordinal but not an inaccessible cardinal, then we take inverse limits; if $\beta$ is an inaccessible cardinal we take direct limits.
\end{remark}
\begin{remark} \label{SeqBijections}
Let $\kappa$ be a regular cardinal and let $G$ be $\mathbb{P}_{\kappa,\kappa^{+}}$-generic. Consider $$f_{\alpha}:= \bigcup \{f \mathrel{|}\allowbreak \exists p ( \alpha \in \dom(p)\wedge p \in G \wedge p(\alpha)= (c,f) ) \}$$ By a standard density argument we have that $f_{\alpha}$ is a bijection from $\kappa$ onto $\alpha$. It also holds that $\alpha \in A $ iff $\{\gamma < \kappa \mathrel{|}\allowbreak \otp(f_{\alpha}[\gamma]) \in A\}$ contains a club and $\alpha \not\in A$ iff $\{\gamma < \kappa \mathrel{|}\allowbreak \otp(f_{\alpha}[\gamma]) \not\in A\}$ contains a club.
\end{remark}
\begin{thm}[\cite{HWW}] \label{HWW} Suppose $\axiomfont{GCH}$ holds and $\kappa$ is a regular cardinal. Then $\mathbb{P}=\mathbb{P}_{\kappa,\kappa^{+}}$ is a $<\kappa$- directed closed, $\kappa^{+}$-cc forcing such that $|\mathbb{P}|=\kappa$ and for all $G$, $\mathbb{P}$-generic, the following holds in $V[G]$:
\begin{itemize}
\item There is $\vec{M} =\langle M_{\alpha} \mathrel{|}\allowbreak \alpha \leq \kappa^{+} \rangle $ which witnesses $\axiomfont{LCC}(\kappa,\kappa^{+})$,
\item $M_{\kappa}= H_{\kappa}$,
\item $M_{\kappa^{+}}=H_{\kappa^{+}}$,
\item There exists $A \subseteq \kappa^{+} $ such that for all $ \beta < \kappa^{+}$ we have $( M_{\beta}=L_{\beta}[A])$.
\end{itemize}
\end{thm}
We will need the following facts:
\begin{fact} \label{Baum} \cite[Theorem 2.7]{MR823775} Let $\mathbb{P}_{\alpha}$ be the inverse limit of $\langle \mathbb{P}_\beta, \dot{\mathbb{Q}}_{\beta} \mathrel{|}\allowbreak \beta < \alpha \rangle$. Suppose that $\kappa$ is a regular cardinal and for all $\beta < \alpha$, $$\Vdash_{\mathbb{P}_{\beta}} \dot{\mathbb{Q}}_{\beta} \text{ is } \kappa\text{-directed closed}.$$
Suppose also that all limits are inverse or direct and that if $\beta \leq \alpha$, $\beta $ is a limit ordinal and $\cf(\beta)<\kappa$, then $\mathbb{P}_{\beta}$ is the inverse limit of $\langle \mathbb{P}_{\gamma} \mathrel{|}\allowbreak \gamma < \beta \rangle$. Then $\mathbb{P}_{\alpha}$ is $\kappa$-directed closed.
\end{fact}
\begin{fact} \label{cc} Suppose $\cf(\kappa)>\omega$. If $\mathbb{P}$ is a $\kappa$-cc forcing and $\mathbb{P} \Vdash \dot{\mathbb{Q}}$ is $\kappa$-cc, then $\mathbb{P}*\dot{\mathbb{Q}}$ is $\kappa$-cc.
\end{fact}
\begin{fact} \label{Htheta} Let $\mathbb{P}$ be a partial order and $\theta$ a regular cardinal. Suppose $\mathbb{P}$ is ${<}\theta$-closed and preserves cardinals. If $G$ is $\mathbb{P}$-generic, then $H_{\theta}^{V} = H_{\theta}^{V[G]}$.
\end{fact}
\begin{proof} Let $G$ be $\mathbb{P}$-generic and $w \in H_{\theta}^{V[G]}$. Let $\delta = | \text{trcl}\{w\}|$ and suppose $f: \delta \rightarrow \text{trcl}\{w\}$ is a bijection. Let $ R \subseteq \delta \times \delta $ such that $ (x,y) \in R $ if and only if $f(x) \in f(y)$. Then the Mostowski collapse of $(\delta,R)$ is equal to $w$. Since $\mathbb{P}$ is $< \theta$-closed, it follows that $(\delta,R) \in V$ and hence $ w \in V$. Since $\mathbb{P}$ preserves cardinals it follows that $|w|=\delta$ and $w \in H_{\theta}$.
\end{proof}
\begin{fact}
\label{ccHmu} Let $\mu$ be a cardinal. Suppose $\mathbb{P}$ is a forcing that is $\mu^{+}$-cc and $\mathbb{P}\subseteq H_{\mu^{+}}$. If $G$ is $\mathbb{P}$-generic, then $H_{\mu^{+}}^{V[G]}=H_{\mu^{+}}[G]$.
\end{fact}
\begin{proof} We proceed by $\in$-induction on the elements of $H_{\mu^{+}}^{V[G]}$. Notice that it suffices to prove the result for subsets of $\mu^{+}$, since every $ x \in H_{\mu^{+}}^{V[G]}$ is of the form $\trcl(\gamma,R)$ for some $\gamma < \mu^{+}$ and $R \subseteq \gamma\times \gamma$. Let $x = \sigma[G] \in H_{\mu^{+}}^{V[G]}$ such that $x \subseteq \mu^{+}$. As $\mathbb{P}$ is $\mu^{+}$-cc, there is an ordinal $\gamma$ such that $ x\subseteq \gamma $ and $1_{\mathbb{P}}\Vdash \sigma \subseteq \gamma$. Let $\theta = \bigcup\{ A_{\tau}\times\{\tau\} \mathrel{|}\allowbreak \exists p \in \mathbb{P}\, \exists \xi \in \gamma\, (p \Vdash \check{\xi} = \tau ) \wedge \tau \in H_{\mu^{+}} \}$ and each $A_{\tau} \subseteq \mathbb{P}$ such that:
\begin{enumerate}
\item $A_{\tau}$ is an antichain,
\item $q \in A_{\tau}$ implies $ q \Vdash \tau \in \sigma $,
\item $q \in A_{\tau}$ is maximal with respect to the above two properties.
\end{enumerate}
It follows that $\theta \in H_{\mu^{+}}$ and $\theta[G]=\sigma[G]$.
\end{proof}
\begin{remark}
If $\mathbb{P}$ and $\mu$ satisfy the hypothesis from Fact \ref{ccHmu} and $\sigma$ is a $\mathbb{P}$-name such that $1_{\mathbb{P}} \Vdash \sigma \subseteq H_{\mu^{+}}$, then using Fact \ref{ccHmu} we can find a $\mathbb{P}$-name $\pi \subseteq H_{\mu^{+}}$ such that $1_{\mathbb{P}} \Vdash \sigma = \pi$.
\end{remark}
\begin{remark}\label{Inacc}
Given a regular cardinal $\kappa$ and a limit ordinal $\beta$, we have $\cf(\kappa^{+\beta}) = \cf(\beta)$.
Therefore if $\beta < \kappa^{+\beta}$ it follows that $\kappa^{+\beta}$ is singular. On the other hand if $\kappa^{+\beta} = \cf(\kappa^{+\beta}) = \cf(\beta) \leq \beta$, then $\beta$ is a weakly inaccessible cardinal, i.e. a cardinal that is a limit cardinal and regular. Thus $\kappa^{+\beta}$ is regular iff $\beta$ is a weakly inaccessible cardinal.
\end{remark}
\begin{convention} Let $\mathbb{P}$ be a set forcing and $\varphi(\sigma_0,\cdots,\sigma_n)$ a formula in the forcing language. We write $\mathbb{P} \Vdash \varphi(\sigma_0,\cdots, \sigma_n)$ if for all $p \in \mathbb{P}$ we have $p \Vdash \varphi(\sigma_0,\cdots,\sigma_n)$.
\end{convention}
\begin{lemma} Suppose $\axiomfont{GCH} $ holds. Let $\kappa$ be a regular cardinal and $\beta$ an ordinal. Then $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ preserves $\axiomfont{GCH}$, cardinals and cofinalities and if $\kappa^{+\beta}$ is a regular cardinal then there exists $\dot{\mathbb{Q}}_{\beta} \subseteq H_{\kappa^{+\beta+1}}$ a $\mathbb{P}_{\kappa,\kappa^{+\beta}}$-name such that $\mathbb{P}_{\kappa,\kappa^{+\beta}}\Vdash \mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}}=\dot{\mathbb{Q}}_{\beta}$.
\end{lemma}
\begin{proof} We prove the lemma by induction. Besides the statement of the lemma we carry the following additional induction hypothesis:
\begin{enumerate}[$(1)_\beta$]
\item $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ preserves cardinals and cofinalities,
\item If $\kappa^{+\beta}$ is a regular cardinal and not the successor of a singular cardinal, then $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is $\kappa^{+\beta}\text{-cc}$ and there exists $\dot{\mathbb{Q}}_{\beta} \subseteq H_{\kappa^{+\beta+1}}$ such that $\mathbb{P}_{\kappa,\kappa^{+\beta}} \Vdash (\mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}} = \dot{\mathbb{Q}}_{\beta}) $.
\item If $\kappa^{+\beta}$ is a successor of a singular cardinal, then $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is $\kappa^{+\beta+1}$-cc,
\item If $\kappa^{+\beta}$ is a singular cardinal, then $|\mathbb{P}_{\kappa,\kappa^{+\beta}}| \leq \kappa^{+\beta+1}$
\end{enumerate}
For $\beta = 1 $ the lemma follows from Theorem \ref{HWW}. Suppose that $(1)_{\theta}$ to $(4)_{\theta}$ hold and that the lemma holds for all $\theta < \beta$. We will verify that $(1)_{\beta}$ to $(4)_{\beta}$ hold and that the lemma holds for $\beta$.
$\blacktriangleright $ Suppose $\beta = \theta + 1 $ for some ordinal $\theta$ such that $\kappa^{+\theta}$ is regular. From $(2)_{\theta}$ in our induction hypothesis, $\mathbb{P}_{\kappa,\kappa^{+\theta}}$ is $\kappa^{+\theta}$-cc, hence by Fact \ref{ccHmu}, for any $G$, $\mathbb{P}_{\kappa,\kappa^{+\theta}}$-generic, we have $H_{\kappa^{+\theta+1}}[G]= H_{\kappa^{+\theta+1}}^{V[G]}$. Thus there exists $\dot{\mathbb{Q}}_{\theta} \subseteq H_{\kappa^{+\theta+1}}$ such that $\mathbb{P}_{\kappa,\kappa^{+\theta}} \Vdash ``\mathbb{P}_{\kappa^{+\theta},\kappa^{+\theta+1}} = \dot{\mathbb{Q}}_{\theta}"$.
From our induction hypothesis $\mathbb{P}_{\kappa,\kappa^{+\theta}}$ preserves $\axiomfont{GCH}$, cardinals and cofinalities and from $(2)_{\theta}$ we have that $\mathbb{P}_{\kappa,\kappa^{+\theta}}$ is $\kappa^{+\theta}$-cc. We also have that $$\mathbb{P}_{\kappa,\kappa^{+\theta}} \Vdash ``\mathbb{P}_{\kappa^{+\theta},\kappa^{+\theta+1}} \text{ preserves } \axiomfont{GCH}, \text{ cardinals, cofinalities and it is } \kappa^{+\theta+1}\text{-cc}".$$
Altogether implies that $\mathbb{P}_{\kappa,\kappa^{+\beta}}$, which is $\mathbb{P}_{\kappa,\kappa^{+\theta}}*\dot{\mathbb{Q}}_{\beta}$, preserves $\axiomfont{GCH}$, cardinals and cofinalities. By Fact \ref{cc} we have that $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is $\kappa^{+\beta}$-cc.
$\blacktriangleright $ Suppose $\kappa^{+\theta}$ is singular and $\beta = \theta + 1 $. By our induction hypothesis $(4)_{\theta}$ we have $|\mathbb{P}_{\kappa,\kappa^{+\theta}}| \leq \kappa^{+\theta+1}$. As $ \dot{\mathbb{Q}}_{\theta}$ is the trivial forcing, it follows that $|\mathbb{P}_{\kappa,\kappa^{+\beta}}|\leq \kappa^{+\beta}$ and $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is $\kappa^{+\beta +1}$-cc. Therefore if $G$ is $\mathbb{P}_{\kappa,\kappa^{+\beta}}$-generic, $H_{\kappa^{+\beta+1}}[G] = H_{\kappa^{+\beta+1}}^{V[G]}$, hence we can find $\dot{\mathbb{Q}}_{\beta} \subseteq H_{\kappa^{+\beta+1}}$ as sought and $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ preserves $\axiomfont{GCH}$, cardinals and cofinalities.
$\blacktriangleright $ Suppose that $\beta$ is a limit ordinal and $\kappa^{+\beta}$ is a singular cardinal.
From our induction hypothesis we have that for every $\zeta < \beta $ the forcing $\mathbb{P}_{\kappa,\kappa^{+\zeta}}$ preserves cardinals and by Fact \ref{Baum} $$\mathbb{P}_{\kappa,\kappa^{+\zeta}} \Vdash ``\dot{\mathbb{R}}_{\kappa^{+\zeta},\kappa^{+\beta}} \text{ is } {<}\kappa^{+\zeta}\text{-closed}."$$ Therefore all cardinals below $\kappa^{+\beta}$ are preserved. Thus $\kappa^{+\beta}$ remains a cardinal in $V[G_{\beta}]$ and $\cf(\kappa^{+\beta})^{V[G_{\beta}]}=(\cf(\kappa^{+\beta}))^{V[G_\tau]}= (\cf(\kappa^{+\beta}))^V$.
As $ \cf(\kappa^{+\beta})^{+} < \kappa^{+\beta}$, we can fix $\tau < \beta$ such that $\kappa^{+\tau} \geq \cf(\kappa^{+\beta})$. From our induction hypothesis we have that $\mathbb{P}_{\kappa,\kappa^{+\tau+1}}$ preserves cardinals. From Fact \ref{Baum} we have that $\mathbb{P}_{\kappa,\kappa^{+\tau +1}}$ forces $\dot{\mathbb{R}}_{\kappa^{+\tau+1},\kappa^{+\beta}}$ to be ${<}\cf(\kappa^{+\beta})^{+}$-closed. Therefore $((\kappa^{+\beta})^{\cf(\kappa^{+\beta})})^{V[G_{\tau}]} = ((\kappa^{+\beta})^{\cf(\kappa^{+\beta})})^{V[G_{\beta}]} $ and $(\kappa^{+\beta+1})^{V[G_\tau]} = (\kappa^{+\beta+1})^{V[G_{\beta}]}$. We have verified above that
\begin{itemize}
\item $\mathbb{P}_{\kappa,\kappa^{+\beta}}\Vdash (\kappa^{+\beta})^{V} \text{is a cardinal}$
\item $\mathbb{P}_{\kappa,\kappa^{+\beta}} \Vdash (\cf(\kappa^{+\beta}))=\cf^{V}(\kappa^{+\beta})$
\item $\mathbb{P}_{\kappa,\kappa^{+\beta}}\Vdash 2^{\kappa^{+\beta}}=\kappa^{+\beta+1}$
\end{itemize}
It is also clear from the above that $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ preserves $\axiomfont{GCH}$, cardinals and cofinalities below $\kappa^{+\beta}$.
From our induction hypothesis $(2)_{\theta}$ it follows that for each $\theta < \beta$ we have $|\dot{\mathbb{Q}}_{\theta}|\leq \kappa^{+\theta+1}$, then using $\axiomfont{GCH}$ it follows that $|\mathbb{P}_{\kappa,\kappa^{+\beta}}| \leq \kappa^{+\beta+1}$ and hence $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is $\kappa^{+\beta+2}$-cc.
Thus $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ preserves $\axiomfont{GCH}$, cardinals and cofinalities above $\kappa^{+\beta+2}$.
$\blacktriangleright$ If $\kappa^{+\beta}$ is a limit cardinal and regular, then $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is the direct limit of $\langle \mathbb{P}_{\kappa,\kappa^{+\tau}},\dot{\mathbb{Q}}_{\tau} \mathrel{|}\allowbreak \tau<\beta \rangle$. From our induction hypothesis $(2)_{\theta}$ for $\theta < \beta$, we have $|\dot{\mathbb{Q}}_{\tau}| \leq \kappa^{+\tau+1}$. Therefore $|\mathbb{P}_{\kappa,\kappa^{+\beta}}| \leq \kappa^{+\beta}$ and hence $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ is $\kappa^{+\beta+1}$-cc and preserves $\axiomfont{GCH}$, cardinals and cofinalities at cardinals greater or equal than $\kappa^{+\beta}$. From our induction hypothesis we have that cofinalities, cardinals and $\axiomfont{GCH}$ are preserved below $\kappa^{+\beta}$. Hence $\mathbb{P}_{\kappa,\kappa^{+\beta}}$ preserves cofinalities, cardinals and $\axiomfont{GCH}$.
\end{proof}
\begin{comment}
\begin{subclaim} If $G_{0}$ is $\mathbb{P}_{\kappa}$-generic and $G_{1}$ is $\mathbb{Q}^{V[G_{0}]}_{\kappa^{+}}$-generic, then $H_{\lambda^{+}}^{V[G_{0}][G_{1}]}=H_{\lambda^{+}}^{V[G_{0}]}$.
\end{subclaim}
\begin{proof} We have that $H^{V[G_{0}][G_{1}]}_{\lambda^{+}} = L_{\lambda^{+}}[B] $. Since $\mathbb{Q}_{\lambda^{+}}$ is ${<}\lambda^{+}$-closed, it follows that $(B^{<\lambda^{+}} )^{V[G_{0}][G_{1}]} = (B^{<\lambda^{+}})^{V[G_{0}]}$ and hence for every $\gamma < \lambda^{+}$ we have $B\mathbin\upharpoonright \gamma \in N_{\lambda^{+}} = L_{\lambda^{+}}[A]=H^{V[G_{0}]}_{\lambda^{+}}$. This implies that $L_{\lambda^{+}}[B] \subsetequbseteq L_{\lambda^{+}}[A] $, which verifies the subclaim.
\end{proof}
From $H_{\lambda^{+}}^{V[G_{0}][G_{1}]} = H_{\lambda^{+}}^{V[G_{0}]}$, it follows that there is a club $C$ in $\lambda^{+}$ such that $\vec{N} \mathbin\upharpoonright C = \vec{M} \mathbin\upharpoonright C$.
It is clear that if $\alpha \in (\lambda,\lambda^{+}]$ then $\vec{N}^{\frown}\vec{M} \mathbin\upharpoonright_{\lambda^{++} \subseteqetminus \lambda^{+}}$ witnesses $\axiomfont{LCC}(\lambda,\lambda^{++}]$ at $\alpha$. We are left with verifying it for $\alpha \in (\lambda^{+},\lambda^{++}]$. Consider $\vec{\mathfrak{B}}=\langle \mathfrak{B}_{\tau} \mathrel{|}\allowbreak \tau < |\alpha| = \lambda^{+} \rangle$ a continuous chain obtained from the fact that $\vec{M}$ witnesses $\axiomfont{LCC}(\lambda^{+},\lambda^{++}]$ at $\alpha$.
For each $\tau \in \vec{B} $ there is $\beta(\tau) \in \lambda^{+}$ such that $ \clps(B_{\tau}) = M_{\beta(\tau)}$ and $\{\beta(\tau) \mathrel{|}\allowbreak \tau < \lambda^{+}\}$ forms a club in $\lambda^{+}$. Then $D = C \cap \{\beta(\tau) \mathrel{|}\allowbreak \tau < \lambda^{+}\}$ is a club and $\vec{B}\mathbin\upharpoonright \{ \tau \mathrel{|}\allowbreak \beta(\tau) \in D \}$ is the sought chain.
\end{comment}
Lemma \ref{AbstractLimit}, below, will be used in a context where $W_{\tau}= V[G_{\tau}]$ and $G_{\tau}$ is $\mathbb{P}_{\kappa,\kappa^{+\tau}}$-generic.
\begin{lemma}\label{AbstractLimit} Let $\langle W_{\tau} \mathrel{|}\allowbreak \tau \leq \beta \rangle$ be a sequence of transitive proper classes that model $\axiomfont{ZFC}$ and suppose that $\tau_0 < \tau_1 <\beta $ implies $W_{\tau_0} \subseteq W_{\tau_{1}}$ and $\card^{W_{\tau_{0}}}=\card^{W_{\tau_{1}}}$. Suppose further that the following hold:
\begin{enumerate}
\item for each $\tau < \beta$ the following holds in $W_{\tau}$: there exists $A_{\tau} \subseteq \kappa^{+\tau} $ such that $ \vec{M}^{\tau} = \langle L[A_{\tau}]_{\zeta} \mathrel{|}\allowbreak \zeta < \kappa^{+\tau + 1} \rangle$ witnesses $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\tau})$,
\item For $\tau_0 < \tau_1 < \beta $ we have $H_{\tau_{0}^{+}}^{W_{\tau_0}}=L[A_{\tau_0}]_{\tau_{0}^{+}} = L[A_{\tau_1}]_{\tau_{1}^{+}}=H_{\tau_{1}^{+}}^{W_{\tau_1}}$,
\item for every $\tau < \beta$ we have $H_{\tau}^{W_{\tau}} = H_{\tau}^{W_{\beta}}$ and
\item $$\mathbb{A}:=\bigcup\{ A_\tau \mathbin\upharpoonright (\kappa^{+\tau},\kappa^{+\tau+1}) \mathrel{|}\allowbreak \reg(\kappa^{+\tau}) \wedge \tau < \beta \} \cup \bigcup \{A_\tau \mathbin\upharpoonright (\kappa^{+\tau},\kappa^{+\tau+2}) \mathrel{|}\allowbreak \text{Sing}(\kappa^{+\tau})\}$$ is an element of
$ W_{\beta}$.
\end{enumerate}
Then $\vec{M} = \langle L_{\zeta}[\mathbb{A}]
\mathrel{|}\allowbreak \zeta < \kappa^{+\beta} \rangle $ witnesses $\axiomfont{LCC}_{\reg}(\kappa,
\kappa^{+\beta})$ in $W_{\beta}$. \end{lemma}
\begin{proof} We work in $W_{\beta}$. Let $\alpha \in (\kappa,\kappa^{+\beta})$ such that $|\alpha|$ is a regular cardinal. Let $ \mathbb{S}= \langle L_{\alpha}[\mathbb{A}], \in, (\mathcal{F}_{n})_{n\in\omega} \rangle \in H_{|\alpha|^{+}}$.
We will find $\vec{B}$ that witnesses $\axiomfont{LCC}$ at $\alpha$ for $\mathbb{S}$. There is $\vec{B_{0}} \in W_{\tau}$ where $\kappa^{+\tau} = |\alpha|$, which witnesses $\axiomfont{LCC}$ at $\alpha$ in $W_{\tau}$ with respect to $(\mathcal{F}_{n})_{n \in \omega}$. Since $L_{\tau}[A_{\tau}]=H_{\tau}^{W_{\tau}} = H_{\tau}^{W_{\tau^{+}}}=L_{\tau}[A_{\tau^{+}}]$, it follows that there is a club $C \subseteq \kappa^{+\tau}$ such that $ \iota \in C $ implies $L_{\iota}[A_{\tau}] = L_{\iota}[\mathbb{A}]$. Thus $\vec{B}= \vec{B_0} \mathbin\upharpoonright C $ will witness $\axiomfont{LCC}$ at $\alpha$ with respect to $(\mathcal{F}_n)_{n \in \omega}$ in $W_{\beta}$.
\end{proof}
\begin{lemma}\label{Succ} Let $\kappa$ be a regular cardinal and $\beta$ an ordinal. Suppose that there exists $\vec{M} = \langle L_{\alpha}[A] \mathrel{|}\allowbreak \kappa \leq \alpha < \kappa^{+\beta} \rangle$ which witnesses $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta})$ and $\Psi(\vec{M},\kappa,\kappa^{+\beta})$ holds. If $\kappa^{+\beta}$ is a regular cardinal, then $\mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}} \Vdash \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta})$ and if $ \kappa^{+\beta}$ is a singular cardinal then $\mathbb{P}_{\kappa^{+\beta+1},\kappa^{+\beta+2}} \Vdash \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta+1}) \wedge \Psi(\vec{M},\kappa,\kappa^{+\beta}) $.
\end{lemma}
\begin{proof} We split the proof into two cases depending on whether $\kappa^{+\beta}$ is regular or not.
$\blacktriangleright$ Suppose $\kappa^{+\beta}$ is a regular cardinal. Let $B \subseteq (\kappa^{+\beta},\kappa^{+\beta+1})$ such that $\langle L_{\alpha}[B] \mathrel{|}\allowbreak \alpha < \kappa^{+\beta+1} \rangle $ witnesses $\axiomfont{LCC}_{\reg}(\kappa^{+\beta},\kappa^{+\beta+1})$. Since $\mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}} $ is $< \kappa^{+\beta}$-closed, it follows that for $G$, $\mathbb{P}_{\kappa^{+\beta},\kappa^{+\beta+1}}$-generic we have, by Fact \ref{Htheta} that $(H_{\kappa^{+\beta}})^{V[G]} = (H_{\kappa^{+\beta}})^{V}$.
We then let $\vec{N}= \langle L_{\alpha}[C] \mathrel{|}\allowbreak \alpha < \kappa^{+\beta+1} \rangle $ where $C = (A \cap \kappa^{+\beta}) \cup (B \setminus \kappa^{+\beta})$, witness $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta+1})$.
$\blacktriangleright$ Suppose $\kappa^{+\beta}$ is a singular cardinal. Let $G$ be $\mathbb{P}_{\kappa^{+\beta+1},\kappa^{+\beta+2}}$-generic over $V$. From Fact \ref{Htheta} it follows that for every cardinal $\theta < \kappa^{+\beta+1}$ we have $H_{\theta}^{V} = H_{\theta}^{V[G]}$.
Let $ B \subseteq \kappa^{+\beta+2}$ be such that $\vec{N}= \langle L_{\gamma}[B] \mathrel{|}\allowbreak \gamma < \kappa^{+\beta+2} \rangle$ witnesses $\axiomfont{LCC}(\kappa^{+\beta+1},\kappa^{+\beta+2}) $ in $V[G]$. Let $ C:= A \cup (B \setminus \kappa^{+\beta})$. Then $\vec{W}:= \langle L_{\alpha}[C] \mathrel{|}\allowbreak \alpha < \kappa^{+\beta+2} \rangle $ witnesses $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta+2})$.
\end{proof}
\begin{thmc} \label{succSing} If $\axiomfont{GCH}$ holds and $\kappa$ is a regular cardinal and $\alpha$ is an ordinal, then there is a set forcing $\mathbb{P}$ which is $<\kappa$-directed closed and $\kappa^{+\alpha+1}$-cc, $\axiomfont{GCH}$ preserving such that in $V^{\mathbb{P}}$ there is a filtration $\vec{M}=\langle M_{\xi} \mathrel{|}\allowbreak \xi < \kappa^{+\alpha} \rangle $ such that $\Psi(\vec{M},\kappa,\kappa^{+\alpha})$ holds and $\langle M_{\xi}\mathrel{|}\allowbreak \xi < \kappa^{+\alpha}\rangle \models \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\alpha}) $.
\end{thmc}
\begin{proof} We prove by induction that the following hold:
\begin{enumerate}
\item for each $\tau < \beta$ there exists $A_{\tau} \subseteq \kappa^{+\tau}$ in $V[G_{\tau}]$ such that, in $V[G_{\tau}]$ we have that $ \vec{M}^{\tau} = \langle L[A_{\tau}]_{\zeta} \mathrel{|}\allowbreak \zeta < \kappa^{+\tau + 1} \rangle \models \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\tau})$ and $\Psi(\vec{M},\kappa,\kappa^{+\tau})$
\item For $\tau_0 < \tau_1 < \beta $ we have $H_{\tau_{0}^{+}}^{V[G_{\tau}]}=L_{\tau_{0}^{+}}[A_{\tau_0}] = L_{\tau_{0}^{+}}[A_{\tau_1}]=H_{\tau_{1}^{+}}^{V[G_{\tau}] }$,
\item for every $\tau < \beta$ we have $H_{\tau}^{V[G_{\tau}]} = H_{\tau}^{V[G_{\beta}]}$ and
\item $$\mathbb{A}:=\bigcup\{ A_\tau \mathbin\upharpoonright (\kappa^{+\tau},\kappa^{+\tau+1}) \mathrel{|}\allowbreak \reg(\kappa^{+\tau}) \wedge \tau < \beta \} \cup \bigcup \{A_\tau \mathbin\upharpoonright (\kappa^{+\tau},\kappa^{+\tau+2}) \mathrel{|}\allowbreak \text{Sing}(\kappa^{+\tau})\}$$ is an element of
$ V[G_{\beta}]$.
\end{enumerate}
If $\beta=1$ the lemma follows from Theorem \ref{HWW}. If $\beta = \theta+1$, from our induction hypothesis and Lemma \ref{Succ} it follows that $\mathbb{P}_{\kappa,\kappa^{+\beta}} \Vdash \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\beta})$.
If $\beta$ is a limit ordinal all we need to verify is that \begin{equation}\label{eq1} \begin{gathered} H_{\kappa^{+\tau+1}}^{V[G_{\tau}]} = H_{\kappa^{+\tau+1}}^{V[G_{\beta}]} \end{gathered} \end{equation} for every $\tau < \beta$ in order to apply Lemma \ref{AbstractLimit}. Since for each $\zeta < \beta$ we have that $\mathbb{P}_{\kappa,\kappa^{+\zeta}} \Vdash ``\dot{\mathbb{R}}_{\kappa^{+\zeta},\kappa^{+\beta}} \text{ is } <\kappa^{+\zeta}\text{-closed}"$ and $\mathbb{P}_{\kappa,\kappa^{+\zeta}}$ preserves cardinals and cofinalities, \eqref{eq1} follows from Fact \ref{Htheta}.
\begin{comment}Suppose that $\kappa^{\beta}$ is a limit cardinal. Let $B = \bigcup_{\theta < \beta} A_{\theta} \mathbin\upharpoonright (\kappa^{\theta},\kappa^{+\theta + 1})$, where $A_{\theta} \subsetequbseteq \kappa^{+\theta+1}$ is the predicate witnessing $\axiomfont{LCC}(\kappa^{+\theta},\kappa^{\theta+1})$. We will prove that $\vec{M} = \langle L_{\alpha}[A] \mathrel{|}\allowbreak \alpha < \theta \rangle \models \axiomfont{LCC}_{\reg}(\kappa, \kappa^{+\theta})$.
We have that $\mathbb{P}_{\kappa,\kappa^{\zeta}} \Vdash ``\dot{\mathbb{R}}_{\kappa^{\zeta}} \text{ is } <\kappa^{+\zeta}\text{-closed}"$ and $\mathbb{P}_{\kappa,\kappa^{\zeta}}$ preserve cardinals and cofinalities. Therefore for any $\beta < \theta $ such that $\kappa^{+\beta}$ is regular we have $(H_{\kappa^{+\beta}})^{V^{\mathbb{P}_{\kappa,\kappa^{+\theta}}}} = (H_{\kappa^{+\theta}})^{V^{\mathbb{P}_{\theta}}}$
$(H_{\kappa^{+\beta+1}})^{V^{\mathbb{P}_{\kappa,\kappa^{+\beta}}}} = (H_{\kappa^{+\beta+1}})^{V^{\mathbb{P}_{\beta+1}}}.$
\end{comment}
\end{proof}
\section{Applications}
In this section we show that the iteration of the forcing from \cite{HWW} can replace some uses of the main forcing in \cite{FHl}.
\begin{comment}
\begin{lemma}\label{Delta1} Let $\kappa$ be a regular cardinal and $\alpha\in \ord$. Suppose $\axiomfont{GCH}$ holds in $V$. Let $\mu $ be a regular cardinal such that $\mu^{+}\leq \kappa$. Then $\mathbb{P}_{\mu,\kappa^{+}}$ forces that there exists $\vec{M}= \langle M_{\alpha} \mathrel{|}\allowbreak \alpha < \kappa^{+} \rangle$, a filtration, such that \begin{enumerate}
\item $H_{\kappa}=M_{\kappa}$, $H_{\kappa^{+}}=M_{\kappa^{+}}$
\item there is $ A \subsetequbseteq \kappa^{+}$ such that for all $\alpha < \kappa^{+}$ we have $ M_{\alpha}= L_{\alpha}[A]$
\item $\langle M,\in,\vec{M} \rangle \models \axiomfont{LCC}(\kappa,\kappa^{+})$
there is a well order of $H_{\kappa^{+}}$ that is $\Delta_{1}$ on a parameter $a \subsetequbseteq \kappa$
\end{enumerate}
\end{lemma}
\begin{proof} Let $\vec{M}$ be given by Theorem~C. We know by Theorem~C that (1) and (2) hold for $\vec{M}$. Let us verify that (3) also holds. Let $ \langle f_{\beta} \mathrel{|}\allowbreak \kappa < \beta < \kappa^{+} \rangle $ be the sequence of bijections obtained by forcing with $\mathbb{P}_{\kappa,\kappa^{+}}$. Then we have that $ \beta \in A$ iff $\{\gamma < \alpha \mathrel{|}\allowbreak \otp(f_{\beta}[\delta]) \in A \}$ contains a club and $\beta \not\in A $ iff $ \{ \gamma < \alpha \mathrel{|}\allowbreak \otp(f_{\beta}[\gamma]) \not\in A \} $ contains a club.
\end{proof}
\end{comment}
\begin{defn} Let $\mu, A, \vec{f}$ be sets. We say that $\Xi(A,\mu,\vec{f})$ holds iff $\mu$ is a regular cardinal, $A $ is a function such that $A:\mu^{+}\rightarrow 2 $ and $\vec{f}$ is a sequence of bijections $\langle f_{\beta} \mathrel{|}\allowbreak \mu \leq \beta < \mu^{+} \rangle $ such that for each $\beta \in [\mu,\mu^{+})$, $f_{\beta}:\mu \rightarrow \beta$, and the following hold:
\begin{itemize}
\item $H_{\mu^{+}} = L_{\mu^{+}}[A]$,
\item $ (\xi,1)\in A \setminus \mu \leftrightarrow \exists C ( C \text{ is a club } \wedge C \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f_\xi[\gamma])\in A \})$,
\item $ (\xi,0) \in A \setminus \mu \leftrightarrow \exists C ( C \text{ is a club } \wedge C \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f_\xi[\gamma])\not\in A \})$.
\end{itemize}
\end{defn}
\begin{lemma}\label{complexity} Let $\mu$ be a regular cardinal, $A $ a function $A:\mu^{+}\rightarrow 2 $ and $\vec{f}=\langle f_{\beta} \mathrel{|}\allowbreak \mu \leq \beta < \mu^{+} \rangle $ a sequence of bijections such that $f_{\beta}:\mu \rightarrow \beta$ for each $\beta < \mu^{+}$. Suppose $\Xi(A,\mu,\vec{f})$ holds. Given $\zeta \in \mu^{+}\setminus \mu$, the following are equivalent:
\begin{enumerate}
\item $\zeta \in A \setminus \mu $,
\item $\exists f \exists C ( f:\mu \rightarrow \zeta \wedge f \text{ is a bijection } \wedge C \text{ is a club } \wedge C \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f[\gamma])\in A \})$,
\item $ \forall f \exists C ( f:\mu \rightarrow \zeta \wedge f \text{ is a bijection } \wedge C \text{ is a club } \wedge C \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f[\gamma])\in A \})$,
\item $\forall f \forall C ( f:\mu \rightarrow \zeta \wedge f \text{ is a bijection } \wedge C \text{ is a club } \rightarrow C \not\subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f[\gamma])\not\in A \})$.
\end{enumerate} Moreover $\xi \in A \setminus \mu$ is $\Delta_1(\{A \mathbin\upharpoonright \mu,\xi\})$ over $H_{\mu^{+}}$.
\end{lemma}
\begin{proof} Let $\zeta \in \mu^{+}\setminus \mu$. As $\Xi(A,\mu,\vec{f})$ holds, it follows that
$\zeta \in A \setminus \mu \leftrightarrow \exists C ( C \text{ is a club } \wedge C \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f_\zeta[\gamma])\in A \})$.
Let $f$ be a bijection from $\mu$ onto $\zeta$. Then from the regularity of $\mu$ it follows that there exists a club $C$ such that $C \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f_\zeta[\gamma])\in A \}$ iff there exists a club $D$ such that $ D \subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f[\gamma])\in A \}$.
Thus (1), (2) and (3) are equivalent.
Let us verify that (4) is equivalent to (1). Since $ \mu,A,\vec{f}$ witness the condensation axiom, it follows that $ \zeta \not\in A $ iff there exists a club $C$ such that $ C \subseteq \{ \gamma < \mu \mathrel{|}\allowbreak \otp(f_{\zeta}[\gamma]) \not\in A \}$. Let $f$ be a bijection $f:\mu \rightarrow \zeta$. From the regularity of $\mu$ it follows that there exists a club $C$ such that $ C \subseteq \{ \gamma < \mu \mathrel{|}\allowbreak \otp(f_{\zeta}[\gamma]) \not\in A \}$ iff there exists a club $D$ such that $ D \subseteq \{ \gamma < \mu \mathrel{|}\allowbreak \otp(f[\gamma]) \not\in A \}$. Thus (1) is equivalent to (4).
The moreover part follows from the equivalence between (1), (2) and (4), and the fact that $ C \not\subseteq \{\gamma < \mu \mathrel{|}\allowbreak \otp(f[\gamma])\not\in A \}$ is equivalent to $ \forall h \forall \gamma \forall \beta ( (\gamma \in C \wedge h:\beta \rightarrow f[\gamma] \wedge h \text{ is an isomorphism}) \rightarrow (\beta,0) \in A)$.
\end{proof}
Our next result, Theorem~D, is an adaptation of \cite[Theorem~39]{FHl}.
\begin{thmd} Suppose that $\theta$ is an ordinal, $\kappa$ is a regular cardinal and $\kappa^{+\theta}$ is a regular cardinal. Then $\mathbb{P}_{\kappa,\kappa^{+\theta+1}}$ forces that $\axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\theta+1}) $ holds and that there exists a well order of $H_{\kappa^{+}}$ that is $\Delta_1$ definable over $H_{\kappa^{+}}$ in a parameter $a \subseteq \kappa^{+\theta}$.
\end{thmd}
\begin{proof} We have that $\mathbb{P}_{\kappa,\kappa^{+\theta+1}}$ forces that there exists $\vec{M}= \langle M_{\alpha} \mathrel{|}\allowbreak \alpha < \kappa^{+\theta+1} \rangle$, a filtration, such that \begin{enumerate}
\item $H_{\kappa^{+\beta}}=M_{\kappa^{+\beta}}$ for every $\beta \leq \theta+1$,
\item there exists $ A \subseteq \kappa^{+\theta+1}$ such that for all $\alpha < \kappa^{+\theta+1}$ we have $ M_{\alpha}= L_{\alpha}[A]$,
\item $\langle M,\in,\vec{M} \rangle \models \axiomfont{LCC}_{\reg}(\kappa,\kappa^{+\theta+1})$
\end{enumerate}
Let $ \langle f_{\beta} \mathrel{|}\allowbreak \kappa < \beta < \kappa^{+} \rangle $ be the sequence of bijections obtained by forcing with $\mathbb{P}_{\kappa,\kappa^{+}}$, see Remark~\ref{SeqBijections}. Then we have that $ \beta \in A$ iff $\{\gamma < \kappa^{+\theta} \mathrel{|}\allowbreak \otp(f_{\beta}[\gamma]) \in A \}$ contains a club and $\beta \not\in A $ iff $ \{ \gamma < \kappa^{+\theta} \mathrel{|}\allowbreak \otp(f_{\beta}[\gamma]) \not\in A \} $ contains a club.
Therefore by Lemma \ref{complexity} we can define $A \cap \kappa^{+\theta+1}$ in $H_{\kappa^{+\theta+1}}$ using $A \cap \kappa^{+\theta}$ with a $\Delta_1$ formula. The concatenation of the definition of $A$ with the $\Delta_1$ well order of $L_{\kappa^{+\theta+1}}[A]$ gives the $\Delta_1$ well order we sought.
\end{proof}
\begin{cord} Suppose that $\theta$ is an ordinal and $\kappa$ is a regular cardinal. Then $\mathbb{P}_{\kappa,\kappa^{+\theta+1}}$ forces that for every $S \subseteq \kappa$ stationary we have $\Dl^{*}_{S}(\Pi^{1}_{2})$ and in particular $\diamondsuit(S)$.
\end{cord}
\begin{proof} Follows from Theorem~D and \cite[Theorem~2.24]{FMR}.
\end{proof}
\section*{Acknowledgments}
The author is grateful to Assaf Rinot and Miguel Moreno for several discussions on local club condensation.
The author thanks Liuzhen Wu and Peter Holy for discussions on how to force local club condensation, and Farmer Schlutzenberg and Martin Zeman for discussions on condensation properties of extender models.
\end{document} |
\begin{document}
\title{Digital nets in dimension two with the optimal order of $L_p$ discrepancy}
\begin{abstract}
We study the $L_p$ discrepancy of two-dimensional digital nets for finite $p$. In the year 2001 Larcher and Pillichshammer identified a class of digital nets for which the symmetrized version in the sense of Davenport has $L_2$ discrepancy of the order $\sqrt{\log N}/N$, which is best possible due to the celebrated result of Roth. However, it remained open whether this discrepancy bound also holds for the original digital nets without any modification.
In the present paper we identify nets from the above mentioned class for which the symmetrization is not necessary in order to achieve the optimal order of $L_p$ discrepancy for all $p \in [1,\infty)$.
Our findings are in the spirit of a paper by Bilyk from 2013, who considered the $L_2$ discrepancy of lattices consisting of the elements $(k/N,\{k \alpha\})$ for $k=0,1,\ldots,N-1$, and who gave Diophantine properties of $\alpha$ which guarantee the optimal order of $L_2$ discrepancy.
\end{abstract}
\centerline{\begin{minipage}[hc]{130mm}{
{\em Keywords:} $L_p$ discrepancy, digital nets, Hammersley net\\
{\em MSC 2010:} 11K06, 11K38}
\end{minipage}}
\allowdisplaybreaks
\section{Introduction}
Discrepancy is a measure for the irregularities of point distributions in the unit interval (see, e.g., \cite{kuinie}).
Here we study point sets $\mathcal{P}$ with $N$ elements in the two-dimensional unit interval $[0,1)^2$. We define
the {\it discrepancy function} of such a point set by
$$ \Delta_{\mathcal{P}}(\boldsymbol{t})=\frac{1}{N}\sum_{\boldsymbol{z}\in\mathcal{P}}\boldsymbol{1}_{[\boldsymbol{0},\boldsymbol{t})}(\boldsymbol{z})-t_1t_2, $$
where for $\boldsymbol{t}=(t_1,t_2)\in [0,1]^2$ we set $[\boldsymbol{0},\boldsymbol{t})=[0,t_1)\times [0,t_2)$ with area $t_1t_2$
and denote by $\boldsymbol{1}_{[\boldsymbol{0},\boldsymbol{t})}$ the indicator function of this interval. The {\it $L_p$ discrepancy} for $p\in [1,\infty)$ of $\mathcal{P}$ is given by
$$ L_{p}(\mathcal{P}):=\|\Delta_{\mathcal{P}}\|_{L_{p}([0,1]^2)}=\left(\int_{[0,1]^2}|\Delta_{\mathcal{P}}(\boldsymbol{t})|^p\,\mathrm{d} \boldsymbol{t}\right)^{\frac{1}{p}} $$
and the {\it star discrepancy} or {\it $L_{\infty}$ discrepancy} of $\mathcal{P}$ is defined as
$$ L_{\infty}(\mathcal{P}):=\|\Delta_{\mathcal{P}}\|_{L_{\infty}([0,1]^2)}=\sup_{\boldsymbol{t} \in [0,1]^2}|\Delta_{\mathcal{P}}(\boldsymbol{t})|. $$
The $L_p$ discrepancy is a quantitative measure for the irregularity of distribution of a point set. Furthermore, it is intimately related to the worst-case integration error of quasi-Monte Carlo rules; see \cite{DP10,kuinie, LP14,Nied92}.
It is well known that for every $p\in [1,\infty)$ we have\footnote{Throughout this paper, for functions $f,g:\mathbb{N} \rightarrow \mathbb{R}^+$, we write $g(N) \lesssim f(N)$
if there exists a constant $C>0$, independent of $N$, such that $g(N) \le C f(N)$. Likewise, we write $g(N) \gtrsim f(N)$ if $g(N) \geq C f(N)$. Further, we write $f(N) \asymp g(N)$ if the relations $g(N) \lesssim f(N)$ and $g(N) \gtrsim f(N)$ hold simultaneously.}
\begin{equation} \label{roth}
L_p(\mathcal{P}) \gtrsim_p \frac{\sqrt{\log{N}}}{N},
\end{equation}
for every $N \ge 2$ and every $N$-element point set $\mathcal{P}$ in $[0,1)^2$. Here $\log$ denotes the natural logarithm. This was first shown by Roth \cite{Roth2} for $p = 2$ and hence for all $p \in [2,\infty]$ and later by
Schmidt \cite{schX} for all $p\in(1,2)$. The case $p=1$ was added by Hal\'{a}sz \cite{hala}. For the star discrepancy we have according to Schmidt~\cite{Schm72distrib} that
\begin{equation} \label{schmidt}
L_{\infty}(\mathcal{P}) \gtrsim \frac{\log{N}}{N},
\end{equation}
for every $N \ge 2$ and every $N$-element point set $\mathcal{P}$ in $[0,1)^2$.
\paragraph{Irrational lattices.}
It is well known that the lower bounds in \eqref{roth} and \eqref{schmidt} are best possible in the order of magnitude in $N$. For example, when the irrational number $\alpha=[a_0;a_1,a_2,\ldots]$ has bounded partial quotients in its continued fraction expansion, then the lattice $\mathcal{P}_{\alpha}$ consisting of the points $(k/N,\{k \alpha\})$ for $k=0,1,\ldots,N-1$, where $\{\cdot\}$ denotes reduction modulo one, has optimal order of star discrepancy in the sense of \eqref{schmidt} (see, e.g., \cite{lerch} or \cite[Corollary~3.5 in combination with Lemma~3.7]{Nied92}). This is, in this generality, not true anymore when, e.g., the $L_2$ discrepancy is considered. However, in 1956 Davenport~\cite{daven} showed that the symmetrized version $\mathcal{P}_{\alpha}^{{\rm sym}}:=\mathcal{P}_{\alpha}\cup \mathcal{P}_{-\alpha}$ of $\mathcal{P}_{\alpha}$ consisting of $2N$ points has $L_2$ discrepancy of the order $\sqrt{\log N}/N$ which is optimal with respect to \eqref{roth}.
Later Bilyk~\cite{bil} introduced a further condition on $\alpha$ which guarantees the optimal order of $L_2$ discrepancy without the process of symmetrization: we have $L_2(\mathcal{P}_{\alpha}) \asymp_{\alpha} \sqrt{\log N}/N$ if and only if the bounded partial quotients satisfy $|\sum_{k=0}^{n-1} (-1)^k a_k| \lesssim_{\alpha} \sqrt{n}$.
\paragraph{Digital nets.} In this paper we study analog questions for digital nets over $\mathbb{Z}_2$, which are an important class of point sets with low star discrepancy. Since we only deal with digital nets over $\mathbb{Z}_2$ and in dimension 2 we restrict the necessary definitions to this case. For the general setting we refer to the books of Niederreiter~\cite{Nied92} (see also \cite{Nied87}), of Dick and Pillichshammer~\cite{DP10}, or of Leobacher and Pillichshammer~\cite{LP14}.
Let $n\in \mathbb{N}$ and let $\mathbb{Z}_2$ be the finite field of order 2, which we identify with the set $\{0,1\}$ equipped with arithmetic operations modulo 2. A two-dimensional digital net over $\mathbb{Z}_2$ is a point set $\{\boldsymbol{x}_0,\ldots, \boldsymbol{x}_{2^n-1}\}$ in $[0,1)^2$, which is generated by two $n\times n$ matrices over $\mathbb{Z}_2$. The procedure is as follows.
\begin{enumerate}
\item Choose two $n \times n$ matrices $C_1$ and $C_2$ with entries from $\mathbb{Z}_2$.
\item For $r\in\{0,1,\dots,2^n-1\}$ let $r=r_0+2r_1 +\cdots +2^{n-1}r_{n-1}$ with $r_i\in\{0,1\}$ for all $i\in\{0,\dots,n-1\}$ be the dyadic expansion of $r$, and set $\vec{r}=(r_0,\ldots,r_{n-1})^{\top}\in \mathbb{Z}_2^n$.
\item For $j=1,2$ compute $C_j \vec{r}=:(y_{r,1}^{(j)},\ldots ,y_{r,n}^{(j)})^{\top}\in \mathbb{Z}_2^n$, where all arithmetic operations are over $\mathbb{Z}_2$.
\item For $j=1,2$ compute $x_r^{(j)}=\frac{y_{r,1}^{(j)}}{2}+\cdots +\frac{y_{r,n}^{(j)}}{2^n}$ and set $\boldsymbol{x}_{r}=(x_r^{(1)},x_{r}^{(2)})\in [0,1)^2$.
\item Set $\mathcal{P}:=\{\boldsymbol{x}_0,\dots,\boldsymbol{x}_{2^n-1}\}$. We call $\mathcal{P}$ a {\it digital net over $\mathbb{Z}_2$} generated by $C_1$ and $C_2$.
\end{enumerate}
One of the most well-known digital nets is the {\it 2-dimensional Hammersley net $\mathcal{P}^{{\rm Ham}}$ in base 2} which is generated by the matrices
$$C_1 = \left ( \begin{array}{llcll}
0 & 0 & \cdots & 0 & 1\\
0 & 0 & \cdots & 1 & 0 \\
\multicolumn{5}{c}\dotfill\\
0 & 1 & \cdots & 0 & 0 \\
1 & 0 & \cdots & 0 & 0
\end{array} \right ) \ \ \mbox{ and } \ \
C_2 = \left ( \begin{array}{llcll}
1 & 0 & \cdots & 0 & 0\\
0 & 1 & \cdots & 0 & 0 \\
\multicolumn{5}{c}\dotfill\\
0 & 0 & \cdots & 1 & 0 \\
0 & 0 & \cdots & 0 & 1
\end{array} \right ).$$
Due to the choice of $C_1$ the first coordinates of the elements of the Hammersley net are $x_r^{(1)}=r/2^n$ for $r=0,1,\ldots,2^n -1$.
\paragraph{$(0,n,2)$-nets in base 2.}
A point set $\mathcal{P}$ consisting of $2^n$ elements in $[0,1)^2$ is called a {\it $(0,n,2)$-net in base 2}, if every dyadic box
$$\left[\frac{m_1}{2^{j_1}},\frac{m_1+1}{2^{j_1}}\right) \times \left[\frac{m_2}{2^{j_2}},\frac{m_2+1}{2^{j_2}}\right),$$
where $j_1,j_2\in\mathbb{N}_0$ and $m_1\in\{0,1,\dots,2^{j_1}-1\}$ and $m_2\in\{0,1,\dots,2^{j_2}-1\}$ with volume $2^{-n}$, i.e. with $j_1+j_2=n$, contains exactly one element of $\mathcal{P}$.
It is well known that a digital net over $\mathbb{Z}_2$ is a $(0,n,2)$-net in base 2 if and only if the following condition holds: For every choice of integers $d_1,d_2\in \mathbb{N}_0$ with $d_1+d_2=n$ the first $d_1$ rows of $C_1$ and the first $d_2$ rows of $C_2$ are linearly independent.
Every digital $(0,n,2)$-net achieves the optimal order of star discrepancy in the sense of \eqref{schmidt}, whereas there exist nets which do not have the optimal order of $L_p$ discrepancy for finite $p$. One example is the Hammersley net as defined above for which we have (see \cite{FauPil,Lar,Pill}) $$L_p(\mathcal{P}^{{\rm Ham}})=\left(\left(\frac{n}{8 \cdot 2^n}\right)^p+O(n^{p-1})\right)^{1/p} \ \ \mbox{for all $p\in [1,\infty)$}$$ and $$L_{\infty}(\mathcal{P}^{{\rm Ham}})=\frac{1}{2^n} \left(\frac{n}{3}+\frac{13}{9}-(-1)^n \frac{4}{9 \cdot 2^n}\right).$$
\paragraph{Symmetrized nets.}
Motivated by the results of Davenport for irrational lattices, Larcher and Pillichshammer~\cite{lp01} studied the symmetrization of digital nets. Let $\boldsymbol{x}_r=(x_r,y_r)$ for $r=0,1,\ldots,2^n-1$ be the elements of a digital net generated by the matrices $$C_1 = \left ( \begin{array}{llcll}
0 & 0 & \cdots & 0 & 1\\
0 & 0 & \cdots & 1 & 0 \\
\multicolumn{5}{c}\dotfill\\
0 & 1 & \cdots & 0 & 0 \\
1 & 0 & \cdots & 0 & 0
\end{array} \right )\ \ \mbox{ and }\ \ C_2 = \left ( \begin{array}{llcll}
1 & a_{1,2} & \cdots & a_{1,n-1} & a_{1,n}\\
0 & 1 & \cdots & a_{2,n-1} & a_{2,n} \\
\multicolumn{5}{c}\dotfill\\
0 & 0 & \cdots & 1 & a_{n-1,n} \\
0 & 0 & \cdots & 0 & 1
\end{array} \right ),
$$
with entries $a_{j,k} \in \mathbb{Z}_2$ for $1 \le j <k \le n$. The matrix $C_2$ is a so-called ``{\it non-singular upper triangular (NUT) matrix}''. Then the {\it symmetrized net} $\mathcal{P}^{{\rm sym}}$ consisting of $(x_r,y_r)$ and $(x_r,1-y_r)$ for $r=0,1,\ldots,2^n-1$ has $L_2$ discrepancy of optimal order $$L_2(\mathcal{P}^{{\rm sym}}) \asymp \frac{\sqrt{n}}{2^{n+1}} \ \ \ \mbox{for every $n \in \mathbb{N}$.}$$
In the present paper we show in the spirit of the paper of Bilyk~\cite{bil} that there are NUT matrices $C_2$ such that symmetrization is not required in order to achieve the optimal order of $L_2$ discrepancy. Our result holds for the $L_p$ discrepancy for all finite $p$ and not only for the $L_2$ case.
\section{The result}
The central aim of this paper is to provide conditions on the generating matrices $C_1,C_2$ which lead to the optimal order of $L_p$ discrepancy of the corresponding nets. We do so for a class of nets which are generated by $n\times n$ matrices over $\mathbb{Z}_2$ of the following form:
\begin{equation} \label{matrixa} C_1=
\begin{pmatrix}
0 & 0 & \cdots & 0 & 1\\
0 & 0 & \cdots & 1 & 0 \\
\multicolumn{5}{c}\dotfill\\
0 & 1 & \cdots & 0 & 0 \\
1 & 0 & \cdots & 0 & 0
\end{pmatrix}
\end{equation}
and a NUT matrix of the special form
\begin{equation} C_2=
\begin{pmatrix}
1 & a_{1} & a_{1} & \cdots & a_{1} & a_{1} & a_{1} \\
0 & 1 & a_{2} & \cdots & a_{2} & a_{2} & a_{2} \\
0 & 0 & 1 & \cdots & a_{3} & a_{3} & a_{3} \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\
0 & 0 & 0 & \cdots & 1 & a_{n-2} & a_{n-2} \\
0 & 0 & 0 & \cdots & 0 & 1 & a_{n-1} \\
0 & 0 & 0 & \cdots & 0 & 0 & 1
\end{pmatrix},
\end{equation}
where $a_i\in \mathbb{Z}_2$ for all $i\in\{1,\dots,n-1\}$. We study the $L_p$ discrepancy of the digital net $\mathcal{P}_{\boldsymbol{a}}$ generated by $C_1$ and $C_2$, where $\boldsymbol{a}=(a_1,\dots,a_{n-1})\in \mathbb{Z}_2^{n-1}$. The set $\mathcal{P}_{\boldsymbol{a}}$ can be written as
\begin{equation}\label{darstPa}
\mathcal{P}_{\boldsymbol{a}}=\left\{\bigg(\frac{t_n}{2}+\dots+\frac{t_1}{2^n},\frac{b_1}{2}+\dots+\frac{b_n}{2^n}\bigg):t_1,\dots, t_n \in\{0,1\}\right\},
\end{equation}
where $b_k=t_k\oplus a_{k}(t_{k+1}\oplus \dots \oplus t_n)$ for $k\in\{1,\dots,n-1\}$ and $b_n=t_n$. The operation $\oplus$ denotes addition modulo 2.
The following result states that the order of the $L_p$ discrepancy of the digital nets $\mathcal{P}_{\boldsymbol{a}}$ is determined by the number of zero elements in $\boldsymbol{a}$.
\begin{theorem} \label{theo1}
Let $h_n=h_n(\boldsymbol{a})=\sum_{i=1}^{n-1}(1-a_i)$ be the number of zeroes in the tuple $\boldsymbol{a}$. Then we have for all $p\in[1,\infty)$
$$L_p(\mathcal{P}_{\boldsymbol{a}})\asymp_p \frac{\max\{\sqrt{n},h_n(\boldsymbol{a})\}}{2^n}.$$
In particular, the net $\mathcal{P}_{\boldsymbol{a}}$ achieves the optimal order of $L_p$ discrepancy for all $p\in [1,\infty)$ if and only if $h_n(\boldsymbol{a})\lesssim \sqrt{n}$.
\end{theorem}
The proof of Theorem~\ref{theo1}, which will be given in Section~\ref{haarf}, is based on Littlewood-Paley theory and tight estimates of the Haar coefficients of the discrepancy function $\Delta_{\mathcal{P}_{\boldsymbol{a}}}$.
For example, if $\boldsymbol{a}=\boldsymbol{0}:=(0,0,\ldots,0)$ we get the Hammersley net $\mathcal{P}^{{\rm Ham}}$ in dimension 2. We have $h_n(\boldsymbol{0})=n-1$ and hence $$L_p(\mathcal{P}_{\boldsymbol{0}})\asymp_p \frac{n}{2^n}.$$ If $\boldsymbol{a}=\boldsymbol{1}:=(1,1,\ldots,1)$, then we have $h_n(\boldsymbol{1})=0$ and hence $$L_p(\mathcal{P}_{\boldsymbol{1}})\asymp_p \frac{\sqrt{n}}{2^n}.$$
\begin{remark} \rm
The approach via Haar functions allows the precise computation of the $L_2$ discrepancy of digital nets via Parseval's identity. We did so for a certain class of nets in~\cite{Kritz}. It would be possible but tedious to do the same for the class $\mathcal{P}_{\boldsymbol{a}}$ of nets considered in this paper. However, we only executed the massive calculations for the special case where $\boldsymbol{a}=\boldsymbol{1}:=(1,1,\dots,1)$, hence where $C_2$ is a NUT matrix filled with ones in the upper right triangle. We conjecture that this net has the lowest $L_2$ discrepancy among the class of nets $\mathcal{P}_{\boldsymbol{a}}$ for a fixed $n\in\mathbb{N}$. The exact value of its $L_2$ discrepancy is given by
\begin{equation}\label{LpP1}
L_2(\mathcal{P}_{\boldsymbol{1}})=\frac{1}{2^n}\left(\frac{5n}{192}+\frac{15}{32}+\frac{1}{4\cdot 2^{n}}-\frac{1}{72\cdot 2^{2n}}\right)^{1/2}.
\end{equation}
We omit the lengthy proof, but its correctness may be checked with Warnock's formula~\cite{Warn} (see also \cite[Proposition~2.15]{DP10}) for small values of $n$. Compare \eqref{LpP1} with the exact $L_2$ discrepancy of $\mathcal{P}^{{\rm Ham}}=\mathcal{P}_{\boldsymbol{0}}$ which is given by (see \cite{FauPil,HaZa,Pill,Vi})
$$L_2(\mathcal{P}_{\boldsymbol{0}})=\frac{1}{2^n}\left(\frac{n^2}{64}+\frac{29n}{192}+\frac{3}{8}-\frac{n}{16 \cdot 2^n}+\frac{1}{4\cdot 2^n}-\frac{1}{72 \cdot 2^{2n}}\right)^{1/2}.$$
\end{remark}
\section{The proof of Theorem~\ref{theo1} via Haar expansion of the discrepancy function} \label{haarf}
A dyadic interval of length $2^{-j}, j\in {\mathbb N}_0,$ in $[0,1)$ is an interval of the form
$$ I=I_{j,m}:=\left[\frac{m}{2^j},\frac{m+1}{2^j}\right) \ \ \mbox{for } \ m\in \{0,1,\ldots,2^j-1\}.$$
The left and right half of $I_{j,m}$ are the dyadic intervals $I_{j+1,2m}$ and $I_{j+1,2m+1}$, respectively. The Haar function $h_{j,m}$
is the function on $[0,1)$ which is $+1$ on the left half of $I_{j,m}$, $-1$ on the right half of $I_{j,m}$ and 0 outside of $I_{j,m}$. The $L_\infty$-normalized Haar system consists of
all Haar functions $h_{j,m}$ with $j\in{\mathbb N}_0$ and $m=0,1,\ldots,2^j-1$ together with the indicator function $h_{-1,0}$ of $[0,1)$.
Normalized in $L_2([0,1))$ we obtain the orthonormal Haar basis of $L_2([0,1))$.
Let ${\mathbb N}_{-1}=\mathbb{N}_0 \cup \{-1\}$ and define ${\mathbb D}_j=\{0,1,\ldots,2^j-1\}$ for $j\in{\mathbb N}_0$ and ${\mathbb D}_{-1}=\{0\}$.
For $\boldsymbol{j}=(j_1,j_2)\in{\mathbb N}_{-1}^2$ and $\boldsymbol{m}=(m_1,m_2)\in {\mathbb D}_{\boldsymbol{j}} :={\mathbb D}_{j_1} \times {\mathbb D}_{j_2}$,
the Haar function $h_{\boldsymbol{j},\boldsymbol{m}}$ is given as the tensor product
$$h_{\boldsymbol{j},\boldsymbol{m}}(\boldsymbol{t}) = h_{j_1,m_1}(t_1) h_{j_2,m_2}(t_2) \ \ \ \mbox{ for } \boldsymbol{t}=(t_1,t_2)\in[0,1)^2.$$
We speak of $I_{\boldsymbol{j},\boldsymbol{m}} = I_{j_1,m_1} \times I_{j_2,m_2}$ as dyadic boxes with level $|\boldsymbol{j}|=\max\{0,j_1\}+\max\{0,j_2\}$, where we set $I_{-1,0}=\boldsymbol{1}_{[0,1)}$. The system
$$ \left\{2^{\frac{|\boldsymbol{j}|}{2}}h_{\boldsymbol{j},\boldsymbol{m}}: \boldsymbol{j}\in\mathbb{N}_{-1}^2, \boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}\right\} $$
is an orthonormal basis of $L_2([0,1)^2)$ and we have Parseval's identity which states that for every function $f\in L_2([0,1)^2)$ we have
\begin{equation} \label{parseval}
\|f\|_{L_2([0,1)^2)}^2=\sum_{\boldsymbol{j}\in \mathbb{N}_{-1}^2} 2^{|\boldsymbol{j}|} \sum_{\boldsymbol{m}\in\mathbb{D}_{\boldsymbol{j}}} |\mu_{\boldsymbol{j},\boldsymbol{m}}|^2,
\end{equation}
where the numbers $\mu_{\boldsymbol{j},\boldsymbol{m}}=\mu_{\boldsymbol{j},\boldsymbol{m}}(f)=\langle f, h_{\boldsymbol{j},\boldsymbol{m}} \rangle =\int_{[0,1)^2} f(\boldsymbol{t}) h_{\boldsymbol{j},\boldsymbol{m}}(\boldsymbol{t})\,\mathrm{d}\boldsymbol{t}$ are the so-called Haar coefficients of $f$. There
is no such identity for the $L_p$ norm of $f$ for $p \not=2$; however, for a function $f\in L_p([0,1)^2)$ we have a so-called Littlewood-Paley inequality. It involves the square function $S(f)$ of a function $f\in L_p([0,1)^2)$ which is given as
$$S(f) = \left( \sum_{\boldsymbol{j} \in \mathbb{N}_{-1}^2} \sum_{\boldsymbol{m} \in \mathbb{D}_{\boldsymbol{j}}} 2^{2|\boldsymbol{j}|} \, |\mu_{\boldsymbol{j},\boldsymbol{m}}|^2 \, {\mathbf 1}_{I_{\boldsymbol{j},\boldsymbol{m}}} \right)^{1/2},$$ where ${\mathbf 1}_I$ is the characteristic function of $I$.
\begin{lemma}[Littlewood-Paley inequality]\label{lpi}
Let $p \in (1,\infty)$ and let $f\in L_p([0,1)^2)$. Then
$$ \| S(f) \|_{L_p} \asymp_{p} \| f \|_{L_p}.$$
\end{lemma}
In the following let $\mu_{\boldsymbol{j},\boldsymbol{m}}$ denote the Haar coefficients of the local discrepancy function $\Delta_{\mathcal{P}_{\boldsymbol{a}}}$, i.e., $$\mu_{\boldsymbol{j},\boldsymbol{m}}=\int_{[0,1)^2} \Delta_{\mathcal{P}_{\boldsymbol{a}}}(\boldsymbol{t}) h_{\boldsymbol{j},\boldsymbol{m}}(\boldsymbol{t}) \,\mathrm{d} \boldsymbol{t}.$$ In order to estimate the $L_p$ discrepancy of $\mathcal{P}_{\boldsymbol{a}}$ by means of Lemma~\ref{lpi} we require good estimates of the Haar coefficients $\mu_{\boldsymbol{j},\boldsymbol{m}}$. This is a very technical and tedious task which we defer to the appendix. In the following we just collect the obtained bounds:
\begin{lemma} \label{coro1}
Let $\boldsymbol{j}=(j_1,j_2)\in \mathbb{N}_{0}^2$. Then
\begin{itemize}
\item[(i)] if $j_1+j_2\leq n-3$ and $j_1,j_2\geq 0$ then $|\mu_{\boldsymbol{j},\boldsymbol{m}}| \lesssim 2^{-2n}$.
\item[(ii)] if $j_1+j_2\ge n-2$ and $0\le j_1,j_2\le n$ then $|\mu_{\boldsymbol{j},\boldsymbol{m}}| \lesssim 2^{-n-j_1-j_2}$ and
$|\mu_{\boldsymbol{j},\boldsymbol{m}}| = 2^{-2j_1-2j_2-4}$ for all but at most $2^n$ coefficients $\mu_{\boldsymbol{j},\boldsymbol{m}}$ with $\boldsymbol{m}\in {\mathbb D}_{\boldsymbol{j}}$.
\item[(iii)] if $j_1 \ge n$ or $j_2 \ge n$ then $|\mu_{\boldsymbol{j},\boldsymbol{m}}| = 2^{-2j_1-2j_2-4}$.
\end{itemize}
Now let $\boldsymbol{j}=(-1,k)$ or $\boldsymbol{j}=(k,-1)$ with $k\in \mathbb{N}_0$. Then
\begin{itemize}
\item[(iv)] if $k<n$ then $|\mu_{\boldsymbol{j},\boldsymbol{m}}| \lesssim 2^{-n-k}$.
\item[(v)] if $k\ge n$ then $|\mu_{\boldsymbol{j},\boldsymbol{m}}| = 2^{-2k-3}$.
\end{itemize}
Finally, if $h_n=\sum_{i=1}^{n-1}(1-a_i)$, then
\begin{itemize}
\item[(vi)] $\mu_{(-1,-1),(0,0)} = 2^{-n-3}(h_n+5)+2^{-2n-2}$.
\end{itemize}
\end{lemma}
\begin{remark}\rm
We remark that Lemma~\ref{coro1} shows that the only Haar coefficient that is relevant in our analysis is the coefficient $\mu_{(-1,-1),(0,0)}$. All other coefficients do not affect the order of $L_p$ discrepancy significantly: they are small enough such that their contribution to the overall $L_p$ discrepancy is of the order of Roth's lower bound.
The proof of Lemma~\ref{coro1} is split into several cases which take several pages of very technical and tedious computations. We would like to mention that the proof of the formula for the important coefficient $\mu_{(-1,-1),(0,0)}$ is manageable without excessive effort.
\end{remark}
Now the proof of Theorem~\ref{theo1} can be finished by inserting the upper bounds on the Haar coefficients of $\Delta_{\mathcal{P}_{\boldsymbol{a}}}$ into Lemma~\ref{lpi}. This shows the upper bound. For details we refer to the paper \cite{HKP14} where the same method was applied (we remark that our Lemma~\ref{coro1} is a direct analog of \cite[Lemma~1]{HKP14}; hence the proof of Theorem~\ref{theo1} runs along the same lines as the proof of
\cite[Theorem 1]{HKP14} but with \cite[Lemma~1]{HKP14} replaced by Lemma~\ref{coro1}).
The matching lower bound is a consequence of $$L_p(\mathcal{P}_{\boldsymbol{a}}) \ge L_1(\mathcal{P}_{\boldsymbol{a}}) =\int_{[0,1]^2} | \Delta_{\mathcal{P}_{\boldsymbol{a}}}(\boldsymbol{t})| \,\mathrm{d} \boldsymbol{t} \ge \left|\int_{[0,1]^2} \Delta_{\mathcal{P}_{\boldsymbol{a}}}(\boldsymbol{t}) \,\mathrm{d} \boldsymbol{t}\right|=|\mu_{(-1,-1),(0,0)}|$$ and item {\it (vi)} of Lemma~\ref{coro1}.
\section{Appendix: Computation of the Haar coefficients $\mu_{\boldsymbol{j},\boldsymbol{m}}$}
Let $\mathcal{P}$ be an arbitrary $2^n$-element point set in the unit square. The Haar coefficients of its discrepancy function $\Delta_{\mathcal{P}}$ are given as follows (see~\cite{hin2010}). We write $\boldsymbol{z}=(z_1,z_2)$.
\begin{itemize}
\item If $\boldsymbol{j}=(-1,-1)$, then
\begin{equation} \label{art1} \mu_{\boldsymbol{j},\boldsymbol{m}}=\frac{1}{2^n}\sum_{\boldsymbol{z}\in \mathcal{P}} (1-z_1)(1-z_2)-\frac14. \end{equation}
\item If $\boldsymbol{j}=(j_1,-1)$ with $j_1\in \mathbb{N}_0$, then
\begin{equation} \label{art2} \mu_{\boldsymbol{j},\boldsymbol{m}}=-2^{-n-j_1-1}\sum_{\boldsymbol{z}\in \mathcal{P}\cap I_{\boldsymbol{j},\boldsymbol{m}}} (1-|2m_1+1-2^{j_1+1}z_1|)(1-z_2)+2^{-2j_1-3}. \end{equation}
\item If $\boldsymbol{j}=(-1,j_2)$ with $j_2\in \mathbb{N}_0$, then
\begin{equation} \label{art3} \mu_{\boldsymbol{j},\boldsymbol{m}}=-2^{-n-j_2-1}\sum_{\boldsymbol{z}\in \mathcal{P}\cap I_{\boldsymbol{j},\boldsymbol{m}}} (1-|2m_2+1-2^{j_2+1}z_2|)(1-z_1)+2^{-2j_2-3}. \end{equation}
\item If $\boldsymbol{j}=(j_1,j_2)$ with $j_1,j_2\in \mathbb{N}_0$, then
\begin{align} \label{art4} \mu_{\boldsymbol{j},\boldsymbol{m}}=&2^{-n-j_1-j_2-2}\sum_{\boldsymbol{z}\in \mathcal{P}\cap I_{\boldsymbol{j},\boldsymbol{m}}} (1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|) \nonumber \\ &-2^{-2j_1-2j_2-4}. \end{align}
\end{itemize}
In all these identities the first summands involving the sum over $\boldsymbol{z}\in \mathcal{P}\cap I_{\boldsymbol{j},\boldsymbol{m}}$ come from the counting part $\frac{1}{N}\sum_{\boldsymbol{z}\in\mathcal{P}}\boldsymbol{1}_{[\boldsymbol{0},\boldsymbol{t})}(\boldsymbol{z})$ and the second summands come from the linear part $-t_1t_2$ of the discrepancy function, respectively.
Note that we could also write $\boldsymbol{z}\in \mathring{I}_{\boldsymbol{j},\boldsymbol{m}}$, where $\mathring{I}_{\boldsymbol{j},\boldsymbol{m}}$ denotes the interior of $I_{\boldsymbol{j},\boldsymbol{m}}$, since the summands in the formulas~\eqref{art2}--\eqref{art4} vanish if $\boldsymbol{z}$ lies on the boundary of the dyadic box. Hence, in order to compute the Haar coefficients of the discrepancy function, we have to deal with the sums over $\boldsymbol{z}$ which appear in the formulas above and to determine which points $\boldsymbol{z}=(z_1,z_2)\in \mathcal{P}$ lie in the dyadic box $I_{\boldsymbol{j},\boldsymbol{m}}$ with $\boldsymbol{j}\in \mathbb{N}_{-1}^2$ and $\boldsymbol{m}=(m_1,m_2)\in\mathbb{D}_{\boldsymbol{j}}$. If $m_1$ and $m_2$ are non-negative integers, then they have a dyadic expansion of the form
\begin{equation} \label{mdyadic} m_1=2^{j_1-1}r_1+\dots+r_{j_1} \text{\, and \,} m_2=2^{j_2-1}s_1+\dots+s_{j_2} \end{equation}
with digits $r_{i_1},s_{i_2}\in\{0,1\}$ for all $i_1\in\{1,\dots,j_1\}$ and $i_2\in\{1,\dots,j_2\}$, respectively.
Let $\boldsymbol{z}=(z_1,z_2)=\big(\frac{t_n}{2}+\dots+\frac{t_1}{2^n},\frac{b_1}{2}+\dots+\frac{b_n}{2^n}\big)$ be a point of our point set $\mathcal{P}_{\boldsymbol{a}}$. Then $\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}$
if and only if
\begin{equation} \label{cond} t_{n+1-k}=r_k \text{\, for all \,} k\in \{1,\dots, j_1\} \text{\, and \,} b_k=s_k \text{\, for all \,} k\in \{1,\dots, j_2\}. \end{equation}
Further, for such a point $\boldsymbol{z}=(z_1,z_2)\in I_{\boldsymbol{j},\boldsymbol{m}}$ we have
\begin{equation} \label{z1} 2m_1+1-2^{j_1+1}z_1=1-t_{n-j_1}-2^{-1}t_{n-j_1-1}-\dots-2^{j_1-n+1}t_1 \end{equation}
and
\begin{equation} \label{z2} 2m_2+1-2^{j_2+1}z_2=1-b_{j_2+1}-2^{-1}b_{j_2+2}-\dots-2^{j_2-n+1}b_n. \end{equation}
There are several parallel tracks between the proofs in this section and the proofs in~\cite[Section 3]{Kritz},
where we computed the Haar coefficients for a simpler class of digital nets. \\
Let in the following $\mathcal{H}_j:=\{i\in\{1,\dots,j\}: a_i=0\}$ for $j\in\{1,\dots,n-1\}$. Then $h_n=|\mathcal{H}_{n-1}|$ is the parameter as defined in Theorem~\ref{theo1}.
\paragraph{Case 1: $\boldsymbol{j}\in\mathcal{J}_1:=\{(-1,-1)\}$}
\begin{proposition} \label{prop1}
Let $\boldsymbol{j}\in \mathcal{J}_1$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$. Then we have
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=\frac{h_n+5}{2^{n+3}}+\frac{1}{2^{2n+2}}. $$
\end{proposition}
\begin{proof}
By~\eqref{art1} we have
\begin{align*}
\mu_{\boldsymbol{j},\boldsymbol{m}}=& \frac{1}{2^n}\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}} (1-z_1)(1-z_2)-\frac14 \\
=&1-\frac{1}{2^n}\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_1-\frac{1}{2^n}\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_2+\frac{1}{2^n}\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_1z_2-\frac14 \\
=&-\frac14+\frac{1}{2^n}+\frac{1}{2^n}\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_1z_2,
\end{align*}
where we regarded $\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_1=\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_2=\sum_{l=0}^{2^n-1}l/2^n=2^{n-1}-2^{-1}$ in the last step.
It remains to evaluate $\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_1z_2$. Using the representation of $\mathcal{P}_{\boldsymbol{a}}$ in \eqref{darstPa}, we have
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}}z_1z_2=& \sum_{t_1,\dots,t_n=0}^1 \left(\frac{t_n}{2}+\dots+\frac{t_{1}}{2^n}\right)\left(\frac{b_1}{2}+\dots+\frac{b_n}{2^n}\right) \\
=&\sum_{k=1}^n \sum_{t_1,\dots,t_n=0}^1 \frac{t_kb_k}{2^{n+1-k}2^k}+\sum_{\substack{k_1,k_2=1 \\ k_1 \neq k_2}}^n \sum_{t_1,\dots,t_n=0}^1 \frac{t_{k_1}b_{k_2}}{2^{n+1-k_1}2^{k_2}}=:S_1+S_2.
\end{align*}
Note that $b_k$ only depends on $t_k,t_{k+1},\ldots,t_n$ and $b_n=t_n$. We have
\begin{align*}
S_1=& \frac{1}{2^{n+1}}\sum_{k=1}^n 2^{k-1} \sum_{t_k,\dots,t_n=0}^1 t_kb_k=\frac{1}{2^{n+2}}2^n \sum_{t_n=0}^1 t_nb_n+\frac{1}{2^{n+2}}\sum_{k=1}^{n-1} 2^k \sum_{t_k,\dots,t_n=0}^1 t_kb_k \\
=& \frac14+\frac{1}{2^{n+2}}\sum_{k=1}^{n-1} 2^k \sum_{t_{k+1},\dots,t_n=0}^1 (1\oplus a_k(t_{k+1}\oplus \dots \oplus t_n)) \\
=& \frac14+\frac{1}{2^{n+2}}\sum_{k=1}^{n-1} 2^k 2^{n-k-1}(2-a_k)=\frac14+\frac18 \left(n-1+\sum_{k=1}^{n-1}(1-a_k)\right)=\frac18(n+h_n+1).
\end{align*}
To compute $S_2$, assume first that $k_1<k_2$. Then
\begin{align*}
\sum_{t_1,\dots,t_n=0}^1 t_{k_1}b_{k_2}=& 2^{k_1-1} \sum_{t_{k_1},\dots,t_n=0}^1 t_{k_1}b_{k_2}=2^{k_1-1} \sum_{t_{k_1+1},\dots,t_n=0}^1 b_{k_2}\\
=&2^{k_1-1}2^{k_2-k_1-1}\sum_{t_{k_2},\dots,t_n=0}^1 b_{k_2}=2^{k_1-1}2^{k_2-k_1-1}2^{n-k_2}=2^{n-2}.
\end{align*}
Similarly, we observe that we obtain the same result also for $k_1>k_2$ and hence
$$ S_2=\frac{1}{2^{n+1}}\sum_{\substack{k_1,k_2=1 \\ k_1 \neq k_2}}^n 2^{k_1-k_2}2^{n-2}=\frac18 \sum_{\substack{k_1,k_2=1 \\ k_1 \neq k_2}}^n 2^{k_1-k_2}=\frac{1}{8}\left(-n+2^{n+1}-4+\frac{2}{2^n}\right). $$
Now we put everything together to arrive at the claimed formula. \end{proof}
\paragraph{Case 2: $\boldsymbol{j}\in\mathcal{J}_2:=\{(-1,j_2): 0\leq j_2 \leq n-2\}$}
\begin{proposition} \label{prop2}
Let $\boldsymbol{j}=(-1,j_2)\in \mathcal{J}_2$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$. If $\mathcal{H}_{j_2}=\{1,\dots,j_2\}$, then
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=2^{-2n-2j_2-4}\left(-2^{2j_2+2}(a_{j_2+1}-1)+2^{n+j_2}(a_{j_2+1}a_{j_2+2}-2)+2^{2n+2}\sum_{k=1}^{j_2}\frac{s_k}{2^{n+1-k}}\right), $$
where the latter sum is zero for $j_2=0$.
Otherwise, let $w\in\{1,\dots,j_2\}$ be the greatest index with $a_w=1$. If $a_{j_2+1}=0$, then
\begin{align*} \mu_{\boldsymbol{j},\boldsymbol{m}}=&2^{-2n-2}-2^{-n-j_2-3}+2^{-n-2j_2+w-5}+2^{-2j_2-2}\varepsilon \\ &+2^{-2n-j_2+w-4}a_{j_2+2}(1-2(s_{w}\oplus \dots \oplus s_{j_2})). \end{align*}
If $a_{j_2+1}=1$, then
\begin{align*} \mu_{\boldsymbol{j},\boldsymbol{m}}=&-2^{-n-j_2-3}+2^{-j_2+w-2n-3}+2^{-2j_2-n+w-4}+2^{-2j_2-2}\varepsilon \\ &-2^{-2n-j_2+w-2}(s_{w}\oplus \dots \oplus s_{j_2})+2^{-n-j_2-4}a_{j_2+2}. \end{align*}
In the latter two expressions, we put $\varepsilon=\sum_{\substack{k=1 \\k\neq w} }^{j_2}\frac{t_k(m_2)}{2^{n+1-k}}$, where the values $t_k(m_2)$ depend only on $m_2$ and are either 0 or 1. Hence, in any case we have $|\mu_{\boldsymbol{j},\boldsymbol{m}}|\lesssim 2^{-n-j_2}$.
\end{proposition}
\begin{proof}
We only show the case where $j_2\geq 1$ and $\mathcal{H}_{j_2} \neq \{1,\dots,j_2\}$, since the other case is similar but easier.
Let $w\in\{1,\dots,j_2\}$ be the greatest index with $a_w=1$. By~\eqref{art3}, we need to evaluate the sum
$$ \sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}}(1-z_1)(1-|2m_2+1-2^{j_2+1}z_2|). $$
By~\eqref{cond}, the condition $\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}$ yields the identities $b_k=s_k$ for all $k\in \{1,\dots,j_2\}$, which lead to
$t_k=s_k$ for all $k\in\{1,\dots,j_2\}$ such that $a_k=0$. Assume
that $$ \{k\in \{1,\dots,j_2\}: a_k=1\}=\{k_1,\dots,k_l\} $$ for some $l\in\{1,\dots,j_2\}$, where $k_1<k_2<\dots<k_l$ and $k_l=w$.
We have $t_{k_i}=s_{k_i}\oplus s_{k_i+1}\oplus \dots \oplus s_{k_{i+1}}$ for all $i\in\{1,\dots,l-1\}$ and
$t_w=s_w \oplus \dots \oplus s_{j_2} \oplus t_{j_2+1}\oplus \dots \oplus t_n$. Hence, we can write
$$ 1-z_1=1-u-\frac{t_{j_2+1}}{2^{n-j_2}}-\frac{s_w \oplus \dots \oplus s_{j_2} \oplus t_{j_2+1}\oplus \dots \oplus t_n}{2^{n+1-w}}-\varepsilon, $$
where $u=2^{-1}t_n+\dots+2^{-(n-j_2-1)}t_{j_2+2}$ and $$\varepsilon=\varepsilon(m_2)=\sum_{\substack{k=1 \\k\neq w} }^{j_2}\frac{t_k(m_2)}{2^{n+1-k}}.$$
For the expression $1-|2m_2+1-2^{j_2+1}z_2|$ we find by~\eqref{z2}
$$ 1-|2m_2+1-2^{j_2+1}z_2|=1-|1-t_{j_2+1}\oplus a_{j_2+1}(t_{j_2+2}\oplus \dots \oplus t_n)-v|, $$
where $v=v(t_{j_2+2},\dots,t_n)=2^{-1}b_{j_2+2}+\dots+2^{-(n-j_2-1)}b_n.$ With these observations, we find (writing $T_j=t_j\oplus \dots\oplus t_n$ for $1\leq j \leq n-1$ and $t_w(t_{j_2+1})=s_{w}\oplus\dots\oplus s_{j_2}\oplus t_{j_2+1} \oplus T_{j_2+2}$)
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}}&(1-z_1)(1-|2m_2+1-2^{j_2+1}z_2|) \\
=& \sum_{t_{j_2+1}, \dots , t_n=0}^{1}\left(1-u-\frac{t_{j_2+1}}{2^{n-j_2}}-\frac{t_w(t_{j_2+1})}{2^{n+1-w}}-\varepsilon\right) \\
&\times\left(1-|1-t_{j_2+1}\oplus a_{j_2+1}T_{j_2+2}-v|\right) \\
=&\sum_{t_{j_2+2}, \dots , t_n=0}^{1} \bigg\{ \left(1-u-\frac{a_{j_2+1}T_{j_2+2}}{2^{n-j_2}}-\frac{t_w(a_{j_2+1}T_{j_2+2})}{2^{n+1-w}}-\varepsilon\right)v \\
&+ \left(1-u-\frac{a_{j_2+1}T_{j_2+2}\oplus 1}{2^{n-j_2}}-\frac{t_w(a_{j_2+1}T_{j_2+2}\oplus 1)}{2^{n+1-w}}-\varepsilon\right)(1-v) \bigg\}\\
=& \sum_{t_{j_2+2}, \dots , t_n=0}^{1} 2^{-n-1}\bigg(-2^{j_2+1}-2^w+2^{n+1}-2^{n+1}\varepsilon+2^wt_w(a_{j_2+1}T_{j_2+2})-2^{n+1}u \\ &+2^{j_2+1}v+2^w v
-2^{w+1}t_w(a_{j_2+1}T_{j_2+2})v-2^{j_2+1}(a_{j_2+1}T_{j_2+2})(2v-1)\bigg).
\end{align*}
Let first $a_{j_2+1}=1$ and hence $t_w(a_{j_2+1}T_{j_2+2})=t_w(T_{j_2+2})=s_{w}\oplus\dots\oplus s_{j_2}$ does not depend on the $t_i$.
Since
$$ \sum_{t_{j_2+2}, \dots , t_n=0}^{1} u=\sum_{t_{j_2+2}, \dots , t_n=0}^{1} v=\sum_{l=0}^{2^{n-j_2-1}-1}\frac{l}{2^{n-j_2+1}}=2^{n-j_2-2}-\frac12, $$
we obtain
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}}&(1-z_1)(1-|2m_2+1-2^{j_2+1}z_2|) \\
=& 2^{-n-1}\bigg((-2^{j_2+1}-2^w+2^{n+1}-2^{n+1}\varepsilon+2^wt_w(T_{j_2+2}))2^{n-j_2-1} \\
&+(2^w+2^{j_2+1}-2^{n+1}-2^{w+1}t_w(T_{j_2+2}))\left(2^{n-j_2-2}-\frac12\right) \\
&-2^{j_2+1}\sum_{t_{j_2+2}, \dots , t_n=0}^{1}T_{j_2+2}(2v-1)\bigg).
\end{align*}
We analyze the last expression. We find
\begin{align*} \sum_{t_{j_2+2}, \dots , t_n=0}^{1}& T_{j_2+2}(2v-1) \\ =& 2\sum_{t_{j_2+2}, \dots , t_n=0}^{1}T_{j_2+2}v-\sum_{t_{j_2+2}, \dots , t_n=0}^{1}T_{j_2+2}=2\sum_{t_{j_2+2}, \dots , t_n=0}^{1}T_{j_2+2}v-2^{n-j_2-2},\end{align*}
where
\begin{align*}
\sum_{t_{j_2+2}, \dots , t_n=0}^{1}T_{j_2+2}v=&\sum_{t_{j_2+2}, \dots , t_n=0}^{1} (t_{j_2+2}\oplus T_{j_2+3})\left(\frac{t_{j_2+2}\oplus a_{j_2+2} T_{j_2+3}}{2}+\frac{b_{j_2+3}}{4}+\cdots+\frac{b_n}{2^{n-j_2-1}}\right) \\
=& \sum_{t_{j_2+3}, \dots , t_n=0}^{1} \left(\frac{(T_{j_2+3}\oplus 1)\oplus a_{j_2+2} T_{j_2+3}}{2}+\frac{b_{j_2+3}}{4}+\cdots+\frac{b_n}{2^{n-j_2-1}}\right) \\
=& \sum_{t_{j_2+3}, \dots , t_n=0}^{1} \frac{1\oplus (1-a_{j_2+2})T_{j_2+3}}{2}+\sum_{l=0}^{2^{n-j_2-2}-1}\frac{l}{2^{n-j_2-1}} \\
=&\frac12 \sum_{t_{j_2+3}, \dots , t_n=0}^{1} \left(1-(1-a_{j_2+2})T_{j_2+3}\right)+2^{n-j_2-4}-\frac14 \\
=&\frac12 \left(2^{n-j_2-2}-(1-a_{j_2+2})2^{n-j_2-3}\right)+2^{n-j_2-4}-\frac14 \\
=&2^{n-j_2-4}(1+a_{j_2+2})+2^{n-j_2-4}-\frac14.
\end{align*}
We put everything together and apply~\eqref{art3} to find the result for $a_{j_2+1}=1$. \\
Now assume that $a_{j_2+1}=0$. Then $t_w(a_{j_2+1}T_{j_2+2})=t_w(0)=s_w\oplus\dots\oplus s_{j_2} \oplus T_{j_2+2}$. Hence we have
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}}&(1-z_1)(1-|2m_2+1-2^{j_2+1}z_2|) \\
=& 2^{-n-1}\bigg( (-2^{j_2+1}+2^{n+1}-2^w-2^{n+1}\varepsilon)2^{n-j_2-1}+(2^{j_2+1}-2^{n+1})(2^{n-j_2-2}-\frac12) \\
&+ 2^w \cdot 2^{n-j_2-2}-2^{w+1}\sum_{t_{j_2+2},\dots,t_n=0}^{1} vt_w(0)\bigg).
\end{align*}
We considered $\sum_{t_{j_2+2},\dots,t_n=0}^{1}t_w(0)=2^{n-j_2-2}.$ It remains to evaluate $\sum_{t_{j_2+2},\dots,t_n=0}^{1} vt_w(0).$ We find
\begin{eqnarray*}
\lefteqn{\sum_{t_{j_2+2},\dots,t_n=0}^{1} (s_w\oplus\dots\oplus s_{j_2}\oplus t_{j_2+2}\oplus T_{j_2+3})\left(\frac{t_{j_2+2}\oplus a_{j_2+2}T_{j_2+3}}{2}+\frac{b_{j_2+2}}{4}+\dots+\frac{b_n}{2^{n-j_2-1}}\right)} \\
&=& \sum_{t_{j_2+3},\dots,t_n=0}^{1} \left(\frac{(s_w\oplus\dots\oplus s_{j_2}\oplus T_{j_2+3} \oplus 1)\oplus a_{j_2+1}T_{j_2+3}}{2}+\frac{b_{j_2+2}}{4}+\dots+\frac{b_n}{2^{n-j_2-1}}\right) \\
&=& \frac12 \sum_{t_{j_2+3},\dots,t_n=0}^{1} (1-a_{j_2+2})T_{j_2+3}\oplus s_w\oplus\dots\oplus s_{j_2} \oplus 1 +\sum_{l=0}^{2^{n-j_2-2}-1}\frac{l}{2^{n-j_2-1}} \\
&=& 2^{n-j_2-4}(1+a_{j_2+2}(1-2(s_w\oplus\dots\oplus s_{j_2})))+2^{n-j_2-4}-\frac14.
\end{eqnarray*}
Again, we put everything together and apply~\eqref{art3} to find the result for $a_{j_2+1}=0$.
\end{proof}
\paragraph{Case 3: $\boldsymbol{j}\in\mathcal{J}_3:=\{(k,-1): k\geq n\}\cup\{(-1,k): k\geq n\}$}
\begin{proposition}
Let $\boldsymbol{j}\in \mathcal{J}_3$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$. Then we have
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=\frac{1}{2^{2k+3}}. $$
\end{proposition}
\begin{proof}
This claim follows from~\eqref{art2} and~\eqref{art3} together with the fact that no point of $\mathcal{P}_{\boldsymbol{a}}$ is contained
in the interior of $I_{\boldsymbol{j},\boldsymbol{m}}$ if $j_1\geq n$ or $j_2\geq n$. Hence, only the linear part of $\Delta_{\mathcal{P}_{\boldsymbol{a}}}$
contributes to the Haar coefficients in this case.
\end{proof}
\paragraph{Case 4: $\boldsymbol{j}\in\mathcal{J}_4:=\{(0,-1)\}$}
\begin{proposition} \label{prop5}
Let $\boldsymbol{j}\in \mathcal{J}_4$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$. Then we have
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=-\frac{1}{2^{n+3}}+\frac{1}{2^{2n+2}}. $$
\end{proposition}
\begin{proof}
For $\boldsymbol{z}=(z_1,z_2)\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}=\mathcal{P}_{\boldsymbol{a}} $ we have $1-z_2=1-\frac{b_1}{2}-\dots-\frac{b_n}{2^n}$ and
$$ 1-|2m_1+1-2z_1|=1-\left|1-t_n-\frac{t_{n-1}}{2}-\dots-\frac{t_1}{2^{n-1}}\right| $$ by~\eqref{z1}.
We therefore find, after summation over $t_n$,
\begin{align*}
\sum_{\boldsymbol{z} \in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}}&(1-|2m_1+1-2z_1|)(1-z_2) \\
=&\sum_{t_1,\dots,t_n=0}^1 \left(1-\left|1-t_n-\frac{t_{n-1}}{2}-\dots-\frac{t_1}{2^{n-1}}\right|\right)\left(1-\frac{b_1(t_n)}{2}-\dots-\frac{b_n(t_n)}{2^n}\right) \\
=&\sum_{t_1,\dots,t_{n-1}=0}^1 \left(u(1-v(0))+(1-u)\left(1-v(1)-\frac{1}{2^n}\right)\right) \\
=& \sum_{t_1,\dots,t_{n-1}=0}^1 \left(1-\frac{1}{2^n}-v(1)+\frac{1}{2^n}u+uv(1)-uv(0)\right) \\
=& 2^{n-1}\left(1-\frac{1}{2^n}\right)+\left(\frac{1}{2^n}-1\right)(2^{n-2}-2^{-1})+\sum_{t_1,\dots,t_{n-1}=0}^1uv(1)-\sum_{t_1,\dots,t_{n-1}=0}^1uv(0).
\end{align*}
Here we use the short-hands $u=2^{-1}t_{n-1}+\dots+2^{-n+1}t_1$ and $v(t_n)=2^{-1}b_1(t_n)+\dots+2^{-n+1}b_{n-1}(t_n)$ and the fact
that $\sum_{t_1,\dots,t_{n-1}=0}^1 u= \sum_{t_1,\dots,t_{n-1}=0}^1 v(1)=2^{n-2}-2^{-1}$. It is not difficult to observe
that $\sum_{t_1,\dots,t_{n-1}=0}^1 uv(0)= \sum_{t_1,\dots,t_{n-1}=0}^1 uv(1)$; hence
$$ \sum_{\boldsymbol{z} \in \mathcal{P}_{\boldsymbol{a}}}(1-|2m_1+1-2z_1|)(1-z_2)=\frac14+2^{n-2}-\frac{1}{2^{n+1}}. $$
The rest follows with~\eqref{art2}.
\end{proof}
For the following two propositions, we use the shorthand $R=r_1\oplus \dots \oplus r_{j_1}$.
\paragraph{Case 5: $\boldsymbol{j}\in\mathcal{J}_5:=\{(j_1,-1): 1\leq j_1 \leq n-2 \}$}
\begin{proposition}
Let $\boldsymbol{j}\in \mathcal{J}_5$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$. Then we have
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=2^{-2n-2}-2^{-n-j_1-3}+2^{-2j_1-2}\varepsilon-2^{-2n-1}R-2^{-n-j_1-3}a_{n-j_1-1}(1-2R), $$
where \begin{equation} \label{abcd} \varepsilon=\varepsilon(m_1)=\frac{r_1}{2^n}+\sum_{k=2}^{j_1}\frac{r_k \oplus a_{n+1-k}(r_{k-1}\oplus\dots\oplus r_1)}{2^{n+1-k}}.\end{equation}
Hence, we have $|\mu_{\boldsymbol{j},\boldsymbol{m}}|\lesssim 2^{-n-j_1}$ in any case.
\end{proposition}
\begin{proof}
By~\eqref{art2}, we need to evaluate the sum
$$\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} (1-|2m_1+1-2^{j_1+1}z_1|)(1-z_2).$$
The condition $\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}$ forces $t_n=r_1,\dots t_{n+1-j_1}=r_{j_1}$ and therefore
\begin{align*}
1-z_2=& 1-\frac{b_1}{2}-\dots-\frac{b_n}{2^n}=1-v(t_{n-j_1})-\frac{t_{n-j_1}\oplus a_{n-j_1} R}{2^{n-j_1-1}}-\varepsilon,
\end{align*}
where
\begin{align*} v(t_{n-j_1})&=\frac{b_1}{2}+\dots+\frac{b_{n-j_1-1}}{2^{n-j_1-1}} \\ &=\frac{t_1\oplus a_1(t_2\oplus\dots\oplus t_{n-j_1}\oplus R)}{2}+\dots+\frac{t_{n-j_1-1}\oplus a_{n-j_1-1}(t_{n-j_1}\oplus R)}{2^{n-j_1-1}} \end{align*}
and $\varepsilon$ as in~\eqref{abcd}.
Further, by~\eqref{z1} we write $2m_1+1-2^{j_1+1}z_1=1-t_{n-j_1}-u$, where $u=2^{-1}t_{n-j_1-1}+\dots+2^{j_1-n+1}t_1$. Then
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} &(1-|2m_1+1-2^{j_1+1}z_1|)(1-z_2) \\
=& \sum_{t_1,\dots,t_{n-j_1}=0}^{1}\left(1-v(t_{n-j_1})-\frac{t_{n-j_1}\oplus a_{n-j_1} R}{2^{n-j_1-1}}-\varepsilon\right)(1-|1-t_{n-j_1}-u|) \\
=& \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} \Bigg\{\left(1-v(0)-\frac{a_{n-j_1} R}{2^{n-j_1-1}}-\varepsilon\right)u\\&+\left(1-v(1)-\frac{1\oplus a_{n-j_1} R}{2^{n-j_1-1}}-\varepsilon\right)(1-u)\Bigg\}\\
=& \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} \{ 1-2^{j_1-n}-\varepsilon+2^{j_1-n}a_{n-j_1}R+2^{j_1-n}u-v(1)\\ &-2^{1+j_1-n}a_{n-j_1}Ru+uv(1)-uv(0) \} \\
=& 2^{n-j_1-1}( 1-2^{j_1-n}-\varepsilon+2^{j_1-n}a_{n-j_1}R) \\&+\left(2^{n-j_1-2}-2^{-1}\right)(2^{j_1-n}-1-2^{1+j_1-n}a_{n-j_1}R)
+\sum_{t_1,\dots,t_{n-j_1-1}=0}^{1}(uv(1)-uv(0)).
\end{align*}
We understand $b_1,\dots,b_{n-j_1-1}$ as functions of $t_{n-j_1}$ and have
\begin{align*}
\sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} uv(0)=& \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1}\left(\frac{t_{n-j_1-1}}{2}+\dots+\frac{t_1}{2^{n-j_1-1}}\right)\left(\frac{b_1(0)}{2}+\dots+\frac{b_{n-j_1-1}(0)}{2^{n-j_1-1}}\right) \\
=& \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} \left(\sum_{k=1}^{n-j_1-1} \frac{t_kb_k(0)}{2^{n-j_1-k}2^k}+\sum_{\substack{k_1,k_2=0 \\ k_1 \neq k_2}}^{n-j_1-1} \frac{t_{k_1}b_{k_2}(0)}{2^{n-j_1-k_1}2^{k_2}}\right).
\end{align*}
The first sum simplifies to
\begin{align*}
\sum_{k=1}^{n-j_1-1}& 2^{k-1}\sum_{t_k,\dots,t_{n-j_1-1}=0}^{1}\frac{t_kb_k(0)}{2^{n-j_1-k}2^k} \\
=& \frac{1}{2^{n-j_1}}\sum_{k=1}^{n-j_1-2}2^{k-1}\sum_{t_k,\dots,t_{n-j_1-1}=0}^{1}t_k(t_k\oplus a_k(t_{k+1}\oplus\dots\oplus t_{n-j_1-1}\oplus R)) \\
&+\frac{1}{2^{n-j_1}}2^{n-j_1-2}\sum_{t_{n-j_1-1}=0}^{1}t_{n-j_1-1}(t_{n-j_1-1}\oplus a_{n-j_1-1}R) \\
=& \frac{1}{2^{n-j_1}}\sum_{k=1}^{n-j_1-2}2^{k-1}\sum_{t_{k+1},\dots,t_{n-j_1-1}=0}^{1}(1\oplus a_k(t_{k+1}\oplus\dots\oplus t_{n-j_1-1}\oplus R)) \\
&+\frac{1}{4}(1 \oplus a_{n-j_1-1}(R\oplus 1)) \\
=& \frac{1}{2^{n-j_1}}\sum_{k=1}^{n-j_1-2}2^{k-1}2^{n-j_1-k-2}(2-a_k)+\frac14 (1 \oplus a_{n-j_1-1}(R\oplus 1)) \\
=& \frac{1}{8}\sum_{k=1}^{n-j_1-2}(2-a_k)+\frac14 (1 \oplus a_{n-j_1-1}(R\oplus 1)).
\end{align*}
Basically by the same arguments as in the proof of Proposition~\ref{prop1} we also find
\begin{align*}
\sum_{t_1,\dots,t_{n-j_1-1}=0}^{1}\sum_{\substack{k_1,k_2=0 \\ k_1 \neq k_2}}^{n-j_1-1} \frac{t_{k_1}b_{k_2}}{2^{n-j_1-k_1}2^{k_2}}=\frac18 \sum_{\substack{k_1,k_2=0 \\ k_1 \neq k_2}}^{n-j_1-1} 2^{k_1-k_2}.
\end{align*}
Hence, we obtain
$$ \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} uv(0)=\frac{1}{8}\sum_{k=1}^{n-j_1-2}(2-a_k)+\frac14(1 \oplus a_{n-j_1-1}(R\oplus 1))+\frac18 \sum_{\substack{k_1,k_2=0 \\ k_1 \neq k_2}}^{n-j_1-1} 2^{k_1-k_2}. $$
We can evaluate $\sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} uv(1)$ in almost the same way; the result is
$$ \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} uv(1)=\frac{1}{8}\sum_{k=1}^{n-j_1-2}(2-a_k)+\frac14 (1 \oplus a_{n-j_1-1}R)+\frac18 \sum_{\substack{k_1,k_2=0 \\ k_1 \neq k_2}}^{n-j_1-1} 2^{k_1-k_2}. $$
Hence the difference of these two expressions is given by
$$ \sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} uv(1)-\sum_{t_1,\dots,t_{n-j_1-1}=0}^{1} uv(0)=\frac14 a_{n-j_1-1}(2R-1). $$
Now we put everything together and use~\eqref{art2} to find the claimed result on the Haar coefficients.
\end{proof}
\paragraph{Case 6: $\boldsymbol{j}\in\mathcal{J}_6:=\{(j_1,j_2): j_1+j_2 \leq n-3 \}$}
\begin{proposition}
Let $\boldsymbol{j}\in \mathcal{J}_6$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$. If $\mathcal{H}_{j_2}=\{1,\dots,j_2\}$ or if $j_2=0$, then we have
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=2^{-2n-2}(1-2a_{n-j_1}R)(1-a_{j_2+1}). $$
Otherwise, let $w\in\{1,\dots,j_2\}$ be the greatest index with $a_w=1$. If $a_{j_2+1}=0$, then
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=2^{-2n-2}(1-2a_{n-j_1}R). $$
If $a_{j_2+1}=1$, then
$$ \mu_{\boldsymbol{j},\boldsymbol{m}}=-2^{-2n-j_2+w-3}(1-2a_{n-j_1}R)(1-2(s_w\oplus\dots\oplus s_{j_2})). $$
Note that for $j_1=0$ we set $a_{n-j_1}R=0$ in all these formulas. Hence, in any case we have $|\mu_{\boldsymbol{j},\boldsymbol{m}}|\lesssim 2^{-2n}$.
\end{proposition}
\begin{proof}
The proof is similar in all cases; hence we only treat the most complicated case where $j_2\geq 1$ and $\mathcal{H}_{j_2} \neq \{1,\dots,j_2\}$.
By~\eqref{art4}, we need to study the sum
$$ \sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} (1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|), $$
where the condition $\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}$ forces $t_{n+1-k}=r_k$ for all $k\in\{1,\dots,j_1\}$
as well as $b_k=s_k$ for all $k\in\{1,\dots,j_2\}$.
We have already seen in the proof of Proposition 2 that the latter equalities allow us to
express the digits $t_k$ by the digits $s_1,\dots,s_{j_2}$ of $m_2$ for all $k\in\{1,\dots,j_2\}\setminus\{w\}$.
We also have $t_w=s_w\oplus\dots\oplus s_{j_2}\oplus t_{j_2+1}\oplus\dots\oplus t_n$. With~\eqref{z1}, these observations lead to
$$ 2m_1+1-2^{j_1+1}z_1=1-t_{n-j_1}-u-2^{j_1+j_2-n+1}t_{j_2+1}-2^{j_1+w-1}t_w-\varepsilon_2(m_2), $$
where $u=2^{-1}t_{n-j_1-1}+\dots+2^{j_1+j_2-n+2}t_{j_2+2}$ and
$ \varepsilon_2 $ is determined by $m_2$.
Further, we write with~\eqref{z2}
$$ 2m_2+1-2^{j_2+1}z_2=1-b_{j_2+1}-v-2^{j_1+j_2-n+1}b_{n-j_1}-\varepsilon_1(m_1), $$
where $v=v(t_{n-j_1})=2^{-1}b_{j_1+2}+\dots+2^{j_1+j_2-n+2}b_{n-j_1-1}$ and $\varepsilon_1$ is obviously determined by $m_1$. Hence, we have
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} &(1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|) \\
=& \sum_{t_{j_2+1},\dots,t_{n-j_1}=0}^1 \left(1-|1-t_{n-j_1}-u-2^{j_1+j_2-n+1}t_{j_2+1}-2^{j_1+w-1}t_w-\varepsilon_2(m_2)|\right) \\
&\times \bigg(1-|1-t_{j_2+1}\oplus a_{j_2+1}(t_{j_2+2}\oplus \dots \oplus t_{n-j_1}\oplus R)-v(t_{n-j_1})\\ &-2^{j_1+j_2-n+1}(t_{n-j_1}\oplus a_{n-j_1}R)-\varepsilon_1)|\bigg).
\end{align*}
Recall we may write $t_w=s_w\oplus \dots \oplus s_{j_2} \oplus t_{j_2+1}\oplus t_{j_2+2}\oplus\dots\oplus t_{n-j_1-1} \oplus t_{n-j_1}\oplus R$. We stress the
dependence of $t_w$ on $t_{j_2+1}\oplus t_{n-j_1}$ by writing $t_w(t_{j_2+1}\oplus t_{n-j_1})$. If $a_{j_2+1}=0$, then we
obtain after summation over $t_{j_2+1}$ and $t_{n-j_1}$
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} &(1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|) \\
=& \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} \bigg\{
(u+2^{j_1+w-1}t_w(0)+\varepsilon_2)(v(0)+2^{j_1+j_2-n+1}a_{n-j_1}R+\varepsilon_1)\\
&+ (u+2^{j_1+j_2-n+1}+2^{j_1+w-1}t_w(1)+\varepsilon_2)(1-v(0)-2^{j_1+j_2-n+1}a_{n-j_1}R-\varepsilon_1) \\
&+ (1-u-2^{j_1+w-1}t_w(1)-\varepsilon_2)(v(0)+2^{j_1+j_2-n+1}(a_{n-j_1}R\oplus 1)+\varepsilon_1)\\
&+ (1-u-2^{j_1+j_2-n+1}-2^{j_1+w-1}t_w(0)-\varepsilon_2)(1-v(1) \\ &-2^{j_1+j_2-n+1}(a_{n-j_1}R\oplus 1)-\varepsilon_1)
\bigg\} \\
=& \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} \bigg\{1+2^{2(n+j_1+j_2+1)}+2^{j_1+w-1}-2^{2j_1+j_2-n+w}-2^{2j_1+2j_2-2n+3}a_{n-j_1}R \\
&+(2^{1+2j_1+j_2-n+w}-2^{j_1+w})t_w(0) +2^{j_1+w}(2t_w(0)-1)+2^{n+j_1+j_2+1}(v(1)-v(0)) \\
&-2^{w+j_1-1}(v(1)+v(0))+2^{j_1+w}(t_w(0)v(0)+t_w(0)v(1)).\bigg\}
\end{align*}
We regarded $t_w(1)=1-t_w(0)$. By standard argumentation, we find
$$ \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}v(0)=\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}v(1)=2^{n-j_1-j_2-3}-\frac12 $$
and
$$ \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} t_w(0)=\sum_{t_{j_2+2},\dots,t_{n-j_1-2}=0}^{1}1=2^{n-j_1-j_2-3}. $$
We use the short-hand $T=t_{j_2+3}\oplus\dots\oplus t_{n-j_1-1}$, which allows us to write
\begin{align*}
\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} &t_w(0)v(0)=\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} (s_w\oplus\dots \oplus s_{j_2}\oplus t_{j_2+2} \oplus \dots\oplus t_{n-j_1-1}\oplus R) \\
&\times \bigg( \frac{t_{j_2+2}\oplus a_{j_2+2}(t_{j_2+3}\oplus\dots\oplus t_{n-j_1-1}\oplus R)}{2} \\
&+\frac{t_{j_2+3}\oplus a_{j_2+3}(t_{j_2+4}\oplus\dots\oplus t_{n-j_1-1}\oplus R)}{4}+\dots+\frac{t_{n-j_1-1}\oplus a_{n-j_1-1}R}{2^{n-j_1-j_2-2}}\bigg) \\
=& \sum_{t_{j_2+3},\dots,t_{n-j_1-1}=0}^{1}\frac12 (s_w\oplus\dots \oplus s_{j_2}\oplus T \oplus R \oplus 1 \oplus a_{j_2+2}(T\oplus R)) \\&+\sum_{l=0}^{2^{n-j_1-j_2-3}-1}\frac{l}{2^{n-j_1-j_2-2}} \\
=& \sum_{t_{j_2+3},\dots,t_{n-j_1-1}=0}^{1}\frac12 (s_w\oplus\dots \oplus s_{j_2}\oplus 1 \oplus (1-a_{j_2+2})(T\oplus R))\\ &+2^{n-j_1-j_2-5}-\frac14.
\end{align*}
Similarly, we can show
\begin{align*}
\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} t_w(0)v(1)=&\sum_{t_{j_2+3},\dots,t_{n-j_1-1}=0}^{1}\frac12 (s_w\oplus\dots \oplus s_{j_2} \oplus (1-a_{j_2+2})(T\oplus R \oplus 1))
\\ &+2^{n-j_1-j_2-5}-\frac14
\end{align*}
and therefore
$$ \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} t_w(0)(v(0)+v(1))=2^{n-j_1-j_2-4}+2^{n-j_1-j_2-5}-\frac14, $$
a fact which can be found by distinguishing the cases $a_{j_2+2}=0$ and $a_{j_2+2}=1$.
We put everything together and obtain
\begin{align*} \sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} &(1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|) \\ &=2^{j_1+j_2-n}+2^{n-j_1-j_2-2}-2^{-n+j_1+j_2+1}a_{n-j_1}R, \end{align*}
which leads to the claimed result for $a_{j_2+1}=0$ via~\eqref{art4}.\\
Now assume that $a_{j_2+1}=1$. In this case, it is more convenient to consider $t_w$ as a function
of $t_{j_2+1}\oplus\dots\oplus t_{n-j_1}\oplus R$. We obtain after summation over $t_{j_2+1}$ and $t_{n-j_1}$
\begin{align*}
\sum_{\boldsymbol{z}\in \mathcal{P}_{\boldsymbol{a}}\cap I_{\boldsymbol{j},\boldsymbol{m}}} &(1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|) \\
=& \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} \bigg\{
(u+2^{j_1+j_2-n+1}(T\oplus R)+2^{j_1+w-1}t_w(0)+\varepsilon_2)\\&\times(v(0) +2^{j_1+j_2-n+1}a_{n-j_1}R+\varepsilon_1)\\
&+ (u+2^{j_1+j_2-n+1}(T\oplus R\oplus 1)+2^{j_1+w-1}t_w(1)+\varepsilon_2)\\&\times(1-v(0)-2^{j_1+j_2-n+1}a_{n-j_1}R-\varepsilon_1) \\
&+ (1-u-2^{j_1+j_2-n+1}(T\oplus R\oplus 1)-2^{j_1+w-1}t_w(0)-\varepsilon_2)\\&\times(v(1)+2^{j_1+j_2-n+1}(a_{n-j_1}R\oplus 1)+\varepsilon_1)\\
&+ (1-u-2^{j_1+j_2-n+1}(T\oplus R)-2^{j_1+w-1}t_w(1)-\varepsilon_2)\\&\times(1-v(1)-2^{j_1+j_2-n+1}(a_{n-j_1}R\oplus 1)-\varepsilon_1)
\bigg\} \\
=& \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1} 2^{-2n}\bigg\{2^{n+j_1+j_2+1}+2^{2j_1+j_2+w+1}(1-2t_w(0)+2a_{n-j_1}R(2t_w(0)-1)) \\
&-2^{2(j_1+j_2+1)}+2^{2n}+(2^{2j_1+2j_2+3}-2^{n+j_1+j_2+2})(T\oplus R) \\ &+2^{n+j_1+j_2+2}\varepsilon_1(2(T\oplus R)-1)\\
&-2^{n+j_1+j_2+1}(v(0)+v(1))+2^{n+j_1+j_2+1}(2t_w(0)-1)(v(1)-v(0))\\&+2^{n+j_1+w}(v(0)+v(1)) +2^{n+j_1+j_2+2}(T\oplus R)(v(1)+v(0)).\bigg\}
\end{align*}
Again, we used $t_w(1)=1-t_w(0)$. Note that $t_w(0)=s_w\oplus\dots\oplus s_{j_2}$ is independent of the
digits $t_{j_2+2},\dots,t_{n-j_1-1}$. We have
$$ \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}T=\sum_{t_{j_2+2},\dots,t_{n-j_1-2}=0}^{1}1=2^{n-j_1-j_2-3} $$
and we know the sums $\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}v(0)$ and $\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}v(1)$ from above. Similarly as above we can show
\begin{align*}
\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}&(T\oplus R)v(0) \\
=& \frac12 \sum_{t_{j_2+3},\dots,t_{n-j_1-1}=0}^{1} (1\oplus (1-a_{j_2+2})(t_{j_2+3}\oplus \dots\oplus t_{n-j_1-1}\oplus R)) \\&+\sum_{l=0}^{2^{n-j_1-j_2-3}-1} \frac{l}{2^{n-j_1-j_2-2}}
\end{align*}
as well as
\begin{align*}
\sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}&(T\oplus R)v(1) \\
=& \frac12 \sum_{t_{j_2+3},\dots,t_{n-j_1-1}=0}^{1} (1-a_{j_2+2})(t_{j_2+3}\oplus \dots\oplus t_{n-j_1-1}\oplus R\oplus 1)\\&+\sum_{l=0}^{2^{n-j_1-j_2-3}-1} \frac{l}{2^{n-j_1-j_2-2}},
\end{align*}
which yields
$$ \sum_{t_{j_2+2},\dots,t_{n-j_1-1}=0}^{1}(T\oplus R)(v(1)+v(0))=2^{n-j_1-j_2-4}+2\sum_{l=0}^{2^{n-j_1-j_2-3}-1} \frac{l}{2^{n-j_1-j_2-2}}. $$
Now we can combine our results with~\eqref{art4} to obtain the claimed result.
\end{proof}
\paragraph{Case 7: $\boldsymbol{j}\in\mathcal{J}_7:=\{(j_1,j_2): 0\leq j_1,j_2 \leq n-1 \text{ and } j_1+j_2\geq n-2\}$}
\begin{proposition} Let $\boldsymbol{j}\in \mathcal{J}_7$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$.
Then we have $|\mu_{\boldsymbol{j},\boldsymbol{m}}|\lesssim 2^{-n-j_1-j_2}$ for all $\boldsymbol{m}\in\mathbb{D}_{\boldsymbol{j}}$ and $|\mu_{\boldsymbol{j},\boldsymbol{m}}|=2^{-2j_1-2j_2-4}$ for all
but at most $2^n$ elements $\boldsymbol{m}\in\mathbb{D}_{\boldsymbol{j}}$.
\end{proposition}
\begin{proof}
At most $2^n$ of the $2^{|\boldsymbol{j}|}$ dyadic boxes $I_{\boldsymbol{j},\boldsymbol{m}}$ for $\boldsymbol{m}\in\mathbb{D}_{\boldsymbol{j}}$ contain points. For the empty boxes, only the linear part of the discrepancy function contributes
to the corresponding Haar coefficients; hence $|\mu_{\boldsymbol{j},\boldsymbol{m}}|=2^{-2j_1-2j_2-4}$ for all
but at most $2^n$ elements $\boldsymbol{m}\in\mathbb{D}_{\boldsymbol{j}}$. The non-empty boxes contain at most 4 points. Hence we find by~\eqref{art4}
\begin{align*}
|\mu_{\boldsymbol{j},\boldsymbol{m}}|\leq& 2^{-n-j_1-j_2-2}\sum_{\boldsymbol{z} \in \mathcal{P}\cap I_{\boldsymbol{j},\boldsymbol{m}}}|(1-|2m_1+1-2^{j_1+1}z_1|)(1-|2m_2+1-2^{j_2+1}z_2|)| \\&+2^{-2j_1-2j_2-4} \\
\leq& 2^{-n-j_1-j_2-2}4 +2^{-2j_1-2j_2-4}\leq 2^{-n-j_1-j_2}+2^{-j_1-j_2-(n-2)-4}\lesssim 2^{-n-j_1-j_2}.
\end{align*}
\end{proof}
\paragraph{Case 8: $\boldsymbol{j}\in\mathcal{J}_8:=\{(n-1,-1),(-1,n-1)\}$}
\begin{proposition} Let $\boldsymbol{j}\in \mathcal{J}_8$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$.
Let $\boldsymbol{j}=(n-1,-1)$ or $\boldsymbol{j}=(-1,n-1)$. Then $\mu_{\boldsymbol{j},\boldsymbol{m}}\lesssim 2^{-2n}$.
\end{proposition}
\begin{proof}
At most 2 points lie in $I_{\boldsymbol{j},\boldsymbol{m}}$. Hence, if $\boldsymbol{j}=(n-1,-1)$, then by~\eqref{art2} we have
\begin{align*} |\mu_{\boldsymbol{j},\boldsymbol{m}}|\leq& 2^{-n-j_1-1}\sum_{\boldsymbol{z} \in \mathcal{P}\cap I_{\boldsymbol{j},\boldsymbol{m}}}|(1-|2m_1+1-2^{j_1+1}z_1|)(1-z_2)|+2^{-2j_1-3} \\
=& 2^{-n-j_1-1}2+2^{-2j_1-3}=2^{-2n+1}+2^{-2n-1}\lesssim 2^{-2n}.
\end{align*}
The case $\boldsymbol{j}=(-1,n-1)$ can be shown the same way.
\end{proof}
\paragraph{Case 9: $\boldsymbol{j}\in\mathcal{J}_{9}:=\{(j_1,j_2): j_1\geq n \text{ or } j_2\geq n\}$}
\begin{proposition} Let $\boldsymbol{j}\in \mathcal{J}_{9}$ and $\boldsymbol{m}\in \mathbb{D}_{\boldsymbol{j}}$.
Then $\mu_{\boldsymbol{j},\boldsymbol{m}}=-2^{-2j_1-2j_2-4}$.
\end{proposition}
\begin{proof}
The reason is that no point is contained in the interior of $I_{\boldsymbol{j},\boldsymbol{m}}$ in this case and hence only the
linear part of the discrepancy function contributes to the Haar coefficient in~\eqref{art4}.
\end{proof}
\noindent{\bf Authors' Address:}
\noindent Ralph Kritzinger and Friedrich Pillichshammer, Institut f\"{u}r Finanzmathematik und angewandte Zahlentheorie, Johannes Kepler Universit\"{a}t Linz, Altenbergerstra{\ss}e 69, A-4040 Linz, Austria.\\
{\bf Email:} ralph.kritzinger(at)jku.at and friedrich.pillichshammer(at)jku.at
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Cryptanalysis of the Hillery-Bu\v{z}ek-Berthiaume quantum secret-sharing protocol}
\author{Su-Juan Qin$^{1,2}$, Fei Gao$^{1}$, Qiao-Yan Wen$^{1}$, and Fu-Chen Zhu$^{3}$\\
(1. State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications, Beijing, 100876, China) \\
(2. School of Science, Beijing University of Posts and Telecommunications, Beijing, 100876, China)\\
(3. National Laboratory for Modern Communications, P.O.Box 810, Chengdu, 610041, China)\\ Email: qsujuan@sohu.com}
\date{\today}
\begin{abstract}
The participant attack is the most serious threat for quantum
secret-sharing protocols. We present a method to analyze the
security of quantum secret-sharing protocols against this kind of
attack taking the scheme of Hillery, Bu\v{z}ek, and Berthiaume
(HBB) [Phys. Rev. A \textbf{59} 1829 (1999)] as an example. By
distinguishing between two mixed states, we derive the necessary
and sufficient conditions under which a dishonest participant can
attain all the information without introducing any error, which
shows that the HBB protocol is insecure against dishonest
participants. It is easy to verify that the attack scheme of
Karlsson, Koashi, and Imoto [Phys. Rev. A 59, 162 (1999)] is a
special example of our results. To demonstrate our results
further, we construct an explicit attack scheme according to the
necessary and sufficient conditions. Our work completes the
security analysis of the HBB protocol, and the method presented
may be useful for the analysis of other similar protocols.
\end{abstract}
\pacs{03.67.Dd, 03.67.Hk}
\maketitle
\section{\label{sec:level1}Introduction}
Quantum cryptography is a technique which permits parties to
communicate over an open channel in a secure way. Quantum secret
sharing (QSS) is an important branch of quantum cryptography,
which allows a secret to be shared among many participants in such
a way that only the authorized groups can reconstruct it. In fact,
there are two types in quantum secret sharing, that is, the
sharing of classical secret and that of quantum information. The
former was first proposed by Hillery, Bu\v{z}ek and Berthiaume
\cite{Hbb99} (called HBB hereafter), and the latter was first
presented by Cleve, Gottesman and Lo \cite{Cgl99}. Since the above
pioneering works appeared, QSS has attracted a great deal of
attention (please see \cite{KKI,type1} for the sharing of
classical secret and \cite{type2} for that of quantum
information).
As we know, designing schemes and analyzing their security are
two inherent directions of cryptography, which are opposite to but
stimulate each other. Each of them is necessary to the development
of cryptography. This is also the case in quantum
cryptography~\cite{L96,FGG97,DB,BM,SP00,GL03}. However, because
the theory of quantum information remains still far from
satisfactorily known, the development of quantum cryptanalysis is
relatively slow, especially in QSS. In fact, it is complex to
analyze the security of QSS protocols because multiple
participants are involved and not all are honest, and therefore
few results \cite{SGPRL,SGPRA,SSZ} have been obtained.
In this paper, we present a method to analyze the security of QSS
protocols taking the HBB scheme \cite{Hbb99} as an example. The
security of HBB has been discussed from several aspects.
Ref.~\cite{Hbb99} analyzed an intercept-resend attack by a
dishonest participant and an entangle-measure attack by an
external attacker. References~\cite{SGPRL,SGPRA,SSZ} investigated
the relation between security and the violation of some Bell's
inequalities by analyzing several eavesdropping scenarios.
However, their analyses are incomplete because not all the
individual attacks are covered. Reference~\cite{KKI} showed that
the HBB scheme was insecure to a skillful attack, and gave a
remedy; but this analysis is not systematic. Here, we consider the
original HBB protocol and give a complete and systematic analysis
of security against a participant attack. From our analysis we
also get the same result as Ref.~\cite{KKI}, and, moreover, we
derive the necessary and sufficient (NAS) conditions for a
successful attack, which is more important. From the NAS
conditions, we can find many attack schemes easily (including the
eavesdropping strategy in Ref.~\cite{KKI}), which will deal with
the difficulty that breaking a protocol is unsystematic. Although
the result is partly not new~\cite{KKI}, the method (which is
indeed our main aim) is. This method might be useful for the
analysis of other protocols.
The paper is structured as follows. In Sec. II, we review the HBB
protocol briefly. In Sec. III, we analyze general participant
attack strategies, and derive the NAS conditions under which a
dishonest participant attains the whole secret without introducing
any error. In Sec. IV, we give a simple scheme to achieve the
attack successfully. Finally, we give a conclusion and discussion
in Sec. V. Cumbersome computations and formulas are summarized in
the Appendix.
\section{The HBB protocol}
Let us introduce the principle of the HBB scheme~\cite{Hbb99}
first. The dealer Alice wants to divide her secret message between
her two agents, Bob, and Charlie. At the beginning, Alice prepares
a sequence of GHZ triplets in the state
$(1/\sqrt{2})(|000\rangle+|111\rangle)_{ABC}$, where the
subscripts \emph{A}, \emph{B} and \emph{C} denote the three
particles for Alice, Bob and Charlie, respectively. For each
triplet, Alice keeps particle \emph{A} and sends particle \emph{B}
to Bob and \emph{C} to Charlie. As in the Bennett-Brassard 1984
scheme~\cite{Bb84}, all the three parties choose randomly
the measuring basis (MB) $x$ or $y$ to measure their particles and
then they publish their MBs. The announcement should be done in
the following way: Bob and Charlie both send their MBs to Alice,
who then sends all three MBs to Bob and Charlie~\cite{Note}. Note
that no one can learn the others' bases before having to reveal his
own; otherwise, as pointed out in Ref.~\cite{Hbb99}, he could cheat more
successfully. When the number of the parties who choose $x$ is
odd, the outcomes are useful. Thanks to the features of the GHZ
state, Charlie and Bob can deduce the outcomes of Alice when they
cooperate (see Table~\ref{tab:table1}~\cite{Hbb99}). To check for
eavesdropping, Alice chooses randomly a large subset of the
outcomes to analyze the error rate. That is, Alice requires Bob
and Charlie to announce their outcomes of the samples in public.
If the error rate is lower than a threshold value, they keep the
remaining outcomes as secret key.
\begin{table}
\caption{\label{tab:table1} Correlations between Alice's, Bob's
measurement results and Charlie's results. Alice's (Bob's)
measurement results are listed in the first column (line).}
\begin{ruledtabular}
\begin{tabular}{ccccc}
Alice/Bob & $x^{+}$ & $x^{-}$ & $y^{+}$ & $y^{-}$ \\
\hline
$x^{+}$ & $x^{+}$ & $x^{-}$ & $y^{-}$ & $y^{+}$\\
$x^{-}$ & $x^{-}$ & $x^{+}$ & $y^{+}$ & $y^{-}$\\
$y^{+}$ & $y^{-}$ & $y^{+}$ & $x^{-}$ & $x^{+}$\\
$y^{-}$ & $y^{+}$ & $y^{-}$ & $x^{+}$ & $x^{-}$\\
\end{tabular}
\end{ruledtabular}
\end{table}
\section{The attack on the HBB protocol}
Now let us give a complete discussion of the security of the HBB
scheme. As pointed out in Refs.~\cite{Qin06,Deng, Gao07}, a
participant generally has more advantages in an attack than an
outside eavesdropper in the secret-sharing protocols. If a QSS
protocol is secure for a dishonest participant, it is secure for
any eavesdropper. Therefore, to analyze the security, we should
concentrate our attention on participant attack. Without loss of
generality, we assume the attacker is Charlie, denoted Charlie*.
He seeks to learn Alice's secret himself without introducing any
error during the eavesdropping check. In order to take advantage
of Alice's and Bob's delayed information about their MBs, a wise
attack strategy for Charlie* is as follows. When the qubits
\emph{B} and \emph{C} are sent out by Alice, he lets an ancilla,
initially in some state $|\chi\rangle$, interact unitarily with
them (the dimensionality of the ancilla is a free variable which
causes no loss in generality). After the interaction, Charlie*
sends qubit \emph{B} to Bob, stores qubit \emph{C} and his ancilla
until Alice announces the MBs used by the three parties. Finally,
Charlie* measures the qubits at his site to achieve the secret
according to Alice's announcements.
We now describe the procedure in detail. After Alice sends out the
two qubits, \emph{B} and \emph{C}, Charlie* intercepts them and
lets them interact with his ancilla. After that, the state of the
whole system may be written as
\begin{eqnarray}
|\Psi\rangle_{ABCE}=\sum_{i,j=0}^1a_{ij}|ij\rangle_{AB}|\varepsilon_{ij}\rangle_{CE},
\end{eqnarray}
where $|\varepsilon_{ij}\rangle$ refers to the state of Charlie*
after the interaction and is normalized, and the $a_{ij}$ are
complex numbers that satisfy
\begin{eqnarray}
\sum_{i,j=0}^1|a_{ij}|^{2}=1.
\end{eqnarray}
\subsection{The conditions to escape detection}
As mentioned above, to use the information about Alice's and Bob's
MBs, Charlie* does not measure his qubits until Alice reveals
them, and then he can choose different methods accordingly. Note
that when Alice requires Charlie* to declare his MBs, Charlie*
generates a random sequence of $x$ and $y$ to forge his MBs,
actually he does not measure any qubit. If the MBs chosen by all
the three parties satisfy the condition that the number of $x$ is
odd, the results are kept, otherwise they are discarded. Therefore
Charlie* knows Alice's and Bob's MBs for every useful triplet
which can be utilized in the subsequent steps. When some triplets
are chosen by Alice to detect eavesdropping, Charlie* then
measures his corresponding qubits and announces outcomes according
to Alice's and Bob's MBs. Now we explore the conditions that must
be satisfied if Charlie* wants to escape from being detected.
Let us first consider the case where both Alice and Bob measure
their qubits in $x$ direction, and of course, Charlie* declares
$x$. The state of the whole system $|\Psi\rangle_{ABCE}$ can be
rewritten as
\begin{widetext}
\begin{eqnarray}
|\Psi\rangle_{ABCE}=\frac{1}{2}&[&|x^{+}\rangle_{A}|x^{+}\rangle_{B}(a_{00}|\varepsilon_{00}\rangle+a_{01}|\varepsilon_{01}\rangle+a_{10}|\varepsilon_{10}\rangle+a_{11}|\varepsilon_{11}\rangle)_{CE}\nonumber\\
&+&|x^{+}\rangle_{A}|x^{-}\rangle_{B}(a_{00}|\varepsilon_{00}\rangle-a_{01}|\varepsilon_{01}\rangle+a_{10}|\varepsilon_{10}\rangle-a_{11}|\varepsilon_{11}\rangle)_{CE}\\
&+&|x^{-}\rangle_{A}|x^{+}\rangle_{B}(a_{00}|\varepsilon_{00}\rangle+a_{01}|\varepsilon_{01}\rangle-a_{10}|\varepsilon_{10}\rangle-a_{11}|\varepsilon_{11}\rangle)_{CE}\nonumber\\
&+&|x^{-}\rangle_{A}|x^{-}\rangle_{B}(a_{00}|\varepsilon_{00}\rangle-a_{01}|\varepsilon_{01}\rangle-a_{10}|\varepsilon_{10}\rangle+a_{11}|\varepsilon_{11}\rangle)_{CE}].\nonumber
\end{eqnarray}
\end{widetext}
We can see from Table~\ref{tab:table1} that without
eavesdropping, if Alice's and Bob's results are
$\emph{x}^{+}\emph{x}^{+}$ or $\emph{x}^{-}\emph{x}^{-}$,
Charlie*'s announcement should be $\emph{x}^{+}$, otherwise, his
announcement should be $\emph{x}^{-}$. In a convenient depiction,
we denote Charlie*'s state as $|\varphi_{j^{m}{k}^{n}}\rangle$
which is normalized, when Alice's and Bob's results are
$\emph{j}^{m}$ and $\emph{k}^{n}$, where $j,k\in \{x,y\}$ and
$m,n\in\{+,-\}$. To avoid being found out, Charlie* should have
the ability to discriminate completely between the two sets
$\{|\varphi_{x^{+}{x}^{+}}\rangle,
|\varphi_{x^{-}{x}^{-}}\rangle\}$,
$\{|\varphi_{x^{+}{x}^{-}}\rangle,
|\varphi_{x^{-}{x}^{+}}\rangle\}$. As shown in Ref.~\cite{Discri},
two sets $S_{1}$, $S_{2}$ can be perfectly discriminated if and
only if the subspaces they span are orthogonal. So the scalar
products of Charlie*'s states have to satisfy four constraints:
\begin{eqnarray}
\left\{\begin{array}{l}
\langle\varphi_{x^{+}{x}^{+}}|\varphi_{x^{+}{x}^{-}}\rangle=0,\\
\langle\varphi_{x^{+}{x}^{+}}|\varphi_{x^{-}{x}^{+}}\rangle=0,\\
\langle\varphi_{x^{-}{x}^{-}}|\varphi_{x^{+}{x}^{-}}\rangle=0,\\
\langle\varphi_{x^{-}{x}^{-}}|\varphi_{x^{-}{x}^{+}}\rangle=0.
\end{array}\right.
\end{eqnarray}
From Eqs. (3) and (4), we obtain
\begin{eqnarray}
\left\{\begin{array}{l}
a_{00}^{\ast}a_{01}\langle\varepsilon_{00}|\varepsilon_{01}\rangle-a_{11}^{\ast}a_{10}\langle\varepsilon_{11}|\varepsilon_{10}\rangle=0,\\
a_{00}^{\ast}a_{10}\langle\varepsilon_{00}|\varepsilon_{10}\rangle-a_{11}^{\ast}a_{01}\langle\varepsilon_{11}|\varepsilon_{01}\rangle=0,\\
|a_{01}|^{2}-a_{01}^{\ast}a_{10}\langle\varepsilon_{01}|\varepsilon_{10}\rangle+a_{10}^{\ast}a_{01}\langle\varepsilon_{10}|\varepsilon_{01}\rangle-|a_{10}|^{2}=0,\\
|a_{00}|^{2}-a_{00}^{\ast}a_{11}\langle\varepsilon_{00}|\varepsilon_{11}\rangle+a_{11}^{\ast}a_{00}\langle\varepsilon_{11}|\varepsilon_{00}\rangle-|a_{11}|^{2}=0.
\end{array}\right.
\end{eqnarray}
Similarly, the constraints are then found in the Appendix for
other cases. Finally, we obtain results from Eqs. (5), (A.3),
(A.6) and (A.9) :
\begin{eqnarray}
\left\{\begin{array}{l}
a_{00}^{\ast}a_{01}\langle\varepsilon_{00}|\varepsilon_{01}\rangle=a_{00}^{\ast}a_{10}\langle\varepsilon_{00}|\varepsilon_{10}\rangle=0,\\
a_{00}^{\ast}a_{11}\langle\varepsilon_{00}|\varepsilon_{11}\rangle=a_{01}^{\ast}a_{10}\langle\varepsilon_{01}|\varepsilon_{10}\rangle=0,\\
a_{01}^{\ast}a_{11}\langle\varepsilon_{01}|\varepsilon_{11}\rangle=a_{10}^{\ast}a_{11}\langle\varepsilon_{10}|\varepsilon_{11}\rangle=0,\\
|a_{00}|=|a_{11}|,\\
|a_{01}|=|a_{10}|.
\end{array}\right.
\end{eqnarray}
Obviously, Charlie* can succeed in escaping detection by Alice and
Bob when his operations satisfy Eq. (6).
\subsection{The maximum information the attacker can attain}
After escaping from detection, Charlie* measures the remaining
qubits to deduce Alice's secret. Now let us compute the maximum
information that Charlie* can gain. From Eqs. (3) and (6), we can
see if Alice's result is $x^{+}$, Charlie*'s state collapses to
$|\varphi_{x^{+}{x}^{+}}\rangle$ or
$|\varphi_{x^{+}{x}^{-}}\rangle$ with equal probability, otherwise
collapses to $|\varphi_{x^{-}{x}^{+}}\rangle$ or
$|\varphi_{x^{-}{x}^{-}}\rangle$ with equal probability. So to get
information of Alice's result, $x^{+}$ or ${x}^{-}$, Charlie*
should distinguish between two mixed states
$\rho_{x^+}=\frac{1}{2}|\varphi_{x^{+}{x}^{+}}\rangle\langle\varphi_{x^{+}{x}^{+}}|
+\frac{1}{2}|\varphi_{x^{+}{x}^{-}}\rangle\langle\varphi_{x^{+}{x}^{-}}|$
and
$\rho_{x^-}=\frac{1}{2}|\varphi_{x^{-}{x}^{+}}\rangle\langle\varphi_{x^{-}{x}^{+}}|
+\frac{1}{2}|\varphi_{x^{-}{x}^{-}}\rangle\langle\varphi_{x^{-}{x}^{-}}|$
occurring with equal a priori probability. Generally, there are
two ways to discriminate between two states, minimum error
discrimination and unambiguous discrimination. In
Ref.~\cite{twodis}, the authors showed the minimum failure
probability $Q_{F}$ attainable in unambiguous discrimination is
always at least twice as large as the minimum-error probability
$P_{E}$ in ambiguous discrimination for two arbitrary mixed
quantum states. So we should take the ambiguous discrimination to
get the maximum information. Utilizing the well-known
result~\cite{minerror} that to discriminate between two mixed
states $\rho_{1}$ and $\rho_{2}$ occurring with a priori
probabilities $p_{1}$ and $p_{2}$, respectively, where
$p_{1}+p_{2}=1$, the minimum-error probability attainable is
$P_{E}=\frac{1}{2}-\frac{1}{2}\|p_{2}\rho_{2}-p_{1}\rho_{1}\|$,
where $\|\Lambda\|=$Tr$\sqrt{\Lambda^{\dagger}\Lambda}$, we get
the minimum-error probability to discriminate between $\rho_{x^+}$
and $\rho_{x^-}$ under the constraints of Eq. (6)
\begin{eqnarray}
P_{E}=\frac{1}{2}(1-4|a_{00}|\cdot|a_{10}|).
\end{eqnarray}
Considering the other three cases (see Appendix A) with
similar strategy, we get the same results as Eq. (7).
The mutual information between Alice and Charlie* in terms of
Shannon entropy is given by
\begin{eqnarray}
I^{AC}=1+P_{E}\log P_{E}+(1-P_{E})\log(1-P_{E}).
\end{eqnarray}
Now the task is maximizing $I^{AC}$ with the constraints of Eqs.
(2) and (6). Using the Lagrange multiplier method, we attain the
maximum $I^{AC}_{max}=1$ under conditions
\begin{eqnarray}
\left\{\begin{array}{l}
\langle\varepsilon_{00}|\varepsilon_{01}\rangle=\langle\varepsilon_{00}|\varepsilon_{10}\rangle=\langle\varepsilon_{00}|\varepsilon_{11}\rangle=0,\\
\langle\varepsilon_{01}|\varepsilon_{10}\rangle=\langle\varepsilon_{01}|\varepsilon_{11}\rangle=\langle\varepsilon_{10}|\varepsilon_{11}\rangle=0,\\
|a_{00}|=|a_{01}|=|a_{10}|=|a_{11}|=\frac{1}{2}.
\end{array}\right.
\end{eqnarray}
Now, we have the NAS conditions for a dishonest participant to
attack HBB successfully. Therefore the HBB protocol is insecure
(in its original form). Obviously, $|\varepsilon_{00}\rangle$,
$|\varepsilon_{01}\rangle$, $|\varepsilon_{10}\rangle$, and
$|\varepsilon_{11}\rangle$ are orthogonal to each other, which
indicates that a dishonest participant needs to prepare at least
one additional qubit. It is easy to verify that the eavesdropping
strategy in Ref.~\cite{KKI} is a special example of our results,
where two additive qubits are used and
$a_{00}|\varepsilon_{00}\rangle=\frac{1}{2}|000\rangle$,
$a_{01}|\varepsilon_{01}\rangle=-\frac{1}{2}|001\rangle$,
$a_{10}|\varepsilon_{10}\rangle=\frac{1}{2}|110\rangle$, and
$a_{11}|\varepsilon_{11}\rangle=-\frac{1}{2}|111\rangle$.
\section{An example of successful attack}
According to Eq. (9), we can construct some attack schemes easily.
Here we give an even simpler scheme than Ref.~\cite{KKI} with only
one additional qubit. Generally, the ancilla is the standard state
$|0\rangle$. We choose
$a_{00}|\varepsilon_{00}\rangle=\frac{1}{2}|00\rangle$,
$a_{01}|\varepsilon_{01}\rangle=\frac{1}{2}|01\rangle$,
$a_{10}|\varepsilon_{10}\rangle=\frac{1}{2}|10\rangle$, and
$a_{11}|\varepsilon_{11}\rangle=-\frac{1}{2}|11\rangle$ which
satisfy Eq. (9). Comparing the initial state with the state after
interaction (see Eq. (1)), we can derive the operations performed
by Charlie*.
Now we describe the attack orderly. Charlie* prepares the ancilla
\emph{E} in state $|0\rangle$. After Alice sends out two qubits
\emph{B} and \emph{C}, Charlie* intercepts them, performs
$H=(|0\rangle\langle0|+|1\rangle\langle0|+|0\rangle\langle1|-|1\rangle\langle1|)/\sqrt{2}$
on the qubit \emph{B} and CNOT operation on \emph{B}, \emph{E}
(see Fig.~\ref{fig:one}). The entangled state of Alice, Bob and
Charlie* is converted from
$|\Psi_{0}\rangle=\frac{1}{\sqrt{2}}(|000\rangle+|111\rangle)_{ABC}
\otimes|0\rangle_{E}$ to
\begin{eqnarray}
|\Psi_{1}\rangle=\frac{1}{2}(|00\rangle_{AB}|00\rangle_{CE}+|01\rangle_{AB}|01\rangle_{CE}\\
+|10\rangle_{AB}|10\rangle_{CE}-|11\rangle_{AB}|11\rangle_{CE}).\nonumber
\end{eqnarray}
After Alice and Bob measure their qubits, the whole system is
changed into $|\Psi_{2}\rangle$ (see Fig.~\ref{fig:two} and
Fig.~\ref{fig:three}) which varies according to their MBs. Let us
describe all the cases in detail.
\begin{figure}
\caption{\label{fig:one} The circuit of Charlie*'s interaction: a
Hadamard gate $H$ on qubit \emph{B} followed by a CNOT operation on
qubits \emph{B} and \emph{E}.}
\end{figure}
(i) If both Alice's and Bob's MBs are $x$, Charlie*'s state
collapses to one of the four results
\begin{eqnarray}
|\varphi_{x^{+}{x}^{+}}\rangle=\frac{1}{2}(|00\rangle+|01\rangle+|10\rangle-|11\rangle)_{CE},\nonumber\\
|\varphi_{x^{+}{x}^{-}}\rangle=\frac{1}{2}(|00\rangle-|01\rangle+|10\rangle+|11\rangle)_{CE},\\
|\varphi_{x^{-}{x}^{+}}\rangle=\frac{1}{2}(|00\rangle+|01\rangle-|10\rangle+|11\rangle)_{CE},\nonumber\\
|\varphi_{x^{-}{x}^{-}}\rangle=\frac{1}{2}(|00\rangle-|01\rangle-|10\rangle-|11\rangle)_{CE}.\nonumber
\end{eqnarray}
(ii) When Alice and Bob measure their qubits in $x$, $y$ basis,
respectively, Charlie*'s state may be one of the four states
\begin{eqnarray}
|\varphi_{x^{+}{y}^{+}}\rangle=\frac{1}{2}(|00\rangle-i|01\rangle+|10\rangle+i|11\rangle)_{CE},\nonumber\\
|\varphi_{x^{+}{y}^{-}}\rangle=\frac{1}{2}(|00\rangle+i|01\rangle+|10\rangle-i|11\rangle)_{CE},\\
|\varphi_{x^{-}{y}^{+}}\rangle=\frac{1}{2}(|00\rangle-i|01\rangle-|10\rangle-i|11\rangle)_{CE},\nonumber\\
|\varphi_{x^{-}{y}^{-}}\rangle=\frac{1}{2}(|00\rangle+i|01\rangle-|10\rangle+i|11\rangle)_{CE}.\nonumber
\end{eqnarray}
(iii) When Alice and Bob measure their qubits in $y$, $x$ basis,
respectively, Charlie*'s state may be one of the four states
\begin{eqnarray}
|\varphi_{y^{+}{x}^{+}}\rangle=\frac{1}{2}(|00\rangle+|01\rangle-i|10\rangle+i|11\rangle)_{CE},\nonumber\\
|\varphi_{y^{+}{x}^{-}}\rangle=\frac{1}{2}(|00\rangle-|01\rangle-i|10\rangle-i|11\rangle)_{CE},\\
|\varphi_{y^{-}{x}^{+}}\rangle=\frac{1}{2}(|00\rangle+|01\rangle+i|10\rangle-i|11\rangle)_{CE},\nonumber\\
|\varphi_{y^{-}{x}^{-}}\rangle=\frac{1}{2}(|00\rangle-|01\rangle+i|10\rangle+i|11\rangle)_{CE}.\nonumber
\end{eqnarray}
(iv) When Alice's and Bob's MBs are $y$, Charlie*'s state
collapses to one of the four results
\begin{eqnarray}
|\varphi_{y^{+}{y}^{+}}\rangle=\frac{1}{2}(|00\rangle-i|01\rangle-i|10\rangle+|11\rangle)_{CE},\nonumber\\
|\varphi_{y^{+}{y}^{-}}\rangle=\frac{1}{2}(|00\rangle+i|01\rangle-i|10\rangle-|11\rangle)_{CE},\\
|\varphi_{y^{-}{y}^{+}}\rangle=\frac{1}{2}(|00\rangle-i|01\rangle+i|10\rangle-|11\rangle)_{CE},\nonumber\\
|\varphi_{y^{-}{y}^{-}}\rangle=\frac{1}{2}(|00\rangle+i|01\rangle+i|10\rangle+|11\rangle)_{CE}.\nonumber
\end{eqnarray}
\begin{figure}
\caption{\label{fig:two} The circuit used by Charlie* to
discriminate between the two sets of states for the detection
qubits.}
\end{figure}
\begin{figure}
\caption{\label{fig:three} The circuit used by Charlie* to extract
Alice's secret from the information qubits.}
\end{figure}
It is easy to validate that the four states are orthogonal to each
other in every case, which implies that they can be distinguished
perfectly. Consequently, Charlie* can not only get the secret of
Alice but also escape from detection. In fact, we only need to
distinguish between two different results because the qubits are
used to either detect eavesdropping or distill information.
Therefore there are some simple ways to fulfill Charlie*'s
objective.
We take case (i) as an example to describe Charlie*'s operations.
Let us first explain how Charlie* can escape from being detected
when the qubits are chosen to check eavesdropping. Charlie* wants
to deduce his proper declaration $x^{+}$ or ${x}^{-}$; therefore,
he need discriminate between $\{|\varphi_{x^{+}{x}^{+}}\rangle,
|\varphi_{x^{-}{x}^{-}}\rangle\}$ and
$\{|\varphi_{x^{+}{x}^{-}}\rangle,
|\varphi_{x^{-}{x}^{+}}\rangle\}$. A particularly simple circuit
to achieve this task is illustrated in Fig.~\ref{fig:two} (Here
$U=V=W=H$). Concretely, after the operations of CNOT and \emph{W},
the four states in Eq. (11) are converted into
\begin{eqnarray}
|\varphi_{x^{+}{x}^{+}}\rangle=\frac{1}{\sqrt{2}}(|01\rangle+|10\rangle)_{CE},\nonumber\\
|\varphi_{x^{+}{x}^{-}}\rangle=\frac{1}{\sqrt{2}}(|00\rangle-|11\rangle)_{CE},\\
|\varphi_{x^{-}{x}^{+}}\rangle=\frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)_{CE},\nonumber\\
|\varphi_{x^{-}{x}^{-}}\rangle=\frac{1}{\sqrt{2}}(-|01\rangle+|10\rangle)_{CE}.\nonumber
\end{eqnarray}
Then Charlie* measures each qubit in computational basis. If the
measurement results of \emph{C}, \emph{E} are 00 or 11, Charlie*'s
announcement is 1 (corresponding to $|1\rangle, |x^{-}\rangle$ or
$|y^{-}\rangle$ hereafter), otherwise his announcement is 0
(corresponding to $|0\rangle, |x^{+}\rangle$ or $|y^{+}\rangle$
hereafter). According to Table~\ref{tab:table1}, we can see no
error occurs, and therefore Charlie* can escape from being
detected.
We now discuss how Charlie* can obtain the secret information from
his qubits. He only needs to distinguish between
$\{|\varphi_{x^{+}{x}^{+}}\rangle,
|\varphi_{x^{+}{x}^{-}}\rangle\}$ and
$\{|\varphi_{x^{-}{x}^{+}}\rangle,
|\varphi_{x^{-}{x}^{-}}\rangle\}$ to get Alice's secret $x^{+}$ or
${x}^{-}$. The circuit to achieve this task is illustrated in
Fig.~\ref{fig:three}. After the $U$ operation, the states in Eq.
(11) are changed into
\begin{eqnarray}
|\varphi_{x^{+}{x}^{+}}\rangle=\frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)_{CE},\nonumber\\
|\varphi_{x^{+}{x}^{-}}\rangle=\frac{1}{\sqrt{2}}(|00\rangle-|11\rangle)_{CE},\\
|\varphi_{x^{-}{x}^{+}}\rangle=\frac{1}{\sqrt{2}}(|01\rangle+|10\rangle)_{CE},\nonumber\\
|\varphi_{x^{-}{x}^{-}}\rangle=\frac{1}{\sqrt{2}}(-|01\rangle+|10\rangle)_{CE}.\nonumber
\end{eqnarray}
From Eq. (16), we can see clearly that the measurement results, 01
or 10, imply that Alice's secret is $x^{-}$, and 00 or 11 indicate
$x^{+}$.
For other cases (ii), (iii) and (iv), Charlie* can also
distinguish between the corresponding states by choosing different
\emph{U} and \emph{W} according to Table~\ref{tab:table2}, avoid
being detected by announcing his results according to
Table~\ref{tab:table3} and then deduce Alice's secret according to
Table~\ref{tab:table4}.
\begin{table}
\caption{\label{tab:table2}The unitary operators for $U$, $V$, $W$
in different cases.}
\begin{ruledtabular}
\begin{tabular}{ccccc}
& i & ii & iii & iv
\\ \hline
$U$ & $H$& $H$ & $SH$ & $SH$\\
$V$ & $H$& $SH$ & $H$ & $SH$\\
$W$ & $H$& $SH$ & $SH$ & $H$\\
\end{tabular}
\end{ruledtabular}
\end{table}
\begin{table}
\caption{\label{tab:table3} Relations between Charlie*'s
measurement results and his announcements (the first column) for
the detection qubits.}
\begin{ruledtabular}
\begin{tabular}{ccccc}
& i & ii & iii & iv
\\ \hline
0 & 10, 01 & 10, 11 & 10, 01 & 10, 11\\
1 & 00, 11 & 00, 01 & 00, 11 & 00, 01\\
\end{tabular}
\end{ruledtabular}
\end{table}
\begin{table}
\caption{\label{tab:table4}Relations between Charlie*'s
measurement results and Alice's secret (the first column) for the
information qubits.}
\begin{ruledtabular}
\begin{tabular}{ccccc}
& i & ii & iii & iv
\\ \hline
0 & 00, 11 & 00, 11 & 10, 01 & 10, 01\\
1 & 10, 01 & 10, 01 & 00, 11 & 00, 11\\
\end{tabular}
\end{ruledtabular}
\end{table}
\section{Conclusion and discussion}
The object of QSS protocols is to transmit a secret in such a way
that only the authorized groups can access it, and no other
combination of parties can get any information about it. The worst
case for QSS protocols is that some participants are dishonest,
and try to find the secret by themselves. Therefore, participant
attack is the most serious threat for the security of QSS
protocols, and that is exactly what we study. The purpose of this
paper is to give a method to analyze a participant attack in QSS.
We introduce this method taking the HBB scheme~\cite{Hbb99} as an
example. A dishonest participant intercepts all the qubits, lets
them interact with his ancilla, and then resends them. He then
measures his qubits after other participants reveal their useful
information. By discriminating between two mixed states, we obtain
the NAS conditions under which the dishonest participant can
attain all the information without introducing any error. This
result shows that the HBB protocol is insecure (in its original
form). Finally, we give an example achieving the proposed attack
to demonstrate our results further.
Although the result that the HBB scheme is insecure (in its
original form) is not new, the method of analyzing the participant
attack is, to our knowledge. The treatment we have presented
appears to cover all individual participant attacks allowed by
physical laws. This method can be applied to other similar QSS
protocols with some modifications. We believe that this method
would be useful in designing related schemes and analyzing their
security. On the one hand, we can construct attack strategies
easily according to the NAS conditions when a protocol has
security loopholes. On the other hand, we can show that protocol
is secure if the attack conditions cannot be reached. For example,
applying this method to the enhanced protocol~\cite{KKI}, we can
show it is secure (Such analysis is beyond the scope of this
paper).
\begin{acknowledgments}
We thank the anonymous reviewer for helpful comments. This work is
supported by the National High Technology Research and Development
Program of China, Grant No. 2006AA01Z419; the National Natural
Science Foundation of China, Grant Nos. 90604023, 60373059; the
National Research Foundation for the Doctoral Program of Higher
Education of China, Grant No.20040013007; the National Laboratory
for Modern Communications Science Foundation of China, Grant No.
9140C1101010601; the Natural Science Foundation of Beijing, Grant
No. 4072020; and the ISN Open Foundation.
\end{acknowledgments}
\appendix
\section{Constraints on Charlie*'s probes}
In this appendix, we find the conditions that Charlie*'s
operations need to satisfy so that no errors occur in the
detection procedure in the other three cases.
(1) When Alice, Bob and Charlie* choose the MBs \emph{x},
\emph{y}, \emph{y} respectively, the whole system
$|\Psi\rangle_{ABCE}$ can be rewritten as
\begin{eqnarray}
&&|\Psi\rangle_{ABCE}=\nonumber\\
&&\frac{1}{2}[|x^{+}y^{+}\rangle(a_{00}|\varepsilon_{00}\rangle-ia_{01}|\varepsilon_{01}\rangle+a_{10}|\varepsilon_{10}\rangle-ia_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|x^{+}y^{-}\rangle(a_{00}|\varepsilon_{00}\rangle+ia_{01}|\varepsilon_{01}\rangle+a_{10}|\varepsilon_{10}\rangle+ia_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|x^{-}y^{+}\rangle(a_{00}|\varepsilon_{00}\rangle-ia_{01}|\varepsilon_{01}\rangle-a_{10}|\varepsilon_{10}\rangle+ia_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|x^{-}y^{-}\rangle(a_{00}|\varepsilon_{00}\rangle+ia_{01}|\varepsilon_{01}\rangle-a_{10}|\varepsilon_{10}\rangle-ia_{11}|\varepsilon_{11}\rangle)].\nonumber\\
\end{eqnarray}
According to Table~\ref{tab:table1}, when Alice's and Bob's
results are $x^{+}{y}^{+}$ or $x^{-}{y}^{-}$, Charlie*'s
announcement should be ${y}^{-}$, otherwise, his announcement
should be ${y}^{+}$. Therefore, Charlie* should be capable of
distinguishing between the two sets,
$\{|\varphi_{x^{+}{y}^{+}}\rangle,|\varphi_{x^{-}{y}^{-}}\rangle\}$
and
$\{|\varphi_{x^{+}{y}^{-}}\rangle,|\varphi_{x^{-}{y}^{+}}\rangle\}$,
to avoid being detected. That is
\begin{eqnarray}
\left\{\begin{array}{l}
\langle\varphi_{x^{+}{y}^{+}}|\varphi_{x^{+}{y}^{-}}\rangle=0,\\
\langle\varphi_{x^{+}{y}^{+}}|\varphi_{x^{-}{y}^{+}}\rangle=0,\\
\langle\varphi_{x^{-}{y}^{-}}|\varphi_{x^{+}{y}^{-}}\rangle=0,\\
\langle\varphi_{x^{-}{y}^{-}}|\varphi_{x^{-}{y}^{+}}\rangle=0.
\end{array}\right.
\end{eqnarray}
Then we get
\begin{eqnarray}
&&a_{00}^{\ast}a_{01}\langle\varepsilon_{00}|\varepsilon_{01}\rangle+a_{11}^{\ast}a_{10}\langle\varepsilon_{11}|\varepsilon_{10}\rangle=0,\nonumber\\
&&a_{00}^{\ast}a_{10}\langle\varepsilon_{00}|\varepsilon_{10}\rangle-a_{11}^{\ast}a_{01}\langle\varepsilon_{11}|\varepsilon_{01}\rangle=0,\\
&&|a_{01}|^{2}-ia_{01}^{\ast}a_{10}\langle\varepsilon_{01}|\varepsilon_{10}\rangle-ia_{10}^{\ast}a_{01}\langle\varepsilon_{10}|\varepsilon_{01}\rangle-|a_{10}|^{2}=0,\nonumber\\
&&|a_{00}|^{2}+ia_{00}^{\ast}a_{11}\langle\varepsilon_{00}|\varepsilon_{11}\rangle+ia_{11}^{\ast}a_{00}\langle\varepsilon_{11}|\varepsilon_{00}\rangle-|a_{11}|^{2}=0.\nonumber
\end{eqnarray}
(2) When Alice, Bob and Charlie* choose the MBs \emph{y},
\emph{x}, \emph{y}, respectively, $|\Psi\rangle_{ABCE}$ can be
rewritten as
\begin{eqnarray}
&&|\Psi\rangle_{ABCE}=\nonumber\\
&&\frac{1}{2}[|y^{+}x^{+}\rangle(a_{00}|\varepsilon_{00}\rangle+a_{01}|\varepsilon_{01}\rangle-ia_{10}|\varepsilon_{10}\rangle-ia_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|y^{+}x^{-}\rangle(a_{00}|\varepsilon_{00}\rangle-a_{01}|\varepsilon_{01}\rangle-ia_{10}|\varepsilon_{10}\rangle+ia_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|y^{-}x^{+}\rangle(a_{00}|\varepsilon_{00}\rangle+a_{01}|\varepsilon_{01}\rangle+ia_{10}|\varepsilon_{10}\rangle+ia_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|y^{-}x^{-}\rangle(a_{00}|\varepsilon_{00}\rangle-a_{01}|\varepsilon_{01}\rangle+ia_{10}|\varepsilon_{10}\rangle-ia_{11}|\varepsilon_{11}\rangle)].\nonumber\\
\end{eqnarray}
According to Table~\ref{tab:table1}, the results, $y^{+}{x}^{+}$
or $y^{-}{x}^{-}$, imply Charlie*'s announcement should be
${y}^{-}$, and others imply ${y}^{+}$. For the same reason, we let
\begin{eqnarray}
\left\{\begin{array}{l}
\langle\varphi_{y^{+}{x}^{+}}|\varphi_{y^{+}{x}^{-}}\rangle=0,\\
\langle\varphi_{y^{+}{x}^{+}}|\varphi_{y^{-}{x}^{+}}\rangle=0,\\
\langle\varphi_{y^{-}{x}^{-}}|\varphi_{y^{+}{x}^{-}}\rangle=0,\\
\langle\varphi_{y^{-}{x}^{-}}|\varphi_{y^{-}{x}^{+}}\rangle=0.
\end{array}\right.
\end{eqnarray}
We then have
\begin{eqnarray}
&&a_{00}^{\ast}a_{01}\langle\varepsilon_{00}|\varepsilon_{01}\rangle-a_{11}^{\ast}a_{10}\langle\varepsilon_{11}|\varepsilon_{10}\rangle=0,\nonumber\\
&&a_{00}^{\ast}a_{10}\langle\varepsilon_{00}|\varepsilon_{10}\rangle+a_{11}^{\ast}a_{01}\langle\varepsilon_{11}|\varepsilon_{01}\rangle=0,\nonumber\\
&&|a_{01}|^{2}+ia_{01}^{\ast}a_{10}\langle\varepsilon_{01}|\varepsilon_{10}\rangle+ia_{10}^{\ast}a_{01}\langle\varepsilon_{10}|\varepsilon_{01}\rangle-|a_{10}|^{2}=0,\nonumber\\
&&|a_{00}|^{2}+ia_{00}^{\ast}a_{11}\langle\varepsilon_{00}|\varepsilon_{11}\rangle+ia_{11}^{\ast}a_{00}\langle\varepsilon_{11}|\varepsilon_{00}\rangle-|a_{11}|^{2}=0.\nonumber\\
\end{eqnarray}
(3) When Alice, Bob and Charlie* choose the MBs \emph{y},
\emph{y}, \emph{x}, respectively, $|\Psi\rangle_{ABCE}$ can be
rewritten as
\begin{eqnarray}
&&|\Psi\rangle_{ABCE}=\nonumber\\
&&\frac{1}{2}[|y^{+}y^{+}\rangle(a_{00}|\varepsilon_{00}\rangle-ia_{01}|\varepsilon_{01}\rangle-ia_{10}|\varepsilon_{10}\rangle-a_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|y^{+}y^{-}\rangle(a_{00}|\varepsilon_{00}\rangle+ia_{01}|\varepsilon_{01}\rangle-ia_{10}|\varepsilon_{10}\rangle+a_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|y^{-}y^{+}\rangle(a_{00}|\varepsilon_{00}\rangle-ia_{01}|\varepsilon_{01}\rangle+ia_{10}|\varepsilon_{10}\rangle+a_{11}|\varepsilon_{11}\rangle)\nonumber\\
&&+|y^{-}y^{-}\rangle(a_{00}|\varepsilon_{00}\rangle+ia_{01}|\varepsilon_{01}\rangle+ia_{10}|\varepsilon_{10}\rangle-a_{11}|\varepsilon_{11}\rangle)].\nonumber\\
\end{eqnarray}
The results, $y^{+}{y}^{+}$ or $y^{-}{y}^{-}$, imply Charlie*'s
announcement should be ${x}^{-}$, and others imply ${x}^{+}$. For
the same reason, we let
\begin{eqnarray}
\left\{\begin{array}{l}
\langle\varphi_{y^{+}{y}^{+}}|\varphi_{y^{+}{y}^{-}}\rangle=0,\\
\langle\varphi_{y^{+}{y}^{+}}|\varphi_{y^{-}{y}^{+}}\rangle=0,\\
\langle\varphi_{y^{-}{y}^{-}}|\varphi_{y^{+}{y}^{-}}\rangle=0,\\
\langle\varphi_{y^{-}{y}^{-}}|\varphi_{y^{-}{y}^{+}}\rangle=0.
\end{array}\right.
\end{eqnarray}
We then have
\begin{eqnarray}
&&a_{00}^{\ast}a_{01}\langle\varepsilon_{00}|\varepsilon_{01}\rangle+a_{11}^{\ast}a_{10}\langle\varepsilon_{11}|\varepsilon_{10}\rangle=0,\nonumber\\
&&a_{00}^{\ast}a_{10}\langle\varepsilon_{00}|\varepsilon_{10}\rangle+a_{11}^{\ast}a_{01}\langle\varepsilon_{11}|\varepsilon_{01}\rangle=0,\\
&&|a_{01}|^{2}-a_{01}^{\ast}a_{10}\langle\varepsilon_{01}|\varepsilon_{10}\rangle+a_{10}^{\ast}a_{01}\langle\varepsilon_{10}|\varepsilon_{01}\rangle-|a_{10}|^{2}=0,\nonumber\\
&&|a_{00}|^{2}+a_{00}^{\ast}a_{11}\langle\varepsilon_{00}|\varepsilon_{11}\rangle-a_{11}^{\ast}a_{00}\langle\varepsilon_{11}|\varepsilon_{00}\rangle-|a_{11}|^{2}=0.\nonumber
\end{eqnarray}
\end{document} |
\begin{document}
\includepdf[pages=1-last]{PostdoctorThesisLYJ-utf.pdf}
\end{document}
\documentclass[12pt,a4paper,openany]{book}
\renewcommand{\baselinestretch}{1.2}
\usepackage{CJKutf8,CJKnumb}
\usepackage[all]{xy}
\usepackage{amscd}
\usepackage{amsmath,amsthm,amssymb}
\usepackage{amssymb}
\usepackage{amsfonts,amssymb,amsthm}
\usepackage{extarrows}
\usepackage{enumerate}
\usepackage{mathrsfs}
\usepackage{graphicx}
\usepackage{algorithmic,algorithm}
\renewcommand{\algorithmiccomment}[1]{$\rhd$~\textit{#1}}
\usepackage{mathtools}
\pagestyle{plain}
\topmargin 0.5cm \addtolength{\textheight}{1.5cm}
\addtolength{\textwidth}{2cm} \oddsidemargin 0.7cm \evensidemargin
0.2cm
\theoremstyle{bfupright head,slanted body}
\newtheorem{res}{}[section] \newtheorem*{res*}{}
\newtheorem{eqres}[equation]{}
\theoremstyle{bfupright head,upright body}
\newtheorem{bfhpg}[res]{} \newtheorem*{bfhpg*}{}
\theoremstyle{numbered paragraph}
\newtheorem{ipg}[res]{}
\newcommand{\rev}[1]{\begingroup#1\endgroup}
\mathfrak{n}ewcommand{\ensuremath{P_{\mathfrak{m}athrm{fin}}}}{\ensuremath{P_{\mathfrak{m}athrm{fin}}}}
\newcommand{\subsetLeq}[1]{\mathop{\leq^{\mathrm{dom}}_#1}}
\mathfrak{n}ewcommand{\mathfrak{m}athop{\leq^{\mathfrak{m}athrm{dom}}}}{\mathfrak{m}athop{\leq^{\mathfrak{m}athrm{dom}}}}
\mathfrak{n}ewcommand{\mathfrak{m}athop{\supseteq^{\mathfrak{m}athrm{dom}}}}{\mathfrak{m}athop{\supseteq^{\mathfrak{m}athrm{dom}}}}
\newcommand{\Graphs}[0]{\ensuremath{\mathsf{Graphs}}}
\mathfrak{n}ewcommand{\mathfrak{m}athop{\xrightarrow{_F}}}{\mathfrak{m}athop{\xrightarrow{_F}}}
\mathfrak{n}ewcommand{\mathfrak{m}athop{\leq^F}}{\mathfrak{m}athop{\leq^F}}
\let\Homeq\sim
\mathfrak{n}ewcommand{\mathfrak{m}athop{\xrightarrow{_F}}eq}{\mathfrak{m}athop{\Homeq^{F}}}
\mathfrak{n}ewcommand{\ConnGraph}[0]{\ensuremath{\mathfrak{m}athsf{ConnGraph}}}
\mathfrak{n}ewcommand{\DiGraphs}[0]{\ensuremath{\mathfrak{m}athsf{DiGraphs}}}
\mathfrak{n}ewcommand{\DiCycle}[0]{\ensuremath{\mathfrak{m}athsf{DiCycle}}}
\mathfrak{n}ewcommand{\DiCycles}[0]{\ensuremath{\mathfrak{m}athsf{DiCycles}}}
\mathfrak{n}ewcommand{\Cycle}[0]{\ensuremath{\mathfrak{m}athsf{Cycle}}}
\mathfrak{n}ewcommand{\Cycles}[0]{\ensuremath{\mathfrak{m}athsf{Cycles}}}
\mathfrak{n}ewcommand{\UnorPath}[0]{\ensuremath{\mathfrak{m}athsf{UnorPath}}}
\mathfrak{n}ewcommand{\mathbb{P}ath}[0]{\ensuremath{\mathfrak{m}athsf{Path}}}
\mathfrak{n}ewcommand{\DiPath}[0]{\ensuremath{\mathfrak{m}athsf{DiPath}}}
\mathfrak{n}ewcommand{\Matrices}[0]{\ensuremath{\mathfrak{m}athsf{DRM}}}
\newtheorem{thm}{Theorem}[section]
\newtheorem{lem}[thm]{Lemma}
\newtheorem{claim}[thm]{Claim}
\newtheorem{prop}[thm]{Proposition}
\newcommand{\pgref}[1]{(\ref{#1})}
\newcommand{\thmref}[2][Theorem~]{#1\pgref{thm:#2}}
\newcommand{\corref}[2][Corollary~]{#1\pgref{cor:#2}}
\newcommand{\prpref}[2][Proposition~]{#1\pgref{prp:#2}}
\newcommand{\lemref}[2][Lemma~]{#1\pgref{lem:#2}}
\newcommand{\dfnref}[2][Definition~]{#1\pgref{dfn:#2}}
\newcommand{\rmkref}[2][Remark~]{#1\pgref{rmk:#2}}
\newcommand{\secref}[2][Section~]{#1\ref{sec:#2}}
\newcommand{\partpgref}[2]{(\ref{#1})\prtlbl{#2}}
\newcommand{\partlemref}[3][Lemma~]{#1\partpgref{lem:#2}{#3}}
\renewcommand{\eqref}[1]{\pgref{eq:#1}}
\newcommand{\thmcite}[2][?]{\cite[thm.~#1]{#2}}
\newcommand{\corcite}[2][?]{\cite[cor.~#1]{#2}}
\newcommand{\prpcite}[2][?]{\cite[prop.~#1]{#2}}
\newcommand{\lemcite}[2][?]{\cite[lem.~#1]{#2}}
\newcommand{\dfncite}[2][?]{\cite[def.~#1]{#2}}
\newcommand{\seccite}[2][?]{\cite[sec.~#1]{#2}}
\mathfrak{n}ewcommand{\Ro}{\mathfrak{m}athrel{\overset{0}{\sim}}}
\mathfrak{n}ewcommand{\Rl}{\mathfrak{m}athrel{\overset{1}{\sim}}}
\mathfrak{n}ewcommand{\Rlstar}{\mathfrak{m}athrel{\overset{1\star}{\sim}}}
\mathfrak{n}ewcommand{\Rldstar}{\mathfrak{m}athrel{\overset{1\star}{\rightharpoonup}}}
\mathfrak{n}ewcommand{\Rk}{\mathfrak{m}athrel{\overset{\ge k}{\sim}}}
\mathfrak{n}ewcommand{\Rld}{\mathfrak{m}athrel{\overset{1}{\rightharpoonup}}}
\mathfrak{n}ewcommand{\Rldk}{\mathfrak{m}athrel{\overset{k}{\rightharpoonup}}}
\mathfrak{n}ewcommand{\mathfrak{m}athrel{\rightsquigarrow}}{\mathfrak{m}athrel{\rightsquigarrow}}
\renewcommand{\mathbb{P}}{\mathfrak{m}athbb{P}}
\mathfrak{n}ewcommand{\lca}[1]{\mathfrak{m}athop{lca}(#1)}
\mathfrak{n}ewcommand{\rt}[1]{\ensuremath{\mathfrak{m}athsf{#1}}}
\mathfrak{n}ewcommand{\ensuremath{M^\odot}}{\ensuremath{M^\odot}}
\renewcommand{\odot}{\odot}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{defi}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
\numberwithin{equation}{section}
\def{
$\Box$
}{{
$\Box$\mathfrak{m}edskip}}
% NOTE(review): macro names reconstructed from their expansions (the original
% names were overwritten by a global find-replace) -- confirm against source.
\def\Forb{\mathop{\mathrm{Forb}}\nolimits}
\def\Age{\mathop{\mathrm{Age}}\nolimits}
\def\Cl{\mathop{\mathrm{Cl}}\nolimits}
\def\Acl{\mathop{\mathrm{Acl}}\nolimits}
\def\Rel{\mathop{\mathrm{Rel}}\nolimits}
\def\Mod{\mathop{\mathrm{Mod}}\nolimits}
\def\Dom{\mathop{\mathrm{Dom}}\nolimits}
\def\Aut{\mathop{\mathrm{Aut}}\nolimits}
\def\Cl{\mathop{\mathrm{Cl}}\nolimits}
\def\Cc{\mathop{\mathrm{Cc}}\nolimits}
\def\Ec{\mathop{\mathrm{Ec}}\nolimits}
\mathfrak{n}ewcommand{\mathfrak{m}athop{\xrightarrow{_E}}}{\mathfrak{m}athop{\xrightarrow{_E}}}
\mathfrak{n}ewcommand{\mathfrak{m}athbb{N}}{\mathfrak{m}athbb{N}}
\mathfrak{n}ewcommand{\mathfrak{m}athbb{Z}}{\mathfrak{m}athbb{Z}}
\renewcommand{\alpha}{\alphalpha}
\mathfrak{n}ewcommand{\cong}{\cong}
\mathfrak{n}ewcommand{\simeq}{\simeq}
\mathfrak{n}ewcommand{{\mathcal K}er}[1]{\mathfrak{n}obreak{\operatorname{Ker}#1}}
\mathfrak{n}ewcommand{\Coker}[1]{\mathfrak{n}obreak{\operatorname{Coker}#1}}
\renewcommand{\Im}[1]{\operatorname{Im}{#1}}
\mathfrak{n}ewcommand{\operatorname{dim}R}{\operatorname{dim}R}
\mathfrak{n}ewcommand{\varphid}[2][R]{\operatorname{fd}_{#1}#2}
\mathfrak{n}ewcommand{\varphidd}[2][\widehat{R}]{\operatorname{fd}_{#1}#2}
\mathfrak{n}ewcommand{\operatorname{depth}}{\operatorname{depth}}
\renewcommand{\operatorname{dim}}{\operatorname{dim}}
\mathfrak{n}ewcommand{\dif}[2][]{{\mathfrak{p}artial}^{#2}_{#1}}
\mathfrak{n}ewcommand{\Co}[2][]{\operatorname{C}_{#1}(#2)}
\mathfrak{n}ewcommand{\Cy}[2][]{\operatorname{Z}_{#1}(#2)}
\renewcommand{\H}[2][]{\operatorname{H}_{#1}(#2)}
\renewcommand{\Im}[1]{\operatorname{Im}{#1}}
\mathfrak{n}ewcommand{\operatorname{depth}R}{\operatorname{depth}R}
\mathfrak{n}ewcommand{\operatorname{depth}S}{\operatorname{depth}S}
\mathfrak{n}ewcommand{\wdt}[2][R]{\operatorname{width}_{#1}#2}
\mathfrak{n}ewcommand{\wdtt}[2][S]{\operatorname{width}_{#1}#2}
\mathfrak{n}ewcommand{\dpt}[2][R]{\operatorname{depth}_{#1}#2}
\mathfrak{n}ewcommand{\dptt}[2][S]{\operatorname{depth}_{#1}#2}
\mathfrak{n}ewcommand{\id}[2][R]{\operatorname{id}_{#1}#2}
\mathfrak{n}ewcommand{\ids}[2][S]{\operatorname{id}_{#1}#2}
\mathfrak{n}ewcommand{\idd}[2][\widehat{R}]{\operatorname{id}_{#1}#2}
\mathfrak{n}ewcommand{\mathfrak{p}d}[2][R]{\operatorname{pd}_{#1}#2}
\mathfrak{n}ewcommand{\mathfrak{p}ds}[2][S]{\operatorname{pd}_{#1}#2}
\mathfrak{n}ewcommand{\mathfrak{p}dd}[2][\widehat{R}]{\operatorname{pd}_{#1}#2}
\mathfrak{n}ewcommand{\Gpd}[2][R]{\operatorname{Gpd}_{#1}#2}
\mathfrak{n}ewcommand{\Gpdd}[2][S]{\operatorname{Gpd}_{#1}#2}
\mathfrak{n}ewcommand{\Hom}[3][R]{\operatorname{Hom}_{#1}(#2,#3)}
\mathfrak{n}ewcommand{\Homm}[3][S]{\operatorname{Hom}_{#1}(#2,#3)}
\mathfrak{n}ewcommand{\RHom}[3][\widehat{R}]{\operatorname{\mathfrak{m}athbf{R}Hom}_{#1}(#2,#3)}
\mathfrak{n}ewcommand{\RHomm}[3][R]{\operatorname{\mathfrak{m}athbf{R}Hom}_{#1}(#2,#3)}
\mathfrak{n}ewcommand{\RHoms}[3][S]{\operatorname{\mathfrak{m}athbf{R}Hom}_{#1}(#2,#3)}
\mathfrak{n}ewcommand{\Ext}[4][R]{\operatorname{Ext}_{#1}^{#2}(#3,#4)}
\mathfrak{n}ewcommand{\Extt}[4][S]{\operatorname{Ext}_{#1}^{#2}(#3,#4)}
\mathfrak{n}ewcommand{\tp}[3][R]{\mathfrak{n}obreak{#2\otimes_{#1}#3}}
\mathfrak{n}ewcommand{\tpP}[3][R]{\left(\tp[#1]{#2}{#3}\right)}
\mathfrak{n}ewcommand{\tpp}[3][S]{\mathfrak{n}obreak{#2\otimes_{#1}#3}}
\mathfrak{n}ewcommand{\Ltp}[3][\widehat{R}]{\mathfrak{n}obreak{#2\otimes_{#1}^{\mathfrak{m}athbf{L}}#3}}
\mathfrak{n}ewcommand{\Ltpr}[3][\comp{R}]{\mathfrak{n}obreak{#2\otimes_{#1}^{\mathfrak{m}athbf{L}}#3}}
\mathfrak{n}ewcommand{\Ltpp}[3][R]{\mathfrak{n}obreak{#2\otimes_{#1}^{\mathfrak{m}athbf{L}}#3}}
\mathfrak{n}ewcommand{\Ltps}[3][S]{\mathfrak{n}obreak{#2\otimes_{#1}^{\mathfrak{m}athbf{L}}#3}}
\mathfrak{n}ewcommand{\Ltpk}[3][k]{\mathfrak{n}obreak{#2\otimes_{#1}^{\mathfrak{m}athbf{L}}#3}}
\mathfrak{n}ewcommand{\Tor}[4][R]{\operatorname{Tor}^{#1}_{#2}(#3,#4)}
\mathfrak{n}ewcommand{\Gidd}[2][R]{\operatorname{Gid}_{#1}#2}
\mathfrak{n}ewcommand{\Gids}[2][S]{\operatorname{Gid}_{#1}#2}
\mathfrak{n}ewcommand{\Gid}[2][\widehat{R}]{\operatorname{Gid}_{#1}#2}
\mathfrak{n}ewcommand{\longrightarrow}{\longrightarrow}
\mathfrak{n}ewcommand{\xla}[2][]{\xleftarrow[#1]{\;#2\;}}
\mathfrak{n}ewcommand{\xra}[2][]{\xrightarrow[#1]{\;#2\;}}
\mathfrak{n}ewcommand{\xra{\;\qis\;}}{\xra{\;\simeq\;}}
\mathfrak{n}ewcommand{\xla{\;\qis\;}}{\xla{\;\simeq\;}}
\mathfrak{n}ewcommand{\setof}[3][\mathfrak{m}space{2mu}]{\{#1#2 \mathfrak{m}id #3#1\}}
\mathfrak{n}ewcommand{\mathfrak{m}apdef}[4][\rightarrow]{\mathfrak{n}obreak{#2\colon #3 #1 #4}}
\mathfrak{n}ewcommand{\simeqdef}[4][\xra{\simeq}]{\mathfrak{n}obreak{#2\colon #3 #1 #4}}
\mathfrak{n}ewcommand{\Gdim}[2][R]{\operatorname{G-dim}_{#1}#2}
\mathfrak{n}ewcommand{\Gdimm}[2][S]{\operatorname{G-dim}_{#1}#2}
\renewcommand{\alpha}{\alphalpha}
\renewcommand{\Im}[1]{\operatorname{Im}{#1}}
\renewcommand{\operatorname{dim}}{\operatorname{dim}}
\renewcommand{\H}[2][]{\operatorname{H}_{#1}(#2)}
\renewcommand{\Im}[1]{\operatorname{Im}{#1}}
\mathfrak{n}ewcommand{\mathfrak{m}}{\mathfrak{m}athfrak{m}}
\mathfrak{n}ewcommand{\mathfrak{n}}{\mathfrak{m}athfrak{n}}
\mathfrak{n}ewcommand{\mathrm{edim}}{\mathfrak{m}athrm{edim}}
\mathfrak{n}ewcommand{\comp}[1]{\widehat{#1}}
\mathfrak{n}ewcommand{\Gdimv}[2][\varphi]{\operatorname{G-dim}_{#1}#2}
\mathfrak{n}ewcommand{\Gdimr}[2][R']{\operatorname{G-dim}_{#1}#2}
\mathfrak{n}ewcommand{\mathfrak{p}dv}[2][\varphi]{\operatorname{pd}_{#1}#2}
\mathfrak{n}ewcommand{\mathfrak{p}dr}[2][R']{\operatorname{pd}_{#1}#2}
\mathfrak{n}ewcommand{\idr}[2][R']{\operatorname{id}_{#1}#2}
\mathfrak{n}ewcommand{\idv}[2][\varphi]{\operatorname{id}_{#1}#2}
\mathfrak{n}ewcommand{\mathrm{amp}}{\mathfrak{m}athrm{amp}}
\mathfrak{n}ewcommand{\Gpdr}[2][R']{\operatorname{Gpd}_{#1}#2}
\mathfrak{n}ewcommand{\Gpdv}[2][\varphi]{\operatorname{Gpd}_{#1}#2}
\mathfrak{n}ewcommand{\operatorname{Gpd}}{\operatorname{Gpd}}
\mathfrak{n}ewcommand{\operatorname{Gid}}{\operatorname{Gid}}
\mathfrak{n}ewcommand{\operatorname{Gfd}}{\operatorname{Gfd}}
\mathfrak{n}ewcommand{\Thb}[2]{#2_{{\scriptscriptstyle\ge}#1}}
\mathfrak{n}ewcommand{\Gidr}[2][R']{\operatorname{Gid}_{#1}#2}
\mathfrak{n}ewcommand{\Gidv}[2][\varphi]{\operatorname{Gid}_{#1}#2}
\mathfrak{n}ewcommand{\Gfd}[2][R]{\operatorname{Gfd}_{#1}#2}
\mathfrak{n}ewcommand{\Gfdd}[2][S]{\operatorname{Gfd}_{#1}#2}
\mathfrak{n}ewcommand{\Gfdr}[2][R']{\operatorname{Gfd}_{#1}#2}
\mathfrak{n}ewcommand{\Gfdv}[2][\varphi]{\operatorname{Gfd}_{#1}#2}
\mathfrak{n}ewcommand{\Gfdhat}[2][\comp{R}]{\operatorname{Gfd}_{#1}#2}
\mathfrak{n}ewcommand{\Gdimvv}[2][\widehat{\varphi}]{\operatorname{G-dim}_{#1}#2}
\mathfrak{n}ewcommand{\dpts}[2][\comp{S}]{\operatorname{depth}_{#1}#2}
\mathfrak{n}ewcommand{\dptr}[2][R']{\operatorname{depth}_{#1}#2}
\mathfrak{n}ewcommand{\edim}[1]{\operatorname{edim}#1}
\mathfrak{n}ewcommand{\rid}[2][R]{\operatorname{rid}_{#1}#2}
\mathfrak{n}ewcommand{\ridd}[2][S]{\operatorname{rid}_{#1}#2}
\mathfrak{n}ewcommand{\ridr}[2][R']{\operatorname{rid}_{#1}#2}
\mathfrak{n}ewcommand{\Rid}[2][R]{\operatorname{Rid}_{#1}#2}
\mathfrak{n}ewcommand{\Ridd}[2][S]{\operatorname{Rid}_{#1}#2}
\mathfrak{n}ewcommand{\Ridr}[2][R']{\operatorname{Rid}_{#1}#2}
\mathfrak{n}ewcommand{\rfd}[2][R]{\operatorname{rfd}_{#1}#2}
\mathfrak{n}ewcommand{\rfdd}[2][S]{\operatorname{rfd}_{#1}#2}
\mathfrak{n}ewcommand{\rfdr}[2][R']{\operatorname{rfd}_{#1}#2}
\mathfrak{n}ewcommand{\Rfd}[2][R]{\operatorname{Rfd}_{#1}#2}
\mathfrak{n}ewcommand{\Rfdd}[2][S]{\operatorname{Rfd}_{#1}#2}
\mathfrak{n}ewcommand{\Rfdr}[2][R']{\operatorname{Rfd}_{#1}#2}
\mathfrak{n}ewcommand{\rpd}[2][R]{\operatorname{rpd}_{#1}#2}
\mathfrak{n}ewcommand{\rpdd}[2][S]{\operatorname{rpd}_{#1}#2}
\mathfrak{n}ewcommand{\rpdr}[2][R']{\operatorname{rpd}_{#1}#2}
\mathfrak{n}ewcommand{\Rpd}[2][R]{\operatorname{Rpd}_{#1}#2}
\mathfrak{n}ewcommand{\Rpdd}[2][S]{\operatorname{Rpd}_{#1}#2}
\mathfrak{n}ewcommand{\Rpdr}[2][R']{\operatorname{Rpd}_{#1}#2}
\mathfrak{n}ewcommand{\Rfdv}[2][\varphi]{\operatorname{Rfd}_{#1}#2}
\mathfrak{n}ewcommand{\rfdv}[2][\varphi]{\operatorname{rfd}_{#1}#2}
\mathfrak{n}ewcommand{\Ridv}[2][\varphi]{\operatorname{Rid}_{#1}#2}
\mathfrak{n}ewcommand{\ridv}[2][\varphi]{\operatorname{rid}_{#1}#2}
\mathfrak{n}ewcommand{\rpdv}[2][\varphi]{\operatorname{rpd}_{#1}#2}
\mathfrak{n}ewcommand{\Rpdv}[2][\varphi]{\operatorname{Rpd}_{#1}#2}
\mathfrak{n}ewcommand{\operatorname{depth}_{\comp{S}}}{\operatorname{depth}_{\comp{S}}}
\mathfrak{n}ewcommand{\operatorname{depth}_{R'}}{\operatorname{depth}_{R'}}
\mathfrak{n}ewcommand{\operatorname{cmd}}{\operatorname{cmd}}
\mathfrak{n}ewcommand{\Ltpi}[3][R']{\mathfrak{n}obreak{#2\otimes_{#1}^{\mathfrak{m}athbf{L}}#3}}
\mathfrak{n}ewcommand{\mathfrak{p}}{\mathfrak{m}athfrak{p}}
\mathfrak{n}ewcommand{\operatorname{Spec}}{\operatorname{Spec}}
\mathfrak{n}ewcommand{\wdts}[2][\comp{S}]{\operatorname{width}_{#1}#2}
\mathfrak{n}ewcommand{\wdtr}[2][R']{\operatorname{width}_{#1}#2}
\mathfrak{n}ewcommand{\Gidvv}[2][\sigma\varphi]{\operatorname{rid}_{#1}#2}
\mathfrak{n}ewcommand{\mathfrak{p}dvv}[2][\sigma]{\operatorname{pd}_{#1}#2}
\mathfrak{n}ewcommand{\wdttt}[2][T]{\operatorname{width}_{#1}#2}
\mathfrak{n}ewcommand{\dpttt}[2][T]{\operatorname{depth}_{#1}#2}
\mathfrak{n}ewcommand{\operatorname{End}}{\operatorname{End}}
\mathfrak{n}ewcommand{\operatorname{Add}}{\operatorname{Add}}
\mathfrak{n}ewcommand{\operatorname{add}}{\operatorname{add}}
\mathfrak{n}ewcommand{\operatorname{Prod}}{\operatorname{Prod}}
\mathfrak{n}ewcommand{\operatorname{fin.inj.dim}}{\operatorname{fin.inj.dim}}
\mathfrak{n}ewcommand{\operatorname{Fin.inj.dim}}{\operatorname{Fin.inj.dim}}
\mathfrak{n}ewcommand{\operatorname{Findim}}{\operatorname{Findim}}
\mathfrak{n}ewcommand{\operatorname{findim}}{\operatorname{findim}}
\renewcommand{\mathfrak{m}od}{\operatorname{mod}}
\mathfrak{n}ewcommand{R^\circ}{R^\circ}
\mathfrak{n}ewcommand{\:\is\:}{\:\cong\:}
\mathfrak{n}ewcommand{\eqclbl}[1]{{\upshape(\textit{#1})}}
\mathfrak{n}ewcommand{\varphi}{\varphi}
\mathfrak{n}ewcommand{\:=\:}{\:=\:}
\mathfrak{n}ewcommand{\Bo}[2][]{\operatorname{B}_{#1}(#2)}
\mathfrak{n}ewcommand{\set}[2][\mathfrak{m}space{1mu}]{\{#1 #2 #1\}}
\mathfrak{n}ewcommand{\mathrm{rid}}{\mathfrak{m}athrm{rid}}
\mathfrak{n}ewcommand{\mathrm{id}}{\mathfrak{m}athrm{id}}
\mathfrak{n}ewcommand{\mathrm{Rid}}{\mathfrak{m}athrm{Rid}}
\mathfrak{n}ewcommand{\mathrm{Ext}}{\mathfrak{m}athrm{Ext}}
\renewcommand{\mathrm{Hom}}{\mathfrak{m}athrm{Hom}}
\mathfrak{n}ewcommand{\mathrm{Gid}}{\mathfrak{m}athrm{Gid}}
\mathfrak{n}ewcommand{\mathrm{pd}}{\mathfrak{m}athrm{pd}}
\mathfrak{n}ewcommand{\mathrm{id}}{\mathfrak{m}athrm{id}}
\newtheorem{cor}[thm]{Corollary}
\def\str#1{\mathbf {#1}}
\def\ostr#1{{\overrightarrow{\mathbf {#1}}}}
\def\arity#1{a(\rel{}{#1})}
\def\nbrel#1#2{R_{#1}^{#2}}
\def\rel#1#2{R_{\mathbf{#1}}^{#2}}
\def\func#1#2{F_{\mathbf{#1}}^{#2}}
\def\relfunc#1#2{R_{\mathbf{#1}^-}^{F#2}}
\def\F{{\mathcal F}}
\def\K{{\mathcal K}}
\def\Fraisse{Fra\"{\i}ss\' e}
\def\proclaim#1{\vskip.3cm\noindent{\bf#1.}\quad\it}
\def\endproclaim{\par\vskip.3cm\rm}% NOTE(review): name guessed; original was clobbered by find-replace
\begin{document}
\begin{CJK*}{UTF8}{song}
\CJKtilde
\thispagestyle{empty}
\begin{titlepage}
~~~
{\bf \LARGE
\begin{center} 树结构和图同态
\end{center}}
\centerline{\bf \Large 龙旸靖}
\hspace{4cm}{合~~作~~导~~师:\ \ \underline{~~~~~~~~~~吴耀琨教授~~~~~~~~~~~~}}
\hspace{4cm}{专~~业~~名~~称:\ \ \underline{~~~~~~~~~~~~~~~~数~学~~~~~~~~~~~~~}}
\hspace{4cm}{完~~成~~日~~期:\ \ \underline{2015 年3月--2017 年9 月}}
\hspace{4cm}{提~~交~~日~~期:\ \ \underline{ ~~~~~~~~~~2017 年9 月~~~~~~~~~~}}
{\large \bf
\begin{center}
上海交通大学\ 数学科学学院
\end{center}}
{\large \bf
\begin{center} { 2017年9月}
\end{center}}
\end{titlepage}
\end{CJK*}
\newpage\thispagestyle{empty}
\begin{titlepage}
~~~
{\bf \LARGE
\begin{center} Phylogenetic trees and homomorphisms
\end{center}}
\centerline{\bf \Large Yangjing Long}
{\large \bf
\begin{center}
School of Mathematical Sciences\\ Shanghai Jiaotong University \\
Shanghai, China
\end{center}}
{\large \bf
\begin{center} { September\ \ 2017}
\end{center}}
\end{titlepage}
\newpage\thispagestyle{empty}
~~~~~
\newpage
\pagenumbering{roman}
~~~
\begin{CJK*}{UTF8}{song}
\addcontentsline{toc}{chapter}{\numberline {}{\bf 摘要 }}
{\LARGE \bf \begin{center} 摘\ \ 要
\end{center}}
\hspace{0.23cm}
第一章中我们完全刻画了全同态序中形成间隔的有限图对。通过这一刻画可以给出推广对偶对的存在性的一个简单证明。
这一结果不仅对于无向图成立,并且对于有向图和双边的关系结构也成立。
\hspace{0.23cm}第二章中我们给出了一个图同态序的普适性的新的简单证明,并且讨论了这一证明的一些应用。
\hspace{0.23cm}第三章中我们用第二章中的简单证明,证明了图同态序的分形这一美妙的性质。
\hspace{0.23cm}第四章中我们从组合学的角度分析了系统发生信息。考虑任意两个叶子节点
之间的路径上的标号是孤一关系还是非孤一关系。我们证明了表示这种双边关系的图表示一定是树,并且我们完整刻画了这种树和其对应的系统发生树。
\hspace{0.23cm}第五章中我们给出了符号标记无根树和符合三元度量的一一对应。
\medskip
\medskip
\noindent{\bf 关键词:}
图同态; 同态序; 稠密; 普适性; 全同态;
间隔; 分形性质;
系统发生组合学; 稀有事件 ; 双边关系;
符合三元度量; 中点 ; 无根系统发生树
\end{CJK*}
\newpage
~~~
\addcontentsline{toc}{chapter}{\numberline {}{\bf Abstract }}
{\LARGE \bf \begin{center} Abstract\end{center}}
In Chapter 1
we fully characterise pairs of finite graphs which form a gap in
the full homomorphism order. This leads to a simple proof of the existence
of generalised duality pairs. We also discuss how such results can be carried to
relational structures with unary and binary relations.
In Chapter 2 we show a very simple and versatile argument based on divisibility
which immediately yields the universality of the homomorphism order of directed
graphs and discuss three applications.
In Chapter 3, we show that every interval in the homomorphism order of finite
undirected graphs is either universal or a gap.
Together with density and universality this ``fractal'' property contributes to
the spectacular properties of the homomorphism order.
In Chapter 4 we analyze the
phylogenetic information content from a combinatorial point of view by
considering the binary relation on the set of taxa defined by the existence of
a single event separating two taxa. We show that the graph-representation of
this relation must be a tree. Moreover, we characterize completely the
relationship between the tree of such relations and the underlying phylogenetic
tree.
In 1998, B\"{o}cker and Dress gave a 1-to-1 correspondence
between symbolically
dated rooted trees and symbolic ultrametrics.
In Chapter 5 we consider the corresponding problem for unrooted trees.
More precisely, given a tree $T$ with leaf set $X$ and a proper
vertex colouring of its interior vertices, we can map every triple
of three different leaves to the colour of its median vertex. We
characterise all ternary maps that can be obtained in this way in terms
of 4- and 5-point conditions, and we show that the corresponding tree
and its colouring can be reconstructed from a ternary map that
satisfies those conditions. Further, we give an additional
condition that characterises whether the tree is binary,
and we describe an algorithm that reconstructs general trees in
a bottom-up fashion.
\medskip
\medskip
\noindent{\bf Key Words:}
graph homomorphism ; homomorphism order; density; universality; full homomorphism;
gaps; fractal property;
Phylogenetic Combinatorics; Rare events ; Binary
relations;
symbolic ternary metric; median vertex ; unrooted phylogenetic tree
\setlength{\topskip}{-35mm} \tableofcontents
\newpage
\thispagestyle{empty} ~~~
~~~~~
\newpage
\newpage
\renewcommand{\baselinestretch}{1.2}% NOTE(review): original macro name mangled to its value (1.5); \baselinestretch is the most likely intent -- confirm
\pagenumbering{arabic}
\chapter{A brief introduction}
This thesis is a combination of several published or submitted papers I wrote together with my coauthors during my postdoc period
at Shanghai Jiao Tong University.
The papers are listed below and each chapter is based on each paper with the same order number.
Due to the limit of the length of this thesis I decide to skip the introduction and preliminary Chapter. The necessary preliminary and introduction
can be found in the beginning of each chapter. One can also find my detailed introduction and preliminary in the Introduction section
of my PhD thesis~\cite{Long14}.
\begin{itemize}
\item[1.] Gaps in full homomorphism order, joint with J. Fiala, J. Hubi\v cka,
{\it Electronic Notes in Discrete Mathematics}, (2017), {\bf 61}, 429--435
\item[2.] An universality argument for graph homomorphisms, joint with J.
Fiala, J. Hubi\v cka, {\it Electronic Notes in Discrete Mathematics}, (2015),
{\bf 49}, 643--649.
\item[3.] Fractal property of homomorphism order, joint with J. Fiala, J.
Hubi\v cka, J. Ne\v set\v ril,
{\it European Journal of Combinatorics}, in press,
{\it http://www.sciencedirect.com/science/article/pii/S0195669817300914}
\item[4.] Inferring Phylogenetic Trees from the Knowledge of Rare Evolutionary
Events, joint with M. Hellmuth, M. Hernandez-Rosales, P. F. Stadler,
to appear in {\it Journal of Mathematical Biology},
available at {\it https://arxiv.org/abs/1612.09093}
\item[5.] Reconstructing unrooted phylogenetic trees from symbolic ternary
metrics, joint with S. Gr\" unewald, Y. Wu,
minor revision in {\it Bulletin of Mathematical Biology},
available at {\it https://arxiv.org/abs/1702.00190}
\end{itemize}
\chapter{Gaps in full homomorphism order}
\section{Introduction}
For given graphs $G=(V_G,E_G)$ and $H=(V_H,E_H)$ a {\em homomorphism} $f:G\to H$
is a
mapping $f:V_G\to V_H$ such that $\{u,v\}\in E_G$ implies $\{f(u),f(v)\}\in
E_H$. (Thus it is an edge preserving mapping.)
The existence of a homomorphism $f:G\to H$ is traditionally denoted by $G\to
H$.
This allows us to consider the existence of a homomorphism, $\to$, to be a
(binary) relation on the class of graphs.
A homomorphism $f$ is {\em full} if $\{u,v\}\notin E_G$ implies
$\{f(u),f(v)\}\notin
E_H$. (Thus it is an edge and non-edge preserving mapping.) Similarly we will
denote by $G\mathop{\xrightarrow{_F}} H$ the existence of a full homomorphism $f:G\to H$.
As it is well known, the relations $\to$ and $\mathop{\xrightarrow{_F}}$ are reflexive (the
identity is a
homomorphism) and transitive (a composition of two homomorphisms is still a
homomorphism). Thus the existence of a homomorphism as well as the existence of
full homomorphisms induces a quasi-order on
the class of all finite graphs. We denote the quasi-order induced by
the existence of homomorphisms and the existence of full homomorphisms on finite
graphs by ($\Graphs,\leq)$ and ($\Graphs,\mathop{\leq^F})$ respectively.
(Thus when speaking of orders, we use $G\leq H$ in the same sense as $G\to H$
and $G\mathop{\leq^F} H$ in the sense $G\mathop{\xrightarrow{_F}} H$.)
These quasi-orders can be easily transformed
into partial orders by choosing a particular representative for each
equivalence class. In the case of graph homomorphism such representative is up
to isomorphism unique vertex minimal element of each class, the {\em (graph)
core}. In the case
of full homomorphisms we will speak of {\em F-core}.
The study of homomorphism order is a well established discipline and one
of main topics of nowadays classical monograph of Hell and
Ne\v{s}et\v{r}il~\cite{Hell2004}. The order $(\Graphs,\mathop{\leq^F})$ is a topic
of several publications \cite{Xie2006,Feder2008,Hell2013,Ball2010,Fiala} which
are primarily concerned about the full homomorphism equivalent of the
homomorphism
duality~\cite{Nesetril2000}.
In this work we further contribute to this line of research by characterising
{\em F-gaps} in $(\Graphs,\mathop{\leq^F})$. That is pairs of non-isomorphic F-cores
$G\mathop{\leq^F} H$ such
that every F-core $H'$, $G\mathop{\leq^F} H'\mathop{\leq^F} H$, is isomorphic either to $G$
or $H$. We will show:
\begin{thm}
\label{thm:fullgap}
If $G$ and $H$ are F-cores and $(G,H)$ is an F-gap, then $G$ can be obtained
from $H$
by removal of one vertex.
\end{thm}
First we show a known fact that F-cores correspond to point-determining graphs
which have been studied in 70's by Sumner~\cite{Sumner1973} (c.f. Feder and
Hell~\cite{Feder2008}). We also show that there is a full homomorphism between
two F-cores if
and only if there is an embedding from one to another (see~\cite[Section
3]{Feder2008}). These two observations shed a lot of light into the nature of
full homomorphism order and makes the
characterisation of F-gaps look particularly innocent (clearly gaps in embedding
order are characterised by an equivalent of Theorem~\ref{thm:fullgap}). The
arguments in this area are however surprisingly subtle. This becomes even more
apparent when one generalise the question to classes of graphs as done
by Hell and Hern\'andez-Cruz~\cite{Hell2013} where both results of
Sumner~\cite{Sumner1973} and Feder and Hell~\cite{Feder2008} are given for
digraphs by new arguments using what one could consider to be surprisingly
elaborate (and interesting) machinery needed to carry out the analysis.
We focus on minimising arguments about the actual structure of graphs and
use approach which generalises easily to digraphs and binary
relational structures in general (see Section~\ref{sec:relational}). In
Section~\ref{sec:pd} we outline the connection of point determining graphs and
F-cores. In Section~\ref{sec:gaps} we show proof of the main result.
In Section~\ref{sec:dualities} we show how the existence of gaps leads to a
particularly
easy proof of the existence of generalised dualities (main results
of~\cite{Xie2006,Feder2008,Hell2013,Ball2010}).
\section{F-cores are point-determining}
\label{sec:pd}
In a graph $G$, the {\em neighbourhood} of a vertex $v\in V_G$, denoted
by $N_G(v)$, is the set of all vertices $v'$ of $G$ such that $v$ is adjacent to
$v'$ in $G$. {\em Point-determining graphs} are
graphs in which no two vertices have the same neighbourhoods. If we start with
any graph $G$, and gradually merge vertices with the same neighbourhoods, we
obtain a point-determining graph, denoted by $G_{\mathfrak{m}athrm{pd}}$.
We write $G\mathop{\sim^{F}} H$ for any pairs of graphs such that $G\mathop{\xrightarrow{_F}} H$ and
$H\mathop{\xrightarrow{_F}} G$.
It is easy to observe that $G_{\mathrm{pd}}$ is always an induced subgraph of
$G$. Moreover, for every graph $G$ it holds that $G_{\mathrm{pd}}\mathop{\xrightarrow{_F}}
G\mathop{\xrightarrow{_F}} G_{\mathrm{pd}}$ and thus $G\mathop{\sim^{F}} G_{\mathrm{pd}}$. This
motivates the following proposition:
\begin{prop}[\cite{Feder2008}]
\label{prop:F-core}
A finite graph $G$ is an F-core if and only if it is point-determining.
\end{prop}
\begin{proof}
Recall that $G$ is an F-core if it is minimal (in the number of vertices) within
its
equivalence class of $\mathop{\sim^{F}}$. If $G$ is an F-core, $G_{\mathrm{pd}}$ can
not be smaller than $G$ and thus $G=G_{\mathrm{pd}}$.
It remains to show that every point-determining graph is an F-core.
Consider two point-determining graphs $G\mathop{\sim^{F}} H$ that are not isomorphic.
There are
full homomorphisms $f:G\mathop{\xrightarrow{_F}} H$ and $g:H\mathop{\xrightarrow{_F}} G$. Because injective full
homomorphisms are embeddings, it follows that either $f$ or $g$ is not
injective. Without loss of generality, assume that $f$ is not injective.
Consider $u,v\in V_G$, $u\neq v$, such
that $f(u)=f(v)$. Because full homomorphisms preserve both edges and non-edges,
the preimage of any edge is a complete bipartite graph.
If we apply this fact on edges incident with $f(u)$,
we derive that $N_G(u)=N_G(v)$.
\end{proof}
\begin{prop}[\cite{Feder2008,hell2014connected}]
\label{prop:thincmp}
For F-cores $G$ and $H$ we have $G\mathop{\xrightarrow{_F}} H$ if and only if $G$ is an induced
subgraph of $H$.
\end{prop}
\begin{proof}
Embedding is a special case of a full homomorphism. In the opposite direction
consider a full homomorphism $f:G \mathop{\xrightarrow{_F}} H$. By the same
argument as in the proof of Proposition \ref{prop:F-core} we get that $f$ is
injective,
as otherwise $G$ would not be point-determining.
\end{proof}
\section{Main result: characterisation of F-gaps}
\label{sec:gaps}
Given a graph $G$ and a vertex $v\in V_G$ we denote by $G\setminus v$ the graph
created from $G$ by removing vertex $v$.
We say that vertex $v$ \emph{determines} a pair of vertices $u$ and $u'$ if
$N_{G\setminus v}(u)=N_{G\setminus v}(u')$.
This relation (pioneered in \cite{Feder2008} and used
in~\cite{Feder2008,Hell2013,Xie2006}) will play a key role in our analysis.
We make use of the following Lemma:
\begin{lem}
\label{lem:determining}
Given a graph $G$ and a subset $A$ of the set of vertices of $G$ denote by $L$ a
graph on the vertices of $G$, where $u$ and $u'$ are adjacent if and only if
there
is $v\in A$ that determines $u$ and $u'$. Let $S$ be any spanning tree of $L$.
Denote by $B\subseteq A$ the set of vertices that determine some pair of
vertices connected by an
edge of $L$ and by $C\subseteq B$ set of vertices that determine some pair of
vertices connected by
an edge of $S$. Then $B=C$.
\end{lem}
\begin{proof}
Because for every pair of vertices there is at most one vertex determining them
clearly $C\subseteq B\subseteq A$.
Assume to the contrary that there is vertex $v\in B\setminus C$ and thus every
pair determined by $v$ is an edge of $L$ but not an edge of $S$. Denote by
$\{u,u'\}$ some such edge of $L$ determined by $v\in B$.
Adding this edge to $S$ closes a cycle. Denote by $u=v_1,
v_2,\ldots v_n=u'$ the vertices of $G$ such that every consecutive pair is an
edge of $S$. Without loss of generality, we can assume that $v\in N_G(v_1)$ and
$v\notin
N_G(v_n)$. Because $v\in N_G(v_i)$ implies $v\in N_G(v_{i+1})$ unless $v$
determines pair $\{v_i,v_{i+1}\}$ we also know that there is $1\leq i<n$
such that $v$ determines $v_i$ and $v_{i+1}$. A contradiction with the fact
that
$\{v_i, v_{i+1}\}$ forms an edge of $S$.
\end{proof}
As a warmup we show the following theorem which also follows by
\cite{Sumner1973} (also shown as Corollary 3.2 in \cite{Feder2008} for graphs
and \cite{Hell2013} for digraphs):
\begin{thm}[\cite{Sumner1973,Feder2008,Hell2013}]
\label{thm:sub}
Every F-core $G$ with at least 2 vertices contains an $F$-core with $|V_G|-1$
vertices as an induced subgraph.
\end{thm}
\begin{proof}
Denote by $n$ the number of vertices of $G$.
If there is a vertex $v$ of $G$ such that the graph $G\setminus v$ is
point-determining,
it is the desired F-core. Consider the graph $S$ as in Lemma~\ref{lem:determining}
where $A$ is the
vertex set of $G$. Because $S$ has at most $n-1$ edges and every edge of $S$ is
determined
by at most one vertex, we know that there is a vertex $v$ which does not determine
any
pair of vertices and thus $G\setminus v$ is point-determining.
\end{proof}
In fact both \cite{Sumner1973,Hell2013} show that every F-core $G$ with at
least 2 vertices
contains vertices $v_1\neq v_2$ such that both $G\setminus v_1$ and $G\setminus
v_2$ are F-cores.
This follows by our argument, too, but needs a bit more detailed analysis. The
main idea of the
following proof of Theorem~\ref{thm:fullgap} can also be adapted to show this.
\begin{proof}(of Theorem~\ref{thm:fullgap})
Assume to the contrary that there are F-cores $G$ and $H$ such that $(G,H)$ is
an F-gap, but $G$ differs from $H$ by more than one vertex. By induction we
construct two infinite sequences of vertices of $H$ denoted by
$u_0,u_1,\ldots$ and $v_0,v_1,\ldots$ along with two infinite sequences of
induced subgraphs of $H$ denoted by $G_0,G_1,\ldots$ and $G'_0,G'_1,\ldots$ such
that for every $i\geq 0$ it holds that:
\begin{enumerate}
\setlength\itemsep{0em}
\item $G_i$ and $G'_i$ are isomorphic to $G$,
\item $G_i$ does not contain $u_i$ and $v_i$,
\item $G'_i$ does not contain $u_i$ and $v_{i+1}$,
\item $u_i$ and $u_{i+1}$ is determined by $v_i$, and,
\item $v_i$ and $v_{i+1}$ is determined by $u_i$.
\end{enumerate}
Put $G_0=G$ and $A=V_H\setminus V_G$.
Consider the spanning tree $S$ given by Lemma~\ref{lem:determining}.
Because no vertex of $A$ can be removed to obtain an induced
point-determining subgraph, it follows that every vertex must have a
corresponding edge in $S$. Consequently the number of edges of $S$ is at least
$|A|$. Because $G$ itself is point-determining, it follows that every edge of
$S$ must contain at least one vertex of $A$. These two conditions yields to
the pair of vertices $v_0\in A=V_H\setminus V_G$ and $v_1\in V_G$ connected by
an
edge in $S$ and consequently we have a vertex $u_0\in A$ which determines them.
We have obtained $G_0, u_0, v_0, v_1$ with the desired properties.
This finishes the initial step of the induction.
\mathfrak{m}edskip
At the induction step assume we have constructed $G_i, u_i, v_i, v_{i+1}$.
We show the construction of $G'_i$ and $u_{i+1}$. We consider two cases.
If $v_{i+1}\mathfrak{n}otin V_{G_i}$ we put $G'_i=G_i$. If $v_{i+1}\in V_{G_i}$ we let
$G'_i$ to be
the graph induced by $H$ on $(V_{G_i}\setminus\{v_{i+1}\})\cup \{v_i\}$.
Because the neighbourhood of $v_i$ and $v_{i+1}$ differs only by a vertex
$u_i\mathfrak{n}otin G_i$ which
determines them we know that $G'_i$ is isomorphic to $G_i$ (and thus also to
$G$) and moreover that $u_i$ is not a vertex of $G'_i$ (because $u_i\mathfrak{n}otin
V_{G_i}$ can not determine itself and thus $u_i\mathfrak{n}eq v_i$). If $H$ was
point-determining
after removal of $v_{i+1}$ we would obtain a contradiction similarly as before.
We can thus assume that $v_{i+1}$
determines at least one pair of vertices. Because neighbourhood $v_{i+1}$ and
$v_i$ differs only by $u_i$
we know that one vertex of this pair is $u_i$. Denote by $u_{i+1}$ the second
vertex.
Given $G'_i, u_i, u_{i+1}, v_{i+1}$ we proceed similarly.
If $u_{i+1}\mathfrak{n}otin V_{G'_i}$ we put $G_{i+1}=G'_i$. If $u_{i+1}\in V_{G'_i}$ we
let $G_{i+1}$ to be
the graph induced by $H$ on $(V_{G'_i}\setminus\{u_{i+1}\})\cup \{u_i\}$.
Again $G_{i+1}$ is isomorphic to $G$ and does not contain $u_{i+1}$ nor
$v_{i+1}$. Denote by $v_{i+2}$ a vertex determined by $u_{i+1}$ from
$v_{i+1}$ (which again must exist by our assumption) and we have obtained
$G_{i+1}, u_{i+1}, v_{i+1}, v_{i+2}$ with the desired properties.
This finishes the inductive step of the construction.
\medskip
Because $H$ is finite, we know that both sequences $u_0,u_1,\ldots$ and
$v_0,v_1,\ldots$ contain repeated vertices.
Without loss of generality we can assume that the repeated vertex with the
lowest index
$j$ appears in the first sequence. We thus have $u_j=u_i$ for some $i< j$. By
minimality of $j$ we can assume
that $v_i,v_{i+1},\ldots v_{j-1}$ are all unique. Assume that $v_i$ is
in the neighbourhood of $u_i$, then $v_i$ is not in the neighbourhood of
$u_{i+1}$ (because it determines this pair) and consequently also
$u_{i+1},u_{i+2},\ldots,u_{j}$.
A contradiction with $u_j=u_i$. If $v_i$ is not in the neighbourhood of $u_i$ we
proceed analogously.
\end{proof}
\section{Generalised dualities always exist}
\label{sec:dualities}
To demonstrate the usefulness of Theorem~\ref{thm:fullgap} and
Propositions~\ref{prop:F-core} and~\ref{prop:thincmp}
we give a simple proof of the existence of generalised dualities in the full
homomorphism
order.
For two finite sets of graphs $\mathcal{F}$ and $\mathcal{D}$ we say that
$(\mathcal{F},\mathcal{D})$ is a {\em generalised finite $F$-duality pair}
(sometimes also {\em $\mathcal D$-obstruction}) if for any graph $G$ there
exists $F\in \mathcal{F}$ such that $F\mathop{\xrightarrow{_F}} G$ if and only if $G\mathop{\xrightarrow{_F}} D$
for no $D\in \mathcal{D}$.
Existence of (generalised) dualities has several consequences. To mention one,
it implies that the decision problem ``given a graph $G$ is there $D\in
\mathcal{D}$ and a full homomorphism $G\to D$?'' is polynomial time solvable for
every fixed finite family $\mathcal D$ of finite graphs.
In the graph homomorphism order the dualities (characterised
in~\cite{Nesetril2000}) are rare. In the case of full homomorphisms they are
however always guaranteed to exist.
\begin{thm}[\cite{Xie2006,Feder2008,Hell2013,Ball2010}]
For every finite set of graphs $\mathcal{D}$ there is a finite set of graphs
$\mathcal{F}$ such that $(\mathcal{F},\mathcal{D})$ is a generalised finite
F-duality pair.
\end{thm}
\begin{proof}
Without loss of generality assume that $\mathcal D$ is a non-empty set of
F-cores.
Consider the set $\mathcal X$ of all F-cores $G$ such that there is $D\in \mathcal
D$, $G\to D$.
Because, by Proposition~\ref{prop:thincmp},
the number of vertices of every such $G$ is bounded from above by the number of
vertices of $D$ and because $\mathcal D$
is finite, we know that $\mathcal X$ is finite.
Now denote by $\mathcal{F}$ the set of all F-cores $H$ such that $H\notin
\mathcal X$ and there
is $G\in \mathcal X$ such that $(G,H)$ is a gap. By Theorem~\ref{thm:fullgap}
this set is finite.
We show that $(\mathcal{F},\mathcal{D})$ is a duality pair.
Consider an F-core $G$, either $G\in \mathcal X$ and thus there is $D\in
\mathcal
D$, $G\to D$ or $G\notin \mathcal X$ and then consider a sequence of $F$-cores
$G_1,G_2,\ldots, G_{|G|}=G$ such that $G_1\in \mathcal X$ consists of a single
vertex, $G_{i+1}$ is created from $G_i$ by adding a single vertex for every
$1\leq i<|G|$ (such a sequence exists by Theorem~\ref{thm:sub}). Clearly there is
$1\leq j<|G|$ such that $G_j\in \mathcal X$
and $G_{j+1}\notin \mathcal X$. Because $(G_j,G_{j+1})$ forms a gap, we know
that $G_{j+1}\in \mathcal F$.
\end{proof}
\begin{remark}
A stronger result is shown by Feder and Hell \cite[Theorem 3.1]{Feder2008} who
show
that if $\mathcal D$ consists of a single graph $G$ with $k$ vertices, then
$\mathcal F$ can
be chosen in a way so it contains graphs with at most $k+1$ vertices and there
are at
most two graphs having precisely $k+1$ vertices.
While, by Theorem~\ref{thm:fullgap}, we can also give the same upper bound on
the number
of vertices of graphs in $\mathcal F$, it does not really follow that there
are at most two graphs needed. It appears that the full machinery
of~\cite{Feder2008} is necessary
to prove this result.
In the opposite direction it does not seem to be possible to derive
Theorem~\ref{thm:fullgap}
from this characterisation of dualities, because given a pair of non-isomorphic
F-cores $G\mathop{\xrightarrow{_F}} H$
and $\mathcal D$ a full homomorphism dual of $\{G\}$ it does not hold that for a
graph $F\in \mathcal D$
such that $F\mathop{\xrightarrow{_F}} H$ there is also a full homomorphism $G\mathop{\xrightarrow{_F}} H$.
\end{remark}
\section{Full homomorphisms of relational structures}
\label{sec:relational}
To date, the full homomorphism order has been analysed in the context
of graphs and digraphs only. Let us introduce the generalised setting of
relational structures:
A language $L$ is a set of relational symbols $\rel{}{}\in L$, each associated
with a natural number $\arity{}$ called \emph{arity}.
A \emph{(relational) $L$-structure} $\str{A}$ is a pair
$(A,(\rel{A}{};\rel{}{}\in L))$ where $\rel{A}{}\subseteq A^{\arity{}}$ (i.e.
$\rel{A}{}$ is an $\arity{}$-ary relation on $A$). The set $A$ is called the
\emph{vertex set} of $\str{A}$ and elements of $A$ are \emph{vertices}. The
language is usually fixed and understood from the context. If the set $A$ is
finite we call \emph{$\str A$ a finite structure}. The class of all finite
relational $L$-structures will be denoted by $\mathop{\mathrm{Rel}}\nolimits(L)$.
A \emph{homomorphism} $f:\str{A}\to \str{B}=(B,(\rel{B}{};\rel{}{}\in L))$ is a
mapping $f:A\to B$ satisfying for every $\rel{}{}\in L$ the implication
$(x_1,x_2,\ldots, x_{\arity{}})\in \rel{A}{}\implies
(f(x_1),f(x_2),\ldots,f(x_{\arity{}}))\in \rel{B}{}$. A homomorphism is
\emph{full} if the above implication is an equivalence, i.e. if for every
$\rel{}{}\in L$ we have $(x_1,x_2,\ldots, x_{\arity{}})\in \rel{A}{}\iff
(f(x_1),f(x_2),\ldots,f(x_{\arity{}}))\in \rel{B}{}$.
Given a structure $\str{A}$, its vertex $v$ is contained in a {\em loop} if
there
exists $(v,v,\ldots,\allowbreak v)\in \rel{A}{}$ for some $\rel{}{}\in L$ of
arity at least 2.
Given a relation $\rel{A}{}$ we denote by $\overline{R}_\str{A}$ its complement,
that is the set of all $\arity{}$-tuples $\vec{t}$ of vertices of $A$ that are
not in $\rel{A}{}$.
When considering full homomorphism order in this context, the first problem is
what should be considered to be the neighbourhood of a vertex. This can be
described as follows:
Given an $L$-structure $\str{A}$, a relation $\rel{}{}\in L$ and a vertex
$v\in A$ such
that $(v,v,\ldots, v)\notin \rel{A}{}$ the {\em $\rel{}{}$-neighbourhood} of $v$
in $\str{A}$, denoted by $N^{\rel{}{}}_\str{A}(v)$ is the set of all tuples
$\vec{t}\setminus v$ created from
$\vec{t}\in \rel{A}{}$ containing $v$. Here by $\vec{t}\setminus v$ we denote
the tuple created from $\vec{t}$ by replacing all occurrences of vertex $v$ by a
special symbol $\bullet$ which is not part of any vertex set. If $(v,v,\ldots,
v)\in \rel{A}{}$ then the $\rel{}{}$-neighbourhood $N^{\rel{}{}}_\str{A}(v)$ is
the set of all tuples $\vec{t}\setminus v$ created from $\vec{t}\in
\overline{R}_\str{A}\cup \{(v,v,\ldots, v)\}$. The {\em neighbourhood} of $v$ in
$\str{A}$ is a function assigning every relational symbol its neighbourhood:
$N_\str{A}(v)(R)=N^{\rel{}{}}_\str{A}(v).$
We say that an $L$-structure $\str{A}$ is {\em point-determining} if there are
no
two vertices with the same neighbourhood. With these definitions direct
analogies
of Propositions~\ref{prop:F-core} and \ref{prop:thincmp} for $\mathop{\mathrm{Rel}}\nolimits(L)$ follow.
Analogies of Lemma~\ref{lem:determining}, Theorem~\ref{thm:sub} and
Theorem~\ref{thm:fullgap} do not
follow for relational structures in general. Consider, for example, a
relational structure with three vertices $\{a,b,c\}$
and a single ternary relation $R$ containing one tuple $(a,b,c)$. Such structure
is point-determining, but the only point-determining
substructures consist of single vertex.
There is however deeper problem with carrying Lemma~\ref{lem:determining} to
relational structures: if a pair of
vertices $u,u'$ is determined by vertex $v$ their neighbourhood may differ by
tuples containing additional vertices. Thus
the basic argument about cycles can not be directly applied here. We
consequently formulate results for relational language
consisting of unary and binary relations only (and, as a special case, to
digraphs):
\begin{thm}
\label{thm:fullgap2}
Let $L$ be a language containing relational symbols of arity at most 2. If
$\str{A}$ and $\str{B}$ are (relational) F-cores and $(\str{A},\str{B})$ is an
F-gap, then $\str{A}$ can be obtained from $\str{B}$ by removal of one vertex.
\end{thm}
The example above shows that the limit on arity of relational symbols is
actually necessary. This may be seen as a surprise, because the results about
digraph homomorphism orders tend to generalise naturally to relational
structures and we thus close this paper by an open problem of characterising
gaps in full homomorphism order of relational structures in general.
\chapter{Universality of homomorphism order}
It is a non-trivial result that every countable partial order can be found as a
suborder of the homomorphism order of graphs. This has been first proved in the
even stronger setting of category theory \cite{Pultr1980}.
Subsequently, it has been shown that many restricted classes of graphs (such as
oriented trees \cite{Hubicka2005}, oriented paths \cite{Hubicka2004}, partial
orders and lattices \cite{Lehtonen2008}) admit this universality property.
We show a very simple and versatile argument based on divisibility which
immediately yields the universality of the homomorphism order of directed graphs
and discuss three applications.
\section{Universal partial orders}
In this section we give a construction of a universal partial order.
Let us first review
some basic concept and notations.
In the whole paper we consider only finite and countable partial orders.
An {\em embedding} of a partial order $(Q,\leq_Q)$ in $(P,\leq_P)$ is a mapping
$e:P\to Q$ satisfying
$x\leq_P y$ if and only if $e(x)\leq_Q e(y)$. In such a case we also say that
$(Q,\leq_Q)$ is a {\em suborder} of $(P,\leq_P)$.
For a given partial order $(P,\leq)$, the {\em down-set} $\mathop{\downarrow} x$
is $\{y\in P \mid y\leq x\}$.
Any finite partial order $(P,\leq_P)$ can be represented by finite sets ordered
by the inclusion, e.g.
when $x$ is represented by $\mathop\downarrow x$.
This is a valid embedding, because $\mathop\downarrow x \subseteq\
\mathop\downarrow y$ if and only if $x\leq_P y$.
Without loss of generality we may assume that $P$ is a subset of some fixed
countable set $A$, e.g. $\mathbb N$.
Consequently, the partial order formed by the system $\ensuremath{P_{\mathrm{fin}}}(A)$ of all finite
subsets of $A$
ordered by the inclusion contains any finite partial order as a suborder. Such
orders are called {\em finite-universal}.
We reserve the term {\em universal} for orders that contain every countable
partial order as a suborder.
Finite-universal and universal orders may be viewed as countable orders of rich
structure --- they are of infinite dimension, and that they contain finite
chains, antichains and decreasing chains of arbitrary length. While
finite-univer\-sal partial orders are rather easy to construct, e.g., as the
disjoint union of all finite partial orders, the existence of a universal
partial order can be seen as a counter-intuitive fact: there are uncountably
many countable partial orders, yet all of them can be ``packed'' into a single
countable structure.
The universal partial order can be built in two steps.
For these we need further terminology: An order is {\em past-finite}, if every
down-set is finite.
An order is {\em past-finite-universal} if it contains every past-finite order.
Analogously,
{\em future-finite} and {\em future-finite-universal} orders are defined w.r.t.
finiteness of up-sets.
\noindent
{\bf{1.}} Observe that the mapping $e(x) = \mathop\downarrow x$ is also an
embedding $e:(P,\leq)\to(\ensuremath{P_{\mathrm{fin}}}(A),\subseteq)$ in the case when $(P,\leq)$
is past-finite and $P\subseteq A$.
Since a past-finite partial order turns out to be future-finite when the
direction
of inequalities is reversed, we get:
\begin{prop}
\label{prop:pastfuturefiniteuniv}
For any countably infinite set $A$ it holds that
\begin{itemize}
\item[(i)] the order $(\ensuremath{P_{\mathrm{fin}}}(A),\subseteq)$ is past-finite-universal, and
\item[(ii)] the order $(\ensuremath{P_{\mathrm{fin}}}(A),\supseteq)$ is future-finite-universal.
\end{itemize}
\end{prop}
\noindent
{\bf{2.}}
For a given partial order $(Q,\leq)$ we construct the {\em subset order},\\
$(\ensuremath{P_{\mathrm{fin}}}(Q),\subsetLeq{Q})$, where
$$X\subsetLeq{Q} Y\iff \hbox{ for every }x\in X \hbox{ there exists }y\in Y
\hbox{ such that }x\leq y.$$
We show that the subset order is universal:
\begin{thm}
\label{thm:univ2}
For every future-finite-universal partial order $(F,\leq_F)$ it holds that
$(\ensuremath{P_{\mathrm{fin}}}(F),\subsetLeq{F})$ is universal.
\end{thm}
\begin{proof}[Proof (sketch)]
It is easy to check that $(\ensuremath{P_{\mathrm{fin}}}(F),\subsetLeq{F})$ is indeed a partial order. We
sketch the way to embed any given partial order in $(\ensuremath{P_{\mathrm{fin}}}(F),\subsetLeq{F})$.
Consider an arbitrary countable partial order $(P,\leq_P)$. Without loss of
generality we may assume that $P\subseteq \mathbb{N}$. This
way we enforce a linear order $\leq$ on the elements of $P$. The order $\leq$ is
unrelated to the partial order $\leq_P$.
We decompose $(P,\leq_P)$ into:
\begin{enumerate}
\item The \emph{forward order} $\leq_f$, where $x\leq_f y$ if and only if
$x\leq_P y$ and $x \leq y$, and
\item the \emph{backward order} $\leq_b$, where $x\leq_b y$ if and only if
$x\leq_P y$ and $x \geq y$.
\end{enumerate}
For every $x\in P$ both sets $\{y\mid y\leq_f x\}$ and $\{y\mid x\leq_b y\}$ are
finite.
In other words $(P,\leq_f)$ is past-finite and $(P,\leq_b)$ is future-finite.
Since $(F,\leq_F)$ is future-finite-universal,
there is an embedding $e: (P,\mathop{\leq_b}) \to (F,\leq_F)$.
For every $x\in P$ we now define:
$g(x)=\{e(y)\mid y\leq_f x\}.$
\end{proof}
An example of this construction is depicted in Figure~\ref{fig:sampleposet}. We
chose $F$ to be the set of prime numbers
for reasons that will become clear shortly.
\begin{figure}
\caption{A representation of $(P,\leq_P)$ according to Theorem~\ref{thm:univ2}.}
\label{fig:sampleposet}
\end{figure}
We remark that the embedding $g$ constructed in the proof of Theorem
\ref{thm:univ2} has the property that $g(x)$ depends only on elements $y<x$. Such
embeddings are known as on-line embeddings because they can be constructed
inductively without a-priori knowledge of the whole partial order. See also
\cite{Hubicka2004,Hubicka2005,Hubicka2011} for similar constructions.
By Proposition~\ref{prop:pastfuturefiniteuniv} we see that a particular example
of a past-finite-universal order is $(\ensuremath{P_{\mathrm{fin}}}(\mathbb{P}),\subseteq)$, where
$\mathbb{P}$ is the class of all odd prime numbers. It follows that
$(\ensuremath{P_{\mathrm{fin}}}(\mathbb{P}),\supseteq)$ is future-finite-universal.
Since for $X,Y\in \ensuremath{P_{\mathrm{fin}}}(\mathbb{P})$ it holds that $X \subseteq Y$ if and only if
$\prod X$ divides $\prod Y$, we immediately obtain special embeddings of the
subset orders by divisibility as:
\begin{prop}\label{prop:setdiviuniv}~
\begin{itemize}
\item[a)] The divisibility order $(\mathbb{N},|)$ is past-finite-universal,
\item[b)] the reversed divisibility order $(\mathbb{N},\overleftarrow|)$ is
future-finite-universal,
\item[c)] the \emph{subset reverse divisibility order} $(\ensuremath{P_{\mathrm{fin}}}({\mathbb
N}),\overleftarrow|^{dom}_{\mathbb N})$ is universal.
\end{itemize}
\end{prop}
In the following we show that the subset reverse divisibility order can be
directly represented in the homomorphism order.
\section{The homomorphism order}
For given directed graphs $G$ and $H$ a {\em homomorphism} $f:G\to H$ is a
mapping $f:V_G\to V_H$ such that $(u,v)\in E_G$ implies $(f(u),f(v))\in E_H$.
The existence of a homomorphism $f:G\to H$ is traditionally denoted by $G\Hom H$.
This allows us to consider the existence of a homomorphism, $\Hom$, to be a
(binary) relation on the class of directed graphs.
The relation $\to$ is reflexive (the identity is a homomorphism) and transitive
(a composition of two homomorphisms is still a homomorphism). Thus the existence
of a homomorphism induces a quasi-order on the class of all finite directed
graphs. We denote the quasi-order induced by the existence of homomorphisms on
directed graphs by $(\DiGraphs,\leq)$ and on undirected graphs by
$(\Graphs,\leq)$. When speaking of orders, we use $G\leq H$ in the same sense
as $G\Hom H$. These quasi-orders can be easily transformed into a partial order
by choosing a particular representative for each equivalence class. In the case
of graph homomorphism such representative is up to isomorphism unique vertex
minimal element of each class, the {\em graph core}.
Both homomorphism orders $(\DiGraphs,\leq)$ and $(\Graphs,\leq)$ have been
extensively studied and proved to be fruitful areas of research, see
\cite{Hell2004}.
The original argument for universality of
partial order \cite{Pultr1980} used complex graphs and ad-hoc
constructions. It thus came as a surprise that the homomorphism order is
universal
even on the class of oriented paths~\cite{Hubicka2011}. While oriented paths are
a very simple
class of graphs, the universality argument remained rather complex. We can show
the universality of another restricted class easily.
Let $\overrightarrow C_k$ stand for the directed cycle on $k$ vertices with
edges oriented in the same direction; $\DiCycle$ is the class of directed
graphs formed by all $\overrightarrow{C}_k$, $k\geq 3$; and $\DiCycles$ is the
class of directed graphs formed by disjoint union of finitely many graphs in
$\DiCycle$.
\begin{thm}
\label{thm:cycles}
The partial order $(\DiCycles,\leq)$ is universal.
\end{thm}
\begin{proof}
As $\overrightarrow{C}_k\to \overrightarrow{C}_l$ if and only if $k
\overleftarrow| l$, we get the conclusion directly from
Proposition~\ref{prop:setdiviuniv}.
\end{proof}
\section{Applications}
\subsection{The fractal property of the homomorphism order}
As a strengthening of the universality of the homomorphism order we can show
that
every non-trivial
interval in the order is universal. This property, under the name of fractal
property, was first shown by Ne\v set\v ril \cite{Nesetril1999} but
the proof
was difficult and never published. An easier proof based on the divisibility
argument can be found in Chapter 3 and~\cite{Hubickab}.
\subsection{Universality of order induced by locally injective homomorphisms}~\\
Graph homomorphisms are just one of many mappings between graphs that
induce a partial order. Monomorphisms, embeddings or full homomorphisms also
induce
partial orders. The homomorphism order however stands out as especially
interesting and the universality result is one of unique properties of it.
Other orders fail to be universal for rather trivial reasons, such as the lack
of
infinite increasing or decreasing chains. A notable exception is the graph minor
order, which is known to not be universal as a consequence of the
celebrated result of Robertson and Seymour \cite{Robertson2004}. We consider the
following order:
A homomorphism $f:G\to H$ is {\em locally
injective}, if for every vertex $v$ the restriction of the mapping $f$ to the
domain $N_G(v)$ and range $N_H(f(v))$ is injective. (Here $N_G(v)$ denotes the
open neighborhood of a vertex.) This order was first studied by Fiala, Paulusma
and Telle in \cite{Fiala2005} where the degree refinement matrices are used
to describe several interesting properties. We can further show:
\begin{thm}
\label{thm:locallyinjective}
The class of all finite connected graphs ordered by the existence of locally
injective homomorphisms is universal.
\end{thm}
The proof of this theorem is based on a simple observation that every
homomorphism between directed cycles is also locally injective homomorphism.
The universality of locally injective homomorphism order on $\DiCycles$ thus
follows from
Theorem~\ref{thm:cycles}. This is a key difference between
Theorem~\ref{thm:cycles} and the universality of oriented paths:
homomorphisms between oriented paths require flipping that can not be easily
interpreted by locally injective homomorphisms.
In the second part of proof of Theorem~\ref{thm:locallyinjective} the cycles
need to be connected together into a single connected graph in a way preserving
all homomorphisms intended. This argument is technical and will appear in
\cite{Fiala}.
\subsection{Universality of homomorphism order of line graphs}
We close the paper by yet another application answering a question of
Roberson \cite{Roberson} asking about the universality of the homomorphism
order on
the class
of line graphs of graphs with vertices of degree at most $d$. We were able
to give an affirmative answer.
\begin{thm}[\cite{Fiala2014}]
The homomorphism order of line graphs of regular graphs with maximal degree $d$
is universal for every $d\geq 3$.
\end{thm}
This result may seem counter-intuitive with respect to the Vizing theorem.
Vizing
class 1 contains the graphs whose chromatic index is the same as the maximal
degree of a vertex, while Vizing class 2 contains the remaining graphs.
Because the Vizing class 1 is trivial it may seem that the homomorphism order
on the Vizing class 2 should be simple, too. The opposite is true.
\chapter{Fractal property of the graph homomorphism order}
\section{Introduction}
In this note we consider finite simple graphs and countable partial orders. On
these graphs we consider
all homomorphisms between them.
Recall that for graphs $G=(V_G,E_G)$ and $H=(V_H,E_H)$ a \emph{homomorphism
$f:G\to H$} is an edge preserving mapping $f:V_G\to V_H$, that is:
$$\{x,y\}\in E_G \implies \{f(x),f(y)\}\in E_H.$$
If there exists a homomorphism from graph $G$ to $H$, we write $G\to H$.
Denote by $\mathscr C$ the class of all finite simple undirected graphs without
loops and multiple edges, and by $\leq$ the following order:
$$G\to H\iff G\leq H.$$
$(\mathscr C,\leq)$ is called the {\em homomorphism order}.
The relation $\leq$ is clearly a quasiorder which becomes a partial order when
factorized by homomorphism equivalent graphs.
This homomorphism equivalence takes particularly simple form, when we represent
each class by the so called core. Here,
a \emph{core} of a graph is its minimal homomorphism equivalent subgraph.
It is well known that up to an isomorphism every equivalence class contains a
unique core~\cite{Hell2004}.
However, for our purposes it is irrelevant whether we consider $(\mathscr
C,\leq)$ as a quasiorder or a partial order. For brevity we speak of the
homomorphism order in both cases.
The homomorphism order has some special properties, two of which are expressed
as follows:
\begin{thm}
\label{thm:univ1}
$(\mathscr C,\leq)$ is (countably) universal.
Explicitly: For every countable partial order $P$ there exists an embedding of
$P$ into $(\mathscr C,\leq)$.
\end{thm}
Here an {\em embedding} of a partial order $(P,\leq)$ to a partial order
$(P',\leq')$ is an injective function $f:P\to P'$
such that for every $u,v\in P$, $u\leq v$ if and only if $f(u)\leq'f(v)$.
\begin{thm}
\label{thm:dense}
$(\mathscr C,\leq)$ is dense.
Explicitly: For every pair of graphs $G_1<G_2$ there exists $H$ such that
$G_1<H<G_2$.
This holds with the single exception of $K_1<K_2$, which forms the only gap of
the homomorphism order of undirected graphs.
\end{thm}
As usual, $K_n$ denotes the complete graph with $n$ vertices. We follow the
standard graph terminology as e.g. \cite{Hell2004}.
As the main result of this paper we complement these structural results by the
following statement:
\begin{thm}
\label{thm:main}
$(\mathscr C,\leq)$ has the fractal property.
Explicitly: For every pair $G_1< G_2$, distinct from $K_1$ and $K_2$
(i.e. the pair $(G_1,G_2)$ is not a gap), there exists an order-preserving
embedding $\Phi$ of $\mathscr C$ into the interval
$$[G_1,G_2]_{\mathscr C}=\{H;G_1<H<G_2\}.$$
\end{thm}
Putting it otherwise, every nonempty interval in $\mathscr C$ contains a copy of
$\mathscr C$ itself.
Theorem~\ref{thm:univ1} was proved first
in~\cite{Hedrlin1969,Pultr1980} and reproved
in~\cite{Hubicka2004,Hubicka2005}. Theorem~\ref{thm:dense} was proved
in~\cite{Welzl1982} and particularly simpler proof was given by Perles and
Ne\v set\v ril~\cite{Nesetril1999}, see also~\cite{Nesetril2000,Hell2004}.
Theorem~\ref{thm:main} was formulated in \cite{Nesetril1999} and remained
unpublished since. The principal ingredient of the proof is the Sparse
Incomparability Lemma \cite{Nesetril1989}.
In addition, we give yet another proof of Theorem~\ref{thm:main}. In fact, we
prove all three Theorems~\ref{thm:univ1}, \ref{thm:dense} and \ref{thm:main}
(Theorem~\ref{thm:dense} is a corollary of Theorem~\ref{thm:main}).
First, to make the paper self-contained we also give in Section~\ref{sec:univ} a
short and easy proof of the universality
of $(\mathscr C,\leq)$ which was developed in \cite{Fiala2014} and
sketched in \cite{Fiala2015}.
Then, in Section~\ref{sec:fractalprop} we give first proof of
Theorem~\ref{thm:main} based on the Sparse Incomparability
Lemma~\cite{Nesetril1989,Hell2004}.
Then in Section~\ref{sec:secondproof} we prove a strengthening of
Theorem~\ref{thm:dense} (stated as Lemma~\ref{lem:fatgap}). This will be needed
for our second proof of Theorem~\ref{thm:main}
which is flexible enough for applications. Thus this paper summarizes perhaps
surprisingly easy proofs of theorems which originally had difficult proofs.
\section{Construction of a universal order}
\label{sec:univ}
\subsection{Particular universal partial order}
\label{ssec:universal}
Let $(\mathcal P,\leq_{\mathcal P})$ be a partial order, where $\mathcal P$
consists of all finite sets of
odd integers, and where for $A,B\in \mathcal P$ we put $A\leq_{\mathcal P} B$ if
and only if for every $a\in
A$ there is $b\in B$ such that $b$ divides $a$. We make use of the following:
\begin{thm}[\cite{Fiala2014}]
\label{thm:universal}
The order $(\mathcal P,\leq_{\mathcal P})$ is a universal partial order.
\end{thm}
To make the paper self-contained we give a brief proof of this assertion.
(See also \cite{Hedrlin1969,Hubicka2004,Hubicka2011} for related
constructions of universal
partial orders.) The proof of Theorem~\ref{thm:universal} follows from two
simple lemmas.
We say that a countable partial order is {\em past-finite} if every down-set
$x^\downarrow = \{y; y\leq x\}$ is
finite. A countable partial order is {\em past-finite-universal}, if it contains
every
past-finite partial order as a suborder.
{\em Future-finite} and {\em future-finite-universal} orders are defined
analogously with respect to up-sets $x^\uparrow = \{y; y\geq x\}$.
Let $P_f(X)$ denote the set of all finite subsets of $X$. The following lemma
extends a well known fact about representing finite partial orders by sets
ordered by the subset relation.
\begin{lem}
\label{lem:pastfiniteuniv}
For any countably infinite set $X$, the partial order $(P_f(X),\subseteq)$ is
past-finite-universal.
\end{lem}
\begin{proof}
Consider an arbitrary past-finite order $(Q,\leq_Q)$. Without loss of generality
we may assume that $Q\subseteq X$.
Let $\Phi$ be the mapping that assigns every $x\in Q$ its down-set, i.e.
$\Phi(x) = \{y\in Q; y\leq x\}$.
It is easy to verify that $\Phi$ is indeed an embedding
$(Q,\leq_Q)\to(P_f(X),\subseteq)$.
\end{proof}
By the {\em divisibility partial order}, denoted by $(\mathbb{N},\leq_d)$, we
mean the partial order on positive integers, where $n \leq_d m$ if and only if
$n$ is divisible by $m$.
Denote by $\mathbb{Z}_o$ the set of all odd integers $n$, $n\geq 3$.
\begin{lem}
\label{lem:futurefiniteuniv}
The divisibility partial order $(\mathbb{Z}_o,\leq_d)$ is
future-finite-universal.
\end{lem}
\begin{proof}
Denote by $\mathbb P$ the set of all odd prime numbers. Apply Lemma
\ref{lem:pastfiniteuniv} for $X=\mathbb P$.
Observe that $A\in P_f(\mathbb P)$ is a subset of $B\in P_f(\mathbb P)$ if and
only if $\prod_{p\in A} p$ divides $\prod_{p\in B} p$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:universal}]
Let $(Q,\leq_Q)$ be a given partial order. Without loss of generality we may
assume that
$Q$ is a subset of $\mathbb{P}$. This way we obtain also the usual linear order
$\leq$
(i.e. the comparison by the size) on the elements of $Q$.
In the following construction, the order $\leq$ determines in which sequence the
elements of $Q$ are processed
(it could be also interpreted as the time of creation of the elements of $Q$).
We define two new orders on $Q$: the \emph{forwarding order} $\leq_f$ and the
\emph{backwarding order} $\leq_b$ as follows:
\begin{enumerate}
\item We put $x\leq_f y$ if and only if $x\leq_Q y$ and $x \leq y$.
\item We put $x\leq_b y$ if and only if $x\leq_Q y$ and $x \geq y$.
\end{enumerate}
Thus the partial order $(Q,\leq_Q)$ has been decomposed into $(Q,\leq_f)$ and
$(Q,\leq_b)$.
For every vertex $x\in Q$ both sets $\{y; y\leq_f x\}$ and $\{y; x\leq_b y\}$
are finite.
It follows that $(Q,\leq_f)$ is past-finite and that $(Q,\leq_b)$ is
future-finite.
Since $(\mathbb{Z}_o,\leq_d)$ is future-finite-universal
(Lemma~\ref{lem:futurefiniteuniv}), there is an embedding $\Phi:
(Q,\mathop{\leq_b}) \to (\mathbb{Z}_o,\leq_d)$. The desired embedding
$U:(Q,\leq_Q) \to (\mathcal P,\leq_{\mathcal P})$ is obtained by representing
each $x\in Q$ by a set system $U(x)$ defined by (see Figure~\ref{fig:poset}):
$$U(x)=\{\Phi(y); y\leq_f x\}.$$
\begin{figure}
\caption{Example of the construction of an embedding $U:(Q,\leq_Q) \to (\mathcal P,\leq_{\mathcal P})$.}
\label{fig:poset}
\end{figure}
To argue the correctness we first show that $U(x)\leq_{\mathcal P} U(y)$ implies
$x\leq_Q y$.
From the definition of $\leq_{\mathcal P}$ and the fact that $\Phi(x)\in U(x)$
it follows that at least one $w\in Q$ exists, such that
$\Phi(w)\in U(y)$ and $\Phi(x)\leq_d \Phi(w)$. By the definition of $U$,
$\Phi(w)\in U(y)$ if and only if $w\leq_f y$. By the definition of $\Phi$,
$\Phi(x)\leq_d \Phi(w)$ if and only if $x\leq_b w$.
It follows that $x\leq_b w\leq_f y$ and thus also $x\leq_Q w\leq_Q y$ and
consequently $x\leq_Q y$.
To show that $x\leq_Q y$ implies $U(x)\leq_{\mathcal P} U(y)$ we consider two
cases.
\begin{enumerate}
\item When $x\leq y$ then $U(x)\subseteq U(y)$ and thus also $U(x)\leq_{\mathcal
P} U(y)$.
\item Assume $x>y$ and take any $w\in Q$ such that $\Phi(w)\in U(x)$. From the
definition of $U(x)$ we have
$w\leq_f x$. Since $x\leq_Q y$ we have $w\leq_Q y$.
If $w\leq y$, then $w\leq_f y$ and we have $\Phi(w)\in U(y)$. In the other case
if $w>y$ then $w\leq_b y$ and thus $\Phi(w)\leq_d \Phi(y)$. Because the choice
of $w$ is arbitrary, it follows that $U(x)\leq_{\mathcal P} U(y)$.
\end{enumerate}
\end{proof}
Clearly, as in e.g.~\cite{Hubicka2004} this can be interpreted as Alice-Bob game
played on finite partial orders. Alice always wins.
\subsection{Representing divisibility}
Denote by $\overrightarrow{C_p}$ the directed cycle of length $p$, i.e. the
graph $(\{0,1,\ldots, p-1\},\{(i,i+1); i=0,1,\ldots, p-1\})$, where addition is
performed modulo $p$.
Denote by $\mathcal D$ the class of disjoint unions of directed cycles.
\begin{thm}
\label{thm:univ}
The homomorphism order $(\mathcal D,\leq)$ is universal.
\end{thm}
\begin{proof}
Observe first that a homomorphism $f:\overrightarrow{C_p}\to
\overrightarrow{C_q}$ between two cycles $\overrightarrow{C_p}$ and
$\overrightarrow{C_q}$ exists if and only if $q$ divides $p$.
Consequently, for two collections of disjoint cycles $\sum_{p\in
A}\overrightarrow{C_p}$ and $\sum_{q\in B}\overrightarrow{C_q}$
a homomorphism
$$f:\sum_{p\in A}\overrightarrow{C_p}\to \sum_{q\in B}\overrightarrow{C_q}$$
exists if and only if
$$A\leq_{\mathcal P} B,$$
with respect to the universal partial order $(\mathcal P,\leq_{\mathcal P})$ of
Theorem~\ref{thm:universal}.
Since we have used odd primes in the proof of Lemma~\ref{lem:futurefiniteuniv},
the minimum of each set in $\mathcal P$ is at least three.
Hence, each $A\in {\mathcal P}$ corresponds to a disjoint union of odd cycles.
\end{proof}
\begin{remark}
Denote by $\overrightarrow{\mathscr C}$ the class of all directed graphs.
Theorem~\ref{thm:univ} yields immediately that the order
$(\overrightarrow{\mathscr C},\leq)$ is also universal.
The class of disjoint unions of directed odd cycles is probably the simplest
class for which the homomorphism
order is universal. However note that here the key property is that the
objects are not connected and contain odd cycles of unbounded length.
If we want to obtain connected graphs with bounded cycles then we have
to refer to \cite{Hubicka2004,Hubicka2011,Hubicka2005} where it is proved that
that the
class of finite oriented trees $\mathfrak{m}athcal T$ and even the class of finite
oriented
paths form universal partial orders. These strong notions are not needed in
this
paper. However note that from our results here it also follows that not only the
class
of planar graphs but also the class of outer-planar graphs form a universal
partial
order.
\end{remark}
\section{The fractal property}
\label{sec:fractalprop}
To prove Theorem~\ref{thm:main}, we use the following result proved by
Ne\v{s}et\v{r}il and R\"odl by non-constructive methods~\cite{Nesetril1989}.
Later, non-trivial constructions were given by Matou\v{s}ek-Ne\v set\v ril
\cite{Matousek2004} and Kun \cite{Kun2013}:
\begin{thm}[Sparse Incomparability Lemma \cite{Nesetril1989}, see e.g. Theorem
3.12 of \cite{Hell2004}]
\label{thm:sparse}
Let $l$ be a positive integer. For any non-bipartite graphs $G_1$ and $G_2$ with
$G_1 < G_2$, there exists a connected graph $F$ such that
\begin{itemize}
\item $F$ is (homomorphism) incomparable with $G_1$ (i.e. $F\not\leq G_1$ and
$G_1\not\leq F$);
\item $F < G_2$; and
\item $F$ has girth at least $l$, where the girth of a graph is the length of
its shortest cycle.
\end{itemize}
\end{thm}
In the sequel we combine the Sparse Incomparability Lemma and the universality
of $(\mathcal D,\leq)$, together with the standard indicator
technique developed by Hedrl\'{\i}n and Pultr~\cite{Hedrlin1964,Hell2004}.
The essential construction of this method takes
an oriented graph $G$ and a graph $I$ with two distinguished vertices $a$, $b$
and creates a graph $G*(I,a,b)$, obtained by substituting every arc $(x,y)$ of
$G$ by a copy of the graph $I$, where $x$ is identified with $a$ and $y$ is
identified with $b$, see Figure~\ref{fig:suffices2} for an example.
\begin{figure}
\caption{Construction of $G*(I,a,b)$.}
\label{fig:suffices2}
\end{figure}
\begin{proof}[Proof of Theorem \ref{thm:main}]
Let undirected graphs $G_1<G_2$ not forming a gap be given. By our assumptions
$G_2$ is a core distinct from $K_2$ as otherwise $G_1=K_1$. (If $G_2=K_2$ then
$G_1=K_1$ and we have a gap.) We may also assume without loss of generality
that $G_1$ is not bipartite since in such a case we may replace $G_1$ by a graph
$G_1<G_1'<G_2$ (given by Theorem~\ref{thm:dense}), making the interval
$[G_1',G_2]_{\mathscr C}$ even narrower than $[G_1,G_2]_{\mathscr C}$. Because all
bipartite graphs are homomorphically equivalent it follows that $G_1'$ is
non-bipartite.
Let $l\ge 5$ be any odd integer s.t. the longest odd cycle of $G_2$ has length
at most $l$.
For the indicator we use the graph $I_l$, depicted in
Figure~\ref{fig:indicator}.
The graph $I_l$ can be viewed either as a subdivision of $K_4$, or as 3 cycles
of length $l+2$ amalgamated together.
\begin{figure}
\caption{The indicator $I_l$ and its homomorphism to $C_l$. Dashed lines
represent paths with $l-5$ internal vertices.}
\label{fig:indicator}
\end{figure}
The indicator $I_l$ is rigid, i.e. the only homomorphism $I_l\to I_l$ is the
identity~\cite[Proposition 4.6]{Hell2004}. Note also that $I_l$ allows a
homomorphism to the undirected cycle of length $l$, as is also depicted in
Figure~\ref{fig:indicator}.
We continue with the construction of a graph $H_A$ from a set of odd integers
$A\in {\mathcal P}$.
Let $F$ be a connected graph satisfying the conclusions of the Sparse
Incomparability Lemma. We fix an arbitrary vertex $u$ of $F$.
Then, given a positive integer $p\geq 3$, we apply the indicator $I_l,a,b$ on
the directed cycle of length $l\cdot p$ to obtain
$\overrightarrow{C_{lp}}*(I_l,a,b)$.
(Observe that $\overrightarrow{C_{lp}} * (I_l,a,b) \to \overrightarrow{C_{lq}} *
(I_l,a,b)$ if and only if $q$ divides $p$.)
We then join any vertex of the original cycle $\overrightarrow{C_{lp}}$ to $u$
by a path of length $|V_{G_2}|$, see Figure~\ref{fig:graphH}.
\begin{figure}
\caption{Graph $H_p$.}
\label{fig:graphH}
\end{figure}
Observe that the resulting graph $H_p$ allows a homomorphism to $G_2$,
since:
\begin{enumerate}
\item There exists a homomorphism $f:F \to G_2$ by Theorem~\ref{thm:sparse};
\item the indicator $I_l$ has a homomorphism to a cycle of length $l$, which can
be simply transformed to a homomorphism $g$ to any odd cycle of length $l'\le l$
in $G_2$ (by the choice of $l$);
\item the mapping $g$ can be chosen s.t. $g(a)=g(b)$, hence it can be extended
to all vertices of $\overrightarrow{C_{lp}}$;
\item the distance between the image of $u$ and the cycle of length $l'$ is at
most $|V_{G_2}|$, therefore both homomorphisms $f$ and $g$ can be combined
together and extended to the whole graph $H_p$ straightforwardly.
\end{enumerate}
To complete the construction of $H_A$, we put
$$H_A=\sum_{p\in A} H_p + G_1.$$
The conclusion of Theorem~\ref{thm:main} follows from the following three
properties:
\begin{enumerate}
\item For every $A\in {\mathcal P}: G_1 < H_A$.
The $\le$ inequality is obvious, since $G_1$ is a component of each $H_A$.
Since $F$ is a subgraph of each $H_p$, by Theorem~\ref{thm:sparse} there is no
homomorphism $H_A\to G_1$ whenever $A$ is nonempty.
\item For every $A\in {\mathcal P}: H_A < G_2$.
The existence of homomorphisms $H_p\to G_2$ and $G_1 \to G_2$ yields a
homomorphism $H_A \to G_2$.
As $G_2 \not\leq F$, and as the shortest cycle in
$\overrightarrow{C_{lp}}*(I_l,a,b)$ has length $l+2$, which is by the choice of
$l$ longer than the length of any odd cycle in $G_2$, there is no homomorphism
$G_2 \to H_A$.
\item For every $A, B \in {\mathcal P}: H_A \to H_B$ if and only if
$A\leq_{\mathcal P} B$.
It is easy to see that $q$ divides $p$ iff $\overrightarrow{C_{lp}}*(I_l,a,b)\to
\overrightarrow{C_{lq}}*(I_l,a,b)$. This is a standard argument.
Note that the paths between $F$ and $\overrightarrow{C_{lp}}$ in $H_p$, and
between $F$ and $\overrightarrow{C_{lq}}$ in $H_q$ have the same length and the
vertex $u$ of attachment has been chosen in the same way in both cases.
Therefore, $H_p\to H_q$ and consequently, $A\leq_{\mathcal P} B$ implies $H_A\to
H_B$.
Assume now that $H_A\to H_B$. We have already excluded $H_p \to G_1$; hence by
the connectivity of each $H_p$ it necessarily follows that $\sum_{p\in A}H_p\to
\sum_{q\in B}H_q$.
This in turn leads to $\sum_{p\in A} \overrightarrow{C_{lp}}*(I_l,a,b)\to
\sum_{q\in B}\overrightarrow{C_{lq}}*(I_l,a,b)$, which is equivalent to
$A\leq_{\mathcal P} B$.
\end{enumerate}
These three properties guarantee that we have constructed a full embedding of
$(\mathcal D,\leq)$ into $(\mathscr C,\leq)$ which maps every $\sum_{p\in A}
\overrightarrow{C_p}$ into the interval $[G_1,G_2]_{\mathscr C}$ in $\mathscr C$.
By Theorem~\ref{thm:univ} the universality of $[G_1,G_2]_{\mathscr C}$ follows.
\end{proof}
\section{Alternative elementary proof}
\label{sec:secondproof}
The sparse incomparability lemma holds for ``dense'' classes of
graphs. Here we establish the fractal property of graphs by a different
technique which allows us to prove the fractal property of some ``sparse''
classes of graphs
as well.
For example we can reduce the (stronger form of) density for planar graphs to
the fractal property of the class of planar graphs. But first we formulate
the proof for general graphs. We shall make use of the following two assertions.
\begin{lem}
\label{lem:fatgap}
Given graphs $G_1<G_2$, $G_2$ non-bipartite, there exist connected graphs $H_1$
and $H_2$ with
properties:
\begin{enumerate}
\item $H_1$ and $H_2$ are homomorphically incomparable, and,
\item $G_1<H_i<G_2$ for $i=1,2$.
\end{enumerate}
In other words, any non-gap interval in the homomorphism order contains
two incomparable graphs.
\end{lem}
\begin{proof}
Proof is a variant of the Ne\v{s}et\v{r}il-Perles' proof of
density~\cite{Hell2004}.
Put $$H_1=(G_2\times H)+G_1,$$
where $H$ is a graph that we specify later, $+$ is the disjoint union and
$\times$ denotes the direct product.
Then obviously $G_1\leq H_1\leq G_2$. If the odd girth of $G_2$ is larger than
the odd
girth of $G_1$ then $G_2\not\to H_1$. If the chromatic number
$\chi(H)>|V(G_1)|^{|V(G_2)|}$
then any homomorphism $G_2\times H\to G_1$ induces a homomorphism $G_2\to G_1$,
which is absurd (see \cite[Theorem 3.20]{Hell2004}). Thus $G_2\times H\not\to
G_1$ and $$G_1<H_1<G_2.$$
We choose $H$ to be a graph with large odd girth and chromatic number (known to
exist~\cite{Erdos1960}). This finishes
construction of $H_1$.
(Note that here we use the fact that the odd girth of the product is the maximum
of the odd girths of its factors.)
Now we repeat the same argument with the pair $G_1$ and $G_2\times H$, put
$$H_2=(G_2\times H')+G_1.$$
If the odd girth of $H'$ is larger than the odd girth of $G_2\times H$
then $G_2\times H\not\to H_2$ (assuming $H$,
and thus $G_2\times H$, is connected). Thus in turn $H_1\not\to H_2$.
If $\chi(H')>\max(|V(G_2\times H)|^{|V(G_2)|},|V(G_2)|^{|V(G_1)|})$
then again $G_2\times H'\to G_2\times H$ implies $G_2\to G_2\times H$, which
is absurd. Similarly $G_2\times H'\to G_1$ implies $G_2\to G_1$, and thus
$G_2\times H'\not\to G_1$. Consequently $H_2\not\to H_1$.
We may also assume that graphs $H_1$ and $H_2$ from Lemma~\ref{lem:fatgap}
are connected as otherwise we can join components by a long enough path.
Connectivity also follows from the following folklore fact:
\begin{claim}
\label{claim:paths}
For every connected non-bipartite graph $H$ there exists an integer $l$ such
that for any two vertices
$x,y\in V(H)$ and any $l'\geq l$ there exists a homomorphism
$f:P_{l'}\to H$ such that $f(0)=x$ and $f(l')=y$. ($P_{l'}$ is the path
of length $l'$ with vertices $\{0,1,\ldots,l'\}$).
\end{claim}
This concludes the construction of $H_1$ and $H_2$.
\end{proof}
\begin{proof}[Second proof of Theorem~\ref{thm:main}]
Let $G_1<G_2$ be a non-gap, thus $G_2$ is non-bipartite. Assume without loss of
generality that $G_2$ is connected.
Since the homomorphism order is universal, we prove the universality of the
interval $(G_1,G_2)$ by embedding the homomorphism
order into it.
Let $H_1$, $H_2$ be two connected graphs given
by Lemma~\ref{lem:fatgap}. We may assume that both $H_1$ and $H_2$ are cores.
Let $l$ be the number given by Claim~\ref{claim:paths}
for graph $G_2$. We may assume $l>\max\{|V(H_1)|,\allowbreak
|V(H_2)|,\allowbreak |V(G_2)|\}$.
We construct the gadget $I$ consisting of graphs $H_1$, $H_2$ joined together by
two paths of length $2l$ and $2l+1$.
We choose two distinguished vertices $a$, $b$ to be the middle vertices of these
two paths, see Figure~\ref{fig:twocycle}.
\begin{figure}
\caption{Gadget $I$.}
\label{fig:twocycle}
\end{figure}
We observe that any homomorphism $f:I\to I$ is surjective (because both $H_1$
and $H_2$ are cores), that it is also the identity on vertices of
$I\setminus (H_1\cup H_2)$, and that
there exists a homomorphism $f:I\to G_2$ such that $f(a)=f(b)$.
For every oriented graph $G$ define the graph
$\Phi(G)$ as $\Phi(G)=G*(I,a,b)$. We know $G_1 < \Phi(G)<G_2$ (as any
homomorphisms $f_i:H_i\to G_2$, $i\in \{1,2\}$, can be extended to a
homomorphism $\Phi(G)\to G_2$).
We finish the proof by proving that $\Phi(G)\to \Phi(G')$ if and only if $G\to G'$.
Assume first that there exists a homomorphism $f:G\to G'$. Consider the function
$g$ defined as $f$ on vertices of $G$ and as the unique mapping which maps a
copy of $(I,a,b)$ in $\Phi(G)$ corresponding to edge $(u,v)$ to the copy of
$(I,a,b)$ in $\Phi(G')$ corresponding to edge $(f(u),f(v))$. Hence $g$ is a
homomorphism.
Let now $g$ be a homomorphism $\Phi(G)\to\Phi(G')$. By the girth assumption and
connectivity of $H_1$ and $H_2$ we know that $g$ maps every copy of $H_1$ (or
$H_2$) in $\Phi(G)$ to a copy of $H_1$ (or $H_2$) in $\Phi(G')$. Again, by the
girth argument it follows that every copy of the indicator $(I,a,b)$ in
$\Phi(G)$ is mapped to a copy of the indicator $(I,a,b)$ in $\Phi(G')$. But the
only copies of $(I,a,b)$ in $\Phi(G)$ and $\Phi(G')$ are those corresponding to
the edges of $G$ and $G'$.
Since $I$ is a core, it follows that any pair of vertices $(a,b)$ in a copy of
$(I,a,b)$ has to be mapped to the vertices $(a,b)$ in any other copy of
$(I,a,b)$. As copies of $(I,a,b)$, and hence also the pairs $(a,b)$, correspond
to edges of $G'$, it follows that $g$ induces a mapping $f:V(G)\to V(G')$, which
is a homomorphism $G\to G'$.
This argument concludes the second proof of Theorem~\ref{thm:main}.
\end{proof}
\begin{remark}
Note that in this second proof we have a one-to-one correspondence between
homomorphisms $G\to G'$ and $\Phi(G)\to \Phi(G')$.
\end{remark}
\section{Concluding remarks}
\noindent
{\bf 1.} Gaps on oriented graphs and on more general relational structures are
more sophisticated. They were characterized by Ne\v{s}et\v{r}il and
Tardif~\cite{Nesetril2000}.
In the same paper, a nice 1-1 correspondence between gaps and dualities has been
shown.
Consequently, the full discussion of fractal property of relational structures
is more complicated and it will appear elsewhere.
\noindent
{\bf 2.} The whole paper deals with finite graphs but there is no difficulty in
generalizing our results to infinite graphs.
\noindent
{\bf 3.} An interesting question (already considered in~\cite{Nesetril2000}) is:
which intervals induce {\em isomorphic orders}.
We provide neither a characterization nor a conjecture in this direction.
\noindent
{\bf 4.} It seems that any natural universal class $\mathcal K$ of structures
possesses the \emph{gap-universal dichotomy}: An interval $[A,B]_{\mathcal K}$
in $\mathcal K$ either contains a gap or it contains a copy of $\mathcal K$
itself.
While in general this fails, it remains to be seen, whether this is true for
some general class of structures.
\noindent
{\bf 5.} There is a great difference in treating universality, density and
fractal property.
Whereas the universality was established in many classes of partial orders (and
categories), the density and fractal property was only established for just a
few basic classes.
Perhaps this should be investigated in greater depth. Apart from general
relational structures (which we hope to treat in another paper) another
interesting case is
provided by structures with two equivalences (here the universality is
established by \cite{Nesetril1971}).
\chapter{Inferring Phylogenetic Trees from the Knowledge of Rare Evolutionary
Events}
Rare events have played an increasing role in molecular phylogenetics as
potentially homoplasy-poor characters. In this contribution we analyze the
phylogenetic information content from a combinatorial point of view by
considering the binary relation on the set of taxa defined by the existence of
a single event separating two taxa. We show that the graph-representation of
this relation must be a tree. Moreover, we characterize completely the
relationship between the tree of such relations and the underlying phylogenetic
tree. With directed operations such as tandem-duplication-random-loss events in
mind we demonstrate how non-symmetric information constrains the position of
the root in the partially reconstructed phylogeny.
\sloppy
\section{Introduction}
Shared derived characters (synapomorphies or ``Hennigian markers'') that
are unique to specific clades form the basis of classical cladistics
\cite{Hennig:50}. In the context of molecular phylogenetics \emph{rare
genomic changes (RGCs)} can play the same important role
\cite{Rokas:00,Boore:06}. RGCs correspond to rare mutational events that
are very unlikely to occur multiple times and thus are (almost) free of
homoplasy. A wide variety of processes and associated markers have been
proposed and investigated. Well-studied RGCs include presence/absence
patterns of protein-coding genes \cite{Dutilh:08} as well as microRNAs
\cite{Sempere:06}, retroposon integrations \cite{Shedlock:00}, insertions
and deletions (indels) of introns \cite{Rogozin:05}, pairs of mutually
exclusive introns (NIPs) \cite{Krauss:08a}, protein domains
\cite{Deeds:05,Yang:05}, RNA secondary structures \cite{Misof:03}, protein
fusions \cite{Stechmann:03}, changes in gene order
\cite{Sankoff:82,Boore:98,Lavrov:07}, metabolic networks
\cite{Forst:01,Forst:06a,Mazurie:08}, transcription factor binding sites
\cite{Prohaska:04a}, insertions and deletions of arbitrary sequences
\cite{Simmons:00,Ashkenazy:14,Donath:14a}, and variations of the genetic
code \cite{Abascal:12}. RGCs clearly have proved to be phylogenetically
informative and helped to resolve many of the phylogenetic questions where
sequence data lead to conflicting or equivocal results.
Not all RGCs behave like cladistic characters, however. While
presence/absence characters are naturally stored in character matrices
whose columns can vary independently, this is not the case e.g.\ for gene
order characters. From a mathematical point of view, character-based
parsimony analysis requires that the mutations have a product structure in
which characters are identified with factors and character states can vary
independently of each other \cite{Wagner:03a}. This assumption is violated
whenever changes in the states of two distinct characters do not commute.
Gene order is, of course, the prime example of non-commutative events.
Three strategies have been pursued in such cases\rev{: (i)} Most
importantly, the analog of the parsimony approach is considered for a
particular non-commutative model. For the genome rearrangements an
elaborated theory has been developed that considers various types of
operations on (usually signed) permutations. Already the computation of
editing distances is non-trivial. An added difficulty is that the interplay
of different operations such as reversals, transpositions, and
tandem-duplication-random-loss (TDRL) events is difficult to handle
\cite{Bernt:07a,Hartmann:16}. \rev{(ii)} An alternative is to focus on
distance-based methods \cite{Wang:06}. Since good rate models are usually
unavailable, distance measures usually are not additive and thus fail to
precisely satisfy the assumptions underlying the most widely used methods
such as neighbor joining. \rev{(iii) Finally, the non-commutative data
structure can be converted into} a presence-absence structure, e.g., by
using pairwise adjacencies \cite{Tang:05} as a representation of
permutations or using list alignments in which rearrangements appear as
pairs of insertions and deletions \cite{Fritzsch:06a}. While this yields
character matrices that can be fed into parsimony algorithms, these can
only result in approximate heuristics.
While it tends to be difficult to disentangle multiple, super-imposed
complex changes such as genome rearrangements or tandem duplication, it is
considerably simpler to recognize whether two genes or genomes differ by a
single RGC operation. It makes sense therefore to ask just how much
phylogenetic information can be extracted from elementary RGC events. Of
course, we cannot expect that a single RGC will allow us to (re)construct a
detailed phylogeny. It can, however, provide us with solid, well-founded
constraints. Furthermore, we can hope that the combination of such
constraints can be utilized as a practicable method for phylogenetic
inference. Recently, we have shown that orthology assignments in large gene
families imply triples that must be displayed by the underlying species
tree \cite{HernandezRosales:12a,Hellmuth:13a}. In a phylogenomics setting a
sufficient number of such triple constraints can be collected to yield
fully resolved phylogenetic trees \cite{Hellmuth:15a}, see \cite{HW:16b}
for an overview.
A plausible application scenario for our setting is the rearrangement of
mitogenomes \cite{Sankoff:82}. Since mitogenomes are readily and cheaply
available, the taxon sampling is sufficiently dense so that the gene orders
often differ by only a single rearrangement or not at all. These cases are
identifiable with near certainty \cite{Bernt:07a}. Moreover, some RGC are
inherently directional. Probably the best known example is the tandem
duplication random loss (TDRL) operation \cite{Chaudhuri:06}. We will
therefore also consider a directed variant of the problem.
In this contribution, we ask how much phylogenetic information can be
retrieved from single RGCs. More precisely, we consider a scenario in which
we can, for every pair of taxa distinguish, for a given type of RGC,
whether $x$ and $y$ have the same genomic state, whether $x$ and $y$ differ
by exactly one elementary change, or whether their distance is larger than
a single operation. We formalize this problem in the following way. Given a
relation $\sim$, there is a phylogenetic tree $T$ with an edge labeling
$\lambda$ (marking the elementary events) such that $x\sim y$ if and only
if the edge labeling along the unique path $\mathbb{P}(x,y)$ from $x$ to $y$ in
$T$ has a certain prescribed property $\Pi$. After defining the necessary
notation and preliminaries, we give a more formal definition of the general
problem in Section~\ref{sect:theory}.
\rev{The graphs defined by path relations on a tree are closely related to
\emph{pairwise compatibility graphs} (PCGs). A graph $G =(V,E)$ is a PCG
if there is a tree $T$ with leaf set $V$, a positive edge-weight function
$w:E(T)\to \mathbb{R}^+$, and two nonnegative real numbers $d_{\min}\le
d_{\max}$ such that there is an edge $uv \in E(G)$ if and only if
$d_{\min}\leq d_{T,w}(u,v) \leq d_{\max}$, where $d_{T,w}(u,v)$ is the
sum of the weights of the edges on the unique path $\mathbb{P}(u,v)$ in
$T$. One writes $G = \mathrm{PCG}(T, w, d_{\min} , d_{\max})$.} In this
contribution we will primarily be interested in the special case where
$\Pi$ is ``a single event along the path''. \rev{Although PCGs have been
studied extensively, see e.g.,
\cite{PCGsurvey,YHTR:08,YBR:10,CMPS:13,MR:13,DMR:13}, the questions are
different from our motivation and, to our knowledge, no results have been
obtained that would simplify the characterization of the PCGs
corresponding to the ``single-1-relation'' in Section~\ref{sect:single1}.
Furthermore, PCGs are always treated as undirected graphs in the
literature. We also consider an antisymmetric (Section~\ref{sect:1dir})
and a general directed (Section~\ref{sect:mixed}) version of the
single-1-relation motivated by RGCs with directional information.}
\rev{The main result of this contribution can be summarized as follows: (i)
The graph of a single-1-relation is always a forest. (ii) If the
single-1-relation is connected, there is a unique minimally resolved tree
that explains the relation. The same holds true for the connected
components of an arbitrary relation. (iii) Analogous results hold for the
anti-symmetric and the mixed variants of the single-1-relation. In this
case not only the tree topology but also the position of the root can be
determined or at least constrained.} \rev{Together, these results in a
sense characterize the phylogenetic information contained in rare events:
if the single-1-relation graph is connected, it is a tree that through a
bijection corresponds to a uniquely defined, but not necessarily fully
resolved, phylogenetic tree. Otherwise, it is forest whose connected
components determine subtrees for which the rare events provide at least
some phylogenetically relevant information.}
\section{Preliminaries}
\label{sec:prelim}
\subsection{Basic Notation}
We largely follow the notation and terminology of \rev{the book by}
\cite{sem-ste-03a}. Throughout, $X$ always denotes a finite set of at
least three taxa. We will consider both undirected and directed graphs
$G=(V,E)$ with finite vertex set $V(G)\coloneqq V$ and edge set or arc set
$E(G)\coloneqq E$. For a digraph $G$ we write $\underline{G}$ for its
\emph{underlying undirected graph} where $V(G)=V(\underline{G})$ and
$\{x,y\}\in E(\underline{G})$ if $(x,y)\in E(G)$ or $(y,x)\in E(G)$. Thus,
$\underline{G}$ is obtained from $G$ by ignoring the direction of edges.
For simplicity, edges $\{x,y\}\in E(G)$ (in the undirected case) and arcs
$(x,y)\in E(G)$ (in the directed case) will be both denoted by $xy$.
The representation $G(R)=(V,E)$ of a relation $R\subseteq V\times V$ has
vertex set $V$ and edge set $E=\{xy\mid (x,y)\in R\}$. If $R$ is
irreflexive, then $G$ has no loops. If $R$ is symmetric, we regard $G(R)$
as an undirected graph. A \emph{clique} is a complete subgraph that is
maximal w.r.t.\ inclusion. An equivalence relation is \emph{discrete} if
all its equivalence classes consist of single vertices.
A tree $T=(V,E)$ is a connected cycle-free undirected graph. The vertices
of degree $1$ in a tree are called leaves, all other vertices of $T$ are
called \emph{inner vertices}.
An edge of $T$ is \emph{interior} if both of
its end vertices are inner vertices, otherwise the edge is called
\emph{terminal}.
\rev{For technical reasons, we call a vertex $v$ an inner vertex and a leaf
if $T$ is the single vertex graph
$(\{v\},\emptyset)$. However, if
$T$ is an edge $vw$ we refer to $v$ and $w$ as leaves but not
as inner vertices. Hence, in this case the edge $vw$ is not an
interior edge.}
A \emph{star} $S_m$ with $m$ leaves
is a tree that has at most one inner vertex. A \emph{path} $P_n$ (on $n$
vertices) is a tree with two leaves and $n-3$ interior edges. There is a
unique path $\mathfrak{m}athbb{P}(x,y)$ connecting any two vertices $x$ and $y$ in a
tree $T$. We write $e\in\mathfrak{m}athbb{P}(x,y)$ if the edge $e$ connects two
adjacent vertices along $\mathfrak{m}athbb{P}(x,y)$. We say that a directed graph is
a tree if its underlying undirected graph is a tree. A directed path $P$
is a tree on vertices $x_1,\dots,x_n$ s.t. $x_ix_{i+1}\in E(P)$, $1\leq
i\leq n-1$. A graph is a forest if all its connected components are trees.
A tree is \emph{rooted} if there is a distinguished vertex $\rho\in V$
called the \emph{root}. Throughout this contribution we assume that the
root is an inner vertex. \rev{Given a rooted tree $T=(V,E)$, there is a
partial order $\preceq$ on $V$ defined as $v \preceq u$ if $u$ lies on
the path from $v$ to the root. Obviously, the root is the unique maximal
element w.r.t.\ $\preceq$. For a non-empty subset $W\subseteq V$, we
define $\lca{W}$, or the \emph{least common ancestor of $W$}, to be the
unique $\preceq$-minimal vertex of $T$ that is an ancestor of every
vertex in $W$. In case $W=\{x,y\}$, we put $\lca{x,y}:=\lca{\{x,y\}}$.}
If $T$ is rooted, then by definition $\lca{x,y}$ is a uniquely defined
inner vertex along $\mathbb{P}(x,y)$.
We write $L(v)$ for the set of leaves in the subtree below a fixed vertex
$v$, i.e., $L(v)$ is the set of all leaves for which $v$ is located on the
unique path from $x\in L(v)$ to the root of $T$. The \emph{children} of an
inner vertex $v$ are its direct descendants, i.e., vertices $w$ with $vw\in
E(T)$ such that $w$ is further away from the root than $v$. A rooted or
unrooted tree that has no vertices of degree two \rev{(except possibly the
root of $T$)} and leaf set $X$ is called a \emph{phylogenetic tree $T$
(on $X$)}.
Suppose $X'\subseteq X$. A phylogenetic tree $T$ on $X$ \emph{displays} a
phylogenetic tree $T'$ on $X'$ if $T'$ can be obtained from $T$ by a series
of vertex deletions, edge deletions, and suppression of vertices of degree
$2$ other than possibly the root, i.e., the replacement of an inner
vertex $u$ and its two incident edges $e'$ and $e''$ by a single edge $e$,
cf.\ \rev{Def.\ 6.1.2 in the book by \cite{sem-ste-03a}}. In the rooted
case, only a vertex between two \rev{incident} edges may be suppressed;
furthermore, if $X'$ is contained in a single subtree, then the $\lca{X'}$
becomes the root of $T'$. It is useful to note that $T'$ is displayed by
$T$ if and only if it can be obtained from $T$ step-wisely by removing an
arbitrarily selected leaf $y\in X\setminus X'$, its incident edge $e=yu$,
and suppression of $u$ provided $u$ has degree $2$ after removal of $e$.
We say that a rooted tree $T$ \emph{contains} or \emph{displays} the triple
$\rt{xy|z}$ if $x,y,$ and $z$ are leaves of $T$ and the path from $x$ to
$y$ does not intersect the path from $z$ to the root of $T$. A set of
triples $\mathcal R$ is consistent if there is a rooted tree that contains
all triples in $\mathcal R$. For a given leaf set $L$, a triple set
$\mathcal R$ is said to be \emph{strict dense} if for any three distinct
vertices $x,y,z\in L$ we have $|\{\rt{xy|z}, \rt{xz|y}, \rt{yz|x}\}\cap
\mathcal R|=1$. It is well-known that any consistent strict-dense triple set
$\mathcal R$ has a unique representation as a binary tree
\cite[Suppl. Material]{Hellmuth:15a}. For a consistent set $R$ of rooted
triples we write $R\vdash \rt{(xy|z)}$ if any phylogenetic tree that
displays all triples of $R$ also displays $\rt{(xy|z)}$.
\rev{\cite{BS:95} extend and generalized results by \cite{Dekker86} and
showed} under which conditions it is possible to infer triples by using
only subsets $R'\subseteq R$, i.e., under which conditions $R\vdash
\rt{(xy|z)} \implies R'\vdash \rt{(xy|z)}$ for some $R'\subseteq R$. In
particular, we will use the following inference rules:
\renewcommand{\theequation}{\roman{equation}}
\begin{align}
\{\rt{(ab|c)}, \rt{(ad|c)}\} &\vdash \rt{(bd|c)}
\label{eq:infRule1} \\
\{\rt{(ab|c)}, \rt{(ad|b)}\} & \vdash \rt{(bd|c)},\rt{(ad|c)}
\label{eq:infRule2} \\
\{\rt{(ab|c)}, \rt{(cd|b)}\} &\vdash \rt{(ab|d)},\rt{(cd|a)}.
\label{eq:infRule3}
\end{align}
\section{Path Relations and Phylogenetic Trees}
\label{sect:theory}
Let $\Lambda$ be a non-empty set. Throughout this contribution we consider
a \rev{phylogenetic tree} $T=(V,E)$ with edge-labeling $\lambda \colon E\to
\Lambda$. An edge $e$ with label $\lambda(e)=k$ will be called a
\emph{k-edge}. We interpret $(T,\lambda)$ so that a RGC occurs along edge
$e$ if and only if $\lambda(e)=1$. Let $\mathbb{P}i$ be a subset of the set of
$\Lambda$-labeled paths. We interpret $\mathbb{P}i$ as a property of the path and
its labeling. The tree $(T,\lambda)$ and the property $\mathbb{P}i$ together define
a binary relation $\sim_{\mathbb{P}i}$ on $X$ by setting
\begin{equation}
x\sim_{\mathbb{P}i} y \quad\iff\quad (\mathfrak{m}athbb{P}(x,y),\lambda) \in \mathbb{P}i
\end{equation}
The relation $\sim_{\mathbb{P}i}$ has the graph representation $G(\sim_{\mathbb{P}i})$ with
vertex set $X$ and edges $xy\in E(G(\sim_{\mathbb{P}i}))$ if and only if
$x\sim_{\mathbb{P}i} y$.
\begin{definition}
Let $(T,\lambda)$ be a $\Lambda$-labeled phylogenetic tree with leaf set
$L(T)$ and let $G$ be a graph with vertex set $L(T)$. We say that
\emph{$(T,\lambda)$ explains $G$ (w.r.t.\ the path property $\Pi$)} if
$G=G(\sim_{\Pi})$.
\end{definition}
For simplicity we also say ``$(T,\lambda)$ explains $\sim$'' for the binary
relation $\sim$.
We consider in this contribution the conceptually ``inverse problem'':
Given a definition of the predicate $\Pi$ as a function of edge labels
along a path and a graph $G$, is there an edge-labeled tree $(T,\lambda)$
that explains $G$? Furthermore, we ask for a characterization of the class
of graphs that can be explained by edge-labeled trees and a given predicate
$\Pi$.
A straightforward biological interpretation of an edge labeling
\rev{$\lambda: E\to \{0,1\}$} is that a certain type of evolutionary event
has occurred along $e$ if and only if $\lambda(e)=1$. This suggests that in
particular the following path properties and their associated relations on
$X$ are of practical interest:
\begin{itemize}
\item[{$x \Ro y$}] if and only if all edges in $\mathbb{P}(x,y)$ are
labeled $0$; for convenience we set $x \Ro x$ for all $x\in X$.
\item[{$x \Rl y$}] if and only if all but one edge along $\mathbb{P}(x,y)$
are labeled $0$ and exactly one edge is labeled $1$;
\item[{$x \Rld y$}] if and only if all edges along $\mathbb{P}(u,x)$ are
labeled $0$ and exactly one edge along $\mathbb{P}(u,y)$ is labeled $1$,
where $u=\lca{x,y}$.
\item[{$x \Rk y$}] with \rev{$k\geq1$} if and only if at least $k$
edges \rev{along $\mathbb{P}(x,y)$} are labeled $1$;
\item[{$x \mathrel{\rightsquigarrow} y$}] \rev{if all edges along $\mathbb{P}(u,x)$ are labeled
$0$ and there are one or more edges along $\mathbb{P}(u,y)$ with a
non-zero label, where $u=\lca{x,y}$.}
\end{itemize}
We will call the relation $\Rl$ the \emph{single-1-relation}. It will be
studied in detail in the following section. Its directed variant $\Rld$
will be investigated in Section~\ref{sect:1dir}. The more general relations
$\Rk$ and $\mathrel{\rightsquigarrow}$ will be studied \rev{in future work.}
\rev{As noted in the introduction there is a close relationship between the
graphs of path relations introduced above and PCGs. For instance, the
single-1-relations correspond to a graph of the form $G =
\mathrm{PCG}(T,\lambda,1,1)$ for some tree $T$. The exact-$k$ leaf power
graphs $\mathrm{PCG}(T,\lambda,k,k)$ arise when $\lambda(e)=1$ for all
$e\in E(T)$ \cite{BVR:10}. The ``weight function'' $\lambda$, however,
may be $0$ in our setting. It is not difficult to transform our weight
functions to strictly positive values, albeit at the expense of using less
``beautiful'' values of $d_{\min}$ and $d_{\max}$. The literature on the
PCG, to our knowledge, does not provide results that would simplify our
discussion below. Furthermore, the applications that we have in mind for
future work are more naturally phrased in terms of Boolean labels, such as
the ``at least one 1'' relation, or even vector-valued structures. We
therefore do not pursue the relationship with PCGs further.}
The combinations of labeling systems and path properties of primary
interest to us have \emph{nice properties}:
\begin{itemize}
\item[(L1)] The label set $\Lambda$ is endowed with a semigroup operation $\boxplus\colon
\Lambda\times\Lambda\to\Lambda$.
\item[(L2)] There is a subset $\Lambda_{\Pi}\subseteq\Lambda$ of labels
such that $(\mathbb{P}(x,y),\lambda) \in \Pi$ if and only if
$\lambda(\mathbb{P}(x,y)):=\boxplus_{e\in\mathbb{P}(x,y)}\lambda(e) \in
\rev{\Lambda_{\Pi}}$ or
$\lambda(\mathbb{P}(x,y)):=\boxplus_{e\in\mathbb{P}(\lca{x,y},y)}\lambda(e)
\rev{\in \Lambda_{\Pi}}$.
\end{itemize}
For instance, we may set $\Lambda=\mathbb{N}$ and use the usual addition
for $\boxplus$. Then $\Ro$ corresponds to $\Lambda_{\Pi}=\{0\}$, $\Rl$
corresponds to $\Lambda_{\Pi}=\{1\}$, etc. \rev{The bounds $d_{\min}$ and
$d_{\max}$ in the definition of PCGs are of course just a special case
of the predicate $\Lambda_{\Pi}$.}
We now extend the concept of a \rev{phylogenetic tree} displaying another
one to the $\Lambda$-labeled case.
\begin{definition}
Let $(T,\lambda)$ and $(T',\lambda')$ be two \rev{phylogenetic trees}
with $L(T')\subseteq L(T)$. Then $(T,\lambda)$ displays $(T',\lambda')$
\rev{w.r.t.\ a path property $\Pi$} if (i) $T$ displays $T'$ and (ii)
$(\mathbb{P}_{T}(x,y),\lambda) \in \rev{\Lambda_{\Pi}}$ if and only if
$(\mathbb{P}_{T'}(x,y),\rev{\lambda'}) \in \rev{\Lambda_{\Pi}}$ for all
$x,y\in L(T')$.
\end{definition}
The definition is designed to ensure that the following property is satisfied:
\begin{lemma}
Suppose $(T,\lambda)$ displays $(T',\lambda')$ and $(T,\lambda)$ explains
a graph $G$. Then $(T',\lambda')$ explains the induced subgraph
$G[L(T')]$.
\end{lemma}
\begin{lemma}
Let $(T,\lambda)$ display $(T',\lambda')$. Assume that the labeling
system satisfies (L1) and (L2) and suppose
$\lambda'(e)=\lambda(e')\boxplus\lambda(e'')$ whenever $e$ is the edge
resulting from suppressing the inner vertex between $e'$ and $e''$.
If $T'$ is displayed by $T$ then $(T',\lambda')$ is displayed by
$(T,\lambda)$ \rev{(w.r.t.\ any path property $\Pi$).}
\end{lemma}
\begin{proof}
Suppose $T'$ is obtained from $T$ by removing a single leaf $w$. By
construction $T'$ is displayed by $T$ and $\lambda(\mathbb{P}(x,y))$ is
preserved upon removal of $w$ and suppression of its neighbor. Thus
(L2) implies that $(T,\lambda)$ displays $(T',\lambda')$. For an arbitrary
$T'$ displayed by $T$ this argument can be repeated for each individual
leaf removal on the editing path from $T$ to $T'$.
\end{proof}
\rev{We note in passing that this construction is also well behaved for
PCGs: it preserves path length, and thus distances between leaves, by
summing up the weights of edges whenever a vertex of degree 2 between
them is omitted.}
Let us now turn to the properties of the specific relations that are of
interest in this contribution.
\begin{lemma}
The relation $\Ro$ is an equivalence relation.
\end{lemma}
\begin{proof}
By construction, $\Ro$ is symmetric and reflexive. To establish
transitivity, suppose $x\Ro y$ and $y\Ro z$, i.e., $\lambda(e)=0$ for all
$e\in \mathbb{P}(x,y)\cup \mathbb{P}(y,z)$. By uniqueness of the path
connecting vertices in a tree, $\mathbb{P}(x,z)\subseteq
\mathbb{P}(x,y)\cup \mathbb{P}(y,z)$, i.e., $\lambda(e)=0$ for all $e\in
\mathbb{P}(x,z)$ and therefore $x \Ro z$.
\end{proof}
Since $\Ro$ is an equivalence relation, the graph $G(\Ro)$ is a disjoint
union of complete graphs, or in other words, each connected component of
$G(\Ro)$ is a clique.
We are interested here in characterizing the pairs of trees and labeling
functions $(T,\lambda)$ that explain a given relation $\rho$ as its $\Ro$,
$\Rl$ or $\Rld$ relation. More precisely, we are interested in the least
resolved trees with this property.
\begin{definition}
Let $(T,\lambda)$ be an edge-labeled phylogenetic tree with leaf set
$X=L(T)$. We say that $(T',\lambda')$ is \emph{edge-contracted from
$(T,\lambda)$} if the following conditions hold: (i) $T'=T/e$ is the
usual graph-theoretical edge contraction for some interior edge
$e=\{u,v\}$ of $T$.
\noindent (ii) The labels satisfy $\lambda'(e')=\lambda(e')$ for
all $e'\ne e$.
\end{definition}
Note that we do not allow the contraction of terminal edges, i.e., of edges
incident with leaves.
\rev{
\begin{definition}[Least and Minimally Resolved Trees]
Let $R\in \{\Ro,\ \Rl,\ \Rld,\ \Rl/\Ro,\ \Rld/\Ro\}$.
A pair $(T,\lambda)$ is \emph{least resolved} for a prescribed relation
$R$ if no edge contraction of $(T,\lambda)$ leads to a tree
$(T',\lambda')$ that explains $R$.
A pair $(T,\lambda)$ is \emph{minimally resolved} for a
prescribed relation $R$ if it has the fewest number of
vertices among all trees that explain $R$.
\end{definition}
Note that every minimally resolved tree is also least resolved, but not
\textit{vice versa}. }
\section{The single-1-relation}
\label{sect:single1}
The single-1-relation does not convey any information on the location of
the root and the corresponding partial order on the tree. We therefore
regard $T$ as unrooted in this section.
\begin{lemma}
\label{lem:notriangle}
Let $(T,\lambda)$ be an edge-labeled \rev{phylogenetic tree with leaf set
$X$ and} resulting relations $\Ro$ and $\Rl$ over $X$. Assume that
$A,B$ are distinct cliques in $G(\Ro)$ and suppose $x\Rl y$ where $x\in
A$ and $y\in B$. Then $x'\Rl y'$ holds for all $x'\in A$ and $y'\in B$.
\end{lemma}
\begin{proof}
First, observe that $\mathbb{P}(x',y') \subseteq
\mathbb{P}(x',x)\cup \mathbb{P}(x,y)\cup \mathbb{P}(y,y')$ in $T$.
Moreover, $\mathbb{P}(x',x)$ and $\mathbb{P}(y,y')$ have only edges with
label $0$. Since $\mathbb{P}(x,y)$ contains exactly one non-0-label,
$\mathbb{P}(x',y')$ contains at most one non-0-label. If there was
no non-0-label, then $\mathbb{P}(x,y) \subseteq
\mathbb{P}(x,x')\cup \mathbb{P}(x',y')\cup \mathbb{P}(y',y)$ would imply
that $\mathbb{P}(x,y)$ also has only 0-labels, a contradiction. Therefore
$x'\Rl y'$.
\end{proof}
As a consequence it suffices to study the single-1-relation on the quotient
graph $G(\Rl)/\Ro$. To be more precise, $G(\Rl)/\Ro$ has as vertex set the
equivalence classes of $\Ro$ and two vertices $c_i$ and $c_j$ are connected
by an edge if there are vertices $x\in c_i$ and $y\in c_j$ with $x\Rl
y$. Analogously, the graph $G(\Rld)/\Ro$ is defined.
For a given $(T,\lambda)$ and its corresponding relation $\Ro$
consider an arbitrary nontrivial equivalence class $c_i$ of $\Ro$. Since
$\Ro$ is an equivalence relation, the induced subtree $T'$ with leaf set
$c_i$ and inner vertices $\lca{c}$ for any subset $c\subseteq c_i$
contains only 0-edges and is maximal w.r.t. this property. Hence, we
could remove $T'$ from $T$ and identify the root $\lca{c_i}$ of $T'$ in
$T$ by a representative of $c_i$, while keeping the information of $\Ro$
and $\Rl$. Let us be a bit more explicit about this point. Consider trees
$(T_Y,\lambda_Y)$ displayed by $(T,\lambda)$ with leaf sets $Y$ such that
$Y$ contains exactly one (arbitrarily chosen) representative from each
$\Ro$ equivalence class of $(T,\lambda)$. For any such trees
$(T_Y,\lambda_Y)$ and $(T_Y',\lambda_Y')$ with the latter property, there
is an isomorphism $\alpha\colon T_Y\to T_{Y'}$ such that $\alpha(y)\Ro y$ and
$\lambda_{Y'}(\alpha(e))=\lambda_{Y}(e)$. Thus, all such
$(T_Y,\lambda_Y)$ are isomorphic and differ basically only in the choice
of the particular representatives of the equivalence classes of $\Ro$.
Furthermore, $T_{Y}$ is isomorphic to the quotient graph $T/\Ro$ obtained
from $T$ by replacing the (maximal) subtrees where all edges are labeled
with $0$ by a representative of the corresponding $\Ro$-class. Suppose
$(T,\lambda)$ explains $G$. Then $(T_Y,\lambda_Y)$ explains $G[Y]$ for a
given $Y$. Since all $(T_Y,\lambda_Y)$ are isomorphic, all $G[Y]$ are
also isomorphic, and thus $G[Y]=G/\Ro$ for all $Y$.
To avoid unnecessarily clumsy language we will say that ``$(T,\lambda)$
explains \hbox{$G(\Rl)/\Ro$}'' instead of the more accurate wording
``$(T,\lambda)$ displays $(T_Y,\lambda_Y)$ where $Y$ contains exactly one
representative of each $\Ro$ equivalence class such that $(T_Y,\lambda_Y)$
explains $G(\Rl)/\Ro$''.
In contrast to $\Ro$, the single-1-relation $\Rl$ is not transitive. As an
example, consider the star $S_3$ with leaf set $\{x,y,z\}$, inner
vertex $v$, and edge labeling $\lambda(v,x)=\lambda(v,z)=1\neq
\lambda(v,y)=0$. Hence $x\Rl y$, $y\Rl z$ and $x\not\Rl z$. In fact, a
stronger property holds that forms the basis for understanding the
single-1-relation:
\begin{lemma}
If $x\Rl y$ and $x\Rl z$, then $y\not\Rl z$.
\label{lem:not-trans}
\end{lemma}
\begin{proof}
Uniqueness of paths in $T$ implies that there is a unique inner vertex
$u$ in $T$ such that
$\mathbb{P}(x,y)=\mathbb{P}(x,u)\cup\mathbb{P}(u,y)$,
$\mathbb{P}(x,z)=\mathbb{P}(x,u)\cup\mathbb{P}(u,z)$, and
$\mathbb{P}(y,z)=\mathbb{P}(y,u)\cup\mathbb{P}(u,z)$. By assumption,
each of the three sub-paths $\mathbb{P}(x,u)$, $\mathbb{P}(y,u)$, and
$\mathbb{P}(z,u)$ contains at most one 1-label. There are only two cases:
(i) There is a 1-edge in $\mathbb{P}(x,u)$. Then neither
$\mathbb{P}(y,u)$ nor $\mathbb{P}(z,u)$ may have another 1-edge, and thus
$y\Ro z$, which implies that $y\not\Rl z$. (ii) There is no 1-edge in
$\mathbb{P}(x,u)$. Then both $\mathbb{P}(y,u)$ and $\mathbb{P}(z,u)$ must
have exactly one 1-edge. Thus $\mathbb{P}(y,z)$ harbors exactly two
1-edges, whence $y\not\Rl z$. \end{proof}
Lemma \ref{lem:not-trans} can be generalized as follows.
\begin{lemma}
Let $x_1,\dots,x_n$ be vertices s.t.\ $x_{i}\Rl x_{i+1}$, $1\leq i\leq
n-1$. Then, for all $i,j$, $x_i\Rl x_j$ if and only if $|i-j|=1$.
\label{lem:cycle-free}
\end{lemma}
\begin{proof}
For $n=3$, we can apply Lemma \ref{lem:not-trans}. Assume the statement
is true for all $n<K$, and let $n=K$. Hence, for all vertices $x_i,x_j$
along the paths from $x_1$ to $x_{K-1}$, as well as the paths from $x_2$
to $x_K$, it holds that $|i-j|=1$ if and only if we have $x_i\Rl
x_j$. Thus, for the vertices $x_i,x_j$ we have $|i-j|>1$ if and only if
we have $x_i\not\Rl x_j$. Therefore, it remains to show that $x_1\not\Rl
x_n$.
Assume, for contradiction, that $x_1\Rl x_n$. Uniqueness of paths in $T$
implies that there is a unique inner vertex $u$ in $T$ that lies on
all three paths $\mathbb{P}(x_1,x_2)$, $\mathbb{P}(x_1,x_n)$, and
$\mathbb{P}(x_2,x_n)$.
There are two cases: either there is a 1-edge in $\mathbb{P}(x_1,u)$ or
$\mathbb{P}(x_1,u)$ contains only 0-edges.
If $\mathbb{P}(x_1,u)$ contains a 1-edge, then all edges along the path
$\mathbb{P}(u, x_n)$ must be $0$, and all edges on the path $\mathbb{P}(u,
x_2)$ must be $0$. However, this implies that $x_2\Ro x_n$, a
contradiction, as we assumed that $\Ro$ is discrete.
Thus, there is no 1-edge in $\mathbb{P}(x_1,u)$ and hence, both paths
$\mathbb{P}(u,x_n)$ and $\mathbb{P}(u,x_2)$ contain exactly one
1-edge each.
Now consider the unique vertex $v$ that lies on all three paths
$\mathbb{P}(x_1,x_2)$, $\mathbb{P}(x_1,x_3)$, and $\mathbb{P}(x_2,x_3)$.
Since $u,v\in \mathbb{P}(x_1,x_2)$, we have either (A) $v\in
\mathbb{P}(x_1,u)$ where $u=v$ is possible, or (B) $u\in
\mathbb{P}(x_1,v)$ and $u\neq v$. We consider the two cases separately.
Case (A): Since there is no 1-edge in $\mathbb{P}(x_1,u)$ and
$x_1\Rl x_n$, resp., $x_1\Rl x_2$, there is exactly one 1-edge in
$\mathbb{P}(u,x_n)$, resp., $\mathbb{P}(u,x_2)$. Moreover, since $x_2\Rl
x_3$ the path $\mathbb{P}(v,x_3)$ contains only 0-edges, and thus $x_3\Rl
x_n$, a contradiction.
Case (B): Since there is no 1-edge in
$\mathbb{P}(x_1,u)$ and $x_1\Rl x_n$, the path $\mathbb{P}(u,x_n)$
contains exactly one 1-edge.
In the following, we consider paths between two vertices $x_i, x_{n-i}
\in \{x_1,\dots,x_n\}$ step-by-step, starting with $x_1$ and $x_{n-1}$.
The induction hypothesis implies that $x_1\not\Rl x_{n-1}$ and, since $\Ro$
is discrete, we can conclude that $x_1\not\Ro x_{n-1}$. Let
$\mathbb{P}(x_1,x_n) = \mathbb{P}(x_1,a)\cup ab \cup \mathbb{P}(b,x_n)$,
where $e=ab$ is the 1-edge contained in $\mathbb{P}(x_1,x_n)$. Let $c_1$
be the unique vertex that lies on all three paths $\mathbb{P}(x_1,x_n),
\mathbb{P}(x_1,x_{n-1})$, and $\mathbb{P}(x_{n-1},x_n)$. If $c_1$ lies
on the path $\mathbb{P}(x_1,a)$, then $\mathbb{P}(c_1,x_{n-1})$ contains
only 0-edges, since $\mathbb{P}(x_{n-1},x_n) =
\mathbb{P}(x_{n-1},c_1)\cup \mathbb{P}(c_1,a)\cup ab \cup
\mathbb{P}(b,x_n)$ and $x_n \Rl x_{n-1}$. However, in this case the path
$\mathbb{P}(x_{1},c_1)\cup\mathbb{P}(c_1, x_{n-1})$ contains only
0-edges, which implies that $x_1\Ro x_{n-1}$, a contradiction. Thus, the
vertex $c_1$ must be contained in $\mathbb{P}(b,x_n)$. Since $x_1\Rl
x_n$, the path $\mathbb{P}(c_1,x_n)$ contains only 0-edges. Hence, the
path $\mathbb{P}(c_1,x_{n-1})$ contains exactly one 1-edge, because $x_n
\Rl x_{n-1}$. In particular, by construction we see that
$\mathbb{P}(x_1,x_n) = \mathbb{P}(x_1,u)\cup
\mathbb{P}(u,c_1)\cup\mathbb{P}(c_1,x_n)$.
Now consider the vertices $x_n$ and $x_{n-2}$. Let $a'b'$ be the 1-edge
on the path $\mathbb{P}(c_1,x_{n-1}) = \mathbb{P}(c_1,a') \cup a'b' \cup
\mathbb{P}(b',x_{n-1})$. Since $x_{n}\not\Rl x_{n-2}$ and $x_{n}\not
\Ro x_{n-2}$, we can apply the same argument and conclude that there is a
vertex $c_2\in \mathbb{P}(b',x_{n-1})$ s.t.\ the path
$\mathbb{P}(c_2,x_{n-2})$ contains exactly one 1-edge. In particular, by
construction we see that $\mathbb{P}(x_1,x_{n-2}) = \mathbb{P}(x_1,u)\cup
\mathbb{P}(u,c_1)\cup \mathbb{P}(c_1,c_2) \cup\mathbb{P}(c_2,x_{n-2})$
s.t.\ the path $\mathbb{P}(c_1,c_2)$ contains exactly one 1-edge.
Repeating this argument, we arrive at vertices $x_2$ and $x_4$ and can
conclude analogously that there is a path $\mathbb{P}(c_{n-2},x_2)$ that
contains exactly one 1-edge and, in particular, that $\mathbb{P}(x_1,x_2)
= \mathbb{P}(x_1,u)\cup \mathbb{P}(u,c_1)\cup \left(\bigcup_{1\leq i\leq
n-3}\mathbb{P}(c_i,c_{i+1})\right) \cup\mathbb{P}(c_{n-2},x_{2})$,
where each of the distinct paths $\mathbb{P}(c_i,c_{i+1})$, $1\leq i\leq
n-3$, contains exactly one 1-edge. However, this contradicts that $x_1\Rl
x_2$.
\end{proof}
\begin{cor}
The graph $G(\Rl)/\Ro$ is a forest, and hence all paths in $G(\Rl)/\Ro$
are induced paths.
\label{cor:cycle-free}
\end{cor}
Next we analyze the effect of edge contractions in $T$.
\begin{lemma} \label{lem:contract}
Let $(T,\lambda)$ explain $G(\Rl)/\Ro$ and let $(T',\lambda')$ be the
result of contracting an \rev{interior} edge $e$ in $T$. If $\lambda(e)=0$
then $(T',\lambda')$ explains $G(\Rl)/\Ro$. If $G(\Rl)/\Ro$ is connected
and $\lambda(e)=1$ then $(T',\lambda')$ does not explain $G(\Rl)/\Ro$.
\rev{If $G(\Rl)/\Ro$ is connected and $(T,\lambda)$ is a tree that
explains \mbox{$G(\Rl)/\Ro$}, then $(T,\lambda)$ is least resolved if
and only if all 0-edges are incident to leaves and each inner vertex is
incident to exactly one 0-edge.}
\rev{If, in addition, $(T,\lambda)$ is minimally resolved, then all
0-edges are incident to leaves and each inner vertex is incident to
exactly one 0-edge.}
\end{lemma}
\begin{proof}
Let $\Rl_T$, $\Ro_T$, $\Rl_{T'}$, and $\Ro_{T'}$ be the relations
explained by $T$ and $T'$, respectively. Since $(T,\lambda)$ explains
$G(\Rl)/\Ro$ , we have $\Rl_T\,=\,\Rl/\Ro$. Moreover, since $\Ro_T$ is
discrete, no two distinct leaves of $T$ are in relation $\Ro_T$.
If $\lambda(e)=0$, then contracting the interior edge $e$ clearly
preserves the property of $\Ro_{T'}$ being discrete. Since only interior
edges are allowed to be contracted, we have $L(T)=L(T')$. Therefore,
$\Ro_T =\Ro_{T'}$ and the 1-edges along any path from $x\in L(T)=L(T')$
to $y\in L(T)=L(T')$ remains unchanged, and thus $\Rl_T=\Rl_{T'}$.
Hence, $(T',\lambda')$ explains $G(\Rl)/\Ro$.
If $G(\Rl)/\Ro$ is connected, then for every 1-edge $e$ there is a pair
of leaves $x'$ and $x''$ such that $x'\Rl x''$ and $e$ is the only 1-edge
along the unique path connecting $x'$ and $x''$. Consequently,
contracting $e$ would make $x'$ and $x''$ non-adjacent w.r.t.\ the
resulting relation. Thus no 1-edge can be contracted in $T$ without
changing $G(\Rl)/\Ro$.
\rev{Now assume that $(T,\lambda)$ is a least resolved tree that explains
the connected graph $G(\Rl)/\Ro$. By the latter arguments, all
interior edges of $(T,\lambda)$ must be 1-edges and thus any 0-edge
must be incident to leaves. Assume for contradiction, that there is an
inner vertex $w$ such that for all adjacent leaves $x',x''$ we have
$\lambda(wx') = \lambda(wx'') = 1$. Thus, for any such leaves we have
$x'\not\Rl x''$. In particular, any path from $x'$ to any other leaf
$y$ (distinct from the leaves adjacent to $w$) contains an interior
1-edge. Thus, $x'\not \Rl y$ for any such leaf of $T$. However, this
immediately implies that $x'$ is an isolated vertex in $G(\Rl)/\Ro$; a
contradiction to the connectedness of $G(\Rl)/\Ro$. Furthermore, if
there is an inner vertex $w$ such that for adjacent leaves $x',x''$ it
holds that $\lambda(wx') = \lambda(wx'') = 0$, then $x'\Ro x''$; a
contradiction to $\Ro$ being discrete. Therefore, each inner vertex is
incident to exactly one 0-edge.
If $G(\Rl)/\Ro$ is connected and $(T,\lambda)$ explains $G(\Rl)/\Ro$
such that all 0-edges are incident to leaves, then all interior edges
are 1-edges. As shown, no interior 1-edge can be contracted in $T$
without changing the corresponding $\Rl$ relation. Moreover, no
leaf-edge can be contracted since $L(T) = V(G(\Rl)/\Ro)$. Hence,
$(T,\lambda)$ is least resolved.
Finally, since any minimally resolved tree is least resolved, the last
assertions follows from the latter arguments. }
\end{proof}
\begin{algorithm}[tbp]
\caption{Compute $(T(Q), \lambda)$}
\label{alg:Q}
\begin{algorithmic}[1]
\REQUIRE $Q$
\ENSURE $T(Q)$
\STATE set $T(Q)\leftarrow Q$
\STATE Retain the labels of all leaves of $Q$ in $T(Q)$ and relabel
all inner vertices $u$ of $Q$ as $u'$.
\STATE Label all edges of the copy of $Q$ by $\lambda(e)=1$.
\STATE For each inner vertex $u'$ of $Q$ add a vertex $u$ to $T(Q)$ and
insert the edge $uu'$.
\STATE Label all edges of the form $e=uu'$ with $\lambda(e)=0$.
\end{algorithmic}
\end{algorithm}
Let $\mathbb{T}$ be the set of all trees with vertex set $X$ but no
edge-labels and $\mathcal{T}$ denote the set of all edge-labeled 0-1-trees
$(T,\lambda)$ with leaf set $X$ such that each inner vertex $w\in W$ has
degree at least $3$ and there is exactly one adjacent leaf $v$ to $w$ with
$\lambda(wv)=0$, while all other edges $e$ in $T$ have label $\lambda(e)=1$.
\begin{figure}
\caption{\rev{Illustration of the bijection $\varphi$. It contracts, at
each inner vertex (white) of $(T,\lambda)$, the unique $0$-edge and
transfers the leaf $y\in X$ as vertex label to the inner vertex of $Q$.
Leaves in $(T,\lambda)$ incident to $1$-edges remain unchanged.
The inverse map $\varphi^{-1}$ is given by Algorithm~\ref{alg:Q}.}}
\label{fig:bijection}
\end{figure}
\rev{
\begin{lemma}
The map $\varphi : \mathcal{T} \to \mathbb{T}$ with
$\varphi: (T,\lambda) \mapsto Q$,
$V(Q)=L(T)$, and
$Q\simeq T^*$, where $T^*$ is the underlying unlabeled tree
obtained from $(T,\lambda)$ by contracting all edges labeled $0$,
is a bijection.
\label{lem:bijection}
\end{lemma}
\begin{proof}
We first show that $\varphi$ and $\varphi^{-1}$ are maps.
Clearly, $\varphi$ is a map, since the edge-contraction
is well-defined and leads to exactly one tree in $\mathbb{T}$.
For $\varphi^{-1}$ we construct $(T,\lambda)$ from $Q$
as in Algorithm \ref{alg:Q}.
It is easy to see that $(T,\lambda) \in \mathcal{T}$.
Now consider $T^*$ obtained from $(T,\lambda)$ by
contracting all edges labeled $0$. By construction,
$T^* \simeq Q$ (see Fig. \ref{fig:bijection}).
Hence, $\varphi : \mathcal{T} \to \mathbb{T}$ is bijective.
\end{proof}
}
\rev{The bijection is illustrated in Fig.~\ref{fig:bijection}.}
\rev{
\begin{lemma}
Let $(T,\lambda) \in \mathcal{T}$ and $Q= \varphi((T,\lambda)) \in
\mathbb{T}$. The set $\mathcal{T}$ contains \emph{all} least resolved
trees that explain $Q$.
Moreover, if $Q$ is considered as a graph \mbox{$G(\Rl)/\Ro$} with vertex
set $X$, then $(T,\lambda)$ is the unique least resolved tree in
$\mathcal{T}$ that explains $Q$ and therefore, the unique minimally
resolved tree that explains $Q$.
\label{lem:Umin}
\end{lemma}
\begin{proof}
We start by showing that $(T,\lambda)$ explains $Q$. Note that, since
$Q\in\mathbb{T}$, the graph $Q$ must be connected. By construction and
since $Q= \varphi((T,\lambda))$, we have $T^* \simeq Q$, where $T^*$ is the
tree obtained from $T$ after contracting all 0-edges. Let $v,w\in X$ and
assume that there is exactly a single $1$ along the path from $v$ to $w$
in $(T,\lambda)$. Hence, after contracting all edges labeled $0$ we see
that $vw\in E(T^*)$ where $T^* \simeq Q$ and thus $v\Rl w$. Note that no
path between any two vertices in $(T,\lambda)$ can have only 0-edges (by
construction). Thus, assume that there is more than a single 1-edge on
the path between $v$ and $w$. Hence, after contracting all edges
labeled $0$ we see that there is still a path in the tree $T^*$ from $v$
to $w$ with more than one 1-edge. Since $T^* \simeq Q$, we have $vw\notin
E(Q)$ and therefore, $v\not\Rl w$. Thus, $(T,\lambda)$ explains $Q$.
By construction of the trees in $\mathcal{T}$, all 0-edges are incident to
a leaf. Thus, by Lemma \ref{lem:contract}, every least resolved tree that
explains $Q$ is contained in $\mathcal{T}$.
It remains to show that the least resolved tree $(T,\lambda)\in
\mathcal{T}$ with $T^*\simeq Q$ that explains $Q$ is minimally resolved.
Assume there is another least resolved tree $(T',\lambda') \in
\mathcal{T}$ with leaf set $V(Q)$ that explains $Q$. By Lemma
\ref{lem:bijection}, there is a bijection between those $Q$ and elements
in $\mathcal{T}$ for which $T^*\simeq Q$. Thus, $T'^*\simeq Q'\not\simeq
Q \simeq T^*$, which implies that $T'\not\simeq T$. However,
since in this case $(T',\lambda')$ explains $Q'$ and $Q'\not\simeq Q$,
the pair $(T',\lambda')$ cannot explain $Q$; a contradiction.
\end{proof}
}
\rev{As an immediate consequence of these considerations we obtain}
\begin{thm}
Let $Q$ be a connected component in $G(\Rl)/\Ro$ with vertex set $X$.
Then the tree $(T,\lambda)$ constructed in Algorithm~\ref{alg:Q} is the unique
\rev{minimally} resolved tree that explains $Q$.
\rev{Moreover, for any pair $(T',\lambda')$ that explains $Q$, the tree
$T$ is obtained from $T'$ by contracting all \emph{interior} 0-edges
and putting $\lambda(e) = \lambda'(e)$ for all edges that are not
contracted. }
\label{thm:connComp}
\end{thm}
\begin{proof}
\rev{The first statement follows from Lemma \ref{lem:contract} and
\ref{lem:Umin}. To see the second statement, observe that Lemma
\ref{lem:contract} implies that no interior 1-edge but every 0-edge can
be contracted. Hence, after contracting all 0-edges, no edge can be
contracted and thus, the resulting tree is least resolved. By Lemma
\ref{lem:contract}, we obtain the result.}
\end{proof}
\rev{ We emphasize that although the minimally resolved tree that explains
$G(\Rl)/\Ro$ is unique, this statement is in general not satisfied for
least resolved trees, see Figure \ref{fig:nonU-LRT}. }
\begin{figure}
\caption{\rev{Least resolved trees explaining a given relation are unique
whenever $G(\Rl)/\Ro$ is connected (cf.\ Lemma \ref{lem:Umin}), but in
general not otherwise.}}
\label{fig:nonU-LRT}
\end{figure}
We are now in the position to demonstrate how to obtain a least resolved
tree that explains $G(\Rl)/\Ro$ also in the case that $G(\Rl)/\Ro$ itself
is not connected. To this end, denote by $Q_1,\dots,Q_k$ the connected
components of $G(\Rl)/\Ro$. We can construct a \rev{phylogenetic tree
$T(G(\Rl)/\Ro)$ with leaf set $X$} for $G(\Rl)/\Ro$ using Alg.\
\ref{alg:all}. It basically amounts to constructing a star $S_k$ with
inner vertex $z$, where its leaves are identified with the trees
$T(Q_i)$.
\begin{algorithm}[tbp]
\caption{Compute $(T(G(\Rl)/\Ro), \lambda)$}
\label{alg:all}
\begin{algorithmic}[1]
\REQUIRE disconnected $G(\Rl)/\Ro$
\ENSURE $T(G(\Rl)/\Ro)$
\STATE $T(G(\Rl)/\Ro) \gets (\{\rev{z_T}\},\emptyset)$
\FOR{each connected component $Q_i$}
\STATE construct $(T(Q_i), \lambda_i)$ with Alg.\ \ref{alg:Q}
and add it to $T(G(\Rl)/\Ro)$.
\IF{$T(Q_i)$ is the single vertex graph $(\{x\},\emptyset)$}
\STATE add edge $\rev{z_T}x$
\ELSIF{$T(Q_i)$ is the edge $v_iw_i$}
\STATE remove the edge
$v_iw_i$ from $T(Q_i)$, insert a vertex $x_i$ in $T(Q_i)$
and the edges $x_iv_i$, $x_iw_i$.
\STATE set either $\lambda_{i}(x_iv_i)=1$ and $\lambda_i(x_iw_i)=0$
or $\lambda_{i}(x_iv_i)=0$ and $\lambda_i(x_iw_i)=1$.
\label{step:edge}
\STATE add edge $\rev{z_T}x_i$ to $T(G(\Rl)/\Ro)$.
\ELSE \STATE \label{item:z}
add edge $\rev{z_T}q'_i$ to $T(G(\Rl)/\Ro)$
for an arbitrary inner vertex $q'_i$ of $T(Q_i)$.
\ENDIF
\ENDFOR
\STATE Set $\lambda(\rev{z_T}v)=1$ for all edges $\rev{z_T}v$ and
$\lambda(e)=\lambda_i(e)$ for all edges $e\in T(Q_i)$.
\end{algorithmic}
\end{algorithm}
\rev{
\begin{lemma}
Let $G(\Rl)/\Ro$ have connected components $Q_1,\dots,Q_k$. Let $T'$ be
a tree that explains $G(\Rl)/\Ro$ and $T'_i$ be the subtree of $T'$ with
leaf set $V(Q_i)$ that is minimal w.r.t.\ inclusion, $1\le i\le k$.
Then, $V(T'_i)\cap V(T'_j) =\emptyset$ for $i\neq j$ and, in particular, any
two vertices in $T'_i$ and $T'_j$, respectively, have distance at least
two in $T'$.
\label{lem:subtrees}
\end{lemma}
\begin{proof}
We start to show that two distinct subtrees $T'_i$, and $T'_j$ do not
have a common vertex in $T'$. If one of $Q_i$ or $Q_j$ is a single
vertex graph, then $T'_i$ or $T'_j$ consists of a single leaf only, and
the statement holds trivially.
Hence, assume that both $Q_i$ and $Q_j$ have at least three vertices.
Lemma \ref{lem:contract} implies that each inner vertex of the minimally
resolved trees $T(Q_i)$ and $T(Q_j)$ is incident to exactly one 0-edge as
long as $Q_i$ is not an edge. Since $T(Q_i)$ can be obtained from $T'_i$
by the procedure above, for each inner vertex $v$ in $T'_i$ there is a
leaf $x$ in $T'_i$ such that the unique path from $v$ to $x$ contains
only 0-edges. The same arguments apply, if $Q_i$ is an edge $xy$. In
this case, the tree $T'_i$ must have $x$ and $y$ as leaves, which implies
that $T'_i$ has at least one inner vertex $v$ and that there is exactly
one 1-edge along the path from $x$ to $y$. Thus, for each inner vertex in
$T'_i$ there is a path to either $x$ or $y$ that contains only
0-edges.
Let $v$ and $w$ be arbitrary inner vertices of $T'_i$ and $T'_j$,
respectively, and let $x$ and $y$ be leaves that are connected to $v$ and
$w$, resp., by a path that contains only 0-edges. If $v=w$, then $x\Ro
y$, contradicting the property of $\Ro$ being discrete. Thus, $T'_i$ and
$T'_j$ cannot have a common vertex in $T'$. Moreover, there is no edge
$vw$ in $T'$, since otherwise either $x\Ro y$ (if $\lambda(vw)=0$) or
$x\Rl y$ (if $\lambda(vw)=1$). Hence, any two distinct vertices in
$T'_i$ and $T'_j$ have distance at least two in $T'$.
\end{proof}
}
\rev{We note that Algorithm~\ref{alg:all} produces a tree with a single
vertex of degree $2$, namely $z_T$, whenever $G(\Rl)/\Ro$ consists of
exactly two components. Although this strictly speaking violates the
definition of phylogenetic trees, we tolerate this anomaly for the
remainder of this section.}
\begin{thm}
Let $Q_1,\dots,Q_k$ be the connected components in $G(\Rl)/\Ro$. Up to
the choice of the vertices $q'_i$ in Line \ref{item:z} of Alg.\
\ref{alg:all}, the tree $T^* = T(G(\Rl)/\Ro)$ is a \rev{minimally
resolved tree that explains $G(\Rl)/\Ro$. It is unique up to the choice
of the edges $\rev{z_{T}}q'_i$ in Line \ref{item:z}.}
\label{thm:star-tree}
\end{thm}
\begin{proof}
\rev{Since every tree $T(Q_i)$ explains a connected component in
$G(\Rl)/\Ro$, from the construction of $T^*$ it is easily seen that
$T^*$ explains $G(\Rl)/\Ro$. Now we need to prove that $T^*$ is a
minimally resolved tree that explains $G(\Rl)/\Ro$.
To this end, consider an arbitrary tree $T'$ that explains
$G(\Rl)/\Ro$. Since $T'$ explains $G(\Rl)/\Ro$, it must explain each
of the connected components $Q_1,\dots Q_k$. Thus, each of the subtrees
$T'_i$ of $T'$ with leaf set $V(Q_i)$ that are minimal w.r.t.\
inclusion must explain the connected component $Q_i$, $1\le i \le k$.
Note that the $T'_i$ may have vertices of degree 2.
We show first that $T(Q_i)$ is obtained from $T'_i$ by contracting all
interior 0-edges and all 0-edges of degree 2. If there are no vertices
of degree 2, we can immediately apply Thm.\ \ref{thm:connComp}.
If there is a vertex $v$ of degree 2, then $v$ cannot be incident to
two 1-edges, as otherwise the relation explained by $T'_i$ would not be
connected, contradicting the assumption that $T'_i$ explains the
connected component $Q_i$. Thus, if there is a vertex $v$ of degree 2
it must be incident to a 0-edge $vw$. Contracting $vw$ preserves the
property of $\Ro$ being discrete. If $w$ is a leaf, we can contract
the edge $vw$ to a new leaf vertex $w$; if $vw$ is an interior edge we
simply contract it to some new inner vertex. In both cases, we can
argue analogously as in the proof of Lemma \ref{lem:contract} that the
tree obtained from $T'_i$ after contracting $vw$, still explains $Q_i$.
This procedure can be repeated until no degree-two vertices are in the
contracted $T'_i$.
In particular, the resulting tree is a phylogenetic tree that explains
$Q_i$. Now we continue to contract all remaining interior 0-edges.
Thm.\ \ref{thm:connComp} implies that in this manner we eventually
obtain $T(Q_i)$.
By Lemma \ref{lem:subtrees}, two distinct trees $T(Q_i)$ and $T(Q_j)$ do
not have a common vertex, and moreover, any two vertices in $T(Q_i)$
and $T(Q_j)$, respectively, have distance at least two in $T'$.
This implies that the construction as in Alg.\ \ref{alg:all} yields a
least resolved tree. In more detail, since the subtrees explaining
$Q_i$ in any tree that explains $G(\Rl)/\Ro$ must be vertex disjoint,
the minimally resolved trees $T(Q_1),\dots,T(Q_k)$ must be subtrees of
any minimally resolved tree that explains $G(\Rl)/\Ro$, as long as all
$Q_i$ are single vertex graphs or have at least one inner vertex.
If $Q_i$ is a single edge $v_iw_i$ and thus $T(Q_i) = v_iw_i$ where
$\lambda(v_iw_i)=1$, we modify $T(Q_i)$ in Line \ref{step:edge} to
obtain a tree isomorphic to $S_2$ with inner vertex $x_i$. This
modification is necessary, since otherwise (at least one of) $v_i$ or
$w_i$ would be an inner vertex in $T^*$, and we would lose the
information about the leaves $v_i,w_i$. In particular, we need to add
this vertex $x_i$ because we cannot attach the leaves $v_i$ (resp.\
$w_i$) by an edge $x_jv_i$ (resp.\ $x'_jw_i$) to some subtree
$T(Q_j)$. To see this, note that at least one of the edges $x_jv_i$
and $x'_jw_i$ must be a 0-edge. However, $x_j$ and $x'_j$ are already
incident to a 0-edge $x_jv'_i$ or $x'_jw'_i$ (cf.\ Lemma
\ref{lem:contract}), which implies that $\Ro$ would not be discrete; a
contradiction. By construction, we still have $v_i\Rl w_i$ in Line
\ref{step:edge}.
Finally, any two distinct vertices in $T(Q_i)$ and $T(Q_j)$ have
distance at least two in $T^*$, as shown above. Hence, any path
connecting two subtrees $T(Q_i)$ in $T^*$ contains at least two edges
and hence at least one vertex that is not contained in any of the
$T(Q_i)$. Therefore, any tree explaining $Q$ has at least $1+\sum_i
|V(T(Q_i))|$ vertices.
We now show that adding a single vertex \rev{$z_T$}, which we may consider
as
a trivial tree $(\{\rev{z_T}\},\emptyset)$, is sufficient. Indeed, we may
connect the different trees to $z_T$ by insertion of an edge
$\rev{z_T}q'_i$,
where $q'_i$ is an arbitrary inner vertex of $T(Q_i)$ and label these
edges $\lambda(\rev{z_T}q'_i)=1$. Thus, no two leaves $u$ and $w$ of
distinct
trees are either in relation $\Ro$ or $\Rl$, as required. The resulting
trees have the minimal possible number of vertices, i.e., they are
minimally resolved. }
\end{proof}
\subsection*{Binary trees}
Instead of asking for least resolved trees that explain $G(\Rl)/\Ro$, we may
also consider the other extreme and ask which binary, i.e., fully resolved
tree can explain $G(\Rl)/\Ro$. Recall that an $X$-tree is called binary or
fully resolved if the root has degree $2$ while all other inner vertices
have degree $3$. From the construction of the least resolved trees we
immediately obtain the following:
\begin{cor}
A least resolved tree $T(Q)$ for a connected component $Q$ of
$G(\Rl)/\Ro$ is binary if and only if $Q$ is a path.
\end{cor}
If a least resolved tree $T(Q)$ of $G(\Rl)/\Ro$ is a star, we have:
\begin{lemma}\label{lem:star}
If a least resolved tree $T(Q)$ explaining $G(\Rl)/\Ro$ is a star
with $n$ leaves, then either
\begin{itemize}
\item[(a)] all edges in $T(Q)$ are 1-edges and $Q$ has no edge, or
\item[(b)] there is exactly one 0-edge in $T(Q)$ and $Q$ is a star
with $n-1$ leaves.
\end{itemize}
\end{lemma}
\begin{proof}
For the implications in cases (a) and (b) we can re-use exactly the same
arguments as in the proofs of Theorem \ref{thm:connComp} and
\ref{thm:star-tree}.
Now suppose there are at least two (incident) 0-edges in $T(Q)$, whose
endpoints are the vertices $u$ and $v$. Then $u\Ro v$, which is
impossible in $G(\Rl)/\Ro$.
\end{proof}
\rev{
\begin{lemma}\label{lem:local-star}
Let $(T,\lambda)$ be a least resolved tree that explains $G(\Rl)/\Ro$.
Consider an arbitrary subgraph $S_k$ that is induced by an inner vertex
$v_0$ and \emph{all} of its $k$ neighbors. Then, $S_k$ with its
particular labeling $\lambda_{|E(S_k)}$ is always of type (a) or (b) as
in Lemma \ref{lem:star}.
\end{lemma}
\begin{proof}
This is an immediate consequence of Lemma \ref{lem:contract}
and the fact that $\Ro$ is discrete.
\end{proof}
}
To construct the binary tree \rev{explaining} the star $Q=S_n$, we consider
the set of all binary trees with $n$ leaves and 0/1-edge labels. If $S_n$
is of type (a) in Lemma~\ref{lem:star}, then all terminal edges are labeled
$1$ and all interior edges are arbitrarily labeled $0$ or $1$.
Figure~\ref{fig:non_lrt} shows an example for $S_6$. If $S_n$ is of type
(b), we label the terminal edges in the same way as in $T(Q)$ and all
interior edges are labeled 0. In this case, for each binary tree there is
exactly one labeling.
\begin{figure}
\caption{For fixed underlying tree $T(Q)$, in this case a star $S_6$ with
all $1$, there are in general multiple labelings $\lambda$ that
\rev{explains}
\label{fig:non_lrt}
\end{figure}
In order to obtain the complete set of binary trees that explain $G$ we can
proceed as follows. If $G$ is connected, there is a single minimally resolved
tree $T(G)$ explaining $G$. If $G$ is not connected then there are multiple
minimally resolved trees $T$.
\rev{Let $\mathcal{T}_{\mathrm{lrt}}$ be the set of all least resolved
trees that explain $G$. For every such least resolved tree $T\in
\mathcal{T}_{\mathrm{lrt}}$} we iterate over all vertices $v_0$ of $T$
with degree $k>3$ and perform the following manipulations:
\begin{itemize}
\item[{1.}] Given a vertex $v_0$ of $T$ with degree $k>3$, denote the set of
its neighbors $v_1, v_2,\dots, v_k$ by $N(v_0)$.
Delete vertex $v_0$ and its attached
edges from $T$, and rename the neighbors $v_i$ to $v_i'$ for all $1\leq
i\leq k$. Denote the resulting forest by $F(v_0)$.
\item[{2.}] Generate all binary trees with leaves $v_1',\dots, v_k'$.
\item[{3.}] Each of these binary trees is inserted into a copy of the
forest $F(v_0)$ by \rev{identifying} $v_i$ and $v_i'$ for all $1\leq
i\leq k$.
\item[{4.}] \rev{ For each of the inserted binary trees $T'$ that results
from a ``local'' star $S_k$ in step 3. we must place an edge label. \\
Put $\lambda(xv_i)= \lambda(v_0v_i)$ for all edges $xv_i$ in
$T'$ and mark $xv_i$ as \emph{LABELED}.
If $S_k$ is of type (a) (cf.\, Lemma~\ref{lem:star}), then
choose an arbitrary 0/1-label for the interior edges of $T'$. If $S_k$
is of type (b) we need to consider the two exclusive cases for the
vertex $v_j$ for which $\lambda(v_jx)=0$:
\begin{itemize}
\item[(i)] For all $y\in V(G)$ for which $v_j \Rl y$, label all
\emph{interior}
edges on the unique path $\mathbb{P}(v_j,y)$ that are also contained in
$T'$ and are not marked
as \emph{LABELED} with 0 and mark them as \emph{LABELED}
and
choose an arbitrary 0/1-label for all other un\emph{LABELED} interior
edges of $T'$.
\item[(ii)] Otherwise, choose an arbitrary 0/1-label for the interior
edges of $T'$.
\end{itemize}
}
\end{itemize}
\rev{It is well known that each binary tree has $k-3$ interior edges
\cite{sem-ste-03a}. Hence, for a binary tree there are $2^{k-3}$
possibilities to place a 0/1 label on its interior edges. Let $t(k)$
denote the number of binary trees with $k$ leaves and $V_a,V_b$ be a
partition of the inner vertices into those where the neighborhood
corresponds to a star of type (a) and (b), respectively. Note, if $T$ is
minimally resolved, then $|V_a|\le1$. For a given least resolved tree
$T\in \mathcal{T}_{\mathrm{lrt}}$, the latter procedure yields the set of
all $ (\prod_{v\in V_a} t(\mathrm{deg}(v))2^{\mathrm{deg}(v)-3} )
\cdot (\prod_{v\in V_b} t(\mathrm{deg}(v) )) $ pairwise distinct binary
trees that one can obtain from $T$. The union of these tree sets and its
particular labeling over all $T\in \mathcal{T}_{\mathrm{lrt}}$ is then
the set of all binary trees explaining $G$. } To establish the
correctness of this procedure, we prove \newline
\begin{lemma}\label{lem:binary}
The procedure outlined above generates all binary trees $(T,\lambda)$
\rev{explaining} $G$.
\end{lemma}
\rev{
\begin{proof}
We first note that there may not be a binary tree explaining $G$. This is
the case whenever $T(G)$ has a vertex of degree $2$, which is present in
particular if $G$ is a forest with two connected components.
Now consider an arbitrary binary tree $(T_B(G), \lambda)$ that is not
least resolved for $G$. Then a least resolved tree $T(G)$ explaining $G$
can be obtained from $T_B(G)$ by contracting edges and retaining the
labeling of all non-contracted edges. In the following we will show that
the construction above can be used to recover $T_B(G) = (V,E)$ from
$T(G)$. To this end, first observe that only interior edges can be
contracted in $T_B(G)$ to obtain $T(G)$. Let $E' = \{e_1,\dots,e_h\}$ be
a maximal (w.r.t.\ inclusion) subset of contracted edges of $T_B(G)$ such
that the subgraph $(V' = \cup_{i=1}^h e_i, E')$ is connected, and thus
forms a subtree of $T_B(G)$. Furthermore, let $F = \{f_1,\dots,f_k\}
\subseteq E\setminus E'$ be a maximal subset of edges of $T_B(G)$ such
that for all $f_i\in F$ there is an edge $e_j\in E'$ such that $f_i\cap
e_j\neq \emptyset$. Moreover, set $W = \cup_{i=1}^k f_i$. Thus, the
contracted subtree $(V',E')$ locally corresponds to the vertex $v_0$ of
degree $k>3$ and thus, to a local star $S_k$. Now, replacing $S_k$ by
the tree $(V'\cup W, E'\cup F)$ (as in Step 3) yields the subtree from
which we have contracted all interior edges that are contained in $E'$.
Since the latter procedure can be repeated for all such maximal sets
$E'$, we can recover $T_B(G)$.
It remains to show that one can also recover the labeling $\lambda$ of
$T_B(G)$. Since $T(G)$ is a least resolved tree obtained from $T_B(G)$
that explains $G$, we have, by definition, $u \Rl w$ in $T(G)$ if and
only if $u \Rl w$ in $T_B(G)$. By Lemma \ref{lem:local-star}, every
local star $S_k$ in $T(G)$ is either of type (a) or (b). Assume it is of
type (a), i.e., $\lambda(v_0v_i)=1$ for all $1\leq i\leq k$. Let $u$ and
$w$ be leaves of $G$ for which the unique path connecting them contains
the edge $v_0v_i$ and $v_0v_j$. Thus, there are at least two edges
labeled $1$ along the path; hence $u\not\Rl w$. The edges $xv_i$ and
$x'v_j$ in $T'$ are both labeled by 1; therefore $u$ and $w$ are not in
relation $\Rl$ after replacing $S_k$ by $T'$. Therefore all possible
labelings can be used in Step 4, except for the edges that are not
contained in $T'$ and $xv_i$ and $x'v_j$ (which are marked as
\emph{LABELED}). Therefore, we also obtain the given labeling of the
subtree $T = (V'\cup W, E'\cup F)$ as a result.
If the local star $S_k$ is of type (b), then there is exactly one edge
$v_0v_j$ with $\lambda(v_0v_j) = 0$. By Lemma \ref{lem:contract} and
because $(T(G),\lambda)$ is least resolved, the vertex $v_j$ must be a
leaf of $G$. For two leaves $u,w$ of $T(G)$ there are two cases: either
the unique path $\mathbb{P}(u,w)$ in $T(G)$ contains $v_0$ or not.
If $\mathbb{P}(u,w)$ does not contain $v_0$, then this path and its
labeling remain unchanged after replacing $S_k$ by $T'$. Hence, the
relations $\Rl$ and $\not\Rl$ are preserved for all such vertices $u,w$.
First, assume that $\mathbb{P}(u,w)$ contains $v_0$ and thus two edges
incident to vertices in $N(v_0)$. If the path $\mathbb{P}(u,w)$ contains
two edges $v_0v_i$ and $v_0v_l$ with $i,l\neq j$, then $\lambda(v_0v_i) =
\lambda(v_0v_l)=1$. Thus, $u\not\Rl w$ in $T(G)$. The extended path
(after replacing $S_k$ by $T'$) still contains the two 1-edges $xv_i$ and
$x'v_l$, independently from the labeling of all other edges in $T'$ that
have remained un\emph{LABELED} up to this point. Thus, $u\not\Rl w$
is preserved after replacing $S_k$ by $T'$.
Next, assume that $\mathbb{P}(u,w)$ contains $v_0$ and the 0-edge
$v_0v_j$ in $T(G)$. In the latter case, $u=v_j$ or $w=v_j$. Note, there
must be another edge $v_0v_l$ in $\mathbb{P}(u,w)$ with $v_l \in N(v_0)$,
$l\neq j$ and therefore, with $\lambda(v_0v_l)=1$. There are two cases,
either $u\Rl w$ or $u\not\Rl w$ in $T(G)$.
If $u\Rl w$ then there is exactly one 1-edge (the edge $v_0v_l$)
contained in $\mathbb{P}(u,w)$ in $T(G)$. By construction, all interior
edges on the path $\mathbb{P}(u,w)$ that are contained in $T'$ are
labeled with $0$ and all other edge-labelings remain unchanged in $T(G)$
after replacing $S_k$ by $T'$. Thus, $u\Rl w$ in $T(G)$ after replacing
$S_k$ by $T'$. Analogously, if $u\not\Rl w$, then there are at least two
1-edges in $\mathbb{P}(u,w)$ in $T(G)$. Since $\lambda(v_0v_j)=0$ and
$\lambda(v_0v_l)=1$, the 1-edge different from $v_0v_l$ is not contained
in $T'$ and its label 1 remains unchanged. Moreover, the edge $xv_l$ in
$T'$ also gets the label 1 in Step 4. Thus, $\mathbb{P}(u,w)$ still
contains at least two 1-edges in $T(G)$ after replacing $S_k$ by $T'$
independently of the labeling chosen for the other un\emph{LABELED}
interior edges of $T'$. Thus, $u\not\Rl w$ in $T(G)$ after replacing
$S_k$ by $T'$.
We allow all possible labelings and fix parts where necessary. In
particular, we obtain the labeling of the subtree $(V'\cup W, E'\cup F)$
that coincides with the labeling of $T_B(G)$. Thus, we can repeat this
procedure for all stars $S_k$ in $T(G)$ and their initial labelings.
Therefore, we can recover both $T_B(G)$ and its edge-labeling $\lambda$.
Clearly, every binary tree $T_B(G)$ that explains $G$ is either already
least resolved or there is a least resolved tree $T(G)\in
\mathcal{T}_{\mathrm{lrt}}$ from which $T_B(G)$ can be recovered by the
construction as outlined above.
\end{proof}
}
\rev{As a consequence of} the proof of Lemma~\ref{lem:binary} we
immediately obtain the following Corollary that characterizes the condition
that $Q$ cannot be explained by a binary tree.
\begin{cor}
$G(\Rl)/\Ro$ cannot be explained by a binary tree if and only if
$G(\Rl)/\Ro$ \rev{is a forest with} exactly two connected components.
\end{cor}
The fact that exactly two connected components appear as a special case is
the consequence of a conceptually too strict definition of ``binary
tree''. If we allow a single ``root vertex'' of degree $2$ in this special
case, we no longer have to exclude two-component graphs.
\section{The antisymmetric single-1 relation}
\label{sect:1dir}
The antisymmetric version $x\Rld y$ of the 1-relation shares many basic
properties with its symmetric cousin. We therefore will not show all formal
developments in full detail. Instead, we will where possible appeal to the
parallels between $x\Rld y$ and $x\Rl y$. For convenience we recall the
definition: $x \Rld y$ if and only if all edges along $\mathbb{P}(u,x)$ are
labeled $0$ and exactly one edge along $\mathbb{P}(u,y)$ is labeled $1$,
where $u=\lca{x,y}$. As an immediate consequence we may associate with
$\Rld$ a symmetrized 1-relation $x\Rl y$ whenever $x\Rld y$ or $y\Rld
x$. Thus we can infer (part of) the underlying unrooted tree topology by
considering the symmetrized version $\Rl$. On the other hand, $\Rld$ cannot
convey more information on the unrooted tree from which $\Rld$ and its
symmetrization $\Rl$ are derived. It remains, however, to infer the
position of the root from directional information. Instead of the
quadruples used for the unrooted trees in the previous section, structural
constraints on rooted trees are naturally expressed in terms of triples.
In the previous section we have considered $\Rl$ in relation to unrooted
trees only. Before we start to explore $\Rld$ we first ask whether $\Rl$
contains any information about the position of the root and if it already
places any constraints on $\Rld$ beyond those derived for $\Rl$ in the
previous section. In general the answer to this question will be negative,
as suggested by the example of the tree $T_5^*$ in Figure
\ref{fig:root}. Any of its inner vertices can be chosen as the root, and
each choice of a root vertex yields a different relation $\Rld$.
Nevertheless, at least partial information on $\Rld$ can be inferred
uniquely from $\Rl$ and $\Ro$. Since all connected components in
$G(\Rl)/\Ro$ are trees, we observe that the underlying graphs
$\underline{G(\Rld)/\Ro}$ of all connected components in $G(\Rld)/\Ro$ must
be trees as well. Moreover, since $\Ro$ is discrete in $G(\Rl)/\Ro$, it is
also discrete in $G(\Rld)/\Ro$.
Let $Q$ be a connected component in ${G(\Rld)/\Ro}$. \rev{If $Q$ is an
isolated vertex or a single edge, there is only a single phylogenetic
rooted tree (a single vertex and a tree with two leaves and one inner
root vertex, resp.) that explains $Q$ and the position of its root is
uniquely determined.}
Thus we assume that $Q$ contains at least three
vertices from here on. By construction, any three vertices $x,y,z$ in a
connected component $Q$ in $G(\Rld)/\Ro$ either induce a disconnected
graph, or a tree on three vertices. Let $x,y,z\in V(Q)$ induce such a
tree. Then there are three possibilities (up to relabeling of the vertices)
for the induced subgraph contained in $G(\Rld)/\Ro = (V,E)$:
\begin{itemize}
\item[(i)] $xy, yz \in E$ implying that $x\Rld y \Rld z$,
\item[(ii)] $yx, yz \in E$ implying that $y\Rld x$ and $y \Rld z$,
\item[(iii)] $xy, zy \in E$ implying that $x\Rld y$ and $z \Rld y$.
\end{itemize}
Below, we will show that Cases (i) and (ii) both imply a unique tree on the
three leaves $x,y,z$ together with a unique 0/1-edge labeling for the
unique resolved tree $T(Q)$ that displays $Q$, see Fig.\
\ref{fig:2cases}. Moreover, we shall see that Case (iii) cannot occur.
\begin{figure}
\caption{Placing the root. The tree $T$ in the upper left is the unique
minimally resolved tree that explains $G(\Rl)/\Ro$ (shown below $T$).
Each of the three inner vertices $a$, $b$, or $c$ of $T$ can be chosen as
the root, giving rise to three distinct relations $\Rld$. For the
``siblings'' in the unrooted tree $x_1,x_2$ as well as $x_4,x_5$ it holds
that $x_2\Rld x_1$ and $x_4\Rld x_5$ for all three distinct relations.
Thus, there are uniquely determined parts of $\Rld$ conveyed by the
information of $\Ro$ and $\Rl$ only.}
\label{fig:root}
\end{figure}
\begin{figure}
\caption{There are only two possibilities for induced connected subgraphs
$H$ in $G(\Rld)/\Ro$ on three vertices, cf.\ Lemma \ref{lem:case-i}
\label{fig:2cases}
\end{figure}
\begin{lemma}
In Case (i), the unique triple $\rt{yz|x}$ must be displayed by
any tree $T(Q)$ that explains $Q$. Moreover, the paths $\mathbb{P}(u,v)$ and
$\mathbb{P}(v,z)$ in $T(Q)$ contain both exactly one 1-edge, while the other
paths $\mathbb{P}(u,x)$ and $\mathbb{P}(v,y)$ contain only 0-edges, where
$u=\lca{xy}=\lca{xz} \neq \lca{yz}=v$.
\label{lem:case-i}
\end{lemma}
\begin{proof}
Let $x,y,z\in V(Q)$ such that $xy, yz \in E$ and thus, $x\Rld y \Rld z$.
Notice first that there must be two distinct last common ancestors for
pairs of the three vertices $x,y,z$; otherwise, if
$u=\lca{xy}=\lca{xz} = \lca{yz}$, then the path $\mathbb{P}(u,y)$ contains a
1-edge (since $x\Rld y$) and hence $y\Rld z$ is impossible. We
continue to show that $u=\lca{xy}=\lca{xz}$. Assume that $u=\lca{xy}\neq
\lca{xz}$. Hence, either the triple $\rt{xz|y}$ or $\rt{xy|z}$ is
displayed by $T(Q)$. In either case the path $\mathbb{P}(u,y)$ contains a 1-edge,
since $x\Rld y$. This, however, implies $y\not\Rld z$, a contradiction.
Thus, $u=\lca{xy} = \lca{xz}$. Since there are two distinct last common
ancestors, we have $u\neq v=\lca{yz}$. Therefore, the triple $\rt{yz|x}$
must be displayed by $T(Q)$. From $y\Rld z$ we know that $\mathbb{P}(v,y)$ only
contains 0-edges and $\mathbb{P}(v,z)$ contains exactly one 1-edge; $x\Rld y$
implies that $\mathbb{P}(x,u)$ contains only 0-edges. Moreover, since $\mathbb{P}(x,y) =
\mathbb{P}(x,u)\cup \mathbb{P}(u,v)\cup \mathbb{P}(v,y)$ and $x\Rld y$, the path $\mathbb{P}(u,v)$ must
contain exactly one 1-edge.
\end{proof}
\begin{lemma}
In Case (ii), there is a unique tree on the three vertices $x,y,z$
with single root $\rho$ displayed by any least resolved tree $T(Q)$ that
explains $Q$. Moreover, the path $\mathbb{P}(\rho,y)$ contains only 0-edges,
while the other paths $\mathbb{P}(\rho,x)$ and $\mathbb{P}(\rho,z)$ must both contain
exactly one 1-edge.
\label{lem:case-ii}
\end{lemma}
\begin{proof}
Assume for contradiction that there is a least resolved tree $T(Q)$
that displays $\rt{xy|z}$, $\rt{yz|x}$, or $\rt{xz|y}$.
The choice of $\rt{xy|z}$ implies $u=\lca{xy}\neq \lca{xz}=\lca{yz}=v$.
Since $y\Rld x$ and $y\Rld z$, $\mathbb{P}(v,y) \subsetneq \mathbb{P}(u,y)$ contains only
0-edges, while $\mathbb{P}(u,x)$ and $\mathbb{P}(v,z)$ each must contain exactly one
1-edge, respectively. This leads to a tree $T'$ that yields the correct
$\Rld$-relation. However, this tree is not least resolved. By
contracting the path $\mathbb{P}(u,v)$ to a single vertex $\rho$ and maintaining
the labels on $\mathbb{P}(\rho,x)$, $\mathbb{P}(\rho,y)$, and $\mathbb{P}(\rho,z)$ we obtain the
desired labeled least resolved tree with single root.
For the triple $\rt{yz|x}$ the existence of the unique, but not least
resolved tree can be shown by the same argument with exchanged roles of
$x$ and $y$.
For the triple $\rt{xz|y}$ we have $u = \lca{xy} = \lca{yz} \neq \lca{xz} =
v$. From $y\Rld x$ and $y\Rld z$ we see that both paths $\mathbb{P}(u,x) =
\mathbb{P}(u,v)\cup \mathbb{P}(v,x)$ and $\mathbb{P}(u,z) = \mathbb{P}(u,v)\cup \mathbb{P}(v,z)$ contain exactly
one 1-edge, while all edges in $\mathbb{P}(u,y)$ are labeled $0$. There are two
cases: (1) The path $\mathbb{P}(u,v)$ contains this 1-edge, which implies that
both paths $\mathbb{P}(v,x)$ and $\mathbb{P}(v,z)$ contain only 0-edges. But then $x\Ro
z$, a contradiction to $\Ro$ being discrete. (2) The path $\mathbb{P}(u,v)$
contains only 0-edges, which implies that each of the paths $\mathbb{P}(v,x)$ and
$\mathbb{P}(v,z)$ contains exactly one 1-edge. Again, this leads to a tree that
yields the correct $\Rld$-relation, but it is not least resolved. By
contracting the path $\mathbb{P}(u,v)$ to a single vertex $\rho$ and maintaining
the labels on $\mathbb{P}(\rho,x)$, $\mathbb{P}(\rho,y)$, and $\mathbb{P}(\rho,z)$ we obtain the
desired labeled least resolved tree with single root.
\end{proof}
\begin{lemma}
Case (iii) cannot occur.
\label{lem:case-iii}
\end{lemma}
\begin{proof}
Let $x,y,z\in V(Q)$ such that $xy, zy \in E$ and thus, $x\Rld y$ and $z
\Rld y$. Hence, in the rooted tree that \rev{explains} this relationship
we have the following situation: All edges along $\mathbb{P}(u,x)$ are
labeled $0$; exactly one edge along $\mathbb{P}(u,y)$ is labeled $1$,
where $u=\lca{x,y}$; all edges along $\mathbb{P}(v,z)$ are labeled $0$,
and exactly one edge along $\mathbb{P}(v,y)$ is labeled $1$, where
$v=\lca{y,z}$. Clearly, $\lca{x,y,z}\in \{u,v\}$. If $u=v$, then all
edges in $\mathbb{P}(u,x)$ and $\mathbb{P}(u,z)$ are labeled $0$,
implying that $x\Ro z$, contradicting that $\Ro$ is discrete.
Now assume that $u=\lca{x,y}\neq v=\lca{y,z}$. Hence, one of the triples
$\rt{xy|z}$ or $\rt{yz|x}$ must be displayed by $T(Q)$. W.l.o.g., we can
assume that $\rt{yz|x}$ is displayed, since the case $\rt{xy|z}$ is shown
analogously by interchanging the role of $x$ and $z$. Thus, $\lca{x,y,z}
= \lca{x, y} = u\neq \lca{yz}=v$. Hence, $\mathbb{P}(u,y) =
\mathbb{P}(u,v) \cup \mathbb{P}(v,y)$. Since $z\Rld y$, the path
$\mathbb{P}(v,y)$ contains a single 1-edge and $\mathbb{P}(v,z)$ contains
only 0-edges. Therefore, the paths $\mathbb{P}(u,x)$ and
$\mathbb{P}(u,v)$ contain only 0-edges, since $x\Rl y$. Since
$\mathbb{P}(x,z) = \mathbb{P}(x,u)\cup \mathbb{P}(u,v)\cup
\mathbb{P}(v,z)$ and all edges along $\mathbb{P}(u,x)$, $\mathbb{P}(u,v)$
and $\mathbb{P}(v,z)$ are labeled $0$, we obtain $x\Ro z$, again a
contradiction.
\end{proof}
Taken together, we obtain the following immediate implication:
\begin{cor}
The graph $G(\Rld)/\Ro$ does not contain a pair of edges of the form $xv$
and $yv$.
\label{cor:edge-pointing}
\end{cor}
Recall that the connected components $\underline{Q}$ in $G(\Rld)/\Ro$ are
trees. By Cor.\ \ref{cor:edge-pointing}, $Q$ must be composed of
distinct paths that ``point away'' from each other. In other words,
let $P$ and $P'$ be distinct directed paths in $Q$ that share a vertex
$v$, then it is never the case that there is an edge $xv$ in $P$ and an
edge $yv$ in $P'$, that is, both edges ``pointing'' to the same vertex
$v$. We first consider directed paths in isolation.
\begin{lemma}
Let $Q$ be a connected component in $G(\Rld)/\Ro$ that is a
directed path with $n\ge 3$ vertices labeled $x_1,\dots,x_n$ such that
$x_ix_{i+1}\in E(Q)$, $1\leq i\leq n-1$.
Then the tree $T(Q)$ explaining $Q$ must display all triples in $\mathcal
R_Q = \{\rt{x_ix_j|x_l} \mid i,j>l\geq 1\}$. Hence, $T(Q)$ must display
$\binom{n}{3}$ triples and is therefore the unique (least resolved)
binary rooted tree $(\dots(x_n,x_{n-1})x_{n-2})\dots)x_2)x_1$ that
explains $Q$. Moreover, all interior edges in $T(Q)$ and the edge incident
to $x_n$ are labeled $1$ while all other edges are labeled $0$.
\label{lem:path-tree}
\end{lemma}
\begin{proof}
Let $Q$ be a directed path as specified in the lemma. We prove the
statement by induction. For $n=3$ the statement follows from Lemma
\ref{lem:case-i}. Assume the statement is true for $n=k$. Let $Q$ be a
directed path with vertices $x_1,\dots,x_k, x_{k+1}$ and edges
$x_ix_{i+1}$, $1\leq i\leq k$ and let $T(Q)$ be a tree that explains
$Q$. For the subpath $Q'$ on the vertices $x_2,\dots,x_{k+1}$ we can
apply the induction hypothesis and conclude that $T(Q')$ must display the
triples $\rt{x_ix_j|x_l}$ with $i,j>l\geq 2$ and that all interior edges in
$T(Q')$ and the edge incident to $x_{k+1}$ are labeled $1$ while all
other edges are labeled $0$. Since $T(Q)$ must explain in particular the
subpath $Q'$ and since $T(Q')$ is fully resolved, we can conclude that
$T(Q')$ is displayed by $T(Q)$ and that all edges in $T(Q)$ that are also
in $T(Q')$ retain the same label as in $T(Q')$.
Thus $T(Q)$ displays in particular the triples $\rt{x_ix_j|x_l}$ with
$i,j>l\geq 2$. By Lemma \ref{lem:case-i}, and because there are edges
$x_1x_2$ and $x_2x_3$, we see that $T(Q)$ must also display
$\rt{x_2x_3|x_1}$. Take any triple $\rt{x_3x_j|x_2}$, $j>3$.
Application of the triple-inference rules shows that any tree that
displays $\rt{x_2x_3|x_1}$ and $\rt{x_3x_j|x_2}$ must also display
$\rt{x_2x_j|x_1}$ and $\rt{x_3x_j|x_1}$. Hence, $T(Q)$ must display these
triples. Now we apply the same argument to the triples $\rt{x_2x_j|x_1}$
and $\rt{x_ix_j|x_2}$, $i,j>2$ and conclude that in particular, the
triple $\rt{x_ix_j|x_1}$ must be displayed by $T(Q)$ and thus, the
entire set of triples $\{\rt{x_ix_j|x_l} \colon i,j>l\geq 1\}$. Hence,
there are $\binom{n}{3}$ triples and thus, the set of triples that needs
to be displayed by $T(Q)$ is strictly dense. Making use of a technical
result from \cite[Suppl. Material]{Hellmuth:15a}, we obtain that $T(Q)$
is the unique binary tree $(\dots(x_n,x_{n-1})x_{n-2})\dots)x_2)x_1$. Now
it is an easy exercise to verify that the remaining edge containing $x_1$
must be labeled $0$, while the interior edges not contained in $T(Q')$ must
all be 1-edges.
\end{proof}
\begin{figure}
\caption{Connected components $Q$ in $G(\Rld)/\Ro$ are trees composed of
paths pointing away from each other. On the top, a directed path
$\mathfrak{m}
\label{fig:Qtree}
\end{figure}
If $Q$ is connected but not a simple path, it is a tree composed of the
paths pointing away from each other as shown in Fig. \ref{fig:Qtree}. It
remains to show how to connect the distinct trees that explain these paths
to obtain a tree $T(Q)$ for $Q$. To this end, we show first that there is
a unique vertex $v$ in $Q$ such that no edge ends in $v$.
\begin{lemma}
Let $Q$ be a connected component in $G(\Rld)/\Ro$. Then there is a
unique vertex $v$ in $Q$ such that there is no edge $xv\in E(Q)$.
\label{lem:unique-v}
\end{lemma}
\begin{proof}
Corollary \ref{cor:edge-pointing} implies that for each vertex $v$ in $Q$
there is at most one edge $xv\in E(Q)$. If for all vertices $w$ in $Q$
we would have an edge $xw\in E(Q)$, then $Q$ contains cycles,
contradicting the tree structure of $Q$. Hence, there is at least one
vertex $v$ so that there is no edge of the form $xv\in E(Q)$. \rev{
Assume there are two vertices $v,v'$ so that there are no edges of the
form $xv, yv'$, then all edges incident to $v,v'$ are of the form $vx,
v'y$. However, in this case, the unique path connecting $v,v'$ in $Q$
must contain two edges of the form $aw$, $bw$; a contradiction to
Corollary \ref{cor:edge-pointing}.} Thus, there is exactly one vertex
$v$ in $Q$ such that there is no edge $xv\in E(Q)$.
\end{proof}
By Lemma \ref{lem:unique-v}, for each connected component $Q$ of
$G(\Rld)/\Ro$ there is a unique vertex $v_Q$ s.t.\ all edges incident to
$v_Q$ are of the form $v_Qx$. That is, all directed paths that are maximal
w.r.t.\ inclusion start in $v_Q$. Let $\mathcal P_Q$ denote the set of
all such maximal paths. Thus, for each path $P \in \mathcal P_Q$
there is the triple set $\mathcal{R}_{P}$ according to Lemma
\ref{lem:path-tree} that must be displayed by any tree that also explains
$Q$. Therefore, $T(Q)$ must display all triples in $\mathcal{R}_Q =
\cup_{P\in \mathcal{P}_Q} \mathcal{R}_{P}$.
The underlying undirected graph $\underline{G(\Rld)/\Ro}$ is isomorphic to
$G(\Rl)/\Ro$. Thus, with Algorithm \ref{alg:Q}, one can, similarly to the
unrooted case, first construct the tree $T(\underline{Q})$ and then set the
root $\rho_Q=v'_Q$ to obtain $T(Q)$. It is easy to verify that this tree
$T(Q)$ displays all triples in $\mathcal{R}_Q$. Moreover, any
edge-contradiction in $T(Q)$ leads to the loss of an input triple in
$\mathcal{R}_Q$ and in particular, to a wrong pair of vertices w.r.t.\
$\Rld$ or $\Ro$. Thus, $T(Q)$ is a least resolved tree for
$\mathcal{R}_Q$ and therefore, a least resolved tree that explains $Q$.
We summarize these arguments in
\begin{cor}
Let $Q$ be a connected component in $G(\Rld)/\Ro$. \rev{Then a tree
$T(Q)$ that explains $Q$ can be obtained from the unique minimally
resolved tree $T(\underline{Q})$ that explains $\underline{Q}$} by
choosing the unique vertex $v$ where all edges incident to $v$ are of the
form $vx$ as the root $\rho_Q$.
\label{cor:Q-root}
\end{cor}
If $G(\Rld)/\Ro$ is disconnected, one can apply Algorithm \ref{alg:all}, to
obtain the tree $T(G(\Rl)/\Ro)$ and then choose either one of the vertices
$\rho_Q$ or the vertex $z_T$ as root to obtain $T(G(\Rld)/\Ro)$, in which
case all triples of $\mathcal{R}_{G(\Rld)/\Ro} = \cup_Q \mathcal R_Q$ are
displayed. Again, it is easy to verify that any edge-contradiction leads to
a wrong pair of vertices in $\Rld$ or $\Ro$. Thus, $T(G(\Rld)/\Ro)$ is a
least resolved tree for $\mathcal{R}_{G(\Rld)/\Ro}$.
To obtain uniqueness of minimally resolved trees
one can apply similar arguments as in the proofs of
Theorems \ref{thm:connComp} and \ref{thm:star-tree}. This yields the
following characterization:
\begin{thm}
Let $Q_1,\dots, Q_k$ be the connected components in $G(\Rld)/\Ro$. Up to
the choice of the vertices $q'_i$ in Line \ref{item:z} of Alg.\
\ref{alg:all} for the construction of $T(\underline{Q_i})$ and the choice
of the root $\rho\in \{\rho_{Q_1},\ldots, \rho_{Q_k}, z\}$, the tree $T^*
= T(G(\Rld)/\Ro)$ is the unique \rev{minimally} resolved tree that
explains $G(\Rld)/\Ro$.
\label{thm:star-tree-dir}
\end{thm}
\section{Mix of symmetric and anti-symmetric relations}
\label{sect:mixed}
In real data, e.g., in the application to mitochondrial genome
arrangements, one can expect that the known relationships are in part
directed and in part undirected. Such data are naturally encoded by a
relation $\Rld$ with directional information and a relation $\Rlstar$
comprising the set of pairs for which it is unknown whether \rev{ one of
$x\Rld y$ and $y\Rld x$ or $x\Rl y$ is true. Here, $\Rl$ is a subset of
$\Rlstar$.} The disjoint union $\Rlstar\uplus\Rld$ of these two parts
can be seen as refinement of a corresponding symmetrized relation $x\Rl y$.
Ignoring the directional information one can still construct the tree
$T(G(\Rl)/\Ro)$. In general there will be less information about the
placement of the root in $T(G(\Rlstar\cup\Rld)/\Ro)$ than with a fully
directed edge set.
\rev{In what follows, we will consider all edges of
$G(\Rlstar\cup\Rld)/\Ro$ to be directed, that is, for a symmetric pair
$(a,b)\in\Rlstar$ we assume that both directed edges $ab$ and $ba$ are
contained in $G(\Rlstar\cup\Rld)/\Ro$. Still, for any connected
component $Q$ the underlying undirected graph $\underline{Q}$ is a tree.}
Given a component $Q$ we say that a directed edge $xy \in E(Q)$
\emph{points away from the vertex $v$} if the unique path in
$\underline{Q}$ from $v$ to $x$ does not contain $y$. In this case the path
from $v$ to $y$ must contain $x$. Note that in this way we defined
``pointing away from $v$'' not only for the edges incident to $v$, but for
all directed edges. A vertex $v$ is a \emph{central vertex}
if, for any two distinct vertices $x,y\in V$ that form an edge in $T$,
either $xy$ or $yx$ in $T$ points away from $v$.
As an example consider the tree $a\leftarrow b\rightarrow c \leftrightarrow
d\rightarrow e$. \rev{There is only the edge $bc$ containing $b$ and $c$.
However, $bc$ does not point away from vertex $d$, since the unique path
from $d$ to $b$ contains $c$.}
Thus $d$ is not central. On the other hand, $b$ is a
central vertex. The only possibility in this example to obtain a valid
relation $\Rld$ that can be displayed by a rooted 0/1-edge-labeled tree is
provided by removing the edge $dc$, since otherwise Cor.\
\ref{cor:edge-pointing} would be violated.
In the following, for given relations $\Rlstar$ and $\Rld$ we will denote
with $\Rldstar$ a relation that contains $\Rld$ and exactly one pair,
either $(x,y)$ or $(y,x)$, from $\Rlstar$.
\begin{lemma}
For a given graph $G(\Rlstar\cup\Rld)/\Ro$ the following statements
are equivalent:
\begin{itemize}
\item[(i)] There is a relation $\Rldstar$ that is the antisymmetric
single-1-relation of some 0/1-edge-labeled tree.
\item[(ii)] There is a central vertex in each connected component $Q$ of
$G(\Rlstar\cup\Rld)/\Ro$.
\end{itemize}
\end{lemma}
\begin{proof}
If there is a relation $\Rldstar$ that can be displayed by a rooted
0/1-edge-labeled tree, then $G(\Rldstar)/\Ro$ consists of connected
components $Q$ where each connected component is a tree composed of
maximal directed paths that point away from each other. Hence, for each
connected component $Q$ there is a unique vertex $v_Q$ such that all
edges incident to $v_Q$ are of the form $v_Qx$ and, in particular, $v_Q$
is a central vertex in $Q$ and thus, in
$G(\Rlstar\cup\Rld)/\Ro$.
Conversely, assume that each connected component $Q$ has a central vertex
$v_Q$. Hence, one can remove all edges that do not point away
from $v_Q$ and hence obtain a connected component $Q'$ that is
still a tree with $V(Q)=V(Q')$ so that all maximal directed paths point
away from each other and in particular, start in $v_Q$. Thus,
for the central vertex $v_Q$ all edges incident to
$v_Q$ are of the form $v_Qx$. Since $Q'$ is now a
connected component in $G(\Rldstar)/\Ro$, we can apply Cor.\
\ref{cor:Q-root} to obtain the tree $T(Q')$ and Thm.\
\ref{thm:star-tree-dir} to obtain $T(G(\Rldstar)/\Ro)$.
\end{proof}
The key consequence of this result is the following characterization of the
constraints on the possible placements of the root.
\begin{cor}
Let $Q$ be a connected component in $G(\Rlstar\cup\Rld)/\Ro$ and let
$T(\underline{Q})$ be the unique least resolved tree that explains the
underlying undirected graph $\underline{Q}$. Then each copy $v'$ of a
vertex $v$ in $Q$ can be chosen to be the root in $T(\underline{Q})$ to
obtain $T(Q)$ if and only if $v$ is a central vertex in $Q$.
\end{cor}
\section{Concluding Remarks}
In this contribution we have introduced a class of binary relations
deriving in a natural way from edge-labeled trees. This construction has
been inspired by the conceptually similar class of relations induced by
vertex-labeled trees \cite{HW:17}. The latter have co-graph structure and
are closely related to orthology and paralogy
\cite{Hellmuth:13a,Lafond:14,Hellmuth:15a,HW:15}. Defining $x\sim y$
whenever at least one 1-edge lies along the path from $x$ to $y$ is related
to the notion of xenology: the edges labeled 1 correspond to horizontal
gene transfer events, while the 0-edges encode vertical transmission. In its
simplest setting, this idea can also be combined with vertex labels,
leading to the directed analog of co-graphs \cite{HSW:16}. Here, we have
explored an even simpler special case: the existence of a single 1-label
along the connecting path, which captures the structure of rare event data
as we have discussed in the introduction. We have succeeded here in giving
a complete characterization of the relationships between admissible
relations, which turned out to be forests, and the underlying phylogenetic
tree. Moreover, for all such cases we gave polynomial-time algorithms to
compute the trees explaining the respective relation.
\rev{The characterization of single-1 relations is of immediate relevance
for the use of rare events in molecular phylogenetics. In particular, it
determines lower bounds on the required data: if too few events are
known, many taxa remain in $\Ro$ relation and thus unresolved. On the
other hand, if taxa are spread too unevenly, $G(\Rld)/\Ro$ will be
disconnected, consisting of connected components separated by multiple
events. The approach discussed here, of course, is of practical use only
if the available event data are very sparse. If taxa are typically
separated by multiple events, classical phylogenetic approaches, i.e.,
maximum parsimony, maximum likelihood, or Bayesian methods, will
certainly be preferable. An advantage of using the single-1 relation is
that it does not require the product structure of independent characters
implicit in the usual, sequence or character-based methods, nor does it
require any knowledge of the algebraic properties of the underlying
operations as, e.g., in phylogenetic reconstruction from (mitochondrial)
gene order rearrangements. This begs the question under which conditions
on the input data identical results are obtained from the direct
translation of the single-1 relation and maximum parsimony on character
data or break point methods for genome rearrangements.
}
\rev{A potentially useful practical application of our results is a new
strategy to incorporate rare event data into conventional phylogenetic
approaches. Trees obtained from a single-1 relation will in general be
poorly resolved. Nevertheless, they determine some monophyletic groups
(in the directed case) or a set of splits (in the undirected case) that
have to be present in the true phylogeny. Many of the software tools
commonly used in molecular phylogenetics can incorporate this type of
constraints. Assuming that there is good evidence that the rare events are
homoplasy-free, the tree $T(G(\Rld)/\Ro)$ represents the complete information
from the rare events that can be used to constrain the tree reconstruction
process.}
The analysis presented here makes extensive use of the particular
properties of the single-1 relation and hence does not seem to generalize
easily to other interesting cases. Horizontal gene transfer, for example,
is expressed naturally in terms of the ``at-least-one-1'' relation
$\mathrel{\rightsquigarrow}$. It is worth noting that $\mathrel{\rightsquigarrow}$ also has properties (L1) and (L2) and
hence behaves well w.r.t.\ contraction of the underlying tree and
restriction to subsets of leaves. Whether this is sufficient to obtain a
complete characterization remains an open question.
Several general questions arise naturally. For instance, is there a
characterization of admissible relations in terms of forbidden subgraphs
or minors? For example, the relation $\Rld/\Ro$ is characterized in
terms of the forbidden subgraph $x\rightarrow v \leftarrow y$. Hence, it
would be of interest, whether such characterizations can be derived for
arbitrary relations $\Rldk$ or for $\mathrel{\rightsquigarrow}$. If so, can these forbidden
substructures be inferred in a rational manner from properties of vertex
and/or edge labels along the connecting paths in the explaining tree?
Is this the case at least for labels and predicates satisfying (L1)
and (L2)?
\chapter{Reconstructing unrooted phylogenetic trees from symbolic ternary metrics}
In 1998, B\"{o}cker and Dress presented a 1-to-1 correspondence
between symbolically
dated rooted trees and symbolic ultrametrics.
We consider the corresponding problem for unrooted trees.
More precisely, given a tree $T$ with leaf set $X$ and a proper
vertex coloring of its interior vertices, we can map every triple
of three different leaves to the color of its median vertex. We
characterize all ternary maps that can be obtained in this way in terms
of 4- and 5-point conditions, and we show that the corresponding tree
and its coloring can be reconstructed from a ternary map that
satisfies those conditions. Further, we give an additional
condition that characterizes whether the tree is binary,
and we describe an algorithm that reconstructs general trees in
a bottom-up fashion.
\section{Introduction}
A phylogenetic tree is a rooted or unrooted tree where the leaves are labeled
by
some objects of interest, usually taxonomic units (taxa) like species. The
edges
have a positive edge length, thus the tree defines a metric on the taxa set. It
is a classical result in phylogenetics that the tree can be reconstructed from
this metric, if it is unrooted or ultrametric. The latter means that the tree
is
rooted and all taxa are equally far away from the root. An ultrametric tree is
realistic whenever the edge lengths are proportional to time and the taxa are species
that can be observed in the present. In an ultrametric tree, the distance
between two taxa is twice the distance between each of the taxa and their last
common ancestor (lca), hence pairs of taxa with the same lca must have the same
distance. For three taxa $x,y,z$, it follows that there is no unique maximum
within their three pairwise distances, thus we have $d(x,y) \le
\max\{d(x,z),d(y,z)\}$. This 3-point condition turns out to be sufficient
for a metric to be ultrametric, too, and it is the key for reconstructing
ultrametric trees from their distances. In 1995, Bandelt and
Steel~\cite{Bandelt1995} observed that
the complete ordering of the real numbers is not necessary to reconstruct
trees,
and they showed that the real-valued distances can be replaced by maps from the
pairs of taxa into a cancellative abelian monoid. Later, B\"{o}cker and
Dress~\cite{Bocker1998}
pushed this idea to the limit by proving that the image set of the symmetric
map does not need any structure at all (see Section~\ref{sect:result} for details).
While this result is useful for understanding how little information it
takes to reconstruct an ultrametric phylogenetic tree, it was not until recently that
it turned out to have some practical applications. In 2013, Hellmuth et al.
\cite{Hellmuth2013} found an alternative characterization of symbolic ultrametrics
in terms of cographs and showed that, for perfect data, phylogenetic trees can be
reconstructed from orthology information. By adding some optimization tools,
this concept was then applied to analyze real data \cite{Hellmuth2015}.
Motivated by the practical applicability of symbolic ultrametrics, we are
considering their unrooted version. However, in an unrooted tree there is in
general no interior vertex associated to a pair of taxa that would correspond
to
the last common ancestor in a rooted tree. Instead, there is a median
associated
to every set of three taxa that represents, for every possible rooting of the tree,
a last common ancestor of at least two of the three taxa.
Therefore, we consider ternary maps from the triples of taxa into an image set
without any structure. We will show that an unrooted phylogenetic tree with a
proper vertex coloring can be reconstructed from the function that maps every
triple of taxa to the color of its median.
In order to apply our results to real data, we need some way to assign a state to
every set of three taxa, with the property that 3-sets with the same median will
usually have the same state. For symbolic ultrametrics, the first real application
was found 15 years after the development of the theory. In addition to the hope that
something similar happens with symbolic ternary metrics, we have some indication that
they can be useful to construct unrooted trees from orthology relations (see Section~\ref{sect:discuss}
for details).
Consider an unrooted tree $T$ with vertex set $V$, edge set $E$, and leaf
set
$X$, and a dating map $t:V\to M^\odot$, where $M^\odot=M\cup \{\odot\}$ such that
$t(x) = \odot$ for all $x\in X$, and $t(v_1)\neq t(v_2)$ if $v_1v_2\in E$.
For any $S=\{x,y\}\in {V \choose 2}$ there is a unique path $[x,y]$ with end
points $x$ and $y$, and for any 3-set
$S=\{x,y,z\}\in {V\choose 3}$ there is a unique \textit{triple point} or
\textit{median} $\text{med}(x,y,z)$ such that
$[x,y]\cap [y,z]\cap [x,z]=\{\text{med}(x,y,z)\}$. Putting $[x,x]=\{x\}$,
the
definition also works if some or all of $x,y$
and $z$ are equal.
Given a phylogenetic tree $T$ on $X$ and a dating map $t: V\to M^\odot $, we can
define the symmetric symbolic ternary map
$d_{(T;t)}: X\times X\times X\to M^\odot$
by
$d_{(T;t)}(x,y,z)=t(\text{med}(x,y,z))$.
In this set-up, our question can be phrased as follows: Suppose we are given an arbitrary
symbolic ternary map $\delta: X\times X\times X\to M^\odot$,
can we determine if there is a pair $(T;t)$ for which
$d_{(T;t)}(x,y,z)=\delta(x,y,z)$ holds for all $x,y,z\in X$?
The rest of this paper is organized as follows.
In Section~\ref{sect:prelim}, we present the basic and relevant concepts used
in this paper.
In Section~\ref{sect:result} we recall the one-to-one correspondence between
symbolic ultrametrics and
symbolically dated trees, and introduce our main results
Theorem~\ref{thm:ultra-ternary} and Theorem~\ref{thm:binary}.
In Section~\ref{sect:proof} we give the proof of Theorem~\ref{thm:ultra-ternary}.
In order to prove our main result, we first introduce the connection between
phylogenetic trees and quartet systems on $X$
in Subsection~\ref{subsect:quartet}. Then we use a graph representation
to analyze all cases of the map $\delta$ for 5-taxa subsets of $X$ in
Subsection~\ref{subsect:graphical}.
In Section~\ref{sect:binary} we use a similar method to prove
Theorem~\ref{thm:binary}, which gives
a sufficient and necessary condition to reconstruct a binary phylogenetic tree on
$X$.
In Section~\ref{sect:pseudo-cherries}, we give a criterion to identify
all pseudo-cherries of the underlying tree from a symbolic ternary metric.
This result makes it possible to reconstruct the tree in
a bottom-up fashion.
In the last section we discuss some open questions and future work.
\subsection{Preliminaries}
\label{sect:prelim}
We introduce the relevant basic concepts and
notation. Unless
stated otherwise, we will follow the monographs~\cite{Semple2003} and~\cite{Dress2012}.
In the remainder of this paper, $X$ denotes a finite set of size at least three.
An \textit{(unrooted) tree} $T=(V,E)$ is an undirected connected acyclic graph with
vertex
set $V$ and edge set $E$. A vertex of $T$ is a \textit{leaf}
if it is of degree 1, and all vertices with
degree at least two are \textit{interior} vertices.
A \textit{rooted tree} $T=(V,E)$ is a tree that contains a distinguished vertex
$\rho_T \in V$ called the \textit{root}. We define a partial order $\preceq_T$
on $V$ by setting $v\preceq_T w$ for any two vertices $v,w\in V$ for which $v$
is a vertex on the path from $\rho_T$ to $w$. In particular, if $v\preceq_T w$
and $v\neq w$
we call $v$ an \textit{ancestor} of $w$.
An unrooted \textit{phylogenetic tree} $T$ on $X$ is an unrooted tree with
leaf
set $X$ that does not contain any vertex of degree 2. It is \textit{binary}, if
every interior vertex has degree 3.
A rooted phylogenetic tree $T$ on $X$ is a rooted tree with leaf set $X$ that
does not contain any vertices with in- and out-degree one,
and whose root $\rho_T$ has in-degree zero.
For a set $A\subseteq X$ with cardinality at least 2, we define \textit{the last common
ancestor} of $A$, denoted by $\text{lca}_T(A)$, to be the unique vertex in $T$
that is the greatest lower bound of $A$ under the partial order $\preceq_T$.
In case $A=\{x,y\}$ we put $\text{lca}_T(x,y) = \text{lca}_T(\{x,y\})$.
Given a set $Q$ of four taxa $\{a,b,c,d\}$,
there always exist exactly three partitions into two pairs:
$\{\{a,b\},\{c,d\}\}$, $\{\{a,c\},\{b,d\}\}$ and $\{\{a,d\},\{b,c\}\}$.
These partitions are called {\em quartets}, and they
represent the three non-isomorphic unrooted binary trees with leaf set $Q$. These
trees are usually called quartet trees, and they -- as well as the corresponding
quartets
-- are symbolized by
$ab|cd, ac|bd, ad|bc$, respectively. We use $Q(X)$ to denote the set of all quartets
with four taxa in $X$. A phylogenetic tree $T$ on $X$ {\em displays} a quartet
$ab|cd \in Q(X)$, if the path from $a$ to $b$ in $T$ is vertex-disjoint with the path from
$c$ to $d$. The collection of all quartets that are displayed by $T$ is denoted by $Q_T$.
Let $M$ be a non-empty finite set, $\odot$ denotes a special element not
contained in $M$,
and $M^{\odot}:= M\cup \{\odot\}$. Note that in biology the symbol $\odot$
corresponds to
a ``non-event'' and is introduced for purely technical
reasons~\cite{Hellmuth2013}. A \textit{symbolic ternary map} is a mapping from $X\times X\times X$ to $M^{\odot}$. Suppose we have a symbolic ternary map $\delta: X\times X\times X
\to M^{\odot}$,
we say $\delta$ is \textit{symmetric}
if the value of
$\delta(x,y,z)$ depends only on the set $\{x,y,z\}$ but not on the
ordering of $x,y,z$, i.e., if $\delta(x,y,z)=\delta(y,x,z)=\delta(z,y,x)=\delta(x,z,y)$ for all $x,y,z\in
X$.
For simplicity, if a map $\delta: X\times X\times X \to M^{\odot}$ is
symmetric,
then
we can define $\delta$ on the set $\{x,y,z\}$ to be $\delta(x,y,z)$.
Let $S$ be a set; we define $|S|$ to be the number of elements in $S$.
\section{Symbolic ultrametrics and our main results}\label{sect:result}
In this section, we first recall the main result
concerning symbolic ultrametrics by B\"{o}cker and Dress~\cite{Bocker1998}.
Suppose $\delta: X\times X\to M^{\odot}$ is a map. We call
$\delta$ a \textit{symbolic ultrametric} if it satisfies the following
conditions:
(U1) $\delta(x,y)=\odot$ if and only if $x=y$;
(U2) $\delta(x,y)=\delta(y,x)$ for all $x,y\in X$, i.e., $\delta$ is symmetric;
(U3) $|\{\delta(x,y),\delta(x,z),\delta(y,z)\}|\leq 2$ for all $x,y,z\in X$; and
(U4) there exists no subset $\{x,y,u,v\} \in {X \choose 4}$ such that
$\delta(x,y) = \delta(y,u) = \delta(u,v) \neq \delta(y,v) = \delta(x,v) =
\delta(x,u)$.
Now suppose that $T=(V,E)$ is a rooted phylogenetic tree on $X$ and that
$t:V\to
M^{\odot}$ is a map such that
$t(x)=\odot$ for all $x\in X$.
We call such
a map $t$ a \textit{symbolic dating
map} for $T$; it is
\textit{discriminating} if $t(u)\neq t(v)$, for all edges $\{u,v\}\in E$.
Given $(T,t)$, we associate the map $d_{(T;t)}$ on $X\times X$ by setting,
for all $x,y\in X$, $d_{(T;t)}(x,y)=t(\text{lca}_T(x,y))$. Clearly
$\delta= d_{(T;t)}$ satisfies
Conditions (U1),(U2),(U3),(U4) and we say that $(T;t)$ is a {\em symbolic representation}
of $\delta$. B\"{o}cker and Dress established in 1998 the following
fundamental result which gives a
1-to-1 correspondence between symbolic ultrametrics and symbolic
representations~\cite{Bocker1998},
i.e., the map defined by $(T,t) \mapsto d_{(T,t)}$ is a bijection from the set of
symbolically dated trees into the set of symbolic ultrametrics.
\begin{theorem}[B\"{o}cker and Dress 1998~\cite{Bocker1998}]
Suppose $\delta: X\times X\to M^{\odot}$ is a map. Then there is a
discriminating symbolic representation of
$\delta$ if and only if $\delta$ is a symbolic ultrametric. Furthermore, up to
isomorphism, this representation is unique.
\end{theorem}
Similarly, we consider unrooted trees. Suppose that $T=(V,E)$ is an unrooted
tree
on $X$ and that $t:V\to M^{\odot}$ is a symbolic dating map, i.e.,
$t(x)=\odot$ for all $x\in X$; it is discriminating if $t(x)\neq t(y)$ for all $\{x,y\}\in E$.
Given the pair $(T;t)$, we associate the map
$\delta_{(T;t)}$ on $X\times X\times X$ by setting,
for all $x,y,z\in X$, $\delta_{(T;t)}(x,y,z)=t(\text{med}(x,y,z))$.
Before stating our main results, we need the following definition:
\begin{defi}[$n$-$m$ partitioned]
Suppose $\delta: X\times X\times X\to M^{\odot}$ is a symmetric map. We say
that a subset $S$ of $X$ is \textit{$n$-$m$ partitioned (by $\delta$)}, if
among all the 3-element subsets of $S$, there are in
total 2 different values of $\delta$,
and $n$ of those 3-sets are mapped to one value while all other $m$ 3-sets
are mapped to the other value.
\end{defi}
Note that $S$ can be $n$-$m$ partitioned only when $\binom{|S|}{3} = m+n$.
\begin{defi}[symbolic ternary metrics]
We say $\delta:X\times X\times X\to M^{\odot}$ is a \textit{symbolic
ternary metric}, if the following conditions hold.
(1) $\delta$ is symmetric, i.e.,
$\delta(x,y,z)=\delta(y,x,z)=\delta(z,y,x)=\delta(x,z,y)$ for all $x,y,z\in X$.
(2) $\delta(x,y,z)=\odot$ if and only if $x=z$ or $y=z$ or $x=y$.
(3) for any distinct $x,y,z,u$ we have
$$|\{\delta(x,y,z),\delta(x,y,u),\delta(x,z,u),\delta(y,z,u)\}|\leq 2,$$
and when the equality holds then $\{x,y,z,u\}$ is 2-2 partitioned by $\delta$.
(4) there is no 5-element subset $\{x,y,z,u,e\}$ of $X$ which
is 5-5 partitioned by $\delta$.
\end{defi}
We will refer to these conditions throughout the paper.
Our main result is:
\begin{theorem} \label{thm:ultra-ternary}
There is a 1-to-1 correspondence between the discriminating symbolically
dated phylogenetic trees and
the symbolic ternary metrics on $X$.
\end{theorem}
Let $\delta$ be a symbolic ternary metric on $X$. Then we call
$\delta$ {\em fully resolved}, if the following condition holds:
(*) If $|\{\delta(x,y,z),\delta(x,y,u),\delta(x,z,u),\delta(y,z,u)\}|= 1$, then
there exists $e\in X$ such that $e$ can resolve $x,y,z,u$,
i.e., the set $\{x,y,z,u,e\}$ is 4-6 partitioned by $\delta$.
Now we can characterize symbolic ternary metrics that
correspond to binary phylogenetic trees:
\begin{theorem} \label{thm:binary}
There is a 1-to-1 correspondence between the discriminating symbolically
dated binary phylogenetic trees and the
fully resolved symbolic ternary metrics on $X$.
\end{theorem}
\section{Reconstructing a symbolically dated phylogenetic tree}
\label{sect:proof}
The aim of this section is to prove Theorem~\ref{thm:ultra-ternary}.
\subsection{Quartet systems}
\label{subsect:quartet}
We will use quartet systems to prove Theorem~\ref{thm:ultra-ternary}.
In 1981, Colonius and Schulze~\cite{Colonius1981} found that,
for a quartet system $Q$ on
a finite taxa set $X$, there is a phylogenetic tree $T$ on $X$ such that
$Q=Q_T$, if and only if certain conditions on subsets of $X$ with up to
five elements hold. The following theorem (Theorem 3.7 in~\cite{Dress2012})
states their result.
A quartet system $Q$ is \textit{thin}, if for every 4-subset $\{a, b, c,
d\}\subseteq X$,
at most one of the three quartets $ab|cd$, $ac|bd$ and $ad|bc$ is contained
in $Q$. It is \textit{transitive}, if for any 5 distinct elements $a, b , c , d , e \in X$, the
quartet
$ab |cd$ is in $Q$ whenever both of the quartets $ab |ce$ and $a b |d e$ are
contained in $Q$. It is \textit{saturated}, if for any
five distinct elements $a$ , $b$ , $c$ , $d$ , $e \in X$ with $a b
|c d \in Q$, at least one of
the two quartets $a e|c d$ and $a b |c e$ is also in $Q$.
\begin{theorem}\label{thm:quartet2}
A quartet system $Q \subseteq Q(X)$ is of the form $Q = Q_T$ for some
phylogenetic tree $T$ on $X$ if and only if
$Q$ is thin, transitive and saturated.
\end{theorem}
We can encode a phylogenetic tree on $X$ in terms of a quartet system by taking
all the quartets displayed by the tree, as
two phylogenetic
trees on $X$ are isomorphic if and only if
the associated quartet systems coincide~\cite{Dress2012}.
Hence, a quartet system that satisfies the conditions of Theorem~\ref{thm:quartet2} uniquely determines a phylogenetic
tree.
\subsection{Graph representations of a ternary map}
\label{subsect:graphical}
Suppose we have a symmetric map $\delta: X\times X\times X \to M^{\odot}$.
Then we can represent the restriction of $\delta$ to all 3-element subsets
of any 5-element subset $\{x,y,z,u,v\}$ of $X$ by an edge-colored complete
graph on the 5 vertices
$x,y,z,u,v$. For any distinct $a,b,c,d\in \{x,y,z,u,v\}$, edge $ab$ and edge
$cd$ have the same color if and only if the value of $\delta$ for
$\{x,y,z,u,v\}\setminus \{a,b\}$ is the same as for
$\{x,y,z,u,v\}\setminus \{c,d\}$.
It follows from Condition (3) in the definition of a symbolic ternary metric that,
for any vertex of the graph,
either 2 incident edges have one color and the other 2 edges have another
color,
or all 4 incident edges have the same color. By symmetry, there are exactly five
non-isomorphic graph representations.
\begin{lemma}\label{lem:graph}
Let the edges of a $K_5$ be colored such that for each vertex, the 4
incident edges
are either colored by the same color, or 2 of them colored by one color and
the other 2 by another color.
Then there are exactly 5 non-isomorphic colorings, and they are depicted in
Figure~\ref{fig:5-subset-full}.
\begin{figure}
\caption{The 5 non-isomorphic colorings of $K_5$ for which every color class induces an Eulerian graph. Note that in type 1 there
are 3 types of edges, solid edges, dotted edge
and dashed edges}
\label{fig:5-subset-full}
\end{figure}
\end{lemma}
\begin{proof}
It follows from the condition on the coloring that every color class induces
an {\em Eulerian} subgraph (a graph where all vertices have even
degree) of $K_5$. Therefore, ignoring isolated vertices, every such induced subgraph
either is a cycle or it contains a vertex of degree four. Since there are only
ten edges, the only way to have three color classes is two triangles and one
4-cycle. In that case each of the triangles must contain two non-adjacent
vertices of the 4-cycle and the vertex that is not in the 4-cycle, thus we get a
coloring isomorphic to Type 1 in Figure~\ref{fig:5-subset-full}. If there are
exactly two color classes, then one of them has to be a cycle and the other one
its complement. This yields Types 2, 3, and 4, if the length of the cycle is 5,
4, and 3, respectively. Finally, if there is only one color, we get Type 5.
\end{proof}
Note that the vertices are not labeled and it does not matter which color we
are using.
We will prove Theorem~\ref{thm:ultra-ternary} by obtaining a quartet system
from any symbolic ternary metric. More precisely, we say that the symbolic
ternary metric $\delta$ on $X$ {\em generates} the quartet $xy|zu$ if either
$\delta(x,z,u)=\delta(y,z,u) \neq \delta(x,y,z)=\delta(x,y,u)$, or
$|\{\delta(x,y,z),\delta(x,y,u),\delta(x,z,u),\delta(y,z,u)\}|=1$ and there is $e \in X$
such that
$$\delta(x,y,e)=\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(z,u,e)=\delta(y,z,u)$$
$$\neq \delta(x,u,e)=\delta(x,z,e)=\delta(y,z,e)=\delta(y,u,e).$$
In the latter case, we say that $e$ {\em resolves} $x,y,z,u$. Note that the 3-sets
obtained by adding $e$ to the pairs of the generated quartet both have the same
$\delta$-value as the subsets of $\{x,y,z,u\}$.
The following lemma
will show that the set of all quartets generated by a symbolic ternary metric is
thin.
\begin{lemma} \label{lem:quartet2}
Let $\delta: X\times X\times X \to M^{\odot}$ be a symbolic ternary metric
and let $x,y,z,u\in X$ be four different taxa with
$|\{\delta(x,y,z),\delta(x,y,u),\delta(x,z,u),\delta(y,z,u)\}|= 1$. Let
$e,e' \in X-\{x,y,z,u\}$ such that $\{x,y,z,u,e\}$ and $\{x,y,z,u,e'\}$ are
both 4-6-partitioned, and let
$$\delta(x,y,e)=\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(z,u,e)=\delta(y,z,u)$$
$$\neq \delta(x,u,e)=\delta(x,z,e)=\delta(y,z,e)=\delta(y,u,e).$$
Then we also have
$$\delta(x,y,e')=\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(z,u,e')=\delta(y,z,u)$$
$$\neq \delta(x,u,e')=\delta(x,z,e')=\delta(y,z,e')=\delta(y,u,e').$$
\end{lemma}
\begin{proof}
We already know that
$|\{\delta(x,y,z),\delta(x,y,u),\delta(x,z,u),\delta(y,z,u)\}|= 1$ and
$\{x,y,z,u,e'\}$ is 4-6-partitioned.
So there are three possible cases for the values of $\delta$ on $\{x,y,z,u,e'\}$.
(1) $\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(y,z,u)$ and the remaining 6 values are equal.
Then consider $\delta$ on $\{x,y,z,e'\}$: it is 1-3 partitioned instead of 2-2 partitioned,
which contradicts the definition of a symbolic ternary metric, thus this case cannot occur.
(2) $\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(y,z,u) =\delta(e',a,b)=\delta(e',a,c)$ where
$\{a,b,c\}\in {\{x,y,z,u\} \choose 3}$. There are in total 12 different cases. Since $x,y,z,u$ are symmetric, w.l.o.g., we assume
$\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(y,z,u)=\delta(e',x,y)=\delta(e',x,z)$ and the rest are equal.
Then consider $\delta$ on $\{e',x,y,z\}$: it is 1-3 partitioned instead of 2-2 partitioned,
which contradicts the definition of a symbolic ternary metric, thus this case cannot occur.
(3) $\delta(x,y,z)=\delta(x,y,u)=\delta(x,z,u)=\delta(y,z,u) =\delta(e',a,b)=\delta(e',c,d)$ where
$\{a,b,c,d\}\in {\{x,y,z,u\} \choose 4}$. There are in total 3 different cases.
Suppose the statement of the lemma is wrong. Then, because $x,y,z,u$ are
symmetric, w.l.o.g.\ we can assume that
$$\delta(x,z,e')=\delta(x,y,z)=\delta(x,z,u)=\delta(y,u,e')=\delta(x,y,u)=\delta(y,z,u)$$
$$\neq \delta(x,u,e')=\delta(x,y,e')=\delta(y,z,e')=\delta(z,u,e').$$
Case (3a): $\delta(x,u,e)\mathfrak{n}eq \delta(x,u,e')$. We assume that
$\delta(x,u,e)$ is dashed, $\delta(x,u,e')$ is dotted, and
$\delta(x,y,z)$ is solid.
Since $\delta $ is a symbolic
ternary metric, by Lemma~\ref{lem:graph}, the graph representation of
$\{y,z,u,e,e'\}$ has to be Type 1, so the color classes are
one 4-cycle and two 3-cycles. The values of $\delta$
for the sets that contain at most one of $e$ and $e'$ are shown in
Figure~\ref{fig:example_lem2}. There is a path of
length 3 that is colored with $\delta(x,y,z)$ (solid), and there are paths of length 2
colored with $\delta(x,u,e)$ (dashed) and $\delta(x,u,e')$ (dotted),
respectively. It follows that
we only can get Type 1 by coloring the edges connecting the end vertices
of each of those paths with the same color as the edges on the path. We get
$\delta(u,e,e')=\delta(x,y,z)$ (solid), $\delta(z,e,e')=\delta(x,u,e')$ (dotted), and
$\delta(y,e,e')=\delta(x,u,e)$ (dashed). Now doing the same analysis for $\{x,y,z,e,e'\}$
yields $\delta(z,e,e')=\delta(x,u,e)$, in contradiction to $\delta(z,e,e')=\delta(x,u,e')$.
\begin{figure}
\caption{The partial coloring of $K_5$ as described in Case (a).}
\label{fig:example_lem2}
\end{figure}
Case (3b): $\delta(x,u,e)= \delta(x,u,e')$. The graph representation of $\{y,z,u,e,e'\}$
can be obtained from Figure~\ref{fig:example_lem2} by identifying the colors dashed and dotted.
It contains a path of length 3
of edges colored with $\delta(x,y,z)$ and a path of length 4 of edges colored
with $\delta(x,u,e)$. Since a path is not an Eulerian graph, both colors must be
used for at least one of the remaining three edges, thus Type 1 is not possible.
Due to $\delta$ being a symbolic ternary metric,
$\{y,z,u,e,e'\}$ is not 5-5-partitioned, and since there are only two colors
with at least 4 edges of one color and 5 edges of the other color, Lemma~\ref{lem:graph}
implies that the corresponding graph representation must be Type 2 and
therefore, $\{y,z,u,e,e'\}$ is 4-6-partitioned.
We get $\delta(u,e,e')=\delta(x,y,z)$,
$\delta(y,e,e')=\delta(z,e,e')=\delta(x,u,e)$. Now we consider the graph representation
of $\{x,z,u,e,e'\}$, and we observe that the edges colored with $\delta(x,y,z)$ contain a
path of length 4, and the edges colored with $\delta(x,u,e)$ contain a 5-cycle. Hence,
$\{x,z,u,e,e'\}$ is 5-5-partitioned, in contradiction to Condition (4).
\end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:ultra-ternary}]
By the definition of the median, the ternary map $\delta_{(T;t)}$
associated with a discriminating
symbolically dated phylogenetic tree $(T,t)$ on $X$ satisfies Conditions
(1) and (2). For any distinct leaves $x,y,z,u$, the smallest subtree of $T$ connecting
those four leaves has at most two vertices of degree larger than two. If there are two such vertices,
then each of them has degree 3 and is the median of two 3-sets in $\{x,y,z,u\}$.
Therefore, $\delta_{(T;t)}$ satisfies Condition (3).
For any 5 distinct leaves, the smallest subtree of $T$ connecting them either has
three vertices of degree 3, or one vertex of degree 3 and one of degree 4, or one
vertex of degree 5, while all other vertices have degree 1 or 2. The first case is depicted in
Figure~\ref{fig:5-point}. There $v_1$ is the median for the 3-sets that contain $x_1$ and $x_2$,
$v_3$ is the median for the 3-sets that contain $z_1$ and $z_2$, and $v_2$ is the median for the
remaining four 3-sets. Hence, either $\{x_1,x_2,y,z_1,z_2\}$ is 4-6-partitioned
(if $t(v_1)=t(v_3)$), or
there are three different values of $\delta_{(T;t)}$ within those five taxa. For the other two cases,
the set of five taxa is either 3-7-partitioned or $\delta_{(T;t)}$ is constant on all its subsets with 3 taxa.
Hence, no subset of $X$ of cardinality
five is 5-5-partitioned by $\delta_{(T;t)}$, thus $\delta_{(T;t)}$ satisfies Condition (4).
On the other hand, let $\delta$ be a symbolic ternary metric on $X$.
By Lemma~\ref{lem:graph}, taking any 5-element subset of $X$,
the possible graph representations of the delta system satisfying (1), (2), and (3)
are
shown in Figure~\ref{fig:5-subset-full}. Except for Type 2, all other types
satisfy (4).
For the first type, the delta system is
$\delta(y,z,u)=\delta(x,y,z)=\delta(w,y,z)\neq
\delta(w,x,z)\\
=\delta(w,x,u)=\delta(w,x,y)\neq \delta(x,z,u)=\delta(w,z,u)
=\delta(x,y,u)=\delta(w,y,u)\neq \delta(y,z,u)$.
The corresponding quartet system is $\{xw|yu,xw|zu,xw|yz,uw|yz,xu|yz\}$.
For the third type, the delta system is
$\delta(y,z,u)=\delta(x,z,u)=\delta(w,z,u)=
\delta(x,y,w)\\
=\delta(y,u,w)=\delta(y,z,w)\neq \delta(x,y,z)=\delta(w,x,z)
=\delta(x,y,u)=\delta(w,x,u)$.
The corresponding quartet system is $\{wy|xu,wy|xz,xy|zu,wy|zu,wx|zu\}$.
For the fourth type, the delta system is
$\delta(w,x,z)=\delta(w,x,u)=\delta(w,x,y)\\
\neq
\delta(y,z,u)=\delta(x,y,z)=\delta(x,z,u)= \delta(w,z,u)=\delta(w,y,z)
=\delta(x,y,u)=\delta(w,y,u)$.
The corresponding quartet system is $\{xw|yu,xw|zu,xw|yz\}$.
For the fifth type, the delta system is
$\delta(y,z,u)=\delta(x,y,z)=\delta(w,y,z)=
\delta(w,x,z)\\
=\delta(w,x,u)=\delta(w,x,y)= \delta(x,z,u)=\delta(w,z,u)
=\delta(x,y,u)=\delta(w,y,u)$.
The corresponding quartet system is $\emptyset$.
All quartet systems are thin, transitive, and saturated. Indeed, the delta systems
of Types 1 and 3 generate all quartets displayed by a binary tree, Type 4 generates
all quartets displayed by a tree with exactly one interior edge, and Type 5 corresponds to the
star tree with 5 leaves. Now we take the union of all quartets generated
by $\delta$. The resulting quartet system is transitive and saturated, since we have verified these
properties for all subsets of five taxa, and it is thin in view of Lemma~\ref{lem:quartet2}.
Therefore, by Theorem~\ref{thm:quartet2},
every delta system satisfying Conditions (1), (2), (3) and (4)
uniquely determines a phylogenetic tree $T$ on $X$.
It only remains to show that two 3-element subsets of $X$ that have the same median in $T$ must be
mapped to the same value of $\delta$, since then we can define $t$ to be the dating map with
$t(v)=\delta(x,y,z)$ for every interior vertex $v$ of $T$ and for all 3-sets $\{x,y,z\}$ whose median is $v$.
It suffices to consider two sets which intersect in two taxa, as the general
case follows by exchanging one taxon up to three times. We assume that $v$ is the median of both,
$\{w,x,y\}$ and $\{w,x,z\}$. If $\delta(w,x,y) \neq \delta(w,x,z)$, then by the definition of
symbolic ternary metric, $\{x,y,w,z\}$ is 2-2-partitioned by $\delta$, thus $\delta$ generates one of the quartets
$wy|xz$ and $wz|xy$, thus $T$ must display that quartet. However, in both cases $\{w,x,y\}$ and $\{w,x,z\}$
do not have the same median in $T$. We also claim that the associated $t$ is discriminating. Suppose otherwise,
there is an edge $uv$ in $T$ such that $t(u)=t(v)$. Thus for any four leaves $x_1,x_2,y_1,y_2$ such that
$\text{med}(x_1,x_2,y_1) = \text{med}(x_1,x_2,y_2) = u$
and $\text{med}(y_1,y_2,x_1) = \text{med}(y_1,y_2,x_2) = v$, we have
$\delta(x_1,x_2,y_1)=\delta(x_1,x_2,y_2)=\delta(y_1,y_2,x_1)=\delta(y_1,y_2,x_2)$.
Suppose there is a leaf $e$ that resolves $x_1x_2y_1y_2$. Since $uv$ is an edge in $T$, there exist $i$ and $j$
such that $\text{med}(x_i,y_j,e) \in \{u,v\}$, and hence $\delta(x_i,y_j,e)=\delta(x_1,x_2,y_1)$,
which means $e$ cannot resolve $x_1x_2y_1y_2$, a contradiction.
Hence a thin, transitive and saturated quartet system uniquely determines a discriminating symbolic dated tree.
\end{proof}
\begin{figure}
\caption{The leaves and median vertices for a 5-taxa binary tree.}
\label{fig:5-point}
\end{figure}
The set of quartets generated by Type 2 does not satisfy the condition of being saturated
from Theorem~\ref{thm:quartet2}.
Without loss of generality, label the vertices by $x,y,z,u,w$ as in Figure~\ref{fig:5-subset-2}.
Then the quartet system is $\{yw|zu,xu|yz,xz|uw,xy|zw,xw|yu\}$. In order
to be saturated, the presence of $xu|yz$ would induce that we have $xw|yz$
or $xu|yw$, but we have $xy|zw$, $xw|yu$ instead.
\begin{figure}
\caption{The graph representation of a ternary map satisfying Conditions (1), (2), and (3), but not (4).}
\label{fig:5-subset-2}
\end{figure}
\section{Reconstructing a binary phylogenetic tree}
\label{sect:binary}
The aim of this section is to prove Theorem~\ref{thm:binary}.
A quartet system $Q$ on $X$ is \textit{complete}, if
$$|Q\cap \{ab|cd,ac|bd,ad|bc\}| = 1$$
holds for all $\{a,b,c,d\}\in {X \choose 4}$. Using the easy observation that
a phylogenetic tree is binary if and only if it displays a quartet for every 4-set,
the following result is a direct consequence of Theorem~\ref{thm:quartet2}.
\begin{corollary}~\label{cor:quartet}
A quartet system $Q \subseteq Q(X)$ is of the form $Q = Q(T)$ for some
binary phylogenetic tree $T$ on $X$ if and only if
$Q$ is complete, transitive, and saturated.
\end{corollary}
Condition (*) ensures that a ternary metric
$\delta$
generates a quartet for every set of four taxa, even if $\delta$ is constant on all
of its 3-taxa subsets. In view of Lemma~\ref{lem:quartet2} we also have that
$\delta$ can not generate two different quartets for the same 4-set. Hence,
we have the following corollary.
\begin{corollary}\label{cor:complete}
A symbolic ternary metric $\delta:X\times X \times X\to M^{\odot}$
that satisfies Condition~(*) generates a complete quartet system on $X$.
\end{corollary}
Now we prove Theorem~\ref{thm:binary}.
\begin{proof}
Let $(T,t)$ be a symbolically dated binary phylogenetic tree.
By Theorem~\ref{thm:ultra-ternary}, $\delta_{(T;t)}$ is a
symbolic ternary metric. Since $T$ is binary, it displays a
quartet for every 4-taxa subset $\{x,y,z,u\}$ of $X$. Assume
that $T$ displays $xu|yz$, thus
$\text{med}(x,y,z) \neq \text{med}(x,y,u)$. If
$|\{\delta_{(T;t)}(x,y,z),\delta_{(T;t)}(x,y,u),\delta_{(T;t)}(x,z,u),\delta_{(T;t)}(y,z,u)\}|= 1$,
then \\there is at least one vertex $v$ on the path in $T$ connecting
$\text{med}(x,y,z)$ and $\text{med}(x,y,u)$ with $t(v) \neq t(\text{med}(x,y,z))$, as
$t$ is discriminating. Hence, there is a leaf $e \in X$ such that
$v = \text{med}(x,y,e)$.
It follows that the set $\{x,y,z,u,e\}$ is 4-6 partitioned by $\delta_{(T;t)}$,
thus $\delta_{(T;t)}$ satisfies Condition (*).
On the other hand, if $\delta$ is a symbolic
ternary metric on $X$ and satisfies (*),
then by Corollary~\ref{cor:complete}, it corresponds to a unique complete
quartet system, thus it encodes a binary phylogenetic tree $T$ in view of
Corollary~\ref{cor:quartet}.
As in the last paragraph of the proof of
Theorem~\ref{thm:ultra-ternary}, we can define a
dating map $t$ by $t(v)=\delta(x,y,z)$ for every interior vertex $v$ of $T$ and for all
3-sets $\{x,y,z\}$ whose median is $v$. Hence, $(T,t)$ is a symbolically dated binary
phylogenetic tree.
\end{proof}
\section{The recognition of pseudo-cherries}
\label{sect:pseudo-cherries}
In Theorem~\ref{thm:ultra-ternary}, we have established a 1-to-1 correspondence
between symbolically dated phylogenetic trees and symbolic ternary metrics on $X$, and
a bijection is given by mapping $(T,t)$ to $\delta_{(T,t)}$. To get the inverse of this map, we can
first compute the set of all quartets generated by a symbolic ternary metric, and then
apply an algorithm that reconstructs a phylogenetic tree from the collection of all its displayed
quartets. Finally, the dating map is defined as in our proof of Theorem~\ref{thm:ultra-ternary}.
This approach would correspond to first extracting rooted triples from a symbolic ultrametric
and then reconstruct the rooted tree (see Section 7.6 of ~\cite{Semple2003}). However, a
more direct way to reconstruct the corresponding tree from a symbolic ultrametric was presented
in ~\cite{Hellmuth2013}. It is based on identifying maximal sets of at least two taxa that are
adjacent to the same interior vertex, so-called {\em pseudo-cherries}. These can iteratively
be identified into a single new taxon, thereby reconstructing the corresponding tree in a bottom-up fashion.
Here we show how to find the pseudo-cherries of $T$ from a symbolic ternary metric
$\delta_{(T,t)}$. Then an algorithm to reconstruct $T$ can be designed exactly as in~\cite{Hellmuth2013}.
We point out that it is not necessary to check Condition (4) of a symbolic ternary metric, as a violation would
make the algorithm recognize that the ternary map does not correspond to a tree.
Given an arbitrary symbolic ternary map $\delta: X\times X\times X\to M^\odot$ satisfying
Conditions (1), (2), and (3).
For $x,y \in X$ and $m\in M^{\odot}$, we say that $x$ and $y$ are {\em $m$-equivalent},
if there is $z \in X$ such that $\delta(x,y,z)=m$, and for $u,v \in X-x-y$, $\delta (x,u,v)=m$ if and only if
$\delta (y,u,v)=m$.
\begin{lemma} \label{lem:transitive}
If $x$ and $y$ are $m$-equivalent and $y$ and $z$ are $m'$-equivalent, then $m=m'$ and $x$ and $z$ are
$m$-equivalent.
\end{lemma}
\begin{proof}
Assume $\delta(x,y,z) \neq m$. Then let $u \in X$ with $\delta(x,y,u)=m$. Since $\delta$ is not constant on
$\{x,y,z,u\}$, this 4-set must be 2-2-partitioned, thus exactly one of $\delta(x,u,z)=m$ and $\delta(y,u,z)=m$
must hold, in contradiction to $x$ and $y$ being $m$-equivalent. Hence, we have $\delta(x,y,z) = m$, and
by symmetry we also have $\delta(x,y,z) = m'$, thus $m=m'$. In order to verify that $x$ and $z$ must be
$m$-equivalent, we have already shown $\delta(x,y,z) = m$. For $w,w' \in X-\{x,y,z\}$, we have
$\delta(x,w,w')=m$ if and only if $\delta(y,w,w')=m$, since $x$ and $y$ are $m$-equivalent, and we have
$\delta(y,w,w')=m$ if and only if $\delta(z,w,w')=m$, since $y$ and $z$ are $m$-equivalent.
Finally, we have $\delta(x,y,w)=m$ if and only if $\delta(x,z,w)=m$ if and only if $\delta(y,z,w)=m$.
Hence $x$ and $z$ are $m$-equivalent.
\end{proof}
We say $x,y\in X$ are {\em $\delta$-equivalent}, denoted by {\em $x\sim_{\delta} y$}, if there exists
$m\in M^\odot$
such that $x$ and $y$ are $m$-equivalent.
\begin{lemma}
The relation of being $\delta$-equivalent is an equivalence relation.
\end{lemma}
\begin{proof}
For any $x\in X$, since $\delta(x,x,y)=\odot$ for any $y\in X$, by definition $x$ and $x$ are $\odot$-equivalent,
hence $x$ and $x$ are $\delta$-equivalent. Hence $\sim_{\delta}$ is reflexive. For any $x\sim_{\delta} y$, we know that there exists an $m\in M^{\odot}$
such that $x$ and $y$ are $m$-equivalent. Since $\delta$ is symmetric, by the definition of $m$-equivalent, $y$ and $x$
are also $m$-equivalent, thus $y\sim_{\delta} x$. Hence, $\sim_{\delta}$ is symmetric.
To prove the transitivity of $\sim_{\delta}$, assume $x\sim_{\delta} y$ and $y\sim_{\delta} z$, by Lemma~\ref{lem:transitive}
we know that $x\sim_{\delta} z$. Therefore, $\delta$-equivalent is an equivalence relation.
\end{proof}
Suppose that $T$ is a phylogenetic tree on $X$. Let $C\subseteq X$ be a
subset of $X$ with $|C| \ge 2$.
We call $C$ a {\em pseudo-cherry} of $T$, if there is an interior vertex
$v$ of $T$ such that $C$ is the set of all leaves adjacent to $v$.
\begin{theorem}
If $(T;t)$ is a symbolically dated phylogenetic tree, then
a non-empty subset $C$ of $X$ is a non-trivial equivalence class of $\sim_{\delta_{(T,t)}}$
if and only if $C$ is a pseudo-cherry of $T$.
\end{theorem}
\begin{proof}
For the ease of notation, we let $\delta = \delta_{(T,t)}$.
Since $t$ is discriminating, the definition of a pseudo-cherry
immediately implies that any pseudo-cherry of $T$ must be contained in a
non-trivial equivalence class of $\sim_{\delta}$.
Conversely, if a non-trivial equivalence class $C$ of $\sim_{\delta}$ is not a pseudo-cherry,
then there are $x_1,x_2 \in C$ such that the path in $T$ that connects $x_1$ and $x_2$
has length at least 3,
and since $t$ is discriminating, it has at least 2 interior vertices labeled by two
different elements of $M$. Suppose that all elements of $C$ are $m$-equivalent, and
that $v$ is an interior vertex on the path from $x_1$ to $x_2$ such that $t(v)=m'$ and
$m' \neq m$. Further, let $y \in X$ such that $v=\text{med}(x_1,x_2,y)$. Since $x_1$ and
$x_2$ are $m$-equivalent, there is $z \in X$ such that $\delta(x_1,x_2,z)=m$. Then the
median $u$ of $x_1,x_2,z$ is also on the path from $x_1$ and $x_2$, and we assume
without loss of generality that
$u$ is on the path from $x_1$ to $v$. It follows that $\delta(x_1,y,z) =m$ but $\delta(x_2,y,z)=m'$
in contradiction to $x_1\sim_mx_2$.
\end{proof}
\section{Discussions and open questions}
\label{sect:discuss}
The proofs of our main results heavily rely on extracting the corresponding quartet set from
a symbolic ternary metric and then checking that our Conditions (3) and (4) guarantee the
quartet system to be thin, transitive, and saturated, and adding (*) makes the quartet system complete.
The conditions look like (3) corresponds to thin and transitive, (4) to saturated, and (*) to complete.
However, this is not true, and removing (4) from Theorem~\ref{thm:binary} does not necessarily
yield a transitive complete quartet system. While for five taxa, a 5-5-partition yields the only
non-saturated complete transitive quartet system, Lemma~\ref{lem:quartet2} does not hold without
Condition (4). Indeed, the ternary map that is visualized in Figure~\ref{fig:new} satisfies
Conditions (1), (2), (3), and (*), but it generates two quartets on each of $\{a_1,a_2,b_1,b_2\}$
and $\{a_1,a_2,c_1,c_2\}$. It can be shown by checking the remaining 5-sets in Case 2 of our
proof of Lemma~\ref{lem:quartet2} that every ternary map on 6 taxa satisfying Conditions
(1), (2), (3), (*) that does not yield a thin quartet system is isomorphic to this example. This
raises the question whether ternary maps satisfying these four conditions can be completely
characterized. The hope is to observe something similar to the Clebsch trees that were observed
by Jan Weyer-Menkhoff~\cite{Weyer2003}. As a result, a phylogenetic tree with all interior vertices
of degree 3, 5, or 6 can be reconstructed from every transitive complete quartet set.
\begin{figure}
\caption{The 5-taxa trees respectively graph representations generated by a ternary map satisfying Conditions (1), (2), (3), (*) but not (4).}
\label{fig:new}
\end{figure}
Another direction to follow up this work would be to consider more general graphs than
trees. A {\em median graph} is a graph for which every three vertices have a unique median.
Given a vertex-colored median graph and a subset $X$ of its vertex set, we can get
a symmetric ternary map on $X \times X \times X$ by associating the color of the median
to every 3-subset of $X$. It would be interesting to see whether this map can be used
to reconstruct the underlying graph for other classes of median graphs than
phylogenetic trees. In phylogenetics, median graphs are used to represent non-treelike
data. Since the interior vertices of those so-called {\em splits graphs}
do in general not correspond to
any ancestor of some of the taxa, reconstructing a collection of splits from the ternary
map induced by a vertex-colored splits graph is probably limited to split systems
that are almost compatible with a tree.
It is one of the main observations of ~\cite{Hellmuth2013} that the 4-point condition
for symbolic ultrametrics can be formulated in terms of {\em cographs} which
are graphs that do
not contain an induced path of length 3. For the special case that
$\delta: X\times X\to M^{\odot}$ with $|M|=2$ and $m \in M$, consider the graph with
vertex set $X$ where two vertices $x,y$ are adjacent, if and only if $\delta(x,y)=m$.
Then deciding whether $\delta$ is a symbolic
ultrametric can be reduced to checking whether this graph (as well as its complement)
is a cograph. This is useful for analyzing real data which will usually not provide a perfect
symbolic ultrametric, thus some approximation is required. For ternary maps and
unrooted trees, the 5-taxa case looks promising, as Condition (4) translates to
a forbidden graph representation that splits the edges of a $K_5$ into two
5-cycles, thus we have a self-complementary forbidden induced subgraph.
However, for more taxa, the 3-sets that are mapped to the same value of a ternary map
$\delta$ define a 3-uniform hypergraph on $X$ and formulating Condition (4) in
terms of this hypergraph does not seem to be promising. In addition, even if there
are only two values of $\delta$ for 3-sets, Condition (3) does not become obsolete.
We leave it as an open question, whether an alternative characterization of
symbolic ternary metrics exists that makes it easier to solve the corresponding
approximation problem.
\addcontentsline{toc}{chapter}{\numberline {}{\bf Bibliography }}
~~~~~
\newpage
\renewcommand{\baselinestretch}{1.5}
\large\normalsize
\begin{CJK*}{UTF8}{song}
\CJKtilde
\addcontentsline{toc}{chapter}{\numberline {}{\bf 博士后期间研究成果 }}
{\LARGE \bf
\begin{center} 博士后在站期间研究成果
\end{center}}
\noindent 一、博士后在站期间发表和在审的论文列表
\begin{enumerate}
\item[1.] Inferring Phylogenetic Trees from the Knowledge of Rare Evolutionary
Events, joint with M. Hellmuth, M. Hernandez-Rosales, P. F. Stadler,
to appear in {\it Journal of Mathematical Biology},
available at {\it https://arxiv.org/abs/1612.09093}
\item[2.] Reconstructing unrooted phylogenetic trees from symbolic ternary
metrics, joint with S. Gr\" unewald, Y. Wu,
minor revision in {\it Bulletin of Mathematical Biology},
available at {\it https://arxiv.org/abs/1702.00190}
\item[3.] Fractal property of homomorphism order, joint with J. Fiala, J.
Hubi\v cka, J. Ne\v set\v ril,
{\it European Journal of Combinatorics}, in press,
{\it http://www.sciencedirect.com/science/article/pii/S0195669817300914}
\item[4.] Gaps in full homomorphism order, joint with J. Fiala, J. Hubi\v cka,
{\it Electronic Notes in Discrete Mathematics}, (2017), {\bf 61}, 429--435
\item[5.] An universality argument for graph homomorphisms, joint with J.
Fiala, J. Hubi\v cka, {\it Electronic Notes in Discrete Mathematics}, (2015),
{\bf 49}, 643--649.
\item[6.] Word-representability of split graphs, joint with Sergey Kitaev, Jun Ma, Hehui Wu, submitted.
\end{enumerate}
\noindent 二、博士后在站期间主持和参与的科研项目
\begin{itemize}
\item[$\diamond$] 2016 -- 2017, {\bf Principal Investigator} of the project "Some problems on
graph homomorphisms", No. 154463 \\
\textbf{50000 RMB}, National Natural Science Fund of
Chinese Postdoc, CN.
\item[$\diamond$] 2013 -- 2016 {\bf Participant} of the project
"Several problems on discrete process: structure, simulation, and algorithm" (11271255) \\
\textbf{600000 RMB}, National Natural Science Fund, CN.
\item[$\diamond$] 2017 -- 2020 {\bf Main Participant} of the project "Discrete Dynamics and
Beyond" (11671258) \\
\textbf{480000 RMB}, National Natural Science Fund, CN.
\end{itemize}
\end{CJK*}
\newpage\thispagestyle{empty}
~~~~~
\newpage
\renewcommand{\baselinestretch}{1.5}
\large\normalsize
\begin{CJK*}{UTF8}{song}
\CJKtilde
\addcontentsline{toc}{chapter}{\numberline {}{\bf 致谢 }}
{\LARGE \bf
\begin{center} 致\ \ 谢
\end{center}}
\hspace{0.23cm}
首先我要深深地感谢我的合作导师吴耀琨老师,吴老师对科研有激情,投入,懂的方向很多,在他的带领下我学习和接触了很多新的领域。
吴老师组里聚集着一群聪明,有想法有活力的本科生和研究生们,组里讨论的氛围非常好,使我受益匪浅。虽然我比这些学生们学数学的时间长,
但是从他们那里我学到了很多,特别是一些新的想法和思路,给我不少启发。
\hspace{0.23cm}感谢我的合作者们,感谢我的博士导师Peter F. Stadler,通过和他们合作我学到了很多东西。
\hspace{0.23cm}感谢一些其他关心和指导过我的老师,武同锁老师,王维克老师,张晓东老师,厦门大学金贤安老师,湖南大学彭岳建,
复旦大学吴河辉老师。感谢数学院行政部门的老师,郭恒亮老师,尚建辉老师,陈青老师等等,他们对待工作专业和负责,给我提供了很多工作上的协助。
\hspace{0.23cm}感谢我的办公室同事们,吴德军老师和王晓东博士,他们给予了我不少帮助。
\hspace{0.23cm}感谢我在上海的好友们,是他们让上海的两年成为我近几年生活上最开心的日子。感谢父母和家人一直以来的支持。
\vskip 0.6in \centerline{{\mbox{}\hskip 8cm} 龙旸靖}
\vskip 0.1in \centerline{{\mbox{}\hskip 8cm} 二零一七年九月于上海}
\end{CJK*}
\end{document} |
\begin{document}
\begin{abstract} This note presents some properties of the variety of planes $F_2(X)\subset G(3,7)$ of a cubic $5$-fold $X\subset \mathbb P^6$. A cotangent bundle exact sequence is first derived from the remark made in \cite{Iliev-Manivel_cub_hyp_int_syst} that $F_2(X)$ sits as a Lagrangian subvariety of the variety of lines of a cubic $4$-fold, which is a hyperplane section of $X$. Using the sequence, the Gauss map of $F_2(X)$ is then proven to be an embedding. The last section is devoted to the relation between the variety of osculating planes of a cubic $4$-fold and the variety of planes of the associated cyclic cubic $5$-fold.
\end{abstract}
\dedicatory{\large Dedicated to Claire Voisin on the occasion of her 60th birthday}
\title{Remarks on the geometry of the variety of planes of a cubic fivefold}
\section{Introduction}
To understand the topology and the geometry of smooth complex hypersurfaces $X\subset \mathbb P(V^*)\simeq\mathbb P^{n+1}$, various auxiliary manifolds have been introduced in the past century, the intermediate Jacobian $$J^n(X):=(H^{k,k+1}(X)\oplus\cdots\oplus H^{0,n})/H^n(X,\mathbb Z)_{/torsion}$$ when $n=2k+1$ is odd, being, since the seminal work of Clemens--Griffiths (\cite{Cl-Gr}) on the cubic threefold, one of the most widely known.\\
\indent Cubic fivefolds are classically (\cite{griffiths_periods}) known to be the only hypersurfaces of dimension $>3$ for which the intermediate Jacobian, which is in general just a (polarised) complex torus, is a (non-trivial) principally polarised abelian variety.\\
\indent Another interesting series of varieties classically associated to $X$ are the varieties $F_m(X)\subset G(m+1,V)$ of $m$-planes contained in $X$.\\
\indent Starting from Collino (\cite{Coll_cub}), some properties of the variety of planes $F_2(X)\subset G(3,V)$ of a cubic $5$-fold $X$ have been studied in connection with the $21$-dimensional intermediate Jacobian $J^5(X)$. In \textit{loc. cit}, the following is proven
\begin{theoreme}\label{thm_Collino_intro} For a general cubic $X\subset \mathbb P(V^*)\simeq \mathbb P^6$, $F_2(X)$ is a smooth irreducible surface and the Abel-Jacobi of the family of planes $\Phi_{\mathcal P}:F_2(X)\rightarrow J^5(X)$ is an immersion i.e. the associate tangent map is injective and induces an isomorphism of abelian varieties $$\phi_{\mathcal P}:Alb(F_2(X))\xrightarrow{\sim} J^5(X),$$ where $\mathcal P\in {\rm CH}^5(F_2(X)\times X)$ is the universal plane over $F_2(X)$. Equivalently, $q_*p^*:H^3(F_2(X),\mathbb Z)_{/torsion}\rightarrow H^5(X,\mathbb Z)$ is an isomorphism of Hodge structures where the maps are defined by $\begin{small}\xymatrix{\mathcal P\ar[r]^q\ar[d]^p &X\\
F_2(X) &}\end{small}$.
\end{theoreme}
In the present note, we investigate some additional properties of $F_2(X)$.\\
\indent In the first section, we establish the following cotangent bundle exact sequence
\begin{theoreme}\label{thm_1} Let $X\subset \mathbb P(V^*)$ be a smooth cubic $5$-fold for which $F_2(X)$ is a smooth irreducible surface. Then the cotangent bundle $\Omega_{F_2(X)}$ fits in the exact sequence
\begin{equation}\label{ex_seq_tgt_bundle_seq} 0\rightarrow \mathcal Q_{3|F_2(X)}^*\rightarrow {\rm Sym}^2\mathcal E_{3|F_2(X)}\rightarrow \Omega_{F_2(X)}\rightarrow 0
\end{equation}
where the tautological rank $3$ quotient bundle $\mathcal E_3$ and the other bundle appears in the exact sequence
\begin{equation}\label{ex_seq_def_taut_3} 0\rightarrow \mathcal Q_3\rightarrow V^*\otimes \mathcal O_{G(3,V)}\rightarrow \mathcal E_3\rightarrow 0
\end{equation}
and the first map (of (\ref{ex_seq_tgt_bundle_seq})) is the contraction with an equation ${\rm eq}_X\in {\rm Sym}^3V^*$ defining $X$ i.e. for any $[P]\in F_2(X)$, $v\mapsto {\rm eq}_X(v,\cdot,\cdot)_{|P}$.
\end{theoreme}
Classically associated to the Albanese map $alb_{F_2}:F_2(X)\rightarrow Alb(F_2(X))$ of $F_2(X)$ there is the Gauss map
$$\begin{tabular}{llll}
$\mathcal G:$ &$alb_{F_2}(F_2(X))$ &$\dashrightarrow$ &$G(2, T_{Alb(F_2(X)),0})$\\
$ $ &$t$ &$\mapsto$ &$T_{alb_{F_2}(F_2(X))-t,0}$
\end{tabular}$$
where $alb_{F_2}(F_2(X))-t$ designates the translation of $alb_{F_2}(F_2(X))\subset {\rm Alb}(F_2(X))$ by $-t\in {\rm Alb}(F_2(X))$. The map $\mathcal G$ is defined on the smooth locus of $alb_{F_2}(F_2(X))$.\\
\indent In the second section of the note, we prove:
\begin{theoreme}\label{thm_gauss_map} The Albanese map is an embedding. In particular the Gauss map is defined everywhere.\\
\indent Moreover, $\mathcal G$ is an embedding and its composition with the Pl\"ucker embedding $$G(2,T_{Alb(F_2(X)),0})\simeq G(2,H^0(\Omega_{F_2})^*)\subset \mathbb P(\bigwedge^2 H^0(\Omega_{F_2(X)})^*)$$ is the composition of the degree $3$ Veronese of the natural embedding $F_2(X)\subset G(3,V)\subset \mathbb P(\bigwedge^3V^*)$ followed by a linear projection.
\end{theoreme}
The last section is concerned with some properties of the variety of osculating planes of a cubic $4$-fold, namely
\begin{equation}\label{def_var_of_oscul_planes}
F_0(Z):=\{[P]\in G(3,H),\ \exists \ell\subset P\ {\rm line\ s.t.}\ P\cap Z=\ell\ {\rm (set-theoretically)}\}
\end{equation}
where $Z\subset \mathbb P(H^*)\simeq \mathbb P^5$ is a smooth cubic $4$-fold containing no plane.\\
\indent This variety admits a natural projection to the variety of lines $F_1(Z)$ of $Z$ whose image (under that projection) has been studied for example in \cite{GK_geom_lines}. The interest of the authors there, for the variety $F_0(Z)$ stems from its image in $F_1(Z)$ being the fixed locus of the Voisin self-map of $F_1(Z)$ (see \cite{Voisin_map}), a map that plays an important role in the understanding of algebraic cycles on the hyper-K\"ahler $4$-fold $F_1(Z)$ (see for example \cite{shen-vial}).\\
\indent In \cite{GK_geom_lines}, it is proven that for $Z$ general, $F_0(Z)$ is a smooth irreducible surface and some of its invariants are computed.\\
\indent We compute some more invariants of $F_0(Z)$ using its link with the variety of planes $F_2(X_Z)$ of the associated cyclic cubic $5$-fold: to a smooth cubic $4$-fold $Z=\{{\rm eq}_Z=0\}\subset \mathbb P^5$ one can associate the cubic $5$-fold $X_Z=\{X_6^3+{\rm eq}_Z(X_0,\dots,X_5)=0\}$ which (by linear projection) is the degree $3$ cyclic cover of $\mathbb P^5$ ramified over $Z$. We have
\begin{theoreme}\label{thm_sum_up_var_oscul_planes} For $Z$ general, $F_0(Z)$ is a smooth irreducible surface and\\
\indent (1) $F_2(X_Z)$ is a degree $3$ \'etale cover of $F_0(Z)$;\\
\indent (2) $b_1(F_0(Z))=0$; $h^2(\mathcal O_{F_0(Z)})=1070$; $h^1(\Omega_{F_0(Z)})=2207$;\\
\indent (3) ${\rm Im}(F_0(Z)\rightarrow F_1(Z))$ is a (non-normal) Lagrangian surface of $F_1(Z)$.
\end{theoreme}
\begin{remarque} {\rm As mentioned by the referee and Frank Gounelas, in \cite{GK_geom_lines}, it is proven that $[{\rm Im}(F_0(Z)\rightarrow F_1(Z))]=21[F_1(Z\cap H)]$ in ${\rm CH}_2(F_1(Z))$, where $Z\cap H$ is a cubic $3$-fold obtained as a general hyperplane section, which implies that $[{\rm Im}(F_0(Z)\rightarrow F_1(Z))]$ is Lagrangian (see \cite[Lemma 6.4.5]{Huy_cub} for example).}
\end{remarque}
\textit{}\\
\section{Cotangent bundle exact sequence}
Let $X\subset \mathbb P(V^*)\simeq \mathbb P^6$ be a smooth cubic $5$-fold. Its variety of planes $F_2(X)\subset G(3,V)$ is the zero locus of the section of ${\rm Sym}^3\mathcal E_3$ (where $\mathcal E_3$ is defined by (\ref{ex_seq_def_taut_3})) induced by an equation ${\rm eq}_X\in H^0(\mathcal O_{\mathbb P^6}(3))$ of $X$.\\
\indent Let us gather some basic properties of $F_2(X)$ before proving Theorem \ref{thm_1}.\\
\indent It is proven in \cite[Proposition 1.8]{Coll_cub} that $F_2(X)$ is connected for any $X$ so that by Bertini type theorems, for $X$ general, $F_2(X)$ is a smooth irreducible surface.\\
\indent As $F_2(X)$ is cut out of $G(3,V)$ by a regular section of the rank $10$ vector bundle ${\rm Sym}^3\mathcal E_3$, the Koszul resolution says that the structure sheaf $\mathcal O_{F_2(X)}$ is quasi-isomorphic to the complex
\begin{equation}\label{ex_seq_koszul_resol}
0\rightarrow \wedge^{10}{\rm Sym}^3\mathcal E_3^*\rightarrow\wedge^9{\rm Sym}^3\mathcal E_3^*\rightarrow \cdots\rightarrow {\rm Sym}^3\mathcal E_3^*\rightarrow \mathcal O_{G(3,V)}\rightarrow 0
\end{equation}
where the differentials are given by the section of ${\rm Sym}^3\mathcal E_3$. By the adjunction formula $$K_{F_2(X)}\simeq K_{G(3,V)}\otimes det({\rm Sym}^3\mathcal E_{3|F_2(X)})\simeq \mathcal O_{G(3,V)}(3)_{|F_2(X)}:=\mathcal O_{F_2(X)}(3).$$\\
\indent Theorem \ref{thm_Collino_intro} (see also Theorem \ref{thm_descrip_h_1_and_wedge} below) implies that $h^{1,0}(F_2(X))=h^0(\Omega_{F_2(X)})= h^{2,3}(X)=21$ and we can use a software to compute the other Hodge numbers (see also \cite{gammel}). We use the package Schubert2 of Macaulay2:\\
\begin{enumerate}
\item the Koszul resolution of $\mathcal O_{F_2(X)}$ gives $\chi(\mathcal O_{F_2(X)})=\sum_{i=0}^{10}(-1)^i\chi(\wedge^i{\rm Sym}^3\mathcal E_3^*)$. We can get the result $\chi(\mathcal O_{F_2(X)})=3213$ using the following code
\begin{verbatim}
loadPackage "Schubert2"
G=flagBundle{4,3}
(Q,E)= bundles G
F=symmetricPower(3,dual(E))
chi(exteriorPower(0,F))-chi(exteriorPower(1,F))+chi(exteriorPower(2,F))
-chi(exteriorPower(3,F))+chi(exteriorPower(4,F))-chi(exteriorPower(5,F))
+chi(exteriorPower(6,F))-chi(exteriorPower(7,F))+chi(exteriorPower(8,F))
-chi(exteriorPower(9,F))+chi(exteriorPower(10,F))
\end{verbatim}
Then we get $h^{0,2}(F_2(X))=\chi(\mathcal O_{F_2(X)})-1+h^{0,1}(F_2(X))=3233$.\\
\item Next, Noether's formula writes $\chi_{top}(F_2(X))=12\chi(\mathcal O_{F_2(X)})-\int_{F_2(X)}c_1(K_{F_2(X)})^2$ and as
$$\begin{small}\begin{aligned}\int_{F_2(X)}c_1(K_{F_2(X)})^2 &=\int_{F_2(X)}c_1(\mathcal O_{G(3,V)}(3)_{|F_2(X)})^2\\
&=\int_{G(3,V)}[F_2(X)]\cdot c_1(\mathcal O_{G(3,V)}(3))^2\\
&= 9\int_{G(3,V)}c_{10}({\rm Sym}^3\mathcal E_3)\cdot c_1(\mathcal O_{G(3,V)}(1))^2\end{aligned}\end{small}$$
the number $\int_{F_2(X)}c_1(K_{F_2(X)})^2=3^2\times 2835=25515$ can be obtained using the code
\begin{verbatim}
loadPackage "Schubert2"
G=flagBundle{4,3}
(Q,E)= bundles G
F=symmetricPower(3,E)
cycle=chern(1,exteriorPower(3,E))*chern(1,exteriorPower(3,E))*chern(10,F)
integral cycle
\end{verbatim}
Then we get $b_2(F_2(X))=\chi_{top}(F_2(X))-2+2b_1(F_2(X))=13041-2+4\times 21=13123$ and $h^{1,1}(F_2(X))=b_2(F_2(X))-2h^{0,2}(F_2(X))=6657$
\end{enumerate}
\textit{}\\
\indent Associated with $X$, there is also its variety of lines $F_1(X)\subset G(2,V)$. It is a smooth Fano variety of dimension $6$ which is cut out by a regular section of ${\rm Sym}^3\mathcal E_2$ where the latter is the tautological rank $2$ quotient bundle appearing in an exact sequence
$$0\rightarrow \mathcal Q_2\rightarrow V^*\otimes \mathcal O_{G(2,V)}\rightarrow \mathcal E_2\rightarrow 0.$$
Let us examine the relation between the two auxiliary varieties by introducing the flag variety
$$\xymatrix{Fl(2,3,V) \ar[d]_t\ar[r]^e &Gr(2,V)\\
Gr(3,V) &}$$ where $t:Fl(2,3,V)\simeq \mathbb P(\wedge^2 \mathcal E_3)\rightarrow Gr(3,V)$ and $e:Fl(2,3,V)\simeq \mathbb P(\mathcal Q_2)\rightarrow Gr(2,V)$. For the tautological quotient line bundles, we have $\mathcal O_{t}(1)\simeq e^*\mathcal O_{Gr(2,V)}(1)$ and $\mathcal O_{e}(1)\simeq t^*\mathcal O_{Gr(3,V)}(1)\otimes e^*\mathcal O_{Gr(2,V)}(-1)$.\\
\indent On $Fl(2,3,V)$, the relation between the two tautological bundles is given by the exact sequence
\begin{equation}\label{ex_seq_taut_bundles_2_3}
0\rightarrow e^*\mathcal O_{G(2,V)}(-1)\otimes t^*\mathcal O_{G(3,V)}(1)\rightarrow t^*\mathcal E_3\rightarrow e^*\mathcal E_2\rightarrow 0.
\end{equation}
\indent We can restrict the flag bundle to get
$$\xymatrix{\mathbb P_{F_2}:=\mathbb P(\wedge^2\mathcal E_{3|F_2(X)})\ar[r]^(.65){e_{F_2}}\ar[d]_{t_{F_2}} &F_1(X)\\
F_2(X) &.}$$
We have the following property
\begin{proposition}\label{prop_immersion_plan_lines} The tangent map $T e_{F_2}$ of $e_{F_2}$ is injective i.e. $e_{F_2}$ is an immersion. Moreover, the ``normal bundle'' $N_{\mathbb P_{F_2}/F_1(X)}:= e_{F_2}^*T_{F_1(X)}/T_{\mathbb P_{F_2}}$ of $\mathbb P_{F_2}$ admits the following description:
\begin{equation}\label{ex_seq_tgt_bundle_part1}0\rightarrow t_{F_2}^*(\mathcal Q_{3|F_2(X)}^*)\otimes \mathcal O_e(1)\rightarrow t_{F_2}^*{\rm Sym}^2\mathcal E_3\otimes \mathcal O_e(1)\rightarrow N_{\mathbb P_{F_2}/F_1(X)}\rightarrow 0
\end{equation}
\end{proposition}
\begin{proof} (1) Let us first prove that $e_{F_2}$ is an immersion. Let us recall the natural isomorphism between the two presentations of the tangent space of $Fl(2,3,V)$: looking at $t$, we can write
$$T_{Fl(2,3,V), ([\ell],[P])}\simeq Hom(\langle P\rangle,V/\langle P\rangle)\oplus Hom(\langle \ell\rangle,\langle P\rangle/\langle \ell\rangle)$$
and looking at $e$, we have $$T_{Fl(2,3,V), ([\ell],[P])}\simeq Hom(\langle\ell\rangle,V/\langle\ell\rangle)\oplus Hom(\langle P\rangle/\langle\ell\rangle,V/\langle P\rangle)$$ where we denote $\langle K\rangle\subset V$ the linear subspace whose projectivisation is $K\subset \mathbb P(V^*)$. For a given decomposition $\langle P\rangle\simeq \langle\ell\rangle\oplus \langle P\rangle/\langle\ell\rangle$, the isomorphism takes the following form
$$\begin{tabular}{ccc}
$Hom(\langle P\rangle,V/\langle P\rangle)\oplus Hom(\langle\ell\rangle,\langle P\rangle/\langle\ell\rangle)$ &$\rightarrow$ &$Hom(\langle\ell\rangle,V/\langle\ell\rangle)\oplus Hom(\langle P\rangle/\langle\ell\rangle,V/\langle P\rangle)$\\
$(f,\ g)$ &$\mapsto$ &$(f_{|\langle \ell\rangle}+g,\ f_{|\langle P\rangle/\langle\ell\rangle})$
\end{tabular}$$
Notice that by definition, we have $Im(f)\cap Im(g)=\{0\}$ so that in proving that $T_{([\ell],[P])}e_{F_2}$ is injective we can examine the two components separately.
Now we have the exact sequence
$$0\rightarrow N_{\ell/P}\rightarrow N_{\ell/X}\rightarrow N_{P/X|\ell}\rightarrow 0
$$
from which we get
\begin{equation}
\label{ex_seq_normal_bdle_coh_lines}
0\rightarrow \underset{\simeq \langle\ell\rangle^*}{H^0(\mathcal O_{\ell}(1))}\rightarrow H^0(N_{\ell/X})\rightarrow H^0(N_{P/X|\ell})\rightarrow 0=H^1(\mathcal O_\ell(1))
\end{equation}
and we have $T_{F_1(X),[\ell]}\simeq H^0(N_{\ell/X})$.\\
\indent A linear form on $P$ defining $\ell$ is given by any generator of $(\langle P\rangle/\langle\ell\rangle)^*\subset \langle P\rangle^*$ so that $$T_{\mathbb P(\wedge^2 \mathcal E_{3|F_2(X)}),([\ell],[P])}\simeq \underbrace{T_{F_2(X),[P]}}_{\simeq H^0(N_{P/X})}\oplus \underbrace{\langle P\rangle^*/(\langle P\rangle/\langle\ell\rangle)^*}_{\simeq \langle\ell\rangle^*}.$$ The second summand is readily seen to inject into $T_{F_1(X),\langle\ell\rangle}$ by (\ref{ex_seq_normal_bdle_coh_lines}).
Next, we have the exact sequence
$$0\rightarrow N_{P/X}(-1)\rightarrow N_{P/X}\rightarrow N_{P/X|\ell}\rightarrow 0$$ which gives rise to
\begin{equation}\label{ex_seq_normal-1_coh}
0\rightarrow H^0(N_{P/X}(-1))\rightarrow H^0(N_{P/X})\rightarrow H^0(N_{P/X|\ell})\rightarrow H^1(N_{P/X}(-1))\rightarrow H^1(N_{P/X}).
\end{equation}
To prove that $T_{([\ell],[P])}e_{F_2}$ is injective, it is thus sufficient to prove that $H^0(N_{P/X}(-1))=0$.\\
\indent Consider the exact sequence
\begin{equation}\label{ex_seq_nomal_plane} 0\rightarrow N_{P/X}\rightarrow \underbrace{N_{P/\mathbb P^6}}_{\simeq(V/\langle P\rangle)\otimes \mathcal O_P(1)} \overset{\alpha}{\rightarrow} \underbrace{N_{X/\mathbb P^6|P}}_{\simeq \mathcal O_P(3)}\rightarrow 0.
\end{equation}
Up to a projective transformation, we can assume $P=\{X_0=\cdots =X_3=0\}$ so that ${\rm eq}_X$ has the following form:
\begin{equation}\label{normal_form_0}\begin{small}X_0Q_0 + X_1Q_1 + X_2Q_2 +X_3Q_3 + \sum_{i=4}^6X_iD_i(X_0,X_1,X_2,X_3) + R(X_0,X_1,X_2,X_3)
\end{small}
\end{equation}
where $R$ is a homogeneous cubic polynomial, $D_i$, $4\leq i\leq 6$ are homogeneous quadratic polynomials in the variables $(X_k)_{k\leq 3}$ and $Q_i$, $0\leq i\leq 3$ are homogeneous quadratic polynomials in $(X_i)_{4\leq i\leq 6}$. With these notations, $X$ is smooth along $P$ if and only if ${\rm Span}((Q_{i|P})_{i=0,\dots,3})$ is base-point free. We recall the following result found in \cite[Proposition 1.2 and Corollary 1.4]{Coll_cub}
\begin{proposition}\label{prop_result_Collino_smoothness} For $X$ smooth along $P$, we have $F_2(X)$ is smooth at $[P]$ $\iff$ $(Q_0,\dots,Q_3)$ is linearly independent $\iff$ $H^0(\alpha):H^0(N_{P/\mathbb P^6})\simeq (V/\langle P\rangle)\otimes H^0(\mathcal O_P(1))\rightarrow H^0(N_{X/\mathbb P^6|P})\simeq H^0(\mathcal O_P(3))$, $(L_0,\dots,L_3)\mapsto \sum_iL_iQ_i$ is surjective
\end{proposition}
Now tensoring (\ref{ex_seq_nomal_plane}) by $\mathcal O_P(-1)$, we get the following long exact sequence
\begin{equation}\label{ex_seq_normal_plane-1_coh}
0\rightarrow H^0(N_{P/X}(-1))\rightarrow V/\langle P\rangle\overset{H^0(\alpha(-1))}{\rightarrow} H^0(\mathcal O_P(2))\rightarrow H^1(N_{P/X}(-1))\rightarrow 0=H^1(\mathcal O_P)^{\oplus 4}.
\end{equation}
The map $H^0(\alpha(-1))$ is given by the quadrics $(Q_0,\dots,Q_3)$. As $F_2(X)$ is smooth by assumption, the latter are linearly independent thus $H^0(\alpha(-1))$ is injective i.e. $H^0(N_{P/X}(-1))=0$. In particular $H^0(N_{P/X})\subset H^0(N_{P/X|\ell})$ hence, looking at (\ref{ex_seq_nomal_plane}) and (\ref{ex_seq_normal_bdle_coh_lines}), $T_{([\ell],[P])}e_{F_2}$ is injective.\\
\indent (2) We want now to establish the exact sequence (\ref{ex_seq_tgt_bundle_part1}). Pulling back the natural exact sequence of locally free sheaves, we get the commutative diagram:
$$\xymatrix{0 \ar[r] &T_{\mathbb P_{F_2}}\ar[r]\ar[d]^{Te_{F_2}} & T_{Fl(2,3,V)|\mathbb P_{F_2}}\ar[r]\ar[d]^{Te_{|\mathbb P_{F_2}}} &(t^*{\rm Sym}^3\mathcal E_3)_{|\mathbb P_{F_2}} \ar[r]\ar[d]^{\overline{Te_{|\mathbb P_{F_2}}}} &0\\
0 \ar[r] &e_{F_2}^*T_{F_1(X)} \ar[r] &e_{F_2}^*T_{Gr(2,V)|F_1(X)}\ar[r] &e_{F_2}^*{\rm Sym}^3\mathcal E_{2|F_1(X)}\ar[r] &0}$$
which by the snake lemma yields: $$ 0 \rightarrow Ker(Te_{|\mathbb P_{F_2}})\rightarrow Ker(\overline{Te_{|\mathbb P_{F_2}}})\rightarrow coker(Te_{F_2})\rightarrow 0.$$ By definition of the normal bundle, we get $coker(Te_{F_2})\simeq N_{\mathbb P_{F_2}/F_1(X)}$. The restriction of the following exact sequence of locally free sheaves $$\begin{small} 0\rightarrow T_{Fl(2,3,V)/Gr(2,V)}\rightarrow T_{Fl(2,3,V)}\rightarrow e^*T_{Gr(2,V)}\rightarrow 0\end{small}$$ being still exact, we get $ker(Te_{|\mathbb P_{F_2}})\simeq T_{Fl(2,3,V)/Gr(2,V)|\mathbb P_{F_2}}$. The relative tangent bundle appears in the exact sequence:
$$\begin{small}0\rightarrow \mathcal O_{Fl(2,3,V)}\rightarrow e^*V/\mathcal E_2^*\otimes \mathcal O_e(1)\rightarrow T_{Fl(2,3,V)/Gr(2,V)}\rightarrow 0.\end{small}$$
The sequence (\ref{ex_seq_taut_bundles_2_3}) also yields $$\begin{small} 0\rightarrow t^*\mathcal O_{Gr(3,V)}(-1)\otimes e^*\mathcal O_{Gr(2,V)}(1)\rightarrow V/\mathcal E_2^*\rightarrow V/\mathcal E_3^*\rightarrow 0.\end{small}$$ from which we get, after twisting by $\mathcal O_e(1)$ that last exact sequence, $T_{Fl(2,3,V)/Gr(2,V)|\mathbb P_{F_2}}\simeq t_{F_2}^*V/\mathcal E_3^*\otimes \mathcal O_e(1)$.\\
\indent Next, taking the symmetric power of (\ref{ex_seq_taut_bundles_2_3}) we get the following exact sequence $$\begin{small} 0\rightarrow e^*\mathcal O_{Gr(2,V)}(-1)\otimes t^*\mathcal O_{Gr(3,V)}(1)\otimes t^*{\rm Sym}^2\mathcal E_3\rightarrow t^*{\rm Sym}^3\mathcal E_3\rightarrow e^*{\rm Sym}^3\mathcal E_2\rightarrow 0\end{small}$$ so that $ker(\overline{Te_{|\mathbb P_{F_2}}})\simeq (e^*\mathcal O_{Gr(2,V)}(-1)\otimes t^*\mathcal O_{Gr(3,V)}(1)\otimes t^*{\rm Sym}^2\mathcal E_3)_{|\mathbb P_{F_2}}$. Putting everything together, we get the desired exact sequence.
\end{proof}
For any plane $P_0\subset X$, looking for example at the associated quadric bundle
$$\xymatrix{\widetilde{X_{P_0}}\ar[rd]^{\tilde\gamma}\ar@{^{(}->}[r] &\mathbb P(\mathcal E_4)\ar[d]^{\gamma}\\
&B}$$
where $B\simeq \{[\Pi]\in G(4,V),\ P_0\subset \Pi\}\simeq \mathbb P^3$, $\mathcal E_4\simeq \langle P\rangle^*\otimes \mathcal O_{\mathbb P^3}\oplus \mathcal O_{\mathbb P^3}(1)$ and $\widetilde{X_{P_0}}\in |\mathcal O_{\gamma}(2)\otimes \gamma^*\mathcal O_{\mathbb P^3}(1)|$, we see that the locus of quadrics of rank $\leq 2$ has codimension (at most) $\binom{4-2+1}{2}=3$ and by the Harris-Tu formula (\cite[Theorem 1 and Theorem 10]{HT_degener}), there are (at least) $2\left|\begin{smallmatrix}
c_2(\mathcal E_4\otimes L) &c_3(\mathcal E_4\otimes L)\\
c_0(\mathcal E_4\otimes L) &c_1(\mathcal E_4\otimes L
\end{smallmatrix}\right|=31$ of them (where $L$ has to be thought of as a formal square root of $\mathcal O_{\mathbb P^3}(1)$).\\
\indent In particular, the locus $\Gamma=\{([\ell],[P])\in \mathbb P_{F_2},\ \exists [P']\neq [P],\ ([\ell],[P'])\in \mathbb P_{F_2}\}$ has codimension $2$ in $\mathbb P_{F_2}$ (above the general plane $[P]\in F_2(X)$ there are finitely many lines that belong to other planes $P'\subset X$).\\
\indent To any hyperplane $H\subset \mathbb P(V^*)$ such that $Y:=X\cap H$ is a smooth cubic $4$-fold containing no plane, we can attach the morphism $j_H:F_2(X)\rightarrow F_1(Y)$ defined by $[P]\mapsto [P\cap H]$.\\
\indent The subvariety $F_1(Y)\subset F_1(X)$ is the zero locus of the regular section of $\mathcal E_{2|F_1(X)}$ induced by the equation of $H\subset \mathbb P(V^*)$. For any such $Y$ (containing no plane), $e^{-1}(F_1(Y))$ is obviously a section $Z_H$ of $\mathbb P_{F_2}\rightarrow F_2(X)$, $[P]\mapsto ([P\cap H],[P])$. The smooth surface $Z_H\simeq F_2(X)$ is thus the zero locus of a regular section of $e_{F_2}^*\mathcal E_{2|F_1(X)}$. By Bertini type theorems, for $H$ general, $Z_H\cap \Gamma$ is $0$-dimensional.\\
\indent As a result, as noticed in \cite[Proposition 7]{Iliev-Manivel_cub_hyp_int_syst} (the published version corrects the preprint, in which it is wrongly claimed that $j_H$ is an embedding, as underlined in \cite{Huy_cub}) $j_H:Z_H\simeq F_2(X)\rightarrow F_1(Y)$ is isomorphic to its image outside a $0$-dimensional subset of $F_2(X)$.\\
\indent The following diagram is commutative:
$$\xymatrix{0 \ar[r] & T_{Z_H} \ar[r]\ar[d] &T_{\mathbb P_{F_2}|Z_H}\ar[r]\ar[d] & N_{Z_H/\mathbb P_{F_2}} \ar[r]\ar[d] &0\\
0 \ar[r] & (e_{F_2}^*T_{F_1(Y)})_{|Z_H}\ar[r] & (e_{F_2}^*T_{F_1(X)})_{|Z_H}\ar[r] & (e_{F_2}^*N_{F_1(Y)/F_1(X)})_{|Z_H}\ar[r] &0.}$$
As $Z_H\subset \mathbb P_{F_2}$ is the zero locus of a regular section of $e_{F_2}^*\mathcal E_{2|F_1(X)}$, we have $N_{Z_H/\mathbb P_{F_2}}\simeq (e_{F_2}^*\mathcal E_{2|F_1(X)})_{|Z_H}$ so that the last vertical arrow in the diagram is an isomorphism. As the second vertical arrow is injective by Proposition \ref{prop_immersion_plan_lines}, the first is injective as well. So the snake lemma gives $(e_{F_2}^*T_{F_1(Y)})_{|Z_H}/T_{Z_H}\simeq N_{\mathbb P_{F_2}/F_1(X)|Z_H}$.\\
\indent According to \cite[Proposition 4]{Iliev-Manivel_cub_hyp_int_syst} ${\rm Im}(j_H)$ is a (non-normal) Lagrangian surface of the hyper-K\"ahler manifold $F_1(Y)$. In particular, outside a codimension $2$ subset of $F_2(X)$, we have $$\Omega_{Z_H}\simeq (e_{F_2}^*T_{F_1(Y)})_{|Z_H}/T_{Z_H}.$$
\indent As both sheaves are locally free, the isomorphism holds globally i.e.
\begin{equation}\label{isom_cotang_normal2}\Omega_{F_2(X)}\simeq N_{\mathbb P_{F_2}/F_1(X)|Z_H}.
\end{equation}
\indent We can now prove Theorem \ref{thm_1}.
\begin{proof}[Proof of Theorem \ref{thm_1}] Looking at (\ref{isom_cotang_normal2}) and (\ref{ex_seq_tgt_bundle_part1}) we see that we only have to check that $\mathcal O_e(1)_{|Z_H}\simeq \mathcal O_{Z_H}$.\\
\indent For a (general) hyperplane $H\subset \mathbb P(V^*)$, we have a rational map:
$\varphi:Gr(3,V)\dashrightarrow Gr(2,\langle H\rangle)$, $P\mapsto P\cap H$ whose indeterminacy locus is $Gr(3,\langle H\rangle)$. The morphism $j_H:F_2(X)\simeq Z_H\rightarrow F_1(Y)$ is the restriction of the map $\varphi$ to $F_2(X)$. To get the result we will show more generally that $\varphi^*\mathcal O_{Gr(2,\langle H\rangle)}(-1)\otimes \mathcal O_{Gr(3,V)}(1)$ restricts to the trivial line bundle on the open set where $\varphi$ is defined i.e. on $Gr(3,V)\backslash Gr(3,\langle H\rangle)$.\\
\indent The subvariety $Gr(3,\langle H\rangle)\subset Gr(3,V)$ is the zero locus of a regular section of $\mathcal E_3$ so that $N_{Gr(3,\langle H\rangle)/Gr(3,V)}\simeq \mathcal E_{3|Gr(3,\langle H\rangle)}$. After blowing-up this locus we get
$$\begin{small}\xymatrix{E_\tau \ar@{^{(}->}[r]^j\ar[d] & \widetilde{Gr(3,V)}\ar[d]^\tau \ar[rd]^{\widetilde{\varphi}} &\\
Gr(3,\langle H\rangle)\ar@{^{(}->}[r]^i & Gr(3,V)\ar@{-->}[r]^\varphi &Gr(2,\langle H\rangle)}\end{small}$$
where the exceptional divisor $E_\tau$ is isomorphic to $\mathbb P(\mathcal E_3^*)\simeq \mathbb P(\wedge^2\mathcal E_3\otimes det(\mathcal E_3)^{-1})$. So $E_\tau$ is isomorphic to the flag variety $Fl(2,3,\langle H\rangle)$ and $\widetilde\varphi\circ j$ corresponds to the projection on the Grassmannian of lines; hence $$\begin{small}\mathcal O_{E_\tau}(1)\simeq j^*\widetilde\varphi^*\mathcal O_{Gr(2,\langle H\rangle)}(1)\otimes \tau_{E_\tau}^*i^*\mathcal O_{Gr(3,V)}(-1)\ {\rm in\ Pic}(E_\tau).\end{small}$$ As the restriction ${\rm Pic}(Gr(3,V))\rightarrow {\rm Pic}(Gr(3,\langle H\rangle))$ is an isomorphism, so is ${\rm Pic}(\widetilde{Gr(3,V)})\rightarrow {\rm Pic}(E_\tau)$ thus $$\begin{small}\mathcal O_{\widetilde{Gr(3,V)}}(-E)\simeq \widetilde\varphi^*\mathcal O_{Gr(2,\langle H\rangle)}(1)\otimes \tau^*\mathcal O_{Gr(3,V)}(-1)\ {\rm in\ Pic}(\widetilde{Gr(3,V)}).\end{small}$$
Now pushing forward by $\tau$ the short exact sequence defining $E$, we get $$\begin{small}\tau_*\widetilde\varphi^*\mathcal O_{Gr(2,\langle H\rangle)}(1)\otimes \mathcal O_{Gr(3,V)}(-1)\simeq \tau_*\mathcal O_{\widetilde{Gr(3,V)}}(-E)\simeq \mathcal I_{Gr(3,\langle H\rangle)/ Gr(3,V)}\end{small}$$ which is indeed trivial on $Gr(3,V)\backslash Gr(3,\langle H\rangle)$.
\end{proof}
\section{Gauss map of $F_2(X)$}
Let $X\subset \mathbb P(V^*)\simeq \mathbb P^6$ be a smooth cubic hypersurface such that $F_2(X)$ is a smooth (irreducible) surface. We begin this section with the following
\begin{theoreme}\label{thm_descrip_h_1_and_wedge} The following sequence is exact:
\begin{equation}\label{ex_seq_descrip_H_1}
0\rightarrow H^1(\mathcal O_{F_2(X)})\rightarrow {\rm Sym}^2V\otimes det(V)\overset{\varphi_{{\rm eq}_X}\otimes {\rm id}_{det(V)}}{\rightarrow} V^*\otimes det(V)\rightarrow 0
\end{equation}
where $\varphi_{{\rm eq}_X}$ is defined to be $e_i+e_j\mapsto {\rm eq}_X(e_i,e_j,\cdot)$.\\
\indent Moreover, we have an inclusion $\bigwedge^2H^1(\mathcal O_{F_2(X)})\subset H^2(\mathcal O_{F_2(X)})$, which by Hodge symmetry yields $\bigwedge^2H^0(\Omega_{F_2(X)})\subset H^0(K_{F_2(X)})$.
\end{theoreme}
\begin{proof} As $\mathcal O_{F_2(X)}$ admits the Koszul resolution (\ref{ex_seq_koszul_resol}), to understand the cohomology groups $H^i(\mathcal O_{F_2(X)})$, we can use the spectral sequence $$E_1^{p,q}=H^q(G(3,V),\wedge^{-p}{\rm Sym}^3\mathcal E_3^*)\Rightarrow H^{p+q}(\mathcal O_{F_2(X)}).$$
\indent As a reminder, we borrow from \cite{jiang_noether_lefschetz} (see also \cite{spandaw}) the following elementary presentation of the Borel-Weil-Bott theorem for a Grassmannian $G(3,W)$ with ${\rm dim}(W)=d$.\\
\indent For any vector space $L$ of dimension $f$ and any decreasing sequence of integers $a=(a_1,\dots,a_f)$ there is an irreducible $GL(L)$-representation (Weyl module) denoted $\Gamma^{(a_1,\dots,a_f)}L$.\\
\indent To two decreasing sequences $a=(a_1,\dots,a_{d-e})$ and $b=(b_1,\dots,b_e)$ we can associate the sequence $$(\phi_1,\dots,\phi_d)=\phi(a,b):=(a_1-1,a_2-2,\dots,a_{d-e}-(d-e),b_1-(d-e+1),\dots,b_e-d).$$
\indent We measure how far $\phi(a,b)$ is from being decreasing by introducing $i(a,b):=\#\{\alpha<\beta,\ \phi_\alpha>\phi_\beta\}$.\\
\indent Finally, let us denote $\phi(a,b)^+=(\phi_1^+,\dots,\phi_d^+)$ a re-ordering of $\phi(a,b)$ to make it non-increasing and set $\psi(a,b):=(\phi_1^++1,\dots,\phi_d^++d)$.\\
\indent The Borel-Weil-Bott theorem states
\begin{theoreme}\label{thm_borel_weil_bott} We have:\\
\indent (1) $H^q(G(3,W),\Gamma^a\mathcal Q_3^*\otimes\Gamma^b\mathcal E_3^*)=0$ for $q\neq i(a,b)$.\\
\indent (2) $H^{i(a,b)}(G(3,W),\Gamma^a\mathcal Q_3^*\otimes\Gamma^b\mathcal E_3^*)=\Gamma^{\psi(a,b)}W$.\\
where $\mathcal Q_3$ and $\mathcal E_3$ are defined by (\ref{ex_seq_def_taut_3}) and $\Gamma^{\psi(a,b)}W=0$ if $\psi(a,b)$ is not decreasing.
\end{theoreme}
Now, we want to apply it to compute the $E_1^{p,q}$ of the spectral sequence. Using Sage with the code
\begin{verbatim}
R=WeylCharacterRing("A2")
V=R(1,0,0)
for k in range(11): print k, V.symmetric_power(3).exterior_power(k)
\end{verbatim}
we get the decompositions into irreducible modules of $\wedge^k{\rm Sym}^3\mathcal E_3^*$. Then by Borel-Weil-Bott we have:
$$\begin{tabular}{lll}
$(0)$ &$\oplus_i^{12} H^i(\mathcal O_{G(3,V)})$ &$=\oplus_i H^i(\Gamma^{(0,\dots,0)}\mathcal Q_3^*\otimes\Gamma^{(0,0,0)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^0(\mathcal O_{G(3,V)})=\Gamma^{(0,\dots,0)}V\simeq \mathbb C$\\
$ $ &$ $ &$ $\\
$(1)$ &$\oplus_i^{12} H^i({\rm Sym}^3\mathcal E_3^*)$ &$=\oplus_i^{12} H^i(\Gamma^{(3,0,0)}\mathcal E_3^*)=0$\\
$ $ &$ $ &$ $\\
$(2)$ &$\oplus_iH^i(\wedge^2{\rm Sym}^3\mathcal E_3^*)$ &$=\oplus_i H^i(\Gamma^{(3,3,0)}\mathcal E_3^*\oplus \Gamma^{(5,1,0)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^4(\Gamma^{(5,1,0)}\mathcal E_3^*)=\Gamma^{(1,\dots,1,0)}V\simeq \wedge^6V$\\
$ $ &$ $ &$ $\\
$(3)$ &$\oplus_i H^i(\wedge^3{\rm Sym}^3\mathcal E_3^*)$ &$=\oplus_i H^i(\Gamma^{(3,3,3)}\mathcal E_3^*\oplus\Gamma^{(5,3,1)}\mathcal E_3^*\oplus \Gamma^{(6,3,0)}\mathcal E_3^*\oplus \Gamma^{(7,1,1)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^4(\Gamma^{(7,1,1)}\mathcal E_3^*)=\Gamma^{(3,1,\dots,1)}V\simeq {\rm Sym}^2V\otimes det(V)$\\
$ $ &$ $ &$ $\\
$(4)$ &$\oplus_i H^i(\wedge^4{\rm Sym}^3\mathcal E_3^*)$ &$=\oplus_iH^i(\Gamma^{(6,3,3)}\mathcal E_3^*\oplus \Gamma^{(6,4,2)}\mathcal E_3^*\oplus\Gamma^{(6,6,0)}\mathcal E_3^*\oplus \Gamma^{(7,4,1)}\mathcal E_3^*\oplus\Gamma^{(8,3,1)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^8(\Gamma^{(6,6,0)}\mathcal E_3^*)=\Gamma^{(2,\dots,2,0)}V$\\
$ $ &$ $ &$\simeq {\rm Sym}^2V^*\otimes det(V)^{\otimes 2}$\\
$ $ &$ $ &$ $\\
$(5)$ &$\oplus_iH^i(\wedge^5{\rm Sym}^3\mathcal E_3^*)$ &$\simeq\oplus_iH^i(\Gamma^{(6,6,3)}\mathcal E_3^*\oplus\Gamma^{(7,4,4)}\mathcal E_3^*\oplus\Gamma^{(7,6,2)}\mathcal E_3^*\oplus\Gamma^{(8,4,3)}\mathcal E_3^*\oplus\Gamma^{(8,6,1)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \ \ \ \oplus\Gamma^{(9,4,2)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^8(\Gamma^{(7,6,2)}\mathcal E_3^*\oplus\Gamma^{(8,6,1)}\mathcal E_3^*)$\\
$ $ &$ $ &$=\Gamma^{(3,2,\dots,2)}V\oplus \Gamma^{(4,2,\dots,2,1)}V$\\
$ $ &$ $ &$\simeq ({\rm Sym}^2V\otimes V^*)\otimes det(V)^{\otimes 2}$\\
$ $ &$ $ &$ $\\
\end{tabular}$$
$$\begin{tabular}{lll}
$(6)$ &$\oplus_iH^i(\wedge^6{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i(\Gamma^{(7,7,4)}\mathcal E_3^*\oplus\Gamma^{(8,6,4)}\mathcal E_3^*\oplus\Gamma^{(9,6,3)}\mathcal E_3^*\oplus\Gamma^{(9,7,2)}\mathcal E_3^*\oplus\Gamma^{(10,4,4)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^8(\Gamma^{(9,7,2)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq \Gamma^{(5,3,2,\dots,2)}V\simeq (\wedge^2{\rm Sym}^2V)\otimes det(V)^{\otimes 2}$\\
$ $ &$ $ &$ $\\
$(7)$ &$\oplus_iH^i(\wedge^7{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(7,7,7)}\mathcal E_3^*\oplus\Gamma^{(9,7,5)}\mathcal E_3^*\oplus\Gamma^{(9,9,3)}\mathcal E_3^*\oplus\Gamma^{(10,7,4)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(7,7,7)}\mathcal E_3^*)\simeq \Gamma^{(3,\dots,3)}V\simeq det(V)^{\otimes 3}$\\
$ $ &$ $ &$ $\\
$(8)$ &$\oplus_iH^i(\wedge^8{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(10,7,7)}\mathcal E_3^*\oplus \Gamma^{(10,9,5)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(10,7,7)}\mathcal E_3^*)=\Gamma^{(6,3,\dots,3)}V\simeq {\rm Sym}^3V\otimes det(V)^{\otimes 3}$\\
$ $ &$ $ &$ $\\
$(9)$ &$\oplus_iH^i(\wedge^9{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(10,10,7)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(10,10,7)}\mathcal E_3^*)\simeq \Gamma^{(6,6,3,\dots,3)}V$\\
$ $ &$ $ &$ $\\
$(10)$ &$\oplus_iH^i(\wedge^{10}{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(10,10,10)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(10,10,10)}\mathcal E_3^*)\simeq \Gamma^{(6,6,6,3,\dots,3)}V$
\end{tabular}$$
To understand $H^1(\mathcal O_{F_2(X)})$, we have to examine the $E_\infty^{-i,i+1}$'s, for $i=0,\dots,10$. As $E_1^{-i,i+1}=0$ for any $i\neq 3$, we get $E_\infty^{-i,i+1}=0$ for $i \neq 3$.\\
\indent On the other hand, for $r\geq 2$, $E_r^{-3,4}$ is defined as the (middle) cohomology of $$E_{r-1}^{-(2+r),2+r}\overset{d_{r-1}}{\rightarrow}E_{r-1}^{-3,4}\overset{d_{r-1}}{\rightarrow}E_{r-1}^{-4+r,6-r}.$$
From the above computations, we see that $E_1^{-i,i}=0$ for $i\geq 3$ so that $E_r^{-i,i}=0$ for any $i\geq 3$ and $r\geq 1$.\\
\indent So we get $E_2^{-3,4}=Ker(E_1^{-3,4}\overset{d_1}{\rightarrow}E_1^{-2,4})$.\\
\indent As $E_1^{-1,3}=0$, we have $E_2^{-1,3}=0$ so that $E_3^{-3,4}\simeq E_2^{-3,4}$.\\
\indent As $E_1^{0,2}=0$, we have $E_3^{0,2}=0$ so that $E_4^{-3,4}\simeq E_2^{-3,4}$.\\
\indent As $E_1^{a,b}=0$ for any $a>0$, we get $E_\infty^{-3,4}\simeq E_2^{-3,4}$ i.e. the following sequence is exact:
$$0\rightarrow H^1(\mathcal O_{F_2(X)})\rightarrow E_1^{-3,4}\overset{d_1^{-3,4}}{\rightarrow}E_1^{-2,4}.$$
Now, $d_1^{-3,4}$ is given by contracting with the section defined by ${\rm eq}_X$ so that, choosing a basis $(e_0,\dots,e_6)$ of $V$, we have
$$\begin{tabular}{llll}
$d_1^{-3,4}:$ &${\rm Sym}^2V\otimes det(V)$ &$\rightarrow$ &$\wedge^6V\simeq V^*\otimes det(V)$\\
$ $ &$(e_i+e_j)\otimes (e_0\wedge\cdots\wedge e_6)$ &$\mapsto$ &$\sum_k{\rm eq}_X(e_i,e_j,e_k)\widehat{e_k}={\rm eq}_X(e_i,e_j,\cdot)\otimes (e_0\wedge\cdots\wedge e_6)$
\end{tabular}$$
If this map is not surjective, we can choose the basis so that $e_0^*\otimes (e_0\wedge\cdots\wedge e_6)\notin {\rm Im}(d_1^{-3,4})$. Then we get ${\rm eq}_X(e_i,e_j,e_0)=0$ for any $i,j$, which means that the cubic hypersurface $X$ is a cone with vertex $[e_0]$.\\
\indent So for a smooth cubic, $d_1^{-3,4}$ is surjective, hence (\ref{ex_seq_descrip_H_1}).\\
\indent Before tackling the case of $H^2(\mathcal O_{F_2(X)})$, we notice that the exterior square of (\ref{ex_seq_descrip_H_1}) gives the following exact sequence:
\begin{equation}\label{ex_seq_exterior_square_h_1}\begin{small}\begin{aligned}
0\rightarrow \wedge^2H^1(\mathcal O_{F_2(X)})\rightarrow (\wedge^2{\rm Sym}^2V)\otimes det(V)^{\otimes 2}\overset{\varphi_{{\rm eq}_X}\otimes {\rm id}_{{\rm Sym}^2V\otimes det(V)}}{\longrightarrow}{\rm Sym}^2V\otimes V^*\otimes det(V)^{\otimes 2}\\
\overset{\varphi_{{\rm eq}_X}\otimes {\rm id}_{V^*\otimes det(V)}}{\longrightarrow}{\rm Sym}^2V^*\otimes det(V)^{\otimes 2}\rightarrow 0
\end{aligned}\end{small}
\end{equation}
To understand $H^2(\mathcal O_{F_2(X)})$, we have to examine the $E_\infty^{-i,i+2}$'s, for $i=0,\dots, 10$. As $E_1^{-i,i+2}=0$ for $i\neq 2,6,10$, we have $E_\infty^{-i,i+2}=0$ for $i\neq 2,6,10$.\\
\indent \textbf{Analysis of} $E_\infty^{-2,4}$: as $E_1^{-1,4}=0$, $E_2^{-2,4}$ is the cokernel of $d_1^{-3,4}$ which has just been proven to be surjective when $X$ is smooth. So $E_2^{-2,4}=0$; from which we get $E_\infty^{-2,4}=0$.\\
\indent \textbf{Analysis of} $E_\infty^{-6,8}$: the $E_r^{-6,8}$ are the middle cohomology of $$E_{r-1}^{-(5+r),6+r}\overset{d_{r-1}}{\rightarrow}E_{r-1}^{-6,8}\overset{d_{r-1}}{\rightarrow}E_{r-1}^{-7+r,10-r}.$$
From the above computations of the cohomology groups, we see that $E_1^{-(5+r),6+r}=0$ for any $r\geq 2$ so $E_{r-1}^{-(5+r),6+r}=0$ for any $r\geq 2$.\\
\indent So $E_2^{-6,8}={\rm Ker}(E_1^{-6,8}\overset{d_1^{-6,8}}{\rightarrow}E_1^{-5,8})$.\\
\indent We see that $E_1^{-7+r,10-r}=0$ for any $r\geq 3$, so that $E_{r-1}^{-7+r,10-r}=0$ for any $r\geq 3$. As a result we get $E_\infty^{-6,8}=E_2^{-6,8}$.\\
\indent From (\ref{ex_seq_exterior_square_h_1}), we get that $Coker(E_1^{-6,8}\overset{d_1^{-6,8}}{\rightarrow}E_1^{-5,8})\simeq {\rm Sym}^2V^*\otimes det(V)^{\otimes 2}$ and $E_\infty^{-6,8}={\rm Ker}(E_1^{-6,8}\overset{d_1^{-6,8}}{\rightarrow}E_1^{-5,8})\simeq \wedge^2H^1(\mathcal O_{F_2(X)})$.\\
\indent Now, the spectral sequence computes the graded pieces of a filtration $$0=F^1\subset F^0\subset \cdots\subset F^{-10}\subset F^{-11}=H^2(\mathcal O_{F_2(X)})$$ and we have seen ($E_\infty^{-2,4}=0$) that all the graded pieces are trivial but $Gr_{-6}^F\simeq E_{\infty}^{-6,8}$ and (a priori) $Gr_{-10}^F\simeq E_\infty^{-10,12}$. As a result, we get $\wedge^2H^1(\mathcal O_{F_2(X)})\simeq E_\infty^{-6,8}=F^{-6}=\cdots=F^{-9}\subset F^{-10}\subset H^2(\mathcal O_{F_2(X)})$, proving the inclusion.
\end{proof}
Moreover, we have the following proposition
\begin{proposition}\label{prop_descrip_h_0_omega} We have $H^0(\mathcal Q_{3|F_2(X)}^*)\simeq H^0(\mathcal Q_3^*)\simeq V$ and $H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})\simeq H^0({\rm Sym}^2\mathcal E_3)\simeq {\rm Sym}^2V^*$ and the following sequence is exact
\begin{equation}\label{ex_seq_H_0_tgt_bundle_ex_seq}0\rightarrow H^0(\mathcal Q_{3|F_2(X)}^*)\rightarrow H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})\rightarrow H^0(\Omega_{F_2(X)})\rightarrow 0
\end{equation} where the first map is given by $v\mapsto {\rm eq}_X(v,\cdot,\cdot)$.
\end{proposition}
\begin{proof} To understand $H^0(\mathcal Q_{3|F_2(X)}^*)$, we use again the Koszul resolution (\ref{ex_seq_koszul_resol}) tensored by $\mathcal Q_3^*$. We have the spectral sequence $$E_1^{p,q}=H^q(G(3,V),\mathcal Q_3^*\otimes \wedge^{-p}{\rm Sym}^3\mathcal E_3^*)\Rightarrow H^{p+q}(\mathcal Q_{3|F_2(X)}^*).$$
We use again the Borel-Weil-Bott theorem \ref{thm_borel_weil_bott} to compute the cohomology groups on $G(3,V)$. The decompositions of the $\wedge^i{\rm Sym}^3\mathcal E_3^*$'s into irreducible modules have already been obtained in Theorem \ref{thm_descrip_h_1_and_wedge}. So we get:
$$\begin{tabular}{lll}
$(0)$ &$\oplus_iH^i(\mathcal Q_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*)$\\
$ $ &$ $ &$=H^0(\Gamma^{(1,0,0,0)}\mathcal Q_3^*)=V$\\
$ $ &$ $ &$ $\\
$(1)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes\Gamma^{(3,0,0)}\mathcal E_3^*)=0$\\
$ $ &$ $ &$ $\\
$(2)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^2{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(3,3,0)}\mathcal E_3^*\oplus\Gamma^{(5,1,0)}\mathcal E_3^*))=0$\\
$ $ &$ $ &$ $\\
$(3)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^3{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(3,3,3)}\mathcal E_3^*\oplus\Gamma^{(5,3,1)}\mathcal E_3^*\oplus \Gamma^{(6,3,0)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus\Gamma^{(7,1,1)}\mathcal E_3^*))$\\
$ $ &$ $ &$=H^4(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes\Gamma^{(7,1,1)}\mathcal E_3^*)\simeq \Gamma^{(3,2,1,\dots,1)}V$\\
$ $ &$ $ &$ $\\
$(4)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^4{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(6,3,3)}\mathcal E_3^*\oplus\Gamma^{(6,4,2)}\mathcal E_3^*\oplus \Gamma^{(6,6,0)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \ \oplus\Gamma^{(7,4,1)}\mathcal E_3^*\oplus\Gamma^{(8,3,1)}\mathcal E_3^*))$\\
$ $ &$ $ &$=0 $\\
$ $ &$ $ &$ $\\
$(5)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^5{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(6,6,3)}\mathcal E_3^*\oplus\Gamma^{(7,4,4)}\mathcal E_3^*\oplus \Gamma^{(7,6,2)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \ \ \oplus\Gamma^{(8,4,3)}\mathcal E_3^*\oplus\Gamma^{(8,6,1)}\mathcal E_3^*\oplus \Gamma^{(9,4,2)}\mathcal E_3^*))$\\
$ $ &$ $ &$=0 $\\
$ $ &$ $ &$ $\\
\end{tabular}$$
$$\begin{tabular}{lll}
$(6)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^6{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(7,7,4)}\mathcal E_3^*\oplus\Gamma^{(8,6,4)}\mathcal E_3^*\oplus \Gamma^{(9,6,3)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \ \ \oplus\Gamma^{(9,7,2)}\mathcal E_3^*\oplus\Gamma^{(10,4,4)}\mathcal E_3^*))$\\
$ $ &$ $ &$=H^8(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes \Gamma^{(9,7,2)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq\Gamma^{(5,3,3,2,\dots,2)}V$\\
$ $ &$ $ &$ $\\
$(7)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^7{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(7,7,7)}\mathcal E_3^*\oplus\Gamma^{(9,7,5)}\mathcal E_3^*\oplus \Gamma^{(9,9,3)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \ \ \oplus\Gamma^{(10,7,4)}\mathcal E_3^*))$\\
$ $ &$ $ &$=0$\\
$ $ &$ $ &$ $\\
$(8)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^8{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes(\Gamma^{(10,7,7)}\mathcal E_3^*\oplus\Gamma^{(10,9,5)}\mathcal E_3^*))$\\
$ $ &$ $ &$=0$\\
$ $ &$ $ &$ $\\
$(9)$ &$\oplus_iH^i(\mathcal Q_3^*\otimes\wedge^9{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes\Gamma^{(10,10,7)}\mathcal E_3^*)=0$\\
$ $ &$ $ &$ $\\
\end{tabular}$$
$$\begin{tabular}{lll}
$(10)$ &$\oplus_i H^i(\mathcal Q_3^*\otimes\wedge^{10}{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes\Gamma^{(10,10,10)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(1,0,0,0)}\mathcal Q_3^*\otimes\Gamma^{(10,10,10)}\mathcal E_3^*)\simeq\Gamma^{(6,6,6,4,3,3,3)}V$
\end{tabular}$$
The graded pieces of the filtration on $H^0(\mathcal Q_{3|F_2(X)}^*)$ are given by $E_\infty^{-i,i}$, $i=0,\dots,10$. From the above calculations, we see that $E_1^{-i,i}=0$ for any $i\geq 1$, thus $E_\infty^{-i,i}=0$ for any $i\geq 1$.\\
\indent On the other hand $E_1^{0,0}=H^0(\mathcal Q_3^*)=V$ and as $E_r^{a,b}=0$ for any $a>0$, we have $E_r^{0,0}={\rm Coker}(E_{r-1}^{-(r-1),r-2}\overset{d_{r-1}}{\rightarrow}E_{r-1}^{0,0})$ for any $r\geq 2$. But the above calculations give $E_1^{-r,r-1}=0$ for $r\geq 1$ so that $E_r^{-r,r-1}=0$ for any $r\geq 1$. Thus $E_\infty^{0,0}=E_1^{0,0}$, proving that $H^0(\mathcal Q_{3|F_2(X)}^*)\simeq H^0(\mathcal Q_3^*)\simeq V$.\\
\indent Now, let us examine $H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})$ by the spectral sequence
$$E_1^{p,q}=H^q({\rm Sym}^2\mathcal E_3\otimes\wedge^{-p}{\rm Sym}^3\mathcal E_3^*)\Rightarrow H^{p+q}({\rm Sym}^2\mathcal E_{3|F_2(X)}).$$
\indent Using Sage with the code
\begin{verbatim}
R=WeylCharacterRing("A2")
V=R(1,0,0)
W=R(0,0,-1)
for k in range(11): print k, W.symmetric_power(2)*V.symmetric_power(3).exterior_power(k)
\end{verbatim}
and the Borel-Weil-Bott theorem \ref{thm_borel_weil_bott}, we get:
$$\begin{tabular}{lll}
$(0)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3)$ &$\simeq \oplus_iH^i(\Gamma^{(0,0,-2)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^0(\Gamma^{(0,0,-2)}\mathcal E_3^*)\simeq \Gamma^{(0,\dots,0,-2)}V\simeq {\rm Sym}^2V^*$\\
$ $ &$ $ &$ $\\
$(1)$ &$\oplus_iH^i({\rm Sym}^2\mathcal E_3\otimes{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(1,0,0)}\mathcal E_3^*\oplus \Gamma^{(2,0,-1)}\mathcal E_3^*\oplus \Gamma^{(3,0,-2)}\mathcal E_3^*)=0$\\
$ $ &$ $ &$ $\\
$(2)$ &$\oplus_iH^i({\rm Sym}^2\mathcal E_3\otimes\wedge^2{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i((\Gamma^{(3,1,0)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(3,2,-1)}\mathcal E_3^*\oplus \Gamma^{(3,3,-2)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus\Gamma^{(4,0,0)}\mathcal E_3^*\oplus \Gamma^{(4,1,-1)}\mathcal E_3^*\oplus \Gamma^{(5,1,-2)}\mathcal E_3^*\oplus \Gamma^{(5,0,-1)}\mathcal E_3^*))$\\
$ $ &$ $ &$=H^4(\Gamma^{(5,1,-2)}\mathcal E_3^*\oplus \Gamma^{(5,0,-1)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq \Gamma^{(1,\dots,1,-2)}V\oplus \Gamma^{(1,\dots,1,0,-1)}V$\\
$ $ &$ $ &$ $\\
$(3)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^3{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i((\Gamma^{(3,3,1)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(4,2,1)}\mathcal E_3^*\oplus (\Gamma^{(4,3,0)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(5,1,1)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(5,2,0)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(5,3,-1)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(6,1,0)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(6,2,-1)}\mathcal E_3^*\oplus \Gamma^{(6,3,-2)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(7,1,-1)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^4((\Gamma^{(5,1,1)}\mathcal E_3^*)^{\oplus 2}\oplus(\Gamma^{(6,1,0)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(7,1,-1)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq det(V)^{\oplus 2}\oplus (\Gamma^{(2,1,\dots,1,0)}V)^{\oplus 2}\oplus \Gamma^{(3,1,\dots,1,-1)}V$\\
$ $ &$ $ &$ $\\
\end{tabular}$$
$$\begin{tabular}{lll}
$(4)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^4{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_iH^i(\Gamma^{(4,3,3)}\mathcal E_3^*\oplus \Gamma^{(4,4,2)}\mathcal E_3^*\oplus (\Gamma^{(5,3,2)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(5,4,1)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(6,2,2)}\mathcal E_3^*\oplus (\Gamma^{(6,3,1)}\mathcal E_3^*)^{\oplus 4}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(6,4,0)}\mathcal E_3^*)^{\oplus 3}\oplus \Gamma^{(6,5,-1)}\mathcal E_3^*\oplus \Gamma^{(6,6,-2)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(7,2,1)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(7,3,0)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(7,4,-1)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(8,1,1)}\mathcal E_3^*\oplus \Gamma^{(8,2,0)}\mathcal E_3^*\oplus \Gamma^{(8,3,-1)}\mathcal E_3^*)$\\
$ $ &$ $ &$=\underbrace{H^4(\Gamma^{(8,1,1)}\mathcal E_3^*)}_{\simeq {\rm Sym}^3V\otimes \det(V)}\oplus \underbrace{H^8(\Gamma^{(6,6,-2)}\mathcal E_3^*)}_{\simeq \Gamma^{(2,\dots,2,-2)}V}$\\
$ $ &$ $ &$ $\\
$(5)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^5{\rm Sym}^3\mathcal E_3^*)$ &$\simeq\oplus_iH^i(\Gamma^{(5,4,4)}\mathcal E_3^*\oplus (\Gamma^{(6,4,3)}\mathcal E_3^*)^{\oplus 3}\oplus (\Gamma^{(6,5,2)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(6,6,1)}\mathcal E_3^*)^{\oplus 3}\oplus \Gamma^{(7,3,3)}\mathcal E_3^*\oplus (\Gamma^{(7,4,2)}\mathcal E_3^*)^{\oplus 4}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(7,5,1)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(7,6,0)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(8,3,2)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(8,4,1)}\mathcal E_3^*)^{\oplus 3}\oplus \Gamma^{(8,5,0)}\mathcal E_3^*\oplus \Gamma^{(8,6,-1)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(9,2,2)}\mathcal E_3^*\oplus \Gamma^{(9,3,1)}\mathcal E_3^*\oplus \Gamma^{(9,4,0)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^8((\Gamma^{(6,6,1)}\mathcal E_3^*)^{\oplus 3}\oplus (\Gamma^{(7,6,0)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(8,6,-1)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq (\Gamma^{(2,\dots,2,1)}V)^{\oplus 3}\oplus (\Gamma^{(3,2,\dots,2,0)}V)^{\oplus 2}\oplus \Gamma^{(4,2,\dots,2,-1)}V$\\
$ $ &$ $ &$ $ \\
$(6)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^6{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i(\Gamma^{(6,6,4)}\mathcal E_3^*\oplus (\Gamma^{(7,5,4)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(7,6,3)}\mathcal E_3^*)^{\oplus 3}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(7,7,2)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(8,4,4)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(8,5,3)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(8,6,2)}\mathcal E_3^*)^{\oplus 3}\oplus \Gamma^{(8,7,1)}\mathcal E_3^*\oplus (\Gamma^{(9,4,3)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(9,5,2)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(9,6,1)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(9,7,0)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(10,4,2)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^8((\Gamma^{(7,7,2)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(8,6,2)}\mathcal E_3^*)^{\oplus 3}\oplus \Gamma^{(8,7,1)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus (\Gamma^{(9,6,1)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(9,7,0)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq (\Gamma^{(3,3,2,\dots, 2)}V)^{\oplus 2}\oplus (\Gamma^{(4,2,\dots,2)}V)^{\oplus 3}\oplus \Gamma^{(4,3,2,\dots,2,1)}V$\\
$ $ &$ $ &$\ \ \ \oplus (\Gamma^{(5,2,\dots,2,1)}V)^{\oplus 2}\oplus \Gamma^{(5,3,2,\dots,2,0)}V$\\
$ $ &$ $ &$ $\\
$(7)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^7{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i((\Gamma^{(7,7,5)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(8,6,5)}\mathcal E_3^*\oplus (\Gamma^{(8,7,4)}\mathcal E_3^*)^{\oplus 2}$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(9,5,5)}\mathcal E_3^*\oplus (\Gamma^{(9,6,4)}\mathcal E_3^*)^{\oplus 2}\oplus (\Gamma^{(9,7,3)}\mathcal E_3^*)^{\oplus 3}$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(9,8,2)}\mathcal E_3^*\oplus \Gamma^{(9,9,1)}\mathcal E_3^*\oplus \Gamma^{(10,5,4)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(10,6,3)}\mathcal E_3^*\oplus \Gamma^{(10,7,2)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^8(\Gamma^{(9,8,2)}\mathcal E_3^*\oplus \Gamma^{(9,9,1)}\mathcal E_3^*\oplus \Gamma^{(10,7,2)}\mathcal E_3^*)$\\
$ $ &$ $ &$\simeq \Gamma^{(5,4,2,\dots,2)}V\oplus \Gamma^{(5,5,2,\dots,2,1)}V\oplus \Gamma^{(6,3,2,\dots,2)}V$\\
$ $ &$ $ &$ $\\
$(8)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^8{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i(\Gamma^{(8,7,7)}\mathcal E_3^*\oplus \Gamma^{(9,7,6)}\mathcal E_3^*\oplus \Gamma^{(9,8,5)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(9,9,4)}\mathcal E_3^*\oplus (\Gamma^{(10,7,5)}\mathcal E_3^*)^{\oplus 2}\oplus \Gamma^{(10,8,4)}\mathcal E_3^*$\\
$ $ &$ $ &$\ \ \ \ \oplus \Gamma^{(10,9,3)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(8,7,7)}\mathcal E_3^*)\simeq \Gamma^{(4,3,\dots,3)}V$\\
$ $ &$ $ &$ $\\
$(9)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^9{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i(\Gamma^{(10,8,7)}\mathcal E_3^*\oplus \Gamma^{(10,9,6)}\mathcal E_3^*\oplus \Gamma^{(10,10,5)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(10,8,7)}\mathcal E_3^*)\simeq \Gamma^{(6,4,3,\dots,3)}V$\\
$ $ &$ $ &$ $\\
$(10)$ &$\oplus_i H^i({\rm Sym}^2\mathcal E_3\otimes\wedge^{10}{\rm Sym}^3\mathcal E_3^*)$ &$\simeq \oplus_i H^i(\Gamma^{(10,10,8)}\mathcal E_3^*)$\\
$ $ &$ $ &$=H^{12}(\Gamma^{(10,10,8)}\mathcal E_3^*)\simeq \Gamma^{(6,6,4,3,\dots,3)}V$
\end{tabular}$$
\indent The graded pieces of the filtration on $H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})$ are given by the $E_\infty^{-i,i}$. We have $E_\infty^{-i,i}=0$ for any $i\neq 0,4$ since $E_1^{-i,i}=0$, for $i\neq 0,4$.\\
\indent As $E_r^{a,b}=0$ for any $a>0$ and $E_r^{-r,r-1}=0$ (because $E_1^{-r,r-1}=0$) for any $r\geq 1$, we have $E_\infty^{0,0}=E_1^{0,0}$.\\
\indent In particular $H^0({\rm Sym}^2\mathcal E_3)\simeq E_\infty^{0,0}\subset H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})$. As $h^0({\rm Sym}^2\mathcal E_3)=dim({\rm Sym}^2V^*)=28$ we have $h^0({\rm Sym}^2\mathcal E_{3|F_2(X)})\geq 28$. By Hodge symmetry $h^0(\Omega_{F_2(X)})=h^1(\mathcal O_{F_2(X)})=21$ (Theorem \ref{thm_descrip_h_1_and_wedge}). So the exactness of the sequence $$\begin{small}0\rightarrow H^0(\mathcal Q_{3|F_2(X)}^*)\rightarrow H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})\rightarrow H^0(\Omega_{F_2(X)})\end{small}$$ implies $H^0({\rm Sym}^2\mathcal E_3)=H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})$ and the surjectivity of the last map.
\end{proof}
According to Theorem \ref{thm_descrip_h_1_and_wedge}, $\bigwedge^2H^0(\Omega_{F_2(X)})\subset H^0(K_{F_2(X)})$. As $K_{F_2(X)}\simeq \mathcal O_{G(3,V)}(3)_{|F_2(X)}$, the map $\rho:F_2(X)\dashrightarrow |\bigwedge^2H^0(\Omega_{F_2(X)})|$ is the composition of the degree $3$ Veronese of the natural embedding $F_2(X)\subset G(3,V)$ followed by a linear projection. Moreover, we have the following
\begin{lemme}\label{lem_bpf_wedge_kernel_alb} (1) The canonical bundle $K_{F_2(X)}$ is generated by the sections in $\bigwedge^2H^0(\Omega_{F_2(X)})\subset H^0(K_{F_2(X)})$. In particular, $|\bigwedge^2H^0(\Omega_{F_2(X)})|$ is base-point-free.\\
\indent (2) For any $[P]\in F_2(X)$, the following sequence is exact:
$$0\rightarrow \mathcal K_{[P]}\rightarrow H^0(\Omega_{F_2(X)})\overset{ev([P])}{\rightarrow} \Omega_{F_2(X),[P]}\rightarrow 0$$ where $\mathcal K_{[P]}=\{Q\in H^0(\mathcal O_{\mathbb P^6}(2)),\ P\subset \{Q=0\}\}/Span( ({\rm eq}_X(x,\cdot,\cdot))_{x\in \langle P\rangle})$
\end{lemme}
\begin{proof} As $\mathcal E_{3|F_2(X)}$ is globally generated (as a restriction of $\mathcal E_3$, which is globally generated (\ref{ex_seq_def_taut_3})), ${\rm Sym}^2\mathcal E_{3|F_2(X)}$ is also globally generated. The same holds for $\mathcal Q_{3|F_2(X)}^*$ (by (\ref{ex_seq_def_taut_3})). So applying the evaluation to (\ref{ex_seq_H_0_tgt_bundle_ex_seq}), we get the commutative diagram:
$$\xymatrix@-1pc{0\ar[r] &H^0(\mathcal Q_{3|F_2(X)}^*)\otimes \mathcal O_{F_2(X)}\ar[r]\ar[d]^{ev_1} &H^0({\rm Sym}^2\mathcal E_{3|F_2(X)})\otimes \mathcal O_{F_2(X)} \ar[r]\ar[d]^{ev_2} &H^0(\Omega_{F_2(X)})\otimes \mathcal O_{F_2(X)} \ar[r]\ar[d]^{ev_3} &0\\
0\ar[r] &\mathcal Q_{3|F_2(X)}^*\ar[r] &{\rm Sym}^2\mathcal E_{3|F_2(X)}\ar[r] &\Omega_{F_2(X)}\ar[r] &0}$$
where the bottom row is (\ref{ex_seq_tgt_bundle_seq}). As $ev_2$ is surjective, we get that $ev_3$ is also surjective i.e. $\Omega_{F_2(X)}$ is globally generated. Then taking the exterior square of $ev_3$, we get that $\wedge^2ev_3$ is surjective: $$\bigwedge^2H^0(\Omega_{F_2(X)})\otimes \mathcal O_{F_2(X)}\overset{\wedge^2 ev_3}{\twoheadrightarrow}\wedge^2\Omega_{F_2(X)}.$$
\indent Now a base point of $|\bigwedge^2H^0(\Omega_{F_2(X)})|$ would be a point where $\wedge^2 ev_3$ fails to be surjective. So $|\bigwedge^2H^0(\Omega_{F_2(X)})|$ is base point free.\\
\indent (2) As $H^0(\mathcal Q_{3|F_2(X)}^*)\simeq H^0(\mathcal Q_3^*)\simeq V$ by Proposition \ref{prop_descrip_h_0_omega}, (\ref{ex_seq_def_taut_3}) yields ${\rm ker}(ev_1)\simeq \mathcal E_{3|F_2(X)}^*$ so the snake lemma gives the exact sequence.
\end{proof}
Now, let us come back to the Gauss map of $F_2(X)$, that we have defined to be:
$$\begin{tabular}{llll}
$\mathcal G:$ &$alb_{F_2}(F_2(X))$ &$\dashrightarrow$ &$G(2, T_{Alb(F_2(X)),0})$\\
$ $ &$t$ &$\mapsto$ &$T_{alb_{F_2}(F_2(X))-t,0}$
\end{tabular}$$
where $alb_{F_2}(F_2(X))-t$ is the translation of $alb_{F_2}(F_2(X))\subset {\rm Alb}(F_2(X))$ by $-t\in {\rm Alb}(F_2(X))$. It is defined on the smooth locus of $alb_{F_2}(F_2(X))$.\\
\indent According to \cite[Section (III)]{Coll_cub} $T alb_{F_2}$ is injective. So the indeterminacies of $\mathcal G$ are resolved by the pre-composition with $alb_{F_2}$ i.e.
$$\begin{tabular}{llll}
$F_2(X)$ &$\rightarrow$ &$G(2,T_{Alb(F_2(X)),0})$\\
$t$ &$\mapsto$ &$T_{-alb_{F_2}(t)}{\rm Translate}(-alb_{F_2}(t))(T_t alb_{F_2}(T_{F_2(X),t})).$
\end{tabular}$$
\indent We have the Pl\"ucker embedding
$$G(2,T_{Alb(F_2(X)),0})\simeq G(2,H^0(\Omega_{F_2(X)})^*)\subset \mathbb P(\bigwedge^2H^0(\Omega_{F_2(X)})^*)$$ and the commutative diagram:
$$\xymatrix{F_2(X)\ar[r]^{alb_{F_2}}\ar[dd]^{\rho} &alb_{F_2}(F_2(X))\ar@{.>}[d]^{\mathcal G}\\
&G(2,H^0(\Omega_{F_2(X)})^*)\ar@{^{(}->}[d]\\
|\bigwedge^2H^0(\Omega_{F_2(X)})|\ar[r]^*[@]{\cong} &\mathbb P(\bigwedge^2H^0(\Omega_{F_2(X)})^*)}$$
\indent The following proposition completes the proof of Theorem \ref{thm_gauss_map}.
\begin{proposition}\label{prop_rho_embedding} The morphism $\rho$ is an embedding, which implies that $alb_{F_2}$ is an isomorphism onto its image and $\mathcal G$ is an embedding.
\end{proposition}
\begin{proof} Let us denote $J_X$ the Jacobian ideal of $X$ i.e. the ideal of the polynomial ring generated by $(\frac{\partial {\rm eq}_X}{\partial X_i})_{i=0,\dots,6}$ and $J_{X,2}$ its homogeneous part of degree $2$. By Proposition \ref{prop_result_Collino_smoothness}, for any $[P]\in F_2(X)$, $dim(J_{X,2|P})=4$ so that $dim(J_X\cap \{Q\in H^0(\mathcal O_{\mathbb P^6}(2)),\ P\subset \{Q=0\}\})=3$. We have the following:
\begin{lemme}\label{lem_jacobian_ideal} (1) For $[P]\in G(3,V)$, the codimension of $L_P^2:=\{Q\in H^0(\mathcal O_{\mathbb P^6}(2)),\ P\subset \{Q=0\}\}$ in $H^0(\mathcal O_{\mathbb P^6}(2))$ is $6$. For $[P]\neq [P']\in G(3,V)$, the codimension of $L_{P,P'}^2:=\{Q\in H^0(\mathcal O_{\mathbb P^6}(2)),\ P,P'\subset \{Q=0\}\}$ inside $L_P^2$ is respectively:\\
\indent \indent (i) $6$ if $P\cap P'=\emptyset$;\\
\indent \indent (ii) $5$ if $P\cap P'=\{pt\}$;\\
\indent \indent (iii) $3$ if $P\cap P'=\{{\rm line}\}$.\\
\indent (2) For $[P]\neq [P']\in F_2(X)$ such that $P\cap P'=\{{\rm line}\}$, $dim(J_X\cap L_{P,P'}^2)\geq 1$ and if $X$ is general, we even have $dim(J_X\cap L_{P,P'}^2)\geq 2$. So that $(J_X\cap L_P^2)+ L_{P,P'}^2\subsetneq L_P^2$ and for $X$ general, $dim(L_P^2/((J_X\cap L_P^2)+ L_{P,P'}^2))\geq 2$.
\end{lemme}
\begin{proof} (1) It is a direct calculation.\\
\indent (2) Up to projective transformation, we can assume $P=\{X_0=\cdots=X_3=0\}$, $P'=\{X_0=X_1=X_2=X_4=0\}$. Then ${\rm eq}_X$ is of the form (\ref{normal_form_0}) with the additional conditions: $Q_3(0,X_5,X_6)=0$, $D_5(0,0,0,X_3)=0$, $D_6(0,0,0,X_3)=0$, $R(0,0,0,X_3)=0$.\\
\indent By definition, the quadrics of the Jacobian ideal are $\frac{\partial {\rm eq}_X}{\partial X_i}$'s and according to Proposition \ref{prop_result_Collino_smoothness}, $(\frac{\partial {\rm eq}_X}{\partial X_i}_{|P})_{i=0,\dots,3}$ are linearly independent so that $J_X\cap L_P^2=Span((\frac{\partial {\rm eq}_X}{\partial X_i}_{|P})_{i=4,5,6})$. For $i\in\{4,5,6\}$, $$\begin{small}\frac{\partial {\rm eq}_X}{\partial X_i}=X_0\frac{\partial Q_0}{\partial X_i}+X_1\frac{\partial Q_1}{\partial X_i}+X_2\frac{\partial Q_2}{\partial X_i}+X_3\frac{\partial Q_3}{\partial X_i}+D_i\end{small}$$ which, when restricted to $P'$ gives $\begin{small}\frac{\partial {\rm eq}_X}{\partial X_i}_{|P'}=X_3\frac{\partial Q_3}{\partial X_i}(0,X_5,X_6)+D_i(0,0,0,X_3)\end{small}$. But since $Q_3(0,X_5,X_6)=0$, we have $\frac{\partial Q_3}{\partial X_i}(0,X_5,X_6)=0$ for $i=5,6$. So that $\frac{\partial {\rm eq}_X}{\partial X_5}_{|P'}=0=\frac{\partial {\rm eq}_X}{\partial X_6}_{|P'}$ i.e. $\frac{\partial {\rm eq}_X}{\partial X_5}$, $\frac{\partial {\rm eq}_X}{\partial X_6}\in L_{P,P'}^2\cap J_X$. For $X$ general, those two quadric polynomials are independent.\\
\indent We have ${\rm dim}((J_X\cap L^2_P) + L^2_{P,P'})={\rm dim}(J_X\cap L_P^2)+{\rm dim}(L^2_{P,P'})-{\rm dim}(J_X\cap L_{P,P'}^2)$ which, by the first item of the lemma, yields the result.
\end{proof}
According to the Lemma, for $[P]\neq [P']\in F_2(X)$, we can always find a quadric $Q\in H^0(\mathcal O_{\mathbb P^6}(2))$ such that $0\neq \overline Q\in L_P^2/(J_X\cap L_P^2+L_{P,P'}^2)$; in particular $Q_{|P}=0$ but $Q_{|P'}\neq 0$. Pick another $Q'\in H^0(\mathcal O_{\mathbb P^6}(2))\backslash (L_P^2\cup L_{P'}^2)$ (i.e. $Q'_{|P}\neq 0$, $Q'_{|P'}\neq 0$) such that $Q'_{|P'}$ is independent of $Q_{|P'}$ and $Q$ and $Q'$ are independent modulo $J_{X,2}$ (${\rm dim}(H^0(\mathcal O_{\mathbb P^6}(2))/(J_{X,2}\oplus\mathbb C[Q]))=5$).\\
\indent By Proposition \ref{prop_descrip_h_0_omega}, such quadrics give rise to $1$-forms on $F_2(X)$. Then $Q\wedge Q'\in \bigwedge^2H^0(\Omega_{F_2(X)})$ vanishes at $[P]$ but not at $[P']$ i.e. $|\bigwedge^2H^0(\Omega_{F_2(X)})|$ separates points.\\
\indent Now, given a $[P]\in F_2(X)$, we recall that $$T_{[P]}F_2(X)=\{u\in Hom(\langle P\rangle,V/\langle P\rangle),\ {\rm eq}_X(x,x,u(x))=0\ \forall x\in \langle P\rangle\}$$ (the first order of ${\rm eq}_X(x+u(x),x+u(x),x+u(x))=0$, $\forall x\in \langle P\rangle$).\\
\indent Let $Q\in L_P^2$ be such that $0\neq \overline Q\in H^0(\mathcal O_{\mathbb P^6}(2))/J_{X,2}$ and $T_{[P]}F_2(Q)\cap T_{[P]}F_2(X)=\{0\}$. Pick a $0\neq\overline Q'\in H^0(\mathcal O_{\mathbb P^6}(2))/J_{X,2}$, such that $Q'_{|P}\neq 0$ then $Q\wedge Q'\in \bigwedge^2H^0(\Omega_{F_2(X)})$ and $(Q\wedge Q')_{|P}=0$.\\
\indent Moreover, given a $u\in T_{[P]}F_2(X)$, $d_{[P]}Q(u)\wedge Q'_{|P}+Q_{|P}\wedge d_{[P]}Q'(u)=d_{[P]}Q(u)\wedge Q'_{|P}$ where $d_{[P]}Q(u)$ is the quadratic form $x\mapsto {\rm eq}_Q(x,u(x))$ and is non-trivial since $T_{[P]}F_2(Q)\cap T_{[P]}F_2(X)=\{0\}$. Then for $Q$ generic (containing $P$ and such that $T_{[P]}F_2(Q)\cap T_{[P]}F_2(X)=\{0\}$), $d_{[P]}Q(u)$ is linearly independent of $Q'_{|P}$ so that the differential of $Q\wedge Q'$ does not vanish along the tangent vector $u$. So $|\bigwedge^2H^0(\Omega_{F_2(X)})|$ separates tangent directions.
\end{proof}
\textit{}\\
\section{Variety of osculating planes of a cubic $4$-fold}
We have previously introduced, for a smooth cubic $4$-fold containing no plane $Z\subset \mathbb P(H^*)\simeq \mathbb P^5$, the variety of osculating planes (\ref{def_var_of_oscul_planes})
$F_0(Z):=\{[P]\in G(3,H),\ \exists \ell\subset P\ {\rm line\ s.t.}\ P\cap Z=\ell\ {\rm (set-theoretically)}\}$.\\
\indent The variety $F_0(Z)$ lives naturally in $Fl(2,3,H)$ i.e. $$F_0(Z)=\{([\ell],[P])\in Fl(2,3,H),\ P\cap Z=\ell\ {\rm (set-theoretically)}\}$$ and from the exact sequence (\ref{ex_seq_taut_bundles_2_3}): $$\begin{small}0\rightarrow e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1)\rightarrow t^*\mathcal E_3\rightarrow e^*\mathcal E_2\rightarrow 0
\end{small}$$
we see that $e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1)$ is, for $([\ell],[P])\in Fl(2,3,H)$, the bundle of equations of $\ell\subset P$. As a result $F_0(Z)$ is the zero locus on $Fl(2,3,H)$ of a section of the rank $9$ vector bundle $\mathcal F$ defined by the exact sequence
\begin{equation}\label{ex_seq_def_F}
\begin{small}0\rightarrow e^*\mathcal O_{G(2,H)}(-3)\otimes t^*\mathcal O_{G(3,H)}(3)\rightarrow t^*{\rm Sym}^3\mathcal E_3\rightarrow \mathcal F\rightarrow 0\end{small}
\end{equation}
In particular, (since $\mathcal F$ is globally generated by the sections induced by $H^0(t^*{\rm Sym}^3\mathcal E_3)$) by Bertini type theorems, for $Z$ general, $F_0(Z)$ is a smooth surface with $K_{F_0(Z)}\simeq (t^*\mathcal O_{G(3,H)}(3))_{|F_0(Z)}$.\\
\indent Its link to the surface of planes of a cubic $5$-fold is the following
\begin{proposition}\label{prop_etale_cover_F_2_F_0} Denoting $X_Z=\{X_6^3-{\rm eq}_Z(X_0,\dots,X_5)=0\}$ the cyclic cubic $5$-fold associated to $Z$, the linear projection with center $p_0:=[0:\cdots:0:1]$ induces a degree $3$ \'etale cover $\pi:F_2(X_Z)\rightarrow F_0(Z)$ given by the torsion line bundle $(e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1))_{|F_0(Z)}$.\\
\indent In particular, when $F_0(Z)$ is smooth, $F_2(X_Z)$ and $F_0(Z)$ are smooth and irreducible.
\end{proposition}
\begin{proof} (1) The point $p_0$ does not belong to $X_Z$. In particular, any $[P]\in F_2(X_Z)$ is sent by $\pi_{p_0}:\mathbb P(V^*)\dashrightarrow \mathbb P(H^*)$ to a plane in $\mathbb P(H^*)$ where $V=H\oplus \mathbb C\cdot p_0$. The restriction of $\pi_{p_0}$ (also denoted $\pi_{p_0}$) to $X_Z$ is a degree $3$ cyclic cover of $\mathbb P^5$ ramified over $Z$. Let us denote $\tau:[a_0:\cdots:a_6]\mapsto[a_0:\cdots:a_5:\xi a_6]$, with $\xi$ a primitive $3^{rd}$ root of $1$, the cover automorphism.\\
\indent For any $[P]\in F_2(X_Z)$, $\pi_{p_0}:\pi_{p_0}^{-1}(\pi_{p_0}(P))\rightarrow \pi_{p_0}(P)$ is a degree $3$ cyclic cover ramified over the cubic curve $\pi_{p_0}(P)\cap Z$. It contains the three sections $P, \tau(P),\tau^2(P)$ which in turn all contain (set-theoretically) the ramification curve $\pi_{p_0}(P)\cap Z$ so it is a line i.e. $([\{\pi_{p_0}(P)\cap Z\}_{{\rm red}}],[\pi_{p_0}(P)])\in F_0(Z)$.\\
\indent Conversely, for any $([\ell],[P])\in F_0(Z)$, $\pi_{p_0|X_Z}^{-1}(P)\rightarrow P$ is a degree $3$ cyclic cover ramified over $\{\ell\}^3$; so it consists of $3$ surfaces isomorphic each to $P$ i.e. $3$ planes. To make it even more explicit, if $P=\{X_0=X_1=X_2=0\}$ and $\ell=\{X_0=X_1=X_2=X_3=0\}$, then $\pi_{p_0|X_Z}^{-1}(P)$ is defined in $\pi_{p_0}^{-1}(P)\simeq{\rm Span}(P,p_0)\simeq \mathbb P^3$ by $X_6^3-aX_3^3$ for some $a\neq 0$ (since $Z$ contains no plane) and we have $X_6^3-aX_3^3=(X_6-bX_3)(X_6-b'X_3)(X_6-b''X_3)$ where $b,b',b''$ are the distinct roots of $y^3=a$. So $\pi:F_2(X_Z)\rightarrow F_0(Z)$ is \'etale of degree $3$.\\
\indent (2) The equation ${\rm eq}_Z$ defines a section $\sigma_{{\rm eq}_Z}\in H^0(t^*{\rm Sym}^3\mathcal E_3)\simeq H^0({\rm Sym}^3\mathcal E_3)$ and by projection in (\ref{ex_seq_def_F}) a section $\overline{\sigma_{{\rm eq}_Z}}$ of $\mathcal F$ whose zero locus is $F_0(Z)$. Restricting (\ref{ex_seq_def_F}) to $F_0(Z)$ we see that $\sigma_{{\rm eq}_Z}$ induces a section of $(e^*\mathcal O_{G(2,H)}(-3)\otimes t^*\mathcal O_{G(3,H)}(3))_{|F_0(Z)}$ which vanishes nowhere since $Z$ contains no plane. Thus $$(e^*\mathcal O_{G(2,H)}(-3)\otimes t^*\mathcal O_{G(3,H)}(3))_{|F_0(Z)}\simeq \mathcal O_{F_0(Z)}.$$
\indent Now if $(e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1))_{|F_0(Z)}\simeq \mathcal O_{F_0(Z)}$, since $(e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1))_{|F_0(Z)}$ is the bundle of equations of $\ell_x\subset P_x$ for any $x=([\ell_x],[P_x])\in F_0(Z)$, for any nowhere vanishing section $s$ of $(e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1))_{|F_0(Z)}$, we would be able to define $3$ distinct sections of $\pi:F_2(X_Z)\rightarrow F_0(Z)$, namely (symbolically) $[x\mapsto \{X_6-\xi^k s(x)\}_{{\rm Span}(P_x,p_0)}]$, $k=0,1,2$. But according to \cite[Proposition 1.8]{Coll_cub}, $F_2(X)$ is connected for any $X$. Contradiction. So $(e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1))_{|F_0(Z)}$ is a non-trivial $3$-torsion line bundle.\\
\indent Moreover, we readily see that for any $[P]\in F_2(X_Z)$, $X_{6|P}\neq 0$ is an equation of the line $P\cap \mathbb P(H^*)$ i.e. $\pi^*(e^*\mathcal O_{G(2,H)}(-1)\otimes t^*\mathcal O_{G(3,H)}(1))_{|F_0(Z)}$ has a nowhere vanishing section i.e. is trivial.\\
\indent (3) When $F_0(Z)$ is smooth, since $\pi$ is \'etale, $F_2(X_Z)$ is also smooth. As $F_2(X_Z)$ is connected (by \cite[Proposition 1.8]{Coll_cub}), $F_2(X_Z)$ is irreducible and $\pi(F_2(X_Z))=F_0(Z)$ is also irreducible.
\end{proof}
\begin{remarque}\label{rmk_result_GK} {\rm That $F_0(Z)$ is smooth and irreducible, for $Z$ general, is proven in \cite[Lemma 4.3]{GK_geom_lines} without reference to $F_2(X_Z)$.}
\end{remarque}
\textit{}\\
\indent In \cite{GK_geom_lines}, the interest for the image $e(F_0(Z))\subset F_1(Z)$ stems from $e(F_0(Z))$ being the fixed locus of a rational self-map of the hyper-K\"ahler $4$-fold $F_1(Z)$ defined by Voisin (\cite{Voisin_map}).
\begin{proposition}\label{prop_image_F_0} For $Z$ general, the tangent map of $e_{F_0}:=e_{|F_0(Z)}:F_0(Z)\rightarrow F_1(Z)$ is injective, $e_{F_0}$ is the normalisation of $e_{F_0}(F_0(Z))$ and is an isomorphism onto its image outside a finite subset of $F_0(Z)$.\\
\indent Moreover $e_{F_0}(F_0(Z))$ is a (non-normal) Lagrangian surface of the hyper-K\"ahler $4$-fold $F_1(Z)$.
\end{proposition}
\begin{proof} (1) That $e_{F_0}$ is injective outside a finite number of points follows from a simple dimension count: let us introduce $I:=\{(([\ell],[P]),[Z])\in Fl(2,3,H)\times |\mathcal O_{\mathbb P^5}(3)|,\ \ell\subset Z\ {\rm and}\ Z\cap P=\ell\ {\rm set-theoretically}\}$ and $I_2:=\{(([\ell],[P_1],[P_2]),[Z])\in \mathbb P(\mathcal Q_2)\times_{G(2,H)}\mathbb P(\mathcal Q_2)\backslash \Delta_{\mathbb P(\mathcal Q_2)}\times |\mathcal O_{\mathbb P^5}(3)|,\ \ell\subset Z\ {\rm and}\ Z\cap P_i=\ell,\ i=1,2\ {\rm set-theoretically}\}$. As $Fl(2,3,H)$ and $\mathbb P(\mathcal Q_2)\times_{G(2,H)}\mathbb P(\mathcal Q_2)\backslash \Delta_{\mathbb P(\mathcal Q_2)}$ are homogeneous, the fibers of $p:I\rightarrow Fl(2,3,H)$ (resp. $p_2:I_2\rightarrow \mathbb P(\mathcal Q_2)\times_{G(2,H)}\mathbb P(\mathcal Q_2)\backslash \Delta_{\mathbb P(\mathcal Q_2)}$) are isomorphic to each other and are sub-linear systems of $|\mathcal O_{\mathbb P^5}(3)|$.\\
\indent Notice that, since $F_0(Z)$ is a surface for $Z$ general, we know that $dim(I)=dim(|\mathcal O_{\mathbb P^5}(3)|)+2$.\\
\indent Let us analyse the fiber of $p_2$. To do so, we can assume $\ell=\{X_2=\cdots=X_5=0\}$, $P_1=\{X_3=X_4=X_5=0\}$ and $P_2=\{X_2=X_4=X_5=0\}$. Then the condition $Z\cap P_1=\ell$ implies that ${\rm eq}_Z$ is of the form
\begin{equation}\label{normal_form_oscul}\begin{small}{\rm eq}_Z=\alpha X_2^3 + X_3Q_3 + X_4Q_4 +X_5Q_5 + \sum_{i=0}^2X_iD_i(X_3,X_4,X_5) + R(X_3,X_4,X_5)\end{small}
\end{equation}
where the $Q_i(X_0,X_1,X_2)$ are quadratic forms in $X_0,X_1,X_2$, $D_i$ are quadratic forms in $X_3,X_4,X_5$ and $R$ is a cubic form in $X_3,X_4,X_5$. Notice that this is the general form of a member of the fiber $p^{-1}([\ell],[P_1])$, in particular $dim(p^{-1}([\ell],[P_1]))=dim(|\mathcal O_{\mathbb P^5}(3)|)+2-dim(Fl(2,3,H))=dim(|\mathcal O_{\mathbb P^5}(3)|)-9$.\\
\indent The additional condition $Z\cap P_2=\ell$ implies that $Q_3(X_0,X_1,0)=0$, $D_0(X_3,0,0)=0$, $D_1(X_3,0,0)=0$, which gives $3+1+1=5$ constraints. So that $dim(p_2^{-1}(([\ell],[P_1],[P_2])))=dim(p^{-1}([\ell],[P_1]))-5=dim(|\mathcal O_{\mathbb P^5}(3)|)-14$, hence $dim(I_2)=dim(p_2^{-1}(([\ell],[P_1],[P_2])))+2\times 3+dim(G(2,H))=dim(|\mathcal O_{\mathbb P^5}(3)|)$. As a result, the general fiber of $I_2\rightarrow |\mathcal O_{\mathbb P^5}(3)|$ is finite i.e. for $[Z]\in |\mathcal O_{\mathbb P^5}(3)|$ general there are only finitely many $\ell\subset Z$ such that there are at least two planes $P_1,P_2\subset \mathbb P^5$ such that $Z\cap P_i=\ell$, $i=1,2$ i.e. there is a finite set $\gamma\subset F_0(Z)$ such that $e_{|F_0}:F_0(Z)\backslash \gamma\rightarrow F_1(Z)$ is a bijection onto its image.\\
\indent (2) Let us give a description of $T_{F_0(Z),([\ell],[P])}$. We recall that the two projective bundle structures on $Fl(2,3,H)$ given by $e:Fl(2,3,H)\simeq \mathbb P(\mathcal Q_2)\rightarrow G(2,H)$ and $t:Fl(2,3,H)\simeq \mathbb P(\wedge^2\mathcal E_3)\rightarrow G(3,H)$ yield the following descriptions of the tangent bundle $$T_{Fl(2,3,H),([\ell],[P])}\simeq Hom(\langle\ell\rangle,H/\langle\ell\rangle)\oplus Hom(\langle P\rangle/\langle\ell\rangle,H/\langle P\rangle)$$ and $$T_{Fl(2,3,H), ([\ell],[P])}\simeq Hom(\langle P\rangle,H/\langle P\rangle)\oplus Hom(\langle \ell\rangle,\langle P\rangle/\langle \ell\rangle).$$
The isomorphism between the two takes the following form
$$\begin{tabular}{ccc}
$Hom(\langle\ell\rangle,H/\langle\ell\rangle)\oplus Hom(\langle P\rangle/\langle\ell\rangle,H/\langle P\rangle)$ &$\rightarrow$ &$Hom(\langle P\rangle,H/\langle P\rangle)\oplus Hom(\langle \ell\rangle,\langle P\rangle/\langle \ell\rangle)$\\
$(\varphi,\psi)$ &$\mapsto$ &$(\varphi_\perp+\psi,\varphi_\parallel)$
\end{tabular}$$
where $\varphi=(\varphi_\parallel,\varphi_\perp)$ is the decomposition corresponding to the choice of a decomposition $H/\langle\ell\rangle\simeq \langle P\rangle/\langle\ell\rangle\oplus H/\langle P\rangle$ coming from a decomposition $\langle P\rangle\simeq \langle\ell\rangle\oplus \langle P\rangle/\langle\ell\rangle$.\\
\indent Around $([\ell],[P])\in F_0(Z)$, the points of $Fl(2,3,H)$ are of the form $([(id_{\langle\ell\rangle}+\varphi)(\langle\ell\rangle)],[(id_{\langle P\rangle}+\varphi_\perp+\psi)(\langle P\rangle)])$. Let us choose an equation $\lambda\in \langle P\rangle^*$ (a generator of $(\langle P\rangle/\langle\ell\rangle)^*$) of $\ell\subset P$, such that ${\rm eq}_Z(x,x,x)=\lambda(x)^3$ for any $x\in \langle P\rangle$.\\
\indent The first order deformation of this equation to an equation of $(id_{\langle\ell\rangle}+\varphi)(\langle\ell\rangle)\subset (id_{\langle P\rangle}+\varphi_\perp+\psi)(\langle P\rangle)$ is given by $\lambda-\varphi^*(\lambda)$ so that the point associated to $(\varphi,\psi)$ belongs to $F_0(Z)$ if and only if
$${\rm eq}_Z(x+\varphi_\perp(x)+\psi(x),x+\varphi_\perp(x)+\psi(x),x+\varphi_\perp(x)+\psi(x))=(1+c(\varphi,\psi))(\lambda(x)-\varphi^*(\lambda)(x))^3\ \forall x\in \langle P\rangle$$ for some term $c(\varphi,\psi)=O(\varphi,\psi)$ constant on $\langle P\rangle$. So at the first order, we get
\begin{equation}\label{descript_T_F_0} {\rm eq}_Z(x,x,\varphi_\perp(x)+\psi(x))=-\lambda(x)^2\varphi^*(\lambda)(x)+\frac 1 3 c(\varphi,\psi)\lambda(x)^3\ \forall x\in\langle P\rangle.
\end{equation}
The differential of the projection $e_{F_0}:F_0(Z)\rightarrow F_1(Z)$ is simply given by $(\varphi,\psi)\mapsto \varphi$.\\
\indent Let us introduce $$J:=\{(([\ell],[P]),[Z])\in Fl(2,3,H)\times |\mathcal O_{\mathbb P^5}(3)|,\ \ell\subset Z,\ Z\cap P=\ell\ {\rm and}\ T_{([\ell],[P])}e_{|F_0} {\rm is\ not\ injective}\}$$
and analyse the fibers of $p_J:J\rightarrow Fl(2,3,H)$, which are isomorphic to each other by homogeneity of $Fl(2,3,H)$.\\
\indent So we can assume $\ell=\{X_2=\cdots=X_5=0\}$ and $P=\{X_3=\cdots=X_5=0\}$ so that ${\rm eq}_Z$ is of the form (\ref{normal_form_oscul}) with $Q_i=a_iX_0^2+b_iX_1^2+c_iX_2^2+d_iX_0X_1+e_iX_0X_2+f_iX_1X_2$, $i=3,4,5$ for some $a_i,\dots,f_i$. We recall that for $\varphi=\left(\begin{smallmatrix}u_2 &v_2\\ u_3 &v_3\\ u_4 &v_4\\ u_5 &v_5\end{smallmatrix}\right)\in Hom(\langle\ell\rangle,H/\langle\ell\rangle)$ and $\psi=\left(\begin{smallmatrix}w_3\\ w_4\\ w_5\end{smallmatrix}\right)\in Hom(\langle P\rangle/\langle\ell\rangle,H/\langle P\rangle)$ the associated subspaces are $$\ell_{(\varphi,\psi)}=[\lambda,\mu,\lambda u_2+\mu v_2,\dots,\lambda u_5+\mu v_5],\ [\lambda,\mu]\in \mathbb P^1$$ $$P_{(\varphi,\psi)}=[\lambda,\mu,\nu,\lambda u_3+\mu v_3+\nu w_3, \lambda u_4+\mu v_4+\nu w_4,\lambda u_5+\mu v_5+\nu w_5]\ [\lambda,\mu,\nu]\in \mathbb P^2.$$
\indent Now, if $(0,\psi)\in T_{F_0(Z),([\ell],[P])}$, we have at the first order
$$\begin{aligned} {\rm eq}_{Z|P_{(0,\psi)}}&=\alpha\nu^3 +\sum_{i=3}^5\nu w_i(a_i\lambda^2+b_i\mu^2+c_i\nu^2+d_i\lambda\mu+e_i\lambda\nu+f_i\mu\nu)+O((\varphi,\psi)^2)\\
&=(\alpha+c_3w_3+c_4w_4+c_5w_5)\nu^3+[(e_3w_3+e_4w_4+e_5w_5)\lambda+(f_3w_3+f_4w_4+f_5w_5)\mu]\nu^2\\
&\ \ +(a_3w_3+a_4w_4+a_5w_5)\lambda^2\nu +(b_3w_3+b_4w_4+b_5w_5)\mu^2\\
&\ \ +(d_3w_3+d_4w_4+d_5w_5)\lambda\mu\nu+ O((\varphi,\psi)^2)\end{aligned}$$
so that looking at (\ref{descript_T_F_0}), $(0,\psi)\in T_{F_0(Z),([\ell],[P])}$ if and only if $${\rm rank}\left(\begin{smallmatrix}a_3 &a_4 &a_5\\ b_3 &b_4 &b_5\\ d_3 &d_4 &d_5\\ e_3 &e_4 &e_5\\ f_3 &f_4 &f_5\end{smallmatrix}\right)\leq 2$$ which defines a subset of codimension $(3-2)(5-2)=3$.\\
\indent So $J\subset I$ has codimension $3$. As $dim(I)=dim(|\mathcal O_{\mathbb P^5}(3)|)+2$, $J$ does not dominate $|\mathcal O_{\mathbb P^5}(3)|$ i.e. for the general $Z$, $e_{F_0}$ is an immersion.\\
\indent(3) Let us prove that $e_{F_0}(F_0(Z))$ is a Lagrangian surface of $F_1(Z)$. In \cite{Iliev-Manivel_cub_hyp_int_syst}, the following explicit description of the symplectic form $\mathbb C\cdot \Omega=H^{2,0}(F_1(Z))$ is given: let us introduce the following quadratic form on $\wedge^2T_{F_1(Z),[\ell]}$ with values in $Hom((\wedge^2\langle\ell\rangle)^{\otimes 2},\wedge^4(H/\langle\ell\rangle))$
$$\begin{aligned} K(u\wedge v,u'\wedge v')&= u(x)\wedge u'(y)\wedge v(x)\wedge v'(y) - u(y)\wedge u'(y)\wedge v(x)\wedge v'(x)\\
&\ \ +u(y)\wedge u'(x)\wedge v(y)\wedge v'(x) - u(x)\wedge u'(x)\wedge v(y)\wedge v'(y)\end{aligned}$$ where $(x,y)$ is a basis of $\langle\ell\rangle$. Let us also introduce the following skew-symmetric form
$$\begin{tabular}{lcll}
$\omega:$ &$\wedge^2T_{F_1(Z),[\ell]}$ &$\rightarrow$ &$(\wedge^2\langle\ell\rangle)^{\otimes 3}$\\
$ $ &$u\wedge v$ &$\mapsto$ &${\rm eq}_Z(x,x,u(y)){\rm eq}_Z(y,y,v(x)) -{\rm eq}_Z(x,x,v(y)){\rm eq}_Z(y,y,u(x))$\\
$ $ &$ $ &$ $ &$+2{\rm eq}_Z(x,y,u(y)){\rm eq}_Z(x,x,v(y))-2{\rm eq}_Z(x,x,u(y)){\rm eq}_Z(x,y,v(y))$\\
$ $ &$ $ &$ $ &$+2{\rm eq}_Z(y,y,u(x)){\rm eq}_Z(x,y,v(x))-2{\rm eq}_Z(x,y,u(x)){\rm eq}_Z(y,y,v(x)).$
\end{tabular}$$
According to \cite[Theorem 1]{Iliev-Manivel_cub_hyp_int_syst}, for $u,v\in T_{F_1(Z),[\ell]}$, $$K(u\wedge v,u\wedge v)=\omega(u\wedge v)\Omega_{[\ell]}(u,v).$$
As for a general point $([\ell],[P])\in F_0(Z)$, $\ell\subset Z$ is of the first type i.e. in reference to the above presentation (\ref{normal_form_oscul}) for $\ell=\{X_2=\cdots=X_5=0\}$, $P=\{X_3=X_4=X_5=0\}$, $\left|\begin{smallmatrix}a_3 &b_3 &d_3\\
a_4 &b_4 &d_4\\ a_5 &b_5 &d_5\end{smallmatrix}\right|\neq 0$, it is sufficient to prove the vanishing of $\Omega_{[\ell]}({\rm Im}(T_{([\ell],[P])}e_{F_0}),{\rm Im}(T_{([\ell],[P])}e_{F_0}))$ for such a line. So we can assume $\alpha=1$ and $$\begin{aligned}Q_3&=X_0^2+e_3X_0X_2+f_3X_1X_2+c_3X_2^2\\
Q_4&=X_0X_1+e_4X_0X_2+f_4X_1X_2+c_4X_2^2\\
Q_5&=X_1^2+e_5X_0X_2+f_5X_1X_2+c_5X_2^2.\end{aligned}$$
Then as above, for $\varphi=\left(\begin{smallmatrix}u_2 &v_2\\ u_3 &v_3\\ u_4 &v_4\\ u_5 &v_5\end{smallmatrix}\right)\in Hom(\langle\ell\rangle,H/\langle\ell\rangle)$ and $\psi=\left(\begin{smallmatrix}w_3\\ w_4\\ w_5\end{smallmatrix}\right)\in Hom(\langle P\rangle/\langle\ell\rangle,H/\langle P\rangle)$, we have $$\begin{aligned}{\rm eq}_{Z|P_{(\varphi,\psi)}} &= \nu^3+\sum_{i=3}^5(\lambda u_i+\mu v_i+\nu w_i)Q_i+O((\varphi,\psi)^2)\\
&=(1+c_3w_3+c_4w_4+c_5w_5)\nu^3+(c_3u_3+e_3w_3+c_4u_4+e_4w_4+c_5u_5+e_5w_5)\lambda\nu^2\\
&\ \ + (c_3v_3+f_3w_3+c_4v_4+f_4w_4+c_5v_5+f_5w_5)\mu\nu^2\\
&\ \ + (w_3+e_3u_3+e_4u_4+e_5u_5)\lambda^2\nu +(w_5+f_3v_3+f_4v_4+f_5v_5)\mu^2\nu\\
&\ \ +(w_4+f_3u_3+e_3v_3+f_4u_4+e_4v_4+f_5u_5+e_5v_5)\lambda\mu\nu\\
&\ \ +u_3\lambda^3+v_5\mu^3+(v_4+u_5)\lambda\mu^2 +(v_3+u_4)\lambda^2\mu + O((\varphi,\psi)^2)\end{aligned}$$
so that the description (\ref{descript_T_F_0}) of $T_{F_0(Z),([\ell],[P])}$ yields
$$\left\{\begin{aligned} c_3u_3+e_3w_3+c_4u_4+e_4w_4+c_5u_5+e_5w_5 &=-u_2\\
c_3v_3+f_3w_3+c_4v_4+f_4w_4+c_5v_5+f_5w_5 & = - v_2\\
w_3+e_3u_3+e_4u_4+e_5u_5 &= 0\\
w_5+f_3v_3+f_4v_4+f_5v_5 &=0\\
w_4+f_3u_3+e_3v_3+f_4u_4+e_4v_4+f_5u_5+e_5v_5 &=0\\
v_4 =-u_5;\ v_3 =-u_4;\ u_3=0;\ v_5 =0.
\end{aligned} \right.$$
The $7$ last equations yield $w_3=-(e_4u_4+e_5u_5)$, $w_4=(e_3-f_4)u_4+(e_4-f_5)u_5$, $w_5=f_3u_4+ f_4u_5$. Thus the first two equations give a system $\left\{\begin{aligned}\alpha u_4 + \beta u_5&=-u_2\\ \delta u_4- \alpha u_5&=-v_2\end{aligned}\right.$ where $\alpha=c_4-e_4f_4+e_5f_3$, $\beta=c_5-e_3e_5+e_4^2-e_4f_5+e_5f_4$ and $\delta=e_3f_4-f_4^2-e_4f_3+f_3f_5-c_3$; in particular the determinant $\Delta=-\alpha^2-\beta\delta$ of the $2\times 2$ system is non-zero for a general choice of the $(e_i,f_i,c_i)$ and $\left\{\begin{aligned}u_4&=\frac{1}{\Delta}(\alpha u_2+\beta v_2)\\
u_5&=\frac{1}{\Delta}(\delta u_2-\alpha v_2) \end{aligned}\right.$. So a basis of ${\rm Im}(T_{([\ell],[P])}e_{F_0})\subset T_{F_1(Z),[\ell]}$ is given by ($(u_2=1,v_2=0)$ and $(u_2=0,v_2=1)$) $$\begin{tabular}{lclc}
$\varphi_{u_2}:$ &$\epsilon_0$ &$\mapsto$ &$\epsilon_2 +\frac{\alpha}{\Delta}\epsilon_4 +\frac{\delta}{\Delta}\epsilon_5$\\
$ $ &$\epsilon_1$ &$\mapsto$ &$-\frac{\alpha}{\Delta}\epsilon_3-\frac{\delta}{\Delta}\epsilon_4$
\end{tabular}$$
and
$$\begin{tabular}{lclc}
$\varphi_{v_2}:$ &$\epsilon_0$ &$\mapsto$ &$\frac{\beta}{\Delta}\epsilon_4 -\frac{\alpha}{\Delta}\epsilon_5$\\
$ $ &$\epsilon_1$ &$\mapsto$ &$\epsilon_2-\frac{\beta}{\Delta}\epsilon_3+\frac{\alpha}{\Delta}\epsilon_4$
\end{tabular}$$
where $(\epsilon_0,\dots,\epsilon_5)$ is the (dual) basis associated to the choice of coordinates $X_i$'s. Then we readily compute $$K(\varphi_{u_2}\wedge\varphi_{v_2})=\left|\begin{matrix}1 &0 &0 &1\\ 0 &-\frac{\alpha}{\Delta} &0 &-\frac{\beta}{\Delta}\\ \frac{\alpha}{\Delta} &-\frac{\delta}{\Delta} &\frac{\beta}{\Delta} &\frac{\alpha}{\Delta}\\ \frac{\delta}{\Delta} &0 &-\frac{\alpha}{\Delta} &0\end{matrix}\right|=0$$
and $\omega(\varphi_{u_2}\wedge\varphi_{v_2})=\frac{5}{\Delta}\neq 0$ hence $\Omega_{[\ell]}(\varphi_{u_2},\varphi_{v_2})=0$.
\end{proof}
\begin{remarque}\label{rmk_result_GK2} {\rm In \cite{GK_geom_lines} it is also proven that $F_0(Z)\rightarrow e(F_0(Z))$ is the normalisation and that $e(F_0(Z))$ has $3780$ non-normal isolated singularities.}
\end{remarque}
As, for $Z$ general, $e_{F_0}$ is an immersion, $N_{F_0(Z)/F_1(Z)}:=e_{F_0}^*T_{F_1(Z)}/T_{F_0(Z)}$ is locally free. Moreover, since $e_{F_0}$ is, outside a codimension $2$ subset of $F_0(Z)$, an isomorphism onto its image and the latter is a Lagrangian subvariety of $F_1(Z)$, we get (outside a codimension $2$ subset, thus globally) an isomorphism $$\Omega_{F_0(Z)}\simeq N_{F_0(Z)/F_1(Z)}.$$
\indent Notice that $F_0(Z)$ naturally lives in $\mathbb P(\mathcal Q_{2|F_1(Z)})\subset Fl(2,3,H)$. We have the following
\begin{lemme}\label{lem_normal_bdl_intermediate} The following sequence is exact $$\begin{aligned}0\rightarrow e_{F_1}^*\mathcal O_{F_1(Z)}(-3)\otimes t_{F_1}^*\mathcal O_{G(3,H)}(3)\rightarrow e_{F_1}^*\mathcal O_{F_1}(-1)\otimes t_{F_1}^*({\rm Sym}^2\mathcal E_3\otimes\mathcal O_{G(3,H)}(1))_{|F_1(Z)}\\\rightarrow N_{F_0(Z)/\mathbb P(\mathcal Q_{2|F_1(Z)})}\rightarrow 0\end{aligned}$$ where $e_{F_1}:\mathbb P(\mathcal Q_{2|F_1(Z)})\rightarrow F_1(Z)$ and $t_{F_1}:\mathbb P(\mathcal Q_{2|F_1(Z)})\rightarrow G(3,H)$.
\end{lemme}
\begin{proof} We have seen that $F_0(Z)\subset Fl(2,3,H)$ is the zero locus of a section of $\mathcal F$ appearing in the sequence (\ref{ex_seq_def_F}). Taking the symmetric power of (\ref{ex_seq_taut_bundles_2_3}), we have the following commutative diagram with exact rows
$$\xymatrix@-1pc@M=-0.1em{0\ar[r] &e^*\mathcal O_{G(2,H)}(-3)\otimes t^*\mathcal O_{G(3,H)}(3)\ar[r]\ar[d] &e^*\mathcal O_{G(2,H)}(-3)\otimes t^*\mathcal O_{G(3,H)}(3)\ar[r]\ar[d] &0\ar[r]\ar[d] &0\\
0\ar[r] &e^*\mathcal O_{G(2,H)}(-1)\otimes t^*({\rm Sym}^2\mathcal E_3\otimes \mathcal O_{G(3,H)}(1))\ar[r] &t^*{\rm Sym}^3\mathcal E_3\ar[r] &e^*{\rm Sym}^2\mathcal E_2\ar[r] &0.}$$
The projection of the section $\sigma_{{\rm eq}_Z}\in H^0(t^*{\rm Sym}^3\mathcal E_3)$ induced by ${\rm eq}_Z$, to $e^*{\rm Sym}^2\mathcal E_2$ vanishes on $F_1(Z)$ by definition of $F_1(Z)$. So it induces a section of $e_{F_1}^*\mathcal O_{F_1(Z)}(-1)\otimes t_{F_1}^*({\rm Sym}^2\mathcal E_3\otimes \mathcal O_{G(3,H)}(1))\simeq (e^*\mathcal O_{G(2,H)}(-1)\otimes t^*({\rm Sym}^2\mathcal E_3\otimes \mathcal O_{G(3,H)}(1)))_{|\mathbb P(\mathcal Q_{2|F_1(Z)})}$. Now the snake lemma in the above diagram gives the result.
\end{proof}
The snake lemma in the following diagram with exact rows
$$\xymatrix@-1pc@M=-0.1em{0\ar[r] &T_{F_0(Z)}\ar[r]\ar[d]^{\cong} &T_{\mathbb P(\mathcal Q_{2|F_1(Z)})|F_0(Z)}\ar[r]\ar[d] &N_{F_0(Z)/\mathbb P(\mathcal Q_{2|F_1(Z)})}\ar[r]\ar[d] &0\\
0\ar[r] &T_{F_0(Z)}\ar[r] &e_{F_0}^*T_{F_1(Z)}\ar[r] &N_{F_0(Z)/F_1(Z)}\ar[r] &0.}$$
and the description of the relative tangent bundle of $e_{F_1}$ give
\begin{proposition}\label{prop_cotgt_bundle_ex_seq_F_0} The following sequence is exact $$\begin{small}\begin{aligned}0\rightarrow \mathcal O_{F_0}\rightarrow e_{F_0}^*(\mathcal Q_{2|F_1(Z)}\otimes \mathcal O_{F_1(Z)}(-1))\otimes t_{F_0}^*(\mathcal O_{G(3,H)}(1))_{|F_0}\rightarrow N_{F_0(Z)/\mathbb P(\mathcal Q_{2|F_1(Z)})}\rightarrow \Omega_{F_0(Z)}\rightarrow 0\end{aligned}\end{small}$$
\end{proposition}
We finish this section by computing the Hodge numbers of $F_0(Z)$.
\begin{proposition}\label{prop_h_1_F_0} We have $H^1(F_0(Z),\mathbb Z)=0$ for any $Z$ for which $F_0(Z)$ is smooth.
\end{proposition}
\begin{proof} For the universal variety of planes $r_{univ}:\mathcal F_2(\mathcal X)\rightarrow |\mathcal O_{\mathbb P^6}(3)|$, $R^3r_{univ,*}\mathbb Q$ is a local system over the open subset $\{[X]\in |\mathcal O_{\mathbb P^6}(3)|,\ F_2(X)\ {\rm is\ smooth}\}$ which, by Proposition \ref{prop_etale_cover_F_2_F_0}, contains an open subset of the locus of cyclic cubic $5$-folds.\\
\indent As a consequence, the Abel-Jacobi isomorphism $q_*p^*:H^3(F_2(X),\mathbb Q)\xrightarrow{\sim} H^5(X,\mathbb Q)$ given by the result of Collino (Theorem \ref{thm_Collino_intro}) for the general $X$, extends to the case of the general cyclic cubic $5$-fold.\\
\indent But, as noticed in the proof of Proposition \ref{prop_etale_cover_F_2_F_0}, for any $[P]\in F_0(Z)$, the associated cycle $q(p^{-1}(\pi^{-1}([P])))$ on $X_Z$ is the complete intersection cycle ${\rm Span}(P,p_0)\cap X_Z$, which belongs to a family of cycles parametrised by a rational variety, namely $\{[\Pi]\in G(4,V),\ p_0\in \Pi\}\simeq G(3,H)$. Now, as an abelian variety contains no rational curve, the Abel-Jacobi map $\Phi:G(3,H)\rightarrow J^5(X_Z)$, $[P]\mapsto [{\rm Span}(P,p_0)\cap X_Z] - [{\rm Span}(P_0,p_0)\cap X_Z]$ ($[P_0]$ being a reference point) is constant. Hence the restriction $\Phi_{(\pi_*,{\rm id}_{X_Z})\mathbb P(\mathcal E_3)}:F_0(Z)\rightarrow J^5(X_Z)$ of $\Phi$ to the sub-family $(\pi_*,{\rm id}_{X_Z})\mathbb P(\mathcal E_3)\subset F_0(Z)\times X_Z$ (of planes $P$ such that ${\rm Span}(P,p_0)\cap X_Z$ consists of $3$ planes) is constant i.e. $q_*p^*\pi^*:H^3(F_0(Z),\mathbb Z)\rightarrow H^5(X_Z,\mathbb Z)$ is trivial.\\
\indent As $\pi$ is \'etale, $\pi^*:H^3(F_0(Z),\mathbb Q)\rightarrow H^3(F_2(X_Z),\mathbb Q)$ is injective so that the trivial map $q_*p^*\pi^*$ is the composition of an injective map followed by an isomorphism; hence $H^3(F_0(Z),\mathbb Q)=0$.
\end{proof}
We can then compute the rest of the Hodge numbers:
\begin{enumerate}
\item again using the package Schubert2 of Macaulay2, we can use the Koszul resolution of $\mathcal O_{F_0(Z)}$ by $\wedge^i\mathcal F^*$ (where $\mathcal F$ is defined by (\ref{ex_seq_def_F})) to compute $\chi(\mathcal O_{F_0(Z)})=1071$ with the following code:
\begin{verbatim}
loadPackage "Schubert2"
G=flagBundle{3,3}
(Q,E)=bundles G
wE=exteriorPower(2,E)
P=projectiveBundle' wE
p=P.StructureMap
pl=exteriorPower(3,E)
pol=p^*pl**dual(OO_P(1))
F=p^*symmetricPower(3,E)-symmetricPower(3,pol)
chi(exteriorPower(0,dual(F)))-chi(exteriorPower(1,dual(F)))
+chi(exteriorPower(2,dual(F)))-chi(exteriorPower(3,dual(F)))
+chi(exteriorPower(4,dual(F)))-chi(exteriorPower(5,dual(F)))
+chi(exteriorPower(6,dual(F)))-chi(exteriorPower(7,dual(F)))
+chi(exteriorPower(8,dual(F)))-chi(exteriorPower(9,dual(F)))
\end{verbatim}
so we get $h^2(\mathcal O_{F_0(Z)})=1070$;\\
\item Then as $\pi$ is \'etale of degree $3$ we get $\chi_{top}(F_0(Z))=\frac 1 3 \chi_{top}(F_2(X_Z))=4347$. So $h^{1,1}(F_0(Z))=2207$.
\end{enumerate}
\section*{Acknowledgments}
I would like to thank Hseuh-Yung Lin for pointing out to me the article \cite{Iliev-Manivel_cub_hyp_int_syst} some years ago. I would also like to thank Pieter Belmans for explaining how to use Sage to decompose the tensor powers of $\mathcal E_3$ into irreducible modules and the anonymous referee for his remarks.\\
\indent Finally I am grateful to the gracious Lord for His care.\\
\indent I was partially supported by an NSF Grant, a Simons Investigator Award HMS, a Simons Collaboration Award HMS, the HSE University Basic Research Program, and the Ministry of
Education and Science of the Republic of Bulgaria through the Scientific Program “Enhancing the
Research Capacity in Mathematical Sciences (PIKOM)” No. DO1-67/05.05.2022.
\noindent \begin{tabular}[t]{l}
\textit{rene.mboro@polytechnique.edu}\\
UMiami Miami, HSE Moscow,\\
Institute of Mathematics and Informatics, Bulgarian Academy of Sciences,\\
Acad. G. Bonchev Str. bl. 8, 1113, Sofia, Bulgaria.
\end{tabular}\\
\end{document} |
\begin{document}
\title{Rank 2 vector bundles on ind-grassmannians}
\author[I.Penkov]{\;Ivan~Penkov}
\address{
Jacobs University Bremen\footnote{International University Bremen prior
to Spring 2007} \\
School of Engineering and Science,
Campus Ring 1,
28759 Bremen, Germany}
\email{i.penkov@iu-bremen.de}
\author[Tikhomirov]{\;Alexander~S.~Tikhomirov}
\address{
Department of Mathematics\\
State Pedagogical University\\
Respublikanskaya Str. 108
\newline 150 000 Yaroslavl, Russia}
\email{astikhomirov@mail.ru}
\begin{flushright}
\begin{tabular}{l}
To Yuri Ivanovich Manin\\
on the occasion of his 70$^{th}$\\
birthday
\end{tabular}
\end{flushright}
\maketitle
\thispagestyle{empty}
\section{Introduction}
\label{Introduction}
The simplest example of an ind-Grassmannian is the infinite projective
space $\mathbf P^\infty$.
The Barth-Van de Ven-Tyurin (BVT) Theorem, proved more than 30 years
ago \cite{BV}, \cite{T},
\cite{Sa} (see also a recent proof by A. Coand\u a and G. Trautmann,
\cite{CT}), claims that any vector bundle of finite rank on $\mathbf
P^\infty$ is isomorphic to a
direct sum of line bundles. In the last decade natural examples of
infinite flag varieties (or flag
ind-varieties) have arisen as homogeneous spaces of locally linear
ind-groups, \cite{DPW},
\cite{DiP}. In the present paper we concentrate our attention on the
special case of
ind-Grassmannians, i.e. to inductive limits
of Grassmannians of growing dimension. If $V=\displaystyle\bigcup_{n>k}
V^n$ is a
countable-dimensional vector space, then the ind-variety
$\mathbf G(k;V)=\displaystyle\lim_\to G(k;V^n)$ (or
simply $\mathbf G(k;\infty)$) of $k$-dimensional subspaces of $V$ is of
course an
ind-Grassmannian: this is the simplest example beyond $\mathbf
P^\infty=\mathbf G(1;\infty)$.
A significant difference between $\mathbf G(k;V)$ and a general
ind-Grassmannian
$\mathbf X=\displaystyle\lim_\to G(k_i;V^{n_i})$ defined via a sequence
of embeddings
\begin{equation}\label{eq1}
G(k_1;V^{n_1})\stackrel{\varphi_1}{\longrightarrow}G(k_2;V^{n_2})
\stackrel{\varphi_2}{\longrightarrow}\dots\stackrel{\varphi_{m-1}}{\longrightarrow}G(k_m;V^{n_m})
\stackrel{\varphi_m}{\longrightarrow}\dots,
\end{equation}
is that in general the morphisms $\varphi_m$ can have arbitrary
degrees. We say that the ind-Grassmannian $\mathbf X$ is
\emph{twisted} if $\deg\varphi_m>1$ for infinitely many $m$, and that
$\mathbf X$ is \emph{linear} if $\deg\varphi_m=1$ for almost all $m$.
\begin{conjecture}\label{con1}
Let the ground field be $\CC$, and let $\mathbf E$ be a vector bundle of
rank $r\in\ZZ_{>0}$ on an
ind-Grassmannian $\mathbf X=\displaystyle\lim_\to G(k_m;V^{n_m})$, i.e.
$\mathbf E=\displaystyle\lim_\gets E_m$, where $\{E_m\}$ is an inverse
system of vector bundles
of (fixed) rank $r$ on $G(k_m;V^{n_m})$. Then
\begin{itemize}
\item[(i)] $\mathbf E$ is semisimple: it is isomorphic to a direct sum
of simple vector bundles
on $\mathbf X$, i.e. vector bundles on $\mathbf X$ with no non-trivial
subbundles;
\item[(ii)] for $m\gg0$ the restriction of each simple bundle $\mathbf
E$ to
$G(k_m,V^{n_m})$ is a homogeneous vector bundle;
\item[(iii)] each simple bundle $\mathbf E'$ has rank 1 unless $\mathbf
X$ is isomorphic to
$\mathbf G(k;\infty)$ for some $k$: in the latter case $\mathbf E'$,
twisted by a suitable
line bundle,
is isomorphic to a simple subbundle of the tensor algebra
$T^{\cdot}(\mathbf S)$, $\mathbf S$
being the tautological bundle of rank $k$ on $\mathbf G(k;\infty)$;
\item[(iv)] each simple bundle $\mathbf E$ (and thus each vector bundle
of finite rank on
$\mathbf X$) is trivial whenever $\mathbf X$ is a twisted
ind-Grassmannian.
\end{itemize}
\end{conjecture}
The BVT Theorem and Sato's theorem about finite rank bundles on
$\mathbf G(k;\infty)$,
\cite{Sa}, \cite{Sa2}, as well as the results in \cite{DP}, are
particular cases of the above conjecture.
The purpose of the present note is to prove Conjecture \ref{con1} for
vector bundles of rank 2,
and also for vector bundles of arbitrary rank $r$ on linear
ind-Grassmannians $\mathbf X$.
In the 70's and 80's Yuri Ivanovich Manin taught us mathematics in (and
beyond) his seminar,
and the theory of vector bundles was a recurring topic (among many
others). In 1980, he asked
one of us (I.P.) to report on A. Tyurin's paper \cite{T}, and most
importantly to try to
understand this paper. The present note is a very preliminary progress
report.
\textbf{Acknowledgement. }We acknowledge the support and hospitality of
the Max Planck Institute for Mathematics in Bonn where the present
note was conceived. A. S. T. also acknowledges partial support from
Jacobs University Bremen. Finally, we thank the referee for a number of sharp comments.
\section{Notation and Conventions}
The ground field is $\CC$. Our notation is mostly standard: if $X$ is
an algebraic variety,
(over $\CC$), $\mathcal{O}_X$ denotes its structure sheaf, $\Omega^1_X$
(respectively $T_X$) denotes
the cotangent (resp. tangent) sheaf on X under the assumption that $X$
is smooth etc.
If $F$ is a sheaf on $X$, its cohomologies are denoted by $H^i( F)$,
$h^i(F):=\dim H^i(F)$, and
$\chi(F)$ stands for the Euler characteristic of $F$.
The Chern classes of $F$ are denoted by $c_i(F)$. If $f:X\to Y$ is a
morphism, $f^*$ and $f_*$
denote respectively the inverse and direct image functors of
$\mathcal{O}$-modules. All vector bundles
are assumed to have finite rank. We denote the dual of a sheaf of
$\mathcal O_X$-modules $F$
(or that of a vector space) by the superscript $^\vee$.
Furthermore, in what follows for any ind-Grassmannian $\mathbf X$
defined by \refeq{eq1}, no
embedding $\varphi_i$ is an isomorphism.
We fix a finite dimensional space $V$ and denote by $X$ the
Grassmannian $G(k;V)$ for $k<\dim V$. In the sequel we write sometimes $G(k;n)$
indicating simply the dimension of $V$. Below we will often consider
(parts of) the following diagram of flag varieties:
\begin{equation}\label{eqDiag}
\xymatrix{
&&Z:=\Fl(k-1,k,k+1;V) \ar[ld]_{\pi_1} \ar[dr]^{\pi_2} & \\
&Y:=\Fl(k-1,k+1;V)\ar[ld]_{p_1}\ar[rd]^{p_2}&&X:=G(k;V), \\
Y^1:=G(k-1;V)&&Y^2:=G(k+1;V)&\\
}
\end{equation}
under the assumption that $k+1<\dim V$. Moreover we reserve the letters
$X,Y,Z$ for the
varieties in the above diagram. By $S_k$, $S_{k-1}$, $S_{k+1}$ we
denote the tautological
bundles on $X$,$Y$ and $Z$, whenever they are defined ($S_k$ is defined
on
$X$ and $Z$, $S_{k-1}$ is defined on $Y^1$, $Y$ and $Z$, etc.). By
$\mathcal O_X(i)$, $i\in \ZZ$, we denote the isomorphism class (in the
Picard group $\operatorname{Pic}\nolimits X$)
of the line bundle $(\Lambda^k(S_k^\vee))^{\otimes i}$, where
$\Lambda^k$ stands for the
$k^{th}$ exterior power (in this case maximal exterior power as $\rk
S_k^\vee=k$). The Picard
group of $Y$ is isomorphic to the direct product of the Picard groups
of $Y^1$ and $Y^2$,
and by $\mathcal{O}_Y(i,j)$ we denote the isomorphism class of the line bundle
$p_1^*(\Lambda^{k-1}(S_{k-1}^\vee))^{\otimes i}
\otimes_{\mathcal{O}_Y}p_2^*(\Lambda^{k+1}(S_{k+1}^\vee))^{\otimes j}$.
If $\varphi:X=G(k;V)\to X':=G(k;V')$ is an embedding, then
$\varphi^*\mathcal{O}_{X'}(1)\simeq \mathcal{O}_X(d)$
for some $d\in\ZZ_{\geq 0}$: by definition $d$ is the \emph{degree}
$\deg\varphi$ of $\varphi$.
We say that $\varphi$ is linear if $\deg\varphi=1$. By a \textit{projective
subspace}
(in particular a \emph{line}, i.e. a 1-dimensional projective subspace)
of $X$ we mean a
linearly embedded projective space into $X$. It is well known that all
such are Schubert
varieties of the form
$\{V^k\in X| V^{k-1}\subset V^k\subset V^t\}$ or $\{V^k\in X|
V^i\subset V^k\subset V^{k+1}\}$,
where $V^k$ is a variable $k$-dimensional subspace of $V$, and
$V^{k-1}$, $V^{k+1}$, $V^t$, $V^i$ are fixed subspaces of $V$ of
respective dimensions
$k-1$, $k+1$, $t$, $i$. (Here and in what follows $V^t$ always denotes
a vector space of
dimension $t$). In other words, all projective subspaces of $X$ are of
the form
$G(1;V^t/V^{k-1})$ or $G(k-i, V^{k+1}/V^i)$.
Note also that $Y=\Fl(k-1,k+1;V)$ is the variety of lines in
$X=G(k;V)$.
\section{The linear case}
We consider the cases of linear and twisted ind-Grassmannians
separately. In the case of a
linear ind-Grassmannian, we show that Conjecture \ref{con1} is a
straightforward corollary of
existing results combined with the following proposition. We recall,
\cite{DP}, that a
\textit{standard extension} of Grassmannians is an embedding of the
form
\begin{equation}\label{eq31}
G(k;V)\to G(k+a;V\oplus \hat W), \quad \{ V^k\subset
\CC^n\}\mapsto\{V^k\oplus W\subset V\oplus\hat W\},
\end{equation}
where $W$ is a fixed $a$-dimensional subspace of a finite dimensional
vector space $\hat W$.
\begin{proposition}\label{linear embed}
Let $\varphi:X=G(k;V)\to X':=G(k';V')$ be an embedding of degree 1. Then
$\varphi$ is a standard extension, or $\varphi$ factors through a standard
extension $\mathbb{P}^r\to G(k';V')$ for some $r$.
\end{proposition}
\begin{proof}
We assume that $k\leq n-k$, $k\leq n'-k'$, where $n=\dim V$ and
$n'=\dim V'$, and use induction on $k$. For $k=1$ the statement is obvious as
the image of $\varphi$ is a projective subspace of $G(k';V')$ and hence
$\varphi$ is a standard extension. Assume that the statement is true for
$k-1$. Since $\deg \varphi=1$, $\varphi$ induces an embedding $\varphi_Y:Y\to Y'$,
where $Y=\Fl(k-1,k+1;V)$ is the variety of lines in $X$ and
$Y':=\Fl(k'-1,k'+1;V')$ is the variety of lines in $X'$. Moreover, clearly we have
a commutative diagram of natural projections and embeddings
\[
\xymatrix{
&Z\ar[rrr]^{\varphi_Z}\ar[dl]_{\pi_1}\ar[dr]^{\pi_2}&&&Z'\ar[dl]_{\pi_1'}\ar[dr]^{\pi_2'}&
\\
Y\ar[dr]&&X\ar[dr]&Y'&&X',\\
&\ar[r]_{\varphi_Y}&\ar[ur]&\ar[r]_{\varphi}&\ar[ur]&
}
\]
where $Z:=\Fl(k-1,k,k+1;V)$ and $Z':=\Fl(k'-1,k',k'+1;V')$.
We claim that there is an isomorphism
\begin{equation}\label{eqLE1}
\varphi^*_Y\mathcal{O}_{Y'}(1,1)\simeq\mathcal{O}_Y(1,1).
\end{equation}
Indeed, $\varphi^*_Y\mathcal{O}_{Y'}(1,1)$ is determined up to isomorphism by its
restriction to the fibers of $p_1$ and $p_2$ (see diagram
\refeq{eqDiag}), and therefore it is enough to check that
\begin{equation}\label{eqLE2}
\varphi^*_Y\mathcal{O}_{Y'}(1,1)_{|p_1^{-1}(V^{k-1})}\simeq\mathcal{O}_{p_1^{-1}(V^{k-1})}(1),
\end{equation}
\begin{equation}\label{eqLE21}
\varphi^*_Y\mathcal{O}_{Y'}(1,1)_{|p_2^{-1}(V^{k+1})}\simeq
\mathcal{O}_{p_2^{-1}(V^{k+1})}(1)
\end{equation}
for some fixed subspaces $V^{k-1}\subset V$, $V^{k+1}\subset V$. Note
that
the restriction of $\varphi$ to the projective subspace
$G(1;V/V^{k-1})\subset X$
is simply an isomorphism of $G(1;V/V^{k-1})$ with a projective subspace
of $X'$,
hence the map induced by $\varphi$ on the variety $G(2;V/V^{k-1})$ of
projective
lines in $G(1;V/V^{k-1})$ is an isomorphism with the Grassmannian of
2-dimensional
subspaces of an appropriate subquotient of $V'$. Note furthermore
that $p_1^{-1}(V^{k-1})$ is nothing but the variety of lines
$G(2;V/V^{k-1})$ in $G(1;V/V^{k-1})$, and that the image of $G(2;V/V^{k-1})$ under
$\varphi$ is nothing but $\varphi_Y(p_1^{-1}(V^{k-1}))$. This shows that the
restriction of $\varphi^*_Y\mathcal{O}_{Y'}(1,1)$ to $G(2;V/V^{k-1})$ is
isomorphic to the restriction of $\mathcal{O}_Y(1,1)$ to $G(2;V/V^{k-1})$, and we obtain
\refeq{eqLE2}. The isomorphism \refeq{eqLE21} follows from a very
similar argument.
The isomorphism \refeq{eqLE1} leaves us with two alternatives:
\begin{equation}\label{eqLE3}
\varphi^*_{Y}\mathcal{O}_{Y'}(1,0)\simeq\mathcal{O}_Y \mathrm{~or~}
\varphi_Y^*\mathcal{O}_{Y'}(0,1)\simeq \mathcal{O}_Y,
\end{equation}
or
\begin{equation}\label{eqLE4}
\varphi^*_{Y}\mathcal{O}_{Y'}(1,0)\simeq\mathcal{O}_Y(1,0) \mathrm{~or~}
\varphi_Y^*\mathcal{O}_{Y'}(1,0)\simeq \mathcal{O}_Y(0,1).
\end{equation}
Let \refeq{eqLE3} hold, more precisely let
$\varphi_Y^*\mathcal{O}_{Y'}(1,0)\simeq\mathcal{O}_Y$.
Then $\varphi_Y$ maps each fiber of $p_2$ into a single point in $Y'$
(depending on the image in
$Y^2$ of this fiber), say $({(V')}^{k'-1}\subset {(V')}^{k'+1})$, and
moreover the space
${(V')}^{k'-1}$ is constant. Thus $\varphi$ maps $X$ into the projective
subspace $G(1;V'/{(V')}^{k'-1})$ of $X'$. If
$\varphi_Y^*\mathcal{O}_{Y'}(0,1)\simeq\mathcal{O}_Y$, then $\varphi$ maps $X$ into the projective subspace
$G(1;{(V')}^{k'+1})$ of $X'$. Therefore, the Proposition is proved in the case
\refeq{eqLE3} holds.
We assume now that \refeq{eqLE4} holds. It is easy to see that
\refeq{eqLE4} implies that
$\varphi$ induces a linear embedding $\varphi_{Y^1}$ of $Y^1:=G(k-1;V)$ into
$G(k'-1;V')$ or
$G(k'+1;V')$. Assume that $\varphi_{Y^1}:Y^1\to {(Y')}^1:=G(k'-1;V')$ (the
other case is
completely similar). Then, by the induction assumption, $\varphi_{Y^1}$ is
a standard extension
or factors through a standard extension $\mathbb{P}^r\to {(Y')}^1$. If
$\varphi_{Y^1}$ is a standard
extension corresponding to a fixed subspace $W\subset \hat W$, then
$\varphi_{Y^1}^* S_{k'-1}\simeq S_{k-1}\oplus
\left(W\otimes_\CC\mathcal{O}_{Y^1}\right)$ and we have a vector bundle monomorphism
\begin{equation}\label{eqLE5}
0\to\pi_1^*p_1^*\varphi_{Y^1}^*S_{k'-1}\to \pi_2^*\varphi^*S_{k'}.
\end{equation}
By restricting \refeq{eqLE5} to the fibers of $\pi_1$ we see that the
quotient line bundle
$\pi_2^*\varphi^*S_{k'}/\pi_1^*p_1^*\varphi_{Y^1}^*S_{k'-1}$ is isomorphic to $S_k/S_{k-1}\otimes \pi_1^*p_1^*\mathcal{L}$, where $\mathcal{L}$
is a line bundle on $Y^1$. Applying $\pi_{2*}$ we obtain
\begin{equation}\label{eqLE6}
0\to W\otimes_\CC \mathcal{O}_X\to\pi_{2*}(\pi_2^*\varphi^*S_{k'})=\varphi^*S_{k'}\to
\pi_{2*}((S_k/S_{k-1})\otimes\pi_1^*p_1^*\mathcal{L}) \to 0.
\end{equation}
Since $\rk\varphi^*S_{k'}=k'$ and $\dim W=k'-k$,
$\rk\pi_{2*}((S_k/S_{k-1})\otimes\pi_1^*p_1^*\mathcal{L})=k$, which implies immediately that $\mathcal{L}$ is
trivial. Hence \refeq{eqLE6} reduces to $0\to
W\otimes_{\CC}\mathcal{O}_X\to\varphi^*S_{k'}\to S_k\to 0$, and thus
\begin{equation}\label{eqLE7}
\varphi^*S_{k'}\simeq S_k\oplus \left(W\otimes_\CC\mathcal{O}_X\right)
\end{equation}
as there are no non-trivial extensions of $S_k$ by a trivial bundle.
Now \refeq{eqLE7} implies
that $\varphi$ is a standard extension.
It remains to consider the case when $\varphi_{Y^1}$ maps $Y^1$ into a
projective subspace
$\mathbb{P}^s$ of ${(Y')}^1$. Then $\mathbb{P}^s$ is of the form
$G(1;V'/{(V')}^{k'-2})$ for some
${(V')}^{k'-2}\subset V'$, or of the form $G(k'-1;{(V')}^{k'})$ for
some
${(V')}^{k'}\subset V'$. The second case is clearly impossible because
it would imply that
$\varphi$ maps $X$ into the single point ${(V')}^{k'}$. Hence
$\mathbb{P}^s=G(1;V'/{(V')}^{k'-2})$ and
$\varphi$ maps $X$ into the Grassmannian $G(2;V'/{(V')}^{k'-2})$ in
$G(k';V')$. Let $S_2'$ be the
rank 2 tautological bundle on $G(2;V'/{(V')}^{k'-2})$. Then its
restriction
$S'':=\varphi^*S_2'$ to any line $l$ in $X$ is isomorphic to
$\mathcal{O}_{l}\oplus\mathcal{O}_{l}(-1)$, and we
claim that this implies one of the two alternatives:
\begin{equation}\label{eqLE8}
S''\simeq\mathcal{O}_X\oplus\mathcal{O}_X(-1)
\end{equation}
or
\begin{equation}\label{eqLE9}
S''\simeq S_2 \text{~and~} k=2,\text{~or~} S''\simeq(V\otimes_\CC
\mathcal{O}_X)/S_2\text{~and~}k=n-k=2.
\end{equation}
Let $k\geq 2$. The evaluation map $\pi_1^*\pi_{1*}\pi_2^*S''\to
\pi_2^*S''$ is a monomorphism
of the line bundle $ \pi_1^*\mathcal{L}:=\pi_1^*\pi_{1*}\pi_2^*S''$ into
$\pi_2^*S''$
(here $\mathcal{L}:=\pi_{1*}\pi_2^*S''$). Restricting this monomorphism to the
fibers of $\pi_2$ we see
immediately that $\pi_1^*\mathcal{L}$ is trivial when restricted to those
fibers and is hence trivial.
Therefore $\mathcal{L}$ is trivial, i.e. $\pi_1^*\mathcal{L}=\mathcal{O}_Z$. Push-down to $X$
yields
\begin{equation}\label{eqLE10}
0\to\mathcal{O}_X\to S''\to\mathcal{O}_X(-1)\to 0,
\end{equation}
and hence \refeq{eqLE10} splits as $\Ext^1(\mathcal{O}_X(-1),\mathcal{O}_X)=0$.
Therefore \refeq{eqLE8} holds.
For $k=2$, there is an additional possibility for the above
monomorphisms to be of the form
$\pi_1^*\mathcal{O}_Y(-1,0)\to\pi_2^*S''$ (or of the form
$\pi_1^*\mathcal{O}_Y(0,-1)\to\pi_2^*S''$ if $n-k=2$)
which yields the option \refeq{eqLE9}.
If \refeq{eqLE8} holds, $\varphi$ maps $X$ into an appropriate projective
subspace of $G(2;V'/{(V')}^{k'-2})$ which is then a projective subspace
of $X'$, and if \refeq{eqLE9} holds, $\varphi$ is a standard extension
corresponding to a zero dimensional space $W$. The proof is now complete.
\end{proof}
We are ready now to prove the following theorem.
\begin{theorem} Conjecture \ref{con1} holds for any linear
ind-Grassmannian $\mathbf X$.
\end{theorem}
\begin{proof}
Assume that $\deg \varphi_m=1$ for all $m$, and apply Proposition
\ref{linear embed}. If
infinitely many $\varphi_m$'s factor through respective projective
subspaces, then $\mathbf X$ is
isomorphic to $\mathbf P^\infty$ and the BVT Theorem implies Conjecture
\ref{con1}. Otherwise,
all $\varphi_m$'s are standard extensions of the form \refeq{eq31}. There
are two alternatives:
$\displaystyle\lim_{m\to\infty}
k_{m}=\lim_{m\to\infty}(n_{m}-k_{m})=\infty$, or one of the
limits $\displaystyle\lim_{m\to \infty}k_{m}$ or
$\displaystyle\lim_{m\to \infty}(n_{m}-k_{m})$
equals $l$ for some $l\in \NN$. In the first case the claim of
Conjecture \ref{con1} is proved
in \cite{DP}: Theorem 4.2. In the second case $\mathbf X$ is isomorphic
to
$\mathbf G(l;\infty)$, and therefore Conjecture \ref{con1} is proved in
this case by E. Sato
in \cite{Sa2}.
\end{proof}
\section{Auxiliary results}
In order to prove Conjecture \ref{con1} for rank 2 bundles $\mathbf E$
on a twisted
ind-Grassmannian $\mathbf X=\displaystyle \lim_\to G(k_m;V^{n_m})$, we
need to prove that the
vector bundle $\mathbf E=\displaystyle\lim_{\gets}E_m$ of rank 2 on
$\mathbf X$ is trivial,
i.e. that $E_m$ is a trivial bundle on $G(k_m;V^{n_m})$ for each $m$.
From this point on we assume that none of the Grassmannians
$G(k_m;V^{n_m})$ is a projective
space, as for a twisted projective ind-space Conjecture 1.1 is proved
in \cite{DP} for bundles
of arbitrary rank $r$.
The following known proposition gives a useful triviality criterion for
vector bundles of
arbitrary rank on Grassmannians.
\begin{prop}\label{prop31}
A vector bundle $E$ on $X=G(k;n)$ is trivial iff its restriction
$E_{|l}$ is trivial for every
line $l$ in $G(k;n)$, $l\in Y=\Fl(k-1,k+1;n)$.
\end{prop}
\begin{proof}
We recall the proof given in \cite{P}. It uses the well known fact that
the Proposition holds
for any projective space, [OSS, Theorem 3.2.1]. Let first $k=2$, $n=4$,
i.e. $X=G(2;4)$. Since
$E$ is linearly trivial, $\pi_2^*E$ is trivial along the fibers of
$\pi_1$ (we refer here to
diagram \refeq{eqDiag}). Moreover, $\pi_{1*}\pi_2^*E$ is trivial along
the images of the fibers
of $\pi_2$ in $Y$. These images are of the form $\mathbb{P}_1^1\times\mathbb{P}_2^1$,
where $\mathbb{P}_1^1$
(respectively $\mathbb{P}_2^1$) are lines in $Y^1:=G(1;4)$ and $Y^2:=G(3;4)$.
The fiber of $p_1$ is
filled by lines of the form $\mathbb{P}^1_2$, and thus $\pi_{1*}\pi_2^*E$ is
linearly trivial, and
hence trivial along the fibers of $p_1$. Finally the lines of the form
$\mathbb{P}_1^1$ fill $Y^1$,
hence ${p_1}_*\pi_{1*}\pi_2^*E$ is also a trivial bundle. This implies
that
$E=\pi_{2*}\pi_1^*p_1^*(p_{1*}\pi_{1*}\pi_2^*E)$ is also trivial.
The next case is the case when $k=2$ and $n$ is arbitrary, $n\geq 5$.
Then the above argument
goes through by induction on $n$ since the fiber of $p_1$ is isomorphic
to $G(2;n-1)$. The
proof is completed by induction on $k$ for $k\geq 3$: the base of $p_1$
is $G(k-1;n)$ and the
fiber of $p_1$ is $G(2;n-1)$.
\end{proof}
If $C\subset N$ is a smooth rational curve in an algebraic variety $N$
and $E$ is a vector
bundle on $N$, then by a classical theorem of Grothendieck,
$\displaystyle E_{|C}$ is isomorphic
to $\bigoplus_i\mathcal{O}_C(d_i)$ for some $d_1\geq d_2\geq\dots\geq d_{\rk
E}$. We call the ordered
$\rk E$-tuple $(d_1,\dots,d_{\rk E})$ \emph{the splitting type} of
$E_{|C}$ and denote it by
$\mathbf{d}_E(C)$. If $N=X=G(k;n)$, then the lines on $N$ are
parametrized by points $l\in Y$,
and we obtain a map
\[
Y\to \ZZ^{\rk E}\ :\ l\mapsto \mathbf{d}_E(l).
\]
By semicontinuity (cf. \cite[Ch.I, Lemma 3.2.2]{OSS}), there is a dense
open set $U_E\subset Y$ of lines with minimal splitting type with
respect to the lexicographical ordering on $\ZZ^{\rk E}$. Denote this
minimal splitting type by $\mathbf{d}_E$. By definition, $U_E=\{l\in Y|~
\mathbf{d}_E(l)=\mathbf{d}_E\}$ is the set of \emph{non-jumping} lines of
$E$, and its complement $Y\setminus U_E$ is the proper closed set of
\emph{jumping} lines.
A coherent sheaf $F$ over a smooth irreducible variety $N$ is called
$normal$ if for every open
set $U\subset N$ and every closed algebraic subset $A\subset U$ of
codimension at least 2 the
restriction map ${F}(U)\to {F}(U\smallsetminus A)$ is surjective. It is
well known that, since
$N$ is smooth, hence normal, a normal torsion-free sheaf $F$ on $N$ is
reflexive, i.e.
$F^{\lor\lor}=F$. Therefore, by \cite[Ch.II, Theorem 2.1.4]{OSS} $F$ is
necessarily a line
bundle (see \cite[Ch.II, 1.1.12 and 1.1.15]{OSS}).
\begin{theorem}\label{thSubbdl}
Let $E$ be a rank $r$ vector bundle of splitting type
$\mathbf{d}_E=(d_1,...,d_r),\ d_1\ge...\ge d_r,$ on $X=G(k;n)$.
If $d_s-d_{s+1}\ge2$ for some $s<r$, then there is a normal subsheaf
$F\subset E$ of rank $s$
with the following properties: over the open set
$\pi_2(\pi_1^{-1}(U_E))\subset X$ the
sheaf $F$ is a subbundle of $E$, and for any $l\in U_E$
$$
F_{|l}\simeq\overset{s}{\underset{i=1}\bigoplus}\mathcal{O}_{l}(d_i).
$$
\end{theorem}
\begin{proof}
It is similar to the proof of Theorem 2.1.4 of \cite[Ch.II]{OSS}.
Consider the vector bundle
$E'=E\bigotimes\mathcal{O}_X(-d_s)$ and the evaluation map
$\Phi:\pi_1^*\pi_{1*}\pi_2^*E'\to \pi_2^*E'$. The definition of $U_E$
implies that
$\Phi_{|\pi_1^{-1}(U_E)}$ is a morphism of constant rank $s$ and that
its image
${\rm \im}\Phi\subset \pi_2^*E'$ is a subbundle of rank $s$ over
$\pi_1^{-1}(U_E)$.
Let $M:=\pi_2^*E'/{\rm im}\Phi$, let $T(M)$ be the torsion subsheaf of
$M$, and
$F':=\ker(\pi_2^*E'\to M':=M/T(M))$. Consider the singular set $\operatorname{Sing}\nolimits
F'$ of the
sheaf $F'$ and set $A:=Z\smallsetminus\operatorname{Sing}\nolimits F'$. By the above, $A$ is
an open subset
of $Z$ containing $\pi_1^{-1}(U_E)$ and $f={\pi_2}_{|A}:A\to
B:=\pi_2(A)$ is a submersion
with connected fibers.
Next, take any point $l\in Y$ and put $ L:=\pi_1^{-1}(l)$. By
definition,
$L\simeq\mathbb{P}^1$, and we have
\begin{equation}\label{tangent}
{T_{Z/X}}_{|L}\simeq\mathcal{O}_{L}(-1)^{\oplus(n-2)},
\end{equation}
where $T_{Z/X}$ is the relative tangent bundle of Z over X. The
construction of the sheaves
$F'$ and $M$ implies that for any
$l\in U_E$:
${F'}^{\vee}_{|{L}}=\oplus_{i=1}^s\mathcal{O}_{L}(-d_i+d_s),\ \
{M'}_{|{L}} =\oplus_{i=s+1}^r\mathcal{O}_{L}(d_i-d_s)$.
This, together with (\ref{tangent}) and the condition
$d_s-d_{s+1}\ge2,$ immediately implies
that $H^0(\Omega^1_{A/B}\otimes{F'}^{\vee}\otimes M'_{|{L}})=0$. Hence
$H^0(\Omega^1_{A/B}\otimes{F'}^{\vee}\otimes M'_{|\pi_1^{-1}(U_E)})=0$,
and thus, since
$\pi_1^{-1}(U_E)$ is dense open in $Z$,
$\Hom(T_{A/B},\mathcal H om(F',M'_{|A}))=
H^0(\Omega^1_{A/B}\otimes{F'}^{\vee}\otimes M'_{|A})=0.$
Now we apply the Descent Lemma (see \cite[Ch.II, Lemma 2.1.3]{OSS}) to
the data
$(f_{|\pi_1^{-1}(U_E)}:\pi_1^{-1}(U_E)\to V_E,\ F'_{|\pi_1^{-1}(U_E)}
\subset E'_{|\pi_1^{-1}(U_E)})$. Then
$F:=(\pi_{2*}F')\otimes\mathcal{O}_X(-d_s)$
is the desired sheaf.
\end{proof}
\section{The case $\rk\mathbf{E}=2$}
In what follows, when considering a twisted ind-Grassmannian $\mathbf
X=\displaystyle\lim_\to G(k_m;V^{n_m})$ we set $G(k_m;V^{n_m})=X_m$.
\refth{thSubbdl} yields now the following corollary.
\begin{corollary}\label{d=(0,0)}
Let $\displaystyle\mathbf{E}=\lim_{\gets}E_m$ be a rank 2 vector bundle
on a twisted
ind-Grassmannian $\displaystyle\mathbf{X}=\lim_{\to}X_m$. Then there
exists $m_0\ge1$ such that $\mathbf{d}_{E_m}=(0,0)$ for any $m\ge m_0.$
\end{corollary}
\begin{proof}
Note first that the fact that $\mathbf X$ is twisted implies
\begin{equation}\label{c_1=0}
c_1(E_m)=0,\ m\ge1.
\end{equation}
Indeed, $c_1(E_m)$ is nothing but the integer corresponding to the line
bundle $\Lambda^2(E_m)$ in the identification of $\operatorname{Pic}\nolimits X_m$ with $\ZZ$.
As $\mathbf X$ is twisted,
$c_1(E_m)=\deg\varphi_m\deg\varphi_{m+1}\dots\deg\varphi_{m+k}c_1(E_{m+k+1})$ for any $k\geq 1$, in other words $c_1(E_m)$
is divisible by larger and larger integers and hence $c_1(E_m)=0$ (cf.
\cite[Lemma 3.2]{DP}). Suppose that for any $m_0\ge1$ there exists $m\ge
m_0$ such that $\mathbf{d}_{E_m}=(a_m,-a_m)$ with $a_m>0$. Then Theorem
\ref{thSubbdl} applies to $E_m$ with $s=1$, and hence $E_m$ has a
normal rank-1 subsheaf $F_m$ such that
\begin{equation}\label{F|l}
F_{m|l}\simeq\mathcal{O}_{l}(a_m)
\end{equation}
for a certain line $l$ in $X_m$. Since $F_m$ is a torsion-free normal
subsheaf of the vector bundle $E_m$, the sheaf $F_m$ is a line bundle,
i.e. $F_m\simeq\mathcal{O}_{X_m}(a_m)$. Therefore we have a monomorphism:
\begin{equation}\label{injectn}
0\to\mathcal{O}_{X_m}(a_m)\to E_m,\ \ \ a_m\ge1.
\end{equation}
This is clearly impossible. In fact, this monomorphism implies in view
of (\ref{c_1=0}) that any rational curve $C\subset X_m$ of degree
$\delta_m:=\deg\varphi_1\cdot...\cdot\deg\varphi_{m-1}$ has splitting type
$\mathbf{d}_{E_m}(C)=(a'_m,-a'_m)$, where $a'_m\ge a_m\delta_m\ge\delta_m$.
Hence, by semicontinuity, any line $l\subset X_1$ has splitting type
$\mathbf{d}_{E_1}(l)=(b,-b),\ \ b\ge\delta_m$. Since $\delta_m\to\infty$ as
$m\to\infty,$ this is a contradiction.
\end{proof}
We now recall some standard facts about the Chow rings of
$X_m=G(k_m;V^{n_m}),$
(see, e.g., \cite[14.7]{F}):
\begin{itemize}
\item[(i)] $A^1(X_m)=\operatorname{Pic}\nolimits(X_m)=\mathbb{Z}[\mathbb{V}_m]$,
$A^2(X_m)=\mathbb{Z}[\mathbb{W}_{1,m}]\oplus\mathbb{Z}[\mathbb{W}_{2,m}]$,
where
$\mathbb{\mathbb{V}}_m,\mathbb{W}_{1,m},\mathbb{W}_{2,m}$ are the
following
Schubert varieties:
$\mathbb{V}_m:=\{V^{k_m}\in X_m|\ \dim(V^{k_m}\cap V_0^{n_m-k_m})\ge1$
for a fixed subspace
$V_0^{n_m-k_m}$ of $V^{n_m}\}$,
$\mathbb{W}_{1,m}:=\{V^{k_m}\in X_m| $ $\dim (V^{k_m}\cap
V_0^{n_m-k_m-1})\ge1$
for a fixed subspace $V_0^{n_m-k_m-1}$ in $V^{n_m}\}$,
$\mathbb{W}_{2,m}:=\{{V}^{k_m}\in X_m|\ \dim({V}^{k_m}\cap
V_0^{n_m-k_m+1})\ge2$
for a fixed subspace $V_0^{n_m-k_m+1}$ of $V^{n_m}\}$;
\item[(ii)] $[\mathbb{V}_m]^2=[\mathbb{W}_{1,m}]+[\mathbb{W}_{2,m}]$ in
$A^2(X_m)$;
\item[(iii)]
$A_2(X_m)=\mathbb{Z}[\mathbb{P}^2_{1,m}]\oplus\mathbb{Z}[\mathbb{P}^2_{2,m}]$,
where the projective planes
$\mathbb{P}^2_{1,m}$ (called \emph{$\alpha$-planes}) and
$\mathbb{P}^2_{2,m}$
(called \emph{$\beta$-planes}) are respectively the Schubert varieties
$\mathbb{P}^2_{1,m}:=\{V^{k_m}\in X_m|\ V_0^{k_m-1}\subset
{V}^{k_m}\subset V_0^{k_m+2}$
for a fixed flag $V_0^{k_m-1}\subset V_0^{k_m+2}$ in $V^{n_m}\}$,
$\mathbb{P}^2_{2,m}:=\{V^{k_m}\in X_m|\ V_0^{k_m-2}\subset {V}^{k_m}\subset
V_0^{k_m+1}$ for a fixed flag $V_0^{k_m-2}\subset V_0^{k_m+1}$ in
$V^{n_m}\};$
\item[(iv)] the bases $[\mathbb{W}_{i,m}]$ and $[\mathbb{P}^2_{j,m}]$
are dual in the standard sense that
$[\mathbb{W}_{i,m}]\cdot[\mathbb{P}^2_{j,m}]=\delta_{i,j}.$
\end{itemize}
\begin{lemma}\label{c_2(E_m)=0}
There exists $m_1\in\ZZ_{>0}$ such that for any $m\ge m_1$ one of the
following holds:
\begin{itemize}
\item[(1)] $c_2({E_m}_{|\mathbb{P}^2_{1,m}})>0,$
$c_2({E_m}_{|\mathbb{P}^2_{2,m}})\le0$,
\item[(2)] $c_2({E_m}_{|\mathbb{P}^2_{2,m}})>0,$
$c_2({E_m}_{|\mathbb{P}^2_{1,m}})\le0$,
\item[(3)] $c_2({E_m}_{|\mathbb{P}^2_{1,m}})=0$,
$c_2({E_m}_{|\mathbb{P}^2_{2,m}})=0$.
\end{itemize}
\end{lemma}
\begin{proof}
According to (i), for any $m\ge1$ there exist
$\lambda_{1m},\lambda_{2m}\in\ZZ$ such that
\begin{equation}\label{c_2(E_m)}
c_2(E_m)=\lambda_{1m}[\mathbb{W}_{1,m}]+\lambda_{2m}[\mathbb{W}_{2,m}].
\end{equation}
Moreover, (iv) implies
\begin{equation}\label{lambda_jm}
\lambda_{jm}=c_2({E_m}_{|\mathbb{P}^2_{j,m}}),\ \ j=1,2.
\end{equation}
Next, (i) yields:
\begin{equation}\label{abcd}
\varphi_m^*[\mathbb{W}_{1,m+1}]=a_{11}(m)[\mathbb{W}_{1,m}]+a_{21}(m)[\mathbb{W}_{2,m}],\
\
\varphi_m^*[\mathbb{W}_{2,m+1}]=a_{12}(m)[\mathbb{W}_{1,m}]+a_{22}(m)[\mathbb{W}_{2,m}],
\end{equation}
where $a_{ij}(m)\in\mathbb{Z}$. Consider the $2\times2$-matrix
$A(m)=(a_{ij}(m))$
and the column vector $\Lambda_m=(\lambda_{1m},\lambda_{2m})^t.$ Then,
in view of (iv),
the relation (\ref{abcd}) gives: $\Lambda_m=A(m)\Lambda_{m+1}$.
Iterating this equation and denoting by $A(m,i)$ the $2\times2$-matrix
$A(m)\cdot A(m+1)\cdot...\cdot A(m+i),\ i\ge1,$ we obtain
\begin{equation}\label{Lambda_m}
\Lambda_m=A(m,i)\Lambda_{m+i+1}.
\end{equation}
The twisting condition
$\varphi_m^*[\mathbb{V}_{m+1}]=\deg\varphi_m[\mathbb{V}_{m}]$ together with (ii) implies:
$\varphi_m^*([\mathbb{W}_{1,m+1}]+[\mathbb{W}_{2,m+1}])=(\deg\varphi_m)^2([\mathbb{W}_{1,m}]+[\mathbb{W}_{2,m}])$.
Substituting (\ref{abcd}) into the last equality, we have:
$a_{11}(m)+a_{12}(m)=a_{21}(m)+a_{22}(m)=(\deg\varphi_m)^2,\ \ \ m\ge1.$
This means that the column vector ${v}=(1,1)^t$ is an eigenvector of
$A(m)$ with eigenvalue $(\deg\varphi_m)^2$. Hence, it is an
eigenvector of $A(m,i)$ with the
eigenvalue
$d_{m,i}=(\deg\varphi_m)^2(\deg\varphi_{m+1})^2...(\deg\varphi_{m+i})^2:$
\begin{equation}\label{eigen}
A(m,i){v}=d_{m,i}{v}.
\end{equation}
Notice that the entries of $A(m),\ m\ge1,$ are nonnegative integers
(in fact, from the definition of the Schubert varieties
$\mathbb{W}_{j,m+1}$ it immediately follows
that $\varphi_m^*[\mathbb{W}_{j,m+1}]$ is an effective cycle on $X_m$, so
that (\ref{abcd}) and
(iv) give
$0\le\varphi_m^*[\mathbb{W}_{i,m+1}]\cdot[\mathbb{P}^2_{j,m}]=a_{ij}(m)$);
hence also the entries of $A(m,i),\ m,i\ge1,$ are nonnegative
integers).
Besides, clearly $d_{m,i}\to\infty$ as $i\to\infty$ for any $m\ge1$.
This, together with (\ref{Lambda_m}) and (\ref{eigen}), implies that,
for $m\gg1$, $\lambda_{1m}$ and $\lambda_{2m}$ cannot both be nonzero
and have the same sign.
This together with (\ref{lambda_jm}) is equivalent to the statement of
the Lemma.
\end{proof}
In what follows we denote the $\alpha$-planes and the $\beta$-planes on
$X=G(2;4)$ respectively by $\mathbb{P}_\alpha^2$ and $\mathbb{P}_\beta^2$.
\begin{proposition}\label{not exist}
There exists no rank 2 vector bundle $E$ on the Grassmannian $X=G(2;4)$
such that:
\begin{itemize}
\item[(a)] $c_2(E)=a[\mathbb{P}^2_{\alpha}],\ \ a>0,$
\item[(b)] $E_{|\mathbb{P}^2_{\beta}}$ is trivial for a generic
$\beta$-plane
$\mathbb{P}^2_{\beta}$ on $X$.
\end{itemize}
\end{proposition}
\begin{proof} Now assume that there exists a vector bundle $E$ on $X$
satisfying the conditions (a) and (b)
of the Proposition. Fix a $\beta$-plane $P\subset X$ such that
\begin{equation}\label{E|Y}
E_{|P}\simeq\mathcal{O}_{P}^{\oplus2}.
\end{equation}
As $X$ is the Grassmannian of lines in $\mathbb{P}^3$, the plane $P$ is
the dual plane of a
certain plane $\tilde P$ in $\mathbb{P}^3$. Next, fix a point
$x_0\in\mathbb{P}^3\smallsetminus\tilde P$ and denote by $S$ the
variety of lines in $\mathbb{P}^3$ which contain $x_0$. Consider the variety
$Q=\{(x,l)\in\mathbb{P}^3\times X\ |\ x\in l\cap\tilde P\}$ with
natural projections
$p:Q\to S:(x,l)\mapsto\Span(x,x_0)$ and $\sigma:Q\to X:(x,l)\mapsto l$.
Clearly, $\sigma$ is
the blowing up of $X$ at the plane $P$, and the exceptional divisor
$D_P=\sigma^{-1}(P)$ is
isomorphic to the incidence subvariety of $P\times\tilde{P}$. Moreover,
one easily checks that
$Q\simeq\mathbb{P}(\mathcal{O}_{S}(1)\oplus T_{S}(-1))$, so that the
projection $p:Q\to S$
coincides with the structure morphism
$\mathbb{P}(\mathcal{O}_{S}(1)\oplus T_{S}(-1))\to S$.
Let $\mathcal{O}_Q(1)$ be the Grothendieck line bundle on $Q$ such
that $p_*\mathcal{O}_Q(1)=\mathcal{O}_{S}(1)\oplus T_{S}(-1)$.
Using the Euler exact triple on $Q$
\begin{equation}\label{Euler}
0\to\Omega^1_{Q/S}\to p^*(\mathcal{O}_{S}(1)\oplus T_{S}(-1))
\otimes\mathcal{O}_Q(-1)\to\mathcal{O}_Q\to 0,
\end{equation}
we find the $p$-relative dualizing sheaf
$\omega_{Q/S}:=\det(\Omega^1_{Q/S})$:
\begin{equation}\label{rel dual}
\omega_{Q/S}\simeq\mathcal{O}_Q(-3)\otimes p^*\mathcal{O}_{S}(2).
\end{equation}
Set $\mathcal{E}:=\sigma^*E$. By construction, for each $y\in S$ the
fiber $Q_y=p^{-1}(y)$ is
a plane such that $l_y=Q_y\cap D_P$ is a line, and, by (\ref{E|Y}),
\begin{equation}\label{triv on l}
\mathcal{E}_{|l_y}\simeq\mathcal{O}_{l_y}^{\oplus2}.
\end{equation}
Furthermore, $\sigma(Q_y)$ is an $\alpha$-plane in $X$, and from
(\ref{triv on l}) it follows
clearly that
$h^0(\mathcal{E}_{|Q_y}(-1))=h^0(\mathcal{E}^\vee_{|Q_y}(-1))=0$. Hence, in view of condition (a) of the Proposition, the sheaf $\mathcal{E}_{|Q_y}$ is the
cohomology sheaf of a monad
\begin{equation}\label{eqMonad}
0\to\mathcal{O}_{Q_y}(-1)^{\oplus
a}\to\mathcal{O}_{Q_y}^{\oplus(2a+2)}\to
\mathcal{O}_{Q_y}(1)^{\oplus a}\to0
\end{equation}
(see \cite[Ch. II, Ex. 3.2.3]{OSS}). This monad immediately implies the
equalities
\begin{equation}\label{cohomology}
h^1(\mathcal{E}_{|Q_y}(-1))=h^1(\mathcal{E}_{|Q_y}(-2))=a,\ \
h^1(\mathcal{E}_{|Q_y}\otimes\Omega^1_{Q_y})=2a+2,
\end{equation}
$$
h^i(\mathcal{E}_{|Q_y}(-1))=h^i(\mathcal{E}_{|Q_y}(-2))=
h^i(\mathcal{E}_{|Q_y}\otimes\Omega^1_{Q_y})=0,\ \ i\ne1.
$$
Consider the sheaves of $\mathcal{O}_{S}$-modules
\begin{equation}\label{E_i}
E_{-1}:=R^1p_*(\mathcal{E}\otimes\mathcal{O}_Q(-2)\otimes
p^*\mathcal{O}_{S}(2)),\ \ \
E_0:=R^1p_*(\mathcal{E}\otimes\Omega^1_{Q/S}), \ \ \
E_1:=R^1p_*(\mathcal{E}\otimes\mathcal{O}_Q(-1)).
\end{equation}
The equalities (\ref{cohomology}) together with Cohomology and Base
Change imply that $E_{-1},\ E_1$ and
$E_0$
are locally free $\mathcal{O}_{S}$-modules, and
$\rk(E_{-1})=\rk(E_1)=a,$ and $\rk(E_0)=2a+2$. Moreover,
\begin{equation}\label{R_i}
R^ip_*(\mathcal{E}\otimes\mathcal{O}_Q(-2))=
R^ip_*(\mathcal{E}\otimes\Omega^1_{Q/S})=R^ip_*(\mathcal{E}\otimes\mathcal{O}_Q(-1))=0
\end{equation}
for $i\ne 1$. Note that $\mathcal{E}^\vee\simeq\mathcal{E}$ as
$c_1(\mathcal{E})=0$ and
$\rk\mathcal{E}=2$. Furthermore, (\ref{rel dual}) implies that the
nondegenerate pairing ($p$-relative Serre duality)
$R^1p_*(\mathcal{E}\otimes\mathcal{O}_Q(-1))\otimes
R^1p_*(\mathcal{E}^\vee\otimes\mathcal{O}_Q(1)\otimes \omega_{Q/S})\to
R^2p_*\omega_{Q/S}=\mathcal{O}_{S}$ can be rewritten as $E_1\otimes
E_{-1}\to\mathcal{O}_{S}, $
thus giving an isomorphism
\begin{equation}\label{isom dual}
E_{-1}\simeq E_1^\vee.
\end{equation}
Similarly, since
$\mathcal{E}^\vee\simeq\mathcal{E}$ and $\Omega^1_{Q/S}\simeq
T_{Q/S}\otimes\omega_{Q/S}$,
$p$-relative Serre duality yields a nondegenerate pairing
$E_0\otimes E_0=R^1p_*(\mathcal{E}\otimes\Omega^1_{Q/S})\otimes
R^1p_*(\mathcal{E}\otimes\Omega^1_{Q/S})=
R^1p_*(\mathcal{E}\otimes\Omega^1_{Q/S})\otimes
R^1p_*(\mathcal{E}^\vee\otimes T_{Q/S}\otimes\omega_{Q/S})
\to R^2p_*\omega_{Q/S}=\mathcal{O}_{S}$.
Therefore $E_0$ is self-dual, i.e. $E_0\simeq E_0^\vee$, and in
particular $c_1(E_0)=0$.
Now, let $J$ denote the fiber product $Q\times_{S}Q$ with projections
$Q\overset{pr_1}\leftarrow J\overset{pr_2}\to Q$ such that $p\circ
pr_1=p\circ pr_2$.
Put $F_1\boxtimes F_2:=pr_1^*F_1\otimes pr_2^*F_2$ for sheaves $F_1$
and $F_2$ on $Q$,
and consider the standard $\mathcal{O}_J$-resolution of the structure
sheaf
$\mathcal{O}_{\Delta}$ of the diagonal $\Delta\hookrightarrow J$
\begin{equation}\label{resoln of diag}
0\to\mathcal{O}_Q(-1)\otimes
p^*\mathcal{O}_{S}(2)\boxtimes\mathcal{O}_Q(-2)\to
{\Omega^1}_{Q/S}(1)\boxtimes\mathcal{O}_Q(-1)\to\mathcal{O}_J\to
\mathcal{O}_{\Delta}\to0.
\end{equation}
Twist this sequence by the sheaf
$(\mathcal{E}\otimes\mathcal{O}_Q(-1))\boxtimes\mathcal{O}_Q(1)$
and apply the functor $R^ipr_{2*}$ to the resulting sequence. In view
of (\ref{E_i}) and
(\ref{R_i}) we obtain the following monad for $\mathcal{E}$:
\begin{equation}\label{monad1}
0\to p^*E_{-1}\otimes\mathcal{O}_Q(-1)\overset{\lambda}\to
p^*E_0\overset{\mu}
\to p^*E_1\otimes\mathcal{O}_Q(1)\to0,\ \ \ \ \ \ker(\mu)/{\rm
im}(\lambda)=\mathcal{E}.
\end{equation}
Put $R:=p^*h$, where $h$ is the class of a line in $S$. Furthermore,
set
$H:=\sigma^*H_X$, $[\mathbb{P}_\alpha]:=\sigma^*[\mathbb{P}^2_\alpha]$,
$[\mathbb{P}_\beta]:=\sigma^*[\mathbb{P}^2_\beta]$,
where $H_X$ is the class of a hyperplane section of $X$ (via the
Pl\"ucker embedding),
and respectively, $[\mathbb{P}^2_\alpha]$ and $[\mathbb{P}^2_\beta]$
are the classes of an $\alpha$- and
$\beta$-plane. Note that, clearly,
$\mathcal{O}_Q(H)\simeq\mathcal{O}_Q(1)$.
Thus, taking into account the duality (\ref{isom dual}), we rewrite the
monad (\ref{monad1}) as
\begin{equation}\label{monad2}
0\to p^*E_1^\vee\otimes\mathcal{O}_Q(-H)\overset{\lambda}\to
p^*E_0\overset{\mu}\to p^*E_1\otimes\mathcal{O}_Q(H)\to0,\ \ \ \ \ \
\ker(\mu)/{\rm im}(\lambda)\simeq\mathcal{E}.
\end{equation}
In particular, it becomes clear that \refeq{monad1} is a relative version of the monad \refeq{eqMonad}.
As a next step, we are going to express all Chern classes of the
sheaves in
(\ref{monad2}) in terms of $a$. We start by writing down the Chern
polynomials of the bundles
$p^*E_1\otimes\mathcal{O}_Q(H)$ and
$p^*E_1^\vee\otimes\mathcal{O}_Q(-H)$ in the form
\begin{equation}\label{Chern1}
c_t(p^*E_1\otimes\mathcal{O}_Q(H))=\prod_{i=1}^a(1+(\delta_i+H)t),\ \ \
c_t(p^*E_1^\vee\otimes\mathcal{O}_Q(-H))=\prod_{i=1}^a(1-(\delta_i+H)t),
\end{equation}
where $\delta_i$ are the Chern roots of the bundle $p^*E_1$. Thus
\begin{equation}\label{c,d}
cR^2=\sum_{i=1}^a\delta_i^2,\ \ dR=\sum_{i=1}^a\delta_i.
\end{equation}
for some $c,d\in\mathbb{Z}$.
Next we invoke the following easily verified relations in $A^\cdot(Q)$:
\begin{equation}\label{rel in A(Q)}
H^4=RH^3=2[pt],\ \ \ R^2H^2=R^2[\mathbb{P}_\alpha]=
RH[\mathbb{P}_\alpha]=H^2[\mathbb{P}_\alpha]=RH[\mathbb{P}_\beta]=H^2[\mathbb{P}_\beta]=[pt],
\end{equation}
$$
[\mathbb{P}_\alpha][\mathbb{P}_\beta]=R^2[\mathbb{P}_\beta]=R^4=R^3H=0,
$$
where $[pt]$ is the class of a point. This, together with (\ref{c,d}),
gives
\begin{equation}\label{sums}
\sum_{1\le i<j\le a}\delta_i^2\delta_j^2=
\sum_{1\le i<j\le a}(\delta_i^2\delta_j+\delta_i\delta_j^2)H=0,
\sum_{1\le i<j\le a}\delta_i\delta_jH^2=\frac{1}{2}(d^2-c)[pt],
\sum_{1\le i<j\le a}(\delta_i+\delta_j)H^3=2(a-1)d[pt].
\end{equation}
Note that, since $c_1(E_0)=0$,
\begin{equation}\label{Chern2}
c_t(p^*E_0)=1+bR^2t^2
\end{equation}
for some $b\in\mathbb{Z}$. Furthermore,
\begin{equation}\label{c_t(E)}
c_t(\mathcal{E})=1+a[\mathbb{P}_\alpha]t^2
\end{equation}
by the condition of the Proposition.
Substituting (\ref{Chern2}) and (\ref{c_t(E)}) into the polynomial
$f(t):=c_t(\mathcal{E})c_t(p^*E_1\otimes\mathcal{O}_Q(H))
c_t(p^*E_1^\vee\otimes\mathcal{O}_Q(-H))$, we have
$f(t)=(1+a[\mathbb{P}_\alpha]t^2)\prod_{i=1}^a(1-(\delta_i+H)^2t^2)$.
Expanding $f(t)$ in $t$ and using (\ref{c,d})-(\ref{sums}),
we obtain
\begin{equation}\label{f(t)2}
f(t)=1+(a[\mathbb{P}_\alpha]-cR^2-2dRH-aH^2)t^2+e[pt]t^4,\ \ \
\end{equation}
where
\begin{equation}\label{e}
e=-3c-a(2d+a)+(a-1)(a+4d)+2d^2.
\end{equation}
Next, the monad (\ref{monad2}) implies $f(t)=c_t(p^*E_0)$. A comparison
of (\ref{f(t)2}) with
(\ref{Chern2}) yields
\begin{equation}\label{c_2}
c_2(\mathcal{E})=a[\mathbb{P}_\alpha]=(b+c)R^2+2dRH+aH^2,
\end{equation}
\begin{equation}\label{c_4}
e=c_4(p^*E_0)=0.
\end{equation}
The relation (\ref{c_4}) is the crucial relation which enables us to
express the Chern classes
of all sheaves in (\ref{monad2}) just in terms of $a$.
More precisely, (\ref{c_2}) and (\ref{rel in A(Q)}) give
$0=c_2(\mathcal{E})[\mathbb{P}_\beta]=2d+a$, hence
$a=-2d$.
Substituting these latter equalities into (\ref{e}) we get
$e=-a(a-2)/2-3c$.
Hence $c=-a(a-2)/6$ by (\ref{c_4}). Since $a=-2d$, (\ref{c,d}) and the
equality $c=-a(a-2)/6$
give
$c_1(E_1)=-a/2,\ \ c_2(E_1)=(d^2-c)/2=a(5a-4)/24$. Substituting this
into the standard formulas
$e_k:=c_k(p^*E_1\otimes\mathcal{O}_Q(H))=\sum_{i=0}^2\binom{a-i}{k-i}R^iH^{k-i}c_i(E_1),
\ \ 1\le k\le4$, we obtain
\begin{equation}\label{ee_i}
e_1=-aR/2+aH,\ \ e_2=(5a^2/24-a/6)R^2+(a^2-a)(-RH+H^2)/2,
\end{equation}
$$e_3=(5a^3/24-7a^2/12+a/3)R^2H+(-a^3/4+3a^2/4-a/2)RH^2+(a^3/6-a^2/2+a/3)H^3,$$
$$
e_4=(-7a^4/144+43a^3/144-41a^2/72+a/3)[pt].
$$
It remains to write down explicitly $c_2(p^*E_0)$:
(\ref{rel in A(Q)}), (\ref{c_2}) and the relations $a=-2d$,
$c=-a(a-2)/6$ give
$a=c_2(\mathcal{E})[\mathbb{P}_\alpha]=b+c,$ hence
\begin{equation}\label{c_2(E_0)}
c_2(E_0)=b=(a^2+4a)/6
\end{equation}
by (\ref{Chern2}).
Our next and final step will be to obtain a contradiction by computing
the Euler characteristic
of the sheaf $\mathcal{E}$ in two different ways. We first compute the
Todd class
${\rm td}(T_Q)$ of
the bundle $T_Q$. From the exact triple dual to (\ref{Euler}) we find
$c_t(T_{Q/S})=1+(-2R+3H)t+(2R^2-4RH+3H^2)t^2$.
Next, $c_t(T_Q)=c_t(T_{Q/S})c_t(p^*T_S)$.
Hence $c_1(T_Q)=R+3H,\ c_2(T_Q)=-R^2+5RH+3H^2,\ c_3(T_Q)=-3R^2H+9H^2R,\
c_4(T_Q)=9[pt].$
Substituting into the formula for
the Todd class of $T_Q$, ${\rm
td}(T_Q)=1+\frac{1}{2}c_1+\frac{1}{12}(c_1^2+c_2)+\frac{1}{24}c_1c_2
-\frac{1}{720}(c_1^4-4c_1^2c_2-3c_2^2-c_1c_3+c_4)$, where
$c_i:=c_i(T_Q)$ (see, e.g.,
\cite[p.432]{H}), we get
\begin{equation}\label{td(T_Q)}
{\rm
td}(T_Q)=1+\frac{1}{2}R+\frac{3}{2}H+\frac{11}{12}RH+H^2+\frac{1}{12}HR^2+
\frac{3}{4}H^2R+\frac{3}{8}H^3+[pt].
\end{equation}
Next, by the hypotheses of the Proposition,
$c_1(\mathcal{E})=0,\ c_2(\mathcal{E})=a[\mathbb{P}_{\alpha}],\
c_3(\mathcal{E})=c_4(\mathcal{E})=0$.
Substituting this into the general formula for the Chern character of a
vector bundle $F$,
$$
{\rm
ch}(F)=\rk(F)+c_1+(c_1^2-2c_2)/2+(c_1^3-3c_1c_2-3c_3)/6+(c_1^4-4c_1^2c_2+4c_1c_3+2c_2^2-4c_4)/24, \ \
$$
$c_i:=c_i(F)$ (see, e.g., \cite[p.432]{H}), and using
(\ref{td(T_Q)}), we obtain by the Riemann-Roch Theorem for
$F=\mathcal{E}$
\begin{equation}\label{chi(E)}
\chi(\mathcal{E})=\frac{1}{12}a^2-\frac{23}{12}a+2.
\end{equation}
In a similar way, using (\ref{ee_i}), we obtain
\begin{equation}\label{chi(E1)+chi(E-1)}
\chi(p^*E_1\otimes\mathcal{O}_Q(H))+\chi(p^*E_1^\vee\otimes\mathcal{O}_Q(-H))=
\frac{5}{216}a^4-\frac{29}{216}a^3-\frac{1}{54}a^2+\frac{113}{36}a.
\end{equation}
Next,
in view of (\ref{c_2(E_0)})
and the equality $c_1(E_0)=0$ the Riemann-Roch Theorem for $E_0$
easily gives
\begin{equation}\label{chi(E_0)}
\chi(p^*E_0)=\chi(E_0)=-\frac{1}{6}a^2+\frac{4}{3}a+2.
\end{equation}
Together with (\ref{chi(E)}) and
(\ref{chi(E1)+chi(E-1)}) this yields
$$
\Phi(a):=\chi(p^*E_0)-(\chi(\mathcal{E})+
\chi(p^*E_1\otimes\mathcal{O}_Q(H))+\chi(p^*E_1^\vee\otimes\mathcal{O}_Q(-H)))=
-\frac{5}{216}a(a-2)(a-3)(a-\frac{4}{5}).
$$
The monad (\ref{monad2}) implies now $\Phi(a)=0.$ The only positive
integer roots of the
polynomial $\Phi(a)$ are $a=2$ and $a=3$.
However, (\ref{chi(E)}) implies $\chi(\mathcal{E})=-\frac{3}{2}$ for
$a=2$,
and (\ref{chi(E_0)}) implies $\chi(p^*E_0)=\frac{9}{2}$ for $a=3$.
This is a contradiction as the values of $\chi(\mathcal{E})$ and
$\chi(p^*E_0)$ are integers by
definition.
\end{proof}
We need a last piece of notation.
Consider the flag variety $\Fl(k_m-2,k_m+2;V^{n_m})$. Any point
$u=(V^{k_m-2},V^{k_m+2})\in \Fl(k_m-2,k_m+2;V^{n_m})$ determines a
standard extension
\begin{equation}\label{i_z}
i_{u}:\ X=G(2;4)\hookrightarrow X_m,
\end{equation}
\begin{equation}\label{eq}
W^2\mapsto V^{k_m-2}\oplus W^2\subset V^{k_m+2}=V^{k_m-2}\oplus
W^4\subset V^{n_m},
\end{equation}
where $W^2\in X=G(2;W^4)$ and an isomorphism $V^{k_m-2}\oplus W^4\simeq
V^{k_m+2}$ is fixed (clearly $i_{u}$ does not depend on the choice of
this isomorphism modulo $\Aut(X_m)$). We
clearly have isomorphisms of Chow groups
\begin{equation}\label{isomChow}
i_{u}^*:\ A^2(X_m)\overset{\sim}\to A^2(X),\ \ \
i_{u*}:\ A_2(X)\overset{\sim}\to A_2(X_m),
\end{equation}
and the flag variety $Y_m:=\Fl(k_m-1,k_m+1;V^{n_m})$ (respectively,
$Y:=\Fl(1,3;4)$) is the set of lines in $X_m$ (respectively, in $X$).
\begin{theorem}\label{th56}
Let $\displaystyle\mathbf{X} = \lim_{\to}X_m$ be a twisted ind-Grassmannian.
Then any vector bundle $\displaystyle\mathbf{E}=\lim_{\gets}E_m$ on $\mathbf{X}$ of
rank 2 is trivial, and hence Conjecture \ref{con1}(iv) holds for vector
bundles of rank 2.
\end{theorem}
\begin{proof}
Fix $m\ge\max\{m_0,m_1\},$ where $m_0$ and $m_1$ are as in Corollary
\ref{d=(0,0)} and
Lemma \ref{c_2(E_m)=0}. For $j=1,2$, let $E^{(j)}$ denote the
restriction of $E_m$ to a
projective plane of type $\mathbb{P}^2_{j,m}$,
$T^j\simeq\Fl(k_m-j,k_m+3-j,V^{n_m})$ be the variety of planes of the
form $\mathbb{P}^2_{j,m}$
in $X_m$, and $\Pi^j:=\{\mathbb{P}^2_{j,m}\in T^j|\
{E_m}_{|\mathbb{P}^2_{j,m}}$ is properly unstable
(i.e. not semistable)$\}.$ As semistability is an open condition,
$\Pi^j$ is a closed subset
of $T^j$.
(i) Assume that $c_2(E^{(1)})>0$. Then, since $m\ge m_1$, Lemma
\ref{c_2(E_m)=0} implies
$c_2(E^{(2)})\le0$.
(i.1) Suppose that $c_2(E^{(2)})=0$. If $\Pi^2\ne T^2$, then for any
$\mathbb{P}^2_{2,m}\in T^2\smallsetminus \Pi^2$ the corresponding
bundle $E^{(2)}$ is
semistable, hence $E^{(2)}$ is trivial as $c_2(E^{(2)})=0$, see
\cite[Prop. 2.3,(4)]{DL}. Thus, for a generic point $u\in
Fl(k_m-2,k_m+2;V^{n_m})$, the bundle $E=i_{u}^*E_m$ on $X=G(2;4)$ satisfies the conditions
of Proposition \ref{not exist}, which is a contradiction.
We therefore assume $\Pi^2=T^2$. Then for any $\mathbb{P}^2_{2,m}\in
T^2$ the corresponding bundle $E^{(2)}$ has a maximal destabilizing
subsheaf $0\to\mathcal{O}_{\mathbb{P}^2_{2,m}}(a)\to E^{(2)}.$ Moreover
$a>0$. In fact, otherwise the condition $c_2(E^{(2)})=0$ would imply that
$a=0$ and
$E^{(2)}/\mathcal{O}_{\mathbb{P}^2_{2,m}}=\mathcal{O}_{\mathbb{P}^2_{2,m}}$, i.e. $E^{(2)}$ would be trivial, in particular
semistable. Hence
\begin{equation}\label{a,-a}
\mathbf{d}_{E^{(2)}}=(a,-a).
\end{equation}
Since any line in $X_m$ is contained in a plane $\mathbb{P}^2_{2,m}\in
T^2$,
(\ref{a,-a}) implies $\mathbf{d}_{E_m}=(a,-a)$ with $a>0$ for $m>m_0$,
contrary to Corollary \ref{d=(0,0)}.
(i.2) Assume $c_2(E^{(2)})<0$. Since $E^{(2)}$ is not stable for any
$\mathbb{P}^2_{2,m}\in T^2$, its maximal destabilizing subsheaf
$0\to\mathcal{O}_{\mathbb{P}^2_{2,m}}(a)\to E^{(2)}$ clearly satisfies the
condition $a>0$, i.e. $E^{(2)}$ is properly unstable, hence $\Pi^2=T^2$.
Then we again obtain a contradiction as above.
(ii) Now we assume that $c_2(E^{(2)})>0$. Then, replacing $E^{(2)}$ by
$E^{(1)}$ and vice versa, we arrive to a contradiction by the same
argument as in case (i).
(iii) We must therefore assume $c_2(E^{(1)})=c_2(E^{(2)})=0$. Set
$D(E_m):=\{l\in Y_m|~\mathbf{d}_{E_m}(l)\ne(0,0)\}$ and $D(E):=\{l\in
Y|~\mathbf{d}_E(l)\ne(0,0)\}$. By Corollary \ref{d=(0,0)},
$\mathbf{d}_{E_m}=(0,0),$ hence $\mathbf{d}_E=(0,0)$ for a generic embedding $i_u:X\hookrightarrow X_m$. Then by deformation theory \cite{B}, $D(E_m)$ (respectively,
$D(E)$) is an effective divisor on $Y_m$ (respectively, on $Y$). Hence,
$\mathcal{O}_Y(D(E))=p_1^*\mathcal{O}_{Y^1}(a) \otimes
p_2^*\mathcal{O}_{Y^2}(b)$ for some $a,b\ge0$, where $p_1$, $p_2$ are as in diagram
\refeq{eqDiag}. Note that each fiber of $p_1$ (respectively, of $p_2$) is a plane
$\tilde{\mathbb{P}}^2_{\alpha}$ dual to some $\alpha$-plane
$\mathbb{P}^2_{\alpha}$ (respectively, a plane $\tilde{\mathbb{P}}^2_{\beta}$ dual to
some $\beta$-plane $\mathbb{P}^2_{\beta}$). Thus, setting
$D(E_{|\mathbb{P}^2_{\alpha}}):=\{l\in\tilde{\mathbb{P}}^2_{\alpha}|~\mathbf{d}_E(l)\ne(0,0)\}$,
$D(E_{|\mathbb{P}^2_{\beta}}):=\{l\in\tilde{\mathbb{P}}^2_{\beta}|~\mathbf{d}_E(l)\ne(0,0)\}$,
we obtain
$\mathcal{O}_{\tilde{\mathbb{P}}^2_{\alpha}}(D(E_{|\mathbb{P}^2_{\alpha}}))=
\mathcal{O}_Y(D(E))_{|\tilde{\mathbb{P}}^2_{\alpha}}=
\mathcal{O}_{\tilde{\mathbb{P}}^2_{\alpha}}(b),\ \ \
\mathcal{O}_{\tilde{\mathbb{P}}^2_{\beta}}(D(E_{|\mathbb{P}^2_{\beta}}))=
\mathcal{O}_Y(D(E))_{|\tilde{\mathbb{P}}^2_{\beta}}=
\mathcal{O}_{\tilde{\mathbb{P}}^2_{\beta}}(a).$ Now if
$E_{|\mathbb{P}^2_{\alpha}}$ is semistable, a theorem of
Barth \cite[Ch. II, Theorem 2.2.3]{OSS} implies that
$D(E_{|\mathbb{P}^2_{\alpha}})$ is a divisor of degree
$c_2(E_{|\mathbb{P}^2_{\alpha}})=a$ on
$\mathbb{P}^2_{\alpha}$.
Hence $a=c_2(E^{(1)})=0$ for a semistable $E_{|\mathbb{P}^2_{\alpha}}$.
If $E_{|\mathbb{P}^2_{\alpha}}$ is not semistable, it is unstable and
the equality
$\mathbf{d}_E(l)=(0,0)$ yields
$\mathbf{d}_{E_{|\mathbb{P}^2_{\alpha}}}=(0,0)$. Then the
maximal destabilizing subsheaf of $E_{|\mathbb{P}^2_{\alpha}}$ is
isomorphic to
$\mathcal{O}_{\mathbb{P}^2_{\alpha}}$ and, since
$c_2(E_{|\mathbb{P}^2_{\alpha}})=0,$
we obtain an exact triple
$0\to\mathcal{O}_{\mathbb{P}^2_{\alpha}}\to
E_{|\mathbb{P}^2_{\alpha}}\to \mathcal{O}_{\mathbb{P}^2_{\alpha}}\to
0$,
so that
$E_{|\mathbb{P}^2_{\alpha}}\simeq\mathcal{O}_{\mathbb{P}^2_{\alpha}}^{\oplus2}$
is semistable,
a contradiction. This shows that $a=0$ whenever
$c_2(E^{(1)})=c_2(E^{(2)})=0$. Similarly, $b=0$.
Therefore $D(E_m)=\emptyset$, and
Proposition \ref{prop31} implies that $E_m$ is trivial. Therefore
$\mathbf{E}$ is trivial as
well.
\end{proof}
In \cite{DP} Conjecture \ref{con1} (iv) was proved
not only when $\mathbf{X}$ is a twisted projective ind-space,
but also for finite rank bundles on special twisted ind-Grassmannians
defined through certain
homogeneous embeddings $\varphi_m$. These include embeddings of the form
\[
G(k;n)\to G(ka;nb)
\]
\[
V^k\subset V\mapsto V^k\otimes W^a\subset V\otimes W^b,
\]
where $W^a\subset W^b$ is a fixed pair of finite-dimensional spaces
with $a>b$, or of the form
\[
G(k;n)\to G\left(\frac{k(k+1)}{2};n^2\right)
\]
\[
V^k\subset V\mapsto S^2(V^k)\subset V\otimes V.
\]
More precisely, Conjecture \ref{con1} (iv) was proved in \cite{DP} for
twisted
ind-Grassmannians whose defining embeddings are homogeneous embeddings
satisfying some specific
numerical conditions relating the degrees $\deg\varphi_m$ with the pairs
of integers $(k_m,n_m)$.
There are many twisted ind-Grassmannians for which those conditions are
not satisfied. For
instance, this applies to the ind-Grassmannians defined by iterating
each of the following
embeddings:
\begin{eqnarray*}
G(k;n)\to G\left(\frac{k(k+1)}{2};\frac{n(n+1)}{2}\right)\\
V^k\subset V\mapsto S^2(V^k)\subset S^2(V), \\
G(k;n)\to G\left(\frac{k(k-1)}{2};\frac{n(n-1)}{2}\right)\\
V^k\subset V\mapsto \Lambda^2(V^k)\subset \Lambda^2(V).
\end{eqnarray*}
Therefore the resulting ind-Grassmannians $\mathbf G(k,n,S^2)$ and
$\mathbf G (k,n,\Lambda^2)$ are examples of twisted ind-Grassmannians
for which \refth{th56}
is new.
\end{document} |
\begin{document}
\title{On a Parabolic-Hyperbolic Filter for Multicolor Image Noise Reduction}
\author{
Valerii Maltsev\thanks{Department of Cybernetics, Kyiv National Taras Shevchenko University, Ukraine
\texttt{maltsev.valerii@gmail.com}} \and
Michael Pokojovy\thanks{Department of Mathematics, Karlsruhe Institute of Technology, Karlsruhe, Germany
\texttt{michael.pokojovy@kit.edu}}
}
\date{\today}
\pagestyle{myheadings}
\thispagestyle{plain}
\markboth{\textsc{V. Maltsev, M. Pokojovy}}{\textsc{On a Filter for Multicolor Image Noise Reduction}}
\maketitle
\begin{abstract}
We propose a novel PDE-based anisotropic filter for noise reduction in multicolor images.
It is a generalization of Nitzberg \& Shiota's (1992) model
being a hyperbolic relaxation of the well-known parabolic Perona \& Malik's filter (1990).
First, we consider a `spatial' molifier-type regularization of our PDE system
and exploit the maximal $L^{2}$-regularity theory for non-autonomous forms
to prove a well-posedness result both in weak and strong settings.
Again, using the maximal $L^{2}$-regularity theory and Schauder's fixed point theorem,
respective solutions for the original quasilinear problem are obtained
and the uniqueness of solutions with a bounded gradient is proved.
Finally, the long-time behavior of our model is studied.
\end{abstract}
{\bf Key words: } image processing, nonlinear partial differential equations,
weak solutions, strong solutions, maximal regularity
{\bf AMS:}
35B30,
35D30,
35D35,
35G61,
35M33,
65J15
\section{Introduction}
Image processing (also referred to as digital image processing) is one of central tasks in the image science.
It includes, but is not limited to
denoising, deblurring, decomposition or segmentation of images with appropriate edges (cf. \cite[p. 5]{BuMeOshRu2013}).
Since the presence of noise is unavoidable
due to the image formation process, recording and/or transmission (cf. \cite[p. 259]{RuOshFa1992}),
in practice, a noise reduction technique must be applied before any further processing steps can reasonably be performed.
One of the earlier systematic theories dates back to Marr and Hildreth \cite{MaHi1980} (cf. \cite[p. 182]{CaLioMoCo1992})
and incorporates a low-pass filtering as a noise reduction tool.
For a detailed overview of the historical literature, we refer the reader
to the comprehensive article by Alvarez et al. \cite{AlGuLiMo1993}.
After a decade of gradual improvements and developments by Canny \cite{Ca1983}, Witkin \cite{Wi1983} and many other authors,
the field has been revolutionized by Perona \& Malik \cite{PeMa1990} in early 90s,
when they proposed their famous anisotropic Perona \& Malik image filter.
Their development marked a new era in image processing -- the era of (time-dependent) partial differential equations (PDEs).
With $G \subset \mathbb{R}^{d}$ denoting the domain occupied by a monochromic image
and $u(t, \mathbf{x})$ standing for the (grayscale) image intensity at time $t \geq 0$ and pixel $\mathbf{x} \in G$,
in its modern formulation (cf. \cite[175--190]{KeSto2002}), Perona \& Malik's PDE reads as
\begin{equation}
\begin{split}
\partial_{t} u &= \mathrm{div}\, \big(\mathbf{g}(\nabla u) \nabla u\big) \text{ in } (0, \infty) \times G, \\
\big(\mathbf{g}(\nabla u) \nabla u\big) \cdot \mathbf{n} &= 0 \text{ on } (0, \infty) \times \Gamma, \\
u(0, \cdot) &= \tilde{u}^{0} \text{ in } G,
\end{split}
\label{EQUATION_CLASSICAL_PERONA_MALIK_FILTER}
\end{equation}
where $\mathbf{n}$ stands for the outer unit normal vector to $\Gamma := \partial G$,
$\mathbf{g}$ is a nonlinear response or diffusivity function (scalar or matrix-valued)
and $\tilde{u}^{0}$ denotes the original noisy image.
Whereas, as observed by Witkin \cite{Wi1983},
Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER}) leads to a linear Gaussian low-pass filter if $g$ is constant,
an appropriate choice of a nonlinear diffusivity
turns out to be particularly beneficial for the edge preservation.
Selecting $\mathbf{g}$ to vanish as $|\nabla u| \to \infty$,
the diffusion slows down at the edges thus preserving their localization.
For small values of $|\nabla u|$, the diffusion is active
and tends to smoothen around such points (cf. \cite[p. 183]{CaLioMoCo1992}).
Typical choices of $\mathbf{g}$ can be found in
\cite[Table 1, p. 178]{KeSto2002}, \cite[Section 1.3.3]{Wei1998}, for example,
\begin{equation}
\mathbf{g}(\mathbf{s}) = \left(1 + \frac{|\mathbf{s}|}{\lambda}\right)^{-1} \mathbf{I} \text{ for some } \lambda > 0. \notag
\end{equation}
In the analytic sense, it can be observed that
Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER}) is ill-posed
due to its connection with the reverse heat equation (cf. \cite[pp. 15--19]{Wei1998}).
Surprisingly, numerical discretizations have been observed to be stable
(\cite[pp. 20--21]{Am2007}, \cite{HaMiSga2002}),
though undesirable staircaising effects still occur sometimes (cf. \cite[p. 176]{KeSto2002}).
Some of the numerical studies have though been critically perceived by other authors (viz. \cite[p. 185]{CaLioMoCo1992}).
Another major drawback of Perona \& Malik's filter
is that it can break down if being applied to images
contaminated with an irregular noise such as the white noise.
Indeed, in such situations, $\nabla u$ becomes unbounded almost everywhere in $G$ and the diffusion collapses
(see \cite[p. 183]{CaLioMoCo1992}).
Unfortunately, despite numerous numerical results,
no rigorous analytic theories are known in the literature for Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER}).
As an alternative for Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER}),
Catt\'{e} et al. \cite{CaLioMoCo1992} proposed to consider a space-convolution regularization
called the `selecting smoothing' given by
\begin{equation}
\begin{split}
\partial_{t} u &= \mathrm{div}\, \big(\mathbf{g}(\nabla_{\sigma} u) \nabla u\big) \text{ in } (0, \infty) \times G, \\
\mathbf{g}(\nabla_{\sigma} u) \cdot \mathbf{n} &= 0 \text{ on } (0, \infty) \times \Gamma, \\
u(0, \cdot) &= \tilde{u}^{0} \text{ in } G,
\end{split}
\label{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_SPACE_REGULARIZATION}
\end{equation}
where $\nabla_{\sigma} u$ ($\sigma > 0$) denotes the gradient operator
applied to the convolution of $u$ with a multiple of Gaussian pdf in space
(see Section \ref{SECTION_PDE_BASED_IMAGE_FILTERING} below).
Heuristically, the convolution is meant to play the role of a low-pass filter
which iteratively smoothes the image at the scale of $t^{1/2}$ before recomputing the diffusivity matrix.
Under a $C^{\infty}$-smoothness assumption on the scalar function $\mathbf{g}$,
for $\tilde{u}^{0} \in L^{2}(G)$, Catt\'{e} et al. \cite{CaLioMoCo1992} proved an existence and uniqueness theorem for
Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_SPACE_REGULARIZATION})
in the class of weak solutions
$H^{1}\big(0, T; L^{2}(G)\big) \cap L^{2}\big(0, T; H^{1}(G)\big)$
together with a $C^{\infty}$-regularity of solutions in $(0, T) \times G$ for any $T > 0$.
They also developed a finite-difference numerical scheme and presented some illustrations of its performance.
As later discovered by Amann \cite[p. 1030]{Am2005},
Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_SPACE_REGULARIZATION})
results in smoothing of sharp edges and, therefore, produces unwanted blurring effects.
A more detailed discussion and some numerical illustrations can be found in \cite[Sections 1 and 2]{Am2007}.
To overcome the smearing effect of Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_SPACE_REGULARIZATION}),
Amann \cite{Am2005, Am2007} studied a memory-type regularization of Perona \& Malik's equation
(\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER}) given by
\begin{align}
\begin{split}
\partial_{t} u - \mathrm{div}\, \left(g\Big(\int_{0}^{t} \theta(t - s) \big|\nabla u(s, \cdot)\big|^{2} \mathrm{d}s\Big) \nabla u\right) &= 0
\text{ in } (0, \infty) \times G, \\
\frac{\partial u}{\partial \mathbf{n}} &= 0 \text{ on } (0, \infty) \times \Gamma, \\
u(0, \cdot) &= u^{0} \text{ in } G,
\end{split}
\label{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_TIME_REGULARIZATION}
\end{align}
where $\theta \in L^{s}_{\mathrm{loc}}(0, \infty; \mathbb{R}_{+})$ for some $s > 1$.
For $C^{2}$-domains $G$ (which rules out rectangular images), $1 < p, q < \infty$ such that $\frac{2}{p} + \frac{d}{q} < 1$
and $g \in C^{2-}\big(\mathbb{R}^{d}, (0, \infty)\big)$ with $C^{2-}$ denoting the space of functions
with locally bounded difference quotients up to order 2, the initial data
\begin{equation}
u^{0} \in H^{2, q}_{\mathrm{Neu}} := \Big\{u \in H^{2, q}(G) \,\big|\, \frac{\partial u}{\partial \mathbf{n}} = 0 \text{ on } \Gamma\Big\}
\notag
\end{equation}
were shown to admit a unique strong solution
\begin{equation}
u \in H^{1, p}\big(0, T; L^{q}(G)\big) \cap L^{p}\big(0, T; H^{2, q}_{\mathrm{Neu}}(G)\big) \notag
\end{equation}
with a maximal existence time $T^{\ast} > 0$,
which is even infinite if $\theta$ has a compact support in $(0, \infty)$.
Moreover, it has been proved the solution continuously depends on the data in the respective topologies
and a maximum principle for $u$ has been shown, etc.
The proof is based on the maximal $L^{p}$-regularity theory.
A generalization of Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_TIME_REGULARIZATION})
has also been studied and the results of numerical experiments have been presented.
An abstract linear version of Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER_TIME_REGULARIZATION})
was studied by Pr\"uss in \cite{Pr1991} and Zacher in \cite{Za2005}.
We refer the reader to the fundamental monograph \cite{Pr1993} by Pr\"uss
for further details on this and similar problems.
Cottet \& El Ayyadi \cite{CoAy1998} studied the initial-boundary value problem
\begin{align}
\begin{split}
\partial_{t} u - \mathrm{div}\,\big(\mathbf{L} \nabla u\big) &= 0 \text{ in } (0, \infty) \times G, \\
\partial_{t} \mathbf{L} + \mathbf{L} &= \mathbf{F}(\nabla_{\sigma} u) \text{ in } (0, \infty) \times G, \\
u(0, \cdot) = u^{0}, \quad
\mathbf{L}(0, \cdot) &= \mathbf{L}^{0} \text{ in } G
\end{split}
\label{EQUATION_COTTET_EL_AYYADI}
\end{align}
together with the periodic boundary conditions for $u$ for the case $d = 2$.
Here, $\mathbf{F}$ is a function mapping $\mathbb{R}^{d}$ into the space of positive semidefinite $(d \times d)$-matrices,
$\sigma > 0$ and $\mathbf{L}^{0}$ is uniformly positive definite.
Equation (\ref{EQUATION_COTTET_EL_AYYADI})
has first been proposed in a similar form and without any mathematical justification by Nitzberg \& Shiota in \cite{NiShi1992}.
For the initial data $(u^{0}, \mathbf{L}^{0})^{T} \in L^{\infty}(G) \times \big(H^{1}(G) \cap L^{\infty}(G)\big)^{d \times d}$,
Cottet \& El Ayyadi \cite{CoAy1998} showed the existence of a unique solution
\begin{align}
u \in L^{2}\big(0, T; H^{1}(G)\big) \cap L^{\infty}\big(0, T; L^{\infty}(G)\big), \quad
\mathbf{L} \in L^{\infty}\Big(0, T; \big(H^{1}(G) \cap L^{\infty}(G)\big)^{d \times d}\Big) \notag
\end{align}
for any $T > 0$, which, moreover, continuously depends on the data in a certain topology.
The proof is based on a convolution-like time discretization and {\it a priori} estimates.
The choice of parameters has been discussed.
A finite difference scheme together with numerical examples have been presented
and a connection to a neural network has been established.
Belahmidi \cite[Chapter 4]{Be2003} and Belahmidi \& Chambolle \cite{BeCha2005}
studied a modification of Equation (\ref{EQUATION_COTTET_EL_AYYADI}) reading as
\begin{align}
\begin{split}
u_{t} &= \mathrm{div}\,\big(g(v) \nabla u\big) \text{ in } (0, \infty) \times G, \\
v_{t} + v &= F\big(|\nabla u|\big) \text{ in } (0, \infty) \times G, \\
\frac{\partial u}{\partial \mathbf{n}} &= 0 \text{ on } (0, \infty) \times \Gamma, \\
u(0, \cdot) = u^{0}, \quad
v(0, \cdot) &= v^{0} \text{ in } G
\end{split}
\label{EQUATION_BELAHMIDI_CHAMBOLLE}
\end{align}
where $g, F$ are scalar $C^{1}$-functions
such that $g$ is positive non-increasing and $F$ is bounded together with its first derivative.
The main disadvantage of Equation (\ref{EQUATION_BELAHMIDI_CHAMBOLLE}) over Equation (\ref{EQUATION_COTTET_EL_AYYADI})
is that the former is not genuinely anisotropic in sense of \cite[Section 1.3.3]{Wei1998}.
Belahmidi \& Chambolle \cite{BeCha2005} developed a semi-implicit
space-time finite difference scheme for Equation (\ref{EQUATION_COTTET_EL_AYYADI})
and proved a discrete maximum principle for $u$ implying the unconditional stability of their scheme.
For the initial data
$(u^{0}, v^{0})^{T} \in \big(H^{1}(G) \cap L^{\infty}(G)\big) \times \big(H^{1}(G) \cap L^{\infty}(G)\big)$ with $v^{0} \geq 0$,
the sequence of numerical solutions was shown to subconverge to a `weak' solution
\begin{equation}
(u, v)^{T} \in \Big(H^{1}\big((0, T) \times G\big) \cap L^{\infty}\big((0, T) \times G\big)\Big)^{2}
\text{ for any } T > 0 \notag
\end{equation}
in the norm of $\Big(L^{2}\big(0, T; H^{1}(G)\big)\Big)^{2}$ as the lattice size goes to 0,
whence an existence theorem for Equation (\ref{EQUATION_BELAHMIDI_CHAMBOLLE}) follows.
As pointed out by Amann \cite[p. 20]{Am2007}, their proof is only valid in 2D.
For a H\"older-space treatment of Equation (\ref{EQUATION_BELAHMIDI_CHAMBOLLE}),
we refer the reader to Belahmidi's PhD thesis \cite[Chapter 4]{Be2003},
for which the author assumes, in particular,
\begin{equation}
(u^{0}, v^{0})^{T} \in C^{2, \alpha}(\bar{G}) \times C^{1, \alpha}(\bar{G}) \text{ and }
G \in C^{2, \alpha} \text{ for } \alpha > 0, \notag
\end{equation}
thus ruling out both rectangular images and rough noise patterns.
Equation (\ref{EQUATION_BELAHMIDI_CHAMBOLLE}) shares a certain degree of similarity with the equations of compressible and incompressible fluids.
Recently, Hieber \& Murata \cite{HieMu2015} studied a fluid-rigid interaction problem for a compressible fluid
and used the maximal $L^{p}$-regularity theory to prove the local well-posedness.
For an overview on the recent developments in the theory of parabolic systems we refer the reader to the same paper \cite{HieMu2015}.
For the sake of completeness,
one should also mention the vast literature
studying various image filters incorporating the total variation functional
as originally proposed by Rudin et al. \cite{RuOshFa1992},
which turned out to perform particularly well in practice.
Omitting the time-independent case (cf. Remark \ref{REMARK_ALTERNATIVE_APPROACHES} below),
the total variation counterpart of Perona \& Malik's Equation (\ref{EQUATION_CLASSICAL_PERONA_MALIK_FILTER})
is given by
\begin{equation}
u_{t} = \mathrm{div}\,\Big(\frac{\nabla u}{|\nabla u|}\Big) \text{ in } (0, \infty) \times G \notag
\end{equation}
together with appropriate boundary conditions,
where $\frac{\nabla u}{|\nabla u|}$
formally denotes the gradient/subdifferential of the total variation functional evaluated at $u$.
Without being exhaustive, we refer the reader to the well-posedness and long-time behavior studies
\cite{AnBaCaMa2001, AnBaCaMa2002, BeCaNo2002} and the references therein.
In the present paper, we revisit Equation (\ref{EQUATION_COTTET_EL_AYYADI}).
In Section \ref{SECTION_FILTER_DESCRIPTION},
we derive a multicolor, genuinely anisotropic generalization of Equation (\ref{EQUATION_COTTET_EL_AYYADI})
and discuss the choice of parameters for our new image filter.
In Section \ref{SECTION_SOLUTION_THEORY_REGULAR_CASE},
we present a well-posedness theory for the multicolor version of Equation (\ref{EQUATION_COTTET_EL_AYYADI}) for $\sigma > 0$.
In contrast to \cite{CoAy1998}, we obtain a more regular solution under a weaker data regularity assumption.
In Section \ref{SECTION_SOLUTION_THEORY_LIMITING_CASE},
we consider the limiting case $\sigma = 0$.
First, we prove the existence of mild and/or strong solutions
using the classical variational theory for parabolic equations.
Again, our approach requires less regularity than in the earlier work \cite{BeCha2005} and is valid in any space dimension.
Under an additional assumption,
we further prove the solutions are unique and continuously depend on the data.
Next, we study the long-time behavior of our model
and prove the exponential stability under a uniform positive definiteness condition on the diffusivity function.
In the appendix Section \ref{SECTION_APPENDIX}, we briefly summarize the classical
maximal $L^{2}$-regularity for non-autonomous forms as well as its recent improvements.
\section{Filter Description}
\label{SECTION_FILTER_DESCRIPTION}
In this section, we present a multicolor generalization of the monochromic PDE image filter proposed by
Nitzberg \& Shiota \cite{NiShi1992} and further developed by Cottet \& El Ayyadi \cite{CoAy1998}.
Our filter is more comprehensive than the monochromic one
since it takes into account possible local correlations between the color components.
Besides, we provide some geometric intuition and a connection to diffusion processes to justify the logic of our filter.
\subsection{PDE Based Image Filtering}
\label{SECTION_PDE_BASED_IMAGE_FILTERING}
Let $G$ be a bounded domain of $\mathbb{R}^{d}$ ($d \in \mathbb{N}$)
with $\mathbf{n} \colon \partial G \to \mathbb{R}^{d}$ standing for the unit outer normal vector.
Typically, $d = 2$ and $G = (0, L_{1}) \times (0, L_{2})$.
Let $\mathbf{u}^{0} \colon G \to \mathbb{R}^{k}$ with $\mathbf{u}^{0} = (u_{1}^{0}, \dots, u_{k}^{0})^{T}$
denote initial color intensity of the image at point $\mathbf{x} = (x_{1}, x_{2}, \dots, x_{d})^{T} \in G$
measured with respect to an additive $k$-color space (e.g., the RGB space with $k = 3$).
In most practical situations, not the original image $\mathbf{u}^{0}$ but a corrupted version of it, say, $\tilde{\mathbf{u}}^{0}$ is known.
Various pollution scenarios can occur ranging from noise effects and blurring to missing parts, etc.
Here, we want to restrict ourselves to the situation that $\mathbf{u}^{0}$ is distorted by an additive noise $\boldsymbol{\varepsilon}$, i.e.,
\begin{equation}
\tilde{\mathbf{u}}^{0}(\mathbf{x}) = \mathbf{u}^{0}(\mathbf{x}) + \boldsymbol{\varepsilon}(\mathbf{x}) \text{ for } \mathbf{x} \in G.
\label{EQUATION_TUKEY_DECOMPOSITION}
\end{equation}
For a probability space $(\Omega, \mathcal{F}, \mathrm{P})$,
the noise $\boldsymbol{\varepsilon}$ can be modeled as an $\mathcal{F}$-measurable random variable
taking its values in a closed subspace of $L^{2}(G, \mathbb{R}^{k})$, e.g., a Gaussian random field on $G$.
The goal is to reconstruct or at least to `optimally' estimate the (unknown) original image $\mathbf{u}^{0}$
based on the noisy observation $\tilde{\mathbf{u}}^{0}$.
We outline the following abstract approach (known as the scale-space theory) to constructing such estimators
based on general techniques of semiparametric statistics (cf. \cite[Section 1.2.2]{Wei1998}).
First, a deterministic semiflow $\big(S(t)\big)_{t \geq 0}$ on $L^{2}(G, \mathbb{R}^{k})$
referred to as a `scale-space' is introduced.
There are various rationales behind a particular selection $\big(S(t)\big)_{t \geq 0}$.
For example, $\big(S(t)\big)_{t \geq 0}$ can be designed such that
any `reasonable' unpolluted image $\mathbf{u}^{0}$
can be approximated by one of the stationary points of $\big(S(t)\big)_{t \geq 0}$.
In this case, an estimate $\hat{\mathbf{u}}^{0}$ of $\mathbf{u}^{0}$ is given by
\begin{equation}
\hat{\mathbf{u}}^{0} = S(T) \tilde{\mathbf{u}}^{0} \text{ for an appropriately large } T > 0.
\label{EQUATION_GENERAL_SMOOTHER_BASED_ON_SEMIGROUP}
\end{equation}
Another example is given when $S(\cdot)$ is selected to play the role of a kernel smoothing operator
from the nonparametric statistics (cf. \cite[Chapter 8]{Sc2015}).
In this case, the evaluation time $T$ in Equation (\ref{EQUATION_GENERAL_SMOOTHER_BASED_ON_SEMIGROUP})
roughly represents the (reciprocal) bandwidth and is typically selected to minimize the
asymptotic mean integrated square error (AMISE)
as a function of the design size $n$ (e.g., the number of pixels available).
\begin{remark}
\label{REMARK_ALTERNATIVE_APPROACHES}
Among other popular approaches such as the low-pass filtering, morphological multiscale analysis,
neural networks, Bayesian techniques, etc.,
one should mention the penalized nonparametric regression.
Given a noisy image $\tilde{\mathbf{u}}^{0}(\mathbf{x})$ from Equation (\ref{EQUATION_TUKEY_DECOMPOSITION}),
the filtered image is obtained by minimizing the penalized objective functional
\begin{equation}
\mathcal{J}(\mathbf{u}) =
\frac{1}{2} \int_{G} \big|\tilde{\mathbf{u}}^{0} - \mathbf{u}\big|^{2} \mathrm{d}\mathbf{x} +
\lambda \mathcal{P}(\mathbf{u})
\label{EQUATION_PENALIZED_FUNCTIONAL}
\end{equation}
with a regularization parameter $\lambda > 0$.
Here, the first term measures the $L^{2}$-goodness of fit between the noisy and the filtered images
and can alternatively be replaced with any other $L^{p}$-norm.
The second represents a Tychonoff-regularization associated with a stronger topology.
The typical choices are
\begin{equation}
\mathcal{P}(\mathbf{u}) = \frac{1}{2} \int_{G}
\sum_{|\beta| \leq k} c_{\beta}
\Big(\big(\nabla^{\alpha} \mathbf{u}\big)_{|\alpha| \leq s}\Big)^{\beta} \mathrm{d}\mathbf{x}
\quad \text{ or } \quad
\mathcal{P}(\mathbf{u}) = \mathrm{TV}(\mathbf{u}), \notag
\end{equation}
where $\mathrm{TV}(\mathbf{u})$ stands for the total variation of $\mathbf{u}$.
Whereas the former choice leads to the
classical spline smoothing/De Boor's approach (\cite[Section 8.2.2]{Sc2015}) or
elastic maps/thin plate smoothing splines (\cite[Section 4.3]{Ta2006}), etc.,
the latter one is known as ROF-denoising model
(cf. \cite{RuOshFa1992}, \cite[pp. 1--70]{BuMeOshRu2013}).
The first-order Lagrange optimality condition for the minimum of functional
in Equation (\ref{EQUATION_PENALIZED_FUNCTIONAL})
is typically given as a (parameter-)elliptic partial differential equation or inclusion.
\end{remark}
In the following, we use a synthesis of these two approaches to put forth the semiflow $\big(S(t)\big)_{t \geq 0}$.
The latter is also referred to as a $C_{0}$-operator semigroup (cf., e.g., \cite[Chapter 4]{Ba2010} or \cite[Chapter 5]{BeMoMcBri1998}).
As a matter of fact, one cannot expect the filter to be able to perfectly reconstruct the original image.
At the same time, the filter should be designed the way it performs `well' on a certain class or set of images.
Assuming $\boldsymbol{\varepsilon}$ is only locally autocorrelated,
a natural choice is to let the evolution associated with semiflow be driven by a partial differential equation (PDE).
In the following, we briefly outline our PDE model.
Motivated by the standard approach adopted in the theory of transport phenomena (cf. \cite[p. 2]{Wei1998}),
let $\mathbf{u}(t, \cdot) \in L^{2}(G, \mathbb{R}^{k})$ denote the color intensity at time $t \geq 0$
after applying the semiflow to the initial noisy measurement $\tilde{\mathbf{u}}^{0}$.
In physical applications, $\mathbf{u}$ is usually a scalar variable
representing the heat or material concentration density, etc.
Further, let $\mathbf{J}(t, \cdot) \in L^{2}(G, \mathbb{R}^{k \times d})$
denote the `color flux' tensor at time $t \geq 0$.
Intuitively speaking, $\mathbf{J}(t, \cdot)$ represents the direction the color intensity is flowing into
to compensate for local distortions caused by the noise.
Assuming that there are no other sources of color distortion,
we exploit the divergence theorem to obtain the following conservation or continuity equation
\begin{equation}
\partial_{t} \mathbf{u} + \mathrm{div} \,\mathbf{J} = \mathbf{0} \text{ in } (0, \infty) \times G,
\label{EQUATION_CONSERVATION_OF_STRESS}
\end{equation}
where
$\mathrm{div}\,\mathbf{J} =
\Big(\sum\limits_{j = 1}^{d} \partial_{x_{j}} J_{1j}, \sum\limits_{j = 1}^{d} \partial_{x_{j}} J_{2j}, \dots, \sum\limits_{j = 1}^{d} \partial_{x_{j}} J_{kj}\Big)^{T}$.
Since Equation (\ref{EQUATION_CONSERVATION_OF_STRESS}) is underdetermined,
a so-called constitutive equation establishing a relation between $\mathbf{u}$ and $\mathbf{J}$ is needed.
In many applications, one adopts the well-known Fick's law of diffusion, which postulates
$\mathbf{J}(t, \cdot)$ to be proportional to $-\nabla \mathbf{u}(t, \cdot)$, i.e.,
\begin{equation}
\mathbf{J}(t, \mathbf{x}) = -\mathbf{H}(t, \mathbf{x}) \nabla \mathbf{u}(t, \mathbf{x}) =
-\Big(\sum_{I = 1}^{k} \sum_{J = 1}^{d} H_{ijIJ}(t, \mathbf{x}) \partial_{x_{J}} u_{I}(t, \mathbf{x})\Big)_{i = 1, \dots, k}^{j = 1, \dots, d} \text{ for } \mathbf{x} \in G.
\label{EQUATION_TENSOR_MATRIX_MULTIPLICATION}
\end{equation}
Here, $\nabla \mathbf{u}$ stands for the Jacobian of $\mathbf{u}$
and the symmetric fourth-order tensor $\mathbf{H}(t, \cdot) \in \mathbb{R}^{(k \times d) \times (k \times d)}$
plays the role of diffusivity tensor and can be interpreted as a symmetric linear mapping from $\mathbb{R}^{k \times d}$ into itself.
With this in mind, Equation (\ref{EQUATION_CONSERVATION_OF_STRESS}) rewrites as
\begin{equation}
\partial_{t} \mathbf{u} - \mathrm{div}(\mathbf{H} \nabla \mathbf{u}) = \mathbf{0} \text{ in } (0, \infty) \times G.
\label{EQUATION_CONSERVATION_OF_STRESS_INCREMENTAL_FORM}
\end{equation}
If $\mathbf{H}$ is a constant tensor,
Equation (\ref{EQUATION_CONSERVATION_OF_STRESS_INCREMENTAL_FORM}) is referred to as the homogeneous diffusion.
Otherwise, Equation (\ref{EQUATION_CONSERVATION_OF_STRESS_INCREMENTAL_FORM})
is still underdetermined and a further constitutive relation between
$\mathbf{H}$ and $\nabla \mathbf{u}$ is indispensable.
In physics, this equation models the properties of the medium the diffusion is taking place in
and/or the properties of the substance which is diffusing.
In image processing, some other principles are adopted.
See Section \ref{SECTION_RESPONSE_FUNCTION} below for details.
Assuming, for example,
\begin{equation}
\mathbf{H} = \mathbf{F}(\nabla \mathbf{u}) \text{ in } (0, \infty) \times G
\label{EQUATION_PERONA_MALIK_CONSTITUTIVE_LAW}
\end{equation}
for an appropriate response function $\mathbf{F} \colon \mathbb{R}^{k \times d} \to \mathbb{R}^{(k \times d) \times (k \times d)}$
and plugging Equation (\ref{EQUATION_PERONA_MALIK_CONSTITUTIVE_LAW}) into (\ref{EQUATION_CONSERVATION_OF_STRESS_INCREMENTAL_FORM})
leads to a multicolor anisotropic generalization of Perona \& Malik's filter \cite{PeMa1990}
\begin{equation}
\partial_{t} \mathbf{u} - \mathrm{div}\big(\mathbf{F}(\nabla \mathbf{u}) \nabla \mathbf{u}\big) = \mathbf{0}
\text{ in } (0, \infty) \times G.
\label{EQUATION_PERONA_MALIK_GENERALIZED}
\end{equation}
As a parabolic PDE system, Equation (\ref{EQUATION_PERONA_MALIK_GENERALIZED}) exhibits an infinite signal propagation speed.
Since any practically relevant selection of the diffusivity function $\mathbf{F}$ violates the causality principle,
the equation even turns out to be ill-posed.
Besides, no direct intuition on how to select the stopping time $T$
from Equation (\ref{EQUATION_GENERAL_SMOOTHER_BASED_ON_SEMIGROUP}) is provided.
This motivated Nitzberg \& Shiota \cite{NiShi1992} and Cottet \& El Ayyadi \cite{CoAy1998}
to consider a hyperbolic relaxation of Equation (\ref{EQUATION_PERONA_MALIK_CONSTITUTIVE_LAW}) for the particular case $k = 1$.
For a positive relaxation parameter $\tau > 0$,
they replaced Equation (\ref{EQUATION_PERONA_MALIK_CONSTITUTIVE_LAW}) with the first-order hyperbolic equation
\begin{equation}
\tau \partial_{t} \mathbf{H} + \mathbf{H} = \mathbf{F}(\nabla \mathbf{u}) \text{ in } (0, \infty) \times G.
\label{EQUATION_HYPERBOLIC_CONSTITUTIVE_LAW}
\end{equation}
They called their regularization a `time-delay', which is, strictly speaking, not correct since
it rather has the form of a relaxation.
At the same time, Equation (\ref{EQUATION_HYPERBOLIC_CONSTITUTIVE_LAW}) can be viewed as a first-order Taylor approximation with respect to $\tau$ of the delay equation
\begin{equation}
\mathbf{H}(t + \tau, \mathbf{x}) = \mathbf{F}\big(\nabla \mathbf{u}(t, \mathbf{x})\big)
\text{ for } (t, \mathbf{x}) \in (0, \infty) \times G. \notag
\end{equation}
Equation (\ref{EQUATION_HYPERBOLIC_CONSTITUTIVE_LAW}) together with (\ref{EQUATION_CONSERVATION_OF_STRESS_INCREMENTAL_FORM}) yields a nonlinear PDE system
\begin{align}
\partial_{t} \mathbf{u} - \mathrm{div}(\mathbf{H} \nabla \mathbf{u}) &= \mathbf{0} \text{ in } (0, \infty) \times G,
\label{EQUATION_HYPERBOLIC_FILTER_1} \\
\tau \partial_{t} \mathbf{H} + \mathbf{H} - \mathbf{F}(\nabla \mathbf{u}) &= \mathbf{0} \text{ in } (0, \infty) \times G.
\label{EQUATION_HYPERBOLIC_FILTER_2}
\end{align}
Recall that $\mathbf{H} \nabla \mathbf{u}$ stands for the tensor-matrix multiplication (cf. Equation (\ref{EQUATION_TENSOR_MATRIX_MULTIPLICATION})).
In fact, Equations (\ref{EQUATION_HYPERBOLIC_FILTER_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_2})
are very much reminiscent of the well-known Cattaneo system (cf. \cite{Ca1958}) of relativistic heat conduction.
Formally speaking, Equation (\ref{EQUATION_HYPERBOLIC_CONSTITUTIVE_LAW}) `converges' to (\ref{EQUATION_PERONA_MALIK_CONSTITUTIVE_LAW}) as $\tau \to 0$.
Equations (\ref{EQUATION_HYPERBOLIC_FILTER_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_2})
can be viewed as parabolic-hyperbolic PDE system or a nonlinear Gurtin \& Pipkin heat equation (cf. \cite{GuPi1968}).
Indeed, solving Equation (\ref{EQUATION_HYPERBOLIC_FILTER_2}) for $\mathbf{H}$
and plugging the result into Equation (\ref{EQUATION_HYPERBOLIC_FILTER_1}),
we obtain a memory-type equation
\begin{equation}
\partial_{t} \mathbf{u} - \mathrm{div} \bigg(\Big(\int_{0}^{\cdot} \exp\big(-(\cdot - s)/\tau\big)
\big(\mathbf{F}(\nabla \mathbf{u})\big)(s) \mathrm{d}s\Big) \nabla \mathbf{u}\bigg)
= \mathbf{0} \text{ in } (0, \infty) \times G,
\label{EQUATION_PARABOLIC_VOLTERRA_EQUATION}
\end{equation}
which, after being differentiated with respect to $t$, yields a quasilinear wave equation
with a Kelvin \& Voigt damping and memory-time coefficients.
Next, appropriate boundary conditions for $(\mathbf{u}, \mathbf{H})^{T}$ need to be prescribed.
Neither Dirichlet, nor periodic boundary conditions seem to be adequate for the most applications.
In contrast to that, a nonlinear Neumann boundary condition
\begin{equation}
\mathbf{n}^{T}(\mathbf{H} \nabla \mathbf{u}) = \mathbf{0} \text{ on } (0, \infty) \times \partial G
\label{EQUATION_NEUMANN_BOUNDARY_CONDITION}
\end{equation}
turns out both to be mathematically sound and geometrically intuitive.
Equation (\ref{EQUATION_NEUMANN_BOUNDARY_CONDITION}) states that the color flow on the boundary vanishes in the normal direction.
As for the initial conditions, we prescribe
\begin{equation}
\mathbf{u}(0, \cdot) = \tilde{\mathbf{u}}^{0}, \quad
\mathbf{H}(0, \cdot) = \tilde{\mathbf{H}}^{0} \text{ in } G.
\label{EQUATION_INITIAL_CONDITION}
\end{equation}
Here, the fourth-order tensor $\tilde{\mathbf{H}}^{0}$ can be chosen to be symmetric and positive definite, e.g.,
$\tilde{\mathbf{H}}^{0} = \big(\alpha \delta_{iI} \delta_{jJ}\big)_{i, I = 1, \dots, k}^{j, J = 1, \dots, d}$ for a small parameter $\alpha > 0$.
Collecting Equations (\ref{EQUATION_HYPERBOLIC_FILTER_1})--(\ref{EQUATION_INITIAL_CONDITION}),
we arrive at an initial-boundary value problem for the quasilinear PDE system
\begin{align}
\partial_{t} \mathbf{u} - \mathrm{div}(\mathbf{H} \nabla \mathbf{u}) &= \mathbf{0} \text{ in } (0, \infty) \times G,
\label{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1} \\
\tau \partial_{t} \mathbf{H} + \mathbf{H} - \mathbf{F}(\nabla \mathbf{u}) &= \mathbf{0} \text{ in } (0, \infty) \times G,
\label{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_2} \\
\mathbf{n}^{T}(\mathbf{H} \nabla \mathbf{u}) &= \mathbf{0} \text{ on } (0, \infty) \times \partial G,
\label{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_3} \\
\mathbf{u}(0, \cdot) = \tilde{\mathbf{u}}^{0}, \quad
\mathbf{H}(0, \cdot) &= \tilde{\mathbf{H}}^{0} \text{ in } G.
\label{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4}
\end{align}
Equations (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4}) can be viewed as a mixed parabolic-hyperbolic system.
Additionally, the boundary condition in Equation (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_2}) is nonlinear.
Hence, neither the classical hyperbolic solution theory (viz. \cite{Ohk1981} or \cite{Se1996}),
nor the classical parabolic solution theory (see, e.g., \cite{Ba2010}) are directly applicable.
To make Equations (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4}) better feasible,
Cottet \& El Ayyadi considered in \cite{CoAy1998} a regularization of $\nabla \mathbf{u}$
in Equation (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_2}) through a spatial mollification.
For $\mathbf{u} \in L^{2}(G, \mathbb{R}^{k})$ and a kernel $\rho \in L^{\infty}_{\mathrm{loc}}(\mathbb{R}^{d}, \mathbb{R})$,
the convolution $\mathbf{u} \ast \rho$ is given by
\begin{equation}
\big(\mathbf{u} \ast \rho\big)(\mathbf{x}) :=
\int_{G} \rho(\mathbf{x} - \mathbf{y}) \mathbf{u}(\mathbf{y}) \mathrm{d}\mathbf{y} \text{ for } \mathbf{x} \in G. \notag
\end{equation}
Selecting now a fixed mollifier $\rho \in W^{1, \infty}\big(\mathbb{R}^{d}, \mathbb{R}\big)$ with $\rho \geq 0$ a.e. in $\mathbb{R}^{d}$
and $\int_{\mathbb{R}^{d}} \rho(\mathbf{x}) \mathrm{d}\mathbf{x} = 1$, e.g., $\rho$ can be the Gaussian pdf,
as well as a bandwidth $\sigma > 0$,
we define for $\mathbf{u} \in L^{2}(G, \mathbb{R}^{k})$ the nonlocal operator
\begin{equation}
\nabla_{\sigma} \mathbf{u} := \nabla \big(\mathbf{u} \ast \rho_{\sigma}\big)
\text{ with } \rho_{\sigma}(\mathbf{x}) := \tfrac{1}{\sigma^{d}} \rho\big(\tfrac{\mathbf{x}}{\sigma}\big) \text{ for } \mathbf{x} \in G \notag
\end{equation}
as a regularization of the gradient operator. With $(\rho_{\sigma})_{\sigma > 0}$ being a delta sequence,
$\nabla_{\sigma}$ is a regular approximation of the $\nabla$-operator.
Replacing $\nabla$ with $\nabla_{\sigma}$ in Equation (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_2}),
we arrive at the following system of partial integro-differential equations
\begin{align}
\partial_{t} \mathbf{u} - \mathrm{div}\, (\mathbf{H} \nabla \mathbf{u}) &= \mathbf{0} \text{ in } (0, \infty) \times G, \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1} \\
\tau \partial_{t} \mathbf{H} + \mathbf{H} - \mathbf{F}(\nabla_{\sigma} \mathbf{u}) &= \mathbf{0} \text{ in } (0, \infty) \times G, \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_2} \\
(\mathbf{H} \nabla \mathbf{u})^{T} \mathbf{n} &= \mathbf{0} \text{ on } (0, \infty) \times \partial G, \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_3} \\
\mathbf{u}(0, \cdot) = \tilde{\mathbf{u}}^{0}, \quad
\mathbf{H}(0, \cdot) &= \tilde{\mathbf{H}}^{0} \text{ in } G. \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4}
\end{align}
\subsection{Parameter Selection}
In this subsection, we discuss the choice of parameters $\tau$, $\mathbf{F}$ and $\tilde{\mathbf{H}}^{0}$.
Also, depending on whether the model (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4})
or (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4}) is adopted,
a kernel $\rho$ and a regularization parameter $\sigma > 0$ need or need not be selected.
Here, we restrict ourselves to the limiting case $\sigma = 0$.
\subsubsection{Response function $\mathbf{F}$}
\label{SECTION_RESPONSE_FUNCTION}
For $d, k \in \mathbb{N}$, let the space $\mathbb{R}^{k \times d}$ of real $(k \times d)$-matrices be equipped with the Frobenius scalar product
\begin{equation}
\langle \mathbf{D}, \hat{\mathbf{D}}\rangle_{\mathbb{R}^{k \times d}} \equiv \mathbf{D}:\hat{\mathbf{D}}
:= \sum_{i = 1}^{k} \sum_{j = 1}^{d} D_{ij} \hat{D}_{ij}
\text{ for } \mathbf{D}, \hat{\mathbf{D}} \in \mathbb{R}^{k \times d}.
\label{EQUATION_FROBENIUS_SCALAR_PRODUCT_MATRICES}
\end{equation}
For a fixed $\hat{\mathbf{D}} \in \mathbb{R}^{k \times d}$, we can thus define an orthogonal projection operator
$\mathbb{P}_{\hat{\mathbf{D}}^{\perp}} \colon \mathbb{R}^{k \times d} \to \mathbb{R}^{k \times d}$ onto the orthogonal complement of $\hat{\mathbf{D}}$ via
\begin{equation}
\mathbb{P}_{\hat{\mathbf{D}}^{\perp}}(\mathbf{D}) = \mathbf{D} - \frac{(\mathbf{D} : \hat{\mathbf{D}}) \hat{\mathbf{D}}}{\hat{\mathbf{D}} : \hat{\mathbf{D}}} \text{ for } \mathbf{D} \in \mathbb{R}^{k \times d}. \notag
\end{equation}
Obviously, $\mathbb{P}_{\hat{\mathbf{D}}^{\perp}}$ can be viewed as an element of $\mathbb{R}^{(k \times d) \times (k \times d)}$.
A first choice of $\mathbf{F}$ could be
\begin{equation}
\mathbf{F}(\mathbf{D}) = \mathbb{P}_{\hat{\mathbf{D}}^{\perp}}(\mathbf{D}).
\label{EQUATION_FUNCTION_F_NAIVE_CHOICE}
\end{equation}
In addition to being unsmooth at zero and thus possibly leading to technical difficulties
when treating Equations (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4}) analytically or numerically,
this particular choice of the nonlinearity does not seem to meet practical requirements
which are desirable for an image filter.
Indeed, as reported in \cite[Section III.A]{CoAy1998},
the resulting system possesses too many undesirable stationary points
and exhibits a too fast convergence speed leading to such a serious drawback
that a rather high amount of noise is retained.
That is why a regularization of Equation (\ref{EQUATION_FUNCTION_F_NAIVE_CHOICE})
based on a contrast threshold parameter should be adopted.
Motivated by \cite[Equation (21)]{CoAy1998}, we let
\begin{equation}
\mathbf{F}_{s}(\nabla \mathbf{u}) =
\begin{cases}
\mathbb{P}_{(\nabla \mathbf{u})^{\perp}}, & (\nabla \mathbf{u}) : (\nabla \mathbf{u}) \geq s^{2}, \\
\frac{3}{2}\Big(1 - \frac{(\nabla \mathbf{u}) : (\nabla \mathbf{u})}{s^{2}}\Big) + \frac{(\nabla \mathbf{u}) : (\nabla \mathbf{u})}{s^{2}} \mathbb{P}_{(\nabla \mathbf{u})^{\perp}}, & \text{ otherwise}.
\end{cases}
\label{EQUATION_FUNCTION_F_PRACTICAL_CHOICE}
\end{equation}
For a discussion on the particular choice of the coefficient $\frac{3}{2}$
and a connection to a neural network model,
we refer to \cite[Section IV]{CoAy1998}.
With the color variables being each rescaled to lie in the interval $[-1, 1]$,
the contrast threshold $s$ is usually selected as 5\% to 10\% of the image width or height.
Note that the choice of function $\mathbf{F}$ in Equation (\ref{EQUATION_FUNCTION_F_PRACTICAL_CHOICE})
satisfies Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F}
thus complying with our existence theory in Sections
\ref{SECTION_SOLUTION_THEORY_REGULAR_CASE} and \ref{SECTION_SOLUTION_THEORY_LIMITING_CASE}.
Numerous alternative choices of the response function $\mathbf{F}$
can be found in \cite[Table 1, p. 178]{KeSto2002}.
\subsection{Tensor $\tilde{\mathbf{H}}^{0}$}
As for the initial diffusivity tensor $\tilde{\mathbf{H}}^{0}$,
assuming the noise $\boldsymbol{\varepsilon}$ in Equation (\ref{EQUATION_TUKEY_DECOMPOSITION}) is weakly autocorrelated,
we can select
\begin{equation}
\tilde{\mathbf{H}}^{0}(\mathbf{x}) = \mathrm{Cov}\big[\nabla \boldsymbol{\varepsilon}(\mathbf{x})\big]
\text{ for } \mathbf{x} \in G
\end{equation}
under an additional uniform positive definiteness condition on $\mathrm{Cov}\big[\boldsymbol{\varepsilon}(\cdot)\big]$.
Since $\mathrm{Cov}\big[\boldsymbol{\varepsilon}(\cdot)\big]$ is not known in practice,
the value of $\mathrm{Cov}\big[\boldsymbol{\varepsilon}(\mathbf{x})\big]$ for a particular $\mathbf{x} \in G$
can be estimated by computing the sample covariance matrix
of $\nabla \tilde{\mathbf{u}}^{0}$ evaluated over an appropriate neighborhood of $\mathbf{x}$.
We refer to \cite{Ca1983, MaHi1980}, \cite[Chapter 8]{Sc2015} for a discussion on the optimal neighborhood size.
\subsection{Relaxation time $\tau$}
Repeating the calculations in \cite[Section III.A]{CoAy1998} and \cite{Wi1983},
any particular selection of the parameter $\tau$ can be shown to imply that
any graphical pattern occurring on scales smaller than $\sqrt{\tau}$ vanishes asymptotically,
i.e., converges to its spatial mean.
If no prior information on the minimum pattern size is available,
statistical methods need to be employed to estimate the former.
\section{The regular case $\sigma > 0$}
\label{SECTION_SOLUTION_THEORY_REGULAR_CASE}
In this section, we provide a well-posedness theory for Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4}).
In contrast to \cite{CoAy1998}, nonlinear Neumann and not linear periodic boundary conditions are prescribed in Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_3}).
Being more adequate for practical applications,
they are mathematically more challenging since the evolution is now driven by an operator with a time-varying domain.
Thus, a standard application of the Faedo--Galerkin method is rather problematic.
Therefore, we propose a new solution technique based on the maximum $L^{2}$-regularity theory
for non-autonomous sesquilinear forms due to Dautray \& Lions \cite[Chapter 18, \S 3]{DauLio1992}
as well as its recent improvement by Dier \cite{Die2015}.
Another major advantage of our approach over \cite{CoAy1998} is
that we get a much more regular solution under weaker smoothness assumptions on the initial data.
Our results have a certain degree of resemblance to \cite{CaLioMoCo1992},
where Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4})
were studied for $\tau = 0$ and $k = 1$.
In contrast to \cite{CaLioMoCo1992}, we consider both the weak and strong settings
without requiring $\mathbf{F}$ to be a $C^{\infty}$-function.
Without loss of generality, let $\int_{G} \tilde{\mathbf{u}}^{0} \mathrm{d}\mathbf{x} = \mathbf{0}$.
Otherwise, replace Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4}) with
\begin{equation}
\mathbf{u}(0, \cdot) = \tilde{\mathbf{u}}^{0} - \frac{1}{|G|} \int_{G} \tilde{\mathbf{u}}^{0} \mathrm{d}\mathbf{x}, \quad
\mathbf{H}(0, \cdot) = \tilde{\mathbf{H}}^{0} \text{ in } G
\label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ZERO_MEAN_4}
\end{equation}
and solve the resulting system for $\mathbf{u}$.
Later, by adding $\frac{1}{|G|} \int_{G} \tilde{\mathbf{u}}^{0} \mathrm{d} \mathbf{x}$ to $\mathbf{u}$,
a solution to the original system is obtained.
We consider the space $\mathbb{R}^{(k \times d) \times (k \times d)}$ of real fourth-order tensors.
Similar to Equation (\ref{EQUATION_FROBENIUS_SCALAR_PRODUCT_MATRICES}), we equip $\mathbb{R}^{(k \times d) \times (k \times d)}$ with the Frobenius inner product
\begin{equation}
\langle \mathbf{H}, \hat{\mathbf{H}}\rangle_{\mathbb{R}^{(k \times d) \times (k \times d)}} \equiv \mathbf{H} : \hat{\mathbf{H}} :=
\sum_{i, I = 1}^{k} \sum_{j, J = 1}^{d} H_{ijIJ} \hat{H}_{ijIJ} \text{ for } \mathbf{H}, \hat{\mathbf{H}} \in \mathbb{R}^{(k \times d) \times (k \times d)}. \notag
\end{equation}
With all norms being equivalent on the finite dimensional space $\mathbb{R}^{(k \times d) \times (k \times d)}$ by virtue of Riesz' theorem,
the Frobenius norm $\sqrt{(\cdot) : (\cdot)}$ is equivalent with the operator norm
\begin{equation}
\|\mathbf{H}\|_{L(\mathbb{R}^{k \times d})} = \sup_{\|\mathbf{D}\|_{\mathbb{R}^{k \times d}} = 1} \|\mathbf{H} \mathbf{D}\|_{\mathbb{R}^{k \times d}}. \notag
\end{equation}
The space $\mathcal{S}(\mathbb{R}^{k \times d})$ of symmetric $(k \times d) \times (k \times d)$-tensors
\begin{equation}
\mathcal{S}(\mathbb{R}^{k \times d}) = \big\{\mathbf{H} \in \mathbb{R}^{(k \times d) \times (k \times d)} \,|\,
H_{ijIJ} = H_{IJij} \text{ for } i, I = 1, \dots, k \text{ and } j, J = 1, \dots, d\big\} \notag
\end{equation}
is a closed subspace of $\mathbb{R}^{(k \times d) \times (k \times d)}$.
Obviously, $\mathcal{S}(\mathbb{R}^{k \times d})$ is isomorphic to the space of linear symmetric operators on $\mathbb{R}^{k \times d}$.
For $\kappa \geq 0$, we define a closed subset $\mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})$ of $\mathcal{S}(\mathbb{R}^{k \times d})$ by the means of
\begin{equation}
\mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d}) :=
\big\{\mathbf{H} \in \mathcal{S}(\mathbb{R}^{k \times d}) \,|\,
\lambda_{\min}(\mathbf{H}) \geq \kappa\big\}, \notag
\end{equation}
where $\lambda_{\min}(\mathbf{H})$ denotes the smallest eigenvalue of $\mathbf{H}$ viewed as a bounded, linear operator on $\mathbb{R}^{k \times d}$.
As is known from linear algebra, $\lambda_{\min}(\mathbf{H}) \geq \kappa$ is equivalent to
\begin{equation}
(\mathbf{H}\mathbf{D}) : \mathbf{D} \geq \kappa (\mathbf{D} : \mathbf{D}) \text{ for any } \mathbf{D} \in \mathbb{R}^{k \times d}. \notag
\end{equation}
Further, we define
\begin{align*}
L^{\infty}\big(G, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big) &:=
\Big\{\mathbf{H} \in L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big) \,\big|\,
\mathbf{H}(\mathbf{x}) \in \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d}) \text{ for a.e. } \mathbf{x} \in G\big\} \\
&\phantom{:}=
\Big\{\mathbf{H} \in L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big) \,\big|\,
\mathop{\operatorname{ess\,inf}}_{\mathbf{x} \in G} \lambda_{\min}(\mathbf{H}) \geq \kappa\Big\}. \notag
\end{align*}
Throughout this subsection, we require the following assumption on $\rho$ and $\mathbf{F}$.
\begin{assumption}
\label{ASSUMPTION_ON_F_AND_RHO}
Let the functions $\mathbf{F} \colon \mathbb{R}^{k \times d} \to \mathcal{S}_{\geq 0}(\mathbb{R}^{k \times d})$ as well as
$\rho \colon \mathbb{R}^{d} \to \mathbb{R}$ be weakly differentiable and
let their first-order weak derivatives be essentially bounded by a positive number $c > 0$.
\end{assumption}
\begin{remark}
Assumption \ref{ASSUMPTION_ON_F_AND_RHO} is weaker than the one in \cite[p. 293, Equation (4)]{CoAy1998}.
\end{remark}
We introduce the nonlinear mapping $\mathcal{F}_{\sigma} \colon L^{2}(G, \mathbb{R}^{k}) \to L^{\infty}(G, \mathcal{S}_{\geq 0}(\mathbb{R}^{k \times d}))$ with
\begin{equation}
\mathcal{F}_{\sigma}(\mathbf{u}) := \mathbf{F}(\nabla_{\sigma} \mathbf{u}) \text{ for } \mathbf{u} \in L^{2}(G, \mathbb{R}^{k}). \notag
\end{equation}
\begin{lemma}
\label{LEMMA_F_SIGMA_LIPSCHITZ_CONTINUOUS}
The mapping $\mathcal{F}_{\sigma}$ is Lipschitz-continuous.
\end{lemma}
\begin{proof}
Using H\"older's inequality and Assumption \ref{ASSUMPTION_ON_F_AND_RHO}, we can estimate for any $\mathbf{x} \in G$
\begin{align*}
\big\|\big(\nabla_{\sigma} \mathbf{u}\big)(\mathbf{x})&\big\|_{\mathbb{R}^{k \times d}} =
\big\|\nabla \big(\mathbf{u} \ast \rho_{\sigma}\big)(\mathbf{x})\big\|_{\mathbb{R}^{k \times d}}
=
\Big\| \nabla \Big(\int_{G} \sigma^{-d} \rho\big(\sigma^{-1}(\mathbf{x} - \mathbf{y})\big) \mathbf{u}(\mathbf{y}) \mathrm{d}\mathbf{y}\Big)\Big\|_{\mathbb{R}^{k \times d}} \\
&\leq
\sigma^{-(d + 1)} \int_{G} \big|\rho'\big(\sigma^{-1}(\mathbf{x} - \mathbf{y})\big)\big| \, \|\mathbf{u}(\mathbf{y})\|_{\mathbb{R}^{k}} \mathrm{d}\mathbf{y}
\leq
\sigma^{-(d + 1)} C (|G|)^{1/2} \|\mathbf{u}\|_{L^{2}(G, \mathbb{R}^{k})}, \notag
\end{align*}
where $|G| < \infty$ is the standard Lebesgue measure of $G$.
Hence, for any $\mathbf{u}, \hat{\mathbf{u}} \in L^{2}(G, \mathbb{R}^{k})$, we get
\begin{align*}
\big\|\big(\mathbf{F}(\nabla_{\sigma} \mathbf{u})\big)(\mathbf{x}) - \big(\mathbf{F}(\nabla_{\sigma} \hat{\mathbf{u}})\big)(\mathbf{x})\big\|_{\mathbb{R}^{(k \times d) \times (k \times d)}}
&\leq C \big\|\big(\nabla_{\sigma} \mathbf{u}\big)(\mathbf{x}) - \big(\nabla_{\sigma} \hat{\mathbf{u}}\big)(\mathbf{x})\big\|_{\mathbb{R}^{k \times d}} \\
&\leq C^{2} \sigma^{-(d + 1)} (|G|)^{1/2}
\|\mathbf{u} - \hat{\mathbf{u}}\|_{L^{2}(G, \mathbb{R}^{k})},
\end{align*}
which finishes the proof.
\end{proof}
We let
\begin{equation}
\mathcal{H} := L^{2}(G, \mathbb{R}^{k})/\{\mathbf{1}\} \equiv
\Big\{\mathbf{u} \in L^{2}(G, \mathbb{R}^{k}) \,\big|\, \int_{G} \mathbf{u}\, \mathrm{d}\mathbf{x} = \mathbf{0}\Big\}, \quad
\mathcal{V} := H^{1}(G, \mathbb{R}^{k}) \cap \mathcal{H}. \notag
\end{equation}
Then $(\mathcal{V}, \mathcal{H}, \mathcal{V}')$ is a Gelfand triple.
For a tensor-valued function $\mathbf{H} \in L^{\infty}\big(G, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big)$ for some $\kappa > 0$,
we further consider the bilinear form
\begin{equation}
a(\cdot, \cdot; \mathbf{H}) \colon \mathcal{V} \times \mathcal{V} \to \mathbb{R}, \quad
(\mathbf{u}, \mathbf{v}) \mapsto \int_{G} \big(\mathbf{H} \nabla \mathbf{u}\big) : \big(\nabla \mathbf{v}\big) \mathrm{d}\mathbf{x}, \notag
\end{equation}
where $\mathcal{V} = H^{1}(G, \mathbb{R}^{k}) \cap \mathcal{H}$ is the space introduced above.
By virtue of Assumption \ref{ASSUMPTION_ON_F_AND_RHO}, $a(\cdot, \cdot; \mathbf{H})$ is a symmetric, continuous bilinear form.
The associated linear bounded symmetric operator $\mathcal{A}(\mathbf{H}) \colon \mathcal{V} \to \mathcal{V}'$ is given as
\begin{equation}
\langle \mathcal{A}(\mathbf{H}) \mathbf{u}, \tilde{\mathbf{u}}\rangle_{\mathcal{V}'; \mathcal{V}} := a(\mathbf{u}, \tilde{\mathbf{u}}; \mathbf{H})
\text{ for any } \mathbf{u}, \tilde{\mathbf{u}} \in \mathcal{V}. \notag
\end{equation}
Using Assumption \ref{ASSUMPTION_ON_F_AND_RHO} and the second Poincar\'{e} inequality, we can estimate
\begin{equation}
a(\mathbf{u}, \mathbf{u}; \mathbf{H}) \geq \kappa \|\nabla \mathbf{u}\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2} \geq
\kappa C_{P} \|\mathbf{u}\|_{\mathcal{V}}^{2} \text{ for any } \mathbf{u} \in \mathcal{V},
\label{EQUATION_UNIFORM_COERCIVITY_OF_A}
\end{equation}
where $C_{P} = C_{P}(G) > 0$ is the Poincar\'{e} constant.
Hence, $\mathcal{A}(\mathbf{H})$ is continuously invertible and self-adjoint.
From the elliptic theory, we know that $\mathcal{A}(\mathbf{H})$ realizes the Neumann boundary conditions in (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_3})
associated with the PDE in Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1}).
Further, we know that the maximum domain of the strong realization of $\mathcal{A}(\mathbf{H})$
\begin{equation}
D\big(\mathcal{A}(\mathbf{H})\big) := \big\{\mathbf{u} \in \mathcal{V} \,|\, \mathcal{A}(\mathbf{H}) \mathbf{u} \in \mathcal{H}\big\} \notag
\end{equation}
is a dense subspace of $\mathcal{H}$.
\begin{remark}
If $\mathbf{H} \in C^{1}\big(\bar{G}, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big)$ for $\kappa > 0$,
the elliptic regularity theory implies
\begin{equation}
D\big(\mathcal{A}(\mathbf{H})\big) \subset H^{2}(G, \mathbb{R}^{k}) \notag
\end{equation}
if $G \in C^{2}$ (cf. \cite[Lemma 3.6]{HoJaSch2015} for the case $G$ is a rectangular box).
This regularity for $\mathbf{H}$ can be assured by selecting a regular convolution kernel $\rho$
and a smooth nonlinearity $\mathbf{F}$ as well as considering
smooth initial data $\mathbf{H}^{0}$ for $\mathbf{H}$.
\end{remark}
With the notation introduced above, Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4}) can be written in the following abstract form:
\begin{align}
\partial_{t} \mathbf{u} + \mathcal{A}(\mathbf{H}) \mathbf{u} &= 0 \text{ in } L^{2}(0, T; \mathcal{V}'), \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1} \\
\tau \partial_{t} \mathbf{H} + \mathbf{H} - \mathcal{F}_{\sigma}(\mathbf{u}) &= 0 \text{ in } L^{2}\big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\big), \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_2} \\
\mathbf{u}(0, \cdot) = \tilde{\mathbf{u}}^{0} \text{ in } \mathcal{H}, \quad
\mathbf{H}(0, \cdot) &= \tilde{\mathbf{H}}^{0} \text{ in } L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big), \label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_3}
\end{align}
where the Neumann boundary conditions are now incorporated into the definition of operator $\mathcal{A}(\mathbf{H})$.
\begin{definition}
\label{DEFINITION_SOLUTION_NOTION_REGULAR_CASE}
For $T > 0$, a function $(\mathbf{u}, \mathbf{H})^{T} \colon [0, T] \times \bar{G} \to \mathbb{R}^{k} \times \mathcal{S}(\mathbb{R}^{k \times d})$
is referred to as a weak solution to Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_3}) on $[0, T]$
if there exists a number $\kappa > 0$ such that the function pair $(\mathbf{u}, \mathbf{H})^{T}$
satisfies $\mathbf{H} \in \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})$ a.e. in $(0, T) \times G$ and
\begin{equation}
\mathbf{u} \in H^{1}(0, T; \mathcal{V}') \cap L^{2}(0, T; \mathcal{V}), \quad
\mathbf{H} \in W^{1, \infty}\big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\big)
\notag
\end{equation}
and fulfils the abstract differential equations
(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_2})
together with the initial conditions (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_3})
in sense of interpolation Equation (\ref{EQUATION_INTERPOLATION_THEOREM_WEAK_SOLUTIONS}).
If $\mathbf{u}$ additionally satisfies
\begin{equation}
\mathbf{u} \in H^{1}(0, T; \mathcal{H}) \text{ and } \mathrm{div}(\mathbf{H} \nabla \mathbf{u}) \in L^{2}(0, T; \mathcal{H}),
\notag
\end{equation}
$(\mathbf{u}, \mathbf{H})^{T}$ is then referred to as a strong solution.
\end{definition}
\begin{remark}
Note that Definition \ref{DEFINITION_SOLUTION_NOTION_REGULAR_CASE}
can easily be generalized to the case $T = \infty$ by replacing the Banach-Sobolev spaces $W^{s, p}$ and $L^{p}$ with
the metric Sobolev spaces $W^{s, p}_{\mathrm{loc}}$ and $L^{p}_{\mathrm{loc}}$.
\end{remark}
\begin{theorem}
\label{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_REGULAR_CASE}
Let
$(\tilde{\mathbf{u}}^{0}, \tilde{\mathbf{H}}^{0})^{T} \in \mathcal{H} \times L^{\infty}\big(G, \mathcal{S}_{\geq \alpha}(\mathbb{R}^{k \times d})\big)$ for some $\alpha > 0$.
For any $T > 0$, the initial-boundary value problem (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_3})
possesses then a unique weak solution on $[0, T]$ satisfying
\begin{equation}
\mathbf{H}(t, \cdot) \in L^{\infty}\big(G, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big) \text{ for a.e. } t \in [0, T]
\text{ with } \kappa := \alpha \exp(-T/\tau). \notag
\end{equation}
\end{theorem}
\begin{proof}
\textit{Equivalent formulation and \emph{a priori} estimates: }
Solving Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_2}) for $\mathbf{H}$
and plugging the result into Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1}),
Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_3})
reduce to
\begin{align}
\partial_{t} \mathbf{u} +
\mathcal{A}\big(\mathbf{H}(\mathbf{u})\big) \mathbf{u} = 0 \text{ in } (0, T), \quad
\mathbf{u}(0, \cdot) = \mathbf{u}^{0}
\label{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED}
\end{align}
with
\begin{equation}
\big(\mathbf{H}(\mathbf{u})\big)(t, \cdot) = \exp(-t/\tau) \mathbf{H}^{0} +
\int_{0}^{t} \exp\big(-(t - s)/\tau\big) \mathbf{F}\big(\nabla_{\sigma} \mathbf{u}(s, \cdot)\big) \mathrm{d}s
\text{ for } t \in [0, T].
\label{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H}
\end{equation}
We now prove several \emph{a priori} estimates which will be used later in the proof.
For any $\mathbf{D} \in \mathbb{R}^{k \times d}$,
Equation (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H})
together with Assumption \ref{ASSUMPTION_ON_F_AND_RHO} imply
\begin{align}
\begin{split}
\big(\big(\mathbf{H}(\mathbf{u})\big)(t, \cdot) \mathbf{D}\big) : \mathbf{D} &=
\exp(-t/\tau) \big((\mathbf{H}^{0} \mathbf{D}) : \mathbf{D}\big) \\
&+ \int_{0}^{t} \exp\big(-(t - s)/\tau\big) \Big(\big(\mathbf{F}\big(\nabla_{\sigma} \mathbf{u}(s, \cdot)\big) \mathbf{D}\big) : \mathbf{D}\Big) \, \mathrm{d}s \\
&\geq \kappa \|\mathbf{D}\|_{\mathbb{R}^{k \times d}}^{2} \text{ for a.e. } t \in [0, T]
\text{ with } \kappa = \alpha \exp(-T/\tau),
\end{split}
\label{EQUATION_UNIFORM_POSITIVITY_OF_H}
\end{align}
and, therefore, $\mathbf{H}(t, \cdot) \in \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})$ for a.e. $t \in [0, T]$ a.e. in $G$.
On the other hand, by virtue of Equation (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H})
and Assumption \ref{ASSUMPTION_ON_F_AND_RHO},
\begin{align}
\begin{split}
\big\|\big(\mathbf{H}(\mathbf{u})\big)(t, \cdot)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))}
&\leq
\|\mathbf{H}^{0}\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} \\
&+ \int_{0}^{t} \big\|\mathbf{F}\big(\nabla_{\sigma} \mathbf{u}(s, \cdot)\big)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} \mathrm{d}s \\
&\leq \|\mathbf{H}^{0}\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} + c T
\text{ for a.e. } t \in [0, T].
\end{split}
\label{EQUATION_REGULAR_CASE_A_PRIORI_ESTIMATE_FOR_H}
\end{align}
For $t \in (0, T]$, multiplying Equation (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED})
with $\mathbf{u}$ in $L^{2}(0, t; \mathcal{H})$, we obtain
\begin{equation}
\tfrac{1}{2} \big\|\mathbf{u}(t, \cdot)\big\|_{\mathcal{H}}^{2} - \tfrac{1}{2} \big\|\mathbf{u}(0, \cdot)\big\|_{\mathcal{H}}^{2} +
\int_{0}^{t} a\big(\mathbf{u}(s, \cdot), \mathbf{u}(s, \cdot); \big(\mathbf{H}(\mathbf{u})\big)(s, \cdot)\big) \mathrm{d}s = 0. \notag
\end{equation}
Hence, using Equation (\ref{EQUATION_UNIFORM_POSITIVITY_OF_H}), we arrive at
\begin{equation}
\big\|\mathbf{u}(t, \cdot)\big\|_{\mathcal{H}}^{2} +
2 \kappa \int_{0}^{t} \big\|\nabla \mathbf{u}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2} \mathrm{d}s \leq
\|\mathbf{u}^{0}\|_{\mathcal{H}}^{2}
\text{ for a.e. } t \in [0, T].
\label{EQUATION_REGULAR_CASE_A_PRIORI_ESTIMATE_FOR_U}
\end{equation}
\textit{Constructing a fixed point mapping: }
Consider the Banach space
\begin{equation}
\mathscr{X} := C^{0}\big([0, T], \mathcal{H}\big) \notag
\end{equation}
equipped with the standard topology.
Now, we define an operator $\mathscr{F} \colon \mathscr{X} \to \mathscr{X}$
which maps each $\tilde{\mathbf{u}} \in \mathscr{X}$
to the unique weak solution
\begin{equation}
\mathbf{u} \in H^{1}(0, T; \mathcal{V}') \cap L^{2}(0, T; \mathcal{V})
\hookrightarrow \mathscr{X}
\label{EQUATION_REGULAR_CASE_FIXED_POINT_MAPPING_IMAGE}
\end{equation}
of Equations (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED})--(\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H}).
(See Equation (\ref{EQUATION_INTERPOLATION_THEOREM_WEAK_SOLUTIONS}) for the embedding above.)
By virtue of Equations (\ref{EQUATION_UNIFORM_POSITIVITY_OF_H}) and (\ref{EQUATION_REGULAR_CASE_A_PRIORI_ESTIMATE_FOR_H}),
the bilinear non-autonomous form
$t \mapsto a\big(\cdot, \cdot; \big(\mathbf{H}(\mathbf{u})\big)(t, \cdot)\big)$
is uniformly coercive and bounded.
Hence, by Theorem \ref{THEOREM_MAXIMAL_REGULARITY_WEAK_FORM},
the operator $\mathscr{F}$ is well-defined.
\textit{Proving the contraction property of $\mathscr{F}$: }
To show the contraction property, similar to the classical existence and uniqueness theorem of Picard \& Lindel\"of,
we first equip the Banach space $\mathscr{X}$ with an equivalent norm
\begin{equation}
\|\mathbf{u}\|_{\mathscr{X}} :=
\max_{t \in [0, T]} e^{-(L + 1) t} \big\|\mathbf{u}(t, \cdot)\big\|_{\mathcal{H}} \notag
\end{equation}
with a positive constant $L$ to be selected later.
For the sake of simplicity, we keep the same notation for this new isomorphic space.
For $\tilde{\mathbf{u}}_{1}, \tilde{\mathbf{u}}_{2} \in \mathscr{X}$, let
\begin{equation}
\mathbf{u}_{1} := \mathscr{F}(\tilde{\mathbf{u}}_{1}) \text{ and }
\mathbf{u}_{2} := \mathscr{F}(\tilde{\mathbf{u}}_{2}). \notag
\end{equation}
By definition, $\bar{\mathbf{u}} := \mathbf{u}_{1} - \mathbf{u}_{2}$
solves then the non-autonomous linear (w.r.t. $\bar{\mathbf{u}}$) problem
\begin{align}
\partial_{t} \bar{\mathbf{u}} + \mathcal{A}\big(\mathbf{H}(\tilde{\mathbf{u}}_{1}\big)\big) \bar{\mathbf{u}} &=
\Big(\mathcal{A}\big(\mathbf{H}(\tilde{\mathbf{u}}_{1}\big)\big) - \mathcal{A}\big(\mathbf{H}(\tilde{\mathbf{u}}_{2}\big)\big)\Big) \mathbf{u}_{2}
\text{ in } L^{2}(0, T; \mathcal{V}'),
\label{EQUATION_REGULAR_CASE_CONTRACTION_EQUATION_1} \\
\bar{\mathbf{u}}(0, \cdot) &= \mathbf{0} \text{ in } \mathcal{H}.
\label{EQUATION_REGULAR_CASE_CONTRACTION_EQUATION_2}
\end{align}
For $t \in (0, T]$, multiplying Equation (\ref{EQUATION_REGULAR_CASE_CONTRACTION_EQUATION_1})
with $\bar{\mathbf{u}}$ in $L^{2}(0, t; \mathcal{H})$, we get
\begin{align}
\tfrac{1}{2} \big\|\bar{\mathbf{u}}(t, \cdot)\big\|_{\mathcal{H}} &\leq
\int_{0}^{t}
\big\|\mathbf{H}\big(\tilde{\mathbf{u}}_{1}(s, \cdot)\big) - \mathbf{H}\big(\tilde{\mathbf{u}}_{2}(s, \cdot)\big)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} \notag \\
&\times
\big\|\nabla \mathbf{u}_{2}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}
\big\|\nabla \bar{\mathbf{u}}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})} \mathrm{d}s \notag \\
&\leq
\Big(\max_{s \in [0, t]} \big\|\mathbf{H}\big(\tilde{\mathbf{u}}_{1}(s, \cdot)\big) - \mathbf{H}\big(\tilde{\mathbf{u}}_{2}(s, \cdot)\big)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))}\Big)
\label{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_PRELIMINARY} \\
&\times
\big\|\nabla \mathbf{u}_{2}\big\|_{L^{2}((0, T) \times G, \mathbb{R}^{k \times d})}
\big\|\nabla \bar{\mathbf{u}}\big\|_{L^{2}((0, T) \times G, \mathbb{R}^{k \times d})} \notag \\
&\leq
\tilde{C}
\Big(\max_{s \in [0, t]} \big\|\mathbf{H}\big(\tilde{\mathbf{u}}_{1}(s, \cdot)\big) - \mathbf{H}\big(\tilde{\mathbf{u}}_{2}(s, \cdot)\big)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))}\Big), \notag
\end{align}
where
\begin{align}
\tilde{C} := \frac{\|\mathbf{u}^{0}\|_{\mathcal{H}}^{2}}{\kappa}
\geq
\|\nabla \mathbf{u}_{2}\|_{L^{2}((0, T) \times G, \mathbb{R}^{k \times d})}
\|\nabla \bar{\mathbf{u}}\|_{L^{2}((0, T) \times G, \mathbb{R}^{k \times d})} \notag
\end{align}
by virtue of Equation (\ref{EQUATION_REGULAR_CASE_A_PRIORI_ESTIMATE_FOR_U}).
Using Equation (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H}),
we estimate
\begin{align}
\begin{split}
\max_{s \in [0, t]} \big\|\mathbf{H}\big(\tilde{\mathbf{u}}_{1}(s, \cdot)\big) &- \mathbf{H}\big(\tilde{\mathbf{u}}_{2}(s, \cdot)\big)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} \\
&\leq
\int_{0}^{t} \big\|\mathbf{F}\big(\nabla_{\sigma} \tilde{\mathbf{u}}_{1}(s, \cdot)\big) - \mathbf{F}\big(\nabla_{\sigma} \tilde{\mathbf{u}}_{2}(s, \cdot)\big)\big\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} \mathrm{d}s \\
&\leq C_{\mathrm{Lip}} \int_{0}^{t}
\big\|\tilde{\mathbf{u}}_{1}(s, \cdot) - \tilde{\mathbf{u}}_{2}(s, \cdot)\big\|_{\mathcal{H}} \mathrm{d}s,
\end{split}
\label{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_FOR_H_BAR}
\end{align}
where $C_{\mathrm{Lip}}$ is the Lipschitz constant of the mapping $\mathcal{F}_{\sigma}$
from Lemma \ref{LEMMA_F_SIGMA_LIPSCHITZ_CONTINUOUS}.
Combining the estimates from Equations
(\ref{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_PRELIMINARY}) and
(\ref{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_FOR_H_BAR}), we arrive at
\begin{equation}
\big\|\bar{\mathbf{u}}(t, \cdot)\big\|_{\mathcal{H}} \leq
L \int_{0}^{t}
\big\|\tilde{\mathbf{u}}_{1}(s, \cdot) - \tilde{\mathbf{u}}_{2}(s, \cdot)\big\|_{\mathcal{H}} \mathrm{d}s
\text{ with } L := 2 \tilde{C} C_{\mathrm{Lip}}.
\label{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_IMPROVED}
\end{equation}
Multiplying Equation (\ref{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_IMPROVED}) with $\exp\big(-(L + 1)t\big)$, we estimate
\begin{align}
e^{-(L + 1)t} \big\|\bar{\mathbf{u}}(t, \cdot)\big\|_{\mathcal{H}} &\leq
L e^{-(L + 1)t} \int_{0}^{t}
\big\|\tilde{\mathbf{u}}_{1}(s, \cdot) - \tilde{\mathbf{u}}_{2}(s, \cdot)\big\|_{\mathcal{H}} \mathrm{d}s \notag \\
&\leq
L e^{-(L + 1)t} \int_{0}^{t} e^{(L + 1) s} \Big(e^{-(L + 1) s}
\big\|\tilde{\mathbf{u}}_{1}(s, \cdot) - \tilde{\mathbf{u}}_{2}(s, \cdot)\big\|_{\mathcal{H}}\Big) \mathrm{d}s \notag \\
&\leq
\Big(L e^{-(L + 1)t} \int_{0}^{t} e^{(L + 1) s} \mathrm{d}s\Big)
\|\tilde{\mathbf{u}}_{1} - \tilde{\mathbf{u}}_{2}\|_{\mathscr{X}}
\label{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_FINAL} \\
&\leq
\Big(L e^{-(L + 1)t} \frac{e^{(L + 1) t} - 1}{L + 1}\Big) \|\tilde{\mathbf{u}}_{1} - \tilde{\mathbf{u}}_{2}\|_{\mathscr{X}} \notag \\
&\leq \frac{L}{L + 1} \|\tilde{\mathbf{u}}_{1} - \tilde{\mathbf{u}}_{2}\|_{\mathscr{X}}. \notag
\end{align}
Hence, taking the maximum over $t \in [0, T]$ on the left-hand side of Equation (\ref{EQUATION_REGULAR_CASE_CONTRACTION_ESTIMATE_FINAL}),
we find
\begin{equation}
\big\|\mathscr{F}(\tilde{\mathbf{u}}_{1}) - \mathscr{F}(\tilde{\mathbf{u}}_{2})\big\|_{\mathscr{X}}
\leq \frac{L}{L + 1} \|\tilde{\mathbf{u}}_{1} - \tilde{\mathbf{u}}_{2}\|_{\mathscr{X}}, \notag
\end{equation}
which implies that $\mathscr{F}$ is a contraction.
By virtue of Banach's fixed point theorem,
$\mathscr{F}$ possesses then a unique fixed point $\mathbf{u} \in \mathscr{X}$.
Hence, applying Lemma \ref{LEMMA_F_SIGMA_LIPSCHITZ_CONTINUOUS}
to Equation (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H})
and recalling Equation (\ref{EQUATION_UNIFORM_POSITIVITY_OF_H}), we further get
\begin{equation}
\mathbf{H} \in
W^{1, \infty}\Big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\Big) \text{ and }
\mathbf{H}(t, \cdot) \in L^{\infty}\big(G, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big)
\text{ for a.e. } t \in [0, T]. \notag
\end{equation}
Taking into account Equation (\ref{EQUATION_REGULAR_CASE_FIXED_POINT_MAPPING_IMAGE})
as well as the equivalence between
Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_ABSTRACT_3})
and (\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED})--(\ref{EQUATION_CAUCHY_PROBLEM_REGULAR_CASE_REDUCED_OPERATOR_H}),
we deduce $\mathbf{u}$ is the unique weak solution to
Equations (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1})--(\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_4}).
\end{proof}
\begin{corollary}
\label{COROLLARY_STRONG_SOLUTION_SIGMA_GREATER_ZERO}
Under the conditions of Theorem \ref{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_REGULAR_CASE}, let $\tilde{\mathbf{u}}^{0} \in \mathcal{V}$.
The weak solution $(\mathbf{u}, \mathbf{H})^{T}$ given in the Theorem is then also a strong solution.
\end{corollary}
\begin{proof}
For the unique weak solution $(\mathbf{u}, \mathbf{H})^{T}$, consider the linear initial value problem
\begin{equation}
\partial_{t} \mathbf{u}(t) + \tilde{\mathcal{A}}(t) \mathbf{u}(t) = \mathbf{0} \text{ for } t \in (0, T), \quad
\mathbf{u}(0) = \tilde{\mathbf{u}}^{0},
\label{EQUATION_PARABOLIC_PROBLEM_LINEAR_A_POSTERIORI}
\end{equation}
where
\begin{equation}
\tilde{\mathcal{A}}(t) := \mathcal{A}\big(\mathbf{H}(t, \cdot)\big) \text{ for } t \in [0, T]. \notag
\end{equation}
The associated non-autonomous form is of bounded variation since
\begin{align}
\begin{split}
|\tilde{a}(\mathbf{u}, \mathbf{v}; t) - \tilde{a}(\mathbf{u}, \mathbf{v}; s)| &=
\Big|\int_{G} \big(\big(\mathbf{H}(t, \cdot) - \mathbf{H}(s, \cdot)\big) \nabla \mathbf{u}\big) : (\nabla \mathbf{v}) \mathrm{d}\mathbf{x}\Big| \\
&\leq \int_{s}^{t} \|\partial_{t} \mathbf{H}(\xi, \cdot)\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))}
\|\mathbf{u}\|_{\mathcal{V}} \|\mathbf{v}\|_{\mathcal{V}} \mathrm{d}\xi \\
&\leq (t - s) \|\mathbf{H}\|_{W^{1, \infty}(0, T; L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d})))} \|\mathbf{u}\|_{\mathcal{V}} \|\mathbf{v}\|_{\mathcal{V}}
\end{split}
\label{EQUATION_PARABOLIC_PROBLEM_LINEAR_A_POSTERIORI_BOUNDED_VARIATION}
\end{align}
for $0 \leq s \leq t \leq T$ and $\mathbf{u}, \mathbf{v} \in \mathcal{V}$.
Theorem \ref{THEOREM_MAXIMAL_REGULARITY} applied to Equation (\ref{EQUATION_PARABOLIC_PROBLEM_LINEAR_A_POSTERIORI}) yields then
\begin{equation}
\mathbf{u} \in W^{1, 2}(0, T; \mathcal{H}) \text{ and }
\mathrm{div}\big(\mathbf{H} \nabla \mathbf{u}\big) \in L^{2}(0, T; \mathcal{H}). \notag
\end{equation}
Hence, $(\mathbf{u}, \mathbf{H})^{T}$ is also a strong solution.
\end{proof}
\section{Limiting case $\sigma = 0$}
\label{SECTION_SOLUTION_THEORY_LIMITING_CASE}
In this remaining section, we want to obtain a solution theory for the original PDE system
(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4}).
In contrast to the regularized case,
a slightly stronger assumption on the function $\mathbf{F}$ is required here.
It is precisely the one used in \cite[Equation (4)]{CoAy1998}.
\begin{assumption}
\label{ADDITIONAL_ASSUMPTION_ON_F}
Let the function $\mathbf{F} \colon \mathbb{R}^{k \times d} \to \mathcal{S}_{\geq 0}(\mathbb{R}^{k \times d})$ be weakly differentiable
such that $\mathbf{F}$ together with its weak Jacobian are essentially bounded by a positive number $c > 0$.
\end{assumption}
Now, we introduce the nonlinear mapping $\mathcal{F} \colon H^{1}(G, \mathbb{R}^{k}) \to L^{\infty}(G, \mathcal{S}_{\geq 0}(\mathbb{R}^{k \times d}))$ with
\begin{equation}
\mathcal{F}(\mathbf{u}) := \mathbf{F}(\nabla \mathbf{u}) \text{ for } \mathbf{u} \in H^{1}(G, \mathbb{R}^{k}). \notag
\end{equation}
Obviously, $\mathcal{F}$ is well-defined. Indeed, $\mathbf{F}(\nabla \mathbf{u})$ is strongly measurable as a composition of two strongly measurable functions
and essentially bounded by virtue of Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F}.
Unlike $\mathcal{F}_{\sigma}$, generally speaking, $\mathcal{F}$ is not Lipschitzian from $H^{1}$ to $L^{\infty}$.
At the same time, due to Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F}, we trivially have:
\begin{lemma}
\label{LEMMA_PROPERTIES_OF_F}
The mapping $\mathcal{F}$ is Lipschitzian from $H^{1}(G, \mathbb{R}^{k})$ to $L^{2}(G, \mathbb{R}^{k \times d})$.
\end{lemma}
With the notations of Section \ref{SECTION_SOLUTION_THEORY_REGULAR_CASE},
the abstract form of Equations (\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_1})--(\ref{EQUATION_HYPERBOLIC_FILTER_POOLED_FORM_4})
reads as
\begin{align}
\partial_{t} \mathbf{u} + \mathcal{A}(\mathbf{H}) \mathbf{u} &= 0 \text{ in } L^{2}(0, T; \mathcal{V}'), \label{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1} \\
\tau \partial_{t} \mathbf{H} + \mathbf{H} - \mathcal{F}(\mathbf{u}) &= 0 \text{ in } L^{2}\big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\big), \label{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_2} \\
\mathbf{u}(0, \cdot) = \tilde{\mathbf{u}}^{0} \text{ in } \mathcal{H}, \quad
\mathbf{H}(0, \cdot) &= \tilde{\mathbf{H}}^{0} \text{ in } L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big), \label{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3}
\end{align}
We adopt the following solution notions for Equations
(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3}).
Note that the regularity condition on $\mathbf{H}$ differs from the one employed in the regularized case.
\begin{definition}
\label{DEFINITION_SOLUTION_NOTION_LIMITING_CASE}
For $T > 0$, we call a function $(\mathbf{u}, \mathbf{H})^{T} \colon [0, T] \times \bar{G} \to \mathbb{R}^{k} \times \mathcal{S}(\mathbb{R}^{k \times d})$
a weak solution to Equations (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3}) on $[0, T]$
if there exists a number $\kappa > 0$ such that the function pair $(\mathbf{u}, \mathbf{H})^{T}$
satisfies $\mathbf{H} \in \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})$ a.e. in $(0, T) \times G$ and
\begin{equation}
\mathbf{u} \in H^{1}(0, T; \mathcal{V}') \cap L^{2}(0, T; \mathcal{V}), \quad
\mathbf{H} \in H^{1}\big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\big)
\notag
\end{equation}
and fulfils the abstract differential equations
(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_2})
and the initial conditions (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3})
in the sense of the interpolation embedding (\ref{EQUATION_INTERPOLATION_THEOREM_WEAK_SOLUTIONS}).
If $\mathbf{u}$ additionally satisfies
\begin{equation}
\mathbf{u} \in H^{1}(0, T; \mathcal{H}) \text{ and } \mathrm{div}(\mathbf{H} \nabla \mathbf{u}) \in L^{2}(0, T; \mathcal{H}),
\notag
\end{equation}
we refer to $(\mathbf{u}, \mathbf{H})^{T}$ as a strong solution.
\end{definition}
\begin{theorem}
\label{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_LIMITING_CASE}
Let
$(\tilde{\mathbf{u}}^{0}, \tilde{\mathbf{H}}^{0})^{T} \in \mathcal{H} \times L^{\infty}\big(G, \mathcal{S}_{\geq \alpha}(\mathbb{R}^{k \times d})\big)$ for some $\alpha > 0$.
Under Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F},
for any $T > 0$, the initial-boundary value problem
(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3})
possesses then a weak solution $(\mathbf{u}, \mathbf{H})^{T}$ on $[0, T]$ satisfying
\begin{equation}
\mathbf{H}(t, \cdot) \in L^{\infty}\big(G, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big) \text{ for a.e. } t \in [0, T]
\text{ with } \kappa := \alpha \exp(-T/\tau). \notag
\end{equation}
In addition, weak solutions are globally extendable (not necessarily uniquely).
\end{theorem}
\begin{proof}
Repeating the proof of Equation (\ref{EQUATION_UNIFORM_POSITIVITY_OF_H}),
we get the {\it a priori} positive definiteness for $\mathbf{H}$, i.e.,
\begin{equation}
\mathbf{H}(t, \cdot) \in L^{\infty}\big(G, \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d})\big)
\text{ for a.e. } t \in [0, T]
\text{ with } \kappa := \alpha \exp(-T/\tau). \notag
\end{equation}
Solving Equation (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_2}) for $\mathbf{H}$
and plugging the result into Equation (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1}),
Equations (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3})
reduce to
\begin{align}
\partial_{t} \mathbf{u} +
\mathcal{A}(\mathbf{H}(\mathbf{u})) \mathbf{u} = 0 \text{ in } (0, T), \quad
\mathbf{u}(0, \cdot) = \mathbf{u}^{0}
\label{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED}
\end{align}
with
\begin{equation}
\big(\mathbf{H}(\mathbf{u})\big)(t, \cdot) = \exp(-t/\tau) \mathbf{H}^{0} +
\frac{1}{\tau} \int_{0}^{t} \exp\big(-(t - s)/\tau\big) \mathbf{F}\big(\nabla \mathbf{u}(s, \cdot)\big) \mathrm{d}s
\text{ for } t \in [0, T].
\label{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED_OPERATOR_H}
\end{equation}
We solve Equation (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED})
by applying Schauder \& Tychonoff's fixed point theorem (see, e.g., \cite[p. 165]{Mo1975}).
\textit{Constructing a fixed point mapping: }
Consider the convex compact subset
\begin{equation}
\mathscr{Y} := L^{2}(0, T; \mathcal{V}) \text{ of Hilbert space }
\mathscr{X} := W^{-1, 2}(0, T; \mathcal{H}), \notag
\end{equation}
where the compactness is a direct consequence of
Rellich \& Kondrachov's imbedding theorem and \cite[Theorem 5.1]{Am2000}.
Let $\mathscr{F}$ map an element $\tilde{\mathbf{u}} \in \mathscr{Y}$
to the (unique) solution
$\mathbf{u} \in H^{1}(0, T; \mathcal{V}') \cap L^{2}(0, T; \mathcal{V})$
of Equation (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED})
with $\mathbf{H} = \mathbf{H}(\tilde{\mathbf{u}})$ given in Equation (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED_OPERATOR_H}).
We now show $\mathscr{F}$ is well-defined.
For $\tilde{\mathbf{u}} \in \mathscr{Y}$,
Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F} implies
$\mathbf{F}(\nabla \tilde{\mathbf{u}}) \in L^{2}\Big(0, T; L^{\infty}\big(G, \mathcal{S}_{\geq 0}(\mathbb{R}^{k \times d})\big)\Big)$.
Hence, by virtue of Equation (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED_OPERATOR_H}),
\begin{equation}
\mathbf{H} \equiv \mathbf{H}(\tilde{\mathbf{u}}) \in W^{1, 2}\Big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\Big)
\text{ and } \mathbf{H} \in \mathcal{S}_{\geq \kappa}(\mathbb{R}^{k \times d}) \text{ a.e. in } (0, T) \times G.
\label{EQUATION_LIMITING_CASE_PROPERTIES_OF_H}
\end{equation}
Here, we used the strong measurability of $\mathbf{H}$ and the boundedness of respective norms.
Using the fundamental theorem of calculus and Cauchy \& Schwarz' inequality, we estimate
\begin{align}
\big|a(\mathbf{u}, \mathbf{v}; t)\big| &\leq
\int_{G} \Big|\big(\mathbf{H}(t, \mathbf{x}) \nabla \mathbf{u}\big) : (\nabla \mathbf{v})\Big| \mathrm{d}\mathbf{x} \\
&\leq
\int_{G} \Big|\Big(\big(\mathbf{H}^{0} + \int_{0}^{t} \partial_{t} \mathbf{H}(\xi, \cdot) \mathrm{d}\xi\big) \nabla \mathbf{u}\Big) : (\nabla \mathbf{v})\Big| \mathrm{d}\mathbf{x} \\
&\leq
\Big(\|\mathbf{H}^{0}\|_{L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d}))} +
\sqrt{T} \|\mathbf{H}\|_{W^{1, 2}(0, T; L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d})))}\Big)
\|\mathbf{u}\|_{\mathcal{V}} \|\mathbf{v}\|_{\mathcal{V}}
\end{align}
for $t \in (0, T]$ and $\mathbf{u}, \mathbf{v} \in \mathcal{V}$, where
\begin{equation}
a(\mathbf{u}, \mathbf{v}; t) :=
\int_{G} (\mathbf{H} \nabla \mathbf{u}) : (\nabla \mathbf{v}) \mathrm{d}\mathbf{x}. \notag
\end{equation}
This together with the fact $\mathbf{u}^{0} \in \mathcal{H}$
combined with Theorem \ref{THEOREM_MAXIMAL_REGULARITY_WEAK_FORM}
yields a unique solution
$\mathbf{u} \in \mathscr{Y}$ to Equation (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED}).
Hence, $\mathscr{F}$ is well-defined as a self-mapping on $\mathscr{Y}$.
\textit{Showing the continuity of $\mathscr{F}$: }
For an arbitrary, but fixed $\tilde{\mathbf{u}} \in \mathscr{Y}$
consider a sequence $(\tilde{\mathbf{u}}_{n})_{n \in \mathbb{N}} \subset \mathscr{Y}$
such that $\tilde{\mathbf{u}}_{n} \to \tilde{\mathbf{u}}$ in $\mathscr{Y}$ as $n \to \infty$.
Further, let $\mathbf{u} := \mathscr{F}(\tilde{\mathbf{u}})$
and $\mathbf{u}_{n} := \mathscr{F}(\tilde{\mathbf{u}}_{n})$ for $n \in \mathbb{N}$.
We want to show $\mathbf{u}_{n} \to \mathbf{u}$ in $\mathscr{Y}$ as $n \to \infty$.
Note that the sequential continuity of $\mathscr{F}$
is equivalent to the usual continuity since $\mathscr{Y}$ is metrizable.
Let $\bar{\mathbf{u}}_{n} := \mathbf{u} - \mathbf{u}_{n}$.
By definition, $\bar{\mathbf{u}}_{n}$ solves the Cauchy problem
\begin{align}
\partial_{t} \bar{\mathbf{u}}_{n} +
\mathcal{A}\big(\mathbf{H}(\tilde{\mathbf{u}})\big) \bar{\mathbf{u}}_{n} =
\mathbf{f}_{n} \text{ in } (0, T), \quad
\bar{\mathbf{u}}_{n}(0, \cdot) = \mathbf{0}
\end{align}
with
\begin{equation}
\mathbf{f}_{n} := \Big(\mathcal{A}\big(\mathbf{H}(\tilde{\mathbf{u}}_{n})\big) -
\mathcal{A}\big(\mathbf{H}(\tilde{\mathbf{u}})\big)\Big) \tilde{\mathbf{u}}_{n}
= \mathrm{div}\Big(\big(\mathbf{H}(\tilde{\mathbf{u}}) - \mathbf{H}(\tilde{\mathbf{u}}_{n})\big) \nabla \tilde{\mathbf{u}}_{n}\Big)
\text{ for } n \in \mathbb{N}. \notag
\end{equation}
Due to the Lipschitz continuity of $\mathcal{F}$ (cf. Lemma \ref{LEMMA_PROPERTIES_OF_F}),
we have
\begin{equation}
\mathbf{F}(\nabla \tilde{\mathbf{u}}_{n}) \to \mathbf{F}(\nabla \tilde{\mathbf{u}})
\text{ in } L^{2}\big(0, T; L^{2}(G, \mathbb{R}^{k \times d})\big) \text{ as } n \to \infty. \notag
\end{equation}
Hence, by virtue of Equation (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED_OPERATOR_H}),
\begin{equation}
\mathbf{H}(\tilde{\mathbf{u}}_{n}) \to \mathbf{H}(\tilde{\mathbf{u}})
\text{ in } H^{1}\big(0, T; L^{2}(G, \mathbb{R}^{k \times d})\big) \hookrightarrow
L^{2}\big(0, T; L^{2}(G, \mathbb{R}^{k \times d})\big) \text{ as } n \to \infty.
\label{EQUATION_CONTINUITY_OF_H_IN_L_2}
\end{equation}
Using Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F} to verify
\begin{equation}
\sup_{n \in \mathbb{N}}
\Big\|\big(\mathbf{H}(\tilde{\mathbf{u}}) -
\mathbf{H}(\tilde{\mathbf{u}}_{n})\big) \nabla \tilde{\mathbf{u}}_{n}\Big\|_{L^{2}((0, T) \times G, \mathbb{R}^{k \times d})}
\leq 2C \sup_{n \in \mathbb{N}} \|\nabla \tilde{\mathbf{u}}_{n}\|_{L^{2}((0, T) \times G, \mathbb{R}^{k \times d})} < \infty, \notag
\end{equation}
we apply Lebesgue's dominated convergence theorem to Equation (\ref{EQUATION_CONTINUITY_OF_H_IN_L_2}) to obtain
\begin{equation}
\big(\mathbf{H}(\tilde{\mathbf{u}}) - \mathbf{H}(\tilde{\mathbf{u}}_{n})\big) \nabla \tilde{\mathbf{u}}_{n} \to \mathbf{0}
\text{ in } L^{2}\big((0, T) \times G, \mathbb{R}^{k \times d}\big) \text{ as } n \to \infty. \notag
\end{equation}
Hence, since $\mathrm{div}$ is a continuous linear mapping between the Hilbert spaces
$L^{2}(G, \mathbb{R}^{k \times d})$ and $\mathcal{V'}$, we find
\begin{equation}
\mathrm{div}\Big(\big(\mathbf{H}(\tilde{\mathbf{u}}) - \mathbf{H}(\tilde{\mathbf{u}}_{n})\big) \nabla \tilde{\mathbf{u}}_{n}\Big) \to \mathbf{0}
\text{ in } L^{2}(0, T; \mathcal{V}') \text{ as } n \to \infty. \notag
\end{equation}
Therefore, by virtue of Theorem \ref{THEOREM_MAXIMAL_REGULARITY_WEAK_FORM},
\begin{equation}
\|\bar{\mathbf{u}}_{n}\|_{L^{2}(0, T; \mathcal{H})}^{2} \leq
\frac{1}{\kappa^{2}}
\Big\|\mathrm{div}\Big(\big(\mathbf{H}(\tilde{\mathbf{u}}) - \mathbf{H}(\tilde{\mathbf{u}}_{n})\big) \nabla \tilde{\mathbf{u}}_{n}\Big)\Big\|_{L^{2}(0, T; \mathcal{V}')}^{2}
\to 0 \text{ as } n \to \infty \notag
\end{equation}
implying $\mathscr{F}$ is continuous.
{\it Applying the fixed point theorem: }
Now, by virtue of Schauder \& Tychonoff's fixed point theorem,
$\mathscr{F}$ possesses a fixed point $\bar{\mathbf{u}} \in L^{2}\big(0, T; \mathcal{V}\big)$ (not necessarily unique).
Using Equations (\ref{EQUATION_CAUCHY_PROBLEM_LIMITING_CASE_REDUCED}) and
(\ref{EQUATION_LIMITING_CASE_PROPERTIES_OF_H}),
we finally deduce $\bar{\mathbf{u}} \in H^{1}(0, T; \mathcal{V}')$.
Letting $\bar{\mathbf{H}} := \mathbf{H}(\bar{\mathbf{u}})$,
we easily verify $(\bar{\mathbf{u}}, \bar{\mathbf{H}})^{T}$ satisfies
Equations (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3}),
which completes the proof.
\end{proof}
\begin{corollary}
\label{COROLLARY_STRONG_SOLUTION_SIGMA_ZERO}
Under the conditions of Theorem \ref{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_LIMITING_CASE},
let $\tilde{\mathbf{u}}^{0} \in \mathcal{V}$.
Any weak solution $(\mathbf{u}, \mathbf{H})^{T}$ to Equations
(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3})
given in Theorem \ref{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_LIMITING_CASE}
is then also a strong solution satisfying
\begin{equation}
\mathbf{u} \in L^{2}\big(0, T; W^{1 + s, 2}(G, \mathbb{R}^{k})\big)
\text{ for any } s \in [0, 1/2).
\notag
\end{equation}
\end{corollary}
\begin{proof}
{\it Strongness: }
Similar to Equation (\ref{EQUATION_PARABOLIC_PROBLEM_LINEAR_A_POSTERIORI_BOUNDED_VARIATION}),
we use the fundamental theorem of calculus together with Cauchy \& Schwarz' inequality to estimate
\begin{align}
\begin{split}
|a(\mathbf{u}, \mathbf{v}; t) - a(\mathbf{u}, \mathbf{v}; s)| &=
\Big|\int_{G} \big(\big(\mathbf{H}(t, \cdot) - \mathbf{H}(s, \cdot)\big) \nabla \mathbf{u}\big) : (\nabla \mathbf{v}) \mathrm{d}\mathbf{x}\Big| \\
&\leq \int_{s}^{t} \int_{G} \|\partial_{t} \mathbf{H}(\xi, \mathbf{x})\|_{\mathcal{S}(\mathbb{R}^{k \times d})} \|\mathbf{u}\|_{\mathcal{V}} \|\mathbf{v}\|_{\mathcal{V}} \mathrm{d}\mathbf{x} \mathrm{d}\xi \\
&\leq \sqrt{t - s} \; \|\mathbf{H}\|_{W^{1, 2}(0, T; L^{\infty}(G, \mathcal{S}(\mathbb{R}^{k \times d})))} \|\mathbf{u}\|_{\mathcal{V}} \|\mathbf{v}\|_{\mathcal{V}}
\end{split}
\notag
\end{align}
for $0 \leq s \leq t \leq T$ and $\mathbf{u}, \mathbf{v} \in \mathcal{V}$, where
\begin{equation}
a(\mathbf{u}, \mathbf{v}; t) :=
\int_{G} (\mathbf{H} \nabla \mathbf{u}) : (\nabla \mathbf{v}) \mathrm{d}\mathbf{x}. \notag
\end{equation}
This together with the assumption $\mathbf{u}^{0} \in \mathcal{V}$
enables us to deduce $\mathbf{u} \in \mathcal{MR}_{a}(\mathcal{H})$
by virtue of Theorem \ref{THEOREM_MAXIMAL_REGULARITY_WEAK_FORM}.
{\it Extra regularity: }
Applying \cite[Theorem 4]{Sa1998} to the following family of elliptic problems
\begin{equation}
\mathcal{A}\big(\mathbf{H}(t, \cdot)\big) \mathbf{u}(t, \cdot) = \mathbf{g}(t, \cdot) \text{ for a.e. } t \in [0, T]
\text{ with } \mathbf{g} = -\partial_{t} \mathbf{u} \in L^{2}(0, T; \mathcal{H}), \notag
\end{equation}
the desired regularity follows.
\end{proof}
Concerning uniqueness for Equations (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3}),
no uniqueness results appear to be known in the literature,
either for weak or for strong solutions in the sense of Definition \ref{DEFINITION_SOLUTION_NOTION_LIMITING_CASE} (cf. \cite{BeCha2005}).
The same is true for the quasilinear heat equation in non-divergence form (see \cite{ArChi2010}), etc.
Nonetheless, under a boundedness condition for $\nabla \mathbf{u}$, the following uniqueness result can be proved.
\begin{theorem}
\label{THEOREM_LIMITING_CASE_UNIQUENESS}
Let $(\mathbf{u}_{1}, \mathbf{H}_{1})^{T}$, $(\mathbf{u}_{2}, \mathbf{H}_{2})^{T}$
be two weak solutions to Equations
(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3})
such that
$\nabla \mathbf{u}_{1}, \nabla \mathbf{u}_{2} \in L^{\infty}\big((0, T) \times G, \mathbb{R}^{k \times d}\big)$.
Then $\mathbf{u}_{1} \equiv \mathbf{u}_{2}$ a.e. in $(0, T) \times G$.
\end{theorem}
\begin{proof}
Letting
$\bar{\mathbf{u}} := \mathbf{u}_{1} - \mathbf{u}_{2}$,
$\bar{\mathbf{H}} := \mathbf{H}_{1} - \mathbf{H}_{2}$,
we observe that $(\bar{\mathbf{u}}, \bar{\mathbf{H}})^{T}$ satisfies
\begin{align}
\partial_{t} \bar{\mathbf{u}} - \mathrm{div}\,\big(\mathbf{H}_{1} \nabla \bar{\mathbf{u}}\big)
- \mathrm{div}\,\big(\bar{\mathbf{H}} \nabla \mathbf{u}_{2}\big) &= \mathbf{0}
\text{ in } L^{2}(0, T; \mathcal{V}'),
\label{EQUATION_WEAK_SOLUTION_UNIQUENESS_SOLUTION_DIFFERENCE_1} \\
\tau \partial_{t} \bar{\mathbf{H}} + \bar{\mathbf{H}} - \big(\mathbf{F}(\nabla \mathbf{u}_{1}) - \mathbf{F}(\nabla \mathbf{u}_{2})\big) &= \mathbf{0}
\text{ in } L^{2}\Big(0, T; L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big)\Big),
\label{EQUATION_WEAK_SOLUTION_UNIQUENESS_SOLUTION_DIFFERENCE_2} \\
\bar{\mathbf{u}}(0, \cdot) = \mathbf{0} \text{ in } \mathcal{V}, \quad \bar{\mathbf{H}}(0, \cdot) &= \mathbf{0} \text{ in } L^{\infty}\big(G, \mathcal{S}(\mathbb{R}^{k \times d})\big).
\label{EQUATION_WEAK_SOLUTION_UNIQUENESS_SOLUTION_DIFFERENCE_3}
\end{align}
Multiplying Equation (\ref{EQUATION_WEAK_SOLUTION_UNIQUENESS_SOLUTION_DIFFERENCE_1})
with $\bar{\mathbf{u}}$ in $L^{2}\big(0, T; L^{2}(G, \mathbb{R}^{k})\big)$,
using Green's formula and exploiting the uniform positive definiteness of $\mathbf{H}(t, \cdot)$,
we obtain using H\"older's and Young's inequalities
\begin{align}
\begin{split}
\big\|\bar{\mathbf{u}}(t, \cdot)\big\|_{\mathcal{H}}^{2} &\leq
- 2 \kappa \int_{0}^{t} \big\|\nabla \bar{\mathbf{u}}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2} \mathrm{d}s +
\int_{0}^{t} \big\|\big(\bar{\mathbf{H}} \nabla \mathbf{u}_{2}\big) : \nabla \bar{\mathbf{u}}\big\|_{L^{1}(G)} \mathrm{d}s \\
&\leq
- 2 \kappa \int_{0}^{t} \big\|\nabla \bar{\mathbf{u}}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2} \mathrm{d}s +
\big\|\nabla \mathbf{u}_{2}\big\|_{L^{\infty}((0, T) \times G, \mathbb{R}^{k \times d})} \times \\
&\times
\int_{0}^{t} \big\|\bar{\mathbf{H}}\big\|_{L^{2}(G, \mathcal{S}(\mathbb{R}^{k \times d}))}
\big\|\nabla \bar{\mathbf{u}}\big\|_{L^{2}(G, \mathbb{R}^{k \times d})} \mathrm{d}s \\
&\leq
- \kappa \int_{0}^{t} \big\|\nabla \bar{\mathbf{u}}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2} \mathrm{d}s +
\tilde{C}_{1} \int_{0}^{t} \big\|\bar{\mathbf{H}}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} \mathrm{d}s,
\end{split}
\label{EQUATION_UNIQUENESS_ESTIMATE_U_BAR}
\end{align}
where $\tilde{C}_{1}$ depends on the $L^{\infty}$-norm of $\nabla \mathbf{u}_{2}$.
Further, multiplying Equation (\ref{EQUATION_WEAK_SOLUTION_UNIQUENESS_SOLUTION_DIFFERENCE_2})
with $\bar{\mathbf{H}}(t, \cdot)$ in $L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})$
as well as exploiting Cauchy \& Schwarz' and Young's inequalities, we estimate
\begin{equation}
\tau \partial_{t} \big\|\bar{\mathbf{H}}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2}
\leq \tilde{C}_{2} \big\|\bar{\mathbf{H}}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} +
\kappa \big\|\nabla \bar{\mathbf{u}}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2}
\label{EQUATION_UNIQUENESS_ESTIMATE_H_BAR}
\end{equation}
for some $\tilde{C}_{2} > 0$.
Integrating Equation (\ref{EQUATION_UNIQUENESS_ESTIMATE_H_BAR}) w.r.t. $t$
and adding the result to Equation (\ref{EQUATION_UNIQUENESS_ESTIMATE_U_BAR}), we get
\begin{align*}
\big\|\bar{\mathbf{u}}(t, \cdot)\big\|_{\mathcal{H}}^{2} &+ \tau \big\|\bar{\mathbf{H}}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} \\
&\leq \tilde{C} \int_{0}^{t} \Big(
\big\|\bar{\mathbf{u}}(s, \cdot)\big\|_{\mathcal{H}}^{2} + \tau \big\|\bar{\mathbf{H}}(s, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2}\Big) \mathrm{d}s
\end{align*}
for some $\tilde{C} > 0$. Now, the claim follows by virtue of Gronwall's inequality.
\end{proof}
In a similar fashion, we can prove:
\begin{corollary}
Under the conditions of Theorem \ref{THEOREM_LIMITING_CASE_UNIQUENESS},
for any $T > 0$, there exists a constant $\tilde{C} > 0$ such that
\begin{align*}
\max_{0 \leq t \leq T}
\Big(\big\|\mathbf{u}_{1}(t, \cdot) - &\mathbf{u}_{2}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k})}^{2} +
\big\|\mathbf{H}_{1}(t, \cdot) - \mathbf{H}_{2}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2}\Big) \\
&\leq \tilde{C} \Big(
\big\|\mathbf{u}_{1}(0, \cdot) - \mathbf{u}_{2}(0, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k})}^{2} +
\big\|\mathbf{H}_{1}(0, \cdot) - \mathbf{H}_{2}(0, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2}\Big).
\end{align*}
\end{corollary}
For a global weak solution $(\mathbf{u}, \mathbf{H})^{T}$
to Equations (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3})
given in Theorem \ref{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_LIMITING_CASE},
consider the energy functional
\begin{equation}
\mathcal{E}(t) :=
\frac{1}{2} \int_{G} \big\|\mathbf{u}(t, \mathbf{x})\big\|_{\mathbb{R}^{k}}^{2} \mathrm{d}\mathbf{x} +
\frac{\tau}{2} \int_{G} \big\|\mathbf{H}(t, \mathbf{x}) - \mathbf{F}(\mathbf{0})\big\|_{\mathbb{R}^{(k \times d) \times (k \times d)}}^{2} \mathrm{d}\mathbf{x}.
\label{EQUATION_ENERGY_FUNCTIONAL}
\end{equation}
\begin{theorem}
In addition to the assumptions of Corollary \ref{COROLLARY_STRONG_SOLUTION_SIGMA_ZERO},
assume that there exists a number $\omega > 0$ such that
\begin{equation}
\mathbf{F}(\mathbf{D}) \in \mathcal{S}_{\geq \omega}(\mathbb{R}^{k \times d})
\text{ for all } \mathbf{D} \in \mathbb{R}^{k \times d}.
\label{EQUATION_ASSUMPTION_UNIFORM_POSITIVITY_F}
\end{equation}
The energy functional defined in Equation (\ref{EQUATION_ENERGY_FUNCTIONAL})
decays then exponentially along any strong solution of
Equations (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_1})--(\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_3}), i.e.,
\begin{equation}
\mathcal{E}(t) \leq C \exp(-2 \beta t) \mathcal{E}(0) \text{ for a.e. } t \geq 0
\text{ with appropriate } C, \beta > 0, \notag
\end{equation}
which implies
\begin{equation}
\lim_{t \to \infty} \; (\mathbf{u}, \mathbf{H})^{T}(t, \cdot) =
\big(\mathbf{0}, \mathbf{F}(\mathbf{0})\big)^{T} \text{ in }
L^{2}(G, \mathbb{R}^{k}) \times L^{2}\big(G, \mathbb{R}^{(k \times d) \times (k \times d)}\big). \notag
\end{equation}
\end{theorem}
\begin{proof}
Solving Equation (\ref{EQUATION_HYPERBOLIC_PDE_LIMITING_ABSTRACT_2}) for $\mathbf{H}$, we obtain
\begin{equation}
\mathbf{H}(t, \cdot) = \exp\big(-t/\tau\big) \mathbf{H}^{0} +
\frac{1}{\tau} \int_{0}^{t} \exp\big(-(t - s)/\tau\big) \mathbf{F}\big(\nabla \mathbf{u}(s, \cdot)\big) \mathrm{d}s. \notag
\end{equation}
Using assumption $\mathbf{H}^{0} \in \mathcal{S}_{\geq \alpha}(\mathbb{R}^{k \times d})$
of Theorem \ref{THEOREM_EXISTENCE_AND_UNIQUENESS_WEAK_SOLUTION_LIMITING_CASE}
and Equation (\ref{EQUATION_ASSUMPTION_UNIFORM_POSITIVITY_F}),
this implies
\begin{align}
\min \sigma\big(\mathbf{H}(t, \cdot)\big) &\geq \alpha \exp\big(-t/\tau\big) +
\frac{\omega}{\tau} \int_{0}^{t} \exp\big(-(t - s)/\tau\big) \mathrm{d}s \notag \\
&= \alpha \exp\big(-t/\tau\big) +
\omega \big(1 - \exp\big(-t/\tau\big)\big) \label{EQUATION_UNIFORM_POSITIVITY_OF_F_EXPONENTIAL_STABILITY_PROOF} \\
&\geq \min\big\{\alpha, \omega\big\} =: \kappa > 0 \notag
\end{align}
with $\sigma\big(\mathbf{H}(t, \cdot)\big)$ denoting the spectrum of $\mathbf{H}(t, \cdot)$.
For a.e. $t \geq 0$,
multiplying Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_1}) in $L^{2}(G, \mathbb{R}^{k})$ with $\mathbf{u}(t, \cdot)$,
using Green's formula, utilizing the boundary conditions (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_3})
and taking into account Equation (\ref{EQUATION_UNIFORM_POSITIVITY_OF_F_EXPONENTIAL_STABILITY_PROOF}), we obtain
\begin{equation}
\frac{1}{2} \partial_{t} \big\|\mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k})}^{2} \leq
-\kappa \big\|\nabla \mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2}. \notag
\end{equation}
By virtue of the second Poincar\'{e} inequality, this implies
\begin{equation}
\frac{1}{2} \partial_{t} \big\|\mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k})}^{2} \leq
-\frac{\kappa}{2} \big\|\nabla \mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2}
-\frac{\kappa C_{P}}{2} \big\|\mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k})}^{2}
\text{ for } t > 0.
\label{EQUATION_EXPONENTIAL_STABILITY_ESTIMATE_FOR_U}
\end{equation}
Subtracting $\mathbf{F}(\mathbf{0})$ from Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_2}), we get
\begin{equation}
\tau \partial_{t} \big(\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})\big) +
\big(\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})\big) =
\mathbf{F}\big(\nabla \mathbf{u}(t, \cdot)\big) - \mathbf{F}(\mathbf{0})
\text{ for a.e. } t > 0.
\label{EQUATION_HYPERBOLIC_PDE_REGULARIZED_2_F_OF_ZERO_SUBTRACTED}
\end{equation}
Hence, multiplying Equation (\ref{EQUATION_HYPERBOLIC_PDE_REGULARIZED_2_F_OF_ZERO_SUBTRACTED})
in $L^{2}\big(G, \mathbb{R}^{(k \times d) \times (k \times d)}\big)$
with $\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})$ and using Assumption \ref{ADDITIONAL_ASSUMPTION_ON_F}, we arrive at
\begin{align*}
\frac{\tau}{2} \partial_{t} \big\|\mathbf{H}(t, \cdot) - &\mathbf{F}(\mathbf{0})\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2}
+ \big\|\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} \\
&\leq c \big\|\nabla \mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}
\big\|\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}.
\end{align*}
Now, using Young's inequality, we estimate
\begin{align}
\begin{split}
\frac{\tau}{2} \partial_{t} \big\|\mathbf{H}(t, \cdot) - &\mathbf{F}(\mathbf{0})\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} \\
&\leq
-\frac{1}{2} \big\|\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} +
\frac{c^{2}}{2} \big\|\nabla \mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k \times d})}^{2}.
\end{split}
\label{EQUATION_EXPONENTIAL_STABILITY_ESTIMATE_FOR_H}
\end{align}
Multiplying Equation (\ref{EQUATION_EXPONENTIAL_STABILITY_ESTIMATE_FOR_H}) with $\frac{\kappa C_{P}}{c^{2}}$
and adding the result to Equation (\ref{EQUATION_EXPONENTIAL_STABILITY_ESTIMATE_FOR_U}) yields
\begin{align*}
\partial_{t} \mathcal{E}(t) &\leq
-\frac{\kappa C_{P}}{2 c^{2}} \big\|\mathbf{u}(t, \cdot)\big\|_{L^{2}(G, \mathbb{R}^{k})}^{2}
-\frac{1}{2} \big\|\mathbf{H}(t, \cdot) - \mathbf{F}(\mathbf{0})\big\|_{L^{2}(G, \mathbb{R}^{(k \times d) \times (k \times d)})}^{2} \\
&\leq
-\min\big\{\kappa C_{P}/c^{2}, 1/\tau\big\} \mathcal{E}(t) \text{ for a.e. } t \geq 0.
\end{align*}
Hence, the exponential decay of $\mathcal{E}$ is a direct consequence of Gronwall's inequality.
\end{proof}
\begin{appendix}
\section{Maximal $L^{2}$-Regularity for Non-Autonomous Forms}
\label{SECTION_APPENDIX}
In this appendix, we briefly summarize the theory of maximal $L^{2}$-regularity for non-autonomous forms.
We start with the classical theory dating back to Dautray \& Lions (cf. \cite{DauLio1992}), which furnishes the existence and uniqueness of weak solutions.
Further, we present a recent theory developed by Dier in \cite{Die2015}
guaranteeing the existence of strong solutions under a boundedness assumption on the variation of the non-autonomous form associated with the `elliptic' part of the evolution problem.
Let $\mathcal{H}$ and $\mathcal{V}$ be separable Hilbert spaces such that $\mathcal{V}$ is continuously and densely embedded into $\mathcal{H}$.
For $T > 0$, we consider the initial value problem
\begin{equation}
\dot{u}(t) + \mathcal{A}(t)u(t) = f(t) \text{ in } L^{2}(0, T; \mathcal{V}'), \quad u(0) = u^{0} \in \mathcal{H}. \label{EQUATION_CAUCHY_PROBLEM_APPENDIX}
\end{equation}
Further, let
\begin{equation}
a \colon [0, T] \times \mathcal{V} \times \mathcal{V} \to \mathbb{C}, \quad (t, u, v) \mapsto a(u, v; t)
\end{equation}
be a non-autonomous sesquilinear form, i.e.,
$a(u, v; \cdot)$ is measurable for all $u, v \in \mathcal{V}$ and $a(\cdot, \cdot; t)$ is sesquilinear for a.e. $t \in [0, T]$.
Let $a$ be continuous in $u$ and $v$ uniformly w.r.t. $t$, i.e., assume that there exists a number $M > 0$ such that
\begin{equation}
|a(u, v; t)| \leq M \|u\|_{\mathcal{V}} \|v\|_{\mathcal{V}} \text{ for all } u, v \in \mathcal{V} \text{ and a.e. } t \in [0, T]. \notag
\end{equation}
Additionally, let $a$ be uniformly coercive, i.e., assume that there exists a number $\alpha > 0$ such that
\begin{equation}
\mathrm{Re}\, a(u, u; t) \geq \alpha \|u\|_{\mathcal{V}}^{2} \text{ for any } u \in \mathcal{V} \text{ and a.e. } t \in [0, T]. \notag
\end{equation}
With $\mathcal{V}'$ denoting the antidual of $\mathcal{V}$,
the linear bounded operator $\mathcal{A}(t) \colon \mathcal{V} \to \mathcal{V}'$ associated with $a(\cdot, \cdot; t)$ for $t \in [0, T]$ is defined as
\begin{equation}
\langle \mathcal{A}(t) u, v\rangle_{\mathcal{V}'; \mathcal{V}} := a(u, v; t) \text{ for } u, v \in \mathcal{V}. \notag
\end{equation}
A classical result due to Lions states the following well-posedness result in the class of weak solutions.
\begin{theorem}
\label{THEOREM_MAXIMAL_REGULARITY_WEAK_FORM}
For every $f \in L^{2}(0, T; \mathcal{V}')$ and $u^{0} \in \mathcal{H}$, there exists a unique weak solution
\begin{equation}
u \in L^{2}(0, T; \mathcal{V}) \cap H^{1}(0, T; \mathcal{V}') \notag
\end{equation}
to the initial value problem (\ref{EQUATION_CAUCHY_PROBLEM_APPENDIX}).
Moreover, we have the continuous embedding
\begin{equation}
L^{2}(0, T; \mathcal{V}) \cap H^{1}(0, T; \mathcal{V}') \hookrightarrow C^{0}\big([0, T], \mathcal{H}\big)
\label{EQUATION_INTERPOLATION_THEOREM_WEAK_SOLUTIONS}
\end{equation}
and the estimate
\begin{equation}
\|u\|_{L^{2}(0, T; \mathcal{V})}^{2} \leq \frac{1}{\alpha^{2}} \|f\|_{L^{2}(0, T; \mathcal{V}')}^{2} + \frac{1}{\alpha} \|u^{0}\|_{\mathcal{H}}^{2}. \notag
\end{equation}
\end{theorem}
For the weak solution to be strong, additional assumptions on the non-autonomous form $a$ are required.
In the following, let the non-autonomous form $a$ be of bounded variation, i.e., assume that there exists a nondecreasing function $g \colon [0, T] \to [0, \infty)$ such that
\begin{equation}
|a(u, v; t) - a(u, v; s)| \leq \big(g(t) - g(s)\big) \|u\|_{\mathcal{V}} \|v\|_{\mathcal{V}} \text{ for all } u, v \in \mathcal{V}
\text{ and } 0 \leq s \leq t \leq T. \notag
\end{equation}
The maximal regularity class for the operator family $\big(\mathcal{A}(t)\big)_{t \in [0, T]}$ is then defined as
\begin{equation}
\mathcal{MR}_{a}(\mathcal{H}) := \big\{u \in L^{2}(0, T; \mathcal{V}) \cap H^{1}(0, T; \mathcal{H}) \,|\, \mathcal{A} u \in L^{2}(0, T; \mathcal{H})\big\}. \notag
\end{equation}
Under conditions above, \cite[Section 4]{Die2015} provides the following well-posedness result.
\begin{theorem}
\label{THEOREM_MAXIMAL_REGULARITY}
For every $f \in L^{2}(0, T; \mathcal{H})$ and $u^{0} \in \mathcal{V}$, there exists a unique strong solution $u \in \mathcal{MR}_{a}(\mathcal{H})$
to the initial value problem (\ref{EQUATION_CAUCHY_PROBLEM_APPENDIX}).
Moreover, $\mathcal{MR}_{a}(\mathcal{H}) \hookrightarrow C^{0}\big([0, T], \mathcal{V}\big)$ and
\begin{equation}
\|u\|_{L^{\infty}(0, T; \mathcal{V})}^{2} \leq \frac{1}{\alpha}\Big(\|f\|_{L^{2}(0, T; \mathcal{H})}^{2} + M \|u^{0}\|_{\mathcal{V}}^{2}\Big)
\exp\Big(\tfrac{1}{\alpha}\big(g(T) - g(0)\big)\Big). \notag
\end{equation}
\end{theorem}
\end{appendix}
\section*{Acknowledgment}
This work has been funded by the ERC-CZ Project LL1202 `MOdelling REvisited + MOdel REduction'
at Charles University in Prague, Czech Republic
and the Deutsche Forschungsgemeinschaft (DFG) through CRC 1173 at Karlsruhe Institute of Technology, Germany.
\end{document} |
\begin{document}
\title[Asymptotically regular semigroups]{On the structure of fixed-point
sets of asymptotically regular semigroups}
\author[A. Wi\'{s}nicki]{Andrzej Wi\'{s}nicki}
\begin{abstract}
We extend a few recent results of G\'{o}rnicki (2011) asserting that the set
of fixed points of an asymptotically regular mapping is a retract of its
domain. In particular, we prove that in some cases the resulting retraction is
H\"{o}lder continuous. We also characterise Bynum's coefficients and the
Opial modulus in terms of nets.
\end{abstract}
\subjclass[2010]{Primary 47H10; Secondary 46B20, 47H20, 54C15.}
\keywords{Asymptotically regular mapping, retraction, fixed point, Opial
property, Bynum's coefficients, weakly null nets. }
\address{Andrzej Wi\'{s}nicki, Institute of Mathematics, Maria Curie-Sk\l
odowska University, 20-031 Lublin, Poland}
\email{awisnic@hektor.umcs.lublin.pl}
\maketitle
\section{Introduction.}
The notion of asymptotic regularity, introduced by Browder and Petryshyn in
\cite{BrPe}, has become a standing assumption in many results concerning
fixed points of nonexpansive and more general mappings. Recall that a
mapping $T:M\rightarrow M$ acting on a metric space $(M,d)$ is said to be
asymptotically regular if
\begin{equation*}
\lim_{n\rightarrow \infty }d(T^{n}x,T^{n+1}x)=0
\end{equation*}
for all $x\in M.$ Ishikawa~\cite{Is} proved that if $C$ is a bounded closed
convex subset of a Banach space $X$ and $T:C\rightarrow C$ is nonexpansive,
then the mapping $T_{\lambda }=(1-\lambda )I+\lambda T$ is asymptotically
regular for each $\lambda \in (0,1).$ Edelstein and O'Brien \cite{EdOb}
showed independently that $T_{\lambda }$ is uniformly asymptotically regular
over $x\in C,$ and Goebel and Kirk \cite{GoKi3} proved that the convergence
is even uniform with respect to all nonexpansive mappings from $C$ into $C$.
Other examples of asymptotically regular mappings are given by the result of
Anzai and Ishikawa \cite{AnIs} (see also \cite{XuYa}): if $T$ is an affine
mapping acting on a bounded closed convex subset of a locally convex space $
X $, then $T_{\lambda }=(1-\lambda )I+\lambda T$ is uniformly asymptotically
regular.
In 1987, Lin \cite{Li} constructed a uniformly asymptotically regular
Lipschitz mapping in $\ell _{2}$ without fixed points which extended an
earlier construction of Tingley \cite{Ti}. Subsequently, Maluta, Prus and Wo
\'{s}ko \cite{MaPrWo} proved that there exists a continuous fixed-point free
asymptotically regular mapping defined on any bounded convex subset of a
normed space which is not totally bounded (see also \cite{Er}). For the
fixed-point existence theorems for asymptotically regular mappings we refer
the reader to the papers by T. Dom\'{\i}nguez Benavides, J. G\'{o}rnicki,
M. A. Jap\'{o}n Pineda and H. K. Xu (see \cite{DoJa, DoXu, Go1}).
It was shown in \cite{SeWi} that the set of fixed points of a k-uniformly
Lipschitzian mapping in a uniformly convex space is a retract of its domain
if $k$ is close to $1$. In recent papers \cite{GoT,GoTai,GoN}, J. G\'{o}rnicki
proved several results concerning the structure of fixed-point sets
of asymptotically regular mappings in uniformly convex spaces. In this paper
we continue this work and extend a few of G\'{o}rnicki's results in two
aspects: we consider a more general class of spaces and prove that in some
cases, the fixed-point set $\mathrm{Fix\,}T$ is not only a (continuous)
retract but even a H\"{o}lder continuous retract of the domain. We present
our results in a more general case of a one-parameter nonlinear semigroup.
We also characterise Bynum's coefficients and the Opial modulus in terms of
nets.
\section{Preliminaries}
Let $G$ be an unbounded subset of $[0,\infty )$ such that $t+s,t-s\in G$ for
all $t,s\in G$ with $t>s$ (e.g., $G=[0,\infty )$ or $G=\mathbb{N}$). By a
nonlinear semigroup on $C$ we shall mean a one-parameter family of mappings $
\mathcal{T}=\{T_{t}:t\in G\}$ from $C$ into $C$ such that $
T_{t+s}x=T_{t}\,T_{s}x$ for all $t,s\in G$ and $x\in C$. In particular, we
do not assume in this paper that $\{T_{t}:t\in G\}$ is strongly continuous.
We use a symbol $|T|$ to denote the exact Lipschitz constant of a mapping $
T:C\rightarrow C$, i.e.,
\begin{equation*}
|T|=\inf \{k:\Vert Tx-Ty\Vert \leq k\Vert x-y\Vert \ \text{for\ all}\ x,y\in
C\}.
\end{equation*}
If $T$ is not Lipschitzian we define $|T|=\infty $.
A semigroup $\mathcal{T}=\{T_{t}:t\in G\}$ from $C$ into $C$ is said to be
asymptotically regular if $\lim_{t}\left\Vert T_{t+h}x-T_{t}x\right\Vert =0$
for every $x\in C$ and $h\in G.$
Assume now that $C$ is convex and weakly compact and $\mathcal{T}
=\{T_{t}:t\in G\}$ is a nonlinear semigroup on $C$ such that $s(\mathcal{T}
)=\liminf_{t}|T_{t}|<\infty .$ Choose a sequence $(t_{n})$ of elements in $G$
such that $\lim_{n\rightarrow \infty }t_{n}=\infty $ and $s(\mathcal{T}
)=\lim_{n\rightarrow \infty }\left\vert T_{t_{n}}\right\vert .$ By
Tikhonov's theorem, there exists a pointwise weakly convergent subnet $
(T_{t_{n_{\alpha }}})_{\alpha \in \emph{A}}$ of $(T_{t_{n}}).$ We denote it
briefly by $(T_{t_{\alpha }})_{\alpha \in \emph{A}}.$ For every $x\in C$,
define
\begin{equation}
Lx=w\text{-}\lim_{\alpha }T_{t_{\alpha }}x, \label{Lx}
\end{equation}
i.e., $Lx$ is the weak limit of the net $(T_{t_{\alpha }}x)_{\alpha \in
\emph{A}}$. Notice that $Lx$ belongs to $C$ since $C$ is convex and weakly
compact. The weak lower semicontinuity of the norm implies
\begin{equation*}
\Vert Lx-Ly\Vert \leq \liminf_{\alpha }\Vert T_{t_{\alpha }}x-T_{t_{\alpha
}}y\Vert \leq \limsup_{n\rightarrow \infty }\Vert T_{t_{n}}x-T_{t_{n}}y\Vert
\leq s(\mathcal{T})\Vert x-y\Vert .
\end{equation*}
We formulate the above observation as a separate lemma.
\begin{lemma}
\label{nonexp}Let $C$ be a convex weakly compact subset of a Banach space $X$
and let $\mathcal{T}=\{T_{t}:t\in G\}$ be a semigroup on $C$ such that $s(
\mathcal{T})=\liminf_{t}|T_{t}|<\infty .$ Then the mapping $L:C\rightarrow C$
defined by (\ref{Lx}) is $s(\mathcal{T})$-Lipschitz.
\end{lemma}
We end this section with the following variant of a well known result which
is crucial for our work (see, e.g., \cite[Prop. 1.10]{BeLi}).
\begin{lemma}
\label{holder}Let $(X,d)$ be a complete bounded metric space and let $
L:X\rightarrow X$ be a k-Lipschitz mapping. Suppose there exists $0<\gamma
<1 $ and $c>0$ such that $d(L^{n+1}x,L^{n}x)\leq c\gamma ^{n}$ for
every $x\in X$. Then $Rx=\lim_{n\rightarrow \infty }L^{n}x$ is a H\"{o}lder
continuous mapping.
\end{lemma}
\begin{proof}
We may assume that $\diam X<1$. Fix $x\neq y$ in $X$ and notice that for any
$n\in \mathbb{N}$,
\begin{equation*}
d(Rx,Ry)\leq d(Rx,L^{n}x)+d(L^{n}x,L^{n}y)+d(L^{n}y,Ry)\leq 2c\frac{\gamma
^{n}}{1-\gamma }+k^{n}d(x,y).
\end{equation*}
Take $\alpha <1$ such that $k\leq \gamma ^{1-\alpha ^{-1}}$ and put $\gamma
^{n-r}=d(x,y)^{\alpha }$ for some $n\in \mathbb{N}$ and $0<r\leq 1$. Then $
k^{n-1}\leq (\gamma ^{1-\alpha ^{-1}})^{n-r}$ and hence
\begin{equation*}
d(Rx,Ry)\leq 2c\frac{\gamma ^{n-r}}{1-\gamma }+k(\gamma ^{n-r})^{1-\alpha
^{-1}}d(x,y)=(\frac{2c}{1-\gamma }+k)d(x,y)^{\alpha }.
\end{equation*}
\end{proof}
\section{Bynum's coefficients and Opial's modulus in terms of nets}
From now on, $C$ denotes a nonempty convex weakly compact subset of a Banach
space $X$. Let $\mathcal{A}$ be a directed set, $(x_{\alpha })_{\alpha \in
\mathcal{A}}$ a bounded net in $X$, $y\in X$ and write
\begin{align*}
r(y,(x_{\alpha }))& =\limsup_{\alpha }\Vert x_{\alpha }-y\Vert , \\
r(C,(x_{\alpha }))& =\inf \{r(y,(x_{\alpha })):y\in C\}, \\
A(C,(x_{\alpha }))& =\{y\in C:r(y,(x_{\alpha }))=r(C,(x_{\alpha }))\}.
\end{align*}
The number $r(C,(x_{\alpha }))$ and the set $A(C,(x_{\alpha }))$ are called,
respectively, the asymptotic radius and the asymptotic center of $(x_{\alpha
})_{\alpha \in \mathcal{A}}$ relative to $C$. Notice that $A(C,(x_{\alpha
})) $ is nonempty convex and weakly compact. Write
\begin{equation*}
r_{a}(x_{\alpha })=\inf \{\limsup_{\alpha }\Vert x_{\alpha }-y\Vert :y\in
\overline{\conv}(\{x_{\alpha }:\alpha \in \mathcal{A}\})\}
\end{equation*}
and let
\begin{equation*}
\diam_{a}(x_{\alpha })=\inf_{\alpha }\sup_{\beta ,\gamma \geq \alpha }\Vert
x_{\beta }-x_{\gamma }\Vert
\end{equation*}
denote the asymptotic diameter of $(x_{\alpha })$.
The normal structure coefficient $\Nor(X)$ of a Banach space $X$ is defined
by
\begin{equation*}
\Nor(X)=\sup \left\{ k:k\,r(K)\leq \diam K\ \ \text{for\ each\ bounded\
convex\ set}\ K\subset X\right\} ,
\end{equation*}
where $r(K)=\inf_{y\in K}\sup_{x\in K}\Vert x-y\Vert $ is the Chebyshev
radius of $K$ relative to itself. Assuming that $X$ does not have the Schur
property, the weakly convergent sequence coefficient (or Bynum's
coefficient) is given by
\begin{equation*}
\WCS(X)
=\sup \left\{ k:k\,r_{a}(x_{n})\leq \diam_{a}(x_{n})\ \ \text{for\ each\
sequence}\ x_{n}\overset{w}{\longrightarrow }0\right\} ,
\end{equation*}
where $x_{n}\overset{w}{\longrightarrow }0$ means that $(x_{n})$ is weakly
null in $X$ (see \cite{By}). For Schur spaces, we define $\WCS(X)=2$.
It was proved independently in \cite{DoLop, Pr, Zh} that
\begin{equation}
\WCS(X)=\sup \left\{ k:k\,\limsup_{n}\Vert x_{n}\Vert \leq \diam_{a}(x_{n})\
\text{for each\ sequence}\ x_{n}\overset{w}{\longrightarrow }0\right\}
\label{wcs1}
\end{equation}
and, in \cite{DoLoXu}, that
\begin{equation*}
\WCS(X)=\sup \left\{ k:k\,\limsup_{n}\Vert x_{n}\Vert \leq D[(x_{n})]\ \text{
for\ each\ sequence}\ x_{n}\overset{w}{\longrightarrow }0\right\} ,
\end{equation*}
where $D[(x_{n})]=\limsup_{m}\limsup_{n}\left\Vert x_{n}-x_{m}\right\Vert .$
Kaczor and Prus \cite{KaPr} initiated a systematic study of assumptions
under which one can replace sequences by nets in a given condition. We
follow the arguments from that paper and use the well known method of
constructing basic sequences attributed to S. Mazur (see \cite{Pe}). Let us
first recall a variant of a classical lemma which can be proved in the same
way as for sequences (see, e.g., \cite[Lemma]{Pe}).
\begin{lemma}
\label{Ma} Let $\{x_{\alpha }\}_{\alpha \in \mathcal{A}}$ be a bounded net
in $X$ weakly converging to $0$ such that $\inf_{\alpha }\Vert x_{\alpha
}\Vert >0$. Then for every $\varepsilon >0$, $\alpha ^{\prime }\in \mathcal{A
}$ and for every finite dimensional subspace $E$ of $X$, there is $\alpha
>\alpha ^{\prime }$ such that
\begin{equation*}
\Vert e+tx_{\alpha }\Vert \geq (1-\varepsilon )\Vert e\Vert
\end{equation*}
for any $e\in E$ and every scalar $t.$
\end{lemma}
Recall that a sequence $(x_{n})$ is basic if and only if there exists a
number $c>0$ such that $\Vert \sum_{i=1}^{q}t_{i}x_{i}\Vert \leq c\Vert
\sum_{i=1}^{p}t_{i}x_{i}\Vert $ for any integers $p>q\geq 1$ and any
sequence of scalars $(t_{i})$. In the proof of the next lemma, based on
Mazur's technique, we follow in part the reasoning given in \cite[Cor. 2.6]
{KaPr}. Set $D[(x_{\alpha })]=\limsup_{\alpha }\limsup_{\beta }\left\Vert
x_{\alpha }-x_{\beta }\right\Vert .$
\begin{lemma}
\label{KaPr} Let $(x_{\alpha })_{\alpha \in \mathcal{A}}$ be a bounded net
in $X$ which converges to $0$ weakly but not in norm. Then there exists an
increasing sequence $(\alpha _{n})$ of elements of $\mathcal{A}$ such that $
\lim_{n}\Vert x_{\alpha _{n}}\Vert =\limsup_{\alpha }\Vert x_{\alpha }\Vert $
, $\diam_{a}(x_{\alpha _{n}})\leq D[(x_{\alpha })]$ and $(x_{\alpha _{n}})$
is a basic sequence.
\end{lemma}
\begin{proof}
Since $(x_{\alpha })_{\alpha \in \mathcal{A}}$ does not converge strongly to
$0$ and $D[(x_{\alpha _{s}})]\leq D[(x_{\alpha })]$ for any subnet $
(x_{\alpha _{s}})_{s\in \mathcal{B}}$ of $(x_{\alpha })_{\alpha \in \mathcal{
A}}$, we can assume, passing to a subnet, that $\inf_{\alpha }\Vert
x_{\alpha }\Vert >0$ and the limit $c=\lim_{\alpha }\Vert x_{\alpha }\Vert $
exists. Write $d=D[(x_{\alpha })]$. Let $(\varepsilon _{n})$ be a sequence
of reals from the interval $(0,1)$ such that $\Pi _{n=1}^{\infty
}(1-\varepsilon _{n})>0$. We shall define the following sequences $(\alpha
_{n})$ and $(\beta _{n})$ by induction.
Let us put $\alpha _{1}<\beta _{1}\in \mathcal{A}$ such that $\left\vert
\Vert x_{\alpha _{1}}\Vert -c\right\vert <1$ and $\sup_{\beta \geq \beta
_{1}}\Vert x_{\alpha _{1}}-x_{\beta }\Vert <d+1$. By the definitions of $c$
and $d$, there exists $\alpha ^{\prime }>\beta _{1}$ such that $\left\vert
\Vert x_{\alpha }\Vert -c\right\vert <\frac{1}{2}$ and $\inf_{\beta ^{\prime
}}\sup_{\beta \geq \beta ^{\prime }}\Vert x_{\alpha }-x_{\beta }\Vert <d+
\frac{1}{2}$ for every $\alpha \geq \alpha ^{\prime }.$ It follows from
Lemma \ref{Ma} that there exists $\alpha _{2}>\alpha ^{\prime }$ such that
\begin{equation*}
\Vert t_{1}x_{\alpha _{1}}+t_{2}x_{\alpha _{2}}\Vert \geq (1-\varepsilon
_{2})\Vert t_{1}x_{\alpha _{1}}\Vert
\end{equation*}
for any scalars $t_{1},t_{2}.$ Furthermore, $\left\vert \Vert x_{\alpha
_{2}}\Vert -c\right\vert <\frac{1}{2},$ and we can find $\beta _{2}>\alpha
_{2}$ such that $\sup_{\beta \geq \beta _{2}}\Vert x_{\alpha _{2}}-x_{\beta
}\Vert <d+\frac{1}{2}.$
Suppose now that we have chosen $\alpha _{1}<\beta _{1}<...<\alpha
_{n}<\beta _{n}$ $(n>1)$ in such a way that $\left\vert \Vert x_{\alpha
_{k}}\Vert -c\right\vert <\frac{1}{k}$, $\sup_{\beta \geq \beta _{k}}\Vert
x_{\alpha _{k}}-x_{\beta }\Vert <d+\frac{1}{k}$ and
\begin{equation*}
(1-\varepsilon _{k})\Vert t_{1}x_{\alpha _{1}}+...+t_{k-1}x_{\alpha
_{k-1}}\Vert \leq \Vert t_{1}x_{\alpha _{1}}+...+t_{k}x_{\alpha _{k}}\Vert
\end{equation*}
for any scalars $t_{1},...,t_{k}$, $k=2,...,n.$ From the definitions of $c$
and $d$, and by Lemma \ref{Ma}, we can find $\beta _{n+1}>\alpha
_{n+1}>\beta _{n}$ such that $\left\vert \Vert x_{\alpha _{n+1}}\Vert
-c\right\vert <\frac{1}{n+1}$, $\sup_{\beta \geq \beta _{n+1}}\Vert
x_{\alpha _{n+1}}-x_{\beta }\Vert \leq d+\frac{1}{n+1}$ and (considering a
subspace $E$ spanned by the elements $x_{\alpha _{1}},...,x_{\alpha _{n}}$
and putting $e=t_{1}x_{\alpha _{1}}+...+t_{n}x_{\alpha _{n}}$),
\begin{equation*}
(1-\varepsilon _{n+1})\Vert t_{1}x_{\alpha _{1}}+...+t_{n}x_{\alpha
_{n}}\Vert \leq \Vert t_{1}x_{\alpha _{1}}+...+t_{n+1}x_{\alpha _{n+1}}\Vert
\end{equation*}
for any scalars $t_{1},...,t_{n+1}$.
Notice that the sequence $(x_{\alpha _{n}})$ defined in this way satisfies $
\lim_{n\rightarrow \infty }\Vert x_{\alpha _{n}}\Vert =c$ and $\diam
_{a}(x_{\alpha _{n}})\leq d$. Furthermore,
\begin{equation*}
\Vert t_{1}x_{\alpha _{1}}+...+t_{p}x_{\alpha _{p}}\Vert \geq \Pi
_{n=q+1}^{p}(1-\varepsilon _{n})\Vert t_{1}x_{\alpha
_{1}}+...+t_{q}x_{\alpha _{q}}\Vert
\end{equation*}
for any integers $p>q\geq 1$ and any sequence of scalars $(t_{i})$. Hence $
(x_{\alpha _{n}})$ is a basic sequence.
\end{proof}
We are now in a position to give a characterization of the coefficient
$\WCS(X)$ in terms of nets. The abbreviation \textquotedblleft $\left\{
x_{\alpha }\right\} $ is r.w.c.\textquotedblright\ means that the set $
\left\{ x_{\alpha }:\alpha \in \mathcal{A}\right\} $ is relatively weakly
compact.
\begin{theorem}
\label{Wi1} Let $X$ be a Banach space without the Schur property and write
\begin{align*}
w_{1}& =\sup \left\{ k:k\,r_{a}(x_{\alpha })\leq \diam_{a}(x_{\alpha })\
\text{ for\ each\ net}\ x_{\alpha }\overset{w}{\longrightarrow }0,\text{ }
\left\{ x_{\alpha }\right\} \text{ is r.w.c.}\right\} , \\
w_{2}& =\sup \left\{ k:k\,\limsup_{\alpha }\Vert x_{\alpha }\Vert \leq \diam
_{a}(x_{\alpha })\ \text{for\ each\ net}\ x_{\alpha }\overset{w}{
\longrightarrow }0,\text{ }\left\{ x_{\alpha }\right\} \text{ is r.w.c.}
\right\} , \\
w_{3}& =\sup \left\{ k:k\,\limsup_{\alpha }\Vert x_{\alpha }\Vert \leq
D[(x_{\alpha })]\ \text{for\ each\ net}\ x_{\alpha }\overset{w}{
\longrightarrow }0,\text{ }\left\{ x_{\alpha }\right\} \text{ is r.w.c.}
\right\} .
\end{align*}
Then
\begin{equation*}
\WCS(X)=w_{1}=w_{2}=w_{3}.
\end{equation*}
\end{theorem}
\begin{proof}
Fix $k>w_{3}$ and choose a weakly null net $(x_{\alpha })$ such that the set
$\left\{ x_{\alpha }:\alpha \in \mathcal{A}\right\} $ is relatively weakly
compact and $k\,\limsup_{\alpha }\Vert x_{\alpha }\Vert >D[(x_{\alpha })].$
Then, by Lemma \ref{KaPr}, there exists an increasing sequence $(\alpha
_{n}) $ such that
\begin{equation*}
k\,\lim_{n}\Vert x_{\alpha _{n}}\Vert >D[(x_{\alpha })]\geq \diam
_{a}(x_{\alpha _{n}})
\end{equation*}
and $(x_{\alpha _{n}})$ is a basic sequence. Since the set $\left\{
x_{\alpha }:\alpha \in \mathcal{A}\right\} $ is relatively weakly compact,
we can assume (passing to a subsequence) that $(x_{\alpha _{n}})$ is weakly
convergent. Since it is a basic sequence, its weak limit equals zero. It
follows from (\ref{wcs1}) that $
\WCS(X)
\leq k$ and letting $k$ go to $w_{3}$ we have
\begin{equation*}
\WCS(X)
\leq w_{3}\leq w_{2}\leq w_{1}\leq
\WCS(X)
.
\end{equation*}
\end{proof}
Notice that a similar characterisation holds for the normal structure
coefficient.
\begin{theorem}
For a Banach space $X$,
\begin{equation*}
\Nor(X)=\sup \left\{ k:k\,r_{a}(x_{\alpha })\leq \diam_{a}(x_{\alpha })\
\text{for\ each bounded net}\ (x_{\alpha })\text{ in }X\right\} .
\end{equation*}
\end{theorem}
\begin{proof}
Let
\begin{equation*}
N_{1}=\sup \left\{ k:k\,r_{a}(x_{\alpha })\leq \diam_{a}(x_{\alpha })\ \text{
for\ each bounded net}\ (x_{\alpha })\text{ in }X\right\} .
\end{equation*}
Set $k>N_{1}$ and choose a bounded net $(x_{\alpha })$ such that $
k\,r_{a}(x_{\alpha })>\diam_{a}(x_{\alpha }).$ Fix $y\in \overline{\conv}
(\{x_{\alpha }:\alpha \in \mathcal{A}\})$ and notice that $
k\,\limsup_{\alpha }\Vert x_{\alpha }-y\Vert >\diam_{a}(x_{\alpha }).$ In a
straightforward way, we can choose a sequence $(\alpha _{n})$ such that
\begin{equation*}
k\,\lim_{n}\Vert x_{\alpha _{n}}-y\Vert =k\,\limsup_{\alpha }\Vert x_{\alpha
}-y\Vert >\diam_{a}(x_{\alpha })\geq \diam_{a}(x_{\alpha _{n}}).
\end{equation*}
It follows from \cite[Th. 1]{By} that $\Nor(X)\leq k$ and letting $k$ go to $
N_{1}$ we have $\Nor(X)\leq N_{1}.$ By \cite[Th. 1]{Lim}, $\Nor(X)\geq N_{1}$
and the proof is complete.
\end{proof}
In the next section we shall need a similar characterisation for the Opial
modulus of a Banach space $X,$ defined for each $c\geq 0$ by
\begin{equation*}
r_{X}(c)=\inf \left\{ \liminf_{n\rightarrow \infty }\left\Vert
x_{n}+x\right\Vert -1\right\} ,
\end{equation*}
where the infimum is taken over all $x\in X$ with $\left\Vert x\right\Vert
\geq c$ and all weakly null sequences $(x_{n})$ in $X$ such that $
\liminf_{n\rightarrow \infty }\left\Vert x_{n}\right\Vert \geq 1$ (see \cite
{LiTaXu}). We first prove the following counterpart of Lemma \ref{KaPr}.
\begin{lemma}
\label{KaPr2} Let $(x_{\alpha })_{\alpha \in \mathcal{A}}$ be a bounded net
in $X$ which converges to $0$ weakly but not in norm and $x\in X.$ Then
there exists an increasing sequence $(\alpha _{n})$ of elements of $\mathcal{
A}$ such that $\lim_{n}\Vert x_{\alpha _{n}}+x\Vert =\liminf_{\alpha }\Vert
x_{\alpha }+x\Vert ,$ $\lim_{n}\Vert x_{\alpha _{n}}\Vert \geq
\liminf_{\alpha }\Vert x_{\alpha }\Vert $ and $(x_{\alpha _{n}})$ is a basic
sequence.
\end{lemma}
\begin{proof}
Since $(x_{\alpha })_{\alpha \in \mathcal{A}}$ does not converge strongly to
$0$ and
\begin{equation*}
\liminf_{s}\Vert x_{\alpha _{s}}\Vert \geq \liminf_{\alpha }\Vert x_{\alpha
}\Vert
\end{equation*}
for any subnet $(x_{\alpha _{s}})_{s\in \mathcal{B}}$ of $(x_{\alpha
})_{\alpha \in \mathcal{A}}$, it is sufficient (passing to a subnet) to
consider only the case that $\inf_{\alpha }\Vert x_{\alpha }\Vert >0$ and
the limits $c_{1}=\liminf_{\alpha }\Vert x_{\alpha }+x\Vert $, $
c_{2}=\liminf_{\alpha }\Vert x_{\alpha }\Vert $ exist. Let $(\varepsilon
_{n})$ be a sequence of reals from the interval $(0,1)$ such that $\Pi
_{n=1}^{\infty }(1-\varepsilon _{n})>0$. We shall define the sequence $
(\alpha _{n})$ by induction.
Let us put $\alpha _{1}\in \mathcal{A}$ such that $\left\vert \Vert
x_{\alpha _{1}}+x\Vert -c_{1}\right\vert <1$ and $\left\vert \Vert x_{\alpha
_{1}}\Vert -c_{2}\right\vert <1$. By the definitions of $c_{1}$ and $c_{2}$,
there exists $\alpha ^{\prime }>\alpha _{1}$ such that $\left\vert \Vert
x_{\alpha }+x\Vert -c_{1}\right\vert <\frac{1}{2}$ and $\left\vert \Vert
x_{\alpha }\Vert -c_{2}\right\vert <\frac{1}{2}$ for every $\alpha \geq
\alpha ^{\prime }.$ It follows from Lemma \ref{Ma} that there exists $\alpha
_{2}>\alpha ^{\prime }$ such that
\begin{equation*}
\Vert t_{1}x_{\alpha _{1}}+t_{2}x_{\alpha _{2}}\Vert \geq (1-\varepsilon
_{2})\Vert t_{1}x_{\alpha _{1}}\Vert
\end{equation*}
for any scalars $t_{1},t_{2}.$ We can now proceed analogously to the proof
of Lemma \ref{KaPr} to obtain a basic sequence $(x_{\alpha _{n}})$ with the
desired properties.
\end{proof}
\begin{theorem}
\label{Wi2}For a Banach space $X$ without the Schur property and for $c\geq
0,$
\begin{equation*}
r_{X}(c)=\inf \left\{ \liminf_{\alpha }\left\Vert x_{\alpha }+x\right\Vert
-1\right\} ,
\end{equation*}
where the infimum is taken over all $x\in X$ with $\left\Vert x\right\Vert
\geq c$ and all weakly null nets $(x_{\alpha })$ in $X$ such that $
\liminf_{\alpha }\Vert x_{\alpha }\Vert \geq 1$ and the set $\left\{
x_{\alpha }:\alpha \in \mathcal{A}\right\} $ is relatively weakly compact.
\end{theorem}
\begin{proof}
Let $r_{1}(c)=\inf \left\{ \liminf_{\alpha }\left\Vert x_{\alpha
}+x\right\Vert -1\right\} ,$ where the infimum is taken as above. Fix $c\geq
0$ and take $k>r_{1}(c).$ Then there exist $x\in X$ with $\left\Vert
x\right\Vert \geq c$ and a weakly null net $(x_{\alpha })_{\alpha \in
\mathcal{A}}$ such that $\liminf_{\alpha }\Vert x_{\alpha }\Vert \geq 1,$ $
\left\{ x_{\alpha }:\alpha \in \mathcal{A}\right\} $ is relatively weakly
compact and
\begin{equation*}
\liminf_{\alpha }\left\Vert x_{\alpha }+x\right\Vert -1<k.
\end{equation*}
By Lemma \ref{KaPr2}, there exists an increasing sequence $(\alpha _{n})$ of
elements of $\mathcal{A}$ such that $\lim_{n}\Vert x_{\alpha _{n}}\Vert \geq
1,\lim_{n}\Vert x_{\alpha _{n}}+x\Vert -1<k$ and $(x_{\alpha _{n}})$ is a
basic sequence. Since $\left\{ x_{\alpha }:\alpha \in \mathcal{A}\right\} $
is relatively weakly compact, we can assume (passing to a subsequence) that $
(x_{\alpha _{n}})$ is weakly null. Hence $r_{X}(c)<k$ and since $k$ is an
arbitrary number greater than $r_{1}(c)$, it follows that $r_{X}(c)\leq
r_{1}(c).$ The reverse inequality is obvious.
\end{proof}
\section{Fixed-point sets as H\"{o}lder continuous retracts}
The following lemma may be proved in a similar way to \cite[Th. 7.2]{DoJaLo}.
\begin{lemma}
\label{main}Let $C$ be a nonempty convex weakly compact subset of a Banach
space $X$ and $\mathcal{T}=\{T_{t}:t\in G\}$ an asymptotically regular
semigroup on $C$ such that $s(\mathcal{T})=\lim_{\alpha }\left\vert
T_{t_{\alpha }}\right\vert $ for a pointwise weakly convergent subnet $
(T_{t_{\alpha }})_{\alpha \in \emph{A}}$ of $(T_{t})_{t\in G}.$ Let $
x_{0}\in C$, $x_{m+1}=w$-$\lim_{\alpha }T_{t_{\alpha }}x_{m},m=0,1,...,$ and
\begin{equation*}
B_{m}=\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m+1}\right\Vert .
\end{equation*}
Assume that
\begin{enumerate}
\item[(a)] $s(\mathcal{T})<\sqrt{\WCS(X)}$ or,
\item[(b)] $s(\mathcal{T})<1+r_{X}(1).$
\end{enumerate}
Then, there exists $\gamma <1$ such that $B_{m}\leq \gamma
B_{m-1}$ for any $m=1,2,\dots$.
\end{lemma}
\begin{proof}
It follows from the asymptotic regularity of $\{T_{t}:t\in G\}$ that
\begin{equation*}
\limsup_{\alpha }\left\Vert T_{t_{\alpha }-l}\,x-y\right\Vert
=\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x-y\right\Vert
\end{equation*}
for any $l\in G$ and $x,y\in C$. Thus
\begin{align*}
& D[(T_{t_{\alpha }}x_{m})]=\limsup_{\beta }\limsup_{\alpha }\left\Vert
T_{t_{\alpha }}x_{m}-T_{t_{\beta }}x_{m}\right\Vert \\
& \ \leq \limsup_{\beta }\left\vert T_{t_{\beta }}\right\vert
\limsup_{\alpha }\left\Vert T_{t_{\alpha }-t_{\beta }}x_{m}-x_{m}\right\Vert
=s(\mathcal{T})\limsup_{\alpha }\left\Vert T_{t_{\alpha
}}x_{m}-x_{m}\right\Vert .
\end{align*}
Hence, from Theorem \ref{Wi1} and from the weak lower semicontinuity of the
norm,
\begin{align*}
B_{m}& \leq \frac{D[(T_{t_{\alpha }}x_{m})]}{\WCS(X)}\leq \frac{s(\mathcal{T}
)}{\WCS(X)}\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m}\right\Vert
\\
& \leq \frac{s(\mathcal{T})}{\WCS(X)}\limsup_{\alpha }\liminf_{\beta
}\left\Vert T_{t_{\alpha }}x_{m}-T_{t_{\beta }}x_{m-1}\right\Vert \\
& \leq \frac{s(\mathcal{T})}{\WCS(X)}\limsup_{\alpha }\left\vert
T_{t_{\alpha }}\right\vert \limsup_{\beta }\left\Vert x_{m}-T_{t_{\beta
}-t_{\alpha }}x_{m-1}\right\Vert =\frac{(s(\mathcal{T}))^{2}}{\WCS(X)}
B_{m-1}.
\end{align*}
This gives (a). For (b), we can use Theorem \ref{Wi2} and proceed
analogously to the proof of \cite[Th. 7.2 ]{DoJaLo} (see also \cite[Th. 5]
{GoN}).
\end{proof}
We are now in a position to prove a qualitative semigroup version of \cite[
Th. 7.2 (a) (b)]{DoJaLo} which is in turn based on the results given in \cite
{DoJa, DoXu} (see also \cite{Ku}). It also extends, in a few directions,
\cite[Th. 5]{GoN}.
\begin{theorem}
\label{Thwcs}Let $C$ be a nonempty convex weakly compact subset of a Banach
space $X$ and $\mathcal{T}=\{T_{t}:t\in G\}$ an asymptotically regular
semigroup on $C.$ Assume that
\begin{enumerate}
\item[(a)] $s(\mathcal{T})<\sqrt{\WCS(X)}$ or,
\item[(b)] $s(\mathcal{T})<1+r_{X}(1).$
\end{enumerate}
Then $\mathcal{T}$ has a fixed point in $C$ and $\Fix\mathcal{T}
=\{x\in C:T_{t}x=x,\,t\in G\}$ is a H\"{o}lder continuous retract of $C.$
\end{theorem}
\begin{proof}
Choose a sequence $(t_{n})$ of elements in $G$ such that $\lim_{n\rightarrow
\infty }t_{n}=\infty $ and $s(\mathcal{T})=\lim_{n\rightarrow \infty
}\left\vert T_{t_{n}}\right\vert .$ Let $(T_{t_{n_{\alpha }}})_{\alpha \in
\emph{A}}$ (denoted briefly by $(T_{t_{\alpha }})_{\alpha \in \emph{A}}$) be
a pointwise weakly convergent subnet of $(T_{t_{n}}).$ Define, for every $
x\in C$,
\begin{equation*}
Lx=w-\lim_{\alpha }T_{t_{\alpha }}x.
\end{equation*}
Fix $x_{0}\in C$ and put $x_{m+1}=Lx_{m},m=0,1,....$ Let $
B_{m}=\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m+1}\right\Vert .$
By Lemma \ref{main}, there exists $\gamma <1$ such that $B_{m}\leq \gamma
B_{m-1}$ for any $m\geq 1.$ Since the norm is weak lower semicontinuous and
the semigroup is asymptotically regular,
\begin{align*}
& \Vert L^{m+1}x_{0}-L^{m}x_{0}\Vert =\left\Vert x_{m+1}-x_{m}\right\Vert
\leq \liminf_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m}\right\Vert \\
& \ \leq \liminf_{\alpha }\liminf_{\beta }\left\Vert T_{t_{\alpha
}}x_{m}-T_{t_{\beta }}x_{m-1}\right\Vert \leq \limsup_{\alpha }\left\vert
T_{t_{\alpha }}\right\vert \limsup_{\beta }\left\Vert x_{m}-T_{t_{\beta
}-t_{\alpha }}x_{m-1}\right\Vert \\
& \ =s(\mathcal{T})B_{m-1}\leq s(\mathcal{T})\gamma ^{m-1}\diam C
\end{align*}
for every $x_{0}\in C$ and $m\geq 1.$ Furthermore, by Lemma \ref{nonexp},
the mapping $L:C\rightarrow C$ is $s(\mathcal{T})$-Lipschitz. It follows
from Lemma \ref{holder} that $Rx=\lim_{n\rightarrow \infty }L^{n}x$ is a H
\"{o}lder continuous mapping on $C$. We show that $R$ is a retraction onto $
\Fix\mathcal{T}.$ It is clear that if $x\in \Fix\mathcal{T},$ then $Rx=x.$
Furthermore, for every $x\in C,m\geq 1$ and $\alpha \in \emph{A},$
\begin{equation*}
\Vert T_{t_{\alpha }}Rx-Rx\Vert \leq \left\Vert T_{t_{\alpha
}}Rx-T_{t_{\alpha }}L^{m}x\right\Vert +\left\Vert T_{t_{\alpha
}}L^{m}x-L^{m+1}x\right\Vert +\left\Vert L^{m+1}x-Rx\right\Vert
\end{equation*}
and hence
\begin{equation*}
\lim_{\alpha }\Vert T_{t_{\alpha }}Rx-Rx\Vert \leq s(\mathcal{T})\left\Vert
Rx-L^{m}x\right\Vert +B_{m}+\left\Vert L^{m+1}x-Rx\right\Vert .
\end{equation*}
Letting $m$ go to infinity, $\limsup_{\alpha }\Vert T_{t_{\alpha
}}Rx-Rx\Vert =0.$ Since $s(\mathcal{T})=\lim_{\beta }\left\vert T_{t_{\beta
}}\right\vert <\infty ,$ there exists $\beta _{0}\in \emph{A}$ such that $
\left\vert T_{t_{\beta }}\right\vert <\infty $ for every $\beta \geq \beta
_{0}.$ Then, the asymptotic regularity of $\mathcal{T}$ implies
\begin{align*}
\Vert T_{t_{\beta }}Rx-Rx\Vert & \leq \left\vert T_{t_{\beta }}\right\vert
\limsup_{\alpha }\Vert Rx-T_{t_{\alpha }}Rx\Vert +\lim_{\alpha }\Vert
T_{t_{\beta }+t_{\alpha }}Rx-T_{t_{\alpha }}Rx\Vert \\
& +\limsup_{\alpha }\Vert T_{t_{\alpha }}Rx-Rx\Vert =0.
\end{align*}
Hence $T_{t_{\beta }}Rx=Rx$ for every $\beta \geq \beta _{0}$ and, from the
asymptotic regularity again,
\begin{equation*}
\Vert T_{t}Rx-Rx\Vert =\lim_{\beta }\left\Vert T_{t+t_{\beta
}}Rx-T_{t_{\beta }}Rx\right\Vert =0
\end{equation*}
for each $t\in G.$ Thus $Rx\in \Fix\mathcal{T}$ for every $x\in C$ and the
proof is complete.
\end{proof}
It is well known that the Opial modulus of a Hilbert space $H,$
\begin{equation*}
r_{H}(c)=\sqrt{1+c^{2}}-1,
\end{equation*}
and the Opial modulus of $\ell _{p},p>1,$
\begin{equation*}
r_{\ell _{p}}(c)=(1+c^{p})^{1/p}-1
\end{equation*}
for all $c\geq 0$ (see \cite{LiTaXu}). The following corollaries are
sharpened versions of \cite[Th. 2.2]{GoT} and \cite[Cor. 8]{GoN}.
\begin{corollary}
Let $C$ be a nonempty bounded closed convex subset of a Hilbert space $H.$
If $\mathcal{T}=\{T_{t}:t\in G\}$ is an asymptotically regular semigroup on $
C$ such that
\begin{equation*}
\liminf_{t}|T_{t}|<\sqrt{2},
\end{equation*}
then $\Fix\mathcal{T}$ is a H\"{o}lder continuous retract of $C.$
\end{corollary}
\begin{corollary}
Let $C$ be a nonempty bounded closed convex subset of $\ell _{p},1<p<\infty
. $ If $\mathcal{T}=\{T_{t}:t\in G\}$ is an asymptotically regular semigroup
on $C$ such that
\begin{equation*}
\liminf_{t}|T_{t}|<2^{1/p},
\end{equation*}
then $\Fix\mathcal{T}$ is a H\"{o}lder continuous retract of $C.$
\end{corollary}
Let $1\leq p,q<\infty .$ Recall that the Bynum space $\ell _{p,q}$ is the
space $\ell _{p}$ endowed with the equivalent norm $\Vert x\Vert
_{p,q}=(\Vert x^{+}\Vert _{p}^{q}+\Vert x^{-}\Vert _{p}^{q})^{1/q},$ where $
x^{+},x^{-}$ denote, respectively, the positive and the negative part of $x.$
If $p>1,$ then
\begin{equation*}
r_{\ell _{p,q}}(c)=\min \{(1+c^{p})^{1/p}-1,(1+c^{q})^{1/q}-1\}
\end{equation*}
for all $c\geq 0$ (see, e.g., \cite{AyDoLo}). The following corollary
extends \cite[Cor. 10]{GoN}.
\begin{corollary}
Let $C$ be a nonempty convex weakly compact subset of $\ell
_{p,q},1<p<\infty ,1\leq q<\infty .$ If $\mathcal{T}=\{T_{t}:t\in G\}$ is an
asymptotically regular semigroup on $C$ such that
\begin{equation*}
\liminf_{t}|T_{t}|<\min \{2^{1/p},2^{1/q}\},
\end{equation*}
then $\Fix\mathcal{T}$ is a H\"{o}lder continuous retract of $C.$
\end{corollary}
Let us now examine the case of $p$-uniformly convex spaces. Recall that a
Banach space $X$ is $p$-uniformly convex if $\inf_{\varepsilon >0}\delta
(\varepsilon )\varepsilon ^{-p}>0,$ where $\delta $ denotes the modulus of
uniform convexity of $X.$ If $X$ is $p$-uniformly convex, then (see \cite
{Xu91})
\begin{equation}
\left\Vert \lambda x+(1-\lambda )y\right\Vert ^{p}\leq \lambda \left\Vert
x\right\Vert ^{p}+(1-\lambda )\left\Vert y\right\Vert
^{p}-c_{p}W_{p}(\lambda )\left\Vert x-y\right\Vert ^{p} \label{ineq_Xu}
\end{equation}
for some $c_{p}>0$ and every $x,y\in X,0\leq \lambda \leq 1,$ where $
W_{p}(\lambda )=\lambda (1-\lambda )^{p}+\lambda ^{p}(1-\lambda ).$ A Banach
space $X$ satisfies the Opial property if
\begin{equation*}
\liminf_{n\rightarrow \infty }\left\Vert x_{n}-x\right\Vert
<\liminf_{n\rightarrow \infty }\left\Vert x_{n}-y\right\Vert
\end{equation*}
for every sequence $x_{n}\overset{w}{\longrightarrow }x$ and $y\neq x.$
The following theorem is an extension of \cite[Th. 7]{GoN}, and a partial
extension of \cite[Th. 9]{GoTai}.
\begin{theorem}
\label{Pconvex}Let $C$ be a nonempty bounded closed convex subset of a $p$
-uniformly convex Banach space $X$ with the Opial property and $\mathcal{T}
=\{T_{t}:t\in G\}$ an asymptotically regular semigroup on $C$ such that
\begin{equation*}
\liminf_{t}|T_{t}|<\max \left\{ (1+c_{p})^{1/p},\left( \frac{1}{2}\left(
1+(1+4c_{p}\WCS(X)^{p})^{1/2}\right) \right) ^{1/p}\right\} .
\end{equation*}
Then $\mathcal{T}$ has a fixed point in $C$ and $\Fix\mathcal{T}$ is a H\"{o}
lder continuous retract of $C.$
\end{theorem}
\begin{proof}
Choose a sequence $(t_{n})$ of elements in $G,$ $\lim_{n\rightarrow \infty
}t_{n}=\infty ,$ such that $s(\mathcal{T})=\lim_{n\rightarrow \infty
}\left\vert T_{t_{n}}\right\vert $ and let $(T_{t_{\alpha }})_{\alpha \in \emph{A
}}$ denote a pointwise weakly convergent subnet of $(T_{t_{n}}).$ Define, for
every $x\in C$,
\begin{equation*}
Lx=w\text{-}\lim_{\alpha }T_{t_{\alpha }}x.
\end{equation*}
Fix $x_{0}\in C$ and put $x_{m+1}=Lx_{m},m\geq 0.$ Let $B_{m}=\limsup_{
\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m+1}\right\Vert .$ Since $X$
satisfies the Opial property, it follows from \cite[Prop. 2.9]{KaPr} that
\begin{equation*}
\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m+1}\right\Vert
<\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-y\right\Vert
\end{equation*}
for every $y\neq x_{m+1},$ i.e., $x_{m+1}$ is the unique point in the
asymptotic center $A(C,(T_{t_{\alpha }}x_{m})),m\geq 0.$ Applying (\ref
{ineq_Xu}) yields
\begin{align*}
& c_{p}W_{p}(\lambda )\left\Vert x_{m}-T_{t_{\alpha }}x_{m}\right\Vert
^{p}+\left\Vert \lambda x_{m}+(1-\lambda )T_{t_{\alpha }}x_{m}-T_{t_{\beta
}}x_{m-1}\right\Vert ^{p} \\
& \leq \lambda \left\Vert x_{m}-T_{t_{\beta }}x_{m-1}\right\Vert
^{p}+(1-\lambda )\left\Vert T_{t_{\alpha }}x_{m}-T_{t_{\beta
}}x_{m-1}\right\Vert ^{p}
\end{align*}
for every $\alpha ,\beta \in \emph{A},0<\lambda <1,m>0.$ Following \cite[Th.
9]{GoTai} (see also \cite{Xu90}) and using the asymptotic regularity of $
\mathcal{T},$ we obtain
\begin{equation}
\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m}\right\Vert ^{p}\leq
\frac{s(\mathcal{T})^{p}-1}{c_{p}}(B_{m-1})^{p} \label{in1}
\end{equation}
for any $m>0.$ By Theorem \ref{Wi1} and the weak lower semicontinuity of the
norm, we have
\begin{equation}
B_{m}\leq \frac{D[(T_{t_{\alpha }}x_{m})]}{\WCS(X)}\leq \frac{s(\mathcal{T})
}{\WCS(X)}\limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m}\right\Vert .
\label{in2}
\end{equation}
Furthermore, by the Opial property,
\begin{equation}
B_{m}\leq \limsup_{\alpha }\left\Vert T_{t_{\alpha }}x_{m}-x_{m}\right\Vert .
\label{in3}
\end{equation}
Combining (\ref{in1}) with (\ref{in2}) and (\ref{in3}) we see that
\begin{equation*}
(B_{m})^{p}=\limsup_{\alpha }\left\Vert T_{t_{\alpha
}}x_{m}-x_{m+1}\right\Vert ^{p}\leq \gamma ^{p}(B_{m-1})^{p},
\end{equation*}
where
\begin{equation*}
\gamma ^{p}=\max \left\{ \frac{s(\mathcal{T})^{p}-1}{c_{p}},\frac{s(\mathcal{
T})^{p}-1}{c_{p}}\left( \frac{s(\mathcal{T})}{\WCS(X)}\right) ^{p}\right\}
<1,
\end{equation*}
by assumption. Hence $B_{m}\leq \gamma B_{m-1}$ for every $m\geq 1$ and,
proceeding in the same way as in the proof of Theorem \ref{Thwcs}, we
conclude that $\Fix\mathcal{T}$ is a nonempty H\"{o}lder continuous retract
of $C.$
\end{proof}
\end{document} |
\begin{document}
\title{Bessel potentials and optimal Hardy and Hardy-Rellich inequalities}
\author{ Nassif Ghoussoub\thanks{Partially supported by a grant
from the Natural Sciences and Engineering Research Council of Canada. } \quad and \quad Amir
Moradifam \thanks{Partially supported by a UBC Graduate Fellowship. }
\\
\small Department of Mathematics,
\small University of British Columbia, \\
\small Vancouver BC Canada V6T 1Z2 \\
\small {\tt nassif@math.ubc.ca} \\
\small {\tt a.moradi@math.ubc.ca}
\\
}
\maketitle
\begin{abstract} We give necessary and sufficient conditions on a pair of positive radial
functions $V$ and $W$ on a ball $B$ of radius $R$ in $R^{n}$, $n \geq 1$, so that the
following inequalities hold for all $u \in C_{0}^{\infty}(B)$:
\begin{equation*} \label{one}
\hbox{$\int_{B}V(x)|\nabla u |^{2}dx \geq \int_{B} W(x)u^2dx$, }
\end{equation*}
and
\begin{equation*} \label{two}
\hbox{$\int_{B}V(x)|\Delta u |^{2}dx \geq \int_{B} W(x)|\nabla
u|^{2}dx+(n-1)\int_{B}(\frac{V(x)}{|x|^2}-\frac{V_r(|x|)}{|x|})|\nabla u|^2dx$.}
\end{equation*}
This characterization makes a very useful connection between Hardy-type inequalities and the oscillatory behaviour of certain ordinary differential equations, and helps in the identification of a large number of such couples $(V, W)$ -- that we call Bessel pairs -- as well
as the best constants in the corresponding inequalities. This allows us to improve, extend, and
unify many results --old and new-- about Hardy and Hardy-Rellich type inequalities, such as those obtained by
Caffarelli-Kohn-Nirenberg \cite{CKN}, Brezis-V\'{a}zquez \cite{BV}, Wang-Willem \cite{WW}, Adimurthi-Chaudhuri-Ramaswamy \cite{ACR}, Filippas-Tertikas \cite{FT},
Adimurthi-Grossi -Santra \cite{AGS}, Tertikas-Zographopoulos \cite{TZ}, and
Blanchet-Bonforte-Dolbeault-Grillo-Vasquez \cite{BBDGV}.
\end{abstract}
\section{Introduction}
Ever since Br\'ezis-Vazquez \cite{BV} showed that Hardy's inequality can be improved once
restricted to a smooth bounded domain $\Omega$ in $\R^n$, there was a flurry of activity about possible
improvements of the following type:
\begin{equation}\label{gen-hardy.0}
\hbox{If $n\geq 3$\quad then \quad $\int_{\Omega}|\nabla u |^{2}dx - ( \frac{n-2}{2})^{2}
\int_{\Omega}\frac{|u|^{2}}{|x|^{2}}dx\geq \int_{\Omega} V(x)|u|^{2}dx$ \quad for all $u \in
H^{1}_{0}(\Omega)$,}
\end{equation}
as well as its fourth order counterpart
\begin{equation}\label{gen-rellich.0}
\hbox{If $n\geq 5$\quad then \quad $\int_{\Omega}|\Delta u|^{2}dx -
\frac{n^2(n-4)^2}{16} \int_{\Omega}\frac{u^{2}}{|x|^{4}}dx\geq
\int_{\Omega} W(x)u^{2}dx$ \quad for $u \in H^{2}(\Omega)\cap H_{0}^{1}(\Omega)$},
\end{equation}
where $V, W$ are certain explicit radially symmetric potentials of order lower than
$\frac{1}{r^2}$ (for $V$) and $\frac{1}{r^4}$ (for $W$).
In this paper, we provide an approach that completes, simplifies and improves most related
results to-date regarding the Laplacian on Euclidean space as well as its powers. We also
establish new inequalities some of which cover critical dimensions such as $n=2$ for inequality
(\ref{gen-hardy.0}) and $n=4$ for (\ref{gen-rellich.0}).
\quad We start -- in section 2 -- by giving necessary and sufficient conditions on positive
radial functions $V$ and $W$ on a ball $B$ in $R^{n}$, so that the following inequality
holds for some $c>0$:
\begin{equation}\label{most.general.hardy}
\hbox{$\int_{B}V(x)|\nabla u |^{2}dx \geq c\int_{B} W(x)u^2dx$ for all $u \in
C_{0}^{\infty}(B)$.}
\end{equation}
Assuming that the ball $B$ has radius $R$ and that
$\int^{R}_{0}\frac{1}{r^{n-1}V(r)}dr=+\infty$, the condition is simply that the ordinary
differential equation
\begin{equation*}
\hbox{ $({\rm B}_{V,cW})$ \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad $y''(r)+(\frac{n-1}{r}+\frac{V_r(r)}{V(r)})y'(r)+\frac{cW(r)}{V(r)}y(r)=0$ \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad}
\end{equation*}
has a positive solution on the interval $(0, R)$. We shall call such a couple $(V, W)$ a
{\it Bessel pair on $(0, R)$}. The {\it weight} of such a pair is then defined as
\begin{equation}
\hbox{$\beta (V, W; R)=\sup \big\{ c;\, ({\rm B}_{V,cW})$ has a positive solution on $(0, R)\big\} $.}
\end{equation}
This characterization makes an important connection between Hardy-type inequalities and the oscillatory behaviour of the above equations. For example, by
using recent results on ordinary differential equations, we can then infer that an integral
condition on $V, W$ of the form
\begin{equation}
\limsup_{r\to 0}r^{2(n-1)}V(r)W(r)\big( \int^{R}_{r}\frac{d\tau}{\tau^{n-1}V(\tau)}\big)^2<
\frac{1}{4}
\end{equation}
is sufficient (and ``almost necessary'') for $(V, W)$ to be a Bessel pair on a ball of sufficiently small radius $\rho$.
\quad Applied, in particular, to a pair $(V, \frac{1}{r^2}V)$ where the function
$\frac{rV'(r)}{V(r)}$ is assumed to decrease to $-\lambda$ on $(0, R)$, we obtain the following
extension of Hardy's inequality:
If $\lambda \leq n-2$, then
\begin{equation}\label{v-hardy}
\hbox{$\int_{B}V(x)|\nabla u|^{2}dx\geq
(\frac{n-\lambda-2}{2})^2\int_{B}V(x)\frac{u^{2}}{|x|^2}dx$
\quad for all $u \in C^{\infty}_{0}(B)$}
\end{equation}
and $(\frac{n-\lambda-2}{2})^2$ is the best constant. The case where $V(x)\equiv 1$ is obviously the classical Hardy inequality and when $V(x)=|x|^{-2a}$ for $-\infty <a < \frac{n-2}{2}$, this is a particular case of the Caffarelli-Kohn-Nirenberg inequality. One can however apply the above criterion to obtain new inequalities such as the following: For $a,b> 0$
\begin{itemize}
\item If $\alpha\beta>0$ and $m\leq \frac{n-2}{2}$,
then for all $u \in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V1}
\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m}}|\nabla u|^2dx\geq
(\frac{n-2m-2}{2})^2\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m+2}}u^2dx,
\end{equation}
and $(\frac{n-2m-2}{2})^2$ is the best constant in the inequality.
\item If $\alpha \beta<0$ and $2m-\alpha \beta \leq n-2$, then for all $u \in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V2}
\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m}}|\nabla u|^2dx\geq (\frac{n-2m+\alpha
\beta-2}{2})^2\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m+2}}u^2dx,
\end{equation}
and $(\frac{n-2m+\alpha
\beta-2}{2})^2$ is the best constant in the inequality.
\end{itemize}
We can also extend some of the recent results of Blanchet-Bonforte-Dolbeault-Grillo-Vasquez \cite{BBDGV}.
\begin{itemize}
\item If $\alpha \beta <0$ and $-\alpha \beta \leq n-2$,
then for all $u \in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V3}
\int_{\R^n}(a+b|x|^{\alpha})^{\beta}|\nabla u|^2dx\geq b^{\frac{2}{\alpha}}(\frac{n-\alpha
\beta-2}{2})^2\int_{\R^n}(a+b|x|^{\alpha})^{\beta-\frac{2}{\alpha}}u^2dx,
\end{equation}
and $b^{\frac{2}{\alpha}}(\frac{n-\alpha
\beta-2}{2})^2$ is the best constant in the inequality.
\item If $\alpha \beta >0$,
and
$n\geq 2$, then there exists a constant $C>0$ such that for all $u
\in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V4}
\int_{\R^n}(a+b|x|^{\alpha})^{\beta}|\nabla u|^2dx\geq C
\int_{\R^n}(a+b|x|^{\alpha})^{\beta-\frac{2}{\alpha}}u^2dx.
\end{equation}
Moreover, $b^{\frac{2}{\alpha}}(\frac{n-2}{2})^2\leq C\leq b^{\frac{2}{\alpha}}(\frac{n+\alpha
\beta-2}{2})^2$.
\end{itemize}
On the other hand, by considering
the pair
\[
\hbox{$V(x)=|x|^{-2a}$\quad and \quad $W_{a,c}(x)= (\frac{n-2a-2}{2})^2|x|^{-2a-2}
+c|x|^{-2a}W(x)$}
\]
we get the following improvement of the Caffarelli-Kohn-Nirenberg inequalities:
\begin{equation}\label{CKN}
\int_{B}|x|^{-2a}|\nabla u |^{2}dx - (\frac{n-2a-2}{2})^2\int_{B}|x|^{-2a-2}u^2 dx\geq
c\int_{B}|x|^{-2a} W(x)u^2dx \quad \hbox{for all $u \in C_{0}^{\infty}(B)$}
\end{equation}
if and only if the following ODE
\begin{equation*}\label{ODE111}
\hbox{ $({\rm B}_{cW})$ \quad \quad \quad \quad\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad $y''+\frac{1}{r}y'+c W(r)y=0$ \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad\quad \quad \quad \quad}
\end{equation*}
has a positive solution on $(0, R)$. Such a function $W$ will be called a {\it Bessel potential} on $(0, R)$. This type of characterization was established recently by the authors \cite{GM1} in the case
where $a=0$, yielding in particular the recent improvements of Hardy's inequalities (on bounded domains) established by
Brezis-V\'{a}zquez \cite{BV}, Adimurthi et al. \cite{ACR}, and Filippas-Tertikas \cite{FT}.
Our results here include in addition those proved by Wang-Willem \cite{WW} in the case where $a<
\frac{n-2}{2}$ and $W(r)=\frac{1}{r^2(\ln\frac{R}{r})^2}$, but also cover the previously unknown limiting case corresponding to $a=
\frac{n-2}{2}$ as well as the critical dimension $n=2$.
More importantly, we establish here that Bessel pairs lead to a myriad of optimal Hardy-Rellich
inequalities of arbitrary high order, therefore extending and completing a series of new results
by Adimurthi et al. \cite{AGS}, Tertikas-Zographopoulos \cite{TZ} and others. They are mostly
based on the following theorem which summarizes the main thrust of this paper.
\begin{theorem} Let $V$ and $W$ be positive radial $C^1$-functions on $B\backslash \{0\}$,
where $B$ is a ball centered at zero with radius $R$ in $\R^n$ ($n \geq 1$) such that
$\int^{R}_{0}\frac{1}{r^{n-1}V(r)}dr=+\infty$ and $\int^{R}_{0}r^{n-1}V(r)dr<+\infty$. The
following statements are then equivalent:
\begin{enumerate}
\item $(V, W)$ is a Bessel pair on $(0, R)$ and $\beta (V, W; R) \geq 1$.
\item $ \int_{B}V(x)|\nabla u |^{2}dx \geq \int_{B} W(x)u^2dx$ for all $u \in C_{0}^{\infty}(B)$.
\item If $\lim_{r \rightarrow 0}r^{\alpha}V(r)=0$ for some $\alpha< n-2$, then the above are
equivalent to
\[
\hbox{$\int_{B}V(x)|\Delta u |^{2}dx \geq \int_{B} W(x)|\nabla
u|^{2}dx+(n-1)\int_{B}(\frac{V(x)}{|x|^2}-\frac{V_r(|x|)}{|x|})|\nabla u|^2dx$ \quad for all
radial $u \in
C^{\infty}_{0,r}(B)$.}
\]
\item If in addition, $W(r)-\frac{2V(r)}{r^2}+\frac{2V_r(r)}{r}-V_{rr}(r)\geq 0$ on $(0, R)$,
then the above are equivalent to
\[
\hbox{$\int_{B}V(x)|\Delta u |^{2}dx \geq \int_{B} W(x)|\nabla
u|^{2}dx+(n-1)\int_{B}(\frac{V(x)}{|x|^2}-\frac{V_r(|x|)}{|x|})|\nabla u|^2dx$ \quad for all $u
\in
C^{\infty}_{0}(B)$.}
\]
\end{enumerate}
\end{theorem}
In other words, one can obtain as many Hardy and Hardy-Rellich type inequalities as one can
construct Bessel pairs on $(0, R)$. The relevance of the above result stems from the fact that
there are plenty of such pairs that are easily identifiable. Indeed, even the class of {\it Bessel
potentials} --equivalently those $W$ such that $\left(1, (\frac{n-2}{2})^2|x|^{-2} +cW(x)\right)$ is a Bessel pair-- is quite rich and contains
several important potentials. Here are some of the most relevant properties --to be established
in an appendix-- of the class of $C^1$ Bessel potentials $W$ on $(0, R)$, that we shall denote
by ${\cal B}(0, R)$.
First, the class is a closed convex \emph{solid} subset of $C^1(0, R)$, that is
if $W\in {\cal B}(0, R)$ and $0\leq V\leq W$, then $V\in {\cal B}(0, R)$. The ``weight'' of each
$W\in {\cal B}(R)$, that is
\begin{equation}
\hbox{$\beta (W; R)=\sup\big\{c>0;\, (B_{cW})$ has a positive solution on $(0, R)\big\},$}
\end{equation}
will be an important ingredient for computing the best constants in corresponding functional
inequalities. Here are some basic examples of Bessel potentials and their corresponding weights.
\begin{itemize}
\item $ W \equiv 0$ is a Bessel potential on $(0, R)$ for any $R>0$.
\item $ W \equiv 1$ is a Bessel potential on $(0, R)$ for any $R>0$, and $\beta (1;
R)=\frac{z_0^2}{R^2}$ where $z_{0}=2.4048...$ is the first zero of the Bessel function $J_0$.
\item If $a<2$, then there exists $R_a>0$ such that $W (r)=r^{-a}$ is a Bessel potential on $(
0, R_a)$.
\item For $k\geq 1$, $R>0$ and $\rho=R( e^{e^{e^{.^{.^{e((k-1)-times)}}}}} )$, let $ W_{k,
\rho} (r)=\Sigma_{j=1}^k\frac{1}{r^{2}}\big(\prod^{j}_{i=1}log^{(i)}\frac{\rho}{r}\big)^{-2}$
where the functions $log^{(i)}$ are defined iteratively as follows: $log^{(1)}(.)=log(.)$ and
for $k\geq 2$, $log^{(k)}(.)=log(log^{(k-1)}(.))$. $ W_{k, \rho}$ is then a Bessel potential on
$(0, R)$ with $\beta (W_{k, \rho}; R)=\frac{1}{4}$.
\item For $k\geq 1$, $R>0$ and $\rho\geq R$, define $\tilde W_{k; \rho}
(r)=\Sigma_{j=1}^k\frac{1}{r^{2}}X^{2}_{1}(\frac{r}{\rho})X^{2}_{2}(\frac{r}{\rho}) \ldots
X^{2}_{j-1}(\frac{r}{\rho})X^{2}_{j}(\frac{r}{\rho})$ where the functions $X_i$ are defined
iteratively as follows:
$X_{1}(t)=(1-\log(t))^{-1}$ and for $k\geq 2$, $ X_{k}(t)=X_{1}(X_{k-1}(t))$. Then again $ \tilde
W_{k, \rho}$ is a Bessel potential on $(0, R)$ with $\beta (\tilde W_{k, \rho}; R)=\frac{1}{4}$.
\item More generally, if $W$ is any positive function on $\R$ such that
$\liminf\limits_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds>-\infty$,
then for every $R>0$, there exists $\alpha:=\alpha(R)>0$ such that $W_\alpha(x):=\alpha^2W(\alpha
x)$ is a Bessel potential on $(0, R)$.
\end{itemize}
What is remarkable is that the class of Bessel potentials $W$ is also the one that leads to
optimal improvements for fourth order inequalities (in dimension $n\geq 3$) of the following type:
\begin{equation}\label{gen-second.hardy}
\hbox{$ \int_{B}|\Delta u
|^{2}dx - C(n) \int_{B}\frac{|\nabla u|^{2}}{|x|^{2}}dx\geq c(W, R)\int_{B}
W(x)|\nabla u|^{2}dx$ \quad for all $u \in H^{2}_{0}(B)$,}
\end{equation}
where $C(3)=\frac{25}{36}$, $C(4)=3$ and $C(n)=\frac{n^2}{4}$ for $n\geq 5$.
The case when $W\equiv \tilde W_{k, \rho}$ and $n\geq 5$ was recently established by
Tertikas-Zographopoulos \cite{TZ}. Note that $W$ can be chosen to be any one of the examples of
Bessel potentials listed above. Moreover, both $C(n)$ and the weight $\beta (W; R)$ are the best
constants in the above inequality.
Appropriate combinations of (\ref{most.general.hardy}) and (\ref{gen-second.hardy}) then lead
to a myriad of Hardy-Rellich inequalities in dimension $n\geq 4$. For example, if $W$ is a
Bessel potential on $(0, R)$ such that the function $r\frac{W_r(r)}{W(r)}$ decreases to
$-\lambda$, and if $\lambda \leq n-2$, then we have for all $u \in C^{\infty}_{0}(B_{R})$
\begin{equation} \label{Rellich.1}
\int_{B}|\Delta u|^{2}dx -
\frac{n^2(n-4)^2}{16}\int_{B}\frac{u^2}{|x|^4}dx\geq
\big(\frac{n^2}{4}+\frac{(n-\lambda-2)^2}{4}\big)
\beta (W; R)\int_{B}\frac{W(x)}{|x|^2}u^2 dx.
\end{equation}
By applying (\ref{Rellich.1}) to the various examples of Bessel functions listed above, one
improves in many ways the recent results of Adimurthi et al. \cite{AGS} and those by
Tertikas-Zographopoulos \cite{TZ}. Moreover, besides covering the critical dimension $n=4$, we
also establish that the best constant is $(1+\frac{n(n-4)}{8})$ for all the potentials $W_k$ and
$\tilde W_k$ defined above. For example we have for $n\geq 4$,
\begin{equation}
\int_{B}|\Delta u(x) |^{2}dx \geq \frac{n^2(n-4)^2}{16}\int_{B}\frac{u^2}{|x|^4}
dx+(1+\frac{n(n-4)}{8})\sum^{k}_{j=1}\int_{B}\frac{u^2}{|x|^4}\big(
\prod^{j}_{i=1}log^{(i)}\frac{\rho}{|x|}\big)^{-2}dx.
\end{equation}
More generally, we show that for any $m<\frac{n-2}{2}$, and any $W$ Bessel potential on a ball
$B_{R}\subset R^n$ of radius $R$, the following inequality holds for all $u \in C^{\infty}_{0}(B_{R})$
\begin{equation}\label{gm-hr.00}
\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}\geq a_{n,m}\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx+\beta(W; R)\int_{B_{R}}W(x)\frac{|\nabla u|^2}{|x|^{2m}}dx,
\end{equation}
where $a_{m,n}$ and $\beta(W; R)$ are best constants that we compute in the appendices for all
$m$ and $n$ and for many Bessel potentials $W$. Worth noting is Corollary \ref{radial} where we
show that inequality (\ref{gm-hr.00}) restricted to radial functions in $C^{\infty}_{0}(B_{R})$
holds with a best constant equal to $(\frac{n+2m}{2})^2$, but that $a_{n,m}$ can however be
strictly smaller than $(\frac{n+2m}{2})^2$ in the non-radial case. These results improve
considerably Theorem 1.7, Theorem 1.8, and Theorem 6.4 in \cite{TZ}.
We also establish a more general version of equation (\ref{Rellich.1}). Assuming again that
$\frac{rW'(r)}{W(r)}$ decreases to $-\lambda$ on $(0, R)$, and provided $m\leq \frac{n-4}{2}$
and $\lambda \leq n-2m-2$, we then have for all $u \in C^{\infty}_{0}(B_R)$,
\begin{eqnarray}\label{ex-gen-hr}
\int_{B_R}\frac{|\Delta u|^{2}}{|x|^{2m}}dx &\geq& \beta_{n,m}\int_{B_R}\frac{u^2}{|x|^{2m+4}}dx
\nonumber\\
&&\quad+\beta (W; R)(\frac{(n+2m)^2}{4}+\frac{(n-2
m-\lambda-2)^2}{4})
\int_{B_R}\frac{W(x)}{|x|^{2m+2}}u^2 dx,
\end{eqnarray}
where again the best constants $\beta_{n,m}$ are computed in section 3. This completes the
results in Theorem 1.6 of \cite{TZ}, where the inequality is established for $n\geq 5$, $0\leq m < \frac{n-4}{2}$, and the particular potential ${\tilde W}_{k,\rho}$.
Another inequality that relates the Hessian integral to the Dirichlet energy is the
following:
Assuming $-1<m\leq \frac{n-4}{2}$ and $W$ is a Bessel potential on a ball $B$ of radius $R$ in
$R^n$, then for all $u \in C^{\infty}_{0}(B)$,
\begin{eqnarray}\label{hrs}
\int_{B}\frac{|\Delta u|^{2}}{|x|^{2m}}dx-
\frac{(n+2m)^{2}(n-2m-4)^2}{16}\int_{B}\frac{u^2}{|x|^{2m+4}}dx&\geq&
\beta (W; R)\frac{(n+2m)^2}{4}
\int_{B}\frac{W(x)}{|x|^{2m+2}}u^2 dx \quad \quad \quad \nonumber \\
&&+ \beta(|x|^{2m}; R)||u||_{H^1_0}.
\end{eqnarray}
This improves considerably Theorem A.2. in \cite{AGS} where it is established -- for $m=0$ and without best constants -- with the potential $W_{1, \rho}$
in dimension $n\geq 5$, and the potential $W_{2, \rho}$ when $n=4$.
Finally, we establish several higher order Rellich inequalities for integrals of the form $\int_{B_{R}}\frac{|\Delta^{m}u|^2}{|x|^{2k}}dx$, improving in many ways several recent results in \cite{TZ}.
The approach can also be used to improve the recent results of
Liskevich-Lyachova-Moroz \cite{LLM} on exterior domains and will be developed in a forthcoming paper.
\section{General Hardy Inequalities}
Here is the main result of this section.
\begin{theorem} \label{main} Let $V$ and $W$ be positive radial $C^1$-functions on
$B_R\backslash \{0\}$, where $B_R$ is a ball centered at
zero with radius $R$ ($0<R\leq +\infty$) in $\R^n$ ($n \geq 1$). Assume that $\int^{a}_{0}\frac{1}{r^{n-1}V(r)}dr=+\infty$ and
$\int_{0}^{a}r^{n-1}V(r)dr<\infty$ for some $0<a<R$. Then the following two statements are equivalent:
\begin{enumerate}
\item The ordinary differential equation
\[
\hbox{ $({\rm B}_{V,W})$ \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad
\quad \quad \quad $y''(r)+(\frac{n-1}{r}+\frac{V_r(r)}{V(r)})y'(r)+\frac{W(r)}{V(r)}y(r)=0$ \quad
\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad
\quad \quad \quad}
\]
has a positive solution on the interval $(0, R]$ (possibly with $\varphi(R)=0$).
\item For all $u \in C_{0}^{\infty}(B_R)$
\begin{equation*}\label{2dim-in}
\hbox{ $({\rm H}_{V,W})$ \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad
\quad $\int_{B_R}V(x)|\nabla u(x) |^{2}dx \geq \int_{B_R} W(x)u^2dx$.\quad \quad \quad \quad \quad
\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad}
\end{equation*}
\end{enumerate}
\end{theorem}
Before proceeding with the proofs, we note the following immediate but useful corollary.
\begin{corollary} Let $V$ and $W$ be positive radial $C^1$-functions on $B\backslash \{0\}$,
where $B$ is a ball with radius $R$ in $\R^n$ ($n \geq 1$) and centered at zero, such that
$\int^{R}_{0}\frac{1}{r^{n-1}V(r)}dr=+\infty$ and $\int^{R}_{0}r^{n-1}V(r)dr<\infty$. Then $(V,
W)$ is a Bessel pair on $(0, R)$ if and only if for all $u \in C_{0}^{\infty}(B_R)$, we have
\[
\int_{B_R}V(x)|\nabla u |^{2}dx \geq \beta (V, W; R) \int_{B_R} W(x)u^2dx,
\]
with $\beta (V, W; R)$ being the best constant.
\end{corollary}
For the proof of Theorem \ref{main}, we shall need the following lemmas.
\begin{lemma}\label{main-lem} Let $\Omega$ be a smooth bounded domain in $R^n$ with $n\geq 1$ and
let $\varphi \in C^{1}(0,R)$, $R:=\sup_{x \in \partial \Omega}|x|$, be a positive solution of the
ordinary differential equation
\begin{equation}\label{ODEv}
y''+(\frac{n-1}{r}+\frac{V_r(r)}{V(r)})y'+\frac{W(r)}{V(r)}y=0,
\end{equation}
on $(0,R)$ for some $V(r),W(r)\geq 0$ where $\int^{R}_{0}\frac{1}{r^{n-1}V(r)}dr=+\infty$ and
$\int^{R}_{0}r^{n-1}V(r)dr<\infty$. Setting $\psi(x)=\frac{u(x)}{\varphi(|x|)}$ for any $u \in
C^{\infty}_{0}(\Omega)$, we then have the following properties:
\begin{enumerate}
\item $\int_{0}^{R}r^{n-1}V(r)(\frac{\varphi'(r)}{\varphi(r)})^2dr<\infty$ and $\lim_{r\rightarrow
0}r^{n-1}V(r)\frac{\varphi'(r)}{\varphi(r)}=0.$
\item $\int_{\Omega}V(|x|)(\varphi'(|x|))^2\psi^2(x)dx<\infty.$
\item $\int_{\Omega}V(|x|)\varphi^2(|x|)|\nabla \psi|^2(x)dx<\infty.$
\item $|\int_{\Omega}V(|x|)\varphi'(|x|)\varphi(|x|)\psi(x)\frac{x}{|x|}\cdot\nabla
\psi(x)dx|<\infty$.
\item $\lim_{r\rightarrow 0}|\int_{\partial
B_{r}}V(|x|)\varphi'(|x|)\varphi(|x|)\psi^2(x)ds|=0,$ where
$B_{r}\subset \Omega$ is a ball of radius $r$ centered at $0$.
\end{enumerate}
\end{lemma}
\noindent {\bf Proof:} $1)$ Setting
$x(r)=r^{n-1}V(r)\frac{\varphi'(r)}{\varphi(r)}$, we have
\[
r^{n-1}V(r)x'(r)+x^2(r)=\frac{r^{2(n-1)}V^{2}(r)}{\varphi(r)}\big(\varphi''(r)+(\frac{n-1}{r}+\frac{V_r(r)}{V(r)})\varphi'(r)\big)=-
\frac{r^{2(n-1)}V(r)W(r)}{\varphi(r)}\leq 0, \ \ \ \ 0<r<R.\]
Dividing by $r^{n-1}V(r)$ and integrating once, we obtain
\begin{equation}\label{lem-eq}
x(r)\geq \int_{r}^{R}\frac{|x(s)|^2}{s^{n-1}V(s)}ds+x(R).
\end{equation}
To prove that $\lim_{r\rightarrow 0}G(r)<\infty$, where $G(r):=\int_{r}^{R}\frac{x^{2}(s)}{s^{n-1}V(s)}ds$, we assume the contrary and
use (\ref{lem-eq}) to write that
\begin{equation*}
(-r^{n-1}V(r)G'(r))^{\frac{1}{2}}\geq G(r)+x(R).
\end{equation*}
Thus, for $r$ sufficiently small we have
$-r^{n-1}V(r)G'(r)\geq \frac{1}{2}G^{2}(r)$
and hence,
$(\frac{1}{G(r)})'\geq\frac{1}{2r^{n-1}V(r)}$, which contradicts the fact that $G(r)$ goes to
infinity as $r$ tends to zero.
Also in view of (\ref{lem-eq}), we have that $x_{0}:=\lim_{r\rightarrow
0}x(r)$ exists, and since $\lim_{r\rightarrow 0}G(r)<\infty$, we necessarily have $x_{0}=0$ and 1) is proved.
For assertion 2), we use $1)$ to see that
\[\int_{\Omega}V(|x|)(\varphi'(|x|))^2\psi^2(x)dx \leq ||u||^{2}_{\infty}\int_{\Omega}V(|x|)
\frac{(\varphi'(|x|))^2}{\varphi^2(|x|)}dx<\infty.\]
3)\, Note that
\[\hbox{$|\nabla \psi(x)|\leq \frac{|\nabla
u(x)|}{\varphi(|x|)}+|u(x)|\frac{|\varphi'(|x|)|}{\varphi^2(|x|)}\leq
\frac{C_{1}}{\varphi(|x|)}+C_{2}\frac{|\varphi'(|x|)|}{\varphi^2(|x|)}$, \ \ for all $x \in \Omega
$},\]
where $C_{1}=\max_{x \in \Omega}|\nabla u|$ and $C_{2}=\max_{x \in \Omega}|u|$.
Hence we have
\begin{eqnarray*}
\int_{\Omega}V(|x|)\varphi^2(|x|)|\nabla \psi|^2(x)dx&\leq& \int_{\Omega}
V(|x|)\frac{(C_{1}\varphi(|x|)+C_{2}|\varphi'(|x|)|)^2}{\varphi^2(|x|)}dx\\
&=&\int_{\Omega}C^{2}_{1}V(|x|)dx+\int_{\Omega}2C_{1}C_{2}\frac{|\varphi'(|x|)|}{\varphi(|x|)}
V(|x|)dx+\int_{\Omega}C^{2}_{2}(\frac{\varphi'(|x|)}{\varphi(|x|)})^2V(|x|) dx\\
&\leq&
L_{1}+2C_{1}C_{2}\big(\int_{\Omega}V(|x|)(\frac{\varphi'(|x|)}{\varphi(|x|)})^2dx\big)^{\frac{1
}{2}} \big(\int_{\Omega}V(|x|)dx\big)^{\frac{1}{2}}+L_{2}\\
&<& \infty,
\end{eqnarray*}
which proves $3)$.
$4)$ now follows from $2)$ and $3)$ since
\[ V(|x|)|\nabla u|^2= V(|x|)(\varphi'(|x|))^2
\psi^2(x)+2V(|x|)\varphi'(|x|)\varphi(|x|)\psi(x)\frac{x}{|x|}\cdot\nabla
\psi(x)+V(|x|)\varphi^2(|x|)|\nabla \psi|^2.
\]
Finally, $5)$ follows from $1)$ since
\begin{eqnarray*}
\Big|\int_{\partial
B_{r}}V(|x|)\varphi'(|x|)\varphi(|x|)\psi^2(x)ds\Big|&\leq&||u||^{2}_{\infty}\int_{\partial
B_{r}}V(|x|)\frac{|\varphi'(|x|)|}{\varphi(|x|)}ds\\
&=&||u||^{2}_{\infty}V(r)\frac{|\varphi'(r)|}{\varphi(r)}\int_{\partial B_{r}}1 ds\\
&=&n\omega_{n}||u||^{2}_{\infty}r^{n-1}V(r)\frac{|\varphi'(r)|}{\varphi(r)}.
\end{eqnarray*}
\begin{lemma} \label{super} Let $V$ and $W$ be positive radial $C^1$-functions on a ball
$B\backslash \{0\}$, where $B$ is a ball with radius $R$ in $\R^n$ ($n \geq 1$) and centered at
zero. Assuming
\begin{eqnarray*}
\hbox{$\int_{B}\left(V(x)|\nabla u|^{2}-W(x)|u|^{2}\right)dx\geq 0$ for all $u \in
C_{0}^{\infty}(B)$, }
\end{eqnarray*}
then there exists a $C^{2}$-supersolution to the following linear elliptic equation
\begin{eqnarray}\label{pde}
-{\rm div}(V(x)\nabla u)-W(x)u&=&0, \ \ \ \ {\rm in} \ \ B, \\
u&>&0 \ \ \quad {\rm in} \ \ B \setminus \{0\}, \\
u&=&0 \ \quad {\rm in} \ \ \partial B.
\end{eqnarray}
\end{lemma}
{\bf Proof:} Define
\begin{eqnarray*}
\lambda_{1}(V):=\inf \{\frac{\int_{B}V(x)|\nabla\psi|^{2}-
W(x)|\psi|^{2}}{\int_{B}|\psi|^{2}}; \ \ \psi \in C^{\infty}_{0}(B \setminus \{0\}) \}.
\end{eqnarray*}
By our assumption $\lambda_1(V)\geq 0$. Let $({\varphi}_{n}, \lambda^{n}_{1})$ be the first eigenpair
for the problem
\begin{eqnarray*}
(L-\lambda_{1}(V)-\lambda^{n}_{1}){\varphi}_{n}&=&0 \ \ \mbox{on} \ \ B \setminus B_{\frac{R}{n}}\\
{\varphi}_n&=&0 \ \ \mbox{on}\ \ \partial (B \setminus B_{\frac{R}{n}}),
\end{eqnarray*}
where $Lu=-{\rm div}(V(x)\nabla u)- W(x) u$, and $B_{\frac{R}{n}}$ is a ball of radius
$\frac{R}{n}$, $n\geq 2$. The eigenfunctions can be chosen in such a way that ${\varphi}_{n}>0$ on
$B \setminus B_{\frac{R}{n}}$ and ${\varphi}_{n}(b)=1$, for some $b \in B$ with
$\frac{R}{2}<|b|<R$.
Note that $\lambda^{n}_{1}\downarrow 0$ as $n \rightarrow \infty$. Harnack's inequality yields that
for any compact subset $K$, $\frac{\max_{K}{\varphi}_{n}}{\min_{K}{\varphi}_{n}}\leq C(K)$ with
the latter constant being independent of ${\varphi}_{n}$. Standard elliptic estimates also yield
that the family $({\varphi}_{n})$ has uniformly bounded derivatives on the compact sets
$B\setminus B_{\frac{R}{n}}$. \\
Therefore, there exists a subsequence $(\varphi_{n_{l_{2}}})_{l_{2}}$ of $(\varphi_{n})_{n}$ such
that $(\varphi_{n_{l_{2}}})_{l_{2}}$ converges to some $\varphi_{2} \in C^{2}(B \setminus
B(\frac{R}{2}))$. Now consider $(\varphi_{n_{l_{2}}})_{l_{2}}$ on $B \setminus
B(\frac{R}{3})$. Again there exists a subsequence $(\varphi_{n_{l_{3}}})_{l_{3}}$ of
$(\varphi_{n_{l_{2}}})_{l_{2}}$ which converges to $\varphi_{3} \in C^{2}( B \setminus
B(\frac{R}{3}))$, and $\varphi_{3}(x)=\varphi_{2}(x)$ for all $x \in B \setminus
B(\frac{R}{2})$. By repeating this argument we get a supersolution $\varphi \in C^{2}( B
\setminus\{ 0\})$, i.e.\ $L\varphi \geq 0$, such that $\varphi>0$ on $B \setminus \{0\}$.
$\square$\\
\noindent {\bf Proof of Theorem \ref{main}:} First we prove that 1) implies 2). Let ${\varphi} \in
C^{1}(0,R]$ be a solution of $(B_{V,W})$ such that ${\varphi} (x)>0$ for all $x \in (0,R)$. Define
$\psi(x)=\frac{u(x)}{\varphi(|x|)}$. Then
\[ |\nabla u|^2=(\varphi'(|x|))^2 \psi^2(x)+2\varphi'(|x|)\varphi(|x|)\psi(x)\frac{x}{|x|}\cdot\nabla
\psi+\varphi^2(|x|)|\nabla \psi|^2.\]
Hence,
\[ V(|x|)|\nabla u|^2\geq V(|x|)(\varphi'(|x|))^2
\psi^2(x)+2V(|x|)\varphi'(|x|)\varphi(|x|)\psi(x)\frac{x}{|x|}\cdot\nabla \psi(x).\]
Thus, we have
\[\int_{B} V(|x|)|\nabla u|^2 dx \geq \int_{B} V(|x|) (\varphi'(|x|))^2 \psi^2(x)dx+ \int_{B}2
V(|x|) \varphi'(|x|)\varphi(|x|)\psi(x) \frac{x}{|x|}\cdot\nabla \psi dx.\]
Let $B_{\epsilon}$ be a ball of radius $\epsilon$ centered at the origin. Integrate by parts to
get
\begin{eqnarray*}
\int_{B} V(|x|)|\nabla u|^2 dx &\geq& \int_{B} V(|x|) (\varphi'(|x|))^2
\psi^2(x)dx+\int_{B_{\epsilon}}2V(|x|) \varphi'(|x|)\varphi(|x|)\psi(x) \frac{x}{|x|}\cdot\nabla \psi
dx\\
&+&\int_{B\backslash B_{\epsilon}}2 V(|x|) \varphi'(|x|)\varphi(|x|)\psi(x) \frac{x}{|x|}\cdot\nabla
\psi dx\\
&=&\int_{B_{\epsilon}} V(|x|) (\varphi'(|x|))^2 \psi^2(x)dx+\int_{B_{\epsilon}}2V(|x|)
\varphi'(|x|)\varphi(|x|)\psi(x) \frac{x}{|x|}\cdot\nabla \psi dx\\
&-&\int_{B\backslash B_{\epsilon}}\left\{\big(V(|x|)
\varphi''(|x|)\varphi(|x|)+(\frac{(n-1)V(|x|)}{r}+V_r(|x|))\varphi'(|x|)\varphi(|x|)\big)\psi^2(x)
\right\}dx\\
&+&\int_{\partial (B \backslash B_{\epsilon})}V(|x|)\varphi'(|x|)\varphi(|x|)\psi^2(x)ds
\end{eqnarray*}
Let $\epsilon \rightarrow 0$ and use Lemma \ref{main-lem} and the fact that ${\varphi}$ is a solution
of $({\rm B}_{V,W})$ to get
\begin{eqnarray*}
\int_{B} V(|x|)|\nabla u|^2 dx &\geq&
-\int_{B}[V(|x|)\varphi''(|x|)+(\frac{(n-1)V(|x|)}{r}+V_r(|x|))\varphi'(|x|)]\frac{u^2(x)}{
\varphi(|x|)}dx\\
&=&\int_{B}W(|x|)u^2(x)dx.
\end{eqnarray*}
To show that 2) implies 1), we assume that inequality (${\rm H}_{V,W}$) holds on a ball $B$ of
radius $R$, and then apply Lemma \ref{super} to obtain a $C^{2}$-supersolution for the equation
(\ref{pde}). Now take the surface average of $u$, that is
\begin{equation}\label{sup-eq}
y(r)=\frac{1}{n\omega_{n} r^{n-1}}\int_{\partial B_{r}} u(x)dS=\frac{1}{n\omega_{n}}
\int_{|\omega|=1}u(r\omega)d\omega >0,
\end{equation}
where $\omega_{n}$ denotes the volume of the unit ball in $R^{n}$. We may assume that the unit
ball is contained in $B$ (otherwise we just use a smaller ball). We clearly have
\begin{equation}
y''(r)+\frac{n-1}{r}y'(r)= \frac{1}{n\omega_{n}r^{n-1}}\int_{\partial B_{r}}\Delta u(x)dS.
\end{equation}
Since $u(x)$ is a supersolution of (\ref{pde}), we have
\[\int_{\partial B_{r}}{\rm div}(V(|x|)\nabla u)ds-\int_{\partial B_{r}}W(|x|)u\,ds\geq 0,\]
and therefore,
\[V(r)\int_{\partial B_{r}}\Delta u dS -V_r(r)\int_{\partial B_{r}} \nabla u.x
ds-W(r)\int_{\partial B_{r}}u(x)ds\geq 0.\]
It follows that
\begin{equation}
V(r)\int_{\partial B_{r}}\Delta u dS -V_r(r)y'(r)-W(r)y(r)\geq 0,
\end{equation}
and in view of (\ref{sup-eq}), we see that $y$ satisfies the inequality
\begin{equation}\label{ode}
V(r)y''(r)+(\frac{(n-1)V(r)}{r}+V_r(r))y'(r)\leq -W(r)y(r), \ \ \ \ \mbox{for}\ \ \ 0<r<R,
\end{equation}
that is, it is a positive supersolution for $(B_{V,W})$.
Standard results in ODE now allow us to conclude that $(B_{V,W})$ has actually a positive solution
on $(0, R)$, and the proof of theorem \ref{main} is now complete.
\subsection{Integral criteria for Bessel pairs}
In order to obtain criteria on $V$ and $W$ so that inequality $({\rm H}_{V, W})$ holds, we clearly
need to investigate whether the ordinary differential equation $(B_{V,W})$ has positive
solutions. For that, we rewrite $(B_{V,W})$ as
\[
(r^{n-1}V(r)y')'+r^{n-1}W(r)y=0,
\]
and then by setting $s=\frac{1}{r}$ and $x(s)=y(r)$, we see that $y$ is a solution of
$(B_{V,W})$ on an interval $(0,\delta)$ if and only if $x$ is a positive solution for the equation
\begin{equation}\label{s-ode}
\hbox{$(s^{-(n-3)}V(\frac{1}{s})x'(s))'+s^{-(n+1)}W(\frac{1}{s})x(s)=0$ \quad on \quad
$(\frac{1}{\delta},\infty)$.}
\end{equation}
Now recall that a solution $x(s)$ of the equation (\ref{s-ode}) is said to be {\it oscillatory} if
there exists a sequence $\{a_{n}\}^{\infty}_{n=1}$ such that $a_{n} \rightarrow +\infty$ and
$x(a_{n})=0$. Otherwise we call the solution {\it non-oscillatory}. It follows from Sturm
comparison theorem that all solutions of (\ref{s-ode}) are either all oscillatory or all
non-oscillatory. Hence, the fact that $(V, W)$ is a Bessel pair or not
is closely related to the oscillatory behavior of the equation (\ref{s-ode}). The following
theorem is therefore a consequence of Theorem \ref{main}, combined with a relatively recent result
of Sugie et al. in \cite{SKY} about the oscillatory behavior of the equation (\ref{s-ode}).
\begin{theorem}\label{main-cr} Let $V$ and $W$ be positive radial $C^1$-functions on
$B_R\backslash \{0\}$, where $B_R$ is a ball centered at $0$ with radius $R$ in $\R^n$ ($n \geq 1$). Assume $\int^{R}_{0}\frac{1}{\tau^{n-1}V(\tau)}d\tau =+\infty$ and
$\int_{0}^{R}r^{n-1}V(r)dr<\infty$.
\begin{itemize}
\item Assume
\begin{equation}\label{integral.1}
\limsup_{r\to 0}r^{2(n-1)}V(r)W(r)\big( \int^{R}_{r}\frac{1}{\tau^{n-1}V(\tau)}d\tau\big)^2<
\frac{1}{4}
\end{equation}
then $(V, W)$ is a Bessel pair on $(0, \rho)$ for some $\rho>0$ and consequently, inequality $({\rm H}_{V, W})$
holds for all $u \in C^{\infty}_{0}(B_{\rho})$, where $B_{\rho}$ is a ball of radius $\rho$.
\item On the other hand, if
\begin{equation}\label{integral.2}
\liminf_{r\to 0}r^{2(n-1)}V(r)W(r)\big( \int^{R}_{r}\frac{1}{\tau^{n-1}V(\tau)}d\tau\big)^2>
\frac{1}{4}
\end{equation}
then there is no interval $(0, \rho)$ on which $(V,W)$ is a Bessel pair and consequently, there
is no
smooth domain $\Omega$ on which inequality
$({\rm H}_{V, W})$ holds.
\end{itemize}
\end{theorem}
A typical Bessel pair is $(|x|^{-\lambda}, |x|^{-\lambda -2})$ for $\lambda \leq n-2$. It is also easy to see by a simple change of variables in the corresponding ODEs that
\begin{equation}
\hbox{$W$ is a Bessel potential if and only if $
\left(|x|^{-\lambda}, |x|^{-\lambda}(|x|^{-2}+W(|x|))\right)$
is a Bessel pair.}
\end{equation}
More
generally, the above integral criterium allows to show the following.
\begin{theorem} \label{main.Bessel.pair} Let $V$ be a strictly positive $C^1$-function on $(0,R)$ such
that for some $\lambda \in \R$
\begin{equation}
\hbox{$\frac{rV_r(r)}{V(r)}+\lambda \geq 0$ on $(0, R)$ and $\lim\limits_{r\to
0}\frac{rV_r(r)}{V(r)}+\lambda =0$.}
\end{equation}
If $\lambda \leq n-2$, then for any Bessel potential $W$ on $(0, R)$, and any $c\leq \beta
(W; R)$,
the couple $(V, W_{\lambda, c})$ is a Bessel pair, where
\begin{equation}
W_{\lambda,c}(r)=V(r)((\frac{n-\lambda-2}{2})^2r^{-2}+cW(r)).
\end{equation}
Moreover, $\beta \big(V, W_{\lambda,c}; R\big)=1$ for all $c\leq \beta (W; R)$.
\end{theorem}
We need the following easy lemma.
\begin{lemma}\label{strict-lemma} Assume the equation
\[y''+\frac{a}{r}y'+V(r)y=0,\]
has a positive solution on $(0,R)$, where $a\geq 1$ and $V(r)> 0$. Then $y$ is strictly decreasing
on $(0,R)$.
\end{lemma}
{\bf Proof:} First observe that $y$ cannot have a local minimum, hence it is either increasing or
decreasing on $(0,\delta)$, for $\delta$ sufficiently small. Assume $y$ is increasing. Under this
assumption, if $y'(r_0)=0$ for some $r_0>0$, then $y''(r_0)=0$, which contradicts the fact that $y$ is a
positive solution of the above ODE. So we have $\frac{y''}{y'}\leq-\frac{a}{r},$ thus
\[y'\geq \frac{c}{r^a}.\]
Therefore, $y(r) \rightarrow - \infty$ as $r \rightarrow 0$, which is a contradiction. Since $y$
cannot have a local minimum, it must be strictly decreasing on $(0,R)$.
$\Box$
{\bf Proof of Theorem \ref{main.Bessel.pair}:} Write $\frac{V_r(r)}{V(r)}=-\frac{\lambda}{r}+f(r)$
where $f(r)\geq 0$ on $(0,R)$ and $\lim\limits_{r\rightarrow 0}r f(r)=0$. In order to prove that
$\left(V(r), V(r)((\frac{n-\lambda-2}{2})^2r^{-2}+cW(r))\right)$ is a Bessel pair, we need to
show that the equation
\begin{equation}\label{f-ode.1}
y''+(\frac{n-\lambda-1}{r}+f(r))y'+((\frac{n-\lambda-2}{2})^2r^{-2}+cW(r))y(r)=0,
\end{equation}
has a positive solution on $(0,R)$. But first we note that the equation
\[x''+(\frac{n-\lambda-1}{r})x'+((\frac{n-\lambda-2}{2})^2r^{-2}+cW(r))x(r)=0,\]
has a positive solution on $(0,R)$, whenever $c\leq \beta (W; R)$.
Since now $f(r)\geq 0$ and since, by the preceding lemma, $x'(r)\leq 0$, we get that $x$ is a
positive supersolution for the equation (\ref{f-ode.1}) on $(0,R)$, and thus the latter has a positive
solution on $(0,R)$. Note that this means that $\beta (V, W_{\lambda, c}; R) \geq 1$.
For the reverse inequality, we shall use the criterion in Theorem \ref{main-cr}. Indeed, apply
criterion (\ref{integral.1}) to $V(r)$ and $W_1(r)=C\frac{V(r)}{r^2}$ to get
\begin{eqnarray*}
\lim_{r \rightarrow 0}r^{2(n-1)}V(r)W_1(r)\big(
\int^{R}_{r}\frac{1}{\tau^{n-1}V(\tau)}d\tau\big)^2&=&C\lim_{r \rightarrow
0}r^{2(n-2)}V^{2}(r)\big( \int^{R}_{r}\frac{1}{\tau^{n-1}V(\tau)}d\tau\big)^2\\
&=&C\big(\lim_{r \rightarrow 0}r^{(n-2)}V(r)\int^{R}_{r}\frac{1}{\tau^{n-1}V(\tau)}d\tau\big)^2\\
&=&C\big(\lim_{r\rightarrow 0}\frac{\frac{1}{r^{n-1}V(r)}}
{\frac{(n-2)r^{n-3}V(r)+r^{n-2}V_r(r)}{r^{2(n-2)}V^{2}(r)}}\big)^2\\
&=&C\big(\lim_{r \rightarrow 0}\frac{1}{(n-2)+r\frac{V_r(r)}{V(r)}}\big)^2\\
&=&\frac{C}{(n-\lambda-2)^2}.
\end{eqnarray*}
For $\big(V, CV(r^{-2} +cW)\big)$ to be a Bessel pair, it is necessary that
$
\frac{C}{(n-\lambda-2)^2}
\leq \frac{1}{4},
$
and the proof for the best constant is complete.
$\Box$
With a similar argument one can also prove the following.
\begin{corollary} Let $V$ and $W$ be positive radial $C^1$-functions on $B_R\backslash
\{0\}$, where $B_R$ is a ball centered at zero with radius $R$ in $\R^n$ ($n \geq 1$). Assume
that
\begin{equation}
\hbox{$\lim\limits_{r
\rightarrow 0}r\frac{V_r(r)}{V(r)}=-\lambda$ and $\lambda \leq n-2$.}
\end{equation}
\begin{itemize}
\item If $\limsup\limits_{r\rightarrow 0}r^{2}\frac{W(r)}{V(r)}<(\frac{n-\lambda-2}{2})^2$, then
$(V, W)$ is a Bessel pair on some interval $(0, \rho)$, and consequently there exists a ball $B_\rho
\subset \R^n$ such that inequality $({\rm H}_{V, W})$ holds for all $u \in C^{\infty}_{0}(B_\rho)$.
\item On the other hand, if $\liminf\limits_{r\rightarrow
0}r^{2}\frac{W(r)}{V(r)}>(\frac{n-\lambda-2}{2})^2$, then there is no smooth domain $\Omega
\subset \R^n$ such that inequality $({\rm H}_{V, W})$ holds on $\Omega$.
\end{itemize}
\end{corollary}
\subsection{New weighted Hardy inequalities}
An immediate application of Theorem \ref{main.Bessel.pair} and Theorem \ref{main} is the following very general Hardy inequality.
\begin{theorem} \label{super.hardy} Let $V(x)=V(|x|)$ be a strictly positive radial function on a smooth domain $\Omega$ containing
$0$, and set $R=\sup_{x \in \Omega} |x|$. Assume
that for some $\lambda \in \R$
\begin{equation}
\hbox{$\frac{rV_r(r)}{V(r)}+\lambda \geq 0$ on $(0, R)$ and $\lim\limits_{r\to
0}\frac{rV_r(r)}{V(r)}+\lambda =0$.}
\end{equation}
\begin{enumerate}
\item If $\lambda \leq n-2$, then the following
inequality holds for any Bessel potential $W$ on $(0, R)$:
\begin{equation}\label{v-hardy}
\hbox{$\int_{\Omega}V(x)|\nabla u|^{2}dx\geq
(\frac{n-\lambda-2}{2})^2\int_{\Omega}\frac{V(x)}{|x|^2}u^{2}dx+\beta (W; R)\int_{\Omega}
V(x)W(x)u^{2}dx$\quad for all $u \in C^{\infty}_{0}(\Omega)$,}
\end{equation}
and both $(\frac{n-\lambda-2}{2})^2$ and $\beta (W; R)$ are the best constants.
\item In particular, $\beta (V, r^{-2}V; R)=(\frac{n-\lambda-2}{2})^2$ is the best constant in the
following inequality
\begin{equation}\label{super-hardy}
\hbox{$\int_{\Omega}V(x)|\nabla u|^{2}dx\geq
(\frac{n-\lambda-2}{2})^2\int_{\Omega}\frac{V(x)}{|x|^2}u^{2}dx$ \quad for all $u \in
C^{\infty}_{0}(\Omega)$.}
\end{equation}
\end{enumerate}
\end{theorem}
Applied to $V_1(r)=r^{-m} W_{k, \rho}(r)$ and $V_2(r)=r^{-m} {\tilde W}_{k, \rho}(r)$ where
$ W_{k, \rho}
(r)=\Sigma_{j=1}^k\frac{1}{r^{2}}\big(\prod^{j}_{i=1}\log^{(i)}\frac{\rho}{r}\big)^{-2}$ and
$\tilde W_{k; \rho}
(r)=\Sigma_{j=1}^k\frac{1}{r^{2}}X^{2}_{1}(\frac{r}{\rho})X^{2}_{2}(\frac{r}{\rho}) \ldots
X^{2}_{j-1}(\frac{r}{\rho})X^{2}_{j}(\frac{r}{\rho})$ are the iterated logs introduced in the
introduction, and noting that in both cases the corresponding $\lambda$ is equal to $2m+2$, we get the following new Hardy inequalities.
\begin{corollary} Let $\Omega$ be a smooth bounded domain in $\R^n$ ($n \geq 1$) and $m\leq
\frac{n-4}{2}$. Then the following inequalities hold.
\begin{eqnarray}
\int_{\Omega}\frac{W_{k,\rho}(x)}{|x|^{2m}}|\nabla
u|^2dx\geq(\frac{n-2m-4}{2})^2\int_{\Omega}\frac{W_{k,\rho}(x)}{|x|^{2m+2}}u^2dx\\
\int_{\Omega}\frac{\tilde W_{k,\rho}(x)}{|x|^{2m}}|\nabla
u|^2dx\geq(\frac{n-2m-4}{2})^2\int_{\Omega}\frac{\tilde W_{k,\rho}(x)}{|x|^{2m+2}}u^2dx.
\end{eqnarray}
Moreover, the constant $(\frac{n-2m-4}{2})^2$ is the best constant in both
inequalities.
\end{corollary}
\begin{remark}\rm
The two following theorems deal with Hardy-type inequalities on the whole of $\R^n$.
Theorem \ref{main} already yields that inequality $(H_{V,W})$ holds for all $u \in C^{\infty}_{0}(\R^n)$ if and only if the ODE $(B_{V,W})$ has a positive solution on $(0,\infty)$. The latter equation is therefore
non-oscillatory, which will again be a very useful fact for computing best constants, in view of the following criterion at infinity (Theorem 2.1 in \cite{SKY}) applied to the equation
\begin{equation} \label{remark-ode}
\left(a(r)y' \right)'+b(r)y(r)=0,
\end{equation}
where $a(r)$ and $b(r)$ are positive real valued functions. Assume that $\int^{\infty}_{d} \frac{1}{a(\tau)}d\tau<\infty $ for some $d>0$, and that the limit
\[L:=\lim_{r\rightarrow \infty }a(r)b(r)\left(
\int_{r}^{\infty}\frac{1}{a(\tau)}d\tau \right)^2\]
exists. Then for the equation (\ref{remark-ode}) to be non-oscillatory, it is necessary that $L\leq \frac{1}{4}$.
\end{remark}
\begin{theorem} \label{GM-IV} Let $a,b> 0$, and $\alpha, \beta, m$ be real numbers.
\begin{itemize}
\item If $\alpha\beta>0$, and $m\leq \frac{n-2}{2}$,
then for all $u \in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V1}
\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m}}|\nabla u|^2dx\geq
(\frac{n-2m-2}{2})^2\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m+2}}u^2dx,
\end{equation}
and $(\frac{n-2m-2}{2})^2$ is the best constant in the inequality.
\item If $\alpha \beta<0$, and $2m-\alpha \beta \leq n-2$,
then for all $u \in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V2}
\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m}}|\nabla u|^2dx\geq (\frac{n-2m+\alpha
\beta-2}{2})^2\int_{\R^n}\frac{(a+b|x|^{\alpha})^{\beta}}{|x|^{2m+2}}u^2dx,
\end{equation}
and $(\frac{n-2m+\alpha
\beta-2}{2})^2$ is the best constant in the inequality.
\end{itemize}
\end{theorem}
{\bf Proof:} Letting $V(r)=\frac{(a+br^\alpha)^\beta}{r^{2m}}$, then
\[r\frac{V'(r)}{V(r)}=-2m+\frac{b\alpha \beta r^\alpha}{a+br^{\alpha}}=-2m+\alpha \beta -\frac{a
\alpha \beta}{a+br^\alpha}.\]
Hence, in the case $\alpha, \beta> 0$ and $2m\leq n-2$, (\ref{GM-V1}) follows directly from
Theorem \ref{super.hardy}. The same holds for (\ref{GM-V2}) since it also follows directly from Theorem \ref{super.hardy} in the case where
$\alpha <0$, $\beta >0$ and $2m-\alpha \beta \leq n-2$.
For the remaining two cases, we will use Theorem \ref{main}. Indeed, in this case the equation $(B_{V,W})$ becomes
\begin{equation}\label{V-ODE1}
y''+(\frac{n-2m-1}{r}+\frac{b\alpha \beta r^{\alpha-1}}{a+br^{\alpha}})y'+\frac{c}{r^2}y=0,
\end{equation}
and the best constant in inequalities (\ref{GM-V1}) and (\ref{GM-V2}) is the largest $c$ such
that the above equation has a positive solution on $(0,+\infty)$. Note that by Lemma
\ref{strict-lemma}, we have that $y'<0$ on $(0,+\infty)$. Hence, if $\alpha <0$ and $\beta<0$, then the positive
solution of the equation
\[y''+\frac{n-2m-1}{r}y'+\frac{(\frac{n-2m-2}{2})^2}{r^2}y=0\]
is a positive super-solution for (\ref{V-ODE1}) and therefore the latter ODE has a positive
solution on $(0,+\infty)$, from which we conclude that (\ref{GM-V1}) holds. To prove now that $(\frac{n-2m-2}{2})^2$ is the best constant in (\ref{GM-V1}), we use the fact that if the equation (\ref{V-ODE1}) has a positive
solution on $(0, +\infty)$, then the equation is necessarily non-oscillatory. By rewriting
(\ref{V-ODE1}) as
\begin{equation} \label{V-ODE2}
\left(r^{n-2m-1}(a+br^{\alpha})^{\beta} y'\right)'+cr^{n-2m-3}(a+br^{\alpha})^{\beta}y=0,
\end{equation}
and by noting that
\[\int_{d}^{\infty}\frac{d\tau}{\tau^{n-2m-1}(a+b\tau^{\alpha})^{\beta}}<\infty,\]
and
\[\lim_{r\rightarrow \infty }cr^{2(n-2m-2)}(a+br^{\alpha})^{2\beta}\left(
\int_{r}^{\infty}\frac{d\tau}{\tau^{n-2m-1}(a+b\tau^{\alpha})^{\beta}} \right)^2=\frac{c}{(n-2m-2)^2}, \]
we can use Theorem 2.1 in \cite{SKY} to conclude that for equation (\ref{V-ODE2}) to be non-oscillatory it is necessary that
\[\frac{c}{(n-2m-2)^2}\leq \frac{1}{4}.\]
Thus, $\frac{(n-2m-2)^2}{4}$ is the best constant in the inequality (\ref{GM-V1}).
A very similar argument applies in the case where $\alpha>0$, $\beta<0$, and $2m<n-2$, to obtain that inequality (\ref{GM-V2}) holds for all $u \in C_{0}^{\infty}(\R^n)$ and that $(\frac{n-2m+\alpha \beta-2}{2})^2$
is indeed the best constant.
$\Box$\\
Note that the above two inequalities can be improved on smooth bounded domains by using Theorem \ref{super.hardy}.
We shall now extend the recent results of Blanchet-Bonforte-Dolbeault-Grillo-Vasquez \cite{BBDGV} and address some of their questions regarding best constants.
\begin{theorem}\label{GM-V} Let $a,b> 0$, and $\alpha, \beta$ be real numbers.
\begin{itemize}
\item If $\alpha \beta<0$ and $-\alpha \beta \leq n-2$,
then for all $u \in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V3}
\int_{\R^n}(a+b|x|^{\alpha})^{\beta}|\nabla u|^2dx\geq b^{\frac{2}{\alpha}}(\frac{n+\alpha
\beta-2}{2})^2\int_{\R^n}(a+b|x|^{\alpha})^{\beta-\frac{2}{\alpha}}u^2dx,
\end{equation}
and $b^{\frac{2}{\alpha}}(\frac{n+\alpha
\beta-2}{2})^2$ is the best constant in the inequality.
\item If $\alpha \beta >0$ and
$n\geq 2$, then there exists a constant $C>0$ such that for all $u
\in C^{\infty}_{0}(\R^n)$
\begin{equation}\label{GM-V4}
\int_{\R^n}(a+b|x|^{\alpha})^{\beta}|\nabla u|^2dx\geq C
\int_{\R^n}(a+b|x|^{\alpha})^{\beta-\frac{2}{\alpha}}u^2dx.
\end{equation}
Moreover, $b^{\frac{2}{\alpha}}(\frac{n-2}{2})^2\leq C\leq b^{\frac{2}{\alpha}}(\frac{n+\alpha
\beta-2}{2})^2$.
\end{itemize}
\end{theorem}
{\bf Proof:} Letting $V(r)=(a+br^{\alpha})^{\beta}$, then we have
\[
r\frac{V'(r)}{V(r)}=\frac{b\alpha \beta r^{\alpha}}{a+br^{\alpha}}=\alpha \beta-\frac{a \alpha
\beta}{a+br^{\alpha}}.
\]
Inequality (\ref{GM-V3}) and its best constant in the case when $\alpha<0$ and $\beta>0$, then follow immediately from Theorem \ref{super.hardy} with $\lambda=-\alpha \beta$.
The proof of the remaining cases will use Theorem \ref{main} as well as the integral criteria for
the oscillatory behavior of solutions for ODEs of the form ($B_{V,W}$).
Assuming still that $\alpha \beta <0$, then with an argument similar to that of Theorem \ref{GM-IV} above, one can show
that the positive solution of the equation $y''+(\frac{n+\alpha \beta-1}{r})y'+\frac{(n+\alpha
\beta -2)^2}{4r^2}y=0$ on $(0,+\infty)$ is a positive supersolution for the equation
\[y''+(\frac{n-1}{r}+\frac{V'(r)}{V(r)})y'+\frac{b^{\frac{2}{\alpha}}(n+\alpha \beta
-2)^2}{4(a+br^{\alpha})^{\frac{2}{\alpha}}}y=0.\]
Theorem \ref{main} then yields that the inequality (\ref{GM-V3}) holds for all $u \in
C_{0}^{\infty}(\R^n)$.
To prove now that $b^{\frac{2}{\alpha}}(\frac{n+\alpha \beta -2}{2})^2$ is the best constant in
(\ref{GM-V3}) it is enough to show that if the following equation
\begin{equation}\label{V-ODE3}
\left(r^{n-1}(a+br^{\alpha})^{\beta}y'\right)'+cr^{n-1}(a+br^{\alpha})^{\beta-\frac{2}{\alpha}}y=0
\end{equation}
has a positive solution on $(0,+\infty)$, then $c\leq b^{\frac{2}{\alpha}}(\frac{n+\alpha \beta
-2}{2})^2$.
If now $\alpha>0$ and $\beta<0$, then we have
\[\lim_{r\rightarrow \infty }cr^{2(n-1)}(a+br^{\alpha})^{2\beta-\frac{2}{\alpha}}\left(
\int_{r}^{\infty}\frac{d\tau}{\tau^{n-1}(a+b\tau^{\alpha})^{\beta}}
\right)^2=\frac{c}{b^{\frac{2}{\alpha}}(n+\alpha \beta-2)^2}.\]
Hence, by Theorem 2.1 in \cite{SKY} again, the non-oscillatory aspect of the equation holds for $c\leq \frac{b^{\frac{2}{\alpha}}(n+\alpha
\beta-2)^2}{4}$, which completes the proof of the first part. \\
A similar argument applies in the case where $\alpha \beta>0$ to prove that (\ref{GM-V4}) holds for all $u \in C_{0}^{\infty}(\R^n)$ and
$b^{\frac{2}{\alpha}}(\frac{n-2}{2})^2\leq C\leq b^{\frac{2}{\alpha}}(\frac{n+\alpha
\beta-2}{2})^2$.
The best constants are estimated by carefully studying the existence of positive solutions for the ODE (\ref{V-ODE3}).
$\Box$
\begin{remark}\rm Recently, Blanchet et al. in \cite{BBDGV} studied a special case of inequality
(\ref{GM-V3}) ($a=b=1$, and $\alpha=2$) under the additional condition:
\begin{equation}\label{extra-cond}
\int_{\R^n} (1+|x|^2)^{\beta-1}u(x)dx=0 \quad \text{for } \beta<\frac{n-2}{2}.
\end{equation}
Note that we do not assume (\ref{extra-cond}) in Theorem \ref{GM-V}, and that we have found the best constants for
$\beta\leq 0$, a case that was left open in \cite{BBDGV}.
\end{remark}
\subsection{Improved Hardy and Caffarelli-Kohn-Nirenberg Inequalities}
In \cite{CKN} Caffarelli-Kohn-Nirenberg established a set of inequalities of the
following form:
\begin{equation}\label{CKN}
\hbox{
$\big(\int_{\R^n}|x|^{-bp}|u|^{p}dx\big)^{\frac{2}{p}}\leq C_{a,b}\int_{\R^n}|x|^{-2a}|\nabla
u|^2dx$ for all $u \in C^{\infty}_{0}(\R^n)$,}
\end{equation}
where for $n\geq 3$,
\begin{equation}\hbox{
$-\infty<a<\frac{n-2}{2}$, $a\leq b \leq a+1,$ and $p=\frac{2n}{n-2+2(b-a)}$.
}
\end{equation}
For the cases $n=2$ and $n=1$ the conditions are slightly different. For $n=2$
\begin{equation}\hbox{
$-\infty<a<0$, $a< b \leq a+1,$ and $p=\frac{2}{b-a}$,
}
\end{equation}
and for $n=1$
\begin{equation}\hbox{
$-\infty<a<-\frac{1}{2}$, $a+\frac{1}{2}< b \leq a+1,$ and $p=\frac{2}{-1+2(b-a)}$.
}
\end{equation}
Let $D_{a}^{1,2}$ be the completion of $C^{\infty}_{0}(\R^n)$ for the inner product
$(u,v)=\int_{\R^n}|x|^{-2a}\nabla u\cdot \nabla v\,dx$ and
let
\begin{equation}
S(a,b)=\inf_{u \in D_{a}^{1,2}\backslash \{0\}} \frac{\int_{\R^n}|x|^{-2a}|\nabla
u|^2dx}{\big(\int_{\R^n}|x|^{-bp}|u|^{p}dx\big)^{2/p}}
\end{equation}
denote the best embedding constant.
We are concerned here with the ``Hardy critical" case of the above inequalities, that is when
$b=a+1$. In this direction, Catrina and Wang \cite{CW} showed that for $n\geq 3$ we have
$S(a,a+1)=(\frac{n-2a-2}{2})^2$ and that $S(a,a+1)$ is not achieved while $S(a,b)$ is always
achieved for $a<b<a+1$. For the case $n=2$ they also showed that $S(a,a+1)=a^{2}$, and that
$S(a,a+1)$ is not achieved, while for $a<b<a+1$, $S(a,b)$ is again achieved. For $n=1$,
$S(a,a+1)=(\frac{1+2a}{2})^{2}$ is also not achieved.
In this section we give a necessary and sufficient condition for improvement of (\ref{CKN}) with
$b=a+1$ and $n\geq 1$. Our results cover also the critical case when $a=\frac{n-2}{2}$ which is
not allowed by the methods of \cite{CKN}.
\begin{theorem} \label{main-CKN} Let $W$ be a positive radial function on the ball $B$ in $\R^n$
($n \geq 1$) with radius $R$ and centered at zero.
Assume $a\leq \frac{n-2}{2}$. The following two statements are then equivalent:
\begin{enumerate}
\item $W$ is a Bessel potential on $(0, R)$.
\item There exists $c>0$ such that the following inequality holds for all $u \in
C_{0}^{\infty}(B)$
\begin{equation*}
\hbox{$({\rm H}_{a, cW})$ \quad \quad \quad \quad \quad $\int_{B}|x|^{-2a}|\nabla u(x) |^{2}dx
\geq (\frac{n-2a-2}{2})^2\int_{B}|x|^{-2a-2}u^2 dx+c\int_{B} |x|^{-2a}W(x)u^2dx,$\quad \quad \quad
\quad \quad \quad \quad \quad \quad \quad \quad }
\end{equation*}
\end{enumerate}
Moreover, $(\frac{n-2a-2}{2})^2$ is the best constant and $\beta (W; R)=\sup \{c; (H_{a,cW})
holds\}$, where $\beta (W; R)$ is the weight of the Bessel potential $W$ on $(0, R)$.
On the other hand, there is no strictly positive $W \in C^{1}(0,\infty)$,
such that the following inequality holds for all $u \in C_{0}^{\infty}(\R^n)$,
\begin{equation}\label{no-improve}
\int_{\R^n}|x|^{-2a}|\nabla u(x) |^{2}dx \geq (\frac{n-2a-2}{2})^2\int_{\R^n}|x|^{-2a-2}u^2
dx+c\int_{\R^n} W(|x|)u^2dx.
\end{equation}
\end{theorem}
{\bf Proof:} It suffices to use Theorems \ref{main} and \ref{super.hardy} with $V(r)=r^{-2a}$
to get that $W$ is a Bessel potential if and only if the pair $\big(r^{-2a}, W_{a,c}(r)\big)$ is a
Bessel pair on $(0, R)$ for some $c>0$, where
\[W_{a,c}(r)=(\frac{n-2a-2}{2})^2r^{-2-2a}+cr^{-2a}W(r).\]
For the last part, assume that (\ref{no-improve}) holds for some $W$. Then it follows from
Theorem \ref{main} that for $V(r)=cr^{2a}W(r)$ the equation $y''(r)+\frac{1}{r}y' +V(r)y=0$ has
a positive solution on $(0,\infty)$. From Lemma \ref{strict-lemma} we know that $y$ is strictly
decreasing on $(0,+\infty)$. Hence, $\frac{y''(r)}{y'(r)}\geq -\frac{1}{r}$, which yields $y'(r)\leq
-\frac{b}{r}$ for some $b>0$. Thus $y(r) \rightarrow -\infty$ as $r\rightarrow +\infty$. This is a
contradiction and the proof is complete.
$\Box$
\begin{remark} \rm Theorem \ref{main-CKN} characterizes the best constant only when $\Omega$ is a ball, while for a general domain $\Omega$, it just gives lower and upper bounds for the best constant corresponding to a given Bessel potential $W$. It is indeed clear that
\[C_{B_{R}}(W) \leq C_{\Omega}(W)\leq C_{B_{\rho}}(W),\]
where $B_{R}$ is the smallest ball containing $\Omega$ and $B_{\rho}$ is the largest
ball contained in it. If now $W$ is a Bessel potential such that $\beta (W; R)$ is independent of $R$, then clearly $\beta (W; R)$ is also the best constant in inequality $(H_{a,cW})$ for any smooth bounded domain.
This is clearly the case for the potentials $W_{k, \rho}$ and
$\tilde W_{k, \rho}$
where $\beta (W; R)=\frac{1}{4}$ for all $R$, while for $W\equiv 1$ the best constant is still not known for general domains even for the simplest case
$a=0$.
\end{remark}
Using the integral criteria for Bessel potentials, we can also deduce immediately the following.
\begin{corollary} Let $\Omega$ be a bounded smooth domain in $\R^n$ with $n\geq 1$, let $R:=\sup_{x
\in \partial \Omega}|x|$, and let $W$ be a non-negative function in $C^{1}(0, R]$ and $a\leq
\frac{n-2}{2}$.
\begin{enumerate}
\item If
$\hbox{$\liminf\limits_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds>-\infty$, } $
then there exists $\alpha:=\alpha(\Omega)>0$ such that an improved Hardy inequality $({\rm H}_{a,
W_{\alpha}})$ holds for the scaled potential $W_\alpha(x):=\alpha^2W(\alpha |x|)$.
\item If
$\hbox{$\lim\limits_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds=-\infty$, } $
then there are no $\alpha, c>0$, for which $({\rm H}_{a,W_{\alpha,c}})$ holds with
$W_{\alpha,c}=cW(\alpha |x|)$.
\end{enumerate}
\end{corollary}
By applying the above to various examples of Bessel potentials, we can now deduce several old and
new inequalities. The first is an extension of a result established by Brezis and V\'{a}zquez
\cite{BV} in the case where $a=0$, and $b=0$.
\begin{corollary} \label{CKN.BV} Let $\Omega$ be a bounded smooth domain in $\R^n$ with $n\geq 1$
and $a\leq \frac{n-2}{2}$. Then, for any $b<2a+2$ there exists $c>0$ such that for all $u \in
C_{0}^{\infty}(\Omega)$
\begin{equation}\label{beta}
\hbox{$\int_{\Omega}|x|^{-2a}|\nabla u |^{2}dx \geq
(\frac{n-2a-2}{2})^2\int_{\Omega}|x|^{-2a-2}u^2 dx+c\int_{\Omega} |x|^{-b}u^2dx.$}
\end{equation}
Moreover, when $\Omega$ is a ball $B$ of radius $R$ the best constant $c$ for which (\ref{beta})
holds is equal to the weight $\beta (r^{2a-b}; R)$ of the Bessel potential $W(r)=r^{2a-b}$ on $(0,
R]$.\\
In particular,
\begin{equation}
\hbox{ \quad \quad \quad \quad \quad \quad $\int_{B}|x|^{-2a}|\nabla u|^{2}dx \geq
(\frac{n-2a-2}{2})^2\int_{B}|x|^{-2a-2}u^2 dx+\lambda_{B}\int_{B}
|x|^{-2a}u^2dx,$\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad}
\end{equation}
where the best constant $\lambda_{B}$ is equal to $z_{0}^2\omega^{2/n}_{n}|\Omega|^{-2/n}$,
where $\omega_{n}$ and $|\Omega|$ denote the volume of the unit ball and $\Omega$ respectively,
and $z_{0}=2.4048\dots$ is the first zero of the Bessel function $J_{0}(z)$.
\end{corollary}
{\bf Proof:} It suffices to apply Theorem \ref{main-CKN} with the function $W(r)=r^{2a-b}$,
which is a Bessel potential whenever $b <2a+2$ since then
${\rm liminf}_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds>-\infty$.
In the case where $b=2a$ and therefore $W\equiv 1$, we use the fact that $\beta (1;
R)=\frac{z^2_0}{R^2}$ (established in the appendix) to deduce that the best constant is then
equal to $z_{0}^2\omega^{2/n}_{n}|\Omega|^{-2/n}$.
$\square$ \\
The following corollary is an extension of a recent result by Adimurthi et al.\ \cite{ACR}
established in the case where $a=0$, and of another result by Wang and Willem in \cite{WW}
(Theorem 2) in the case $k=1$. We also provide here the value of the best constant.
\begin{corollary}\label{CKN.A} Let $\Omega$ be a bounded smooth domain in $\R^n$ with $n\geq 1$ and
$a\leq \frac{n-2}{2}$. Then for every integer $k$, and $\rho=(\sup_{x \in \Omega}|x|)(
e^{e^{e^{.^{.^{e((k-1)-times)}}}}} )$, we have for any $u \in H^{1}_{0}(\Omega)$,
\begin{equation}\label{ar-hardy}
\hbox{$\int_{\Omega}|x|^{-2a}|\nabla u|^{2}dx \geq
(\frac{n-2a-2}{2})^2\int_{\Omega}\frac{u^2}{|x|^{2a+2}}
dx+\frac{1}{4}\sum^{k}_{j=1}\int_{\Omega}\frac{|u |^{2}}{|x|^{2a+2}}\big(
\prod^{j}_{i=1}\log^{(i)}\frac{\rho}{|x|}\big)^{-2}dx$.}
\end{equation}
Moreover, $\frac{1}{4}$ is the best constant which is not attained in $H_{0}^{1}(\Omega)$.
\end{corollary}
{\bf Proof:} As seen in the appendix, $W_{k, \rho}(r)=\sum^{k}_{j=1}\frac{1}{r^2}\big(
\prod^{j}_{i=1}\log^{(i)}\frac{\rho}{r}\big)^{-2}$ is a Bessel potential
on $(0, R)$ where $R=\sup_{x \in \Omega}|x|$, and $\beta (W_{k, \rho}; R)=\frac{1}{4}$.
$\square$\\
The very same reasoning leads to the following extension of a result established by Filippas and
Tertikas \cite{FT} in the case where $a=0$.
\begin{corollary} \label{CKN.FT} Let $\Omega$ be a bounded smooth domain in $\R^n$ with $n\geq 1$
and $a\leq \frac{n-2}{2}$. Then for every integer $k$, and any $D\geq \sup_{x \in \Omega}|x|$, we
have for $u \in H^{1}_{0}(\Omega)$,
\begin{equation}\label{ft-hardy}
\hbox{$\int_{\Omega}\frac{|\nabla u|^{2}}{|x|^{2a}}dx \geq
(\frac{n-2a-2}{2})^2\int_{\Omega}\frac{u^2}{|x|^{2a+2}}
dx+\frac{1}{4}\sum^{\infty}_{i=1}\int_{\Omega}\frac{1}{|x|^{2a+2}}X
^{2}_{1}(\frac{|x|}{D})X^{2}_{2}(\frac{|x|}{D})\ldots X^{2}_{i}(\frac{|x|}{D})|u|^{2}dx, $
}
\end{equation}
and $\frac{1}{4}$ is the best constant which is not attained in $H_{0}^{1}(\Omega)$.
\end{corollary}
The classical Hardy inequality is valid for dimensions $n\geq 3$. We now present optimal Hardy
type inequalities for dimension two in bounded domains, as well as the corresponding best constants.
\begin{theorem}\label{2dim-hardy} Let $\Omega$ be a smooth domain in $\R^2$ and $0 \in \Omega$.
Then we have the following inequalities.
\begin{itemize}
\item Let $D\geq \sup_{x \in \Omega}|x|$, then for all $u \in H^{1}_{0}(\Omega),$
\begin{equation}\hbox{$\int_{\Omega}|\nabla u |^{2}dx \geq
\frac{1}{4}\sum^{\infty}_{i=1}\int_{\Omega}\frac{1}{|x|^{2}}X
^{2}_{1}(\frac{|x|}{D})X^{2}_{2}(\frac{|x|}{D})\ldots X^{2}_{i}(\frac{|x|}{D})|u|^{2}dx$}
\end{equation}
and $\frac{1}{4}$ is the best constant.
\item Let $\rho=(\sup_{x \in \Omega}|x|)( e^{e^{e^{.^{.^{e((k-1)-times)}}}}} )$, then for all $u
\in H^{1}_{0}(\Omega)$
\begin{equation}
\hbox{$\int_{\Omega}|\nabla u |^{2}dx \geq \frac{1}{4}\sum^{k}_{j=1}\int_{\Omega}\frac{|u
|^{2}}{|x|^{2}}\big( \prod^{j}_{i=1}\log^{(i)}\frac{\rho}{|x|}\big)^{-2}dx$,}
\end{equation}
and $\frac{1}{4}$ is the best constant for all $k\geq 1$.
\item If $\alpha<2$, then there exists $c>0$ such that for all $u \in H^{1}_{0}(\Omega),$
\begin{equation}
\hbox{$\int_{\Omega}|\nabla u |^{2}dx \geq c\int_{\Omega}\frac{u^2}{|x|^{\alpha}}\, dx$,}
\end{equation}
and the best constant is larger or equal to $\beta (r^{-\alpha}; \sup\limits_{x \in \Omega}|x|)$.
\end{itemize}
\end{theorem}
An immediate application of Theorem \ref{main} coupled with H\"older's inequality gives the
following duality statement, which should be compared to inequalities dual to those of
Sobolev's, recently obtained via the theory of mass transport \cite{AGK, CNV}.
\begin{corollary} \label{dual} Suppose that $\Omega$ is a smooth bounded domain containing $0$ in $\R^{n}$ ($n\geq1$) with $R:=\sup_{x\in \Omega}|x|$. Then, for any $a\leq \frac{n-2}{2}$ and $0<p\leq 2$, we have the following dual inequalities:
\begin{eqnarray*}
\inf \left\{\int_{\Omega}|x|^{-2a}|\nabla u |^{2}dx - ( \frac{n-2a-2}{2})^{2}
\int_{\Omega}|x|^{-2a-2}|u|^{2}dx;\, u \in C_{0}^{\infty}(\Omega), \|u\|_p=1\right\}\\
\geq \sup \left\{\big(\int_\Omega (\frac{|x|^{-2a}}{W(x)})^{\frac{p}{p-2}}\, dx
\big)^{\frac{2-p}{p}}
; \, W\in \mathcal{B}(0,R)\right\}.
\end{eqnarray*}
\end{corollary}
\section{General Hardy-Rellich inequalities}
Let $0 \in \Omega \subset \R^n$ be a smooth domain, and denote
\[C^{k}_{0,r}(\Omega)=\{v \in C^{k}_{0}(\Omega): \hbox{$v$ is radial and } \operatorname{supp} v \subset \Omega\},\]
\[H^{m}_{0,r}(\Omega)=\{ u \in H^{m}_{0}(\Omega): \hbox{$u$ is radial}\}.\]
We start by considering a general inequality for radial functions.
\begin{theorem} \label{mainrad.hr} Let $V$ and $W$ be positive radial $C^1$-functions on a ball
$B\backslash \{0\}$, where $B$ is a ball with radius $R$ in $\R^n$ ($n \geq 1$) and centered at
zero. Assume $\int^{R}_{0}\frac{1}{r^{n-1}V(r)}dr=\infty$ and $\lim_{r \rightarrow
0}r^{\alpha}V(r)=0$ for some $\alpha< n-2$. Then the following statements are equivalent:
\begin{enumerate}
\item $(V, W)$ is a Bessel pair on $(0, R)$.
\item There exists $c>0$ such that the following inequality holds for all radial functions $u
\in
C^{\infty}_{0,r}(B)$
\begin{equation*}\label{gen-hardy}
\hbox{ $({\rm HR}_{V,cW})$ \quad \quad \quad \quad $\int_{B}V(x)|\Delta u |^{2}dx \geq
c\int_{B} W(x)|\nabla u|^{2}dx+(n-1)\int_{B}(\frac{V(x)}{|x|^2}-\frac{V_r(|x|)}{|x|})|\nabla
u|^2dx.$ \quad \quad \quad \quad \quad \quad \quad \quad }
\end{equation*}
\end{enumerate}
Moreover, the best constant is given by
\begin{equation}
\hbox{$\beta (V, W; R)=\sup\big\{c; \, \, ({\rm HR}_{V, cW})$ holds for radial
functions$\big\}$.}
\end{equation}
\end{theorem}
{\bf Proof:} Assume $u \in C^{\infty}_{0,r}(B)$ and observe that
\[\int_{B}V(x)|\Delta u
|^{2}dx=n\omega_{n}\{\int^{R}_{0}V(r)u_{rr}^{2}r^{n-1}dr+(n-1)^2\int^{R}_{0}V(r)\frac{u^{2}_{r}}{r^{2}}r^{n-1}dr
+2(n-1)\int^{R}_{0}V(r)u_{rr}u_{r}r^{n-2}dr\}.\]
Setting $\nu=u_{r}$, we then have
\[\int_{B}V(x)|\Delta u |^{2}dx=\int_{B}V(x)|\nabla \nu |^{2}dx+(n-1)
\int_{B}(\frac{V(|x|)}{|x|^2}-\frac{V_r(|x|)}{|x|})|\nu|^{2}dx. \]
Thus, $({\rm HR}_{V,W})$ for radial functions is equivalent to
\[\int_{B}V(x)|\nabla \nu |^{2}dx\geq \int_{B}W(x)\nu^2 dx.\]
Letting $x(r)=\nu(x)$ where $|x|=r$, we then have
\begin{equation}\label{1-dim}
\int^{R}_{0}V(r)(x'(r))^2r^{n-1}dr \geq \int^{R}_{0}W(r)x^{2}(r)r^{n-1}dr.
\end{equation}
It therefore follows from Theorem \ref{main} that 1) and 2) are equivalent.
$\Box$\\
By applying the above theorem to the Bessel pair
\[
\hbox{$V(x)=|x|^{-2m}$ \quad and \quad $W_{m}(x)=
V(x)\big[(\frac{n-2m-2}{2})^2|x|^{-2}+W(x)\big]$}
\]
where $W$ is a Bessel potential, and by using Theorem \ref{super.hardy}, we get the following
result in the case of radial functions.
\begin{corollary} \label{radial} Suppose $n\geq 1$ and $m<\frac{n-2}{2}$. Let $B_{R}\subset \R^n$
be a ball of radius $R>0$ and centered at zero. Let $W$ be a Bessel potential on $(0, R)$. Then we
have for all $u \in C^{\infty}_{0,r}(B_{R})$
\begin{equation}
\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}dx\geq (\frac{n+2m}{2})^{2}\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx+\beta (W; R)\int_{B_{R}}W(x)\frac{|\nabla u|^2}{|x|^{2m}}dx.
\end{equation}
Moreover, $(\frac{n+2m}{2})^{2}$ and $\beta (W; R)$ are the best constants.
\end{corollary}
\subsection{The non-radial case}
The decomposition of a function into its spherical harmonics will be one of our tools to prove the
corresponding result in the non-radial case. This idea has also been used in \cite{TZ}. Any
function $u \in C^{\infty}_{0}(\Omega)$ could be extended by zero outside $\Omega$, and could
therefore be considered as a function in $C^{\infty}_{0}(R^n)$. By decomposing $u$ into spherical
harmonics we get
\[
\hbox{$u=\Sigma^{\infty}_{k=0}u_{k}$
where $u_{k}=f_{k}(|x|)\varphi_{k}(x)$}
\]
and $(\varphi_k(x))_k$ are the orthonormal eigenfunctions of the Laplace-Beltrami operator
with corresponding eigenvalues $c_{k}=k(n+k-2)$, $k\geq 0$. The functions $f_{k}$ belong to
$C_{0}^{\infty}(\Omega)$ and satisfy $f_{k}(r)=O(r^k)$ and $f_{k}'(r)=O(r^{k-1})$ as $r \rightarrow
0$. In particular,
\begin{equation}\label{zero}
\hbox{ $\varphi_{0}=1$ and $f_{0}=\frac{1}{n \omega_{n}r^{n-1}}\int_{\partial B_{r}}u ds=
\frac{1}{n \omega_{n}}\int_{|x|=1}u(rx)ds.$}
\end{equation}
We also have for any $k\geq 0$, and any continuous real valued functions $v$ and $w$ on
$(0,\infty)$,
\begin{equation}
\int_{R^n}V(|x|)|\Delta u_{k}|^{2}dx=\int_{R^n}V(|x|)\big( \Delta
f_{k}(|x|)-c_{k}\frac{f_{k}(|x|)}{|x|^2}\big)^{2}dx,
\end{equation}
and
\begin{equation}
\int_{R^n}W(|x|)|\nabla u_{k}|^{2}dx=\int_{R^n}W(|x|)|\nabla
f_{k}|^{2}dx+c_{k}\int_{R^n}W(|x|)|x|^{-2}f^{2}_{k}dx.
\end{equation}
\begin{theorem} \label{main.hr} Let $V$ and $W$ be positive radial $C^1$-functions on a ball
$B\backslash \{0\}$, where $B$ is a ball with radius $R$ in $\R^n$ ($n \geq 1$) and centered at
zero. Assume $\int^{R}_{0}\frac{1}{r^{n-1}V(r)}dr=\infty$ and $\lim_{r \rightarrow
0}r^{\alpha}V(r)=0$ for some $\alpha<n-2$. If
\begin{equation}\label{main.con}
W(r)-\frac{2V(r)}{r^2}+\frac{2V_r(r)}{r}-V_{rr}(r)\geq 0 \quad \mbox{for} \ \ 0\leq r \leq R,
\end{equation}
then the following statements are equivalent.
\begin{enumerate}
\item $(V, W)$ is a Bessel pair with $\beta (V, W; R)\geq 1$.
\item The following inequality holds for all $u \in
C^{\infty}_{0}(B)$,
\begin{equation*}\label{gen-hardy}
\hbox{ $({\rm HR}_{V,W})$ \quad \quad $\int_{B}V(x)|\Delta u |^{2}dx \geq \int_{B} W(x)|\nabla
u|^{2}dx+(n-1)\int_{B}(\frac{V(x)}{|x|^2}-\frac{V_r(|x|)}{|x|})|\nabla u|^2dx.$ \quad \quad \quad
\quad \quad \quad \quad }
\end{equation*}
\end{enumerate}
Moreover, if $\beta (V, W; R)\geq 1$, then the best constant is given by
\begin{equation}
\hbox{$\beta (V, W; R)=\sup\big\{c; \, \, ({\rm HR}_{V, cW})$ holds$\big\}$.}
\end{equation}
\end{theorem}
{\bf Proof:} That 2) implies 1) follows from Theorem \ref{mainrad.hr} and does not require
condition (\ref{main.con}).
To prove that 1) implies 2) assume that the equation $(B_{V,W})$ has a positive solution on
$(0,R]$. We prove that the inequality $(HR_{V,W})$ holds for all $u \in C^{\infty}_{0}(B)$ by
frequently using that
\begin{equation}
\hbox{$\int^{R}_{0}V(r)|x'(r)|^2r^{n-1}dr \geq \int^{R}_{0}W(r)x^{2}(r)r^{n-1}dr$ for all $x\in
C^1(0, R]$.}
\end{equation}
Indeed, for all $n\geq 1$ and $k\geq 0$ we have
\begin{eqnarray*}
\frac{1}{nw_n}\int_{R^n}V(x)|\Delta u_{k}|^{2}dx&=&\frac{1}{nw_n}\int_{R^n}V(x)\big( \Delta
f_{k}(|x|)-c_{k}\frac{f_{k}(|x|)}{|x|^2}\big)^{2}dx\\
&=&
\int^{R}_{0}V(r)\big(f_{k}''(r)+\frac{n-1}{r}f_{k}'(r)-c_{k}\frac{f_{k}(r)}{r^2}\big)^{2}r^{n-1}dr
\\
&=&\int^{R}_{0}V(r)(f_{k}''(r))^{2}r^{n-1}dr+(n-1)^{2} \int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}dr\\
&&+c^{2}_{k} \int^{R}_{0}V(r)f_{k}^{2}(r)r^{n-5}
+ 2(n-1) \int^{R}_{0}V(r)f_{k}''(r)f_{k}'(r)r^{n-2}\\
&&-2c_{k}
\int^{R}_{0}V(r)f_{k}''(r)f_{k}(r)r^{n-3}dr
- 2c_{k}(n-1) \int^{R}_{0}V(r)f_{k}'(r)f_{k}(r)r^{n-4}dr.
\end{eqnarray*}
Integrate by parts and use (\ref{zero}) for $k=0$ to get
\begin{eqnarray}
\frac{1}{n\omega_{n}}\int_{R^n}V(x)|\Delta u_{k}|^{2}dx&=&
\int^{R}_{0}V(r)(f_{k}''(r))^{2}r^{n-1}dr+(n-1+2c_{k}) \int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}dr
\label{piece.0}\\
&+&
(2c_{k}(n-4)+c^{2}_{k})\int^{R}_{0}V(r)r^{n-5}f_{k}^{2}(r)dr-(n-1)\int^{R}_{0}V_r(r)r^{n
-2}(f_{k}')^{2}(r)dr\nonumber\\
&-&c_{k}(n-5)\int^{R}_{0}V_r(r)f_{k}^2(r)r^{n-4}dr-c_{k}\int^{R}_{0}V_{rr}(r)f_{k}^2(r)r^{n-3}dr.
\nonumber
\end{eqnarray}
Now define $g_{k}(r)=\frac{f_{k}(r)}{r}$ and note that $g_{k}(r)=O(r^{k-1})$ for all $k\geq 1$. We
have
\begin{eqnarray*}
\int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}&=&\int^{R}_{0}V(r)(g_{k}'(r))^{2}r^{n-1}dr+\int^{R}_{0}2V(r
)g
_{k}(r)g_{k}'(r)r^{n-2}dr+\int^{R}_{0}V(r)g_{k}^{2}(r)r^{n-3}dr\\
&=&\int^{R}_{0}V(r)(g_{k}'(r))^{2}r^{n-1}dr-(n-3)\int^{R}_{0}V(r)g_{k}^{2}(r)r^{n-3}dr
-\int_{0}^{R}V_r(r)g^2_{k}(r)r^{n-2}dr\\
\end{eqnarray*}
Thus,
\begin{equation}\label{g1}
\int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}\geq
\int^{R}_{0}W(r)f_{k}^{2}(r)r^{n-3}dr-(n-3)\int^{R}_{0}V(r)f_{k}^{2}(r)r^{n-5}dr-\int_{0}^{R}V_r(r
)f^2_{k}(r)r^{n-4}dr.
\end{equation}
Substituting $2c_k\int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}$ in (\ref{piece.0}) by its lower estimate
in the last inequality (\ref{g1}), we get
\begin{eqnarray*}
\frac{1}{n\omega_{n}}\int_{R^n}V(x)|\Delta u_{k}|^{2}dx&\geq&
\int^{R}_{0}W(r)(f_{k}'(r))^{2}r^{n-1}dr+\int^{R}_{0}W(r)(f_{k}(r))^{2}r^{n-3}dr \\ &+&(n-1)
\int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}dr+c_{k}(n-1) \int^{R}_{0}V(r)(f_{k}(r))^{2}r^{n-5}dr\\
&-&(n-1)\int^{R}_{0}V_r(r)r^{n-2}(f_{k}')^{2}(r)dr-c_{k}(n-1)\int^{R}_{0}V_r(r
)r^{n
-4}(f_{k})^{2}(r)dr\\
&+&c_{k}(c_{k}-(n-1))\int^{R}_{0}V(r)r^{n-5}f_{k}^{2}(r)dr\\
&+&c_{k}\int^{R}_{0}(W(r)-\frac{2V(r)}{r^2}+\frac{2V_r(r)}{r}-V_{rr}(r))f^2_{k}(r)r^{n-3}dr
.\\
\end{eqnarray*}
The proof is now complete since the last term is non-negative by condition (\ref{main.con}). Note
also that because of this condition, the formula for the best constant requires that $\beta (V, W;
R) \geq 1$, since if $W$ satisfies (\ref{main.con}) then $cW$ satisfies it for any $c\geq 1$.
$\square$
\begin{remark}\rm
In order to apply the above theorem to the Bessel pair
\[
\hbox{$V(x)=|x|^{-2m}$ \quad and \quad $W_{m,c}(x)=
V(x)\big[(\frac{n-2m-2}{2})^2|x|^{-2}+cW(x)\big]$}
\]
where $W$ is a Bessel potential, we see that even in the simplest case $V\equiv 1$ and $W_{m,c}(x)=
(\frac{n-2}{2})^2|x|^{-2}+W(x)$, condition (\ref{main.con}) reduces to
$(\frac{n-2}{2})^2|x|^{-2}+W(x) \geq 2|x|^{-2}$, which is then guaranteed only if $n\geq 5$.
More generally, if $V(x)=|x|^{-2m}$, then in order to satisfy (\ref{main.con}) we need to have
\begin{equation}\label{restrict}
\frac{-(n+4)-2\sqrt{n^2-n+1}}{6} \leq m\leq \frac{-(n+4)+2\sqrt{n^2-n+1}}{6},
\end{equation}
and in this case, we have for $m<\frac{n-2}{2}$ and any Bessel potential $W$ on $B_{R}$, that
for all $u \in C^{\infty}_{0}(B_{R})$
\begin{equation}\label{gm-hr.0}
\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}\geq (\frac{n+2m}{2})^2\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx+\beta (W; R)\int_{B_{R}}W(x)\frac{|\nabla u|^2}{|x|^{2m}}dx.
\end{equation}
Moreover, $(\frac{n+2m}{2})^2$ and $\beta (W; R)$ are the best constant.
Therefore, inequality (\ref{gm-hr.0}) in the case where $m=0$ and $n\geq 5$, already includes Theorem 1.5 in \cite{TZ} as a special case. It also extends Theorem 1.8 in \cite{TZ} where it is established under
the condition
\begin{equation}
0\leq m\leq \frac{-(n+4)+2\sqrt{n^2-n+1}}{6}
\end{equation} which is more restrictive than (\ref{restrict}).
We shall see however that
this inequality remains true without condition (\ref{restrict}), but with a constant
that is sometimes different from $(\frac{n+2m}{2})^2$ in the cases where (\ref{restrict}) is not
valid. For example, if $m=0$, then the best constant is $3$ in dimension $4$ and $\frac{25}{36}$
in dimension $3$.
\end{remark}
We shall now give a few immediate applications of the above in the case where $m=0$ and $n\geq 5$.
Actually the results are true in lower dimensions, and will be stated as such, but the proofs for
$n<5$ will require additional work and will be postponed to the next section.
\begin{theorem} \label{m=0} Assume $W$ is a Bessel potential on $B_{R} \subset R^n$ with $n\geq 3$,
then for all $u \in C_{0}^{\infty}(B_{R})$ we have
\begin{equation}
\int_{B_{R}}|\Delta u|^2 dx\geq C(n)\int_{B_{R}}\frac{|\nabla u|^2}{|x|^2}dx+\beta(W;
R)\int_{B_{R}}W(x)|\nabla u|^2dx,
\end{equation}
where $C(3)=\frac{25}{36}$, $C(4)=3$ and $C(n)=\frac{n^2}{4}$ for all $n\geq 5$. Moreover,
$C(n)$ and $\beta (W; R)$ are the best constants.
In particular, the following holds for any smooth bounded domain $\Omega$ in $R^n$ with
$R=\sup_{x \in \Omega}|x|$, and any $u \in H^{2}(\Omega)\cap H^{1}_{0}(\Omega)$.
\begin{itemize}
\item For any $\alpha<2$,
\begin{equation}
\int_{\Omega}|\Delta u|^2 dx\geq C(n)\int_{\Omega}\frac{|\nabla u|^2}{|x|^2}dx+\beta(|x|^\alpha;
R)\int_{\Omega}\frac{|\nabla u|^2}{|x|^\alpha}dx,
\end{equation}
and for $\alpha=0$,
\begin{equation}\label{gm-hardy-Rellich}
\hbox{$\int_{\Omega}|\Delta u |^{2}dx \geq C(n) \int_{\Omega}\frac{|\nabla
u|^2}{|x|^{2}}dx+\frac{z^{2}_{0}}{R^2}\int_{\Omega}|\nabla u|^{2}dx$,}
\end{equation}
the constants being optimal when $\Omega$ is a ball.
\item For any $k\geq 1$, and $\rho=R( e^{e^{e^{.^{.^{e(k-times)}}}}} )$, we have
\begin{equation}
\int_{\Omega}|\Delta u(x) |^{2}dx \geq C(n)\int_{\Omega}\frac{|\nabla u|^2}{|x|^2}
dx+\frac{1}{4}\sum^{k}_{j=1}\int_{\Omega}\frac{|\nabla u |^2}{|x|^2}\big(
\prod^{j}_{i=1}log^{(i)}\frac{\rho}{|x|}\big)^{-2}dx,
\end{equation}
\item For $D\geq R$, and $X_{i}$ defined as in (\ref{X-def}), we have
\begin{equation}
\int_{\Omega}|\Delta u(x) |^{2}dx \geq C(n)\int_{\Omega}\frac{|\nabla u|^2}{|x|^2}
dx+\frac{1}{4}\sum^{\infty}_{i=1}\int_{\Omega}\frac{|\nabla
u|^2}{|x|^{2}}X^{2}_{1}(\frac{|x|}{D})X^{2}_{2
}(\frac{|x|}{D})...X^{2}_{i}(\frac{|x|}{D})dx.
\end{equation}
Moreover, all constants appearing in the above two
inequalities are optimal.
\end{itemize}
\end{theorem}
\begin{theorem} Let $W(x)=W(|x|)$ be a radial Bessel potential on a ball $B$ of radius $R$ in $R^n$
with $n\geq 4$, and such that $\frac{W_r(r)}{W(r)}=\frac{\lambda}{r}+f(r)$, where $f(r)\geq 0$ and
$\lim_{r \rightarrow 0}rf(r)=0$. If $\lambda <n-2$, then the following Hardy-Rellich inequality
holds:
\begin{equation}
\int_{B}|\Delta u|^{2}dx \geq
\frac{n^{2}(n-4)^2}{16}\int_{B}\frac{u^2}{|x|^4}dx+(\frac{n^2}{4}+\frac{(n-\lambda-2)^2}{4})
\beta (W; R)\int_{B}\frac{W(x)}{|x|^2}u^2 dx.
\end{equation}
\end{theorem}
{\bf Proof:} Use first Theorem \ref{m=0} with the Bessel potential $W$, then Theorem
\ref{main-CKN} with the Bessel pair\\
$(|x|^{-2}, |x|^{-2}(\frac{(n-4)^2}{4}|x|^{-2}+W))$, then Theorem \ref{super.hardy} with the Bessel
pair $(W, \frac{(n-\lambda-2)^2}{4}|x|^{-2}W)$ to obtain
\begin{eqnarray*}
\int_{B}|\Delta u|^2 dx&\geq&C(n)\int_{B}\frac{|\nabla u|^2}{|x|^2}dx+ \beta (W,
R)\int_{B}W(x)|\nabla u|^2 dx\\
&\geq& C(n)\frac{(n-4)^2}{4}\int_{B}\frac{u^2}{|x|^4}dx+C(n)\beta (W,
R)\int_{B}\frac{W(x)}{|x|^2}u^2+\beta (W, R)\int W(x)|\nabla u|^2 dx\\
&\geq&
C(n)\frac{ (n-4)^2}{4}\int_{B}\frac{u^2}{|x|^4}dx+(C(n)+\frac{(n-\lambda-2)^2}{4})
\beta (W, R)\int_{B}\frac{W(x)}{|x|^2}u^2 dx.
\end{eqnarray*}
Recall that $C(n)=\frac{n^2}{4}$ for $n\geq 5$, giving the claimed result in these dimensions.
This is however not the case when $n=4$, and therefore another proof will be given in the next
section to cover these cases.
The following is immediate from Theorem \ref{m=0} and from the fact that $\lambda=2$ for the
Bessel potential under consideration.
\begin{corollary} Let $\Omega$ be a smooth bounded domain in $\R^n$, $n \geq 4$ and $R=\sup_{x \in
\Omega}|x|$. Then the following holds for all $u \in H^{2}(\Omega) \cap H^{1}_{0}(\Omega)$
\begin{enumerate}
\item If $\rho=R( e^{e^{e^{.^{.^{e(k-times)}}}}} )$ and
$log^{(i)}(.)$ is defined as (\ref{log-def}), then
\begin{equation}
\int_{\Omega}|\Delta u(x) |^{2}dx \geq \frac{n^2(n-4)^2}{16}\int_{\Omega}\frac{u^2}{|x|^4}
dx+(1+\frac{n(n-4)}{8})\sum^{k}_{j=1}\int_{\Omega}\frac{u^2}{|x|^4}\big(
\prod^{j}_{i=1}log^{(i)}\frac{\rho}{|x|}\big)^{-2}dx.
\end{equation}
\item If $D\geq R$ and $X_{i}$ is defined as (\ref{X-def}), then
\begin{equation}
\int_{\Omega}|\Delta u(x) |^{2}dx \geq \frac{n^2(n-4)^2}{16}\int_{\Omega}\frac{u^2}{|x|^4}
dx+(1+\frac{n(n-4)}{8})\sum^{\infty}_{i=1}\int_{\Omega}\frac{u^2}{|x|^{4}}X^{2}_{1}(\frac{|x|}{D})
X^{2}_{2
}(\frac{|x|}{D})...X^{2}_{i}(\frac{|x|}{D})dx.
\end{equation}
\end{enumerate}
\end{corollary}
\begin{theorem} \label{n-dim} Let $W_1(x)$ and $W_2(x)$ be two radial Bessel potentials on a ball
$B$ of radius $R$ in $R^n$ with $n\geq 4$. If $a<1$, then there exists $c(a, R)>0$ such that for
all $u \in H^{2}(B)\cap H_{0}^{1}(B)$
\begin{eqnarray*}
\int_{B}|\Delta u |^{2}dx &\geq& \frac{n^2(n-4)^2}{16}
\int_{B}\frac{u^{2}}{|x|^{4}}dx+ \frac{n^2}{4}\beta (W_1; R)\int_{B}
W_1(x)\frac{u^{2}}{|x|^2}dx\\
&&+ c(\frac{n-2a-2}{2})^2 \int_{B}\frac{u^2}{|x|^{2a+2}}dx+c\beta (W_2;
R)\int_{B}W_2(x)\frac{u^2}{|x|^{2a}}dx,
\end{eqnarray*}
\end{theorem}
{\bf Proof:} Here again we shall give a proof when $n\geq 5$. The case $n=4$ will be handled in
the next section. We again first use Theorem \ref{m=0} (for $n\geq 5$) with the Bessel potential
$|x|^{-2a}$ where $a<1$, then Theorem \ref{main-CKN} with the Bessel pair $(|x|^{-2},
|x|^{-2}(\frac{(n-4)^2}{4}|x|^{-2}+W) )$, then again Theorem \ref{main-CKN} with the Bessel pair\\
$(|x|^{-2a}, |x|^{-2a}((\frac{n-2a-2}{2})^2|x|^{-2}+W))$
to obtain
\begin{eqnarray*}
\int_{B}|\Delta u|^2 dx&\geq& \frac{n^2}{4} \int_{B}\frac{|\nabla u|^2}{|x|^2}dx+ \beta
(|x|^{-2a}; R)\int_{B}\frac{|\nabla u|^2}{|x|^{-2a}} dx\\
&\geq& \frac{n^2(n-4)^2}{16}\int_{B}\frac{u^2}{|x|^4}dx+\frac{n^2}{4} \beta (W_1;
R)\int_{B}W_1(x)\frac{u^2}{|x|^2}dx+ \beta (|x|^{-2a}; R)\int_{B}\frac{|\nabla u|^2}{|x|^{-2a}}
dx\\&\geq&
\frac{n^2(n-4)^2}{16}\int_{B}\frac{u^2}{|x|^4}dx+\frac{n^2}{4} \beta (W_1;
R)\int_{B}W_1(x)\frac{u^2}{|x|^2}dx\\
&&+ \beta (|x|^{-2a}; R)(\frac{n-2a-2}{2})^2 \int_{B}\frac{u^2}{|x|^{2a+2}}dx
+\beta (|x|^{-2a}; R)\beta(W_2; R)\int_{B}W_2(x)\frac{u^2}{|x|^{2a}}dx.
\end{eqnarray*}
The following theorem will be established in full generality (i.e with $V(r)=r^{-m}$) in the next
section.
\begin{theorem} Let $W(x)=W(|x|)$ be a radial Bessel potential on a smooth bounded domain $\Omega$
in $\R^n$, $n \geq 4$. Then,
\begin{equation*}\label{HR1}
\hbox{$ \quad \quad \quad \int_{\Omega}|\Delta u(x) |^{2}dx -\frac{n^2(n-4)^2}{16}
\int_{\Omega}\frac{u^{2}}{|x|^{4}}dx-
\frac{n^2}{4}\int_{\Omega} W(x)u^{2}dx\geq \frac{z^{2}_{0}}{R^2}||u||^{2}_{W_{0}^{1,2}(\Omega)}
\quad u \in H^{2}_{0}(\Omega). \quad \quad $}
\end{equation*}
\end{theorem}
\subsection{The case of power potentials $|x|^m$}
The general Theorem \ref{main.hr} allowed us to deduce inequality (\ref{gm-hr}) below for a
restricted interval of powers $m$. We shall now prove that the same holds for all
$m<\frac{n-2}{2}$. The following theorem improves considerably Theorem 1.7, Theorem 1.8, and
Theorem 6.4 in \cite{TZ}.
\begin{theorem}\label{gm.hr}
Suppose $n\geq 1$ and $m<\frac{n-2}{2}$, and let $W$ be a Bessel potential on a ball $B_{R}\subset
R^n$ of radius $R$. Then for all $u \in C^{\infty}_{0}(B_{R})$
\begin{equation}\label{gm-hr}
\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}\geq a_{n,m}\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx+\beta(W; R)\int_{B_{R}}W(x)\frac{|\nabla u|^2}{|x|^{2m}}dx,
\end{equation}
where
\[
a_{n,m}=\inf \left\{\frac{\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}dx}{\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx};\, u\in C^{\infty}_{0}(B_{R})\setminus \{0\}\right\}.
\]
Moreover, $\beta(W; R)$ and $a_{n,m}$ are the best constants, to be computed in the appendix.
\end{theorem}
{\bf Proof:} Assuming the inequality
\[\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}\geq a_{n,m}\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx,\]
holds for all $u \in C^{\infty}_{0}(B_{R})$, we shall prove that it can be improved by any Bessel
potential $W$. We will use the following inequality frequently in the proof which follows directly
from Theorem \ref{main-CKN} with n=1.
\begin{equation}\label{freq-in}
\int_{0}^{R}r^{\alpha}(f'(r))^{2}dr\geq
(\frac{\alpha-1}{2})^2\int_{0}^{R}r^{\alpha-2}f^2(r)dr+\beta(W;R)\int_{0}^{R}r^{\alpha}W(r)
f^2(r)dr, \ \ \alpha \geq 1,
\end{equation}
for all $f \in C^{\infty}(0,R)$, where both $(\frac{\alpha-1}{2})^2$ and $\beta(W;R)$ are best
constants.
Decompose $u \in C^{\infty}_{0}(B_{R})$ into its spherical harmonics $
\Sigma^{\infty}_{k=0}u_{k}$, where $u_{k}=f_{k}(|x|)\varphi_{k}(x)$. We evaluate
$I_k=\frac{1}{nw_n}\int_{R^n} \frac{|\Delta u_{k}|^2}{|x|^{2m}}dx$ in the following way
\begin{eqnarray*}
I_k&=&\int_{0}^{R}r^{n-2m-1}(f''_{k}(r))^2dr+[(n-1)(2m+1)+2c_{k}]\int^{R}_{0}r^{n-2m-3}(f_{k}')^2
dr\\
&&+c_{k}[c_{k}+(n-2m-4)(2m+2)]\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr\\
&\geq& \beta
(W)\int^{R}_{0}r^{n-2m-1}W(x)(f_{k}')^2dr+[(\frac{n+2m}{2})^2+2c_{k}]\int^{R}_{0}r^{n-2m-3}(f_{k}'
)^2dr\\
&&+c_{k}[c_{k}+(n-2m-4)(2m+2)]\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr\\
&\geq& \beta (W)\int^{R}_{0}r^{n-2m-1}W(x)(f_{k}')^2dr+a_{n,m}\int^{R}_{0}r^{n-2m-3}(f_{k}')^2dr\\
&&+\beta (W)[(\frac{n+2m}{2})^2+2c_{k}-a_{n,m}]\int^{R}_{0}r^{n-2m-3}W(x)(f_{k})^2dr\\
&&+\big((\frac{n-2m-4}{2})^2[(\frac{n+2m}{2})^2+2c_{k}-a_{n,m}]+c_{k}[c_{k}+(n-2m-4)(2m+2)]\big)
\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr.\\
\end{eqnarray*}
Now by (\ref{a-nm-k}) we have
\[\big((\frac{n-2m-4}{2})^2[(\frac{n+2m}{2})^2+2c_{k}-a_{n,m}]+c_{k}[c_{k}+(n-2m-4)(2m+2)]\geq
c_{k}a_{n,m},\]
for all $k\geq 0$. Hence, we have
\begin{eqnarray*}
I_{k}&\geq&a_{n,m}\int^{R}_{0}r^{n-2m-3}(f_{k}')^2dr+a_{n,m}c_{k}\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^
2dr\\
&&+\beta (W)\int^{R}_{0}r^{n-2m-1}W(x)(f_{k}')^2dr+\beta
(W)[(\frac{n+2m}{2})^2+2c_{k}-a_{n,m}]\int^{R}_{0}r
^{n-2m-3}W(x)(f_{k})^2dr\\
&\geq& a_{n,m}\int^{R}_{0}r^{n-2m-3}(f_{k}')^2dr+a_{n,m}c_{k}
\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr\\
&&+ \beta (W)\int^{R}_{0}r^{n-2m-1}W(x)(f_{k}')^2dr+
\beta (W)c_{k}\int^{R}_{0}r^{n-2m-3}W(x)(f_{k})^2dr\\
&=&a_{n,m}\int_{B_{R}}\frac{|\nabla u|^2}{|x|^{2m+2}}dx+\beta (W)\int_{B_{R}}W(x)\frac{|\nabla
u|^2}{|x|^{2m}}dx.
\end{eqnarray*}
Moreover, it is easy to see from Theorem \ref{main} and the above calculation that $\beta (W; R)$
is the best constant.
\begin{theorem}\label{super.hardy-rellich}
Let $\Omega$ be a smooth domain in $R^{n}$ with $n\geq 1$ and let $V \in C^{2}(0,R=:\sup_{x \in
\Omega}|x|)$ be a non-negative function that satisfies the following conditions:
\begin{equation}
\hbox{$V_r(r)\leq 0$\quad and \quad
$\int^{R}_{0}\frac{1}{r^{n-3}V(r)}dr=-\int^{R}_{0}\frac{1}{r^{n-4}V_r(r)}dr=+\infty$.}
\end{equation}
There exist $\lambda_{1}, \lambda_{2} \in R$ such that
\begin{equation}
\hbox{$\frac{rV_r(r)}{V(r)}+\lambda_{1} \geq 0$ on $(0, R)$ and $\lim\limits_{r\to
0}\frac{rV_r(r)}{V(r)}+\lambda_{1} =0$,}
\end{equation}
\begin{equation}
\hbox{$\frac{rV_{rr}(r)}{V_r(r)}+\lambda_{2} \geq 0$ on $(0, R)$ and $\lim\limits_{r\to
0}\frac{rV_{rr}(r)}{V_r(r)}+\lambda_{2} =0$,}
\end{equation}
and
\begin{equation}\label{super.hr.con}
\hbox{$\left(\frac{1}{2}(n-\lambda_{1}-2)^2+3(n-3)\right)V(r)-(n-5)rV_r(r)-r^2V_{rr}(r)\geq 0$ for
all $r \in (0,R)$. }
\end{equation}
Then the following inequality holds:
\begin{eqnarray}\label{super.hr}
\int_{\Omega}V(|x|)|\Delta u|^2 dx&\geq&
(\frac{(n-\lambda_{1}-2)^{2}}{4}+(n-1))\frac{(n-\lambda_{1}-4)^{2}}{4}\int_{\Omega}\frac{V(|x|)}{|
x|^4}u^2 dx \nonumber\\
&&-\frac{(n-1)(n-\lambda_{2}-2)^{2}}{4}\int_{\Omega}\frac{V_r(|x|)}{|x|^3}u^2 dx.
\end{eqnarray}
\end{theorem}
{\bf Proof:} We have by Theorem \ref{super.hardy} and condition (\ref{super.hr.con}),
{\small
\begin{eqnarray*}
\frac{1}{n\omega_{n}}\int_{R^n}V(x)|\Delta u_{k}|^{2}dx&=&
\int^{R}_{0}V(r)(f_{k}''(r))^{2}r^{n-1}dr+(n-1+2c_{k}) \int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}dr
\\
&+&
(2c_{k}(n-4)+c^{2}_{k})\int^{R}_{0}V(r)r^{n-5}f_{k}^{2}(r)dr-(n-1)\int^{R}_{0}V_r(r)r^{n
-2}(f_{k}')^{2}(r)dr\\
&-&c_{k}(n-5)\int^{R}_{0}V_r(r)f_{k}^2(r)r^{n-4}dr-c_{k}\int^{R}_{0}V_{rr}(r)f_{k}^2(r)r^{n-3}dr\\
&\geq&\int^{R}_{0}V(r)(f_{k}''(r))^{2}r^{n-1}dr+(n-1) \int^{R}_{0}V(r)(f_{k}'(r))^{2}r^{n-3}dr
\\
&-&
(n-1)\int^{R}_{0}V_r(r)r^{n-2}(f_{k}')^{2}(r)dr\\
&+&c_{k}\int_{0}^{R}\left(\left(\frac{1}{2}(n-\lambda_{1}-2)^2+3(n-3)\right)V(r)-(n-5)rV_r(r)-r^2V
_{rr}(r) \right)f_{k}^2(r)r^{n-5}dr.
\end{eqnarray*}
}
The rest of the proof follows
from the above inequality combined with Theorem \ref{super.hardy}.
$\Box$
\begin{remark}\rm
Let $V(r)=r^{-2m}$ with $m\leq \frac{n-4}{2}$. Then in order to satisfy condition
(\ref{super.hr.con}) we must have $-1-\frac{\sqrt{1+(n-1)^2}}{2}\leq m\leq \frac{n-4}{2}$. Under
this assumption the inequality (\ref{super.hr}) gives the following weighted second order Rellich
inequality:
\[\int_{B}\frac{|\Delta u|^2}{|x|^{2m}}dx\geq
(\frac{(n+2m)(n-4-2m)}{4})^2\int_{B}\frac{u^2}{|x|^{2m+4}}dx.\]
In the following theorem we will show that the constant appearing in the above inequality is
optimal. Moreover, we will see that if $m<-1-\frac{\sqrt{1+(n-1)^2}}{2}$, then the best constant
is strictly less than $(\frac{(n+2m)(n-4-2m)}{4})^2$. This shows that inequality (\ref{super.hr})
is actually sharp.
\end{remark}
\begin{theorem}
Let $m\leq \frac{n-4}{2}$ and define
\begin{equation}
\beta_{n,m}=\inf_{u \in C^{\infty}_{0}(B)\backslash \{0\}}\frac{\int_{B}\frac{|\Delta
u|^2}{|x|^{2m}}dx}{\int_{B}\frac{u^2}{|x|^{2m+4}}dx}.
\end{equation}
Then
\[
\beta_{n,m}=(\frac{(n+2m)(n-4-2m)}{4})^2+\min_{k=0,1,2,...}\{
k(n+k-2)[k(n+k-2)+\frac{(n+2m)(n-2m-4)}{2}]\}.\]
Consequently the values of $\beta_{n,m}$ are as follows.
\begin{enumerate}
\item If $-1-\frac{\sqrt{1+(n-1)^2}}{2}\leq m\leq \frac{n-4}{2}$, then
\[\beta_{n,m}=(\frac{(n+2m)(n-4-2m)}{4})^2.\]
\item If $\frac{n}{2}-3 \leq m\leq -1-\frac{\sqrt{1+(n-1)^2}}{2}$, then
\[\beta_{n,m}=(\frac{(n+2m)(n-4-2m)}{4})^2+(n-1)[(n-1)+\frac{(n+2m)(n-2m-4)}{2}].\]
\item If $k:=\frac{n-2m-4}{2} \in N$, then
\[\beta_{n,m}=(\frac{(n+2m)(n-4-2m)}{4})^2+k(n+k-2)[k(n+k-2)+\frac{(n+2m)(n-2m-4)}{2}].\]
\item If $k<\frac{n-2m-4}{2}<k+1$ for some $k \in N$, then
\begin{eqnarray*}
\beta_{n,m}=\frac{(n+2m)^2(n-2m-4)^2}{16}+a(m, n, k)
\end{eqnarray*}
\end{enumerate}
where
\footnotesize
\[
a(m,n,k)=\min\left\{k(n+k-2)[k(n+k-2)+\frac{(n+2m)(n-2m-4)}{2}],
(k+1)(n+k-1)[(k+1)(n+k-1)+\frac{(n+2m)(n-2m-4)}{2}]\right\}.
\]
\end{theorem}
{\bf Proof:} Decompose $u \in C^{\infty}_{0}(B_{R})$ into spherical harmonics
$\Sigma^{\infty}_{k=0}u_{k}$, where $u_{k}=f_{k}(|x|)\varphi_{k}(x)$. We have
\begin{eqnarray*}
\frac{1}{n\omega_{n}}\int_{R^n} \frac{|\Delta
u_{k}|^2}{|x|^{2m}}dx&=&\int_{0}^{R}r^{n-2m-1}(f''_{k}(r))^2dr+[(n-1)(2m+1)+2c_{k}]\int^{R}_{0}r^{
n-2m-3}(f_{k}')^2dr\\
&+&c_{k}[c_{k}+(n-2m-4)(2m+2)]\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr\\
&\geq&\big((\frac{(n+2m)(n-4-2m)}{4})^2\\
&+&c_{k}[c_{k}+\frac{(n+2m)(n-2m-4)}{2}]\big)\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr,
\end{eqnarray*}
by Hardy inequality. Hence,
\[\beta_{n,m}\geq B(n,m,k):= (\frac{(n+2m)(n-4-2m)}{4})^2
+ \min_{k=0,1,2,...}\{k(n+k-2)[k(n+k-2)+\frac{(n+2m)(n-2m-4)}{2}]\}.\]
To prove that $\beta_{n,m}$ is the best constant, let $k$ be such that
\begin{eqnarray}
\beta_{n,m}=(\frac{(n+2m)(n-4-2m)}{4})^2
+k(n+k-2)[k(n+k-2)+\frac{(n+2m)(n-2m-4)}{2}].
\end{eqnarray}
Set
\[u=|x|^{-\frac{n-4}{2}+m+\epsilon}\varphi_{k}(x)\varphi(|x|),\]
where $\varphi_{k}(x)$ is an eigenfunction corresponding to the eigenvalue $c_{k}$ and
$\varphi(r)$ is a smooth cutoff function, such that $0 \leq \varphi \leq 1$, with $\varphi\equiv
1$ in $[0,\frac{1}{2}]$. We have
\[\frac{\int_{B_R}\frac{|\Delta u|^2}{|x|^{2m}}dx}{\int_{B_{R}}\frac{
u^2}{|x|^{2m+4}}dx}=(-\frac{(n+2m)(n-4-2m)}{4}-c_{k}+\epsilon(2+2m+\epsilon))^2+O(1).\]
Let now $\epsilon \rightarrow 0$ to obtain the result. Thus the inequality
\[\int_{B_R}\frac{|\Delta u|^2}{|x|^{2m}}\geq \beta_{n,m}\int_{B_R}\frac{
u^2}{|x|^{2m+4}}dx,\]
holds for all $u \in C^{\infty}_{0}(B_{R})$.
To calculate explicit values of $\beta_{n,m}$ we need to find the minimum point of the function
\[f(x)=x(x+\frac{(n+2m)(n-2m-4)}{2}), \ \ x\geq 0.\]
Observe that
\[f'(-\frac{(n+2m)(n-2m-4)}{4})=0.\]
To find the minimizer $k \in N$ we should solve the equation
\[k^2+(n-2)k+\frac{(n+2m)(n-2m-4)}{4}=0.\]
The roots of the above equation are $x_{1}=\frac{n+2m}{2}$ and $x_{2}=\frac{n-2m-4}{2}$. Claim 1)
follows from Theorem \ref{super.hardy-rellich}. It is easy to see that if $m\leq
-1-\frac{\sqrt{1+(n-1)^2}}{2}$, then $x_{1}<0$. Hence, for $m\leq -1-\frac{\sqrt{1+(n-1)^2}}{2}$
the minimum of the function $f$ is attained at $x_{2}$. Note that if $m\leq
-1-\frac{\sqrt{1+(n-1)^2}}{2}$, then $B(n,m,1)\leq B(n,m,0)$. Therefore claims 2), 3), and 4)
follow.
$\Box$\\
The following theorem extends Theorem 1.6 of \cite{TZ} in many ways. First, we do not assume that
$n\geq 5$ or $m\geq 0$, as was assumed there. Moreover, inequality (\ref{ex-gen-hr}) below
includes inequalities (1.17) and (1.22) of \cite{TZ} as special cases.
\begin{theorem}\label{gen.hr} Let $m\leq \frac{n-4}{2}$ and let $W(x)$ be a Bessel potential on a
ball $B$ of radius $R$ in $R^n$. Assume
$\frac{W(r)}{W_r(r)}=-\frac{\lambda}{r}+f(r)$, where $f(r)\geq 0$ and $\lim_{r \rightarrow
0}rf(r)=0$. Then the following inequality holds for all $u \in C^{\infty}_{0}(B)$
\begin{eqnarray}\label{ex-gen-hr}
\int_{B}\frac{|\Delta u|^{2}}{|x|^{2m}}dx &\geq& \beta_{n,m}\int_{B}\frac{u^2}{|x|^{2m+4}}dx
\nonumber\\
&&\quad+\beta (W; R)(\frac{(n+2m)^2}{4}+\frac{(n-2
m-\lambda-2)^2}{4})
\int_{B}\frac{W(x)}{|x|^{2m+2}}u^2 dx.
\end{eqnarray}
\end{theorem}
{\bf Proof:} Again we will frequently use inequality (\ref{freq-in}) in the proof. Decomposing $u
\in C^{\infty}_{0}(B_{R})$ into spherical harmonics
$\Sigma^{\infty}_{k=0}u_{k}$, where $u_{k}=f_{k}(|x|)\varphi_{k}(x)$, we can write
\begin{eqnarray*}
\frac{1}{n\omega_{n}}\int_{R^n} \frac{|\Delta
u_{k}|^2}{|x|^{2m}}dx&=&\int_{0}^{R}r^{n-2m-1}(f''_{k}(r))^2dr+[(n-1)(2m+1)+2c_{k}]\int^{R}_{0}r^{
n-2m-3}(f_{k}')^2dr\\
&&+c_{k}[c_{k}+(n-2m-4)(2m+2)]\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr\\
&\geq&(\frac{n+2m}{2})^2\int^{R}_{0}r^{n-2m-3}(f_{k}')^2dr+ \beta (W;
R)\int^{R}_{0}r^{n-2m-1}W(x)(f_{k}')^2dr\\
&&+c_{k}[c_{k}+2(\frac{n-\lambda-4}{2})^2+(n-2m-4)(2m+2)]\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr,
\end{eqnarray*}
where we have used the fact that $c_k\geq 0$ to get the above inequality. We have
\begin{eqnarray*}
\frac{1}{n\omega_{n}}\int_{R^n} \frac{|\Delta u_{k}|^2}{|x|^{2m}}dx
&\geq& \beta_{n,m} \int^{R}_{0}r^{n-2m-5}(f_{k})^2dr\\
&&+\beta (W; R)\frac{(n+2m)^{2}}{4}
\int^{R}_{0}r^{n-2m-3}W(x)(f_{k})^2dr\\
&& +\beta (W; R)\int^{R}_{0}r^{n-2m-1}W(x)(f_{k}')^2dr\\
&\geq&\beta_{n,m}\int^{R}_{0}r^{n-2m-5}(f_{k})^2dr\\
&&+\beta (W; R)(\frac{(n+2m)^2}{4}+\frac{(n-2
m-\lambda-2)^2}{4})
\int^{R}_{0}r^{n-2m-3}W(x)(f_{k})^2dr \\
&\geq& \frac{\beta_{n,m}}{n\omega_{n}}\int_{B}\frac{u_{k}^2}{|x|^{2m+4}}dx\\
&&+\frac{\beta (W; R)}{n\omega_{n}}(\frac{(n+2m)^2}{4}+\frac{(n-2
m-\lambda-2)^2}{4})
\int_{B}\frac{W(x)}{|x|^{2m+2}}u_{k}^2 dx,
\end{eqnarray*}
by Theorem \ref{super.hardy}. Hence, (\ref{ex-gen-hr}) holds and the proof is complete.
$\Box$
\begin{theorem}\label{hrs-in} Assume $-1<m\leq \frac{n-4}{2}$ and let $W(x)$ be a Bessel potential
on a ball $B$ of radius $R$ and centered at zero in $R^n$ ($n\geq 1$). Then there exists $C>0$
such that the following holds for all $u \in C^{\infty}_{0}(B)$:
\begin{eqnarray}\label{hrs}
\int_{B}\frac{|\Delta u|^{2}}{|x|^{2m}}dx &\geq&
\frac{(n+2m)^{2}(n-2m-4)^2}{16}\int_{B}\frac{u^2}{|x|^{2m+4}}dx \\
&&+\beta (W; R)\frac{(n+2m)^2}{4}
\int_{B}\frac{W(x)}{|x|^{2m+2}}u^2 dx+ \beta(|x|^{2m}; R)||u||_{H^1_0}.
\end{eqnarray}
\end{theorem}
{\bf Proof:} Decomposing again $u \in C^{\infty}_{0}(B_{R})$ into its spherical harmonics
$\Sigma^{\infty}_{k=0}u_{k}$ where $u_{k}=f_{k}(|x|)\varphi_{k}(x)$, we calculate
\begin{eqnarray*}
\frac{1}{n\omega_{n}}\int_{R^n} \frac{|\Delta
u_{k}|^2}{|x|^{2m}}dx&=&\int_{0}^{R}r^{n-2m-1}(f''_{k}(r))^2dr+[(n-1)(2m+1)+2c_{k}]\int^{R}_{0}r^{
n-2m-3}(f_{k}')^2dr\\
&+&c_{k}[c_{k}+(n-2m-4)(2m+2)]\int_{0}^{R}r^{n-2m-5}(f_{k}(r))^2dr\\
&\geq&(\frac{n+2m}{2})^2\int^{R}_{0}r^{n-2m-3}(f_{k}')^2dr+
\beta(|x|^{2m}; R)\int^{R}_{0}r^{n-1}(f_{k}')^2dr\\
&+&c_{k}\int^{R}_{0}r^{n-2m-3}(f_{k}')^2dr\\
&\geq& \frac{(n+2m)^{2}(n-2m-4)^2}{16}\int^{R}_{0}r^{n-2m-5}(f_{k})^2dr\\
&&+\beta
(W; R)\frac{(n+2m)^2}{4}
\int^{R}_{0}W(r)r^{n-2m-3}(f_{k})^2dr\\
&+&\beta(|x|^{2m}; R)\int^{R}_{0}r^{n-1}(f_{k}')^2dr+c_{k}\beta(|x|^{2m}; R)\int^{R}_{0}r^{n-3}(f_{k})^2
dr\\
&=&\frac{(n+2m)^{2}(n-2m-4)^2}{16n\omega_{n}}\int_{R^n}\frac{u_{k}^2}{|x|^{2m+4}}dx\\
&+&\frac{\beta (W; R)}{n\omega_{n}}(\frac{(n+2m)^2}{4})
\int_{R^n}\frac{W(x)}{|x|^{2m+2}}u_{k}^2 dx+ \beta(|x|^{2m}; R)||u_{k}||_{W_{0}^{1,2}}.
\end{eqnarray*}
Hence (\ref{hrs}) holds.
$\Box$
We note that even for $m=0$ and $n\geq 4$, Theorem \ref{hrs-in} improves considerably Theorem
A.2. in \cite{AGS}.
\section{Higher order Rellich inequalities}
In this section we will repeat the results obtained in the previous section to derive higher order
Rellich inequalities with corresponding improvements. Let $W$ be a Bessel potential, $\beta_{n,m}$
be defined as in Theorem \ref{gen.hr} and
\[\sigma_{n,m}=\beta (W; R)(\frac{(n+2m)^2}{4}+\frac{(n-2
m-\lambda-2)^2}{4}).\]
For the sake of convenience we make the following convention: $\prod\limits_{i=1}^{0}a_{i}=1.$
\begin{theorem}\label{h.o.rellich1}
Let $B_{R}$ be a ball of radius $R$ and $W$ be a Bessel potential on $B_{R}$ such that
$\frac{W(r)}{W_r(r)}=-\frac{\lambda}{r}+f(r)$, where $f(r)\geq 0$ and $\lim_{r \rightarrow
0}rf(r)=0$. Assume $m \in N$, $1\leq l\leq m$, and $2k+4m\leq n$. Then the following inequality
holds for all $u \in C^{\infty}_{0}(B_R)$
\begin{equation}
\int_{B_{R}}\frac{|\Delta^{m}u|^2}{|x|^{2k}}dx \geq
\prod\limits_{i=0}^{l-1}\beta_{n,k+2i}\int_{B_{R}}\frac{|\Delta^{m-l}u|^2}{|x|^{2k+4l}}dx+
\sum\limits_{i=0}^{l-1
}\sigma_{n,k+2i}\prod\limits_{j=1}^{l-1}\beta_{n,k+2j-2}\int_{B_{R}}\frac{W(x)|\Delta^{m-i-1}u|^2}
{|x
|^{2k+4i+2}}dx
\end{equation}
\end{theorem}
{\bf Proof:} Follows directly from theorem \ref{gen.hr}.
$\Box$
\begin{theorem}\label{h.o.rellich2}
Let $B_{R}$ be a ball of radius $R$ and $W$ be a Bessel potential on $B_{R}$ such that
$\frac{W(r)}{W_r(r)}=-\frac{\lambda}{r}+f(r)$, where $f(r)\geq 0$ and $\lim_{r \rightarrow
0}rf(r)=0$. Assume $m \in N$, $1\leq l\leq m$, and $2k+4m+2\leq n$. Then the following inequality
holds for all $u \in C^{\infty}_{0}(B_R)$
\begin{eqnarray}
\int_{B_{R}}\frac{|\nabla \Delta^{m}u|^2}{|x|^{2k}}dx &\geq& (\frac{n-2k-2}{2})^2
\prod\limits_{i=0}^{l-1}\beta_{n,k+2i+1}\int_{B_{R}}\frac{|\Delta^{m-l}u|^2}{|x|^{2k+4l+2}}dx
\nonumber\\
&+&(\frac{n-2k-2}{2})^2
\sum\limits_{i=0}^{l-1}\sigma_{n,k+2i+1}\prod\limits_{j=1}^{l-1}\beta_{n,k+2j-1}\int_{B_{R}}
\frac{W
(x)
|\Delta^{m-i-1}u|^2}{|x|^{2k+4i+4}}dx\nonumber \\
&+&\beta(W; R)\int_{B_{R}}W(x)\frac{|\Delta^{m}u|^2}{|x|^{2k}}dx
\end{eqnarray}
\end{theorem}
{\bf Proof:} Follows directly from Theorem \ref{main-CKN} and the previous theorem.
$\Box$
\begin{remark} For $k=0$ Theorems \ref{h.o.rellich1} and \ref{h.o.rellich2} include Theorem 1.9 in
\cite{TZ} as a special case.
\end{remark}
\begin{theorem}\label{h.o.rellich3}
Let $B_{R}$ be a ball of radius $R$ and $W$ be a Bessel potential on $B_{R}$ such that
$\frac{W(r)}{W_r(r)}=-\frac{\lambda}{r}+f(r)$, where $f(r)\geq 0$ and $\lim_{r \rightarrow
0}rf(r)=0$. Assume $m \in N$, $1\leq l\leq m-1$, and $2k+4m\leq n$. Then the following inequality
holds for all $u \in C^{\infty}_{0}(B_R)$
\begin{eqnarray}
\int_{B_{R}}\frac{|\Delta^{m}u|^2}{|x|^{2k}}dx &\geq& a_{n,k}(\frac{n-2k-4}{2})^2
\prod\limits_{i=0}^{l-1}\beta_{n,k+2i+2}\int_{B_{R}}\frac{|\Delta^{m-l-1}u|^2}{|x|^{2k+4l+4}}dx
\nonumber\\
&+&a_{n,k}(\frac{n-2k-4}{2})^2
\sum\limits_{i=0}^{l-1}\sigma_{n,k+2i+2}\prod\limits_{j=1}^{l-1}\beta_{n,k+2j}\int_{B_{R}}\frac{W(
x)|\Delta^{m-i-2}u|^2}{|x|^{2k+4i+6}}dx\nonumber \\
&+&\beta(W; R)a_{n,k}\int_{B_{R}}W(x)\frac{|\Delta^{m-1}u|^2}{|x|^{2k+2}}dx+\beta(W;
R)\int_{B_{R}}W(x
)
\frac{|\nabla \Delta^{m-1}u|^2}{|x|^{2k}}dx
\end{eqnarray}
where $a_{n,m}$ is defined in Theorem \ref{gm.hr}.
\end{theorem}
{\bf Proof:} Follows directly from Theorem \ref{gm.hr} and the previous theorem.
$\Box$\\
The following improves Theorem 1.10 in \cite{TZ} in many ways, since it is assumed there that
$l\leq \frac{-n+8+2\sqrt{n^2-n+1}}{12}$ and $4m<n$. Even for $k=0$, Theorem \ref{h.o.rellich4}
below shows that we can drop the first condition and replace the second one by $4m\leq n$.
\begin{theorem}\label{h.o.rellich4}
Let $B_{R}$ be a ball of radius $R$ and $W$ be a Bessel potential on $B_{R}$. Assume $m
\in N$, $1\leq l\leq m$, and $2k+4m\leq n$. Then the following inequality holds for all $u \in
C^{\infty}_{0}(B_R)$
\begin{eqnarray}\label{nuts}
\int_{B_{R}}\frac{|\Delta^{m}u|^2}{|x|^{2k}}dx &\geq&
\prod\limits_{i=1}^{l}\frac{a_{_{n,k+2i-2}}(n-2k-4i)^2}{4}\int_{B_{R}}\frac{|\Delta^{m-l}u|^2}{|x|
^{2
k+4l}}dx\\
&+&\beta(W;R)\sum\limits_{i=1}^{l}\prod\limits_{j=1}^{l-1}\frac{a_{_{n,k+2j-2}}(n-2k-4j)^2}{4}\int
_{B_{R
}}W(x)\frac{|\nabla \Delta^{m-i}u|^2}{|x|^{2k+4i-4}}dx\nonumber\\
&+&\beta(W;R)\sum\limits_{i=1}^{l}a_{_{n,k+2i-2}}\prod\limits_{j=1}^{l-1}\frac{a_{_{n,k+2j-2}}(n-2
k-4j)^2}{4
}\int_{B_{R}}W(x)\frac{|\Delta^{m-i}u|^2}{|x|^{2k+4i-2}}dx,\nonumber
\end{eqnarray}
where $a_{n,m}$ are the best constants in inequality (\ref{gm-hr}).
\end{theorem}
{\bf Proof:} Follows directly from Theorem \ref{gm.hr}.
$\Box$
\section{Appendix (A): The class of Bessel potentials}
The Bessel equation associated to a potential $W$
\begin{equation*}\label{Bessel}
\hbox{$(B_W)$ \hskip 150pt $y''+\frac{1}{r}y'+W(r)y=0$\hskip 150pt}
\end{equation*}
is central to all results revolving around the inequalities of Hardy and Hardy-Rellich type. We
summarize in
this appendix the various properties of these equations that were used throughout this paper.
\begin{definition} We say that a non-negative real valued $C^1$-function is a {\it Bessel
potential on $(0, R)$} if there exists $c>0$ such that the equation $(B_{cW})$ has a positive
solution on $(0, R)$.
The class of Bessel potentials on $(0, R)$ will be denoted by ${\cal B}(0, R)$.
\end{definition}
Note that the change of variable $z(s)=y(e^{-s})$ maps the equation $y''+\frac{1}{r}y'+W(r)y=0$
into
\begin{equation}
\hbox{$(B'_W)$ \hskip 150pt $z''+e^{-2s}W(e^{-s})z(s)=0.$\hskip 150pt}
\end{equation}
On the other hand, the change of variables $\psi
(t)=\frac{-e^{-t}y'(e^{-t})}{y(e^{-t})}$ maps it into the nonlinear equation
\begin{equation}
\hbox{$(B''_W)$ \hskip 150pt $\psi'(t)+\psi^{2}(t)+e^{-2t}W(e^{-t})=0$. \hskip 150pt}
\end{equation}
This will allow us to relate the existence of positive solutions of $(B_W)$ to the non-oscillatory
behaviour of equations $(B'_W)$ and $(B''_W)$.
The theory of sub/supersolutions --applied to $(B''_W)$ (See Wintner \cite{win1, win2, Har})--
already yields, that if $(B_W)$ has a positive solution on an interval $(0, R)$ for some
non-negative potential $W\geq 0$, then for any $W$ such that $0\leq V \leq W$, the equation
$(B_V)$ has also a positive solution on $(0, R)$.
This leads to the definition of the {\it weight} of a potential $W\in {\cal B}(0, R)$ as:
\begin{equation}
\hbox{$\beta (W; R)=\sup\{c>0; \, (B_{cW})$ has a positive solution on $(0, R)$\}.}
\end{equation}
The following is now straightforward.
\begin{proposition} 1)\, The class ${\cal B}(0, R)$ is a closed convex and solid subset of
$C^1(0, R)$.
2)\, For every $W\in {\cal B}(0, R)$, the equation
\begin{equation*}\label{Bessel-weighted}
\hbox{$(B_W)$ \hskip 150pt $y''+\frac{1}{r}y'+\beta(W; R)W(r)y=0$\hskip 150pt}
\end{equation*}
has a positive solution on $(0, R)$.
\end{proposition}
The following gives an integral criteria for Bessel potentials.
\begin{proposition}\label{integral}
Let $W$ be a positive locally integrable function on $\R$.
\begin{enumerate}
\item If
$\liminf\limits_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds>-\infty$,
then for every $R>0$, there exists $\alpha:=\alpha(R)>0$ such that the scaled function
$W_\alpha(x):=\alpha^2W(\alpha x)$ is a Bessel potential on $(0, R)$.
\item If
$\hbox{$\lim\limits_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds=-\infty$, } $
then there are no $\alpha, c>0$, for which
$W_{\alpha,c}=cW(\alpha |x|)$ is a Bessel potential on $(0, R)$.
\end{enumerate}
\end{proposition}
{\bf Proof:} This relies on well known results concerning the existence of
non-oscillatory solutions (i.e., those $z(s)$ such that $z(s)>0$ for $s>0$ sufficiently large) for
the second order linear differential equations
\begin{equation}\label{OSI_ODE}
z''(s)+a(s)z(s)=0,
\end{equation}
where $a$ is a locally integrable function on $\R$. For these equations, the following integral
criteria are available. We refer to \cite{Har, Hua, win1,win2, won}) among others for proofs and
related results.
\begin{description}
\item i)\, If $\hbox{$\limsup_{t\rightarrow \infty } t\int^{\infty}_{t}a(s)ds<\frac{1}{4}$, } $
then Eq. (\ref{OSI_ODE}) is non-oscillatory.
\item ii)\, If
$\hbox{$\liminf_{t\rightarrow \infty } t\int^{\infty}_{t}a(s)ds>\frac{1}{4}$,}$
then Eq. (\ref{OSI_ODE}) is oscillatory.
\end{description}
It follows that if $\liminf\limits_{r\rightarrow 0} \ln(r)\int^{r}_{0} sW(s)ds>-\infty$ holds,
then there exists $\delta>0$ such that $(B_W)$ has a positive solution on $(0, \delta)$. An easy
scaling argument then shows that there exists $\alpha>0$ such that $W_\alpha(x):=\alpha^2W(\alpha
x)$ is a Bessel potential on $(0, R)$. The rest of the proof is similar.
$\square$\\
We now exhibit a few explicit Bessel potentials and compute their weights. We use the following
notation.
\begin{equation}\label{log-def}
\hbox{$log^{(1)}(.)=log(.)$ \ \ and \ \ $log^{(k)}(.)=log(log^{(k-1)}(.))$ \ \ for\ \ $k\geq 2$.}
\end{equation}
and
\begin{equation}\label{X-def}
X_{1}(t)=(1-\log(t))^{-1}, \quad X_{k}(t)=X_{1}(X_{k-1}(t)) \ \ \ \ k=2,3, ... ,
\end{equation}
\begin{theorem} \label{Bessel.theorem}{\rm \bf Explicit Bessel potentials}
\begin{enumerate}
\item $ W \equiv 0$ is a Bessel potential on $(0, R)$ for any $R>0$.
\item The Bessel function $J_{0}$ is a positive solution for equation $(B_W)$ with $W\equiv 1$,
on $(0, z_0)$, where $z_{0}=2.4048...$ is the first zero of $J_0$. Moreover, $z_{0}$ is larger
than the first root of any other solution for $(B_1)$. In other words,
for every $R>0$,
\begin{equation}
\beta (1; R)=\frac{z_0^2}{R^2}.
\end{equation}
\item If $a<2$, then there exists $R_a>0$ such that $W (r)=r^{-a}$ is a Bessel potential on $(
0, R_a)$.
\item For each $k\geq 1$ and $\rho>R( e^{e^{e^{.^{.^{e(k-times)}}}}} )$, the equation
$(B_{\frac{1}{4}W_{k, \rho}})$ corresponding to the potential
\[
\hbox{$W_{k, \rho}(r)=\Sigma_{j=1}^kU_{j}$ where
$
U_j(r)=\frac{1}{r^{2}}\big(\prod^{j}_{i=1}log^{(i)}\frac{\rho}{r}\big)^{-2}$}
\]
has a positive solution on $(0, R)$ that is explicitly given by
$
\varphi_{k, \rho}(r)=( \prod^{k}_{i=1}log^{(i)}\frac{\rho}{r})^{\frac{1}{2}}.$
On the other hand, the equation $(B_{_{\frac{1}{4}W_{k, \rho}+\lambda U_k}})$ corresponding to
the potential $\frac{1}{4}W_{k, \rho}+\lambda U_k$ has no positive solution for any $\lambda>0$.
In other words, $W_{k, \rho}$ is a Bessel potential on $(0, R)$ with
\begin{equation}
\hbox{$ \beta (W_{k, \rho}; R)=\frac{1}{4}$ for any $k\geq 1$.}
\end{equation}
\item For each $k\geq 1$ and $R>0$, the equation $(B_{\frac{1}{4}\tilde W_{k, R}})$ corresponding
to the potential
\[
\hbox{$ \tilde W_{k, R}(r)=\Sigma_{j=1}^k\tilde U_{j}$ where $
\tilde U_j(r)=\frac{1}{r^{2}}X^{2}_{1}(\frac{r}{R})X^{2}_{2}(\frac{r}{R}) \ldots
X^{2}_{j-1}(\frac{r}{R})X^{2}_{j}(\frac{r}{R})$}
\]
has a positive solution on $(0, R)$ that is explicitly given by
\begin{eqnarray*}
\varphi_{k}(r)=(X_{1}(\frac{r}{R})X_{2}(\frac{r}{R}) \ldots
X_{k-1}(\frac{r}{R})X_{k}(\frac{r}{R}))^{-\frac{1}{2}}.
\end{eqnarray*}
On the other hand, the equation $(B_{\frac{1}{4}\tilde W_{k, R}+\lambda \tilde U_k})$
corresponding to the potential $\frac{1}{4}\tilde W_{k, R}+\lambda \tilde U_k$ has no positive
solution for any $\lambda>0$. In other words, $ \tilde W_{k, R}$ is a Bessel potential on $(0,
R)$ with
\begin{equation}
\hbox{$\beta ({\tilde W}_{k, R}; R)=\frac{1}{4}$\quad for any $k\geq 1$.}
\end{equation}
\end{enumerate}
\end{theorem}
{\bf Proof:} 1) It is clear that ${\varphi} (r)=-log(\frac{e}{R}r)$ is a positive solution of $(B_0)$
on $(0, R)$ for any $R>0$.
2) The best constant for which the equation $y''+\frac{1}{r}y'+cy=0$ has a positive solution on
$(0,R)$ is $\frac{z^{2}_{0}}{R^2}$, where $z_{0}=2.4048...$ is the first zero of Bessel function
$J_{0}(z)$. Indeed if $\alpha$ is the first root of an arbitrary solution of the Bessel
equation $y''+\frac{y'}{r}+y(r)=0$, then we have $\alpha \leq z_{0}$. To see this let
$x(t)=aJ_{0}(t)+bY_{0}(t)$, where $J_{0}$ and $Y_{0}$ are the two standard linearly independent
solutions of Bessel equation, and $a$ and $b$ are constants. Assume the first zero of $x(t)$ is
larger than $z_{0}$. Since the first zero of $Y_{0}$ is smaller than $z_{0}$, we have $a\geq0$.
Also $b\leq0$, because $Y_{0}(t)\rightarrow -\infty$ as $t \rightarrow 0$. Finally note that
$Y_{0}(z_{0})>0$, so if $b<0$, then $x(z_{0}+\epsilon)<0$ for $\epsilon$ sufficiently small.
Therefore, $b=0$ which is a contradiction.
3) follows directly from the integral criteria.
4) That ${\varphi}_k$ is an explicit solution of the equation $(B_{\frac{1}{4}W_k})$ is
straightforward.
Assume now that there exists a positive function $\varphi$ such that
\begin{equation*}
-\frac{\varphi'(r)+r\varphi''(r)}{\varphi(r)}=\frac{1}{4}\sum^{k-1}_{j=1}\frac{1}{r}\big(
\prod^{j}_{i=1}log^{(i)}\frac{\rho}{r}\big)^{-2}+(\frac{1}{4}+\lambda)\frac{1}{r}\big(
\prod^{k}_{i=1}log^{(i)}\frac{\rho}{r}\big)^{-2}.
\end{equation*}
Define $f(r)=\frac{\varphi(r)}{\varphi_{k}(r)}>0$, and calculate,
\[\frac{\varphi'(r)+r\varphi''(r)}{\varphi(r)}=\frac{\varphi_{k}'(r)+r\varphi_{k}''(r)}{\varphi_{k}(r)}+\frac{f'(r)+rf''(r)}{f(r)}-\frac{f'(r)}{f(r)}\sum_{i=1}^{k}\frac{1}{\prod^{i}_{j=1}\log^{j}(\frac{\rho}{r})}.\]
Thus,
\begin{equation}\label{main-rel}
\frac{f'(r)+rf''(r)}{f(r)}-\frac{f'(r)}{f(r)}\sum_{i=1}^{k}\frac{1}{\prod^{i}_{j=1}\log^{j}(\frac{\rho}{r})}=-\lambda\frac{1}{r}\big( \prod^{k}_{i=1}log^{(i)}\frac{\rho}{r}\big)^{-2}.
\end{equation}
If now $f'(\alpha_{n})=0$ for some sequence $\{\alpha_{n}\}^{\infty}_{n=1}$ that converges to
zero, then there exists a sequence $\{\beta_{n}\}^{\infty}_{n=1}$ that also converges to zero,
such that $f''(\beta_{n})=0$, and $f'(\beta_{n})>0$. But this contradicts (\ref{main-rel}), which
means that $f$ is eventually monotone for $r$ small enough. We consider the two cases according to
whether $f$ is increasing or decreasing:\\ \\
Case I: Assume $f'(r)>0$ for $r>0$ sufficiently small. Then we will have
\[\frac{(rf'(r))'}{rf'(r)}\leq \sum_{i=1}^{k}\frac{1}{r\prod^{i}_{j=1}\log^{j}(\frac{\rho}{r})}.\]
Integrating once we get
\[f'(r)\geq \frac{c}{r\prod^{k}_{j=1}\log^{j}(\frac{\rho}{r})},\]
for some $c>0$.
Hence, $\lim_{r\rightarrow 0}f(r)=-\infty$ which is a contradiction. \\ \\
Case II: Assume $f'(r)<0$ for $r>0$ sufficiently small. Then
\[\frac{(rf'(r))'}{rf'(r)}\geq \sum_{i=1}^{k}\frac{1}{r\prod^{i}_{j=1}\log^{j}(\frac{\rho}{r})}.
\]
Thus,
\begin{equation}\label{estim}
f'(r)\geq- \frac{c}{r\prod^{k}_{j=1}\log^{j}(\frac{\rho}{r})},
\end{equation}
for some $c>0$ and $r>0$ sufficiently small. On the other hand
\[\frac{f'(r)+rf''(r)}{f(r)}\leq-\lambda \sum^{k}_{j=1} \frac{1}{r}\big(
\prod^{j}_{i=1}log^{(i)}\frac{\rho}{r}\big)^{-2}\leq
-\lambda(\frac{1}{\prod^{k}_{j=1}\log^{j}(\frac{\rho}{r})})'.\]
Since $f'(r)<0$, there exists $l$ such that $f(r)>l>0$ for $r>0$ sufficiently small. From the
above inequality we then have
\[bf'(b)-af'(a)<-\lambda
l(\frac{1}{\prod^{k}_{j=1}\log^{j}(\frac{\rho}{b})}-\frac{1}{\prod^{k}_{j=1}\log^{j}(\frac{\rho}{a})}).\]
From (\ref{estim}) we have $\lim_{a \rightarrow 0}af'(a)=0$. Hence,
\[bf'(b)<-\frac{\lambda l }{\prod^{k}_{j=1}\log^{j}(\frac{\rho}{b})},\]
for every $b>0$, and
\[f'(r)<- \frac{\lambda l }{r\prod^{k}_{j=1}\log^{j}(\frac{\rho}{r})},\]
for $r>0$ sufficiently small. Therefore,
\[\lim_{r\rightarrow 0}f(r)=+\infty,\]
and by choosing $l$ large enough (e.g., $l>\frac{c}{\lambda})$ we get to contradict
$(\ref{estim})$.
The proof of 5) is similar and is left to the interested reader.
$\square$
\section{Appendix (B): The evaluation of $a_{n,m}$}
Here we evaluate the best constants $a_{n,m}$ which appear in Theorem \ref{gm.hr}.
\begin{theorem} Suppose $n\geq 1$ and $m\leq \frac{n-2}{2}$. Then for any $R>0$, the constants
\[
a_{n,m}=\inf \left\{\frac{\int_{B_{R}}\frac{|\Delta u|^2}{|x|^{2m}}dx}{\int_{B_{R}}\frac{|\nabla
u|^2}{|x|^{2m+2}}dx};\, u\in C^{\infty}_{0}(B_{R})\setminus \{0\}\right\}
\]
are given by the following expressions.
\begin{enumerate}
\item For $n=1$
\begin{itemize}
\item if $m \in (-\infty,-\frac{3}{2})\cup[-\frac{7}{6},-\frac{1}{2}]$, then
\[a_{1,m}=(\frac{1+2m}{2})^2\]
\item if $-\frac{3}{2}<m<-\frac{7}{6}$, then
\[a_{1,m}=\min \{
(\frac{n+2m}{2})^2,\frac{(\frac{(n-4-2m)(n+2m)}{4}+2)^2}{(\frac{n-4-2m}{2})^2+2}\}.\]
\end{itemize}
\item If $m=\frac{n-4}{2}$, then
\[a_{n,m}=\min\{(n-2)^2,n-1\}.\]
\item If $n\geq 2$ and $m\leq \frac{-(n+4)+2\sqrt{n^2-n+1}}{6}$,
then $a_{n,m}=(\frac{n+2m}{2})^2$.
\item If $2 \leq n\leq 3$ and $\frac{-(n+4)+2\sqrt{n^2-n+1}}{6}<m\leq \frac{n-2}{2}$, or $n\geq 4$
and $\frac{n-4}{2}<m\leq \frac{n-2}{2}$, then
\[a_{n,m}=\frac{(\frac{(n-4-2m)(n+2m)}{4}+n-1)^2}{(\frac{n-4-2m}{2})^2+n-1}.\]
\item For $n\geq 4$ and $\frac{-(n+4)+2\sqrt{n^2-n+1}}{6} < m< \frac{n-4}{2}$, define
$k^{*}=[(\frac{\sqrt{3}}{3}-\frac{1}{2})(n-2)].$
\begin{trivlist}
\item $\bullet$ If $k^{*}\leq 1$, then
\[a_{n,m}=\frac{(\frac{(n-4-2m)(n+2m)}{4}+n-1)^2}{(\frac{n-4-2m}{2})^2+n-1}.\]
\item $\bullet$ For $k^{*}>1$ the interval $(m^{1}_{0}:=\frac{-(n+4)+2\sqrt{n^2-n+1}}{6},m^{2}_{0}:=
\frac{n-4}{2})$ can be divided in $2k^{*}-1$ subintervals. For $1 \leq k \leq k^{*}$ define
\[m^{1}_{k}:=\frac{2(n-5)-\sqrt{(n-2)^2-12k(k+n-2)}}{6},\]
\[m^{2}_{k}:=\frac{2(n-5)+\sqrt{(n-2)^2-12k(k+n-2)}}{6}.\]
If $m \in (m^{1}_{0},m^{1}_{1}]\cup[m^{2}_{1},m^{2}_{0})$, then
\[a_{n,m}=\frac{(\frac{(n-4-2m)(n+2m)}{4}+n-1)^2}{(\frac{n-4-2m}{2})^2+n-1}.\]
\item $\bullet$ For $k\geq 1$ and $m \in (m^{1}_{k},m^{1}_{k+1}]\cup[m^{2}_{k+1},m^{2}_{k})$, then
\[a_{n,m}=\min\{\frac{(\frac{(n-4-2m)(n+2m)}{4}+k(n+k-2))^2}{(\frac{n-4-2m}{2})^2+k(n+k-2)},\frac{
(\frac{(n-4-2m)(n+2m
)}{4}+(k+1)(n+k-1))^2}{(\frac{n-4-2m}{2})^2+(k+1)(n+k-1)}\}.\]
For $m \in (m^{1}_{k^{*}},m^{2}_{k^{*}})$, then
\[a_{n,m}=\min\{\frac{(\frac{(n-4-2m)(n+2m)}{4}+k^{*}(n+k^{*}-2))^2}{(\frac{n-4-2m}{2})^2+k^{*}(n+
k^{*}-2)},\frac{(\frac{(n-4-2m)(n+2m)}{4}+(k^{*}+1)(n+k^{*}-1))^2}{(\frac{n-4-2m}{2})^2+(k^{*}+1)(
n+k^{*}-1)}\}.\]
\end{trivlist}
\end{enumerate}
\end{theorem}
{\bf Proof:} Letting
$V(r)=r^{-2m}$ then,
\[W(r)-\frac{2V(r)}{r^2}+\frac{2V_r(r)}{r}-V_{rr}(r)=
((\frac{n-2m-2}{2})^2-2-4m-2m(2m+1))r^{-2m-2}.\]
In order to satisfy condition (\ref{main.con}) we should have
\begin{equation}\label{exmain.con}
\frac{-(n+4)-2\sqrt{n^2-n+1}}{6} \leq m\leq \frac{-(n+4)+2\sqrt{n^2-n+1}}{6}.
\end{equation}
So, by Theorem \ref{main.hr} under the above condition we have $a_{n,m}=(\frac{n+2m}{2})^2$ as in the radial case. \\
For
the rest of the proof we will use an argument similar to that of Theorem 6.4 in \cite{TZ} who computed $a_{n, m}$ in the case where $n\geq 5$ and for certain intervals of $m$.\\
Decomposing again $u \in C^{\infty}_{0}(B_{R})$ into spherical harmonics; $u=\Sigma^{\infty}_{k=0}u_{k}$,
where $u_{k}=f_{k}(|x|)\varphi_{k}(x)$, one has
\begin{eqnarray}\label{N1}
\int_{\R^n}\frac{|\Delta
u_{k}|^2}{|x|^{2m}}dx&=&\int_{\R^n}|x|^{-2m}(f''_{k}(|x|))^2dx+\left((n-1)(2m+1)+2c_{k}\right)
\int_{\R^n}|x|^{-2m-2}(f_{k}')^2dx\\
&+&c_{k}(c_{k}+(n-4-2m)(2m+2))\int_{\R^n}|x|^{-2m-4}(f_{k})^2dx, \nonumber
\end{eqnarray}
\begin{equation}\label{N2}
\int_{\R^n}\frac{|\nabla u_{k}|^2}{|x|^{2m+2}}dx=\int_{\R^n}|x|^{-2m-2}(f_{k}')^2
dx+c_{k}\int_{\R^n}|x|^{-2m-4}(f_{k})^2dx.
\end{equation}
One can then prove as in \cite{TZ} that
\begin{equation}\label{a-nm-k}
a_{n,m}=\min \left\{A(k,m,n); \, k\in \N \right\}
\end{equation}
where
\begin{equation}
\hbox{$A(k,m,n)=\frac{(\frac{(n-4-2m)(n+2m)}{4}+c_{k})^2}{(\frac{n-4-2m}{2})^2+c_{k}}$ if $m \neq \frac{n-4}{2}$}
\end{equation}
and
\begin{equation}
\hbox{$A(k,m,n):= c_{k}$ if $m = \frac{n-4}{2}$ and $n+k>2$.}
\end{equation}
Note that when $m=\frac{n-4}{2}$ and $n+k>2$, then $c_k\neq 0$. Actually, this also holds for $n+k\leq 2$, in which case one deduces that if
$m = \frac{n-4}{2}$, then
\[a_{n,m}= \min
\{(n-2)^2=(\frac{n+2m}{2})^2, (n-1)=c_{1}\}
\] which is statement 2).
The rest of the proof consists of computing the infimum especially in the cases not considered in \cite{TZ}.
For that we consider the function
\[f(x)=\frac{(\frac{(n-4-2m)(n+2m)}{4}+x)^2}{(\frac{n-4-2m}{2})^2+x}.\]
It is easy to check that $f'(x)=0$ at $x_{1}$ and $x_{2}$, where
\begin{eqnarray}
x_{1}&=&-\frac{(n-4-2m)(n+2m)}{4}\\
x_{2}&=&\frac{(n-4-2m)(-n+6m+8)}{4}.
\end{eqnarray}
Observe that for $n\geq 2$, $\frac{n-8}{6}\leq \frac{n-4}{2}$. Hence, for $m\leq
\frac{n-8}{6}$ both $x_{1}$ and $x_{2}$ are negative and hence $a_{n,m}= (\frac{n+2m}{2})^2$.
Also note that
\[\frac{-(n+4)-2\sqrt{n^2-n+1}}{6}\leq \frac{n-8}{6}\ \ for \ \ all \ \ n\geq 1.\]
Hence, under the condition in 3) we have $a_{n,m}= (\frac{n+2m}{2})^2.$\\
Also for $n=1$ if
$m\leq -\frac{3}{2}$ both critical points are negative and we have $a_{1,m}\leq
(\frac{1+2m}{2})^2$. Comparing $A(0,m,n)$ and $A(1,m,n)$ we see that $A(1,m,n)\geq A(0,m,n)$ if
and only if (\ref{exmain.con}) holds.
For $n=1$ and $-\frac{3}{2}<m<-\frac{7}{6}$ both $x_{1}$ and $x_{2}$ are positive. Consider the
equations
\[x(x-1)=x_{1}=\frac{(2m+3)(2m+1)}{4},\]
and
\[x(x-1)=x_{2}=-\frac{(2m+3)(6m+7)}{4}.\]
By simple calculations we can see that all four solutions of the above two equations are less than
two. Since, $A(1,m,1)<A(0,m,1)$ for $m<-\frac{7}{6}$, we have $a_{1,m}\leq \min
\{A(1,m,1),A(2,m,1)\}$ and $1)$ follows.
For $n\geq 2$ and $\frac{n-4}{2}<m<\frac{n-2}{2}$ we have $x_{1}>0$ and $x_{2}<0$. Consider the
equation
\[x(x+n-2)=x_{1}=-\frac{(n-4-2m)(n+2m)}{4}.\]
Then $\frac{2m+4-n}{2}$ and $-\frac{(2m+n)}{2}$ are solutions of the above equation and both are
less than one. Since, for $n\geq 4$
\[\frac{n-2}{2}>\frac{-(n+4)+2\sqrt{n^2-n+1}}{6},\]
and $A(1,m,n)\leq A(0,m,n)$ for $m\geq \frac{-(n+4)+2\sqrt{n^2-n+1}}{6} $, the best constant is
equal to what 4) claims. \\
$5)$ follows from an argument similar to that of Theorem 6.4 in
\cite{TZ}.
\end{document} |
\begin{document}
\title{A Minimal Set Low for Speed}
\author{Rod Downey \and Matthew Harrison-Trainor}
\makeatletter
\def\xdef\@thefnmark{}\@footnotetext{\xdef\@thefnmark{}\@footnotetext}
\makeatother
\maketitle
\begin{abstract}
An oracle $A$ is low-for-speed if it is unable to speed up the computation of a set which is already computable: if a decidable language can be decided in time $t(n)$ using $A$ as an oracle, then it can be decided without an oracle in time $p(t(n))$ for some polynomial $p$. The existence of a set which is low-for-speed was first shown by Bayer and Slaman who constructed a non-computable computably enumerable set which is low-for-speed. In this paper we answer a question previously raised by Bienvenu and Downey, who asked whether there is a minimal degree which is low-for-speed. The standard method of constructing a set of minimal degree via forcing is incompatible with making the set low-for-speed; but we are able to use an interesting new combination of forcing and full approximation to construct a set which is both of minimal degree and low-for-speed.
\end{abstract}
\section{Introduction}
Almost since the beginning of computational complexity theory,
we have had results about oracles and their effect
on the running times of computations.
For example Baker, Gill, and Solovay \cite{BakerGS1975} showed that on the one hand there are oracles~$A$ such that $\mathsf{P}^A=\mathsf{NP}^A$ and on the other hand there are oracles~$B$ such that $\mathsf{P}^B \not= \mathsf{NP}^B$, thus demonstrating that methods that relativize will not suffice to solve basic questions like $\mathsf{P}$ vs $\mathsf{NP}$.
An underlying question is whether oracle results can say things about complexity questions in the unrelativized world.
The answer seems to be yes. For example,
Allender together with Buhrman and Kouck\'{y} \cite{AllenderBK2006} and with Friedman and Gasarch
\cite{AllenderFG2013} showed that
oracle access to sets of random strings
can give insight into
basic complexity questions.
In~\cite{AllenderFG2013} Allender, Friedman, and Gasarch
showed that
$\bigcap_U \mathsf{P}^{R_{K_U}}\cap \mathsf{COMP}\subseteq \mathsf{PSPACE}$
where $R_{K_U}$ denotes the strings whose prefix-free Kolmogorov complexity
(relative to universal machine $U$) is at least their length, and
$\mathsf{COMP}$ denotes the collection of computable sets.
Later the ``$\cap \mathsf{COMP}$'' was removed by
Cai, Downey, Epstein, Lempp, and J. Miller \cite{CaiDELM2014}.
Thus we conclude that reductions to very complex sets like the
random strings somehow gives insight into
very simple things like computable sets.
One of the classical notions in computability theory is that of lowness.
An oracle is low for a specific type of problem if that oracle does not help to solve that problem. A language $A$
is low if the halting problem relative to $A$ has the same Turing degree
(and hence the same computational content) as the halting problem.
Slaman and Solovay \cite{SS} characterized languages $L$
where oracles are of no help in Gold-style learning theory:
$EX^L=EX$ iff $L$ is low and 1-generic.
Inspired by this and other lowness results in classical computability,
Allender asked whether
there were non-trivial sets which were ``low for speed'' in that,
as oracles, they did not accelerate
running times
of computations by more than a polynomial amount.
Of course, as stated this makes little sense since
using any $X$ as oracle, we can decide membership in~$X$ in linear time, while without an oracle $X$ may not even be computable at all!
Thus, what we are really interested in is the set of oracles which do not speed up the computation of \emph{computable sets} by more than a polynomial amount. More precisely, an oracle~$X$ is \emph{low for speed} if for any computable language~$L$, if some Turing machine $M$ with access to oracle~$X$ decides $L$ in time~$f$, then there is a Turing machine~$M'$ without any oracle and a polynomial~$p$ such that $M'$ decides~$L$ in time~$p \circ f$. (Here the computation time of an oracle computation is counted in the usual complexity-theoretic fashion: we have a query tape on which we can write strings, and once a string~$x$ is written on this tape, we get to ask the oracle whether $x$ belongs to it in time $O(1)$.)
There are trivial examples of such sets, namely oracles that belong to~$\mathsf{P}$, because any query to such an oracle can be replaced by a polynomial-time computation. Allender's precise question was therefore:
\begin{center}
Is there an oracle $X \notin \mathsf{P}$ which is low for speed?
\end{center}
\noindent Such an~$X$, if it exists, has to be non-computable, for the same reason as above: if $X$ is computable and low for speed, then $X$ is decidable in linear time using oracle~$X$, thus---by lowness---decidable in polynomial time without oracle, i.e., $X \in \mathsf{P}$.
A partial answer was given by Lance Fortnow (unpublished), who observed the following.
\begin{theorem}[Fortnow] If $X$ is a hypersimple and computably enumerable oracle, then $X$ is low for polynomial time, in that if $L \in \mathsf{P}^X$ is computable, then $L \in \mathsf{P}$.
\end{theorem}
Allender's question was finally solved by Bayer and Slaman, who showed the following.
\begin{theorem}[Bayer-Slaman \cite{Bayer-PhD}]
There are non-computable, computably enumerable, sets~$X$ which are low for speed.
\end{theorem}
\noindent Bayer showed that whether 1-generic sets were low for speed depended on
whether $\mathsf{P} = \mathsf{NP}$.
In \cite{BienvenuDowney}, Bienvenu and Downey began an analysis of precisely
what kind of sets/languages could be low for speed. They showed, for instance, that
randomness always accelerates some computation, in that no
Schnorr random set is low for speed.
They also constructed a perfect $\Pi_1^0$ class all of whose members were low for speed. Among other results, they demonstrated that being low for speed
did not seem to align very well with having low complexity, in that
no set of low computably enumerable Turing degree could also be low for speed.
From one point of view the sets with barely non-computable information are
those of minimal Turing degree. Here we recall that ${\bf a}$ is a
\emph{minimal} Turing degree if it is nonzero and there is
no degree ${\bf b} $ with ${\bf 0}<{\bf b}<{\bf a}$.
It is quite easy to construct a set of minimal Turing degree which is
not low for speed, and indeed any natural minimality construction seems to give this.
That is because natural Spector-forcing style dynamics seem
to entail certain delays in any construction, even a full approximation one,
which cause problems with the polynomial time simulation of
the oracle computations being emulated.
In view of this, Bienvenu and Downey
asked the following question:
\begin{question} Can a set $A$ of minimal Turing degree be low for speed?
\end{question}
In the present paper we answer this question affirmatively:
\begin{theorem} There is a set $A$ which is both of minimal Turing degree and
low for speed.
\end{theorem}
\noindent The construction is a mix of forcing and full approximation of a kind
hitherto unseen. The argument is a complicated priority construction in which the interactions between different requirements are quite involved. In order to make the set $A$ of minimal Turing degree, we must put it on splitting trees; and in order to make it low-for-speed, we must have efficient simulations of potential computations involving $A$. When defining the splitting trees, we must respect decisions made by our simulations, which restricts the splits we can choose. The splitting trees end up having the property that while every two paths through the tree split, two children of the same node may not split; finding splits is sometimes very delayed. This is a new strategy which does not seem to have been used before for constructing sets of minimal degree.
\section{The construction with few requirements}\label{sec:two}
We will construct a set $A$ meeting the following requirements:
\begin{align*}
\mathcal{M}_e:\qquad& \text{If $\Phi_e^A$ is total then it is either computable or computes $A$.}\\
\mathcal{L}_{\langle e,i \rangle}:\qquad& \text{If $\Psi_{e}^A = R_{i}$ is total and computable in time $t(n)$, then it is}\\ &\text{ computable in time $p(t(n))$ for some polynomial $p$.}\\
\mathcal{P}_e:\qquad& \text{$A \neq W_e$.}
\end{align*}
Here, $R_{i}$ is a partial computable function. The requirements $\mathcal{P}_e$ make $A$ non-computable, while the requirements $\mathcal{M}_e$ make $A$ of minimal degree. The requirements $\mathcal{L}_{\langle e,i \rangle}$ make $A$ low for speed.
{}
When working with Spector-style forcing, it is common to define a tree to be a map $T \colon 2^{< \omega} \to 2^{< \omega}$ such that $\sigma \preceq \tau$ implies $T(\sigma) \preceq T(\tau)$. We will need our trees to be finitely branching; so for the purposes of this proof a tree will be a computable subset of $2^{< \omega}$ so that each node $\sigma$ on the tree has finitely many children $\tau \succeq \sigma$. The children of $\sigma$ may be of any length, where by length we mean the length as a binary string. Our trees will have no dead ends, and in fact every node will have at least two children.
As usual, $[T]$ denotes the collection of paths through $T$.
Recall that for a functional $\Phi_e$, we
will say that $T$ is $e$-\textit{splitting} if for any two distinct paths $\pi_1$ and $\pi_2$ through $T$,
there is $x$ with
\[\Phi_e^{\pi_1}(x)\downarrow \ne \Phi_e^{\pi_2}(x)\downarrow.\]
If $\tau_1$ and $\tau_2$ are initial segments of $\pi_1$ and $\pi_2$ respectively witnessing this, i.e., with
\[\Phi_e^{\tau_1}(x)\downarrow \ne \Phi_e^{\tau_2}(x)\downarrow,\]
and with a common predecessor $\sigma$, we say that they $e$-split over $\sigma$, or that they are an $e$-split over $\sigma$.
The requirements $\mathcal{M}_e$ will be satisfied by an interesting new mix of forcing and full approximation. Following the standard Spector argument, to satisfy $\mathcal{M}_e$ we attempt to make $A$ a path on a tree $T$ with either:
\begin{itemize}
\item $T$ is $e$-splitting, and so for any path $B \in [T]$, $\Phi_e^B \geq_T B$; or
\item for all paths $B_1,B_2 \in [T]$ and all $x$, if $\Phi_e^{B_1}(x) \downarrow$ and $\Phi_e^{B_2}(x) \downarrow$ then $\Phi_e^{B_1}(x) = \Phi_e^{B_2}(x)$, and so $\Phi_e^B$ is either partial or computable for any $B \in [T]$.
\end{itemize}
Given such a tree, any path on $T$ satisfies $\mathcal{M}_e$.
The standard argument for building a minimal degree is a forcing argument. Suppose that we want to meet just the $\mathcal{M}$ and $\mathcal{P}$ requirements. We can begin with a perfect tree $T_{-1}$, say $T_{-1} = 2^{<\omega}$. Then there is a computable tree $T_0 \subseteq T_{-1}$ which is either $0$-splitting or forces $\Phi_0^A$ to be either computable or partial. We can then choose $A_0 \in T_0$ such that $A_0$ is not an initial segment of $W_0$. Then there is a computable tree $T_1 \subseteq T_0$ with root $A_0$ which is either $1$-splitting or forces $\Phi_1^A$ to be either computable or partial. We pick $A_1 \in T_1$ so that $A_1$ is not an initial segment of $W_1$, then $T_2 \subseteq T_1$ with root $A_1$, and so on. Then $A = \bigcup A_i$ is a path through each $T_i$, and so is a minimal degree. Though each $T_i$ is computable, they are not uniformly computable; given $T_i$, to compute $T_{i+1}$ we must know whether $T_{i+1}$ is to be $(i+1)$-splitting, to force partiality, or to force computability.
{}
We cannot purely use forcing to meet the lowness requirements $\mathcal{L}_{\langle e,i\rangle}$. We use something similar to the Bayer--Slaman strategy from \cite{Bayer-PhD}. The entire construction will take place on a tree $T_{-1}$ with the property that it is polynomial in $|\sigma|$ to determine whether $\sigma \in T_{-1}$, and that moreover, for each $n$, there are polynomially many in $n$ strings of length $n$ on $T_{-1}$. For example, let $\sigma \in T_{-1}$ if it is of the form
\[ a_1^{2^0} a_2^{2^1} a_3^{2^2} a_4^{2^3} \cdots \]
where each $a_i \in \{0,1\}$.
So, for example, $100111100000000 \in T_{-1}$.
First we will show how to meet $\mathcal{L}_{\langle e,i \rangle}$ in the absence of any other requirements. For simplicity drop the subscripts $e$ and $i$ so that we write $\Psi = \Psi_{e}$ and $R = R_{i}$. The idea is to construct a computable simulation $\Xi$ of $\Psi^A$, with $\Xi(x)$ computable in time polynomial in the running time of $\Psi^A(x)$, so that if $\Psi^A = R$ then $\Xi = \Psi^A$. We compute $\Xi(x)$ as follows. We computably search over $\sigma \in T_{-1}$ (i.e.\ over potential initial segments $\sigma$ of $A$) and simulate the computations $\Psi^\sigma(x)$. When we find $\sigma$ with $\Psi^\sigma(x) \downarrow$, we set $\Xi(x) = \Psi^\sigma(x)$ for the first such $\sigma$. Of course, $\sigma$ might not be an initial segment of $A$, and so $\Xi$ might not be equal to $\Psi^A$; this only matters if $\Psi^A = R$ is total, as otherwise $\mathcal{L}$ is satisfied vacuously. If $x$ is such that $\Xi(x) \downarrow \neq R(x) \downarrow$, then there is some $\sigma \in T_{-1}$ witnessing that $\Psi^\sigma(x) = \Xi(x)$; the requirement $\mathcal{L}$ asks that $A$ extend $\sigma$, so that $\Psi^A(x) \neq R(x)$ and $\mathcal{L}$ is satisfied. So now we need to ensure that if $\Xi = \Psi^A = R$, then $\Xi$ is only polynomially slower than $\Psi^A$. We can do this by appropriately dovetailing the simulations so that if $\Psi^\sigma(x) \downarrow$ in time $t(x)$, the simulation $\Xi$ will test this computation in a time which is only polynomially slower than $t(x)$, and we will have $\Xi(x) \downarrow$ in time which is only polynomially slower than $t(x)$. For example, we might start by simulating one stage of the computation $\Psi^\sigma(x)$ for $\sigma$s of length one, then simulating two stages for $\sigma$s of length at most two, then three stages for $\sigma$s of length at most three, and so on.
It is important here that $T_{-1}$ has only polynomially many nodes at height $n$ and we can test membership in $T_{-1}$ in polynomial time; so the $n$th round of simulations takes time polynomial in $n$.
Think of the simulations as being greedy and taking any computation that they find; and then, at the end, we can non-uniformly choose the initial segment of $A$ to force that either the simulation is actually correct, or to get a diagonalization.
{}
The interactions between the requirements get more complicated. Consider now two requirements $\mathcal{M} = \mc{M}_e$ and $\mathcal{L}$. If $\mc{L}$ is of higher priority than $\mc{M}$, there is nothing new going on---$\mc{M}$ knows whether $\mc{L}$ asked to have $A$ extend some node $\sigma$, and if it did, $\mc{M}$ tries to build a splitting tree extending $\sigma$. So assume that $\mathcal{M}$ is of higher priority than $\mathcal{L}$.
Write $\Phi = \Phi_e$. Assume that for each $\sigma \in T_{-1}$, there are $x$ and $\tau_1,\tau_2 \succeq \sigma$ such that $\Phi^{\tau_1}(x) \downarrow \neq \Phi^{\tau_2}(x) \downarrow$; and that for each $\sigma \in T_{-1}$ and $x$ there is $\tau \succeq \sigma$ such that $\Phi^{\tau}(x) \downarrow$. Otherwise, we could find a subtree of $T_{-1}$ which forces that $\Phi^A$ is either not total or is computable, and satisfy $\mc{M}$ by restricting to that subtree. This assumption implies that we can also find any finite number of extensions of various nodes that pairwise $e$-split, e.g. given $\sigma_1$ and $\sigma_2$, there are extensions of $\sigma_1$ and $\sigma_2$ that $e$-split. Indeed, find extensions $\tau_1,\tau_2$ of $\sigma_1$ that $e$-split, say $\Phi^{\tau_1}(x) \downarrow \neq \Phi^{\tau_2}(x) \downarrow$, and an extension $\rho$ of $\sigma_2$ with $\Phi^{\rho}(x) \downarrow$. Then $\rho$ $e$-splits with one of $\tau_1$ or $\tau_2$.
The requirement $\mathcal{L}$ non-uniformly guesses at whether or not $\mathcal{M}$ will succeed at building an $e$-splitting tree. Suppose that it guesses that $\mathcal{M}$ successfully builds such a tree. $\mathcal{M}$ begins with the special tree $T_{-1}$ described above, and it must build an $e$-splitting tree $T \subseteq T_{-1}$.
While building the tree, $\Xi$ will be simulating $\Psi^A$ by looking at computations $\Psi^\sigma$. The tree $T$ might be built very slowly, while $\Xi$ has to simulate computations relatively quickly. So when a node is removed from $T$, $\Xi$ will stop simulating it, but $\Xi$ will have to simulate nodes which are extensions of nodes in $T$ as it has been defined so far, but which have not yet been determined to be in or not in $T$. This leads to the following problem: Suppose that $\gamma$ is a leaf of $T$ at stage $s$, $\rho$ extends $\gamma$, and $\Xi$ simulates $\Psi^\rho(x)$ and sees that it converges, and so defines $\Xi(x) = \Psi^\rho(x)$. But then the requirement $\mathcal{M}$ finds an $e$-split $\tau_1,\tau_2 \succeq \gamma$ and wants to set $\tau_1$ and $\tau_2$ to be the successors of $\gamma$ on $T$, with both $\tau_1$ and $\tau_2$ incompatible with $\rho$. If we allow $\mathcal{M}$ to do this, then since $\mathcal{M}$ has higher priority than $\mathcal{L}$, $\mathcal{M}$ has determined that $A$ cannot extend $\rho$ as $\mc{M}$ restricts $A$ to be a path through $T$. So $\mc{L}$ has lost its ability to diagonalize and it might be that $\Psi^A = R$ (say, because this happens on all paths through $T$) but $\Psi^A(x) \neq \Psi^\rho(x) = \Xi(x)$.
This means that $\mathcal{M}$ needs to take some action to keep computations that $\mathcal{L}$ has found on the tree. We begin by describing the most basic strategy for keeping a single node $\rho$ on the tree.
Suppose that at stage $s$ the requirement $\mathcal{M}$ wants to add children to a leaf node $\gamma$ on $T$. First, look for $\gamma_1,\gamma_2,\gamma_3$ extending $\gamma$ such that they pairwise $e$-split: for any two $\gamma_i,\gamma_j$, there is $x$ such that $\Phi_e^{\gamma_i}(x) \downarrow \neq \Phi_e^{\gamma_j}(x) \downarrow$. By our earlier assumption that each node on $T_{-1}$ has an $e$-splitting extension we will eventually find such elements, say at stage $t$. But it might be that by stage $t$, we have simulated $\Psi_t^\rho(x) \downarrow$ and set $\Xi(x)$ to be equal to this simulated computation. So for the sake of $\mc{L}$, we must keep $\rho$ on the tree. (Later, we will have to extend the strategy to worry about what happens if we have simulated multiple computations $\rho$, but for now assume that there is just one.)
To begin, we stop simulating any computations extending $\rho$. This means that we are now free to extend the tree however we like above $\rho$ without worrying about how this affects the simulations. We also stop simulating any other computations not compatible with $\gamma_1$, $\gamma_2$, or $\gamma_3$.
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (gamma1) at (-2,3) {{$\gamma_1$}};
\node (gamma2) at (-1,3) {{$\gamma_2$}};
\node (gamma3) at (0,3) {{$\gamma_3$}};
\node (rho) at (2,3) {{$\rho$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma) edge[-] (gamma3);
\path (gamma) edge[-] (rho);
\node (rhoe) at (-1,4) {{Simulated by $\mc{L}$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-2.3,3.3) -- (0.3,3.3);
\end{tikzpicture}
\end{center}
Now look for an extension $\rho^*$ of $\rho$ that $e$-splits with at least two of $\gamma_1$, $\gamma_2$, and $\gamma_3$. We can find such a $\rho^*$ by looking for one with $\Phi^{\rho^*}$ defined on the values $x$ witnessing the $e$-splitting of $\gamma_1$, $\gamma_2$, and $\gamma_3$, e.g., if $\Phi^{\gamma_1}(x) \neq \Phi^{\gamma_2}(x)$, and $\Phi^{\rho^*}(x) \downarrow$, then $\rho^*$ must $e$-split with either $\gamma_1$ or $\gamma_2$. Say that $\rho^*$ $e$-splits with $\gamma_1$ and $\gamma_2$. Then we define the children of $\gamma$ to be $\gamma_1$, $\gamma_2$, and $\rho^*$.
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (gamma1) at (-2,3) {{$\gamma_1$}};
\node (gamma2) at (-1,3) {{$\gamma_2$}};
\node (rho) at (2,3) {{$\rho$}};
\node (rhos) at (2,4) {{$\rho^*$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma) edge[-] (rho);
\path (rho) edge[-] (rhos);
\node (rhoe) at (-1.5,4) {{Simulated by $\mc{L}$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-2.3,3.3) -- (-0.7,3.3);
\end{tikzpicture}
\end{center}
\noindent So of the extensions of $\gamma$, some are simulated by $\mc{L}$, and others are not. If $\mc{L}$ has the infinitary outcome, where it never finds the need to diagonalize, then it will have $A$ extend either $\gamma_1$ or $\gamma_2$. It is only if $\mc{L}$ needs to diagonalize that it will have $A$ extend $\rho^*$---and in this case, $\mc{L}$ is satisfied and so does not have to simulate $\Psi^A$.
There is still an issue here. What if, while looking for $\rho^*$, we simulate a computation $\Psi^{\gamma_3}(y) \downarrow$, and set $\Xi(y) = \Psi^{\gamma_3}(y)$, and then only after this find that $\rho^*$ $e$-splits with $\gamma_1$ and $\gamma_2$? We can no longer remove $\gamma_3$ from the tree. Moreover, there might be many different nodes $\rho$ that we cannot remove from the tree---indeed, it might be that around stage $s$, we cannot remove any nodes at height $s$ from the tree, because each of them has some computation that we have simulated.
To deal with this, we have to build $e$-splitting trees in a weaker way. It will no longer be the case that every pair of children of a node $\sigma$ $e$-split, but we will still make sure that every pair of paths $e$-splits. (It might seem that this violates compactness, but in fact thinking more carefully it does not---the set of pairs of paths $(\pi_1,\pi_2)$ that $e$-split is an open cover of the non-compact topological space $[T]\times [T] - \Delta$ where $\Delta$ is the diagonal.)
So suppose again that we are trying to extend $\gamma$. Look for a pair of nodes $\gamma_1,\gamma_2$ that $e$-split. Suppose that $\rho_1,\ldots,\rho_n$ are nodes that have been simulated, that we must keep on the tree. (We might even just assume that $\rho_1,\ldots,\rho_n$ are all of the other nodes at the same level as $\gamma_1,\gamma_2$.) We stop simulating computations above $\rho_1,\ldots,\rho_n$.
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (gamma1) at (-2,3) {{$\gamma_1$}};
\node (gamma2) at (-1,3) {{$\gamma_2$}};
\node (rho1) at (0,3) {{$\rho_1$}};
\node (rho2) at (1,3) {{$\rho_2$}};
\node (rho3) at (2,3) {{$\rho_3$}};
\node (dots) at (3,3) {{$\cdots$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma) edge[-] (rho1);
\path (gamma) edge[-] (rho2);
\path (gamma) edge[-] (rho3);
\node (rhoe) at (-1.5,4) {{Simulated by $\mc{L}$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-2.3,3.3) -- (-0.7,3.3);
\end{tikzpicture}
\end{center}
Now at the next step we need to add extensions to $\gamma_1$ and $\gamma_2$ just as we added extensions of $\gamma$. We look for extensions $\gamma_1^*$ and $\gamma_1^{**}$ of $\gamma_1$, $\gamma_2^*$ and $\gamma_2^{**}$ of $\gamma_2$, and $\rho_i^*$ and $\rho_i^{**}$ of $\rho_i$ such that all of these pairwise $e$-split. While we are looking for these, we might simulate more computations at nodes $\tau$ above $\gamma_1$ and $\gamma_2$, but there will be no more computations simulated above the $\rho_i$.
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (tau1) at (-5,4) {{$\tau$}};
\node (tau2) at (-6,4) {{$\tau$}};
\node (tau3) at (-1,4) {{$\tau$}};
\node (tau4) at (-2,4) {{$\tau$}};
\node (gamma1) at (-4,3) {{$\gamma_1$}};
\node (gamma2) at (-3,3) {{$\gamma_2$}};
\node (gamma1s) at (-4.25,4) {{$\gamma_1^*$}};
\node (gamma2s) at (-2.75,4) {{$\gamma_2^{**}$}};
\node (gamma1ss) at (-3.75,4) {{$\gamma_1^{**}$}};
\node (gamma2ss) at (-3.25,4) {{$\gamma_2^{*}$}};
\node (rho1) at (1,3) {{$\rho_1$}};
\node (rho1s) at (0.5,4) {{$\rho_1^*$}};
\node (rho1ss) at (1.5,4) {{$\rho_1^{**}$}};
\node (rho2) at (3,3) {{$\rho_2$}};
\node (rho2s) at (2.5,4) {{$\rho_2^*$}};
\node (rho2ss) at (3.5,4) {{$\rho_2^{**}$}};
\node (rho3) at (5,3) {{$\rho_3$}};
\node (rho3s) at (4.5,4) {{$\rho_3^*$}};
\node (rho3ss) at (5.5,4) {{$\rho_3^{**}$}};
\node (dots) at (7,3) {{$\cdots$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma1) edge[-] (gamma1s);
\path (gamma1) edge[-] (gamma1ss);
\path (gamma1) edge[-] (tau1);
\path (gamma1) edge[-] (tau2);
\path (gamma2) edge[-] (tau3);
\path (gamma2) edge[-] (tau4);
\path (gamma2) edge[-] (gamma2s);
\path (gamma2) edge[-] (gamma2ss);
\path (gamma) edge[-] (rho1);
\path (gamma) edge[-] (rho2);
\path (gamma) edge[-] (rho3);
\path (rho1) edge[-] (rho1s);
\path (rho2) edge[-] (rho2s);
\path (rho3) edge[-] (rho3s);
\path (rho1) edge[-] (rho1ss);
\path (rho2) edge[-] (rho2ss);
\path (rho3) edge[-] (rho3ss);
\node (rhoe) at (-3.5,5) {{Simulated by $\mc{L}$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-4.3,4.3) -- (-2.7,4.3);
\end{tikzpicture}
\end{center}
Now at the next step of extending the tree we need to extend $\gamma_1^*$, $\gamma_1^{**}$, $\gamma_2^*$, and $\gamma_2^{**}$, and make sure that we extend $\tau$ to $e$-split with these extensions and with extensions of the $\rho^*$; but in doing so we will introduce further extensions that do not $e$-split. So at no finite step do we get that everything $e$-splits with each other, but in the end every pair of paths $e$-splits.
\section{Multiple requirements and outcomes}\label{sec:three}
Order the requirements $\mathcal{M}_e$, $\mathcal{L}_e$, and $\mathcal{P}_e$ as follows, from highest priority to lowest:
\[ \mc M_0 > \mc L_0 > \mc P_0 > \mc M_1 > \mc L_1 > \mc P_1 > \mc M_2 > \cdots .\]
Each requirement has various possible outcomes:
\begin{itemize}
\item A requirement $\mathcal{M}_e$ can either build an $e$-splitting tree, or it can build a tree forcing that $\Phi_e$ is either partial or computable. In the former case, when $\mathcal{M}_e$ builds an $e$-splitting tree, we say that $\mathcal{M}_e$ has the infinitary outcome $\infty$. In the latter case, there is a node $\sigma$ above which we do not find any more $e$-splittings. We say that $\mathcal{M}_e$ has the finitary outcome $\sigma$.
\item A requirement $\mathcal{L}_{\langle e,i\rangle}$ can either have the simulation $\Xi$ of $\Psi_{e}$ be equal to $R_{i}$ whenever they are both defined, or $\mathcal{L}_{\langle e,i\rangle}$ can force $A$ to extend a node $\sigma$, with $\Psi_e^\sigma(x) \neq R_i(x)$ for some $x$. In the first case, we say that $\mathcal{L}_{\langle e,i\rangle}$ has the infinitary outcome $\infty$, and in the latter case we say that $\mathcal{L}_{\langle e,i\rangle}$ has the finitary outcome $\sigma$.
\item A requirement $\mathcal{P}_e$ chooses an initial segment $\sigma$ of $A$ that ensures that $A$ is not equal to the $e$th c.e.\ set $W_e$. This node $\sigma$ is the outcome of $\mathcal{P}_e$.
\end{itemize}
The \textit{tree of outcomes} is the tree of finite strings $\eta$ where $\eta(3e)$ is an outcome for $\mathcal{M}_e$, $\eta(3e+1)$ is an outcome for $\mathcal{L}_e$, and $\eta(3e+2)$ is an outcome for $\mathcal{P}_e$, and so that $\eta$ satisfies the coherence condition described below. For convenience, given a requirement $\mc{R}$ we write $\eta(\mc{R})$ for the outcome of $\mc{R}$ according to $\eta$: $\eta(\mc{M}_e) = \eta(3e)$, $\eta(\mc{L}_e) = \eta(3e+1)$, and $\eta(\mc{P}_e) = \eta(3e+2)$. Using this notation allows us to avoid having to remember exactly how we have indexed the entries of $\eta$. Given a requirement $\mc{R}$, we say that $\eta$ is a \textit{guess by $\mc{R}$} if $\eta$ has an outcome for each requirement of higher priority than $\mc{R}$, e.g.\ a guess by $\mathcal{L}_e$ is a string $\eta$ of length $3e+1$ with \[ \eta = \langle \eta(\mc{M}_0),\eta(\mc{L}_0),\eta(\mc{P}_0),\ldots,\eta(\mc{M}_{e-1}),\eta(\mc{L}_{e-1}),\eta(\mc{P}_{e-1}),\eta(\mc{M}_e) \rangle.\]
Not all of these guesses are internally consistent; the guesses which might actually be the true outcomes will satisfy other conditions, for example the nodes in the guess will actually be on the trees built by higher priority requirements.
Each requirement $\mc{R}$ will have an \textit{instance} $\mc{R}^\eta$ operating under each possible guess $\eta$ at the outcomes of the higher priority requirements. Each of these instances of a particular requirement will be operating independently, but the actions of all of the instances of all the requirements will be uniformly computable. So for example each instance $\mathcal{M}_e^\eta$ of a minimality requirement will be trying to build an $e$-splitting tree, using $\eta$ to guess at whether or not $\mathcal{M}_{e-1}$ successfully built an $(e-1)$-splitting tree, how $\mathcal{L}_{e-1}$ was satisfied, and the node chosen by $\mathcal{P}_{e-1}$ as an initial segment of $A$. The instances of different requirements will not be completely independent; for example, a requirement $\mathcal{M}_e^\eta$ must take into account all of the lower priority requirements $\mathcal{L}_d^\nu$ for $d \geq e$, $\nu \succ \eta$.
One can think of the argument as a forcing argument except that there are some (effective) interactions between the conditions. In a standard forcing construction to build a minimal degree, for each requirement $\mathcal{M}_e$, after forcing the outcomes of the higher priority requirements, we decide non-uniformly whether we can find an $e$-splitting tree $T_e$, or whether there is a node $\sigma$ which has no $e$-splitting tree extending it. The tree $T_e$ is in some sense built after deciding on the outcomes of the previous requirements. What we will do is attempt to build, for each guess $\eta$ of $\mathcal{M}_e$ at the outcomes of the higher priority requirements, an $e$-splitting tree $T_e^\eta$; and then we will, at the end of the construction, choose one instance $\mc{M}_e^\eta$ of $\mathcal{M}_e$ to use depending on the outcomes of the higher priority requirements, and then we use the tree $T_e^\eta$ built by that instance. All of these trees $T_e^\eta$ were already built before we started determining the outcomes of the requirements.
There is only one instance $\mathcal{M}_0^\varnothing$ of $\mathcal{M}_0$, since there are no higher priority requirements. After the construction, we will ask $\mc{M}^\varnothing_0$ what its outcome was. We then have an instance of $\mathcal{L}_0$ which guessed this outcome for $\mathcal{M}_0$, and we ask this instance what its outcome was. This gives us an instance of $\mathcal{P}_0$ that guessed correctly, and so on. So at the end, we use only one instance of each requirement, and follow whatever that instance did.
{}
We now need to consider in more detail the interactions between the requirements. We saw in the previous section that an $\mathcal{M}$ requirement must take into account lower priority $\mathcal{L}$ requirements. In the full construction, we will have not only many different lower priority $\mathcal{L}$ requirements, but also many different instances of each one that the $\mathcal{M}$ requirement must take into account.
Consider three requirements, $\mc{M}$ of highest priority, $\mc{L}_0$ of middle priority, and $\mc{L}_1$ of lowest priority. Suppose that both $\mc{L}_0$ and $\mc{L}_1$ correctly guess that $\mc{M}$ has the infinitary outcome, building a splitting tree. As described before, when we extend $\gamma$, we get a picture as follows (ignoring $\mc{L}_1$ for now):
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (gamma1) at (-2,3) {{$\gamma_1$}};
\node (gamma2) at (-1,3) {{$\gamma_2$}};
\node (rho1) at (0,3) {{$\rho_1$}};
\node (rho2) at (1,3) {{$\rho_2$}};
\node (rho3) at (2,3) {{$\rho_3$}};
\node (dots) at (3,3) {{$\cdots$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma) edge[-] (rho1);
\path (gamma) edge[-] (rho2);
\path (gamma) edge[-] (rho3);
\node (rhoe) at (-1.5,4) {{Simulated by $\mc{L}_0$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-2.3,3.3) -- (-0.7,3.3);
\end{tikzpicture}
\end{center}
Now $\mc{L}_1$ guesses at the outcome of $\mc{L}_0$, and if for example $\mc{L}_0$ has the finitary outcome $\rho_1$, and $\mc{L}_1$ guesses this, then $\mc{L}_1$ must simulate computations extending $\rho_1$.
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (gamma1) at (-2,3) {{$\gamma_1$}};
\node (gamma2) at (-1,3) {{$\gamma_2$}};
\node (rho1) at (0,3) {{$\rho_1$}};
\node (rho2) at (1,3) {{$\rho_2$}};
\node (rho3) at (2,3) {{$\rho_3$}};
\node (dots) at (3,3) {{$\cdots$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma) edge[-] (rho1);
\path (gamma) edge[-] (rho2);
\path (gamma) edge[-] (rho3);
\node (rhoe) at (-1.5,4) {{Simulated by $\mc{L}_0$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-2.3,3.3) -- (-0.7,3.3);
\node (rhoe) at (0,5) {{Simulated by $\mc{L}_1$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-0.3,4.3) -- (0.3,4.3);
\end{tikzpicture}
\end{center}
Now in the next step we need to find extensions $\rho_1^*,\rho_1^{**}$ of $\rho_1$ that split with the other extensions. Before, we could simply extend $\rho_1$ to $\rho_1^*$ and $\rho_1^{**}$. Now, while we are looking for the extensions, $\mc{L}_1$ might simulate other computations, say $\tau_1,\tau_2,\ldots$, extending $\rho_1$. We cannot remove these from the tree. So as before, $\mc{L}_1$ stops simulating them:
\begin{center}
\begin{tikzpicture}[-,>=stealth',shorten >=1pt,shorten <=1pt, auto,node
distance=2cm,thick,every loop/.style={<-,shorten <=1pt}]
\node (gamma) at (0,1) {{$\gamma$}};
\node (tau1) at (-5,4) {{}};
\node (tau2) at (-6,4) {{}};
\node (tau3) at (-1,4) {{}};
\node (tau4) at (-2,4) {{}};
\node (gamma1) at (-4,3) {{$\gamma_1$}};
\node (gamma2) at (-3,3) {{$\gamma_2$}};
\node (gamma1s) at (-4.25,4) {{$\gamma_1^*$}};
\node (gamma2s) at (-2.75,4) {{$\gamma_2^{**}$}};
\node (gamma1ss) at (-3.75,4) {{$\gamma_1^{**}$}};
\node (gamma2ss) at (-3.25,4) {{$\gamma_2^{*}$}};
\node (rho1) at (1,3) {{$\rho_1$}};
\node (rho1s) at (-0.5,4) {{$\rho_1^*$}};
\node (rho1ss) at (0.5,4) {{$\rho_1^{**}$}};
\node (rho1st) at (1.5,4) {{$\tau_1$}};
\node (rho1st2) at (2.5,4) {{$\tau_2$}};
\node (rho2) at (3,3) {{$\rho_2$}};
\node (rho3) at (4,3) {{$\rho_3$}};
\node (dots) at (5,3) {{$\cdots$}};
\path (0,0) edge[-] (gamma);
\path (gamma) edge[-] (gamma1);
\path (gamma) edge[-] (gamma2);
\path (gamma1) edge[-] (gamma1s);
\path (gamma1) edge[-] (gamma1ss);
\path (gamma1) edge[-] (tau1);
\path (gamma1) edge[-] (tau2);
\path (gamma2) edge[-] (tau3);
\path (gamma2) edge[-] (tau4);
\path (gamma2) edge[-] (gamma2s);
\path (gamma2) edge[-] (gamma2ss);
\path (gamma) edge[-] (rho1);
\path (gamma) edge[-] (rho2);
\path (gamma) edge[-] (rho3);
\path (rho1) edge[-] (rho1s);
\path (rho1) edge[-] (rho1ss);
\path (rho1) edge[-] (rho1st);
\path (rho1) edge[-] (rho1st2);
\node (rhoe) at (-3.5,5) {{Simulated by $\mc{L}_0$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-4.3,4.3) -- (-2.7,4.3);
\node (rhoe) at (0,6) {{Simulated by $\mc{L}_1$}};
\draw [decorate,decoration={brace,amplitude=10pt}]
(-0.7,5.3) -- (0.7,5.3);
\end{tikzpicture}
\end{center}
Now we have arrived to the point where computations above $\tau_1$ and $\tau_2$ are no longer being simulated by any $\mc{L}$ requirements, and when we extend $\gamma_1^*$, $\gamma_1^{**}$, $\gamma_2^*$, and $\gamma_2^{**}$ we can find extensions of $\tau_1$ and $\tau_2$ which $e$-split with these extensions (as well as with extensions of $\rho_2^*$, $\rho_3^*$, etc.).
It was important here that we were only dealing with finitely many lowness requirements at a time, because eventually we arrived at a point where parts of the tree were no longer being simulated by any lowness requirement. In the full construction, there will be some important bookkeeping to manage which lowness requirements are being considered at any particular time, so that we sufficiently delay the introduction of new lowness requirements. (This will be accomplished by giving each element of the tree a \textit{scope} in the next section.)
This is not the only case where one $\mc{L}$ requirement needs to simulate computations through nodes not simulated by another requirement. We will introduce a relation $\mathcal{L}_{d_1}^{\nu_1} \rightsquigarrow \mc{L}_{d_2}^{\nu_2}$ which means that $\mathcal{L}_{d_1}^{\nu_1}$ must simulate computations above nodes that are kept on the tree to be the finitary outcome of $\mc{L}_{d_2}^{\nu_2}$, but which are not simulated by $\mc{L}_{d_2}^{\nu_2}$. We suggest reading $\mathcal{L}_{d_1}^{\nu_1} \rightsquigarrow \mc{L}_{d_2}^{\nu_2}$ as ``$\mathcal{L}_{d_1}^{\nu_1}$ \textit{watches} $\mc{L}_{d_2}^{\nu_2}$''. Given the previous example, if $d_1 < d_2$ and $\nu_1 \prec \nu_2$ then we will have $\mathcal{L}_{d_1}^{\nu_1} \rightsquigarrow \mc{L}_{d_2}^{\nu_2}$, but there will be other, more complicated, cases where $\mathcal{L}_{d_1}^{\nu_1} \rightsquigarrow \mc{L}_{d_2}^{\nu_2}$.
Now consider the case of two $\mathcal{M}$ requirements $\mathcal{M}_0$ and $\mathcal{M}_1$ which are of higher priority than two $\mc{L}$ requirements $\mc{L}_0$ and $\mc{L}_1$. Suppose that $\mathcal{M}_0$ successfully builds a $0$-splitting tree $T_0$, and suppose that $\mathcal{L}_0$ and $\mc{L}_1$ both correctly guess this. Then $\mc{M}_1$ is trying to build a $1$-splitting subtree of $T_0$. Suppose that:
\begin{enumerate}
\item $\mathcal{L}_0$ guesses that $\mathcal{M}_1$ builds a 1-splitting subtree of $T_0$. If $\mathcal{M}_1$ succeeds at building a $1$-splitting subtree $T_1$ of $T_0$, $\mc{L}_0$ must be able to simulate computations on $T_1$. On the other hand, when $\mc{M}_0$ built $T_0$, there were some nodes of $T_0$ which $\mc{L}_0$ did not simulate. So to make sure that $\mc{L}_0$ simulates computations through $T_1$, $\mc{M}_1$ should look for 1-splits through nodes of $T_0$ that are simulated by $\mathcal{L}_0$.
\item $\mathcal{L}_1$ guesses that $\mathcal{M}_1$ fails to build a 1-splitting tree. If this guess is correct, then there is some node $\sigma$ above which $\mathcal{M}_1$ fails to find a 1-split. $\mathcal{M}_1$ defines a subtree $T_1'$ of $T_0$ containing no 1-splits. The tree $T_1'$ is not being defined dynamically---it is just the subtree of $T_0$ above $\sigma$ where $\mathcal{M}_1$ looked for (and failed to find) a 1-split.
Recall that $\mathcal{M}_0$ acted specifically to keep simulations computed by $\mathcal{L}_1$ on the tree $T_0$. The tree $T_1'$ also needs to keep these computations. Putting everything together, this means that we should be looking for 1-splits through the nodes which were kept on $T_0$ for the sake of $\mathcal{L}_1$; these are nodes which are not simulated by $\mathcal{L}_1$, but which might be used for the finitary outcomes of $\mc{L}_1$.
\end{enumerate}
So we see from (1) that $\mc{M}_1$ should look for 1-splits through nodes that are simulated by $\mc{L}_0$, and from (2) that $\mc{M}_1$ should look for 1-splits through the nodes which are kept on $T_0$ for the sake of $\mc{L}_1$, but which are not simulated by $\mc{L}_1$. This suggests that $\mc{L}_0$ should simulate the nodes which are kept on $T_0$ for the sake of $\mc{L}_1$, so that $\mathcal{L}_0 \rightsquigarrow \mc{L}_1$.
Given a string of outcomes $\nu$, define $\Delta(\nu) \in \{f,\infty\}^{< \omega}$ to be the string
\[ \langle \nu(\mc M_0),\nu(\mc M_1),\nu(\mc M_2),\ldots \rangle\]
except that we replace any entry which is not $\infty$ with $f$. We can put an ordering $\precsim$ on these using the lexicographic order with $\infty < f$. Suppose that $\nu_1$ and $\nu_2$ are guesses by $\mathcal{L}_{d_1}$ and $\mathcal{L}_{d_2}$ respectively at the outcomes of the higher priority requirements. Define $\mathcal{L}_{d_1}^{\nu_1} \rightsquigarrow \mathcal{L}_{d_2}^{\nu_2}$ if and only if $\Delta(\nu_1) \prec \Delta(\nu_2)$ in the ordering just defined.
\section{Construction}
\subsection{Procedure for constructing splitting trees}
Given the tree $T_{e-1}$ constructed by an instance of $\mc{M}_{e-1}$, we will describe the (attempted) construction by $\mc{M}_{e}$ of an $e$-splitting subtree $T$. This construction will be successful if there are enough $e$-splittings in $T_{e-1}$. We write $T[n]$ for the tree up to and including the $n$th level.
Let $\xi$ be the guess by the particular instance of $\mc{M}_e$ at the outcomes of higher priority requirements. This guess $\xi$ determines an instance of $\mc{M}_{e-1}$ compatible with the given instance of $\mc{M}_e$, and $\xi$ also includes a guess at the outcome of $\mc{M}_{e-1}$. The tree $T_{e-1}$ inside of which we build $T$ depends on the outcome of $\mc{M}_{e-1}$, i.e., if $\xi$ guesses that $\mc{M}_{e-1}$ builds an $(e-1)$-splitting tree then $T_{e-1}$ is this tree, and if $\xi$ guesses that $\mc{M}_{e-1}$ fails to do so, then $T_{e-1}$ is the tree with no $(e-1)$-splits witnessing this failure. (This will all be made more precise in the next section; for now we simply describe the procedure of building $T$.) If we are successful in building $T$ then $\mc{M}_e$ will have the infinitary outcome. We will also leave for later the description of the subtree of $T_{e-1}$ that we use for the finitary outcome. In this section, we just define the procedure \texttt{Procedure($e$,$\rho$,$T_{e-1}$)} for building an $e$-splitting tree $T$ with root $\rho$ in $T_{e-1}$. (The procedure for building $T$ will not actually use the guess $\xi$, other than to determine what the tree $T_{e-1}$ is, but it will be helpful to refer to $\xi$ in the discussion.)
When building $T$, $\mc{M}_e$ must take into account instances of lowness requirements $\mc{L}^\eta_d$ with $d \geq e$ and $\eta$ extending $\xi \widehat{\ } \infty$. (Since $T$ is being built under the assumption that $\mc{M}_e$ has the infinitary outcome, it only needs to respect lowness requirements that guess that this is the case.) When considering $\mc{L}_d^\eta$ while building $T$, we will only need to know the guesses by $\mc{L}_d^\eta$ at the outcomes of the $\mc{M}$ requirements $\mc{M}_{e+1},\ldots,\mc{M}_d$ of lower priority than $\mc{M}_e$ but higher priority than $\mc{L}_d$ (because the only lowness requirements we need to consider have the same guesses at the requirements $\mc{M}_1,\ldots,\mc{M}_e$), and moreover we will only care about whether the guess is the infinitary outcome or a finitary outcome.
The tree $T$ will be a \textit{labeled tree}, as follows. Figure \ref{figone} below may help the reader to understand the structure of the tree. Each node of the tree $T$ will be labeled to show which lowness requirements are simulating it, and which lowness requirements are using it for the finitary outcome (of diagonalizing against a computable set $R$). Each node $\sigma$ is given a \textit{scope} $\scope(\sigma)$ and a \textit{label} $\ell(\sigma)$. The scope is a natural number $\geq 0$, and the label is an element of
\[ \mathtt{Labels} = \{f,\infty\}^{< \omega} \cup \{ \top \}.\]
The scope represents the number of lowness requirements that are being considered at this level of the tree, i.e., if the scope of a node is $n$, then we are considering lowness requirements $\mc{L}_e,\ldots,\mc{L}_{e+n}$. If a node $\sigma$ has scope $n$, then the label of $\sigma$ will be an element of
\[ \mathtt{Labels}_n = \{f,\infty\}^{\leq n} \cup \{ \top \}.\]
Note that the label might have length less than $n$, and might even be the empty string; an element of $\mathtt{Labels}_n$ of length $m$ corresponds to a guess by $\mc{L}_{e+m}$. We order the labels lexicographically with $\infty < f$, and with $\top$ as the greatest element. E.g., in $\mathtt{Labels}_2$, we have
\[\top \succ ff \succ f \infty \succ f \succ \infty f \succ \infty \infty \succ \infty \succ \varnothing.\]
We use $\preceq$ for this ordering. We often think of this ordering as being an ordering $\preceq_n$ on $\mathtt{Labels}_n$, and write $\pred_n(\eta)$ for the predecessor of $\eta$ in $\mathtt{Labels}_n$. Though $\mathtt{Labels}$ is well-founded, it does not have order type $\omega$, and so we need to restrict to $\mathtt{Labels}_n$ to make sense of the predecessor operator.
We think of elements of $\{f,\infty\}^n$ as guesses by $\mc{L}_{e+n}$ at the outcomes of $\mc{M}_{e+1},\ldots,\mc{M}_{e+n}$, and $\top$ is just an element greater than all of the guesses. Given an instance $\mc{L}_{e+n}^\eta$ of a lowness requirement of lower priority than $\mc{M}_e$, we write $\Delta_{> e}(\eta)$ for
\[ \langle \Delta(\eta)(e+1),\Delta(\eta)(e+2),\ldots,\Delta(\eta)(e+n) \rangle \in \{f,\infty\}^{n}.\]
Recall that $\Delta(\eta)$ just replaces the entries of $\eta$ by $f$ or $\infty$; $\Delta_{> e}(\eta)$ is the string which consists of the guesses of $\eta$ at the outcomes (finitary $f$ or infinitary $\infty$) of $\mc{M}_{e+1},\ldots,\mc{M}_d$. Given two requirements $\mc{L}_{d_1}^{\nu_1}$ and $\mc{L}_{d_2}^{\nu_2}$ with $\nu_1,\nu_2$ extending $\xi \widehat{\ } \infty$, $\mc{L}_{d_1}^{\nu_1} \rightsquigarrow \mc{L}_{d_2}^{\nu_2}$ if and only if $\Delta_{> e}(\nu_1) \prec \Delta_{> e}(\nu_2)$ lexicographically.
The label $\ell(\sigma^*)$ means that $\sigma^*$ was kept on the tree in order to preserve simulated computations by instances of lowness requirements $\mc{L}_{e+|\ell(\sigma^*)|}^\eta$ with $\Delta_{>e}(\eta) = \ell(\sigma^*)$. So if $\mc{L}_d^\eta$ is an instance of a lowness requirement, and $\sigma^*$ is the child of $\sigma$ on $T$, with $d \leq e+\scope(\sigma^*)$, then:
\begin{itemize}
\item if $\Delta_{> e}(\eta) \prec \ell(\sigma^*)$, then if $\mc{L}_d^\eta$ simulates computations through $\sigma$, it also simulates computations through $\sigma^*$; and
\item if $\Delta_{> e}(\eta) = \ell(\sigma^*)$, then $\mc{L}_d^\eta$ does not simulate computations through $\sigma^*$, but $\sigma^*$ might be used for the finitary outcome of $\mc{L}_d^\eta$.
\end{itemize}
A node $\sigma^*$ with $\ell(\sigma^*) = \top$ is simulated by every lowness requirement that simulates its parent. Note that we always say that $\sigma^*$ is simulated if its parent $\sigma$ is simulated, rather than just saying that $\sigma^*$ is simulated. This is because when we apply $\mathsf{D}elta_{>e}$ to the guesses $\eta$ of different instances of $\mc{L}_d^\eta$, we lump together many instances with different values for the finitary outcomes. Some of these instances may not simulate computations through $\sigma^*$ because one of the higher priority requirements forces $A$ to extend a node incompatible with $\sigma$, while other instances may be forced to simulate computations through $\sigma^*$ because of these higher priority requirements. For a particular instance of a lowness requirement, there is some initial segment of $A$ determined by the higher priority requirements; think of the labels $\ell$ as applying above this initial segment.
{}
Certain levels of the tree $T_e$ will be called \textit{expansionary levels}. From the $n$th expansionary level of the tree on, we will begin to consider requirements $\mc{L}_{e+1},\ldots,\mc{L}_{e+n}$, using guesses at the outcomes of $\mc{M}_{e+1},\ldots,\mc{M}_{e+n}$. (Recall that the scope of a node represents the lowness requirements that it considers.) The nodes at the $n$th expansionary level or higher, but below the $n+1$st expansionary level, will be said to be in the \textit{$n$th strip}. We write $e_1,e_2,e_3,\ldots$ for the expansionary levels. The expansionary levels are defined statically by $e_1 = 0$ and
\[ e_{i+1} = e_i + 2^{i+5}.\]
One might expect that if $\sigma$ is in the $n$th strip, then $\scope(\sigma)$ will be $n$. This will not quite be the case; an expansionary level is where we start considering more requirements, but this might not happen immediately for particular nodes. Instead, if $\sigma$ is in the $n$th strip, we will have $\scope(\sigma) = n-1$ or $\scope(\sigma) = n$. The scope of a child will always be at least the scope of its parent. We say that $\sigma^*$, a child of $\sigma$, is an \textit{expansionary node} if $\scope(\sigma^*) > \scope(\sigma)$. We say that an expansionary node $\sigma^*$ is an \textit{$n$th expansionary node} if $\scope(\sigma^*) = n$. Along any path in the tree, there is one expansionary node for each $n$; it is clear from the fact that the lengths of labels are non-decreasing along paths that there is at most one, and we will show in Lemma \ref{lem:expansionary} that there is at least one. Moreover, the $n$th expansionary node along a path will occur in the $n$th strip.
Suppose that $\sigma^*$ is a child of $\sigma$, with $\scope(\sigma) = n$. If $\sigma^*$ is an $(n+1)$st expansionary node, then we will have $\scope(\sigma^*) = n+1$ and $\ell(\sigma^*) = \top$. Otherwise, $\scope(\sigma^*) = \scope(\sigma) = n$ and we will have either $\ell(\sigma^*) = \ell(\sigma)$ or $\ell(\sigma^*) \prec \ell(\sigma)$. When the label stays the same (or when $\sigma^*$ is an expansionary node), we say that $\sigma^*$ is a \textit{main child} of $\sigma$. Each node $\sigma$ will have exactly two main children, which will $e$-split with each other. Otherwise, if $\ell(\sigma^*) \prec \ell(\sigma)$, then we say that $\sigma^*$ is a \textit{secondary child} of $\sigma$.
\begin{sidewaysfigure}[pt]
\begin{center}
\begin{tikzpicture}[level 1/.style={sibling distance=8cm},
level 2/.style={sibling distance=2cm},
level 3/.style={sibling distance=0.7cm},
level 4/.style={sibling distance=0.5cm},
level 6/.style={sibling distance=0.9cm},
level 7/.style={sibling distance=0.7cm},
level 8/.style={sibling distance=0.7cm},]
\node {$\top$}
child { node {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]}
child { node {$\top$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[dashed]}}
child { node {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]}
child { node {$\top$} edge from parent[thick,solid]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[dashed]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node {$\infty$} edge from parent[dashed]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]
child[draw,circle,color=black,inner sep=1.5pt,solid,thin] { node {$\top$} edge from parent[thick,solid]}
child[draw,circle,color=black,inner sep=1.5pt,solid,thin] { node {$\top$} edge from parent[thick,solid]}
child { node {$ff$} edge from parent[dashed]}}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node {$\varnothing$} edge from parent[dashed]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}}}}}
child { node {$f$} edge from parent[dashed]}}
child { node {$f$} edge from parent[dashed]
child { node {$f$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[thick,solid]}
child { node {$\infty$} edge from parent[dashed]}}}
child { node {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]}
child { node {$\top$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[dashed]}}
child { node {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]}
child { node {$\top$} edge from parent[thick,solid]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]
child { node {$\top$} edge from parent[thick,solid]}
child { node {$\top$} edge from parent[thick,solid]}
child { node {$ff$} edge from parent[dashed]
child { node {$ff$} edge from parent[thick,solid]}
child { node {$ff$} edge from parent[thick,solid]}
child { node {$f\infty$} edge from parent[dashed]
child { node {$f\infty$} edge from parent[thick,solid]}
child { node {$f\infty$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[dashed]
child { node {$f$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[thick,solid]}
child { node {$\infty f$} edge from parent[dashed]}}}}}
child { node {$f$} edge from parent[dashed]}}
child { node {$f$} edge from parent[dashed]}}
child { node {$f$} edge from parent[dashed]
child { node {$f$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[thick,solid]}
child { node {$\infty$} edge from parent[dashed]}}}
child { node {$f$} edge from parent[dashed]
child { node {$f$} edge from parent[thick,solid]
child { node {$f$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[thick,solid]}
child { node {$\infty$} edge from parent[dashed]}}
child { node {$f$} edge from parent[thick,solid]
child { node {$f$} edge from parent[thick,solid]}
child { node {$f$} edge from parent[thick,solid]}
child { node {$\infty$} edge from parent[dashed]}}
child { node {$\infty$} edge from parent[dashed]
child { node {$\infty$} edge from parent[thick,solid]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node {$\varnothing$} edge from parent[dashed]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}}}
child { node {$\infty$} edge from parent[thick,solid]}
child { node {$\varnothing$} edge from parent[dashed]
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}
child { node[draw,circle,color=black,inner sep=1.5pt,solid,thin] {$\top$} edge from parent[thick,solid]}}}};
\node at (10,-9) {\begin{varwidth}{4cm}\footnotesize Nodes labeled $\varnothing$ have no secondary children.\end{varwidth}};
\node[fill=white] at (-3.5,-1.5) {\begin{varwidth}{4cm}\footnotesize Since we have not yet passed the 2nd expansionary stage, main children are not 2nd expansionary nodes.\end{varwidth}};
\node at (4,-7.5) {\begin{varwidth}{4cm}\footnotesize The scope of these nodes is now 2, so the labels can have length $\leq 2$. Along the secondary children, the labels keep decreasing.\end{varwidth}};
\node at (-10,-8) {\begin{varwidth}{3cm}\footnotesize The expansionary nodes do not all occur at the same level, but we can see that each path eventually has one.\end{varwidth}};
\node at (-9,-12) {\begin{varwidth}{5cm}\footnotesize These children labeled $\top$ are not expansionary because we have not reached the 3rd expansionary level.\end{varwidth}};
\node at (-4,-5.7) {\small \textbf{2nd expansionary level.}};
\draw[thick,dashed] (-12,-5.25) -- (12,-5.25);
\end{tikzpicture}
\end{center}
\caption{An example of what the labeled tree might look like. We draw the main children with a solid line and the secondary children with a dashed line. Expansionary nodes are shown by a circle. To fit the tree onto a single page, we have made some simplifications: (a) we have omitted some nodes from the diagram; (b) we have assumed that each node has only one secondary child; and (c) we have assumed that the $2$nd expansionary level $e_2$ is much smaller than the value of 128 that we set in the construction. We show the 2nd expansionary level with the long horizontal dashed line.}\label{figone}
\end{sidewaysfigure}
{}
Recall from the end of the previous section that when we look for splits, we do so through only a subtree of $T_{e-1}$. $T_{e-1}$ itself will be a labeled tree, with scopes $\scope_{e-1} \colon T_{e-1} \to \omega$ and labels $\ell_{e-1} \colon T_{e-1} \to \mathtt{Labels}$. Given $\tau$ on $T_{e-1}$, let $T_{e-1}\treeres{\tau}{\succ f}$ be the subtree of $T_{e-1}$ above $\tau$ (so that $\tau$ is the root node of $T_{e-1}\treeres{\tau}{\succ f}$) such that given $\sigma$ on $T_{e-1}\treeres{\tau}{\succ f}$, the children of $\sigma$ in $T_{e-1}\treeres{\tau}{\succ f}$ are the children $\sigma^*$ of $\sigma$ on $T_{e-1}$ with $\ell_{e-1}(\sigma^*) \succ f$. When we look for a splitting extension of $\tau$, we look through $T_{e-1}\treeres{\tau}{\succ f}$.
In general, for any tree $S$, node $\tau \in S$, and relation $R(\sigma^*)$ (or even a relation $R(\sigma^*,\sigma)$ between a node $\sigma^*$ and its parent $\sigma$), we can define the subtree $S \treeres{\tau}{R}$ as the tree with root node $\tau$, and such that whenever $\sigma \in S \treeres{\tau}{R}$, the children $\sigma^*$ of $\sigma$ on $S \treeres{\tau}{R}$ are exactly the children $\sigma^*$ of $\sigma$ on $S$ such that $R(\sigma^*)$ holds (or $R(\sigma^*,\sigma)$). We will use this notation for the trees $S \treeres{\tau}{\succ \eta}$ and $S \treeres{\tau}{\succeq \eta}$ for $\eta \in \mathtt{Labels}$, and $S \treeres{\tau}{main}$ where $main(\sigma^*,\sigma)$ is the relation of being a main child. So for example $T_{e-1} \treeres{\sigma}{main}[s]$ is the tree consisting of main children of main children of... main children of $\sigma$.
The input tree $T_{e-1}$ will have similar properties to those described above. We say that a finitely branching tree $T_{e-1}$ with labels $\ell_{e-1}$ and scopes $\scope_{e-1}$ is \textit{admissible} if:
\begin{enumerate}
\item Each $\sigma \in T_{e-1}$ has two main children $\sigma^*$ and $\sigma^{**}$ with $\ell_{e-1}(\sigma^*) \succeq \ell_{e-1}(\sigma)$ and $\ell_{e-1}(\sigma^{**}) \succeq \ell_{e-1}(\sigma)$.
\item If $\sigma^*$ is a child of $\sigma$, then $\scope_{e-1}(\sigma^*) \geq \scope_{e-1}(\sigma)$.
\item For each $n$, each path through $T_{e-1}$ contains a node $\sigma$ with $\ell_{e-1}(\sigma) = \top$ and $\scope_{e-1}(\sigma) \geq n$.
\end{enumerate}
We are now ready to describe the procedure for constructing splitting trees.
\noindent \texttt{Procedure($e$,$\rho$,$T_{e-1}$):}\\
\textit{Input:} A value $e \geq 0$, an admissible labeled tree $T_{e-1}$ with labels $\ell_{e-1}(\cdot)$ and scopes $\scope_{e-1}(\cdot)$, and a node $\rho$ on $T_{e-1}$ with $\ell_{e-1}(\rho) = \top$. \\
\textit{Output:} A possibly partial labeled $e$-splitting tree $T$, built stage-by-stage.
\noindent \textit{Construction.} To begin, the root node of $T$ is $\rho$ with $\scope(\rho) = 1$ and $\ell(\rho) = \top$. This is the $0$th level of the tree, $T[0]$. At each stage of the construction, if we have so far built $T$ up to the $n$th level $T[n]$, we try to add an additional $n+1$st level.
At stage $s$, suppose that we have defined the tree up to and including level $n$, and the last expansionary level $\leq n$ was $e_t$. Look for a length $l$ such that for each leaf $\sigma$ of $T[n]$, there is an extension $\sigma'$ of $\sigma$ with $\sigma' \in T_{e-1} \treeres{\sigma}{main}$ with $\ell_{e-1}(\sigma') = \top$, and there are extensions $\sigma^*$ and $\sigma^{**}$ of $\sigma$ on $T_{e-1} \treeres{\sigma'}{\succ f}$, such that $\sigma^*$ and $\sigma^{**}$ are of length $l$, and such that all of these extensions pairwise $e$-split, i.e., for each pair of leaves $\sigma,\tau$ of $T[n]$, these extensions $\sigma^*$, $\sigma^{**}$, $\tau^*$, and $\tau^{**}$ all $e$-split with each other. (At stage $s$, we look among the first $s$-many extensions of these leaves, and we run computations looking for $e$-splits up to stage $s$. If we do not find such extensions, move on to stage $s+1$.)
If we do find such extensions, we will define $T[n+1]$ as follows. To begin, we must wait for $T_{e-1}[s]$ to be defined. In the meantime, we designate each $\sigma$ as \textit{waiting with main children $\sigma^*$ and $\sigma^{**}$}. (This designation is purely for the use of the simulations for lowness requirements, and has no effect on the resulting tree $T$.) While waiting, we still count through stages of the construction, so that after we resume the next stage of the construction will not be stage $s+1$ but some other stage $t > s$ depending on how long we wait. Once $T_{e-1}[s]$ has been defined, for each leaf $\sigma$ of $T[n]$, the children of $\sigma$ in $T[n+1]$ will be:
\begin{itemize}
\item $\sigma^*$, with:
\begin{itemize}
\item if no predecessor of $\sigma$ on $T$ is $t$-expansionary, set $\scope({\sigma}^*) = \scope(\sigma)+1$ and $\ell({\sigma}^*) = \top$, or
\item $\scope({\sigma}^*) = \scope(\sigma)$ and $\ell({\sigma}^*) = \ell(\sigma)$ otherwise;
\end{itemize}
\item $\sigma^{**}$, with:
\begin{itemize}
\item $\scope({\sigma}^{**}) = \scope(\sigma)+1$ and $\ell({\sigma}^{**}) = \top$ if no predecessor of $\sigma$ on $T$ is $t$-expansionary, or
\item $\scope({\sigma}^{**}) = \scope(\sigma)$ and $\ell({\sigma}^{**}) = \ell(\sigma)$ otherwise;
\end{itemize}
\item If $\ell(\sigma) \succ \varnothing$, each other extension $\sigma^\dagger$ of $\sigma$ on $T_{e-1} \treeres{\sigma}{\succ \varnothing}[s]$ which is incompatible with $\sigma^*$ and $\sigma^{**}$ will be a child of $\sigma$ on $T$. Put $\scope(\sigma^\dagger) = \scope(\sigma)$. Define $\ell(\sigma^\dagger)$ as follows. Let $n = \scope(\sigma)$. Let $\eta \in \mathtt{Labels}_{n}$ be greatest such that $\sigma^\dagger \in T_{e-1} \treeres{\sigma}{\succeq \eta}$. Then:
\begin{itemize}
\item If $\eta$ is $\top$ or begins with $f$, then let $\ell(\sigma^\dagger) = \pred_{n}(\ell(\sigma))$.
\item If $\eta$ begins with $\infty$, say $\eta = \infty \eta^*$, then $\ell(\sigma^\dagger)$ will be the minimum, in $\mathtt{Labels}_{n}$, of $\pred_{n}(\ell(\sigma))$ and $\eta^*$.
\end{itemize}
Note that $\pred_n(\ell(\sigma))$ exists because $\ell(\sigma) \succ \varnothing$.
\end{itemize}
The children ${\sigma}^*$ and ${\sigma}^{**}$ are the main children of $\sigma$, and the $\sigma^\dagger$, if they exist, are secondary children. This ends the construction at stage $s$.
\noindent \textit{End construction.}
We say that the procedure is \textit{successful} if it never gets stuck, and construct the $n$th level of the tree $T$ for every $n$. The next lemma is the formal statement that if $T_{e-1}$ has enough $e$-splits, then the procedure is successful.
\begin{lemma}\label{lem:split}
Fix $e$, an admissible labeled tree $T_{e-1}$, and $\rho \in T_{e-1}$ with $\ell_{e-1}(\rho) = \top$. Suppose that for all $\sigma \in T_{e-1} \treeres{\rho}{\succ \varnothing}$ with $\ell_{e-1}(\sigma) = \top$,
\begin{itemize}
\item for all $n$, there is $\tau \in {T}_{e-1}\treeres{\sigma}{\succ f}$ such that $\Phi_e^\tau(n) \downarrow$, and
\item there are $n$ and $\tau_1,\tau_2 \in {T}_{e-1}\treeres{\sigma}{\succ f}$ such that
\[ \Phi_e^{\tau_1} (n) \neq \Phi_e^{\tau_2}(n).\]
\end{itemize}
Then {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} is successful.
\end{lemma}
As part of proving this lemma, we will use the following remark, which follows easily from the construction:
\begin{remark}
Every node on $T$ is a node on $T_{e-1} \treeres{\rho}{\succ \varnothing}$.
\end{remark}
\begin{proof}[Proof of Lemma \ref{lem:split}]
If we have built $T$ up to level $n$, and $T[n]$ has leaves $\sigma_1,\ldots,\sigma_k$, then as $T_{e-1}$ is admissible, for each $i$ there are $\sigma_i'$ on $T_{e-1} \treeres{\sigma_i}{main}$ with $\ell(\sigma_i') = \top$. By the remark, each $\sigma_i' \in T_{e-1} \treeres{\rho}{\succ \varnothing}$. Then using the assumption of the lemma and standard arguments there are $\sigma_i^{*},\sigma_i^{**}$ on $T_{e-1} \treeres{\sigma_i'}{\succ f}$ such that all of the $\sigma_i^*$ and $\sigma_i^{**}$ pairwise $e$-split. For sufficiently large stages $s$, we will find these extensions.
\end{proof}
The remaining lemmas of this section give properties of the tree constructed by the procedure. The next few lemmas show that the tree $T$ has expansionary levels and is $e$-splitting. As a result, we will see that $T$ is admissible.
\begin{lemma}\label{lem:expansionary}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. For each $\sigma^* \in T[e_{n+1}]$, there is a predecessor $\sigma$ of $\sigma^*$ which is $n$-expansionary.
\end{lemma}
\begin{proof}
Let $\sigma_0 \in T[e_{n}]$ be the predecessor of $\sigma^*$ at the $n$th expansionary level, and let $\sigma_0,\sigma_1,\sigma_2,\ldots,\sigma_k = \sigma^*$ be the sequence of predecessors of $\sigma^*$ between $\sigma_0$ and $\sigma^*$. If any $\sigma_{i+1}$ were a main child of $\sigma_i$, then either $\sigma_{i+1}$ would be $n$-expansionary, or $\sigma_i$ or one of its predecessors would be $n$-expansionary as desired. If none of these are expansionary, then we must have
\[ \top \succeq \ell(\sigma_0) \succ \ell(\sigma_1) \succ \ell(\sigma_2) \succ \cdots \succ \ell(\sigma_k) = \ell(\sigma^*) \]
with all of these in $\mathtt{Labels}_{n-1}$. Since $e_{n+1} > e_n + 2^{n+1} > |\mathtt{Labels}_{n-1}|$, this cannot be the case, and so some predecessor of $\sigma^*$ must be expansionary.
\end{proof}
The following lemma is easy to see by inspecting the construction.
\begin{lemma}\label{lemma:splits-or-down}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. Given distinct leaves $\sigma$ and $\tau$ of $T[n]$, and $\sigma^*,\tau^* \in T[n+1]$ which are children of $\sigma$ and $\tau$ respectively, either:
\begin{itemize}
\item $\sigma^*$ and $\tau^*$ are main children of $\sigma$ and $\tau$ respectively, and $\sigma^*$ and $\tau^*$ $e$-split,
\item $\sigma^*$ is a secondary child of $\sigma$, and $\scope(\sigma^*) = \scope(\sigma)$ and $\ell(\sigma^*) \prec \ell(\sigma)$, or
\item $\tau^*$ is a secondary child of $\tau$, and $\scope(\tau^*) = \scope(\tau)$ and $\ell(\tau^*) \prec \ell(\tau)$.
\end{itemize}
\end{lemma}
\begin{lemma}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. Given distinct $\sigma$ and $\tau$ in $T$ at the $n$th expansionary level of the tree, and $\sigma^*,\tau^*$ which are extensions of $\sigma$ and $\tau$ respectively at the $n+1$st expansionary level of the tree, $\sigma^*$ and $\tau^*$ are $e$-splitting.
\end{lemma}
\begin{proof}
Let $\sigma_0 = \sigma,\sigma_1,\sigma_2,\ldots,\sigma_k = \sigma^*$ be the sequence of predecessors of $\sigma^*$ between $\sigma$ and $\sigma^*$, and similarly for $\tau_0 = \tau,\tau_1,\tau_2,\ldots,\tau_k = \tau^*$. Since $\sigma_0$ and $\tau_0$ are at the level $e_n$, and $\sigma^*$ and $\tau^*$ are at the level $e_{n+1}$, we have $k \geq 2^{n+5}$. If, for any $i$, both $\sigma_{i+1}$ and $\tau_{i+1}$ are main children of $\sigma_i$ and $\tau_i$, then by Lemma \ref{lemma:splits-or-down}, $\sigma_{i+1}$ and $\tau_{i+1}$ are $e$-splitting. We argue that this must happen for some $i < k$.
For each $i$, either (a) $\sigma_{i+1}$ is $n$-expansionary and $\ell(\sigma_{i+1}) = \top$, or $\scope(\sigma_{i+1}) = \scope(\sigma_i)$ and either (b) $\ell(\sigma_{i+1}) \prec \ell(\sigma_{i})$, or (c) $\ell(\sigma_{i+1}) = \ell(\sigma_{i})$. There is at most one $i$ for which (a) is the case. Thus there are at most $|\mathtt{Labels}_{n}|+|\mathtt{Labels}_{n+1}| \leq 2^{n+3}$ values of $i$ for which (b) is the case. The same is true for the $\tau_i$. So, as $k \geq 2^{n+5}$, there must be some $i$ for which neither (a) nor (b) is the case for either the $\sigma_i$ or the $\tau_i$. For this $i$, we have both $\sigma_{i+1}$ and $\tau_{i+1}$ are main children of $\sigma_i$ and $\tau_i$ respectively, and so $\sigma_{i+1}$ and $\tau_{i+1}$ are $e$-splitting. Thus $\sigma^*$ and $\tau^*$ are $e$-splitting.
\end{proof}
\begin{lemma}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. $T$ is an $e$-splitting tree: any two paths in $T$ are $e$-splitting.
\end{lemma}
\begin{proof}
Choose $\sigma_1$ and $\sigma_2$ initial segments of the two paths, long enough that they are distinct, which are at the $n$th expansionary level $T[e_n]$. Let $\tau_1,\tau_2$ be the longer initial segments of the paths at the $n+1$st expansionary level $T[e_{n+1}]$. Then by the previous lemma, $\tau_1$ and $\tau_2$ are $e$-splitting, and so the two paths are $e$-splitting.
\end{proof}
The next lemmas relate the labels of $T$ to the labels on $T_{e-1}$. If a node is labeled on $T_{e-1}$ so that it is not simulated by some lowness requirement, then it should also be labeled on $T$ to not be simulated by that lowness requirement. (The converse is not necessary; $T$ might determine that some node should not be simulated even if that was not determined by $T_{e-1}$.)
\begin{lemma}\label{lem:n-to-n-1}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. Given $\sigma \in T$ and $\sigma^* \in T \treeres{\sigma}{\succ \eta}$, with $\ell_{e-1}(\sigma) = \top$, we have $\sigma^* \in T_{e-1} \treeres{\sigma}{\succ \infty \eta}$. In particular, if $\ell_e(\sigma^*) \succ \eta$, then $\ell_{e-1}(\sigma^*) \succ \infty\eta$.
\end{lemma}
\begin{proof}
It suffices to prove the lemma when $\sigma^*$ is a child of $\sigma$ on $T$, and $\ell(\sigma^*) \succ \eta$. We have two cases, depending on whether $\sigma^*$ is a main child or secondary child of $\sigma$ on $T$.
\begin{itemize}
\item If $\sigma^*$ is a main child of $\sigma$ on $T$, then $\sigma^* \in T_{e-1}\treeres{\sigma'}{\succ f}$ for some $\sigma' \in T_{e-1}\treeres{\sigma}{main}$.
Since $\ell_{e-1}(\sigma) = \top$, for every $\tau$ on $T_{e-1}$ between $\sigma$ and $\sigma'$ we have $\ell_{e-1}(\tau) = \top$. Thus $\ell_{e-1}(\sigma') = \top$.
Now for every $\tau$ on $T_{e-1}$ between $\sigma'$ and $\sigma^*$, we have $\ell_{e-1}(\tau) \succ f \succ \infty \eta$. So $\ell_{e-1}(\sigma^*) \succ \infty \eta$.
\item If $\sigma^*$ is a secondary child of $\sigma$, let $n = \scope(\sigma)$ and let $\nu \in \mathtt{Labels}_{n}$ be least such that $\sigma^* \in T_{e-1} \treeres{\sigma}{\succeq \nu}$. If $\nu$ is $\top$ or begins with $f$, then $\nu \succ \infty \eta$ and so $\sigma^* \in T_{e-1} \treeres{\sigma}{\succ \infty\eta}$. Otherwise, if $\nu$ begins with $\infty$, say $\nu = \infty \nu^*$, then $\ell(\sigma^*) \succ \eta$ is the minimum, in $\mathtt{Labels}_{n}$, of $\pred_{n}(\ell(\sigma))$ and $\nu^*$. Thus $\nu^* \succ \eta$, which means that $\infty \nu^* \succ \infty \eta$, and so $\sigma^* \in T_{e-1} \treeres{\sigma}{\succ \infty\eta}$.
\end{itemize}
This proves the lemma.
\end{proof}
Similarly, we can prove the same lemma but replacing $\succ$ with $\succeq$. We have:
\begin{lemma}\label{lem:n-to-n-1-eq}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. Given $\sigma \in T$ and $\sigma^* \in T \treeres{\sigma}{\succeq \eta}$, we have $\sigma^* \in T_{e-1} \treeres{\sigma}{\succeq \infty \eta}$. In particular, if $\ell_e(\sigma^*) \succeq \eta$, then $\ell_{e-1}(\sigma^*) \succeq \infty\eta$.
\end{lemma}
Finally, putting together results from all of these lemmas, we have:
\begin{lemma}\label{lem:admissible}
Suppose that {\upshape\texttt{Procedure($e$,$\rho$,$T_{e-1}$)}} successfully constructs $T$. Then $T$ is an admissible tree.
\end{lemma}
\subsection{Minimality requirements}\label{sec:min-req}
In the previous subsection, we described a procedure for constructing an $e$-splitting tree. In this section, we will show how the procedure is applied.
We begin with $\mathbb{T}_{-1}$ defined as in Section \ref{sec:two}. We put $\ell_{-1}(\sigma) = \top$ and $\scope_{-1}(\sigma) = |\sigma|$ for each $\sigma \in \mathbb{T}_{-1}$. This $\mathbb{T}_{-1}$ is admissible. For each instance $\mc{M}_e^\xi$ we will define trees $\mathbb{T}_e^{\xi \widehat{\ } \infty}$ and $\mathbb{T}_e^{\xi\widehat{\ } \sigma}$, where $\mathbb{T}_e^{\xi \widehat{\ } \infty}$ is the outcome of the attempt to construct an $e$-splitting tree, and the $\mathbb{T}_e^{\xi\widehat{\ } \sigma}$ are subtrees which would witness the failure of the construction of the $e$-splitting tree. Define
\begin{itemize}
\item $\mathbb{T}_e^{\xi \widehat{\ } \infty}$ is the labeled tree $T$ produced by \texttt{Procedure($e$,$\sigma$,$\mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}$)} where $\sigma = \xi(\mc{P}_{e-1})$ (or $\sigma$ is the root of $\mathbb{T}_{-1}$ if $e = 0$). The tree $T$ built by this procedure has labels $\ell_e$ and scopes $\scope_e$ defined in its construction.
\item $\mathbb{T}_e^{\xi \widehat{\ } \sigma}$ is the tree $\mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}\treeres{\sigma}{\succ f}$. The labels $\ell_e$ of the tree $\mathbb{T}_e^{\xi \widehat{\ } \sigma}$ are defined by setting:
\begin{itemize}
\item $\scope_e(\sigma) = 1$ and $\ell_e(\sigma) = \top$;
\item for $\tau \neq \sigma$, $\scope_e(\tau) = \scope_{e-1}(\tau)-1$ and $\ell_e(\tau) = \top$ if $\ell_{e-1}(\tau) = \top$, or $\ell_e(\tau) = \eta$ if $\ell_{e-1}(\tau) = f\eta$.
\end{itemize}
\end{itemize}
There is a uniform and computable construction of all of these trees simultaneously. Whenever in \texttt{Procedure($e$,$\sigma$,$\mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}$)} we need to determine the next level of the tree $\mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}$, the procedure waits until this next level is defined. If $\mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}$ is a total tree then at some point enough of it will be defined for the procedure to continue, and if it is not a total tree, then the procedure will get stuck and $\mathbb{T}^{\xi \widehat{\ } \infty}_{e}$ will also be partial. Similarly, to define a level of $\mathbb{T}_e^{\xi \widehat{\ } \sigma} = \mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}\treeres{\sigma}{\succ f}$, we need to first build some portion of $\mathbb{T}^{\xi \; \!\!\upharpoonright_{3e-3}}_{e-1}$.
To be a bit more precise, there are two parts of \texttt{Procedure}. First, there is the part where we look for extensions $\sigma^*$ and $\sigma^{**}$ of each leaf $\sigma$ of $T[n]$; and second, after waiting for $T_{e-1}[s]$ to be defined, we define the next level $T[n+1]$ of $T$. If in the first part of \texttt{Procedure}, the tree $T_{e-1}$ has not been sufficiently defined, we just end the current stage of the procedure and restart at the next stage. In the second part of \texttt{Procedure}, we just wait for $T_{e-1}[s]$ to be defined, and then continue from where we were; but while we wait, we still count through the stages, so that when we return it is not at stage $s+1$ but at some greater stage depending on how long we waited for $T_{e-1}$. This will be important to make sure that the simulations used by the lowness requirements are not too slow.
After we define the true path, we will use Lemma \ref{lem:split} to show that along the true path the trees are all fully defined and admissible. When we are not on the true path, e.g.\ if the outcome of $\mc{M}^\eta_e$ is finitary (which means that \texttt{Procedure} is unsuccessful because it cannot find enough $e$-splits), the tree $\mathbb{T}_e^{\eta \widehat{\ } \infty}$ will be partial, and then any tree defined using this tree will also be partial. Of course, these will all be off the true path.
\subsection{Construction of $A$ and the true path}
We simultaneously define $A$ and a true path of outcomes $\pi$ by finite extension. The construction of the trees in the previous section was uniformly computable, but $A$ and the true path $\pi$ will be non-computable. (Think of this part of the construction as analogous to choosing a generic in a forcing construction.)
Begin with $A_{-1} = \varnothing$ and $\pi_{-1} = \varnothing$. Suppose that we have so far defined $A_s \prec A$ and $\pi_s \prec \pi$, with $|\pi_s| = s+1$. To define $A_{s+1}$ and $\pi_{s+1}$ we first ask the next requirement what its outcome is, and then define the extensions appropriately. We use $\mathbb{T}_e$ for the tree defined along the true outcome, i.e.\ $\mathbb{T}_e := \mathbb{T}_e^{\pi_{3e}}$.
\begin{description}
\item[$s+1 = 3e$:] Consider $\mathcal{M}_e^{\pi_s}$. By Lemma \ref{lem:split}, either $\mathbb{T}_e^{\pi_s \widehat{\ } \infty}$ is an $e$-splitting tree, or there is $\sigma \in \mathbb{T}_{e-1} \treeres{\pi_s (\mc{P}_{e-1})}{\succ \varnothing}$ with $\ell_{e-1}(\sigma) = \top$ such that either:
\begin{enumerate}
\item there is $n$ such that for all $\tau \in \mathbb{T}_{e-1}\treeres{\sigma}{\succ f}$, $\Phi_e^\tau(n) \uparrow$, or
\item for all $\tau_1,\tau_2 \in \mathbb{T}_{e-1}\treeres{\sigma}{\succ f}$ and $n$,
\[ \Phi_e^{\tau_1}(n) \downarrow \ \wedge \ \Phi_e^{\tau_2}(n) \downarrow \quad \longrightarrow \quad \Phi_e^{\tau_1}(n) = \Phi_e^{\tau_2}(n).\]
\end{enumerate}
In the former case, let $\pi_{s+1} = \pi_s \widehat{\ } \infty$ and $A_{s+1} = A_s$.
In the latter two cases, let $\pi_{s+1} = \pi_s \widehat{\ } \sigma$ and $A_{s+1} = \sigma \succeq A_s$.
\item[$s+1 = 3e + 1$:] Consider $\mathcal{L}_e^{\pi_s}$ with $e = \langle e_1,e_2 \rangle$. If there is $\sigma \in \mathbb{T}_e$ and $n$ such that $\Psi_{e_1}^\sigma(n) \neq R_{e_2}(n)$, then let $A_{s+1} = \sigma$ and $\pi_{s+1} = \pi_s \widehat{\ } \sigma$. We may choose $\sigma$ to have $\ell_e(\sigma) = \top$, as (by Lemma \ref{lem:all-trees-admissible}) $\mathbb{T}_e$ is admissible. Otherwise, if there is no such $\sigma$, let $A_{s+1} = A_s$ and $\pi_{s+1} = \pi_s \widehat{\ } \infty$.
\item[$s+1 = 3e + 2$:] Consider $\mathcal{P}_e^{\pi_s}$. If $A_s = \sigma \in \mathbb{T}_e$, let $\tau_1$ and $\tau_2$ be the two main children of $\sigma$ on $\mathbb{T}_e$. Choose $A_{s+1} \succ \sigma$ to be whichever of $\tau_1,\tau_2$ is not an initial segment of the $e$th c.e.\ set $W_e$.
\end{description}
We define $A = \bigcup_s A_s$ and the true sequence of outcomes $\pi = \bigcup_s \pi_s$. We denote by $\pi_{\mc{R}}$ the true outcome up to and including the requirement $\mc{R}$; for example, $\pi_{\mc{M}_e} = \pi_{3e}$; and similarly for $A_{\mc{R}}$.
In the following lemma, we prove that along the true path, the trees that we construct are total and admissible.
\begin{lemma}\label{lem:all-trees-admissible}
For each $e$, $\mathbb{T}_e$ is an admissible labeled tree.
\end{lemma}
\begin{proof}
We argue by induction on $e$. $\mathbb{T}_{-1}$ is an admissible tree. Given $\mathbb{T}_e$ total and admissible, if $\mc{M}_{e+1}$ has the infinitary outcome then $\mathbb{T}_{e+1}$ is defined from $\mathbb{T}_e$ using the \texttt{Procedure}, which is successful, and hence by Lemma \ref{lem:admissible} is admissible.
So suppose that $\mc{M}_{e+1}$ has the finitary outcome, and $\mathbb{T}_{e+1}$ is the tree $\mathbb{T}_e \treeres{\sigma}{\succ f}$ for some $\sigma \in \mathbb{T}_{e} \treeres{\pi_s (\mc{P}_{e})}{\succ \varnothing}$ with $\ell_{e}(\sigma) = \top$. The labels $\ell_{e+1}$ of the tree $\mathbb{T}_{e+1}$ are defined by setting:
\begin{itemize}
\item $\scope_{e+1}(\sigma) = 1$ and $\ell_{e+1}(\sigma) = \top$;
\item for $\tau \neq \sigma$, $\scope_{e+1}(\tau) = \scope_{e}(\tau)-1$ and $\ell_{e+1}(\tau) = \top$ if $\ell_{e}(\tau) = \top$, or $\ell_{e+1}(\tau) = \eta$ if $\ell_{e}(\tau) = f\eta$.
\end{itemize}
We must argue that $\mathbb{T}_{e+1} = \mathbb{T}_e\treeres{\sigma}{\succ f}$ is admissible:
\begin{enumerate}
\item Each $\sigma \in \mathbb{T}_{e+1}$ has two main children $\sigma^*$ and $\sigma^{**}$, namely the same two main children of $\sigma$ in $\mathbb{T}_e$. We have $\ell_{e+1}(\sigma^*) = \ell_{e}(\sigma^*) - 1 \succeq \ell_{e}(\sigma) - 1 = \ell_{e+1}(\sigma)$ and similarly for $\sigma^{**}$.
\item If $\sigma^*$ is a child of $\sigma$ on $\mathbb{T}_{e+1}$, then $\sigma^*$ is a child of $\sigma$ on $\mathbb{T}_e$ and $\scope_{e+1}(\sigma^*) = \scope_{e}(\sigma^*) - 1 \geq \scope_{e}(\sigma) - 1 = \scope_{e+1}(\sigma)$.
\item For each $n$, each path through $\mathbb{T}_{e+1}$ is also a path through $\mathbb{T}_e$, and hence contains a node $\sigma$ with $\ell_{e+1}(\sigma) = \ell_e(\sigma) = \top$ and $\scope_{e+1}(\sigma) = \scope_{e}(\sigma) - 1 \geq n$.\qedhere
\end{enumerate}
\end{proof}
\section{Verification}
In this section, we check that the $A$ constructed above is non-computable, of minimal degree, and low for speed.
\subsection{Non-computable}
We chose the initial segment $A_{3e+2}$ of $A$ such that it differs from the $e$th c.e.\ set. Thus $A$ is not computable.
\subsection{Minimal degree}
Recall that we write $\mathbb{T}_e$ for $\mathbb{T}_e^{\pi_{\mc{M}_e}}$, the tree produced by the true outcome of $\mc{M}_e$, and we sometimes write $\mc{M}_e$ for the instance of $\mc{M}_e$ acting along the true sequence of outcomes. We show that $A$ is of minimal degree by showing that it lies on the trees $\mathbb{T}_e$ which are either $e$-splitting or force $\Phi_e^A$ to be partial or computable.
\begin{lemma}\label{lem:A-on-tree}
For all $e$, $A \in [\mathbb{T}_e]$.
\end{lemma}
\begin{proof}
Note that for each $e$, $A_{3e-1}$ is the outcome of $\mathcal{P}_{e-1}$ and so it is the root node of $\mathbb{T}_e$. Then we choose $A_{3e} \preceq A_{3e+1} \preceq A_{3e+2}$ in $\mathbb{T}_e$. Since for each $e' \geq e$, $A_{e'} \in \mathbb{T}_{e'} \subseteq \mathbb{T}_{e}$, the lemma follows.
\end{proof}
\begin{lemma}
$A$ is a minimal degree.
\end{lemma}
\begin{proof}
$A$ is non-computable. We must show that $A$ is minimal. Suppose that $\Phi_e^A$ is total. If the outcome of $\mathcal{M}_e^{\pi_{3e}}$ is $\infty$, then $A$ lies on the $e$-splitting tree $\mathbb{T}_e = T$ produced by \texttt{Procedure($e$,$A_{\mc{P}_{e-1}}$,$\mathbb{T}_{e-1}$)} and hence $\Phi_e^A \geq_T A$. If the outcome of $\mathcal{M}_e^{\pi_{3e}}$ is $\sigma$, then $A$ lies on $\mathbb{T}_e = \mathbb{T}_{e-1}\treeres{\sigma}{\succ f}$ and (since $\Phi_e^A$ is total) for all $\tau_1,\tau_2 \in \mathbb{T}_{e-1}\treeres{\sigma}{\succ f}$ and for all $n$,
\[ \Phi_e^{\tau_1}(n) \downarrow \ \wedge \ \Phi_e^{\tau_2}(n) \downarrow \quad \longrightarrow \quad \Phi_e^{\tau_1}(n) = \Phi_e^{\tau_2}(n).\]
Thus $\Phi_e^A$ is computable.
\end{proof}
\subsection{Low-for-speed}
Our final task is to show that $A$ is low-for-speed. Because we now have to deal with running times, we need to be a bit more precise about the construction of the trees $\mathbb{T}$ in Section \ref{sec:min-req}. Fixing a particular set of parameters for \texttt{Procedure($e$,$\rho$,$T_{e-1}$)}, one can check that the $s$th stage takes time polynomial in $s$ and $e$. (If the leaves of $T$ have been designated \textit{waiting}, then we charge the time required to wait for $T_{e-1}[s]$ to be defined to stage $s+1,s+2,\ldots$.) In checking this, it is important to note that because all of these trees are subtrees of $\mathbb{T}_{-1}$, there are only polynomially in $s$ many elements of each tree of length (as a binary string) at most $s$. Thus by dovetailing all of the procedures, we can ensure that the $s$th stage of each instance of \texttt{Procedure} takes time polynomial in $s$; the particular polynomial will depend on the parameters for \texttt{Procedure}.
As we build all of the trees $\mathbb{T}$, we keep track of them in an easy-to-query way (such as using pointers) so, e.g., querying whether an element is in a tree can be done in time polynomial in the length (as a binary string) of that element. Again, we use the fact that all of these elements are in $\mathbb{T}_{-1}$.
Now we will define the simulation procedure. Fix a lowness requirement $\mc{L}_e$. Define $\eta = \Delta(\pi_{\mc{L}_e}) \in \{f,\infty\}^{e+1}$; $\eta$ is the sequence of guesses, $f$ or $\infty$, at the outcomes of $\mc{M}_0,\ldots,\mc{M}_e$. Write $\eta_{> i}$ for the final segment $\langle \eta(i+1),\ldots,\eta(e) \rangle$ of $\eta$, the guesses at $\mc{M}_{i+1},\ldots,\mc{M}_e$.
Write $\scope_i$ and $\ell_{i}$ for the scope and labeling function on $\mathbb{T}_i$. Let $\rho_1,\ldots,\rho_k$ be incomparable nodes on $\mathbb{T}_e$ such that (a) every path on $\mathbb{T}_e$ passes through some $\rho_i$, (b) each $\rho_i$ has $\ell_e(\rho_i) = \top$, and (c) for each $i$ and each $e' \leq e$, $\scope_{e'}(\rho_i) \geq e-e'$. We can find such $\rho_i$ because $\mathbb{T}_e$ is admissible. (Think of the $\rho_i$ as an open cover of $\mathbb{T}_e$ by nodes whose scope, in every $\mathbb{T}_{e'}$ ($e' \leq e$), includes $\mc{L}_e$.) For each $i$, we define a simulation $\Xi_{e,i}$ which works for extensions of $\rho_i$. (It will be non-uniform to know which $\Xi_{e,i}$ to use to simulate $A$, as we will need to know which $\rho_i$ is extended by $A$.) Fix $i$, for which we define the simulation $\Xi = \Xi_{e,i}$:
\noindent \textit{Simulation $\Xi = \Xi_{e,i}$:} Begin at stage $0$ with $\Xi(x) \uparrow$ for all $x$. At stage $s$ of the simulation, for each $\sigma \in \mathbb{T}_{-1}$ with $|\sigma| < s$ and $\sigma \succeq \rho_i$, check whether, for each $e' \leq e$, if $\mathbb{T}_{e'}[n]$ is the greatest level of the tree $\mathbb{T}_{e'}$ defined by stage $s$ of the construction of the trees $\mathbb{T}$, then either:
\begin{itemize}
\item $\sigma$ is on $\mathbb{T}_{e'}[n]$ and $\sigma \in \mathbb{T}_{e'} \treeres{\rho_i}{\succ \eta_{>e'}}$, or
\item $\sigma$ extends a leaf $\sigma'$ of $\mathbb{T}_{e'}[n]$, with $\sigma' \in \mathbb{T}_{e'} \treeres{\rho_i}{\succ \eta_{>e'}}$, and:
\begin{itemize}
\item[($*$)] if $\mathbb{T}_{e'}$ is defined using \texttt{Procedure} ($\mc{M}_{e'}$ has the infinitary outcome), \[ \pred_{\scope_{e'}(\sigma')}(\ell_{e'}(\sigma')) = \eta_{> e'},\]
and $\sigma'$ has at stage $s$ been designated \textit{waiting with main children $\sigma^*$ and $\sigma^{**}$}, then $\sigma$ extends or is extended by either $\sigma^*$ or $\sigma^{**}$.
\end{itemize}
\end{itemize}
If for some $\sigma$ this is true for all $e' \leq e$, then for any $k < s$ with $\Psi^{\sigma}_s(k) \downarrow$, set $\Xi(k) = \Psi^{\sigma}_s(k)$ if it is not already defined.
The idea behind the condition ($*$) is that if $\sigma'$ has been designated \textit{waiting}, this is a warning that the secondary children of $\sigma$ will not be simulated by any lowness requirement with guess $\preceq \eta_{> e'}$. So, if $\mc{L}_{e}$ is such a lowness requirement, and if $\sigma$ is along a secondary child of $\sigma'$, then we should not simulate $\sigma$.
When we say stage $s$ of the construction of the tree $\mathbb{T}_{e'}$, we mean stage $s$ in \texttt{Procedure} if that is how $\mathbb{T}_{e'}$ is defined, or if $\mc{M}_{e'}$ has a finitary outcome then that part of $\mathbb{T}_{e'}$ which can be defined from stage $s$ in the construction of $\mathbb{T}_{e'-1}$. Recall that we can run these constructions up to stage $s$ in time polynomial in $s$. Thus:
\begin{remark}\langlebel{rem:sim-poly}
Stage $s$ of the simulation can be computed in time polynomial in $s$. (The polynomial may depend on $e$.) This is because there are polynomially many in $s$ nodes $\sigma \in \mathbb{T}_{-1}$ with $|\sigma| < s$.
\end{remark}
The next series of lemmas are proved in the context above of a fixed $e$, with $\rho_1,\ldots,\rho_k$. If $e = \langle e_1,e_2 \rangle$, we write $\Psi$ for $\Psi_{e_1}$. Fix $j$ such that $A$ extends $\rho_j$.
If $\mc{L}_e$ has outcome $\infty$, then we need $\Xi_e$ to include the initial segments of $A$ in the computations that it simulates. First we prove that the initial segments of $A$ have the right labels to be simulated.
\begin{lemma}\label{lem:A-in-labeled-trees}
If $\pi(\mc{L}_e) = \infty$, then for each $e' \leq e$, $A \in [\mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}]$.
\end{lemma}
\begin{proof}
Let $\sigma = A_{\mc{M}_e}$ be the root of $\mathbb{T}_e$. Since $\pi(\mc{L}_e) = \infty$, $A_{\mc{L}_e} = A_{\mc{M}_e}$. Let $\sigma^* = \xi(\mc{P}_{e}) = A_{\mc{P}_e}$. Then, by construction, $\sigma^*$ is a main child of $\sigma$. $A$ is a path through $\mathbb{T}_{e+1}$ extending $\sigma^*$, and $\mathbb{T}_{e+1}$ is one of the following two trees, depending on the outcome of $\mc{M}_{e+1}$:
\begin{enumerate}
\item the labeled tree $T$ produced by \texttt{Procedure($e+1$,$\sigma^*$,$\mathbb{T}_e$)}, which is a subtree of $\mathbb{T}_e \treeres{\sigma^*}{\succ \varnothing}$; or
\item the tree $\mathbb{T}_{e} \treeres{\tau}{\succ f}$ for some $\tau \in \mathbb{T}_e \treeres{\sigma^*}{ \succ \varnothing}$ with $\ell_e(\tau) = \top$.
\end{enumerate}
In either case, $A \in [\mathbb{T}_{e} \treeres{\rho_j}{\succ \varnothing}]$, and $\varnothing = \eta_{> e}$. (Note that $\rho_j$ extends $\sigma$ and is extended by $A$.)
Now we argue backwards by induction. Suppose that $A \in [\mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}]$. We want to argue that $A \in [\mathbb{T}_{e'-1} \treeres{\rho_j}{\succ \eta_{>e'-1}}]$. We have two cases, depending on the outcome of $\mc{M}_{e'}$:
\begin{itemize}
\item The outcome of $\mc{M}_{e'}$ is $\infty$. Then $\eta_{>e'-1} = \infty \eta_{>e'}$ and $\mathbb{T}_{e'}$ is the labeled tree $T$ produced by \texttt{Procedure($e'$,$A_{\mc{P}_{e'-1}}$,$\mathbb{T}_{e'-1}$)}. By Lemma \ref{lem:n-to-n-1}, given $\sigma \in \mathbb{T}_{e'}$ and $\sigma^* \in \mathbb{T}_{e'} \treeres{\sigma}{\succ \eta_{>e'}}$, we have that $\sigma^* \in \mathbb{T}_{e'-1} \treeres {\sigma}{\succ \infty \eta_{>e'}} = \mathbb{T}_{e'-1} \treeres {\sigma}{\succ \eta_{>e'-1}}$. As $A \in [\mathbb{T}_{e'}\treeres{\rho_j}{\succ \eta_{> e'}}]$ we have $A \in [\mathbb{T}_{e'-1} \treeres{\rho_j}{\succ \eta_{>e'-1}}]$.
\item The outcome of $\mc{M}_{e'}$ is $f$. Then $\eta_{>e'-1} = f \eta_{>e'}$ and $\mathbb{T}_{e'}$ is the tree $\mathbb{T}_{e'-1} \treeres{\tau}{\succ f}$ for some $\tau \in \mathbb{T}_{e'-1} \treeres{A_{\mc{L}_{e'-1}}}{\succ \varnothing}$ with $\ell_{e'-1}(\tau) = \top$. The labels on $\mathbb{T}_{e'}$ are defined so that if $\sigma \in \mathbb{T}_{e'}$, then $\ell_{e'-1}(\sigma) = f \ell_{e'}(\sigma)$ or $\ell_{e'-1}(\sigma) = \top$. Thus, as $A \in [\mathbb{T}_{e'}\treeres{\rho_j}{\succ \eta_{>e'}}]$, and $\mathbb{T}_{e'} = \mathbb{T}_{e'-1} \treeres{\tau}{\succ f}$, we get that $A \in [\mathbb{T}_{e'-1} \treeres{\rho_j}{\succ f\eta_{>e'}}] = [\mathbb{T}_{e'-1} \treeres{\rho_j}{\succ \eta_{>e'-1}}]$.\qedhere
\end{itemize}
\end{proof}
Now we prove that if $\mathcal{P}si^A(r)$ converges, then the simulation $\Xi(r)$ converges as well, though it is possible that it will have a different value if there was some other computation $\mathcal{P}si^\sigma(r)$ which converged before $\mathcal{P}si^A(r)$ did. Moreover, the simulation will not be too much delayed.
\begin{lemma}\label{lem:result-watched}
If $\pi(\mc{L}_e) = \infty$, and $\Psi^A(r) \downarrow$, then $\Xi(r) \downarrow$. Moreover, there is a polynomial $p$ depending only on $e$ such that if $\Psi_s^A(r) \downarrow$, then $\Xi_{p(s)}(r) \downarrow$.
\end{lemma}
\begin{proof}
By the previous lemma for each $e' \leq e$, $A \in [\mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}]$. Let $\sigma \in \mathbb{T}_{e}$ be an initial segment of $A$ and $s$ a stage such that $\Psi^\sigma_s(r) \downarrow$. We may assume that $\sigma$ is sufficiently long that $\sigma$ extends $\rho_j$.
Fix $e'$ and let $\mathbb{T}_{e'}[n]$ be the greatest level of the tree $\mathbb{T}_{e'}$ defined by stage $s$. Then, as $A \in [\mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}]$, $\sigma \in \mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}$. We check the conditions (for $e'$) from the definition of the simulation $\Xi$. Either $\sigma \in \mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}[n]$, or some initial segment $\sigma'$ of $\sigma$ is in $\mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}[n]$. In the second case, let us check that we satisfy ($*$). We only need to check ($*$) in the case that $\mathbb{T}_{e'}$ was defined using \texttt{Procedure},
\[ \pred_{\scope_{e'}(\sigma')}(\ell_{e'}(\sigma')) = \eta_{> e'},\]
and $\sigma'$ has at stage $s$ been designated \textit{waiting with main children $\sigma^*$ and $\sigma^{**}$}. Now, if $\sigma \in \mathbb{T}_e$ does not extend $\sigma^*$ or $\sigma^{**}$, then it would extend a secondary child $\sigma^\dagger$ of $\sigma'$ with \[ \ell_{e'}(\sigma^{\dagger}) \preceq \pred_{\scope_{e'}(\sigma')}(\ell_{e'}(\sigma')) = \eta_{> e'}.\]
This contradicts the fact that $\sigma \in \mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}$.
Since this is true for every $e' \leq e$, and $\Psi_s^\sigma(r) \downarrow$, the simulation defines $\Xi(r) = \Psi_s^\sigma(r)$ if $\Xi(r)$ is not already defined.
The simulation defines $\Xi(r)$ at the $s$th stage of the simulation. By Remark \ref{rem:sim-poly}, the $s$th stage of the simulation can be computed in time polynomial in $s$.
\end{proof}
Lemma \ref{lem:result-watched} covers the infinitary outcome of $\mc{L}_e$. For the finitary outcome, we need to see that any computation simulated by $\Xi_e$ is witnessed by a computation on the tree, because the use of such a computation was not removed from the tree.
\begin{lemma}\label{lem:on-tree}
For all $r$, if $\Xi(r) \downarrow$ then there is $\sigma \in \mathbb{T}_e$, $\sigma \succeq \rho_j$, such that $\Psi^{\sigma}(r) = \Xi(r)$.
\end{lemma}
\begin{proof}
Since $\Xi(r) \downarrow$, by definition of the simulation, there is a stage $s$ of the simulation and $\sigma \in \mathbb{T}_{-1}$ with $|\sigma| < s$ such that, for each $e' \leq e$, if $\mathbb{T}_{e'}[n]$ is the greatest level of the tree $\mathbb{T}_{e'}$ defined by stage $s$, then either:
\begin{enumerate}
\item $\sigma$ is on $\mathbb{T}_{e'}[n]$ and $\sigma \in \mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}$, or
\item $\sigma$ extends a leaf $\sigma'$ of $\mathbb{T}_{e'}[n]$, with $\sigma' \in \mathbb{T}_{e'} \treeres{\rho_j}{\succ \eta_{>e'}}$, and:
\begin{itemize}
\item[($*$)] if $\mathbb{T}_{e'}$ is defined using \texttt{Procedure} ($\mc{M}_{e'}$ has the infinitary outcome), \[ \pred_{\scope_{e'}(\sigma')}(\ell_{e'}(\sigma')) = \eta_{> e'},\]
and $\sigma'$ has at stage $s$ been designated \textit{waiting with main children $\sigma^*$ and $\sigma^{**}$}, then $\sigma$ extends or is extended by either $\sigma^*$ or $\sigma^{**}$.
\end{itemize}
\end{enumerate}
$\Xi(r)$ was defined to be $\Psi^{\sigma}_s(r)$ for some such $\sigma$.
We argue by induction on $i \leq e$ that there is a $\sigma \in \mathbb{T}_{i}\treeres{\rho_j}{\succeq \eta_{> i}}$, with the parent of $\sigma$ in $\mathbb{T}_{i}\treeres{\rho_j}{\succ \eta_{> i}}$, with $\Xi(r) = \Psi^{\sigma}_s(r)$ and satisfying, for each $e'$ with $i < e' \leq e$, either (1) or (2). By the previous paragraph, this is true for $i = -1$. If we can show it for $i = e$, then the lemma is proved. All that is left is the inductive step. Suppose that it is true for $i$; we will show that it is true for $i+1$. We have two cases, depending on the outcome of $\mc{M}_{i+1}$.
\begin{case}
$\pi_{\mc{M}_{i+1}} = \infty$.
\end{case}
Since $\pi_{\mc{M}_{i+1}} = \infty$, $\mathbb{T}_{i+1}$ is the $(i+1)$-splitting tree defined by \texttt{Procedure($i+1$,$A_{\mc{P}_{i}}$,$\mathbb{T}_i$)}. Fix $\sigma$ from the induction hypothesis: $\sigma \in \mathbb{T}_{i}\treeres{\rho_j}{\succeq \eta_{> i}}$, the parent of $\sigma$ is in $\mathbb{T}_{i}\treeres{\rho_j}{\succ \eta_{> i}}$, $\Xi(r) = \Psi^{\sigma}_s(r)$ and satisfies, for each $e'$ with $i < e' \leq e$, either (1) or (2). Since $\Psi^{\sigma}_s(r)$ converges, we have that $|\sigma| \leq s$ and so $\sigma \in \mathbb{T}_{i}[s]$. Let $n$ be the greatest level of $\mathbb{T}_{i+1}$ defined by stage $s$.
At stage $s$, either (1) $\sigma$ is already on $\mathbb{T}_{i+1}\treeres{\rho_j}{\succ \eta_{>i+1}}[n]$, in which case we are done, or (2) $\sigma$ extends a leaf $\sigma'$ of $\mathbb{T}_{i+1}[n]$, with $\sigma' \in \mathbb{T}_{i+1} \treeres{\rho_j}{\succ \eta_{>i+1}}$, and:
\begin{itemize}
\item[($**$)] if $\sigma'$ has at stage $s$ been designated \textit{waiting with main children $\sigma^*$ and $\sigma^{**}$} and \[ \pred_{\scope_{i+1}(\sigma')}(\ell_{i+1}(\sigma')) = \eta_{> i+1},\]
then $\sigma$ extends or is extended by either $\sigma^*$ or $\sigma^{**}$.
\end{itemize}
We argue in case (2).
We have $\ell_{i+1}(\sigma') \succ \eta_{> i+1}$. (If $\sigma' = \rho_j$ we use the fact that $\ell_{i+1}(\rho_j) = \top$.) Now at some stage we define the next level of the tree, $\mathbb{T}_{i+1}[n+1]$. When we do this, the children of $\sigma'$ are:
\begin{itemize}
\item the main children $\sigma^*,\sigma^{**}$ of $\sigma'$;
\item each other $\sigma^\dagger \in \mathbb{T}_i \treeres{\sigma'}{\succ \varnothing}[t]$, where $t$ is the stage of the \texttt{Procedure} at which $\sigma'$ was declared \textit{waiting}. We might have $t < s$ if $\sigma'$ was already declared \textit{waiting} before stage $s$. In this case, ($**$) will come into play.
\end{itemize}
We divide into two possibilities, and use ($**$) to argue that these are the only two possibilities.
\begin{itemize}
\item[(P1)] There is a child $\sigma''$ of $\sigma'$ such that $\sigma''$ extends $\sigma$.
\item[(P2)] There is a child $\sigma''$ of $\sigma'$, with $\ell_{i+1}(\sigma'') \succ \eta_{> i+1}$, such that $\sigma$ strictly extends $\sigma''$.
\end{itemize}
Since $\sigma \in \mathbb{T}_{e'}$, $\sigma$ must be compatible with (i.e., it extends or is extended by) one of the children of $\sigma'$. If we are not in case (P1), then $\sigma$ extends one of the children of $\sigma'$. If $\sigma$ extends one of the main children $\sigma''$ of $\sigma'$, then we have $\ell_{i+1}(\sigma'') \succeq \ell_{i+1}(\sigma') \succ \eta_{> i+1}$ and are in case (P2). So the remaining possibility is that $\sigma$ extends one of the secondary children $\sigma''$ of $\sigma'$. Now $\sigma'' \in \mathbb{T}_i[t]$, where $t$ is the stage of the \texttt{Procedure} at which $\sigma'$ was declared \textit{waiting}. As $\sigma \in \mathbb{T}_i[s]$, because $\sigma''$ does not extend $\sigma$, it must be that $t < s$. Since $\sigma$ properly extends $\sigma''$, and the parent of $\sigma$ in $\mathbb{T}_i$ is in $\mathbb{T}_{i}\treeres{\rho_j}{\succ \eta_{> i}}$, $\sigma'' \in \mathbb{T}_{i}\treeres{\rho_j}{\succ \eta_{> i}}$. Then, looking at the construction of $\mathbb{T}_{i+1}$, $\ell_{i+1}(\sigma'') \succ \eta_{> i+1}$ unless $\pred_{\scope_{i+1}(\sigma')}(\ell_{i+1}(\sigma')) = \eta_{> i+1}$. (Recall that $\ell_{i+1}(\sigma') \succ \eta_{> i+1}$.) But then ($**$) would imply that $\sigma$ extends a secondary child of $\sigma'$, which we assumed was not the case. Thus $\ell_{i+1}(\sigma'') \succ \eta_{> i+1}$. We have successfully argued that (P1) and (P2) are the only possibilities.
We begin by considering (P1). If $\sigma''$ is a main child of $\sigma'$, then we have $\sigma'' \in \mathbb{T}_{i+1} \treeres{\rho_j}{\succ \eta_{> i+1}}$. If it is a secondary child of $\sigma'$, then there may be many possible choices for $\sigma''$. We have $\sigma \in \mathbb{T}_i\treeres{\rho_j}{\succeq \eta_{>i}}$ and so we could have chosen $\sigma'' \in \mathbb{T}_i\treeres{\rho_j}{\succeq \eta_{>i}}$ (for example, we could choose $\sigma''$ to be a main child of the main child of... of $\sigma$ in $\mathbb{T}_i$). Thus $\ell_{i+1}(\sigma'')$ is at least the minimum of $\eta_{>i+1}$ and $\pred_{\scope_{i+1}(\sigma')}(\ell_{i+1}(\sigma')) \succeq \eta_{> i+1}$. So $\sigma'' \in \mathbb{T}_{i+1} \treeres{\rho_j}{\succeq \eta_{> i+1}}$.
Since $\sigma''$ extends $\sigma$, $\Xi(r) = \Psi^{\sigma''}_s(r)$.
Fix $e'$ with $i+1 < e' \leq e$. Now as $\sigma$ was not on $\mathbb{T}_{i+1}[n]$, it cannot be on that part of $\mathbb{T}_{e'}$ constructed by stage $s$. Thus (1) cannot be true for $e'$ and $\sigma$, and so (2) must be true. Then (2) is also true for $e'$ and the extension $\sigma''$ of $\sigma$.
Now consider (P2). There is a sequence of children $\sigma_0 = \sigma',\sigma_1 = \sigma'',\sigma_2,\ldots,\sigma_k$, with $\sigma$ strictly extending $\sigma_{k-1}$ and $\sigma_k$ extending $\sigma$, $\sigma_k$ is an initial segment of $A$, and with $k \geq 2$. We first claim that $\sigma_2$ is a main child of $\sigma_1$, $\sigma_3$ is a main child of $\sigma_2$, and so on, up until $\sigma_{k-1}$ is a main child of $\sigma_{k-2}$. Indeed, when we begin to define the $(n+2)$nd level of the tree $\mathbb{T}_{i+1}$, we do so at a stage greater than $s$, and so the secondary children of $\sigma_1$ are at a level at least $s$ in $\mathbb{T}_i$ (while $\sigma \in \mathbb{T}_i[s]$). So any secondary child of $\sigma_1$ compatible with $\sigma$ would extend $\sigma$, and hence be $\sigma_k$. A similar argument works for the children of $\sigma_2$, $\sigma_3$, ..., $\sigma_{k-2}$.
Thus $\ell_{i+1}(\sigma_{k-1}) \succeq \ell_{i+1}(\sigma_{k-2}) \succeq \cdots \succeq \ell_{i+1}(\sigma_{1}) \succ \eta_{>i+1}$. So $\sigma_{k-1}$, the parent of $\sigma_k = \sigma$, has $\sigma_{k-1} \in \mathbb{T}_{i+1} \treeres{\rho_j}{\succ \eta_{> i+1}}$.
Now if $\sigma_k$ is a main child of $\sigma_{k-1}$, then we have $\sigma_k \in \mathbb{T}_{i+1} \treeres{\rho_j}{\succ \eta_{> i+1}}$. If it is a secondary child of $\sigma_{k-1}$, then there may be many possible choices for $\sigma_k$. We have $\sigma \in \mathbb{T}_i\treeres{\rho_j}{\succeq \eta_{>i}}$ and so we could have chosen $\sigma_k \in \mathbb{T}_i\treeres{\rho_j}{\succeq \eta_{>i}}$ (for example, we could choose $\sigma_k$ to be a main child of the main child of... of $\sigma$ in $\mathbb{T}_i$). Thus $\ell_{i+1}(\sigma_k)$ is at least the minimum of $\eta_{>i+1}$ and $\pred_{\scope_{i+1}(\sigma_{k-1})}(\ell_{i+1}(\sigma_{k-1})) \succeq \eta_{> i+1}$. So $\sigma_k \in \mathbb{T}_{i+1} \treeres{\rho_j}{\succeq \eta_{> i+1}}$.
Since $\sigma_k$ extends $\sigma$, we have $\Xi(r) = \Psi^{\sigma_k}_s(r)$.
Fix $e'$ with $i+1 < e' \leq e$. Now as $\sigma$ was not on $\mathbb{T}_{i+1}[n]$, it cannot be on that part of $\mathbb{T}_{e'}$ constructed by stage $s$. Thus (1) cannot be true for $e'$ and $\sigma$, and so (2) must be true. Then (2) is also true for $e'$ and the extension $\sigma_k$ of $\sigma$.
\begin{case}
$\pi_{\mc{M}_{i+1}} = \tau$.
\end{case}
We have that $\sigma \in \mathbb{T}_i \treeres{\rho_j}{\succ \eta_{>i}}$. $\mathbb{T}_{i+1}$ is the tree $\mathbb{T}_{i} \treeres{\tau}{\succ f}$ for some $\tau \in \mathbb{T}_i \treeres{\pi_{\mc{P}_i}}{\succ \varnothing}$. The labels $\ell_{i+1}$ of the tree $\mathbb{T}_{i+1}$ are defined from the labels $\ell_{i}$ of the tree $\mathbb{T}_i$ by setting $\ell_{i+1}(\sigma) = \top$ if $\ell_{i}(\sigma) = \top$, or $\ell_{i+1}(\sigma) = \eta$ if $\ell_{i}(\sigma) = f\eta$.
Note that since $\pi_{\mc{M}_{i+1}} = \tau$ is the finitary outcome, $\eta_{> i}$ begins with $f$. Thus $\sigma$ is still on $\mathbb{T}_{i+1}$. Moreover, for each $\tau' \in \mathbb{T}_{i+1}$, $\ell_{i}(\tau') \succeq \eta_{>i}$ if and only if $\ell_{i+1}(\tau') \succeq \eta_{>i+1}$. So $\sigma$ is on $\mathbb{T}_{i+1}\treeres{\rho_j}{\succ \eta_{>i+1}}$.
\end{proof}
We now show how to use these lemmas to prove that $A$ is low-for-speed.
\begin{lemma}
$A$ is low-for-speed.
\end{lemma}
\begin{proof}
Given $\langle e,i \rangle$, suppose that $\Psi_{e}^A = R_{i}$ and that $\Psi_e^A(n)$ is computable in time $t(n)$. We must show that $R_{i}$ is computable in time $p(t(n))$ for some polynomial $p$. Note that the outcome of $\mathcal{L}_{\langle e,i \rangle}$ must be $\infty$, as otherwise we would have ensured that $\Psi_{e}^A \neq R_{i}$. Let $j$ be such that $A$ extends the $\rho_j$ from the simulation for $e$. So by Lemma \ref{lem:result-watched} $\Xi_{\langle e,i \rangle,j}$ is total and there is a polynomial $p$ depending only on $\langle e,i \rangle$ such that if $\Psi_{e}^A(n)$ is computed in time $s$, then $\Xi_{\langle e,i \rangle,j}(n)$ is computed in time $p(s)$.
Now we argue that $\Xi_{\langle e,i \rangle,j}$ computes $R_{i} = \Psi_e^A$. Suppose not; then there is $n$ such that $\Xi_{\langle e,i \rangle,j}(n) \neq R_{i}(n) = \Psi_{e}^A(n)$.
Since $\Xi_{\langle e,i \rangle,j}(n)$ does in fact converge, by Lemma \ref{lem:on-tree} there is $\sigma \in \mathbb{T}_{\langle e,i \rangle}$ extending $\rho_j$ such that $\Psi_e^\sigma(n) = \Xi_{{\langle e,i \rangle},j}(n) \neq R_i(n)$.
This contradicts the fact that the outcome of $\mc{L}_{\langle e,i \rangle}$ is $\infty$, as we would have chosen $\tau$ as the outcome.
\end{proof}
\end{document} |
\begin{document}
\begin{center}
\bfseries\Large On dominating set polyhedra of circular interval graphs\footnote{Partially supported by PIP-CONICET 277, PID-UNR 416, PICT-ANPCyT 0586, and MathAmSud 15MATH06 PACK-COVER.}
\end{center}
\begin{center}
\large Silvia Bianchi$^{2}$, Graciela Nasini$^{1,2}$, Paola Tolomei$^{1,2}$, and Luis Miguel Torres$^{3}$\\
\normalsize \{\texttt{sbianchi,nasini,ptolomei}\}\texttt{@fceia.unr.edu.ar, luis.torres@epn.edu.ec}
\\
\small{$^{1}$CONICET - Argentina}\\
\small{$^{2}$FCEIA, Universidad Nacional de Rosario, Rosario, Argentina}\\
\small{$^{3}$Centro de Modelizaci{\'o}n Matem{\'a}tica - ModeMat, Escuela Polit{\'e}cnica Nacional, Quito, Ecuador}\\
\end{center}
\begin{abstract}
Clique-node and closed neighborhood matrices of circular interval graphs are circular matrices. The
stable set polytope and the dominating set polytope on these graphs are therefore closely related to the
set packing polytope and the set covering polyhedron on circular matrices. Eisenbrand
\emph{et al.} \cite{EisenbrandEtAl08} take advantage of this relationship to propose a complete linear description of the stable set polytope on circular interval graphs. In this paper we follow similar ideas to obtain a
complete description of the dominating set polytope on the same class of graphs. As in the
packing case, our results are established for a larger class of covering polyhedra of the
form $\Qi{A,b}:= \mathrm{conv}\setof{x \in {\mathbb Z}^n_{+}}{Ax \geq b}$, with $A$ a circular matrix and
$b$ an integer vector.
These results also provide linear descriptions of polyhedra associated with several variants
of the dominating set problem on circular interval graphs.
\noindent \textbf{Keywords:}
circular matrix $\cdot$ covering polyhedra $\cdot$ dominating sets $\cdot$ circulant minor
\end{abstract}
\section{Introduction}
The well-known concept of domination in graphs was introduced by Berge \cite{Berge62}, modeling many facility location problems in Operations Research. Given a graph $G=(V,E)$, $N[v]$ denotes the closed neighborhood of the node $v \in V$. A set $D \subseteq V$ is called a \emph{dominating set of $G$} if $D \cap N[v] \neq \emptyset$ holds for every $v \in V$. Given a vector $w \in {\mathbb R}^V$ of node weights, the \emph{Minimum-Weighted Dominating Set Problem} (MWDSP for short) consists in finding a dominating set $D$ of $G$ that minimizes $\sum_{v\in D} w_v$.
MWDSP arises in many applications, involving the strategic placement of resources on the nodes of a network. As example, consider a computer network in which one wishes to choose a smallest set of computers that are able to transmit messages to all the remaining computers \cite{KratschEtAl93}.
Many other interesting examples include sets of representatives, school bus routing, $(r,d)$-configurations, placement of radio stations, social network theory, kernels of games, etc. \cite{HaynesEtAl98}.
The MWDSP is NP-hard for general graphs and has been extensively investigated from an algorithmic point of view (see, e.g., \cite{Bertossi84,Chang98,CorneilStewart90,Farber84}). In particular, efficient algorithms for the problem on interval and arc circular graphs are proposed in \cite{Chang98b}.
However, only a few results about the MWDSP have been established from a polyhedral point of view.
The \emph{dominating set polytope} associated with a graph $G$ is defined
as the convex hull of all incidence vectors of dominating sets in $G$.
In \cite{BouchakourEtAl08}
the authors provide a complete description of the dominating set polytope of cycles. As a generalization,
a description of the dominating set polytope associated with web graphs of the form
$W_{s(2k+1)+t}^{k}$, with $2\leq s \leq 3$, $0\leq t \leq s-1$, and $k \in {\mathbb N}$, is
presented in \cite{BianchiEtAl14b}.
Actually, the MWDSP can be regarded as a particular case of the \emph{Minimum-Weighted Set Covering Problem} (MWSCP). Given a $\{0,1\}$-matrix $A$ of order $m\times n$, a \emph{cover} of $A$ is a vector $x\in \{0,1\}^n$ such that $Ax\geq \mathbf{1}$, where $\mathbf{1} \in {\mathbb Z}^m$ is the vector having all entries equal to one.
The MWSCP consists in finding a cover of minimum weight with respect to a given weight vector $w \in {\mathbb R}^n$. This problem can be formulated as the integer linear program
$$
\min\setof{w^T x}{Ax \geq \mathbf{1}, x \in {\mathbb Z}^n_{+}}.
$$
The set $\Qi{A}:= \mathrm{conv}\setof{x \in {\mathbb Z}^n_{+}}{Ax \geq \mathbf{1} }$ is termed as the \emph{set covering polyhedron} associated with $A$.
The \emph{closed neighborhood matrix} of a graph $G$ is the square matrix $N[G]$
whose rows are the incidence vectors of the sets $N[v]$, for all $v \in V$. Observe that $x$ is the incidence vector of a dominating set of $G$ if and only if $x$ is a cover of $N[G]$. Therefore, solving the MWSCP on $N[G]$ is equivalent to solving the MWDSP on $G$. Moreover, the structure of the dominating set polytope of $G$ can be studied by considering the
set covering polyhedron associated with $N[G]$.
The closed neighborhood matrix of a web graph is a circulant matrix. More generally, the closed neighborhood matrix of a circular interval graph is a circular matrix (both terms are explained in more detail in the next section).
In this paper we are interested in studying the dominating set polytopes associated with circular interval graphs.
Another classic set optimization problem is the \emph{Set Packing Problem}: given a $\{0,1\}$-matrix $A$ of order $m\times n$, a \emph{packing} of $A$ is a vector $x \in \{0,1\}^n$ such that $Ax \leq \mathbf{1}$.
For a weight vector $w\in \mathbb R^n$, the \emph{Maximum-Weighted Set Packing Problem} (MWSPP) can
be stated as the integer linear program
$$
\max\setof{w^Tx}{x \in \{0,1\}^n, Ax \leq \mathbf{1}}.
$$
The polytope $\PPi{A}:= \mathrm{conv}\setof{x \in {\mathbb Z}_+^n}{ Ax \leq \mathbf{1}}$ is the \emph{set packing polytope} associated with $A$.
Set packing polyhedra have been extensively studied because of their relationship with the stable set polytope.
Indeed, given a graph $G$, a matrix $A$ can be defined whose rows
are incidence vectors of the maximal cliques in $G$. Conversely, given an arbitrary $\{0, 1\}$-matrix $A$,
the conflict graph $G$ of $A$ is defined as a graph having one node for each column of $A$ and
two nodes joined by an edge whenever the respective columns have scalar product
distinct from zero. In both cases, stable sets in $G$ correspond to packings of $A$.
In \cite{EisenbrandEtAl08,Stauffer05} the authors present a complete linear description of the stable set polytope of circular interval graphs,
which is equivalent to obtaining a complete linear description for the set packing polytope related to circular matrices.
The authors show that if $A$ is a circular matrix then $\PPi{A}$ is completely described by three classes of inequalities: non-negativity constraints, clique inequalities, and
\emph{clique family inequalities} introduced in \cite{Oriolo03}.
Moreover, facet inducing clique family inequalities are associated with subwebs of the circular interval graph \cite{Stauffer05}.
Actually, their results are stated for a more general packing polyhedron $\PPi{A,b}$, defined as the convex hull of non-negative integer solutions of the system $A x \leq b$, with $b\in {\mathbb Z}_{+}^m$ and $A$ a circular matrix.
In the covering case, a similar polyhedron $\Qi{A,b}$ can be defined as the convex hull of the integer points in $\Q{A,b}=\{x\in {\mathbb R}^n_+ : A x \geq b\}$, with $b\in {\mathbb Z}_{+}^m$. When $A$ is the closed neighborhood matrix of a graph, the extreme points of $\Qi{A,b}$ correspond to some variants of dominating sets in graphs. In particular, if $b= k \mathbf{1} $, they correspond to $\{k\}$-dominating functions
\cite{Bange} and, in the general case, they are related to $L$-dominating functions
\cite{Lee}. Considering the symmetry in the definition of $\PPi{A,b}$ and $\Qi{A,b}$, it is natural to ask if the ideas proposed in \cite{EisenbrandEtAl08,Stauffer05} can be applied in the covering context.
In this paper we present a complete linear description of $\Qi{A,b}$ for any circular matrix $A$ and any vector $b\in {\mathbb Z}^m_+$.
This yields a complete description of the polyhedron associated to $L$-dominating functions of circular interval graphs. The linear inequalities have a particular structure when $b= k \mathbf{1}$, which includes the case of $\{k\}$-dominating functions. Finally, if $k=1$, facet defining inequalities of $\Qi{A}$ provide a characterization of facets of the dominating set polytope on circular interval graphs. These inequalities are related to circulant minors of $A$.
In the light of previous results obtained by Chudnovsky and Seymour \cite{ChudnovskySeymour05},
the linear description presented in \cite{EisenbrandEtAl08,Stauffer05}
actually provided
the final piece for establishing a complete linear description of the stable set polytope for the
much broader class of quasi-line graphs.
The fact that the dominating set problem is known to be NP-hard already for the particular subclass of line-graphs \cite{Yanakakis}, discourages seeking for an analogous result regarding domination
on quasi-line graphs. Nonetheless, we present here some positive results for a prominent subclass of them.
Some results presented in this paper appeared without proofs in \cite{Torres15}.
\section{Preliminaries}
\label{sec:preliminares}
A \emph{circular-arc graph} is the intersection graph of a set of arcs on the circle, i.e., $G=(V,E)$ is a circular-arc graph if each node $v \in V$ can be associated with an arc $C(v)$ on the circle in such a way that $uv \in E$ if and only if $C(u)$ intersects $C(v)$. If additionally the family $\setof{C(v)}{v \in V}$ can be defined in such a way that no arc properly contains another, then $G$ is a \emph{proper circular-arc graph}. Proper circular-arc graphs are also termed as \emph{circular interval graphs} in \cite{ChudnovskySeymour08} and defined in a different, but equivalent manner:
take a finite set $V$ of points on a circle $C$ and a collection $\mathcal{I}$ of intervals from $C$.
Then, $V$ is the node set of $G$ and $u, v \in V$ are adjacent if and only if there is at least one interval in $\mathcal{I}$ containing both $u$ and $v$. Circular interval graphs are an important subclass of quasi-line graphs.
\emph{Web graphs} $W_n^k$ are regular circular interval graphs having node degree equal to $2k$.
For $n \in {\mathbb N}$, $[n]$ will denote the additive group defined on the set
$\tabulatedset{1, \ldots, n}$, with integer addition modulo $n$.
Given $a,b\in[n]$, let $t$ be the minimum non-negative integer such that $a+t=b \mod n$. Then, $[a,b]_n$ denotes the \emph{circular interval} defined by the set $\{a+s: 0\leq s \leq t\}$. Similarly, $(a,b]_n$, $[a,b)_n$, and $(a,b)_n$ correspond to $[a,b]_n\setminus \{a\}$, $[a,b]_n\setminus \{b\}$, and $[a,b]_n\setminus \{a,b\}$, respectively.
Unless otherwise stated, throughout this paper $A$ denotes a $\{0,1\}$-matrix of order $m\times n$.
Moreover, we consider the columns (resp. rows) of $A$ to be indexed by
$[n]$ (resp.~by $[m]$) and denote its entries by $a_{ij}$ with $i\in [m]$ and $j\in[n]$.
Two matrices $A$ and $A'$ are
\emph{isomorphic}, written as $A\approx A'$, if $A'$ can be
obtained from $A$ by a permutation of rows and columns.
In the context of this paper, a matrix $A$ is called \emph{circular} if, for every row $i \in [m]$,
there are two integer numbers $\ell_i, k_i\in [n]$ with $2\leq k_i\leq n-1$
such that the $i$-th row of $A$ is the incidence vector of the set $[\ell_i,\ell_i+k_i)_n$.
The following is an example of a $3\times 7$-circular matrix with $\ell_1=1, \ell_2= 2, \ell_3= 5, k_1= 3, k_2=5, $ and $k_3=5$:
$$A=\left(
\begin{array}{ccccccc}
1&1&1&0&0&0&0\\
0&1&1&1&1&1&0\\
1&1&0&0&1&1&1\\
\end{array}
\right)
$$
A row $i$ of $A$ is said to \emph{dominate} a row $\ell \neq i$ of $A$ if $a_{ij} \geq a_{\ell j}$, for all
$j \in [n]$. Moreover, a row is dominating if it dominates some other row.
A square circular matrix of order $n$ without dominating rows is called
a \emph{circulant matrix}. Observe that in this case
$k_i=k$ must hold for every row $i \in [n]$ and we can assume w.l.o.g. $\ell_i=i$ for all $i\in [n]$.
Such a matrix will be denoted by $\C{n}{k}$.
Given $N\subset [n]$, the \emph{minor of} $A$ \emph{obtained by
contraction of} $N$, denoted by $A/N$, is the submatrix of $A$ that
results after removing all columns with indices in $N$ and
all dominating rows.
In this work, anytime we refer to a minor of a matrix, we mean a minor obtained by
contraction. A minor of a matrix $A$ is called a \emph{circulant minor} if it is isomorphic to
a circulant matrix.
Circulant minors have an interesting combinatorial characterization in terms of circuits in a particular digraph \cite{Cornuejols94}.
In fact, given a circulant matrix $\C{n}{k}$, a directed auxiliary graph $G(\C{n}{k})$ is defined by considering $n$ nodes and arcs of the form $(i,i+k)$ and $(i,i+k+1)$ for every $i\in[n]$. The authors prove that if $N\subset [n]$ induces a simple circuit in $G(\C{n}{k})$, then the matrix $\C{n}{k}/N$ is
a circulant minor of $\C{n}{k}$. In a subsequent work, Aguilera \cite{Aguilera09} shows that $C_n^k/N$ is isomorphic to a circulant minor of $\C{n}{k}$ if and only if $N$ induces $d\geq 1$ disjoint simple circuits in $G(\C{n}{k})$, each one having the same number of arcs of length $k$ and $k+1$.
For a matrix $A$, the \emph{fractional set covering polyhedron} is given by
$\Q{A}:= \{x \in {\mathbb R}^n : Ax \geq \mathbf{1}, \, x \geq 0\}$.
The term \emph{boolean inequality} denotes each one of the inequalities
defining $\Q{A}$.
The \emph{covering number} $\tau(A)$ of $A$ is the minimum cardinality of a cover of $A$. When $A$ is the closed neighborhood matrix of a graph $G$, $\tau(A)$ coincides with the \emph{domination number} $\gamma(G)$ of $G$.
The inequality $\sum_{j=1}^n x_j\geq \tau(A)$ is called the \emph{rank constraint}, and it is always valid for $\Qi{A}$.
When $A=\C{n}{k}$ it is known that $\tau(\C{n}{k})=\left\lceil \frac{n}{k}\right\rceil$ and the rank constraint is a facet of $\Qi{\C{n}{k}}$ if and only if $n$ is not a multiple of $k$ \cite{Sassano}. In a more general sense, given a matrix $A$ and a vector $b\in {\mathbb Z}^m$, we define $\tau_b(A):=\min \{\mathbf{1}^T x : x\in \Qi{A,b}\}$ and the rank constraint $\sum_{j=1}^n x_j\geq \tau_b(A)$, which is always valid for $\Qi{A,b}$. When $A$ is the closed neighborhood matrix of a graph $G$ and $b=k\mathbf{1}$, $\tau_b(A)=\gamma_{\{k\}}(G)$ is the $\{k\}$-\emph{domination number} of $G$. For general $b\in {\mathbb Z}^m_+$, $\tau_b(A)$ is the $L$-\emph{domination number} of $G$, for the corresponding list $L$ associated to $b$.
The class of \emph{row family inequalities} (rfi) was proposed in \cite{ArgiroffoBianchi10}
as a counterpart to clique family inequalities in the set packing case \cite{Oriolo03}.
We describe them at next, slightly modified to fit in our
current notation.
Let
$F\subset [m]$ be a set of row indices of $A$, $s:=\card{F}\geq 2$,
$p \in [s-1]$ such that $s$ is not a multiple of $p$, and $r:= s-p \left\lfloor \frac{s}{p}\right\rfloor$.
Define the sets
$$
I(F, p) = \setof{j \in [n]}{\sum_{i \in F} a_{ij} \leq p}, \quad
O(F, p) = \setof{j \in [n]}{\sum_{i \in F} a_{ij} = p+1}.
$$
Then, the \emph{row family inequality} (rfi) \emph{induced by} $(F, p)$ is
\begin{equation} \label{eq:rfi}
(r+1) \!\!\! \!\!\sum_{j \in O(F, p)} \!\!\! x_j \, + \, r \!\!\!\!\! \sum_{j \in I(F, p)} \!\!\! x_j \geq r \ceil{\frac{s}{p}}.
\end{equation}
Row family inequalities generalize several previously known classes of valid inequalities
for $\Qi{A}$. However, in contrast to clique family inequalities, not all of them are
valid for $\Qi{A}$. In \cite{ArgiroffoBianchi10} it is proved that inequality \eqref{eq:rfi} is valid for $\Qi{A}$ if the following condition holds for every cover
$B$ of $A$:
\begin{equation} \label{eq:rfi-condition}
p \card{B \cap I(F, p)} + (p+1) \card{B \cap O(F, p)} \geq s.
\end{equation}
In particular, if $p^*:= \max_{j\in [n]} \sum_{i\in F} a_{ij}-1$, the row family inequality induced by $(F, p^*)$ is always valid for $\Qi{A}$. Throughout this article, we are going to refer to this inequality simply as the
\emph{row family inequality induced by $F$}.
In the particular case when $A=\C{n}{k}$, facet defining inequalities of $\Qi{C^k_n}$ related to circulant minors were studied in \cite{ArgiroffoBianchi09,BianchiEtAl14a,BianchiEtAl14b,TolomeiTorres15,Torres15}. Given $N \subset [n]$ such that
$\C{n}{k}/ N \approx \C{n'}{k'}$,
let $W:=\{j\in N : j-(k+1) \in N\}$.
Then, the inequality
\begin{equation}
\label{eq:minor-eq}
2 \sum_{j\in W} x_j + \sum_{j\notin W} x_j \geq \ceil{\frac{n'}{k'}}
\end{equation}
is valid for $\Qi{\C{n}{k}}$, and facet defining if $n'-k' \left\lfloor \frac{n'}{k'}\right\rfloor=1$.
This inequality is termed as the \emph{minor inequality} induced by $N$ \cite{ArgiroffoBianchi09,BianchiEtAl14a}.
For a general circular matrix $A$, if $A/ N \approx \C{n'}{k'}$ and $F \subset [m]$ is the set of
rows of $A/ N$, then the rfi induced by $F$ will be termed as \emph{minor related row family inequality}.
These inequalities were introduced in \cite{Torres15} for the specific case when $A$ is a circulant
matrix. In this setting, the inequalities can be seen as a generalization of the minor inequalities \eqref{eq:minor-eq}, as they have the form:
\begin{equation}
\label{eq:rfi-minor}
(r+1) \sum_{j\in W} x_j + r \sum_{j\notin W} x_j \geq r \left\lceil \frac{n'}{k'}\right\rceil,
\end{equation}
with $r= n'-k' \left\lfloor \frac{n'}{k'}\right\rfloor$.
In this paper we follow many
of the ideas proposed in \cite{EisenbrandEtAl08,Stauffer05} for describing the stable set polytope of circular interval graphs. Actually, the construction detailed below was originally presented by
Bartholdi, Orlin and Ratliff \cite{BartholdiEtAl80} in the context of an algorithm to solve the cyclic
staffing problem, which is equivalent to the task of minimizing a linear function over $\Qi{A,b}$.
We associate with a circular matrix the digraph defined as follows:
\begin{definition}\label{D(A)}
Given a
circular matrix $A$, let $\Aux{A}:=(V, E)$ where
$V:=[n]$ and $E$
is the union of the following four sets:
\begin{align*}
E_1^+&:=
\setof{a_i:= (\ell_i -1, \ell_i+k_i-1)}{ i\in [m]}, \\
E_2^{+} &:=
\setof{a_{m+j}:= (j -1, j)}{ j \in [n]}, \\
E_1^{-} &:=
\setof{\bar{a}_i:= (\ell_i+k_i-1, \ell_i -1)}{ i \in [m]}, \\
E_2^{-} &:=
\setof{\bar{a}_{m+j}:= (j, j-1)}{ j \in [n]}.
\end{align*}
The arcs in
$E_1^{+} \cup E_2^{+}$
are called \emph{forward arcs}, while the arcs in $E_1^{-} \cup E_2^{-}$
are \emph{reverse arcs}. Moreover, arcs in $E_1^{+} \cup E_1^{-}$ are termed as \emph{row arcs}, and
arcs in $E_2^{+} \cup E_2^{-}$ are \emph{short arcs}.
For any path $P$ in $D(A)$, $E(P)$ denotes the set of arcs from $P$, whereas $E^+_1(P)$, $E^+_2(P)$, $E^-_1(P)$, and $E^-_2(P)$ denote the sets $E^+_1\cap E(P)$, $E^+_2\cap E(P)$, $E^-_1\cap E(P)$, and $E^-_2\cap E(P)$, respectively.
The (oriented) \emph{length} $l(a_i)$ (resp.~$l(\bar{a}_i)$)
of an arc $a_i \in E_1^{+}$ (resp.~$\bar{a}_i \in E_1^{-}$) is equal to $k_i$ (resp.~to $-k_i$). Arcs in $E_2^{+}$
have length of 1, while the length of arcs in $E_2^{-}$ is equal to -1.
\end{definition}
Simple directed circuits in $D(A)$ play an important role in the description of the set packing polytope associated with $A$ and the more general packing polytope $\PPi{A,b}$
defined in the last section \cite{EisenbrandEtAl08,Stauffer05}. In this paper
we show that similar results hold for the corresponding covering polyhedra $\Qi{A}$ and $\Qi{A,b}$.
Throughout this article we will use the term \emph{circuit} to refer to a simple directed circuit.
Consider the invertible linear map $T: {\mathbb R}^n \to {\mathbb R}^n$ represented by a $\{0, 1, -1\}$-matrix $T$ having the elements on the diagonal all equal to 1, the elements on the first subdiagonal equal to -1 and all other elements equal to zero, i.e.,
\begin{equation}
\label{matrizT}
T := \left(
\begin{array}{cccc}
1 &&&\\
-1 & 1 &&\\
& \ddots & \ddots & \\
& & -1 & 1
\end{array}
\right).
\end{equation}
For a circular matrix $A$, let $\tilde{A}:=\binomio{A}{I} \in \{0,1\}^{(m + n) \times n}$, with $I$ being the identity matrix of order $n$.
If $B:= \tilde{A} T$ and
$M$ denotes the submatrix consisting of the first $n-1$ columns of $B$, it
is straightforward to verify that the node-arc incidence matrix $H$ of the digraph $\Aux{A}$ is given by
\begin{equation}
\label{eq:node-arc-incidence}
H:=
\left(
\arraycolsep=1.4pt\def\arraystretch{2.2}
\begin{array}{r|r}
M^T & - M^T \\[0.3\baselineskip]
\hline
-\mathbf{1}^T M^T & \mathbf{1}^T M^T
\end{array}
\right).
\end{equation}
The remainder of this article is structured as follows. In the next section, we
review some constructions and results presented in \cite{EisenbrandEtAl08,Stauffer05} in the context of the covering case.
In Section~\ref{sec:gamma-ineq-suff}
we introduce a class of valid inequalities for $\Qi{A,b}$ induced by circuits in $D(A)$
and show that these inequalities are sufficient for describing $\Qi{A,b}$, for any $b \in {\mathbb Z}_{+}^m$.
From this result we obtain a complete description of the polyhedron of $L$-dominating
functions on circular interval graphs.
In Section~\ref{sec:homog-rhs}
we consider polyhedra of the form $\Qi{A,k \mathbf{1}}$ with $k \in {\mathbb N}$, which include the set covering polyhedron as a particular case. We prove that, for this class of polyhedra,
the circuits in $D(A)$ inducing facet defining inequalities have no reverse row arcs.
In Section~\ref{sec:no-reverse-arcs} we further study the structure of such circuits, obtaining as a result that the corresponding inequalities have full support and only two consecutive positive integer coefficients.
In the particular case of the set covering problem, these inequalities are row family inequalities.
Finally, in Section~\ref{sec:set-covering-polyhedron} we prove that the relevant inequalities are related to circulant minors. As we have observed in the introduction, the description of $\Qi{A,k \mathbf{1}}$
yields a complete description of the polyhedra associated with $\{k\}$-dominating functions on circular interval graphs, whereas a complete description for the dominating set polytope on those graphs can be obtained from the description of $\Qi{A}$.
\section{Following the ideas of the packing case}
As we have mentioned, the study of the covering polyhedra of circular matrices closely follows the ideas proposed in \cite{EisenbrandEtAl08,Stauffer05} for the corresponding packing polytopes. Some of these ideas can be straightforwardly translated to the covering case. In this section we review them, including the corresponding proofs for the sake of completeness.
Given a circular matrix $A$, $b\in {\mathbb Z}_+^m$ and $\beta \in {\mathbb N}$, the \emph{slice of} $Q(A,b)$ \emph{defined by} $\beta$ is the polyhedron:
$$
\Qb{A,b}:= \Q{A,b} \cap \setof{x \in {\mathbb R}^n}{\mathbf{1}^T x =\beta}.
$$
Recall that $\tilde{A}=\binomio{A}{I}$, $B=\tilde{A} T$, and $M$ is the submatrix consisting of the first $n-1$ columns of $B$. Moreover let $d=\binomio{b}{\mathbf{0}}$ and let $v$ be the last column of $\tilde{A}$.
\begin{lemma}
\label{th:int-slices}
For any circular matrix $A$, $b\in {\mathbb Z}_+^m$ and $\beta \in {\mathbb N}$, the polytope $\Qb{A,b}$
is integral.
\end{lemma}
\begin{myproof}
Let $\RRb{A,b}$ be the image of $\Qb{A,b}$ under the inverse of matrix $T$ defined in \eqref{matrizT}, i.e.,
$\RRb{A,b}=\setof{T^{-1}x}{x\in \Qb{A,b}}$. Then if $y:= T^{-1}x$ we have
\begin{align*}
\RRb{A,b} &= \setof{y \in {\mathbb R}^n}{By \geq d, y_n = \beta} \\
&= \setof{y \in {\mathbb R}^n}{M \hat{y} + \beta v \geq d} \\
&= \setof{(\hat{y},\beta) \in {\mathbb R}^n}{M \hat{y} \geq d_{\beta}},
\end{align*}
where $d_{\beta}:= d - \beta v \in {\mathbb Z}^{m+n}$ and $\hat{y} \in {\mathbb R}^{n-1}$ denotes the vector obtained from $y$ by dropping its last coordinate $y_n$.
Since $M^T$ is a submatrix of the node-arc incidence matrix of digraph $D(A)$, it follows that $M$ is totally unimodular. Thus, $\RRb{A,b}$ is integral. Moreover, since $T$ maps integral points onto integral points, it follows that $\Qb{A,b}$ is also integral.
\end{myproof}
\begin{corollary}
\label{th:int-slices-cor}
If $A$ is a circular matrix and $b\in {\mathbb Z}_+^m$ then
$$
\Qi{A,b}= \mathrm{conv} \left( \bigcup\limits_{\beta \in {\mathbb N}} \Qb{A,b} \right).
$$
\end{corollary}
The last result states that the \emph{split-rank} of the polyhedron $\Q{A,b}$ is equal to
one. This fact can be used to address the problem of separating a point from $\Qi{A,b}$. To do that, we need the following definitions:
\begin{definition}
Let $A$ be a circular matrix,
$b\in {\mathbb Z}_+^m$ and $x^{*} \in \Q{A,b}$. Consider the \emph{slack vector} $s^{*}:= \tilde{A} x^{*} -d \geq 0$ and let $\mu:= \ceil{\mathbf{1}^T x^*} - \mathbf{1}^T x^*$. We define the cost vectors $c^{+}(x^*), c^{+}b(x^*) \in {\mathbb R}^{m+n}$ by
\begin{equation}
\label{eq:arc-costs}
\begin{aligned}
c^{+}(x^*) &:= \mu (s^{*} - (1 - \mu) v),\\
c^{-}(x^*) &:= (1 -\mu) (s^{*} + \mu v).
\end{aligned}
\end{equation}
\end{definition}
From now on, $D(A,x^*)$ denotes the digraph $D(A)$ with cost $c^+(x^*)$ on its forward arcs and cost $c^-(x^*)$ on its reverse arcs. For any path $P$ in $D(A)$, $c(P,x^*)$ denotes the cost of $P$ in $D(A,x^*)$.
\begin{remark}\label{noneg}
Observe that if $\mathbf{1}^T x^* \in {\mathbb Z}$ then $c^{+}(x^*)=0$, $c^{-}(x^*)= s^{*} \geq 0$ and
then $D(A, x^*)$ cannot contain a negative cost circuit.
\end{remark}
Non-negativity of all circuits in $D(A,x^*)$ is a sufficient condition for a point $x^*\in \Q{A,b}$ to be in $\Qi{A,b}$, as shown at next.
\begin{lemma} \label{th:gammanonegativo}
Let $A$ be a circular matrix,
$b\in {\mathbb Z}_+^m$ and $x^* \in \Q{A,b}$ such that every circuit in $D(A,x^*)$ has non-negative cost.
Then $x^* \in \Qi{A,b}$.
\end{lemma}
\begin{myproof}
From Corollary~\ref{th:int-slices-cor} it is sufficient to consider the case $\mathbf{1}^T x^* \notin {\mathbb Z}$.
Let $\tau^*:= \floor{\mathbf{1}^T x^*}$ and $\mu:= \tau^* + 1 - \mathbf{1}^T x^*>0$.
Since $D(A,x^*)$ does not contain a negative cost circuit, there exists a vector of \emph{node potentials} $z \in {\mathbb R}^n$,
such that $z_i - z_{j}$ is at most the cost of arc $(i,j)$,
for every arc $(i,j) \in E$ (see, e.g. \cite[Chapter 2]{CookEtAl98} for a proof of this well-known result).
Considering the node-arc incidence matrix $H$ defined in \eqref{eq:node-arc-incidence}, this property
can be written as
$$H^T z \leq \binomio{c^{+}(x^*)}{c^{+}b(x^*)}$$
or equivalently,
\begin{equation}
\label{eq:potential-z}
-c^-(x^*)\leq M \hat{z} - z_n M \mathbf{1} \leq c^+(x^*),
\end{equation}
where $\hat{z}$ denotes the vector obtained from $z$ by dropping its last coordinate $z_n$.
Now define
$$
\begin{aligned}
x^1&:= x^* - \frac{1}{\mu}Tz + \frac{z_n}{\mu} e_1 - (1 - \mu) e_n, \\
x^2&:= x^* + \frac{1}{1-\mu}Tz - \frac{z_n}{1-\mu} e_1 + \mu e_n.
\end{aligned}
$$
It is straightforward to verify that $x^* = \mu x^1 + (1- \mu) x^2$. Thus, if $x^1, x^2 \in \Qi{A,b}$ then
$x^* \in \Qi{A,b}$ follows from convexity of this polyhedron. Moreover, since $\mathbf{1}^T T z = z_n$, we have
$\mathbf{1}^T x^1 = \mathbf{1}^T x^* - (1 - \mu) = \tau^*\in {\mathbb N}$ and $\mathbf{1}^T x^2 = \mathbf{1}^T x^* + \mu = \tau^* + 1\in {\mathbb N}$.
Hence, due to Corollary~\ref{th:int-slices-cor}, it suffices to show that $x^1, x^2 \in \Q{A,b}$. Indeed,
\begin{align*}
\tilde{A} x^1 &= \tilde{A} x^* - \frac{1}{\mu}(\tilde{A} Tz - z_n \tilde{A} e_1) - (1 - \mu) \tilde{A} e_n, \\
&= \tilde{A} x^* - \frac{1}{\mu}\left(M\hat{z} - z_n(\tilde{A} e_1 - v) \right) - (1 - \mu) v, \\
&= \tilde{A} x^* - \frac{1}{\mu}(M\hat{z} - z_n M \mathbf{1}) - (1 - \mu) v, \\
& \geq \tilde{A} x^* - \frac{1}{\mu}c^+(x^*) - (1 - \mu) v= \tilde{A} x^* - s^*= d
\end{align*}
where the third equality follows from the fact that
$$M \mathbf{1} = B (\mathbf{1} - e_n) = \tilde{A} T (\mathbf{1} - e_n) = \tilde{A} T \mathbf{1} - v = \tilde{A} e_1 - v,$$
and the inequality in the fourth row is obtained from \eqref{eq:potential-z}. With a similar argument, $\tilde{A} x^2 \geq d$ follows, and the proof is completed.
\end{myproof}
As an immediate consequence of the previous result, if $x^*\in\Q{A,b}\setminus\Qi{A,b}$ then there exists a negative cost circuit in $D(A,x^*)$. It follows that, similarly to what was observed in \cite{EisenbrandEtAl08} in the packing context, the membership problem for $\Qi{A,b}$ can be reduced to a minimum cost circulation problem in $D(A)$.
In the next section, we present valid inequalities for $\Qi{A,b}$ associated with circuits in $D(A)$. We will see that, given $x^*\in \Q{A,b}$, if $c(\Gamma, x^*)<0$ holds for some circuit $\Gamma$ in $D(A,x^*)$, then there is a separating split cut for $x^*$ associated with $\Gamma$. In this way, we will prove that $\Qi{A,b}$ can be described by these \emph{circuit inequalities} together with the inequalities defining $\Q{A,b}$.
\section{A complete linear description of $\Qi{A,b}$}
\label{sec:gamma-ineq-suff}
Consider a circular matrix $A$ and the directed graph $D(A)$ defined in Section \ref{sec:preliminares}. Recall from Definition \ref{D(A)} that for any arc $a$ in $D(A)$, $l(a)$ denotes its (oriented) length.
Given a closed directed (not necessarily simple) path $\Gamma=(V(\Gamma),E(\Gamma))$ in $D(A)$, its
\emph{winding number} is the integer
$p(\Gamma)$ such that:
$$
p(\Gamma) \, n = \sum_{a \in E(\Gamma)} \!\! l(a).
$$
For every $i\in [m]$, let $P_i^+$ (resp. $P_i^-$) be the path of short forward (resp. reverse) arcs in $D(A)$ connecting $l_i-1$ with $l_i+k_i-1$ (resp. $l_i+k_i-1$ with $l_i-1$).
We say that a forward row arc $a_i=(\ell_i -1, \ell_i+k_i-1) \in E_1^{+}$ \emph{jumps over} a node $j \in V$
if $j \in [\ell_i, \ell_i+k_i)_n$. Note that the only forward short arc jumping over $j$ is the arc $(j-1,j)$. A reverse arc is said
to jump over a node $j \in [n]$ if and only if the corresponding (antiparallel) forward arc jumps over $j$.
Let $p^{+}(\Gamma, j)$ and $p^{-}(\Gamma, j)$ denote the number of forward and reverse arcs of $\Gamma$ jumping
over a node $j \in [n]$, respectively. We have the following result:
\begin{lemma}
\label{th:winding-number}
Let $A$ be a circular matrix and $\Gamma$ be a closed directed path in $D(A)$.
For any $j \in [n]$, $$p^{+}(\Gamma, j) - p^{-}(\Gamma, j)=p(\Gamma).$$
\end{lemma}
\begin{myproof}
Let us start with the case when $\Gamma$ has only short arcs. For any $j\in [n]$, the arcs in $\Gamma$ that may leave $j$ are $(j,j+1)$ and $(j,j-1)$. Then the number of arcs in $\Gamma$ leaving $j$ is $p^{+}(\Gamma, j+1)+ p^{-}(\Gamma, j)$. Similarly, the number of arcs in $\Gamma$ entering $j$ is $p^{+}(\Gamma, j)+ p^{-}(\Gamma, j+1)$. Since $\Gamma$ is a closed path, we have:
$$p^{+}(\Gamma, j+1)+ p^{-}(\Gamma, j)= p^{+}(\Gamma, j)+ p^{-}(\Gamma, j+1)$$
or, equivalently,
$$p^{+}(\Gamma, j)- p^{-}(\Gamma, j)= p^{+}(\Gamma, j+1) - p^{-}(\Gamma, j+1).$$
Hence, $\gamma= p^{+}(\Gamma, j)- p^{-}(\Gamma, j)$ is a fixed value for all $j\in [n]$.
For each $j\in [n]$, let $\delta^+(j)$ be the set of arcs of $\Gamma$ leaving $j$. We have:
\begin{align*}
p(\Gamma) \, n &= \sum_{a \in E(\Gamma)} \!\! l(a)= \sum_{j\in [n]} \sum_{a\in \delta^+(j)} \!\! l(a)= \sum_{j\in [n]} \!\! [p^{+}(\Gamma, j+1)-p^{-}(\Gamma, j)]\\
&= \sum_{j\in [n]} \!\! p^{+}(\Gamma, j+1)- \sum_{j\in [n]} \!\! p^{-}(\Gamma, j)=
\sum_{j\in [n]} \!\! [p^{+}(\Gamma, j)-p^{-}(\Gamma, j)]= n \gamma.
\end{align*}
Then, $p(\Gamma) = \gamma= p^{+}(\Gamma, j)-p^{-}(\Gamma, j)$ for all $j\in [n]$.
Now consider any closed path $\Gamma$ in $D(A)$ and let $\Gamma'$ be the path obtained from $\Gamma$ by replacing each forward row arc $a_i= (\ell_i -1, \ell_i+(k_i-1))$
(resp. reverse row arc $\bar{a}_i= (\ell_i+(k_i-1), \ell_i -1)$)
by the path $P_i^+$ (resp. $P_i^-$). Observe that $p(\Gamma')= p(\Gamma)$.
Moreover, for each $j\in [n]$, as each row arc jumping over $j$ is replaced by a path containing exactly one short arc jumping over $j$, we have $p^{+}(\Gamma, j) = p^{+}(\Gamma', j)$ and $p^{-}(\Gamma, j) = p^{-}(\Gamma', j)$.
This completes the proof.
\end{myproof}
Given a closed directed path $\Gamma$ in $D(A)$,
denote by $\pi_{\scriptscriptstyle \! +} \in {\mathbb Z}_+^{m+n}$ (resp. $\pi_{\scriptscriptstyle \! +}b\in {\mathbb Z}_+^{m+n}$) the vector whose components are the number of times each forward (resp. reverse) arc in $D(A)$ occurs in $\Gamma$. In particular, if $\Gamma$ is a circuit then $\pi_{\scriptscriptstyle \! +}, \pi_{\scriptscriptstyle \! +}b \in \{ 0, 1\}^{m+n}$. Observe that, for any $j\in [n]$, if $e_j$ is the $j$-th canonical vector, $p^{+}(\Gamma, j) = \pi_{\scriptscriptstyle \! +}^T \tilde{A} e_j$ and $p^{-}(\Gamma, j) = \pi_{\scriptscriptstyle \! +}b^T \tilde{A} e_j$, where $\tilde{A}=\binomio{A}{I}$. Hence, as a consequence of Lemma \ref{th:winding-number}, we have:
\begin{corollary}
\label{th:parameter-properties}
Let $A$ be a circular matrix. Then, for any circuit $\Gamma$ in $D(A)$,
$$
(\pi_{\scriptscriptstyle \! +} - \pi_{\scriptscriptstyle \! +}b)^T \tilde{A} = p(\Gamma) \mathbf{1}^T.
$$
\end{corollary}
As a consequence of Lemma \ref{th:gammanonegativo}, for any $x^*\in \Q{A,b}\setminus \Qi{A,b}$, there exists a negative cost circuit in $D(A,x^*)$. As we see next, this circuit has positive winding number.
\begin{lemma}\label{ppositivo}
Let $A$ be a circular matrix, $b\in {\mathbb Z}_+^m$, $x^*\in \Q{A,b}\setminus \Qi{A,b}$, and $\Gamma$ a circuit with negative cost in $D(A,x^*)$. Then $p(\Gamma)>0$.
\end{lemma}
\begin{myproof}
Let $p=p(\Gamma)$. From definition we have that $c(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +} ^Tc^{+}(x^*) + \pi_{\scriptscriptstyle \! +}b^T c^{+}b (x^*)$. Since $c^{+}(x^*) + c^{+}b (x^*)=s^*$, we have
$$c(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}b^T s^* + (\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T) c^{+} (x^*).$$
In addition
$$(\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T) c^{+} (x^*)= (\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T)(\mu s^*-\mu (1-\mu)v).$$
Since $(\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T)v=p$ holds from Corollary \ref{th:parameter-properties}, we have that
$$c(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}b^T s^* + \mu (\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T)s^* -\mu(1-\mu)p$$
or, equivalently,
$$c(\Gamma, x^*) = (1-\mu)\pi_{\scriptscriptstyle \! +}b^T s^* + \mu \pi_{\scriptscriptstyle \! +}^T s^* -\mu(1-\mu)p.$$
Observe that $\pi_{\scriptscriptstyle \! +}b^T s^*\geq 0$, $\pi_{\scriptscriptstyle \! +}^T s^*\geq 0$ and $0<\mu<1$. It follows that
$$c(\Gamma, x^*) \geq -\mu(1-\mu)p.$$
If $p\leq 0$ then $c(\Gamma, x^*)\geq 0$, contradicting the assumption. Therefore, $p>0$.
\end{myproof}
Given a circuit $\Gamma$ in $D(A)$, $b\in {\mathbb Z}_+^m$, and $d=\binomio{b}{0}$, we introduce the following parameters:
\begin{align*}
t(\Gamma,b)&:= \sum_{i\in E^+_1(\Gamma)} \!\!\! b_i - \sum_{i\in E^-_1(\Gamma)}\!\!\!b_i= (\pi_{\scriptscriptstyle \! +} - \pi_{\scriptscriptstyle \! +}b)^T d,\\
\beta(\Gamma,b)&:= \floor{\frac{t(\Gamma,b)}{p(\Gamma)}}, \text{ and }\\
r(\Gamma,b)&:= t(\Gamma,b) - \beta(\Gamma,b) p(\Gamma).
\end{align*}
The parameters above are involved in the next definition:
\begin{definition}
\label{gammaineq}
Let $A$ be a circular matrix, $b\in {\mathbb Z}^m_+$ and $\Gamma$ be a circuit in $D(A)$. If
$\beta=\beta(\Gamma,b)$ and $r=r(\Gamma,b)$,
the $\Gamma$-inequality is defined as
\begin{equation}
\label{eq:gamma-ineq}
\sum_{j\in [n]} [p^-(\Gamma,j)+r] \, x_j \geq r \, (\beta +1)+ \sum_{i\in E^-_1(\Gamma)}\!\!\!b_i.
\end{equation}
We say that an inequality is a circuit inequality if it is a $\Gamma$-inequality for some circuit $\Gamma$ in $D(A)$.
\end{definition}
The next result shows that every $\Gamma$-\emph{inequality} with $p(\Gamma) > 0$ is valid for the slices $Q_{\beta}(A,b)$ and $Q_{\beta+1}(A,b)$ for $\beta=\beta(\Gamma,b)$, i.e., it is a disjunctive cut for $\Q{A,b}$ and hence valid for $\Qi{A,b}$.
\begin{theorem}\label{validas}
Let $A$ be a circular matrix, $b\in {\mathbb Z}_+^m$ and $\Gamma$ be a circuit in $D(A)$ with $p(\Gamma) > 0$. Then, the $\Gamma$-\emph{inequality}
is valid for $\Qi{A,b}$.
\end{theorem}
\begin{myproof}
In the following we denote $p(\Gamma)$, $t(\Gamma, b)$, $\beta(\Gamma,b)$, and $r(\Gamma,b)$ simply by $p$, $t$, $\beta$, and $r$, respectively. Recall that $t= (\pi_{\scriptscriptstyle \! +} - \pi_{\scriptscriptstyle \! +}b)^T d$ and let $t^-= \sum_{i\in E^-_1(\Gamma)}b_i= \pi_{\scriptscriptstyle \! +}b^T d$. Moreover, observe that $p>0$ implies $r\geq 0$ and $t<p (\beta+1)$.
Let $x^*$ be an extreme point of $\Qi{A,b}$. Applying $\pi_{\scriptscriptstyle \! +}b$ as a vector of multipliers on the system $\tilde{A} x^* \geq d$, we obtain
\begin{equation} \label{ineq}
\pi_{\scriptscriptstyle \! +}b^T \tilde{A} x^* \geq \pi_{\scriptscriptstyle \! +}b^T d.
\end{equation}
Since $\beta=\left\lfloor \frac{t}{p}\right\rfloor \in {\mathbb Z}$, $x^*$ satisfies the disjunction
$$
\mathbf{1}^T x^* \geq \beta + 1 \qquad
\text{ or } \qquad
\mathbf{1}^T x^* \leq \beta.
$$
Assume at first that $\mathbf{1}^T x^* \geq \beta + 1$.
Multiplying this inequality by the non-negative factor $r$ and adding it to inequality \eqref{ineq} yields
\begin{equation}
\label{eq:proof-gamma-ineq1}
\pi_{\scriptscriptstyle \! +}b^T \tilde{A} x^* + r \mathbf{1}^T x^* \geq \pi_{\scriptscriptstyle \! +}b^T d + (\beta + 1)r = t^- + (\beta +1)r.
\end{equation}
By using the fact that $\pi_{\scriptscriptstyle \! +}b^T \tilde{A} e_j=p^{-}(\Gamma, j)$ for every $j\in[n]$, we conclude that
$x^*$ satisfies the $\Gamma$-inequality (\ref{eq:gamma-ineq}).
Now suppose $\mathbf{1}^T x^* \leq \beta$. Multiplying this inequality by the negative factor $t - (\beta + 1) p$
and adding the valid inequality $\pi_{\scriptscriptstyle \! +}^T \tilde{A} x^* \geq \pi_{\scriptscriptstyle \! +}^T d$, we
obtain
\begin{equation}
\label{eq:proof-gamma-ineq}
\pi_{\scriptscriptstyle \! +}^T \tilde{A} x^* + [t - (\beta+1) p] \mathbf{1}^T x^* \geq \pi_{\scriptscriptstyle \! +}^T d + \beta [t - (\beta+1) p].
\end{equation}
By Corollary~\ref{th:parameter-properties} we have that $\pi_{\scriptscriptstyle \! +}^T \tilde{A} x^*= \pi_{\scriptscriptstyle \! +}b^T \tilde{A} x^* + p \mathbf{1}^T x^*$ and then the left-hand side of this inequality is:
$$\pi_{\scriptscriptstyle \! +}^T \tilde{A} x^* + [t - (\beta+1) p] \mathbf{1}^T x^* =
\pi_{\scriptscriptstyle \! +}b^T \tilde{A} x^* + r \mathbf{1}^T x^*.$$
Moreover, as $\pi_{\scriptscriptstyle \! +}^T d = t+t^-$ the right-hand side of \eqref{eq:proof-gamma-ineq} can be written as:
\begin{align*}
\pi_{\scriptscriptstyle \! +}^T d + \beta [t - (\beta+1) p]
& = t+t^-+ \beta t - \beta(\beta +1) p \\
& = t^- + (\beta + 1)r.
\end{align*}
Hence, $x^*$ does also fulfill \eqref{eq:gamma-ineq} when $\mathbf{1}^T x^* \leq \beta$.
\end{myproof}
The following lemma establishes a necessary condition for a circuit $\Gamma$ so that the $\Gamma$-inequality defines a facet of $\Qi{A,b}$.
\begin{lemma} \label{rem:non-redundant}
Let $\Gamma$ be a circuit in $D(A)$ such that the $\Gamma$-inequality induces a facet of $\Qi{A,b}$. Then,
\begin{equation}
\label{eq:cond-gamma}
p(\Gamma) \mbox{ does not divide }
t(\Gamma,b) \text{ and } 2 \leq p(\Gamma) \leq
t(\Gamma,b)-1.
\end{equation}
In particular, every circuit inequality defining a facet of $\Qi{A,b}$ has full support (i.e., nonzero coefficients for all variables).
\end{lemma}
\begin{myproof}
Note that if $p(\Gamma)$ divides $t(\Gamma,b)$ then $r(\Gamma, b)=0$ and the $\Gamma$-inequality \eqref{eq:gamma-ineq} reduces to
$\pi_{\scriptscriptstyle \! +}b^T \tilde{A} x \geq t^{-}(\Gamma)$, which is redundant since it is $\tilde{A} x \geq d$ multiplied by the vector $\pi_{\scriptscriptstyle \! +}b^T$. It follows that $p(\Gamma) \geq 2$.
Moreover, if
$\beta \leq 0$, the inequality $\mathbf{1}^T x \geq \beta +1$ is implied by any of the inequalities in the system $Ax \geq b$ and the $\Gamma$-inequality is valid for $\Q{A,b}$. Thus, $\beta \geq 1$ and since $p(\Gamma)\geq 2$ we obtain
$p(\Gamma) \leq t(\Gamma,b)-1$.
\end{myproof}
Observe that if $x^*\in \Q{A,b}\setminus \Qi{A,b}$ and $\Gamma$ is a circuit with negative cost in $D(A,x^*)$, then from Lemma \ref{ppositivo} and Theorem \ref{validas} the $\Gamma$-inequality is valid for $\Qi{A,b}$.
We see next that $x^*$ violates this inequality.
\begin{lemma}
\label{5prima}
Let $A$ be a circular matrix, $b\in {\mathbb Z}_+^m$, $x^*\in \Q{A,b}\setminus \Qi{A,b}$ and $\Gamma$ be a circuit with negative cost in $D(A,x^*)$. Then, the $\Gamma$-inequality is
violated by $x^*$.
\end{lemma}
\begin{myproof}
In the following we denote $p(\Gamma)$, $t(\Gamma, b)$, $\beta(\Gamma,b)$, and $r(\Gamma,b)$ simply by $p$, $t$, $\beta$, and $r$, respectively. Recall that $t= (\pi_{\scriptscriptstyle \! +} - \pi_{\scriptscriptstyle \! +}b)^T d$ and let $t^-= \sum_{i\in E^-_1(\Gamma)}b_i= \pi_{\scriptscriptstyle \! +}b^T d$.
Let us call $f(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}b^T \tilde{A} x^* - t^- + r (\mathbf{1}^T x^* - \beta - 1)$.
It is easy to see that $x^*$ violates the $\Gamma$-inequality \eqref{eq:gamma-ineq} if and only if $f(\Gamma, x^*) <0$.
We will prove that $f(\Gamma, x^*)= c(\Gamma, x^*)$.
On one hand, we have
\begin{align}
f(\Gamma, x^*) &= \pi_{\scriptscriptstyle \! +}b^T \tilde{A} x^* - t^- + r (\mathbf{1}^T x^* - \beta - 1), \nonumber \\
&= \pi_{\scriptscriptstyle \! +}b^T (\tilde{A} x^* - d) - r (\beta + 1 - \mathbf{1}^T x^*), \nonumber \\
&= \pi_{\scriptscriptstyle \! +}b^T s^* - r (\beta + 1 - \mathbf{1}^T x^*). \label{eq:costo-gamma-3}
\end{align}
On the other hand, we have already seen in Lemma \ref{ppositivo} that
$$c(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}b^T s^* + (\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T)(\mu s^*-\mu (1-\mu)v).$$
From Corollary~\ref{th:parameter-properties} it follows that $\pi_{\scriptscriptstyle \! +}b^T s^{*} = \pi_{\scriptscriptstyle \! +}^T s^{*} - p \mathbf{1}^T x^{*} + t$. Hence, we can write
\begin{equation}
\label{eq:tau}
c(\Gamma, x^*)=\pi_{\scriptscriptstyle \! +}b^T s^*-\mu[t-(\mathbf{1}^T x^*+\mu-1)p].
\end{equation}
Similarly, if we consider that
$$c(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}^T s^* - (\pi_{\scriptscriptstyle \! +}^T-\pi_{\scriptscriptstyle \! +}b^T) c^{+}b(x^*),$$
it is not hard to see that
\begin{equation}
\label{eq:tau+uno}
c(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}^T s^*- (1-\mu)[p(\mathbf{1}^T x^*+\mu)-t].
\end{equation}
Let $\tau^*:= \floor{\mathbf{1}^T x^*}$, i.e., $\mathbf{1}^T x^* + \mu = \tau^* + 1$. Since $c(\Gamma, x^*)<0$, it follows from \eqref{eq:tau} that $t - \tau^* p > 0$ or, equivalently, $\tau^* < \frac{t}{p}$.
Similarly, from \eqref{eq:tau+uno} we obtain $t-(\tau^*+1) p < 0$, i.e., $\tau^* + 1 > \frac{t}{p}$.
Thus, $\tau^* = \floor{\frac{t}{p}} = \beta$.
This fact together with \eqref{eq:costo-gamma-3} and \eqref{eq:tau} imply
$$f(\Gamma, x^*) = \pi_{\scriptscriptstyle \! +}b^T s^* - (\tau^* + 1 - \mathbf{1}^T x^*) r
= \pi_{\scriptscriptstyle \! +}b^T s^* - \mu (t - \tau^* p)
= c(\Gamma, x^*)<0.$$
\end{myproof}
As a consequence of the previous results we obtain a complete linear description of $\Qi{A,b}$.
\begin{theorem}
\label{th:complete-description}
For any circular matrix $A$ and any vector $b \in {\mathbb Z}_+^m$, the polyhedron $\Qi{A,b}$ is completely
described by the inequalities defining $\Q{A,b}$ and circuit inequalities induced by circuits $\Gamma$ in $\Aux{A}$ with $p(\Gamma)\geq 2$ such that $p(\Gamma)$ does not divide $t(\Gamma,b)$.
\end{theorem}
Observe that the above theorem and Lemma \ref{rem:non-redundant} imply that any facet of $\Qi{A,b}$ not in the system $Ax\geq b, x\geq 0$ must have full support. This fact has already been observed in \cite{ArgiroffoBianchi09} for the particular case of the set covering polyhedron related to circulant matrices. In contrast, there are non-boolean facets of the packing polytope of circular matrices which do not have full support.
As a further consequence of Theorem \ref{th:complete-description} and the polynomiality of the Minimum Cost Circulation Problem,
we also obtain that the Weighted $L$-Domination Problem is polynomial-time solvable on circular interval graphs.
\section{The case of homogeneous right-hand side}
\label{sec:homog-rhs}
In this section, we consider polyhedra $\Qi{A,b}$ for a circular matrix $A$ and $b=\alpha \mathbf{1}$ with $\alpha \in {\mathbb N}$.
Observe that for this class of polyhedra, dominating rows of the matrix $A$ are associated with redundant constraints.
For this reason, in the remainder of this article we always assume that $A$ has no dominating rows.
We will prove that in this case, relevant circuit inequalities are induced by circuits in $\Aux{A}$ without reverse row arcs.
Recall that, for every $i\in [m]$, $P_i^+$ (resp. $P_i^-$) is the path of short forward (resp. reverse) arcs connecting $l_i-1$ with $l_i+k_i-1$ (resp. $l_i+k_i-1$ with $l_i-1$).
We start with the following result.
\begin{lemma}
\label{th:path-cost}
Let $A$ be a circular matrix and
$x^* \in \Q{A,\alpha \mathbf{1}}$ with $\alpha \in {\mathbb N}$.
Then, for every $i\in [m]$ the following statements hold in $D(A,x^*)$:
\begin{enumerate}
\item[(i)]
The cost of the forward row arc $a_i=(\ell_i -1, \ell_i + k_i - 1)$ is smaller than the cost of $P^+_i$ by the amount $\mu \alpha$; i.e., $c^+_i(x^*)-c^+(P^+_i,x^*)=-\mu \alpha$.
\item[(ii)]
The cost of the reverse row arc $\bar{a}_i=(\ell_i + k_i - 1,\ell_i -1)$ is smaller
than the cost of $P^-_i$ by the amount $(1 - \mu)\alpha$; i.e., $c^-_i(x^*)-c^-(P^-_i,x^*)=-(1 - \mu)\alpha$.
\end{enumerate}
\end{lemma}
\begin{myproof}
Let $i\in [m]$ and $u:= e_i - \sum_{j= \ell_i}^{\ell_i + k_i - 1} e_{m+j} \in {\mathbb R}^{m+n}$, where $e_k$ denotes the $k$-th canonical vector in ${\mathbb R}^{m+n}$.
Let us first prove that,
$u^T s^* =u^T(\tilde{A} x^* - d)= - \alpha$ and $u^T v =0$. Indeed,
$$
u^T \tilde{A} = u^T \binomio{A}{I} = e_i^T A - \sum_{j= \ell_i}^{\ell_i + k_i - 1} e_j^T = \mathbf{0}^T,
$$
as the $i$-th row of $A$ is the incidence vector of
$[\ell_i, \ell_i + k_i)_n \subset [n]$. Thus,
\begin{equation}
\label{us}
u^T s^* = -u^T d = -u^T \binomio{\alpha \mathbf{1}}{0} = - \alpha.
\end{equation}
Let us now analyze the product $u^T v$. Since $v$ is the last column of the matrix $\tilde{A}$, we have that $v_i = 1$ if and only if $n \in [\ell_i, \ell_i + k_i)_n$. Furthermore, $v_{m+j}=1$ if and only if $j=n$. Hence,
\begin{equation}
\label{uv}
u^T v = v_i - \sum_{j= \ell_i}^{\ell_i + k_i - 1} v_{m+j}= 0.
\end{equation}
Finally, since $c^{+}(x^*) = \mu s^* - \mu (1- \mu) v$ and $c^+(P^+_i,x^*)= \sum_{j= \ell_i}^{\ell_i + k_i - 1} c^{+}_{m+j}(x^*)$,
$$c^{+}_i(x^*) - c^+(P^+_i,x^*) = u^T c^{+}(x^*) = \mu (u^T s^*) - \mu (1 - \mu) (u^T v),$$
replacing (\ref{us}) and (\ref{uv}) in the last equation
we have:
$$c^{+}_i(x^*) - c^+(P^+_i,x^*) = - \mu \alpha.$$
The proof of part (ii) is similar, considering that $c^{+}b(x^*) = (1 - \mu) s^* + \mu (1- \mu) v$.
\end{myproof}
To prove that circuit inequalities induced by circuits with reverse row arcs are redundant, we
state at first the following result.
\begin{lemma}
\label{th:no-reverse}
Let $A$ be a circular matrix and $\Gamma$ be a circuit in $\Aux{A}$ with $p(\Gamma) > 0$. Let $\alpha \in {\mathbb N}$ and $x^* \in \Q{A,\alpha \mathbf{1}}$. Then, there exists a circuit $\Gamma'$ without
reverse row arcs, such that $p(\Gamma') > 0$ and $c(\Gamma', x^*) \leq c(\Gamma, x^*)$.
\end{lemma}
\begin{myproof}
If $\Gamma$ contains no reverse row arcs then $\Gamma'=\Gamma$.
Otherwise, let $\bar{a} \in E_1^{-}(\Gamma)$.
Observe that in this case $\Gamma$ must also contain at least one forward row arc. Indeed, from $p(\Gamma) >0$ and Lemma~\ref{th:winding-number} it follows that any node jumped over by $\bar{a}$ must also be jumped over by at least two forward arcs. But then, since $\Gamma$ is simple, at least one of these has to be a row arc.
Hence, we can choose two row arcs $\bar{a}_i, a_r \in E(\Gamma)$, $i\neq r$,
such that $a_r$ is the first row arc preceding $\bar{a}_i$ in $\Gamma$. Then, the circuit must
contain a simple path $P$ from $\ell_r +k_r -1$ to $\ell_i + k_i - 1$, consisting only of short arcs.
We distinguish between the two possible cases.
\emph{Case (i): $P$ contains only reverse arcs.} Figure~\ref{fig:grafo-case-i} depicts this situation.
\begin{figure}
\caption{Case (i): $P$ consists of arcs from $E_2^{-}$.}
\label{fig:grafo-case-i}
\end{figure}
Observe that in this case $\ell_r -1$ is jumped by $\bar{a}_i$, as otherwise, row $r$ dominates row $i$ in $A$.
Let $P_1$ be the path from $\ell_r -1$ to $\ell_i -1$ in $\Gamma$, i.e. $P_1$ is the concatenation of
$a_r$, $P$ and $\bar{a}_i$.
Consider the alternative path $P_2$ in $D(A)$ that connects $\ell_r -1$ with $\ell_i -1$ using only reverse short arcs.
Define $\Phi$ to be the closed (not necessarily simple) path obtained from $\Gamma$ by replacing $P_1$ by $P_2$. Clearly, $\Phi$ has the same winding number and one fewer reverse row arc than $\Gamma$. Moreover, the cost $c(\Phi, x^*)$ is smaller than or equal to the cost $c(\Gamma, x^*)$. Indeed,
\begin{align*}
c(\Gamma, x^*) - c(\Phi, x^*) &= c(P_1, x^*) - c(P_2, x^*) \\
&= c^{+}_r + c(P,x^*) + c^{+}b_i - c(P_2, x^*)\\
&= c^{+}_r + c^{+}b_i + (c(P,x^*)- c(P_2, x^*)).
\end{align*}
Observe that
$$ c(P,x^*)- c(P_2, x^*) = c(P^-_r,x^*)- c(P^-_i, x^*).$$
By Lemma \ref{th:path-cost} (ii), $c(P^-_r,x^*)- c(P^-_i, x^*)= c^{+}b_r-c^{+}b_i$. Then,
$$
c(\Gamma, x^*) - c(\Phi, x^*)= c^{+}_r + c^{+}b_i + c^{+}b_r-c^{+}b_i = c^{+}_r+c^{+}b_r= s^{*}_r \geq 0.
$$
\emph{Case (ii): $P$ contains only forward arcs.} This situation is shown in Figure~\ref{fig:grafo-case-ii}.
\begin{figure}
\caption{Case (ii): $P$ consists of arcs from $E_2^{+}$.}
\label{fig:grafo-case-ii}
\end{figure}
In this case, $\ell_i-1$ is jumped over by $a_r$, as otherwise row $i$ dominates row $r$. Again, let $P_1$ be the path from $\ell_r -1$ to $\ell_i -1$ in $\Gamma$, and consider the alternative path $P_2$ in $D(A)$ consisting only of arcs from $E_2^{+}$.
Let $\Phi$ be the closed (not necessarily simple) path obtained from $\Gamma$ by replacing $P_1$ by $P_2$. This path has the same winding number, one fewer reverse row arc than $\Gamma$, and its cost $c(\Phi, x^*)$ is no larger than $c(\Gamma, x^*)$. Indeed,
\begin{align*}
c(\Gamma, x^*) - c(\Phi, x^*) &= c(P_1, x^*) - c(P_2, x^*) \\
&= c^{+}_r + c(P,x^*) + c^{+}b_i - c(P_2, x^*)\\
&= c^{+}_r + c^{+}b_i + (c(P,x^*)- c(P_2, x^*)).
\end{align*}
Observe that
$$ c(P,x^*)- c(P_2, x^*) = c(P^+_i,x^*)- c(P^+_r, x^*)$$
and from
Lemma \ref{th:path-cost} (i) it follows that
$c(\Gamma, x^*) - c(\Phi, x^*)=s^*_i\geq 0$.
In both cases, we have proven the existence of a closed path $\Phi$ with strictly fewer reverse row arcs than
$\Gamma$, and such that $c(\Gamma, x^*) \geq c(\Phi, x^*)$ and $p(\Gamma)=p(\Phi)>0$. But then, $\Phi$ contains at least one circuit $\Gamma^{(2)}$ with strictly fewer reverse row arcs than $\Gamma$, positive winding number, and such that $c(\Gamma, x^*) \geq c(\Gamma^{(2)}, x^*)$.
Iterating this argument a finite number of times, we prove the existence of a circuit $\Gamma'$ without reverse
row arcs and positive winding number such that $c(\Gamma, x^*) \geq c(\Gamma', x^*)$.
\end{myproof}
As a consequence of the last lemma,
we obtain the main result of this section:
\begin{theorem}
\label{th:result-homogeneous-case}
For any circular matrix $A$ and any $\alpha \in {\mathbb N}$, the polyhedron $\Qi{A,\alpha \mathbf{1}}$ is completely
described by the inequalities defining $\Q{A,\alpha \mathbf{1}}$ and circuit inequalities induced by circuits $\Gamma$ in $\Aux{A}$ without reverse row arcs, with $p(\Gamma)\geq 2$, and such that $p(\Gamma)$ does not divide $t(\Gamma, \alpha \mathbf{1})$.
\end{theorem}
\begin{myproof}
Let $x^* \in \Q{A,\alpha \mathbf{1}}\setminus \Qi{A,\alpha \mathbf{1}}$.
Due to Lemma \ref{th:gammanonegativo} there exists at least one circuit $\Gamma$ such that $c(\Gamma, x^*) < 0$.
From Lemma~\ref{th:no-reverse} there exists a circuit $\Gamma'$ without reverse row arcs such that $c(\Gamma', x^*) \leq c(\Gamma, x^*) < 0$. By Lemma \ref{5prima}, $x^*$ violates the $\Gamma'$-inequality. Then,
$\Qi{A,\alpha \mathbf{1}}$ is completely
described by boolean inequalities and circuit inequalities induced by circuits $\Gamma$ in $\Aux{A}$ without reverse row arcs.
By Theorem \ref{th:complete-description}, we only need to consider $\Gamma$-inequalities such that $p(\Gamma)\geq 2$ and $p(\Gamma)$ does not divide $t(\Gamma, \alpha \mathbf{1})$.
\end{myproof}
In the following we further study combinatorial properties of circuits without reverse row arcs in $\Aux{A}$ and the implications for the related inequalities. In particular, in the case of the set covering polyhedron,
we show that these circuit inequalities reduce to row family inequalities.
\section{Circuits without reverse row arcs and their inequalities}
\label{sec:no-reverse-arcs}
Given a circular matrix $A$, let us call $F(A)$ the digraph with nodes in $[n]$ and all arcs in $\Aux{A}$ except for reverse row arcs. Moreover, let $\Gamma$ be a circuit in $F(A)$. Keeping the same notation introduced in \cite{Stauffer05}, we consider the partition of the nodes of $F(A)$ into the following three classes:
\begin{itemize}
\item[(i)] \emph{circles}
$\circ(\Gamma) := \setof{j \in [n]}{(j-1,j)\in E(\Gamma)}$,
\item[(ii)] \emph{crosses}
$\otimes(\Gamma) :=\setof{j \in [n]}{(j, j-1)\in E(\Gamma)}$, and
\item[(iii)] \emph{bullets} $\bullet(\Gamma): = [n]\setminus (\circ(\Gamma)\cup \otimes(\Gamma))$.
\end{itemize}
Observe that circle (resp. cross) nodes are the heads (resp. tails) of forward (resp. reverse) short arcs of $\Gamma$. A bullet node is either a node outside $\Gamma$, or it is the tail or the head of a row arc.
We say that a bullet is an \emph{essential bullet} if it is reached by $\Gamma$.
Recall that a forward row arc $(u,v)$ jumps over a node $j \in [n]$ if $j \in (u ,v]_n$. Also, the only forward (resp. reverse) short arc jumping over $j$ is the arc $(j-1,j)$ (resp. $(j,j-1)$).
The number of row arcs of $\Gamma$ jumping over a given node depends on which partition class it belongs to.
\begin{lemma}
\label{th:jumping-number-nodes}
Let $A$ be a circular matrix and $\Gamma$ be a circuit in $F(A)$ with winding number $p$. For each node $j\in [n]$, let $r(j)$ be the number of row arcs of $\Gamma$ that jump over $j$.
Then,
$$
r(j) = \left\{
\begin{array}{ll}
p-1 \; & \mbox{if $j \in \circ(\Gamma)$}, \\
p+1 \;& \mbox{if $j \in \otimes(\Gamma)$}, \\
p \;& \mbox{if $j \in \bullet(\Gamma)$}. \\
\end{array}
\right.
$$
\end{lemma}
\begin{myproof}
From Lemma \ref{th:winding-number} we know that, for all $j \in [n]$,
$p=p^{+}(\Gamma, j) - p^{-}(\Gamma, j)$.
If $j \in \circ(\Gamma)$, there is exactly one forward short arc jumping over $j$. Since $\Gamma$ is a circuit there is no reverse short arcs that jump over this node. Hence, there are exactly $p-1$ forward row arcs that jump over $j$.
If $j \in \otimes(\Gamma)$, again from the assumption that $\Gamma$ is a circuit there is exactly one reverse short arc and no forward short arcs that jump over $j$. It follows that $p+1$ forward row arcs must jump over this node.
Finally, if $j \in \bullet(\Gamma)$, neither forward nor reverse short arcs can jump over $j$ and then $r(j)= p$.
\end{myproof}
From the previous results, the relevant circuit inequalities of $\Qi{A,\alpha \mathbf{1}}$ have a particular structure.
\begin{theorem}
\label{th:2-coefs}
Let $A$ be a circular matrix and $\alpha\in {\mathbb N}$. Let $\Gamma$ be a circuit in $F(A)$ with $s$ row arcs and winding number $p$, fulfilling the conditions of Lemma \ref{rem:non-redundant}. If
$r:= \alpha s - p \floor{\frac{\alpha s}{p}}$, the $\Gamma$-inequality of $\Qi{A,\alpha \mathbf{1}}$ has the form:
\begin{equation}
\label{eq:2-coefs-ineq}
r \sum_{j \not\in \otimes(\Gamma)} x_j + (r+1) \sum_{j \in \otimes(\Gamma)} x_j \geq r\ceil{\frac{\alpha s}{p}}.
\end{equation}
Moreover, if $\alpha= 1$ and $\otimes(\Gamma) \neq \emptyset$, this inequality is the row family inequality induced by $F:= \{i \in [m]: a_i \text{ is a row arc of } \, \Gamma\}$.
\end{theorem}
\begin{myproof}
Recall from Definition \ref{gammaineq} that the $\Gamma$-inequality has the form
$$\sum_{j\in [n]} [p^-(\Gamma,j)+r(\Gamma,b)] \, x_j \geq r(\Gamma,b) \, (\beta (\Gamma,b) +1)+ \sum_{i\in E^-_1(\Gamma)}\!\!\! b_i.$$
Clearly, since $\Gamma$ has no reverse row arcs, $\sum_{i\in E_1^-(\Gamma)}b_i=0$ and $t(\Gamma,b)= \sum_{i\in E_1^+(\Gamma)}b_i= \alpha s$. Then, $\beta (\Gamma, b)= \ceil{\frac{\alpha s}{p}}-1$ and $r(\Gamma, b)=r$. Hence, the $\Gamma$-inequality (\ref{eq:gamma-ineq}) has the form
\begin{equation}
\sum_{j\in [n]} [p^-(\Gamma,j)+r] \, x_j \geq r \, \ceil{\frac{\alpha s}{p}}.
\end{equation}
In order to obtain (\ref{eq:2-coefs-ineq}), it only remains to observe that, since $E^-(\Gamma)$ contains only short reverse arcs, we have:
$$p^-(\Gamma,j)=\left\{
\begin{array}{rl}
1 & \mbox{ if } j \in \otimes(\Gamma), \\
0 & \mbox{ otherwise.}
\end{array} \right.$$
Now assume that $\alpha= 1$ and $\otimes(\Gamma) \neq \emptyset$.
Since $s= \card{F}$, to prove that \eqref{eq:2-coefs-ineq} is the row family inequality induced by $F$, it suffices to show that $p= \max_{j \in [n]} \{\sum_{i \in F} a_{ij} \} - 1$ and $O(F,p) = \otimes(\Gamma)$.
Indeed, it is not hard to see that, for any $j \in [n]$, $\sum_{i \in F} a_{ij}$ coincides with $r(j)$ defined in Lemma~\ref{th:jumping-number-nodes}. Moreover, since $\otimes(\Gamma) \neq \emptyset$,
$$\max_{j \in [n]} \{\sum_{i \in F} a_{ij} \} - 1 = (p+1) - 1 =p \quad \text{ and } \quad
O(F, p) = \{j \in [n]: \sum_{i \in F} a_{ij} = p+1\} = \otimes(\Gamma).$$
Then, the $\Gamma$-inequality is the row family inequality induced by $F$ and the proof is complete.
\end{myproof}
In the particular case when $A$ is a circulant matrix, relevant circuit inequalities correspond to circuits without circle nodes.
\begin{lemma}
Let $n,k\in {\mathbb N}$ such that $2\leq k\leq n-2$ and $\Gamma$ be a circuit in $F(C_n^k)$ with $s$ row arcs, winding number $p$, $\otimes(\Gamma)\neq \emptyset$ and $\circ(\Gamma)\neq \emptyset$. Then, for any $\alpha\in {\mathbb N}$,
the $\Gamma$-inequality is not a facet of $\Qi{C_n^k,\alpha \mathbf{1}}$.
\end{lemma}
\begin{myproof}
Since $\otimes(\Gamma)\neq \emptyset$ and $\circ(\Gamma)\neq \emptyset$, there is a path in $\Gamma$ connecting a cross with a circle. Let $P$ be a shortest path in $\Gamma$ with this condition. Assume that $P$ starts at $u\in \otimes(\Gamma)$, has $h\geq 1$ row arcs and ends at $v\in \circ(\Gamma)$. Then, the nodes of $P$ are $u,u-1$, $(u-1)+jk$ with $1\leq j\leq h$, and $(u-1)+hk+1=u+hk=v$.
Consider $P'$ the path of row arcs in $F(A)$ from $u$ to $u+hk=v$ and let $\Gamma'$ be the closed path obtained by replacing $P$ by $P'$ in $\Gamma$. We will see that $\Gamma'$ is a circuit. To do that, we only need to prove that internal nodes in $P'$ do not belong to $\Gamma$. Clearly, the internal nodes in $P'$ are $u+jk$ with $1\leq j\leq h-1$.
Assume there exists $j$ with $1\leq j\leq h-1$ such that $u+jk$ is a node of $\Gamma$. Let $t=\min \{j: u+jk\in V(\Gamma), 1\leq j\leq h-1\}$. Clearly, $u+tk\notin \circ(\Gamma)$ (resp. $u+tk\notin \otimes(\Gamma)$), otherwise there are two arcs from $\Gamma$ leaving (resp. entering) $(u-1)+tk$. Then, either $(u+tk+1, u+tk)$ or $(u+(t-1)k, u+tk)$ is an arc of $\Gamma$. If $(u+tk+1, u+tk)$ is an arc of $\Gamma$, $u+tk+1\in \otimes(\Gamma)$ and the path in $\Gamma$ from $u+tk+1$ to $v$ is shorter than $P$, a contradiction. If $(u+(t-1)k, u+tk)$ is an arc of $\Gamma$, $u+(t-1)k$ is a node of $\Gamma$. If $t=1$, we have two arcs in $\Gamma$ leaving $u$. If $t\geq 2$, we have a contradiction with the definition of $t$.
Then, $\Gamma'$ is a circuit in $F(A)$.
Moreover, $\Gamma'$ has $s$ row arcs, winding number $p$, and $\otimes{(\Gamma')}$ is strictly contained in $\otimes(\Gamma)$. Hence, the $\Gamma'$-inequality implies the $\Gamma$-inequality.
\end{myproof}
Observe that if $\Gamma$ is a circuit in $F(A)$ such that $\otimes(\Gamma)=\emptyset$, the $\Gamma$-inequality is implied by the rank constraint. As a consequence we have:
\begin{theorem}
\label{circulantes}
For any positive numbers $n$ and $k$, with $2\leq k\leq n-2$ and $\alpha \in {\mathbb N}$, a complete linear description for the polyhedron $\Qi{C^k_n,\alpha \mathbf{1}}$ is given by
the inequalities defining $\Q{C^k_n,\alpha \mathbf{1}}$,
the rank constraint, and
circuit inequalities corresponding to circuits in $F(A)$ without short forward arcs.
\end{theorem}
In the next section we will see that relevant inequalities for the set covering polyhedron of circular matrices are minor induced row family inequalities.
\section{Set covering polyhedron of circular matrices and circulant minors}
\label{sec:set-covering-polyhedron}
Throughout this section, we restrict our attention to the set covering polyhedron of circular matrices. Recall that we have assumed that the matrix $A$ has no dominating rows.
Let $\Gamma$ be a circuit of $F(A)$ with winding number $p$ and $s$ essential bullets $\{b_j: j=1,\ldots,s\}$, with $1\leq b_1 < b_2<\ldots< b_s\leq n$.
Clearly, if $[u,w]_n\subset \circ(\Gamma)$ and $u-1,w+1\notin \circ(\Gamma)$ (resp. $[u,w]_n\subset \otimes(\Gamma)$ and $u-1,w+1\notin \otimes(\Gamma)$) then $u-1$ is an essential bullet and $w+1\in \bullet(\Gamma)$.
For each $j\in [s]$ we define $v_j$ as the node of $\Gamma$ in $[b_j,b_{j+1})$ such that:
\begin{itemize}
\item[(i)] if $b_j+1 \in \circ(\Gamma)$ then $[b_j+1,v_j]_n\subset \circ(\Gamma)$ and $v_{j}+1\notin \circ(\Gamma)$,
\item[(ii)] if $b_j+1 \in \otimes(\Gamma)$ then $[b_j+1,v_j]_n\subset \otimes(\Gamma)$ and $v_{j}+1\notin \otimes(\Gamma)$,
\item[(iii)] if $b_j+1 \in \bullet(\Gamma)$ then $v_j=b_j$.
\end{itemize}
Then, for each $j\in [s]$ we define the \emph{block} $B_j=[b_j, v_j]_n$ which can be a \emph{circle} block, a \emph{cross} block or a \emph{bullet} block depending on if $b_j+1$ is a circle, a cross or a bullet of $\Gamma$.
It is easy to check that the blocks $\{B_j: j=1,\ldots,s\}$ define a partition of nodes of $\Gamma$.
Figure~\ref{fig:block-types} illustrates these three types of blocks.
\begin{figure}
\caption{All three possible block types: (a) circle block, (b) cross block, (c) bullet block.}
\label{f:circleblock}
\label{f:crossblock}
\label{f:bulletblock}
\label{fig:block-types}
\end{figure}
\begin{remark}\label{arcos}
For each $j\in [s]$ there exists one row arc leaving $B_j$ and another row arc entering $B_j$.
Let $B_j^{-} \in B_j$ be the tail of the arc leaving $B_j$, while $B_j^{+} \in B_j$ denotes the head of the arc entering $B_j$. In particular, if $B_j$ is a cross block, $B^-_j=b_j$ and $B^+_j=v_j$. If $B_j$ is a circle block, $B^-_j=v_j$ and $B^+_j=b_j$. Finally, if $B_j$ is a bullet block, $B^-_j=B^+_j=b_j=v_j$.
Observe that if $B_j$ is a circle block (cross block) then there is always a path of forward (reverse) short arcs in $\Gamma$ that joins $B_j^{+}$ with $B_j^{-}$. Moreover, the nodes of every path in $\Gamma$ consisting only of short arcs belong to the same block.
\end{remark}
We have the following result:
\begin{lemma}
\label{peb}
Let $A$ be a circular matrix, $\Gamma$ be a circuit of $F(A)$ with winding number $p$ and $s$ essential bullets $\{b_j: j\in [s]\}$, with $1\leq b_1 < b_2<\ldots< b_s\leq n$. Then, $\Gamma$ has $s$ row arcs and each row arc in $\Gamma$ jumps over $p$ essential bullets, i.e., it has the form $(B^-_i,B^+_{i+p})$ for some $i\in [s]$. Moreover, $\gcd(s,p)=1$.
\end{lemma}
\begin{myproof}
Since there are $s$ blocks and each row arc of $\Gamma$ is the leaving arc of exactly one block, $\Gamma$ has $s$ row arcs.
Consider the row arc $(B^-_i,B^+_{i+t})$ of $\Gamma$. Clearly, it jumps over the $t$ essential bullets $b_{i+\ell}$ with $1\leq \ell \leq t$.
Moreover, since $A$ has no dominating rows,
then $(B^-_{i+1},B^+_{i+1+t})$ is also a row arc of $\Gamma$. Iterating this argument, one can verify that the row arcs of $\Gamma$ jumping over $b_{i+t}$ are exactly $(B^-_{i+\ell},B^+_{i+\ell+t})$ with $0\leq \ell \leq t-1$. From Lemma \ref{th:jumping-number-nodes}, it follows that $t=p$.
Thus, $(B^-_i,B^+_{i+t})$ jumps over $p$ essential bullets.
Let $\tilde{D}(A, \Gamma)=(\tilde{V}, \tilde{E})$ be the directed graph where $\tilde{V}=[s]$ and $(i,j)\in \tilde{E}$ if $(B^-_i, B^+_j)$ is a row arc of $\Gamma$. Hence, $j=i+p$ and $\tilde{D}(A, \Gamma)$ is a circuit
with $s$ arcs of length $p$.
But then,
$\gcd(s,p)=1$ must hold.
\end{myproof}
Lemma \ref{peb} also establishes a relationship between circuits in $F(A)$ and some circulant submatrices of $A$.
\begin{corollary}
\label{circulantsubmatrix}
Let $A$ be a circular matrix and $\Gamma$ be a circuit in $F(A)$ with winding number $p$ and $s$ row arcs. Let $L\subset [n]$ be the set of essential bullets of $\Gamma$ and
$F\subset [m]$ be the set of rows of $A$ corresponding to the row arcs of $\Gamma$.
Then, the submatrix of $A$ induced by rows in $F$ and columns in $L$ is isomorphic to the circulant matrix $\C{s}{p}$ with $\gcd(s,p)=1$.
\end{corollary}
\begin{myproof}
Let $A'$ be the submatrix of $A$ induced by the rows in $F$ and the columns in $L$.
From Lemma \ref{peb}, all the row arcs in $\Gamma$ jump over $p$ essential bullets. Then, each row of $A'$ has exactly $p$ entries equal to one and there is no pair of equal rows in $A'$. Hence, $A'$ is isomorphic to $\C{s}{p}$.
\end{myproof}
Observe that the submatrix mentioned in the corollary above is not necessarily a circulant (contraction) minor of
$A$. Indeed, after deleting the columns from $N=[n]\setminus L$, there might be rows in $[m]\setminus F$ that
are dominated by rows in $F$. This only happens when there is a row arc in $F(A)$ that jumps over less than $p$ essential bullets of $\Gamma$. Inspired by the results in \cite{Stauffer05}, we say that a row arc in $F(A)$ is a \emph{bad arc (with respect to $\Gamma$)} if it jumps over less than $p$ essential bullets of $\Gamma$.
Then, we have:
\begin{corollary}
\label{th:circulant-minor}
Let $A$ be a circular matrix and $\Gamma$ be a circuit in $F(A)$ with winding number $p$ and $s$ row arcs. Let $L\subset [n]$ be the set of essential bullets of $\Gamma$ and $F\subset [m]$ be the set of rows of $A$ corresponding to the row arcs of $\Gamma$.
If $\Gamma$ has no bad arcs, then the minor $A/N$ of $A$ is isomorphic to the circulant matrix $\C{s}{p}$. Moreover, the $\Gamma$-inequality for $\Qi{A}$ is a minor related row family inequality.
\end{corollary}
The following result gives a characterization of bad arcs in terms of their endpoints:
\begin{theorem}
\label{th:arcs-vs-ess-bullets}
Let $A$ be a circular matrix and $\Gamma$ be a circuit of $F(A)$ with $s$ essential bullets $1\leq b_1<\ldots<b_s\leq n$, and winding number $p$. Let $(u,v)$ be a row arc in $F(A)$ that jumps over $k$ essential bullets of $\Gamma$. Then, $k \in \{p-1, p, p+1\}$. Moreover, $(u,v)$ jumps over $p-1$ essential bullets of $\Gamma$ if and only if the following two conditions hold:
\begin{itemize}
\item[(i)] $u$ belongs to a circle block of $\Gamma$
\item[(ii)] $v\in \circ(\Gamma)$ or $v$ is not a node of $\Gamma$.
\end{itemize}
In addition, if $u\in B_i$ then $u\neq v_i$. In this case, if $v\in \circ(\Gamma)$, $B_{i+p-1}$ is a circle block and $v\in B_{i+p-1}\setminus \{b_{i+p-1}\}$. If $v$ is not a node of $\Gamma$, $v\in (v_{i+p-1},b_{i+p})_n$.
\end{theorem}
\begin{myproof}
Due to Lemma \ref{peb} if $(u,v)$ is a row arc of $\Gamma$, $k=p$. Now consider a row arc $(u,v)$ not in $\Gamma$.
If $u$ is not a node of $\Gamma$, then it is between two consecutive blocks in $\Gamma$, i.e., there exists $i\in [s]$ such that $u\in (v_{i-1}, b_i)_n$. Since there are no dominating rows in $A$ and $(B^-_{i-1}, B^+_{i+p-1})$, $(B^-_{i}, B^+_{i+p})$ are row arcs of $\Gamma$ then $v\in (b_{i+p-1},v_{i+p})_n$. But then, $(u,v)$ jumps either over $p$ essential bullets when $v\in (b_{i+p-1},b_{i+p})_n$, or over $p+1$ essential bullets when $v\in [b_{i+p},v_{i+p})_n$.
Hence, if $(u,v)$ is a bad arc, $u$ has to be a node of $\Gamma$.
Now assume $u$ is a node of $\Gamma$ and $u\in B_i$, for some $i\in [s]$. Again, since there is no dominating rows in $A$, and $(B^-_{i-1}, B^+_{i+p-1})$, $(B^-_{i+1}, B^+_{i+p+1})$ are row arcs of $\Gamma$ then $v\in (b_{i+p-1},v_{i+p+1})_n$. Therefore, $(u,v)$ jumps either over $p-1$, $p$ or $p+1$ essential bullets, depending on whether $v\in (b_{i+p-1},b_{i+p})_n$, $v\in [b_{i+p},b_{i+p+1})_n$ or $v\in [b_{i+p+1},v_{i+p+1})_n$, respectively.
Thus, any row arc in $F(A)$ jumps over $k$ essential bullets of $\Gamma$ with $k\in \{p-1,p,p+1\}$. Moreover, $(u,v)$ is a bad arc if it jumps over exactly $p-1$ essential bullets. In this case, $u$ is a node of $\Gamma$ and if $u\in B_i$, $v\in (b_{i+p-1},b_{i+p})_n$. Let us analyze this last case.
Since $v\in (b_{i+p-1},b_{i+p})_n$ then $B^+_i=v_i$, as otherwise the row of $A$ corresponding to the arc $(B^-_i,B^+_{i+p})$ is dominated by the row corresponding to $(u,v)$. Therefore, $B_i$ is a circle block and $u\neq v_i$. Moreover, if $v$ is a node of $\Gamma$ then $B^+_{i+p-1}=b_{i+p-1}$, as otherwise the row corresponding to the arc $(B^-_{i-1},B^+_{i+p-1})$ is dominated by the row corresponding to $(u,v)$. As a consequence, $B_{i+p-1}$ is a circle block and $v\in \circ(\Gamma)$.
\end{myproof}
Figure~\ref{fig:badarc} depicts the two possible situations for a row arc that jumps over one essential bullet in a circuit with winding number two.
\begin{figure}
\caption{(a) Case $v$ is not a node of $\Gamma$. (b) Case $v$ is a circle of $\Gamma$.}
\label{fig:badarc}
\end{figure}
Clearly, if $\circ(\Gamma)=\emptyset$, there is no bad arc with respect to $\Gamma$. Then, as a consequence of Theorem \ref{circulantes} and Corollary \ref{th:circulant-minor} we obtain the following result that was conjectured in \cite{Torres15}:
\begin{corollary}
For any positive numbers $n$ and $k$, with $2\leq k\leq n-2$ a complete linear description for the set covering polyhedron $\Qi{C^k_n}$ is given by boolean inequalities, the rank constraint, and minor related row family inequalities induced by circulant minors $\C{s}{p}$ of $\C{n}{k}$, with $\gcd(s,p)=1$.
\end{corollary}
In addition, recall that any circulant matrix $C_n^k$ with $k$ odd corresponds to the closed neighborhood matrix of a web graph and conversely. Then, the last result yields a complete description of the dominating set polyhedron of web graphs by boolean inequalities and row family inequalities induced by circulant minors $C_s^p$ with $\gcd(s,p)=1$.
Moreover, the corollary above can be seen as the counterpart of the complete description of the stable set polytope of web graphs given in \cite{Stauffer05}, proving a previous conjecture stated in \cite{PecherWagler06}.
Therein, the polytope is obtained by clique inequalities and clique family inequalities associated with subwebs $W_s^{p-1}$ with $\gcd(s,p)=1$.
In the remainder of this section, we will see that minor related row family inequalities are sufficient for describing the set covering polyhedron of any circular matrix. In order to do so, we prove that for every circular matrix $A$, the inequalities induced by circuits without bad arcs are sufficient for describing the set covering polyhedron $\Qi{A}$.
Let $x$ and $y$ be two nodes of $\Gamma$ that belong to a same block $B_i$, for some $i\in [s]$. Denote by $\Pi (x,y)$ the path of short arcs in $\Gamma$ that goes from node $x$ to node $y$. If $x=y$ then $\Pi (x,y)$ is the empty path. Observe that $\Pi(x,y)$ is contained in $\Pi(B_i^{+},B_i^{-})$.
In addition, if $x$ and $y$ are two distinct nodes in $[n]$, let $\pi (x,y)$ be the path of short forward arcs in $F(A)$ that goes from node $x$ to node $y$. It is clear that $\pi (x,y)$ is nonempty and simple.
Let $(u,v)$ be a bad arc with respect to $\Gamma$. From Theorem \ref{th:arcs-vs-ess-bullets} it holds that $u$ belongs to a circle block and $v$ is either a node in another circle block or it is outside $\Gamma$.
We will first see that if $v$ is a node of $\Gamma$, the $\Gamma$-inequality is not a facet of $\Qi{A}$ or it coincides with a $\Gamma'$-inequality where $\Gamma'$ is a circuit in $F(A)$ having less bad arcs than $\Gamma$.
For this purpose, we need some technical previous results.
Assume w.l.o.g. that $u\in B_1$ and consequently $B_1$ is a circle block. Since $v$ is a node of $\Gamma$, from Theorem \ref{th:arcs-vs-ess-bullets}, $v \in B_p$ and $B_p$ is a circle block. Moreover, $u \in[b_1,v_1)_n$ and $v\in(b_p,v_p]_n$.
Let $P_1$ be the path in $\Gamma$ that goes from $B_{p+1}^{-}$ to $B_s^{+}$ and $P_2$ be the path in $\Gamma$ that goes from $v$ to $u$. An example is illustrated in Figure \ref{fig:caminosbadarcb}.
\begin{figure}
\caption{Paths $P_1$ (left figure, black lines), $P_2$ (right figure, black lines) in a circuit $\Gamma$ (continuous lines) with a bad arc $(u,v)$ (dashed line).}
\label{fig:caminosbadarcb}
\end{figure}
Clearly, $\Gamma$ can be seen as the concatenation of
$P_2$,
$\Pi(u, v_1)$, the row arc $(v_1, B^{+}_{p+1})$,
$\Pi(B_{p+1}^{+},B_{p+1}^{-})$,
$P_1$, $\Pi (B^{+}_{s}, B^{-}_{s})$,
the row arc $(B_s^-,b_p)$ and
$\Pi (b_p,v)$. It follows that $P_1$ and $P_2$ are node disjoint paths.
Define $\Gamma_1$ as the circuit in $F(A)$ obtained by joining $P_1$ with
$\pi(B_{s}^{+},u)$, the row arc $(u, v)$, and
$\pi(v,B_{p+1}^{-})$. Similarly, we define $\Gamma_2$ as the circuit obtained by joining $P_2$ together with the row arc $(u, v)$ (see Figure \ref{fig:circuitosbadarcb}).
\begin{figure}
\caption{Circuits $\Gamma_1$ (left) and $\Gamma_2$ (right).}
\label{fig:circuitosbadarcb}
\end{figure}
Observe that, in $\Gamma_1$, nodes $v$ and $B_{s}^{+}$ are essential bullets but $u$ and $B_{p+1}^{-}$ are circles. Meanwhile, the internal nodes of path $P_1$ belong to the same class with respect to $\Gamma_1$ and to $\Gamma$.
Concerning $\Gamma_2$, $v$ is an essential bullet and $u$ together with the internal nodes of $P_2$ belong to the same class with respect to $\Gamma_2$ and to $\Gamma$. Hence, every node in the set $\tabulatedset{b_2, \ldots, b_{p-1}}$ is an essential bullet for either $\Gamma_1$ or $\Gamma_2$.
In the following, it is convenient to make the next assumptions:
\begin{assumption}\label{ass}
$A$ is a circular matrix, $\Gamma$ is a circuit in $F(A)$ with $s$ row arcs and winding number $p\geq 2$, $\beta=\left\lfloor \frac{s}{p}\right\rfloor$ and $r=s-p\beta\geq 1$. Moreover, $(u,v)$ is a bad arc with respect to $\Gamma$ such that $u$ and $v$ belong to the circle blocks $B_1$ and $B_p$ of $\Gamma$, respectively. For $i=1,2$, $P_i$ and $\Gamma_i$ are the path and the circuit, respectively, defined above. Finally, $s_i$ and $p_i$ denote, respectively, the number of row arcs and the winding number of $\Gamma_i$, $\beta_i=\floor{\frac{s_i}{p_i}}$, and $r_i=s_i-\beta_i p_i$.
\end{assumption}
Circuits $\Gamma_1$ and $\Gamma_2$ satisfy the following properties:
\begin{lemma}
Under Assumption \ref{ass}, it holds:
\label{rem:crosses}
\begin{itemize}
\item[(i)] $\otimes(\Gamma_1) \cap \otimes(\Gamma_2) = \emptyset$ and $\otimes(\Gamma_1) \cup \otimes(\Gamma_2) \subseteq \otimes(\Gamma)$.
\item[(ii)] $s= s_1 + s_2$.
\item[(iii)] $p=p_1+p_2$.
\end{itemize}
\end{lemma}
\begin{myproof}
Let $i=1,2$. Then:
\begin{itemize}
\item[(i)] Since $P_1$ and $P_2$ are disjoint, $\otimes(\Gamma_1) \cap \otimes(\Gamma_2) = \emptyset$. Moreover, every cross node of $\Gamma_i$ is an internal node of $P_i$ and then it is a cross node of $\Gamma$.
\item[(ii)] Clearly every row arc of $\Gamma_i$ different from $(u, v)$ is a row arc of $\Gamma$ contained in the path $P_i$. Besides, $\Gamma$ contains the two row arcs $(B_s^{-},b_p)$ and $(v_1,B_{p+1}^{+})$, which are neither in $\Gamma_1$ nor in $\Gamma_2$.
\item[(iii)] From construction, $(u,v)$ jumps over each node in the set $\tabulatedset{b_2, \ldots, b_{p-1}}\cup \{v\}$. Let $R_i= \bullet(\Gamma_i) \cap \tabulatedset{b_2, \ldots, b_{p-1}}$. Since $(u, v)$ belongs to $\Gamma_i$, by Lemma~\ref{peb} $(u, v)$ jumps over $p_i$ bullets of $\Gamma_i$: one of them is $v$ and the other $p_i-1$ bullets belong to $\tabulatedset{b_2, \ldots, b_{p-1}}$. Thus, $p_i= \card{R_i} + 1$.
It is clear that $R_1 \cap R_2 = \emptyset$ and $R_1 \cup R_2 =\tabulatedset{b_2, \ldots, b_{p-1}}$.
Then, $p_1+p_2=\card{R_1}+\card{R_2}+ 2= |\tabulatedset{b_2, \ldots, b_{p-1}}|+2=p$.
\end{itemize}
\end{myproof}
\begin{lemma}
\label{th:gamma-impliedby-gam1-gam2}
Under Assumption \ref{ass}, it holds that:
\begin{itemize}
\item[(i)] if $r=1$ then
the $\Gamma$-inequality is not facet defining for $\Qi{A}$, or it coincides with the $\Gamma_2$-inequality,
\item[(ii)] if $r=p-1$ then the $\Gamma$-inequality is not facet defining for $\Qi{A}$.
\end{itemize}
\end{lemma}
\begin{myproof}
\begin{itemize}
\item [(i)] Since $r=1$, the path $P_1$ connecting $B_{p+1}^{-}$ with $B_s^{+}$ in $\Gamma$ contains exactly $\beta -1$ row arcs. Thus, from construction, $s_1=\beta$ and $p_1=1$.
Therefore, by Lemma \ref{rem:crosses}, the circuit $\Gamma_2$ contains exactly $s_2=s - s_1 =s-\beta=\beta(p-1) + 1$ row arcs and $p_2= p-1$.
Finally, we have $\ceil{\frac{s_2}{p_2}}= \beta + 1 = \ceil{\frac{s}{p}}$, $r_2= s_2 - p_2 \floor{\frac{s_2}{p_2}} = 1 = r$, and $\otimes(\Gamma_2) \subseteq \otimes(\Gamma)$. As a consequence, if $\otimes(\Gamma_2) \subset \otimes(\Gamma)$ then the $\Gamma$-inequality cannot be facet defining, as it is implied by the stronger $\Gamma_2$-inequality. Otherwise, both inequalities coincide.
\item [(ii)]
Since $r=p-1$, the path $P_2$ connecting $v$ with $u$ in $\Gamma$
contains exactly $\beta$ row arcs and hence $s_2=\beta+1$ and $p_2=1$.
Then, by Lemma \ref{rem:crosses}, $p_1= p-1$ and $s_1=(p-1)(\beta+1)-1$, implying $\ceil{\frac{s_1}{p_1}}= \beta + 1 = \ceil{\frac{s}{p}}$ and $r_1= s_1 - p_1 \floor{\frac{s_1}{p_1}} = p-2 = r-1$.
The $\Gamma_1$-inequality has the form:
\begin{equation}
\label{eq:gamma1-ineq}
(r-1) \!\! \sum_{j \not\in \otimes(\Gamma_1)} x_j + r \!\! \sum_{j \in \otimes(\Gamma_1)} x_j \geq (r-1)\ceil{\frac{s}{p}}.
\end{equation}
On the other hand, if we add the $s_2$ inequalities from $\Q{A}$ corresponding to the row arcs of $\Gamma_2$,
by Lemma~\ref{th:jumping-number-nodes}, we obtain the following valid inequality for $\Qi{A}$:
\begin{align*}
(p_2-1) \!\! \sum_{j \in \circ(\Gamma_2)} x_j + p_2 \!\! \sum_{j \in \bullet(\Gamma_2)} x_j + (p_2+1) \!\! \sum_{j \in \otimes(\Gamma_2)} x_j &\geq s_2 = \beta + 1 = \ceil{\frac{s}{p}}
\end{align*}
which implies the valid inequality:
\begin{align*}
p_2 \!\! \sum_{j \not\in \otimes(\Gamma_2)} x_j + (p_2 + 1)\!\! \sum_{j \in \otimes(\Gamma_2)} x_j &\geq \ceil{\frac{s}{p}}.
\end{align*}
Since $p_2=1$ and, from Lemma \ref{rem:crosses} (i), we have $\otimes(\Gamma_1) \cap \otimes(\Gamma_2) = \emptyset$ and $\otimes(\Gamma_1) \cup \otimes(\Gamma_2) \subseteq \otimes(\Gamma)$, it follows that the $\Gamma$-inequality is implied by the sum of \eqref{eq:gamma1-ineq} and the last inequality. Hence, the $\Gamma$-inequality is not facet defining.
\end{itemize}
\end{myproof}
It remains to consider the case $r\in \tabulatedset{2, \ldots, p-2}$.
\begin{lemma}
\label{th:gamma-sum-gam1-gam2}
If Assumption \ref{ass} and $2\leq r\leq p-2$ hold then
$\beta_1=\beta_2=\beta$ and
$r_1 + r_2 = r$.
\end{lemma}
\begin{myproof}
As in the proof of Lemma \ref{rem:crosses}(iii), for $i=1,2$, let $R_i= \bullet(\Gamma_i) \cap \tabulatedset{b_2, \ldots, b_{p-1}}$. Recall that $p_i= \card{R_i} + 1$. Since $2\leq r \leq p-2$ then $b_r, b_{r+1} \in R_1\cup R_2$. Define $R_i^{-}:= \setof{b_\ell \in R_i}{\ell < r}$ and $R_i^{+}:= \setof{b_\ell \in R_i}{\ell > r+1}$.
Observe that, from Lemma \ref{peb}, for every $\ell \in [s]$ and every $\alpha \in {\mathbb Z}_+$, the path in $\Gamma$ starting at $B_{\ell}^{-}$ that uses $\alpha$ row arcs, arrives at the block $B_{\ell+\alpha p}$.
It follows that the path in $\Gamma$ starting at node $B_\ell^{-}$ for some $\ell \in R_i^{-}$ reaches the node $B_{\ell+p-r}^{+}$ after $\beta + 1$ row arcs. It is straightforward to verify that $\ell+p-r \in \tabulatedset{2, \ldots, p-1}$. In addition, a path in $\Gamma$ starting at node $B_\ell^{-}$ for some $\ell \in R_i^{+}$ reaches the node $B_{\ell-r}^{+}$ after $\beta$ row arcs. Since $r+2 \leq \ell \leq p-1$ and $r \geq 2$ it follows that $2 \leq \ell - r \leq p-3$. In both cases, such paths may belong to either $\Gamma_1$ or $\Gamma_2$.
Now, the path in $\Gamma$ starting at the node $B_r^{-}$ reaches the node $B_s^{+}$ after $\beta$ row arcs, since $\beta p+r =s$. Hence, this path belongs to $\Gamma_1$.
It follows that $b_{r}\in R_1$, $\card{R_1} \geq 1$, and $\card{R_1^{-}}< \card{R_1}$.
Moreover, from construction, $\Gamma_1$ continues with the arc $(u,v)$ and then, by following a path of short arcs, it reaches $B_{p+1}^{-}$. From this last node and after $\beta$ row arcs it reaches $B_{p+1-r}^+$. It is clear that $3\leq p+1-r\leq p-1$. Hence, the path in $\Gamma_1$ that connects $B^-_r$ with $B_{p+1-r}^+$ contains $2\beta +1$ row arcs.
Finally, the path in $\Gamma$ that starts at the node $B_{r+1}^{-}$ reaches the node $B_1^{+}$ after $\beta$ row arcs. Since $B_1$ is a circle block, $B_1^{+}=b_1$ and thus the path belongs to $\Gamma_2$. Thus, $b_{r+1}\in R_2$, $\card{R_2} \geq 1$, and $\card{R_2^{-}}< \card{R_2}$. Moreover, the circuit $\Gamma_2$ continues with $(u,v)$ and another path of short arcs until it reaches $B_p^{-}=v_p$. From this node and after $\beta$ row arcs, $\Gamma_2$ reaches $B_{p-r}^+$. Hence, the path that connects $B_{r+1}^{-}$ with $B_{p-r}^+$ in $\Gamma_2$ contains $2\beta +1$ row arcs.
For $i=1,2$, let $\mathcal{P}_i$ be the set of simple directed paths obtained by splitting $\Gamma_i$ at the nodes
in $R_i$, i.e., the end nodes of each path in $\mathcal{P}_i$ belong to $R_i$ and no node of $R_i$ is an internal node of the path. As we have just observed, $\card{R_i} \geq 1$ holds.
If $\card{R_i} =1$, $\mathcal{P}_i$ contains one closed path, which coincides with $\Gamma_i$.
Hence, the number of row arcs of $\Gamma_i$ can be computed by adding up the number of row arcs in each path in $\mathcal P_i$:
\begin{align*}
s_i &= \card{R_i^{-}} (\beta + 1) + \card{R_i^{+}} \beta + 2\beta + 1 \\
&= (\card{R_i^{-}} + \card{R_i^{+}} + 2) \beta + \card{R_i^{-}} + 1 \\
&= p_i \beta + \card{R_i^{-}} + 1.
\end{align*}
Observe that $1 \leq \card{R_i^{-}} + 1 \leq p_i - 1$. Hence, $\floor{\frac{s_i}{p_i}}= \beta$ and $r_i = s_i - p_i \beta= \card{R_i^{-}} + 1$. But then, $r_1 + r_2 = \card{R_1^{-}} + \card{R_2^{-}} + 2 = \card{\tabulatedset{b_2, \ldots, b_{r-1}}} + 2 = r$. Similarly, $\beta_i= \beta = \floor{\frac{s}{p}}$.
\end{myproof}
As a consequence of the previous lemma we have:
\begin{corollary}
\label{cor:gamma-sum-gam1-gam2}
If Assumption \ref{ass} and $2\leq r\leq p-2$ hold, the $\Gamma$-inequality is not facet defining for $\Qi{A}$.
\end{corollary}
\begin{myproof}
For $i \in \tabulatedset{1, 2}$, the $\Gamma_i$-inequality has the form
$$r_i \sum_{j \not\in \otimes(\Gamma_i)} x_j + (r_i +1) \sum_{j \in \otimes(\Gamma_i)} x_j \geq r_i \ceil{\frac{s_i}{p_i}}.$$
Adding the inequalities corresponding to $\Gamma_1$ and $\Gamma_2$, from Lemma \ref{th:gamma-sum-gam1-gam2}
together with Lemma~\ref{rem:crosses}(i), we have:
\begin{align*}
r \sum_{j \not\in \otimes(\Gamma)} x_j + (r +1) \sum_{j \in \otimes(\Gamma)} x_j
&\geq
(r_1 + r_2) \!\!\!\!\!\!\!\! \sum_{j \not\in \otimes(\Gamma_1) \cup \otimes(\Gamma_2)} \!\!\!\!\!\!\!\! x_j + (r_1 + r_2 +1) \!\!\!\!\!\!\!\! \sum_{j \in \otimes(\Gamma_1) \cup \otimes(\Gamma_2)} \!\!\!\!\!\!\!\! x_j \\
&=
\sum_{i=1}^2 \left( r_i \sum_{j \not\in \otimes(\Gamma_i)} x_j + (r_i +1) \sum_{j \in \otimes(\Gamma_i)} x_j \right) \\
&\geq
\sum_{i=1}^2 r_i \ceil{\frac{s_i}{p_i}} = (r_1 + r_2) \ceil{\frac{s}{p}} = r \ceil{\frac{s}{p}}.
\end{align*}
\end{myproof}
Finally we obtain the following result:
\begin{theorem}
\label{th:v-in-V-redundant}
Under Assumption \ref{ass}, if
the $\Gamma$-inequality is
facet defining for $\Qi{A}$, then there exists a circuit
in $F(A)$ defining the same circuit inequality and having fewer bad arcs than $\Gamma$.
\end{theorem}
\begin{myproof}
Assume that the $\Gamma$-inequality is facet defining for $\Qi{A}$.
By Lemma \ref{th:gamma-impliedby-gam1-gam2}(ii) and Corollary~\ref{cor:gamma-sum-gam1-gam2},
$r=1$.
By Lemma \ref{th:gamma-impliedby-gam1-gam2}(i), the $\Gamma$-inequality
coincides with the $\Gamma_2$-inequality. We will prove that $\Gamma_2$ has fewer bad arcs than $\Gamma$. Clearly, $(u,v)$ is a bad arc for $\Gamma$ which is not a bad arc for $\Gamma_2$.
Thus, it suffices
to prove that every bad arc for $\Gamma_2$ is a bad arc for $\Gamma$.
Assume there is a bad arc $(u',v')$ for $\Gamma_2$ which is not a bad arc for $\Gamma$. Since $\Gamma_2$ has winding number $p-1$, $(u',v')$ jumps over $p-2$ essential bullets of $\Gamma_2$ and at least $p$ essential bullets of $\Gamma$. Then, $(u',v')$ must jump over at least two essential bullets of $\Gamma$ that are not essential bullets of $\Gamma_2$. By construction of $\Gamma_2$, the only essential bullets of $\Gamma$ that are not essential bullets of $\Gamma_2$ are the nodes in the set $S=\{b_p\}\cup \{b_{1+tp}: t=1,\ldots, \beta-1\}$. The only pair in $S$ that can be jumped over by the same row arc is the pair $b_p, b_{p+1}$. But, if $(u',v')$ jumps over this pair of nodes, it must also jump over $v$, as $v\in (b_p,b_{p+1})_n$. Finally, since $v$ is an essential bullet of $\Gamma_2$, but not an essential bullet of $\Gamma$, $(u',v')$ must jump over a third essential bullet in $S$, which is not possible.
\end{myproof}
Now consider the case where $(u,v)$ is a bad arc with respect to $\Gamma$
and $v\notin V(\Gamma)$.
The following result holds.
\begin{lemma}
\label{th:v-not-in-VV}
Let $A$ be a circular matrix and $\Gamma$ be a circuit in $F(A)$ with winding number $p\geq 2$, $s$ row arcs and essential bullets
$1\leq b_1<\ldots< b_s\leq n$. Let $(u,v)$ be a bad arc with respect to $\Gamma$ with $u\in B_i=[b_i,v_i)_n$ and
$v\in (v_{i+p-1}, b_{i+p})_n$, for some $i\in [s]$. If the block $B_{i+p}$ is a cross block, the $\Gamma$-inequality is not a facet defining inequality for $\Qi{A}$.
\end{lemma}
\begin{myproof}
Assume w.l.o.g. that $i=1$ and $B_{p+1}$ is a cross block. Therefore, $B^{-}_{p+1}=b_{p+1}$.
Since $(u,v)$ is a bad arc, $B_{1}$ is a circle block. Consider the path $P_1$ in $\Gamma$ from node $b_{p+1}$ to node $b_1$.
Let $\Gamma^1$ be the circuit obtained
by joining the path $P_1$ with the path $\Pi(b_1,u)$ together with arc $(u,v)$, and $\Pi(v,b_{p+1})$. Note that $\Gamma$ and
$\Gamma^1$ have the same number of row arcs and the same winding number $p$.
Let us analyze the relationship between $\otimes(\Gamma^1)$ and $\otimes(\Gamma)$.
Observe that all nodes that are essential bullets of $\Gamma$, except for $b_{p+1}$, are essential bullets of $\Gamma^1$. Additionally, $v$ is an essential bullet of $\Gamma^1$ which is not an essential bullet of $\Gamma$.
If we call $B'_i$ with $i\in [s]$ the blocks of $\Gamma^1$, we have that $B_i=B'_i$ for $i\in [s]\setminus \{1,p+1\}$. Moreover, $B'_1\subset B_1$ and $B'_{p+1}$ is a circle block.
Then, $\otimes(\Gamma^1)$ is strictly contained in $\otimes(\Gamma)$
and the $\Gamma^1$-inequality is stronger than the $\Gamma$-inequality. Hence, the $\Gamma$-inequality does not define a facet of $\Qi{A}$.
\end{myproof}
Now, we can prove:
\begin{lemma}
\label{th:v-not-in-V}
Let $A$ be a circular matrix and $\Gamma$ be a circuit in $F(A)$ with $\otimes(\Gamma) \neq \emptyset$, such that the $\Gamma$-inequality is a facet defining inequality for $\Qi{A}$. Let $(u,v)$ be a bad arc with respect to $\Gamma$ with $v \not\in V(\Gamma)$. Then, there is a circuit
in $F(A)$ defining the same circuit inequality and having fewer bad arcs than $\Gamma$.
\end{lemma}
\begin{myproof}
We assume w.l.o.g. that $u$ belongs to the circle block $B_1$ and then $v\in (v_{p}, b_{p+1})_n$. From the previous lemma, we know that the block $B_{p+1}$ is not a cross block, as otherwise the $\Gamma$-inequality is not facet defining. Consider the circuit $\Gamma^1$ in $F(A)$ as defined in the previous lemma.
Since the $\Gamma$-inequality is facet defining, $\otimes(\Gamma^1)=\otimes(\Gamma)$ and the $\Gamma^1$-inequality coincides with the $\Gamma$-inequality. If $\Gamma^1$ has fewer bad arcs than $\Gamma$, the statement follows.
Otherwise, since $(u,v)$ is a bad arc with respect to $\Gamma$ but not with respect to $\Gamma^1$, there exists a bad arc $(u^1,v^1)$ with respect to $\Gamma^1$ which is not a bad arc with respect to $\Gamma$.
Since the sets of essential bullets from $\Gamma$ and $\Gamma^1$ differ only in the nodes $b_{p+1}$ and $v$, it follows that $(u^1,v^1)$ must jump over $b_{p+1}$ but not over $v$, i.e., we must have $u^1 \in [v,b_{p+1})_n$ and $v^1\in(b_{2p},b_{2p+1})_n$.
If $v^1\in V(\Gamma^1)$, by Theorem \ref{th:v-in-V-redundant}, there exists a circuit
in $F(A)$ with fewer bad arcs than $\Gamma^1$ defining the same circuit inequality and the statement follows.
Now, consider the case $v^1 \not\in V(\Gamma^1)$.
By the previous lemma, $B_{2p+1}$ is not a cross block. Applying iteratively the previous reasoning, either we find a circuit with fewer bad arcs than $\Gamma$ defining the same circuit inequality, or we obtain that none of the blocks induced by $\Gamma$ is a cross block, contradicting the hypothesis $\otimes(\Gamma) \neq \emptyset$.
\end{myproof}
\begin{corollary}
\label{th:v-not-in-V2}
Let $A$ be a circular matrix, $\Gamma$ be a circuit in $F(A)$ with $\otimes(\Gamma) \neq \emptyset$ such that the $\Gamma$-inequality is a facet defining inequality for $\Qi{A}$. Let $(u,v)$ be a bad arc with respect to $\Gamma$. Then, there is a circuit $\Gamma'$ without bad arcs such that the $\Gamma'$-inequality coincides with the $\Gamma$-inequality.
\end{corollary}
\begin{myproof}
Due to Theorem \ref{th:v-in-V-redundant} and Lemma~\ref{th:v-not-in-V} it follows that there is a circuit inducing the same inequality as $\Gamma$ and with fewer bad arcs. Iterating this argument a finite number of times, we prove that there is a circuit $\Gamma'$ without bad arcs which induces the same circuit inequality as $\Gamma$.
\end{myproof}
Observe that if a circuit $\Gamma$ in $F(A)$ has no crosses then the $\Gamma$-inequality is implied by the rank constraint of $\Qi{A}$. Hence, as a consequence of the previous results, we obtain
for the set covering polyhedron of circular matrices a counterpart of the result obtained by Stauffer \cite{Stauffer05} for the stable set polytope of circular interval graphs.
\begin{theorem}
\label{th:complete-desc-minors}
Let $A$ be a circular matrix. A complete linear description for the set covering polyhedron $\Qi{A}$ is
given by boolean inequalities, the rank constraint, and $\Gamma$-inequalities with $\Gamma$ a circuit in $F(A)$ without bad arcs.
Moreover, the relevant inequalities for the set covering polyhedron $\Qi{A}$ are minor related row family inequalities induced by circulant minors $\C{s}{p}$ of $A$, with $\gcd(s,p)=1$.
\end{theorem}
\end{document} |
\begin{document}
\title[A priori bounds and existence of solutions]{A priori bounds and existence of solutions
for some nonlocal elliptic problems}
\author[B. Barrios, L. Del Pezzo, J. Garc\'{\i}a-Meli\'{a}n and A. Quaas]
{B. Barrios, L. del Pezzo, J. Garc\'{\i}a-Meli\'{a}n\\ and A. Quaas}
\date{}
\address{B. Barrios
\break\indent
Department of Mathematics, University of Texas at Austin
\break\indent Mathematics Dept. RLM 8.100 2515 Speedway Stop C1200
\break\indent Austin, TX 78712-1202, USA.}
\email{{\tt bego.barrios@uam.es}}
\address{L. Del Pezzo
\break\indent
CONICET
\break\indent
Departamento de Matem\'{a}tica, FCEyN UBA
\break\indent Ciudad Universitaria, Pab I (1428)
\break\indent Buenos Aires,
ARGENTINA. }
\email{{\tt ldpezzo@dm.uba.ar}}
\address{J. Garc\'{\i}a-Meli\'{a}n
\break\indent
Departamento de An\'{a}lisis Matem\'{a}tico, Universidad de La Laguna
\break \indent C/. Astrof\'{\i}sico Francisco S\'{a}nchez s/n, 38271 -- La Laguna, SPAIN
\break\indent
{\rm and}
\break
\indent Instituto Universitario de Estudios Avanzados (IUdEA) en F\'{\i}sica
At\'omica,
\break\indent Molecular y Fot\'onica,
Universidad de La Laguna
\break\indent C/. Astrof\'{\i}sico Francisco
S\'{a}nchez s/n, 38203 -- La Laguna, SPAIN.}
\email{{\tt jjgarmel@ull.es}}
\address{A. Quaas
\break\indent
Departamento de Matem\'{a}tica, Universidad T\'ecnica Federico Santa Mar\'{\i}a
\break\indent Casilla V-110, Avda. Espa\~na, 1680 --
Valpara\'{\i}so, CHILE.}
\email{{\tt alexander.quaas@usm.cl}}
\begin{abstract}
In this paper we show existence of solutions for some elliptic problems with nonlocal diffusion
by means of nonvariational tools. Our proof is based on the use of topological
degree, which requires a priori bounds for the solutions. We obtain the a priori
bounds by adapting the classical scaling method of Gidas and Spruck. We also
deal with problems involving gradient terms.
\end{abstract}
\maketitle
\section{Introduction}
\setcounter{section}{1}
\setcounter{equation}{0}
Nonlocal diffusion problems have received considerable attention during the last
years, mainly because of their appearance when modelling different situations.
To name a few, let us mention anomalous diffusion and quasi-geostrophic flows, turbulence and
water waves, molecular dynamics and relativistic quantum mechanics of stars
(see \cite{BoG,CaV,Co,TZ} and references therein). They also appear in mathematical
finance (cf. \cite{A,Be,CoT}), elasticity problems \cite{signorini},
thin obstacle problem \cite{Caf79}, phase transition \cite{AB98, CSM05, SV08b}, crystal dislocation \cite{dfv, toland}
and stratified materials \cite{savin_vald}.
A particular class of nonlocal operators which have been widely analyzed is given, up to
a normalization constant, by
$$
(-\Delta)^s_K u(x) = \int_{\mathbb R^N} \frac{2u(x) -u(x+y)-u(x-y)}{|y|^{N+2s}} K(y) dy,
$$
where $s\in (0,1)$ and $K$ is a measurable function defined in $\mathbb R^N$ ($N\ge 2$). A remarkable
example of such operators is obtained by setting $K=1$, when $(-\Delta)^s_K$ reduces to the
well-known fractional Laplacian (see \cite[Chapter 5]{Stein} or \cite{NPV, Landkof, S}
for further details). Of course, we will
require the operators $(-\Delta)^s_K$ to be elliptic, which in our context means that there exist positive
constants $\lambda\le \Lambda$ such that
\begin{equation}\label{elipticidad}
\lambda \le K(x) \le \Lambda \quad \hbox{in } \mathbb R^N
\end{equation}
(cf. \cite{CS}). While there is a large literature dealing with this class of operators,
very little is known about existence of solutions for nonlinear problems, except
for cases where variational methods can be employed (see for instance \cite{barrios2, barrios4, barrios3, SV2, servadeivaldinociBN, servadeivaldinociBNLOW}
and references therein).
But when the problem under consideration is not of variational type, for instance when
gradient terms are present, as far as we know, results about existence of solutions are very scarce in the
literature.
Thus our objective is to find a way to show existence of solutions for some problems
under this assumption. For this aim, we will resort to the use of the fruitful topological
methods, in particular Leray-Schauder degree.
It is well-known that the use of these methods requires the knowledge of the so-called
a priori bounds for all possible solutions. Therefore we will be
mainly concerned with the obtention of these a priori bounds for a particular class of
equations. A natural starting point for this program is to consider the problem:
\begin{equation}\label{problema}
\left\{
\begin{array}{ll}
(-\Delta)_K^s u = u^p + g(x,u) & \hbox{in }\Omega,\\[0.35pc]
\ \ u=0 & \hbox{in }\mathbb R^N \setminus \Omega,
\end{array}
\right.
\end{equation}
where $\Omega \subset \mathbb R^N$ is a smooth bounded domain, $p>1$ and $g$ is a perturbation term which is
small in some sense. Under several expected restrictions on $g$ and $p$ we will show
that all positive solutions of this problem are a priori bounded. The most important
requirement is that $p$ is subcritical, that is
\begin{equation}\label{subcritico}
1<p< \frac{N+2s}{N-2s}
\end{equation}
and that the term $g(x,u)$ is a small perturbation of $u^p$ at infinity. By adapting the
classical scaling method of Gidas and Spruck (\cite{GS}) we can show that all positive solutions
of \eqref{problema} are a priori bounded.
An important additional assumption that we will be imposing on the kernel $K$ is that
\begin{equation}\label{continuidad}
\lim_{x\to 0} K(x)=1.
\end{equation}
It is important to clarify at this moment that we are always dealing with
viscosity solutions $u \in C(\mathbb R^N)$
in the sense of \cite{CS}, although in some cases the solutions will turn out to be more regular with the
help of the regularity theory developed in \cite{CS, CS2}.
With regard to problem \eqref{problema}, our main result is the following:
\begin{teorema}\label{th-1}
Assume $\Omega$ is a $C^2$ bounded domain of $\mathbb R^N$, $N\ge 2$, $s\in (0,1)$ and $p$ verifies
\eqref{subcritico}. Let $K$ be a measurable kernel that satisfies \eqref{elipticidad} and \eqref{continuidad}.
If $g\in C(\Omegab \times \mathbb R)$ verifies
$$
|g(x,z)| \le C |z|^r \qquad x\in \Omegab, \ z\in \mathbb R,
$$
where $1<r<p$, then problem \eqref{problema} admits at least a positive
viscosity solution.
\end{teorema}
It is to be noted that the scaling method requires, on one hand, good estimates for solutions,
both interior and at the boundary, and, on the other hand, a Liouville theorem in $\mathbb R^N$.
In the present case interior estimates are well known (cf. \cite{CS}), but good local estimates
near the boundary do not seem to be available. We overcome this problem by constructing
suitable barriers which can be controlled when the scaled domains are moving. It is worthy of
mention at this point that the corresponding Liouville theorems are already available (cf.
\cite{ZCCY,CLO1,QX,FW}).
Let us also mention that we were not aware of any work dealing with the question of a priori
bounds for problem \eqref{problema}; however, when we were completing this manuscript, it has
just come to our attention the very recent preprint \cite{CLC}, where a priori bounds for
smooth solutions are obtained in problem \eqref{problema} with $K=1$ and $g=0$ (but no
existence is shown).
On the other hand, it is important to mention the papers \cite{BCPS,CT,CZ,C},
where a priori bounds and Liouville results have been obtained for related operators, like
the ``spectral'' fractional Laplacian. To see some differences between this operator and $(-\Delta)^s$,
obtained by setting $K=1$ in the present work, see for instance \cite{SV}.
In all the previous works dealing with the spectral fractional Laplacian, the main tool is the well-known
Caffarelli-Silvestre extension obtained in \cite{CS3}. This tool is not available for us here,
hence we will treat the problem in a nonlocal way with a direct approach.
As we commented before, we will also be concerned with the adaptation of the previous result to some more general
equations. More precisely, we will study the perturbation of equation
\eqref{problema} with the introduction of gradient terms, that is,
\begin{equation}\label{problema-grad}
\left\{
\begin{array}{ll}
(-\Delta)_K^s u = u^p + h(x,u,\nabla u) & \hbox{in }\Omega,\\[0.35pc]
\ \ u=0 & \hbox{in }\mathbb R^N \setminus \Omega.
\end{array}
\right.
\end{equation}
For the type of nonlocal equations that we are analyzing, a natural restriction in order
that the gradient is meaningful is $s>\frac{1}{2}$. However, there seem to be few works
dealing with nonlocal equations with gradient terms (see for example
\cite{AI,BCI,BK2,CaV,CL,CV,GJL,S2,SVZ,W}).
It is to be noted that, at least in the case $K=1$, since solutions $u$
are expected to behave
like ${\rm dist}(x,\partial\Omega)^s$ near the boundary by Hopf's principle
(cf. \cite{ROS}), then the
gradient is expected to be singular near $\partial\Omega$. This implies that the standard scaling
method has to be modified to take care of this singularity. We achieve this by introducing
some suitable weighted norms which have been already used in the context of second order
elliptic equations (cf. \cite{GT}).
However, the introduction of these weighted norms presents some problems since the
scaling needed near the boundary is not the same one as in the interior. Therefore we
need to split our study into two parts: first, we obtain ``rough" universal bounds for all
solutions of \eqref{problema-grad}, by using the well-known doubling lemma in \cite{PQS}.
Since our problems are nonlocal in nature this forces us to strengthen the
subcriticality hypothesis \eqref{subcritico} and to require instead
\begin{equation}\label{subserrin}
1<p< \frac{N}{N-2s}
\end{equation}
(cf. Remarks \ref{comentario} (b) in Section 3).
After that, we reduce the obtention of the a priori bounds to an analysis near the boundary.
With a suitable scaling, the lack of a priori bounds leads to a problem in a half-space which
has no solutions according to the results in \cite{QX} or \cite{FW}.
It is worth stressing that the main results in this paper rely in the construction
of suitable barriers for equations with a singular right-hand side, which are well-behaved with respect to
suitable perturbations of the domain (cf. Section \ref{s2}).
Let us finally state our result for problem \eqref{problema-grad}. In this context, a solution of
\eqref{problema-grad} is a function $u\in C^1(\Omega)\cap C(\mathbb R^N)$ vanishing outside $\Omega$ and
verifying the equation in the viscosity sense.
\begin{teorema}\label{th-grad}
Assume $\Omega$ is a $C^2$ bounded domain of $\mathbb R^N$, $N\ge 2$, $s \in (\frac{1}{2},1)$ and $p$ verifies
\eqref{subserrin}. Let $K$ be a measurable kernel that satisfies \eqref{elipticidad} and \eqref{continuidad}. If
$h\in C(\Omega \times \mathbb R\times \mathbb R^N)$ is nonnegative and verifies
$$
h(x,z,\xi) \le C (|z|^r + |\xi|^t), \quad x\in\Omega,\ z\in \mathbb R,\ \xi\in \mathbb R^N,
$$
where $1<r<p$ and $1<t<\frac{2sp}{p+2s-1}$, then problem \eqref{problema-grad} admits at
least a positive solution.
\end{teorema}
The rest of the paper is organized as follows: in Section 2 we recall some interior regularity
results needed for our arguments, and we solve some linear problems by constructing
suitable barriers. Section 3 is dedicated to the obtention of a priori bounds, while in Section 4
we show the existence of solutions, that is, we give the proofs of Theorems \ref{th-1} and
\ref{th-grad}.
\section{Interior regularity and some barriers}\label{s2}
\setcounter{section}{2}
\setcounter{equation}{0}
The aim of this section is to collect several results regarding the construction of
suitable barriers and also some interior regularity for equations related to \eqref{problema}
and \eqref{problema-grad}. We will use throughout the standard convention that the letter $C$ denotes
a positive constant, probably different from line to line.
Consider $s\in (0,1)$, a measurable kernel $K$ verifying \eqref{elipticidad}
and \eqref{continuidad} and a $C^2$ bounded domain $\Omega$. We begin by analyzing the linear equation
\begin{equation}\label{eq-regularidad}
(-\Delta)^s_K u = f \quad \hbox{in } \Omega,
\end{equation}
where $f\in L^\infty_{\rm loc}(\Omega)$. As a consequence of Theorem 12.1 in
\cite{CS} we get that if $u \in C(\Omega)\cap L^\infty( \mathbb R^N)$ is a viscosity solution of \eqref{eq-regularidad} then
$u\in C^\alpha_{\rm loc}(\Omega)$ for some $\alpha \in (0,1)$. Moreover, for every ball $B_R\subset \subset \Omega$
there exists a positive constant $C=C(N,s,\lambda,\Lambda,R)$ such that:
\begin{equation}\label{est-ca}
\| u\|_{C^{\alpha}(\overline{B_{R/2}})} \le C\left(\| f\| _{L^\infty(B_R)} + \| u\|_{L^\infty(\mathbb R^N)}\right).
\end{equation}
The precise dependence of the constant $C$ on $R$ can be determined by means of a simple scaling,
as in Lemma \ref{lema-regularidad} below; however, for interior estimates this will be of no
importance to us.
When $s>\frac{1}{2}$, the H\"older estimate for the solution can be improved to obtain an
estimate for the first derivatives. In fact, as a consequence of Theorem 1.2 in \cite{K}, we have that
$u\in C^{1,\beta}_{\rm loc}(\Omega)$, for some $\beta=\beta(N,s,\lambda,\Lambda) \in (0,1)$. Also, for every
ball $B_R\subset \subset \Omega$ there exists a positive constant $C=C(N,s,\lambda,\Lambda,R)$ such that:
\begin{equation}\label{est-c1a}
\| u\|_{C^{1,\beta}(\overline{B_{R/2}})} \le C
\left( \| f\| _{L^\infty(B_R)} + \| u\|_{L^\infty(\mathbb R^N)}\right).
\end{equation}
Both estimates will play a prominent role in our proof of a priori bounds for positive solutions of
\eqref{problema} and \eqref{problema-grad}.
Next we need to deal with problems with a right hand side which is possibly singular at
$\partial \Omega$. For this aim, it is convenient to introduce some norms which will help us to quantify
the singularity of both the right hand sides and the gradient of the solutions in case
$s>\frac{1}{2}$.
Let us denote, for $x\in \Omega$, $d(x)={\rm dist}(x,\partial \Omega)$. It is well known that $d$ is Lipschitz
continuous in $\Omega$ with Lipschitz constant 1 and it is a $C^2$ function in a neighborhood of $\partial\Omega$.
We modify it outside this neighborhood to make it a $C^2$ function (still with Lipschitz constant
1), and we extend it to be zero outside $\Omega$.
Now, for $\theta \in \mathbb R$ and $u \in C(\Omega)$, let us denote (cf. Chapter 6 in \cite{GT}):
$$
\| u\|_0^{(\theta)} =\sup_\Omega\; d(x)^{\theta} |u(x)|.
$$
When $u\in C^1(\Omega)$ we also set
\begin{equation}\label{norma-c1}
\| u\|_1^{(\theta)} = \sup_\Omega\; \left(d(x)^{\theta} |u(x)|+ d(x)^{\theta+1} |\nabla u(x)|\right).
\end{equation}
Then we have the following existence result for the Dirichlet problem associated to \eqref{eq-regularidad}.
\begin{lema}\label{lema-existencia}
Assume $\Omega$ is a $C^2$ bounded domain, $0<s<1$ and $K$ is a measurable function
verifying \eqref{elipticidad} and \eqref{continuidad}. Let $f\in C(\Omega)$ be such that
$\| f \|_0^{(\theta)}<+\infty$ for some $\theta \in (s,2s)$. Then the problem
\begin{equation}\label{prob-lineal}
\left\{
\begin{array}{ll}
(-\Delta)_K^s u=f & \hbox{in } \Omega,\\[0.35pc]
\; \; u=0 & \hbox{in } \mathbb R^N\setminus \Omega,
\end{array}
\right.
\end{equation}
admits a unique viscosity solution. Moreover, there exists a positive constant $C$
such that
\begin{equation}\label{est-1}
\| u \|_0^{(\theta-2s)} \le C \| f \| _0^{(\theta)}.
\end{equation}
Finally, if $f\ge 0$ in $\Omega$ then $u\ge 0$ in $\Omega$.
\end{lema}
The proof of this result relies in the construction of a suitable barrier in a neighborhood of
the boundary of $\Omega$ which we will undertake in the following lemma. This barrier
will also turn out to be important to obtain
bounds for the solutions when trying to apply the scaling method. It is worthy of mention
that for quite general operators, the lemma below can be obtained provided that
$\theta$ is taken close enough to $2s$ (cf. for instance Lemma 3.2 in \cite{FQ}). But
the precise assumptions we are imposing on $K$, specifically \eqref{continuidad},
allow us to construct the barrier in
the whole range $\theta \in (s,2s)$.
In what follows, we denote, for small positive $\delta$,
$$
\Omega_\delta=\{x\in \Omega: \hbox{dist}(x,\partial \Omega)<\delta\},
$$
and $K_\mu(x)= K(\mu x)$ for $\mu>0$.
\begin{lema}\label{lema-barrera-1}
Let $\Omega$ be a $C^2$ bounded domain of $\mathbb R^N$, $0<s<1$ and $K$ be measurable and
verify \eqref{elipticidad} and \eqref{continuidad}.
For every $\theta \in (s,2s)$ and $\mu_0>0$, there exist
$C_0,\delta>0$ such that
$$
(-\Delta)^s_{K_\mu} d^{2s-\theta} \ge C_0 d^{-\theta} \quad \hbox{in } \Omega_\delta,
$$
if $0<\mu \le \mu_0$.
\end{lema}
\begin{proof}
By contradiction, let us assume that the conclusion of the lemma is not true. Then there exist $\theta\in
(s,2s)$, $\mu_0>0$, sequences of points $x_n\in \Omega$ with
$d(x_n)\to 0$ and numbers
$\mu_n\in (0,\mu_0]$ such that
\begin{equation}\label{contradiction}
\lim_{n\to +\infty} d(x_n)^{\theta} (-\Delta)^s_{K_{\mu_n}} d^{2s-\theta} (x_n) \le 0.
\end{equation}
Denoting for simplicity $d_n:=d(x_n)$, and performing the change of variables $y= d_n z$
in the integral appearing in \eqref{contradiction} we obtain
\begin{equation}\label{contradiction-2}
\int_{\mathbb R^N} \frac{2 - \left(\frac{d(x_n+d_n z)}{d_n}\right)^{2s-\theta}-
\left(\frac{d(x_n-d_n z)}{d_n}\right)^{2s-\theta}}{|z|^{N+2s}} K(\mu_n d_n z) dz \le o(1).
\end{equation}
Before passing to the limit in this integral, let us estimate it from below. Observe that when
$x_n+d_n z\in \Omega$, we have by the Lipschitz property of $d$ that $d(x_n+d_n z) \le d_n (1+|z|)$.
Of course, the same is true when $x_n +d_n z \not\in \Omega$ and it similarly follows that $d(x_n-d_n z)
\le d_n (1+|z|)$. Thus, taking $L>0$ we obtain for large $n$
\begin{equation}\label{ineq1}
\begin{array}{l}
\displaystyle \int_{|z| \ge L}
\frac{2 - \left(\frac{d(x_n+d_n z)}{d_n}\right)^{2s-\theta}- \left(\frac{d(x_n-d_n z)}{d_n}\right)^{2s-\theta}}
{|z|^{N+2s}} K(\mu_n d_n z) dz\\[1.4pc]
\quad \ge \displaystyle - 2 \Lambda \int_{ |z| \ge L} \frac{(1+|z|)^{2s-\theta}}{|z|^{N+2s}} dz.
\end{array}
\end{equation}
On the other hand, since $d$ is smooth in a neighborhood of the boundary, when $|z|\le L$
and $x_n +d_n z\in \Omega$, we obtain by Taylor's theorem
\begin{equation}\label{taylor}
d(x_n + d_n z )= d_n + d_n \nabla d(x_n) z + \Theta_n(d_n,z) d_n^2 |z|^2,
\end{equation}
where $\Theta_n$ is uniformly bounded. Hence
\begin{equation}\label{eq3}
d(x_n+d_n z ) \le d_n + d_n \nabla d(x_n) z + C d_n^2 |z|^2.
\end{equation}
Now choose $\eta \in (0,1)$ small enough. Since $d(x_n)\to 0$ and $|\nabla d|=1$ in a
neighborhood of the boundary, we can assume that
\begin{equation}\label{extra1}
\nabla d(x_n)\to e \hbox{ as }n\to +\infty \hbox{ for some unit vector }e.
\end{equation}
Without loss of generality, we may take
$e=e_N$, the last vector of the canonical basis of $\mathbb R^N$. If we restrict $z$ further to satisfy $|z|\le \eta$,
we obtain $1+\nabla d(x_n) z \sim 1 + z_N \ge 1-\eta>0$ for large $n$, since $|z_N| \le |z|\le \eta$.
Therefore, the right-hand side in \eqref{eq3} is positive for large $n$ (depending only on
$\eta$), so that the inequality \eqref{eq3} is also true when
$x_n+d_n z\not\in \Omega$. Moreover, by using again Taylor's theorem
$$
(1+\nabla d(x_n) z + C d_n |z|^2)^{2s-\theta} \le 1+ (2s-\theta) \nabla d(x_n) z + C |z|^2,
$$
for large enough $n$. Thus from \eqref{eq3},
$$
\left(\frac{d(x_n+d_n z)}{d_n}\right)^{2s-\theta} \le 1+ (2s-\theta) \nabla d(x_n) z + C |z|^2,
$$
for large enough $n$. A similar inequality is obtained for the term involving $d(x_n-d_n z)$. Therefore we deduce
that
\begin{equation}\label{ineq2}
\begin{array}{l}
\displaystyle \int_{ |z| \le \eta }
\frac{2 - \left(\frac{d(x_n+d_n z)}{d_n}\right)^{2s-\theta}- \left(\frac{d(x_n-d_n z)}{d_n}\right)^{2s-\theta}}{|z|^{N+2s}}
K(\mu_n d_n z) dz\\[1.4pc]
\quad \ge \displaystyle - 2 \Lambda C \int_{ |z| \le \eta} \frac{1}{|z|^{N-2(1-s)}} dz.
\end{array}
\end{equation}
We finally observe that it follows from the above discussion (more precisely from \eqref{taylor} and
\eqref{extra1} with $e=e_N$) that for $\eta \le |z| \le L$
\begin{equation}\label{extra2}
\frac{d(x_n \pm d_n z)}{d_n} \to (1\pm z_N)_+ \qquad \hbox{as } n \to +\infty.
\end{equation}
Therefore using \eqref{ineq1}, \eqref{ineq2} and \eqref{extra2}, and passing to the limit as
$n\to +\infty$ in \eqref{contradiction-2}, by dominated convergence we arrive at
$$
\begin{array}{ll}
\displaystyle - 2 \Lambda \int_{ |z| \ge L} \frac{(1+|z|)^{2s-\theta}}{|z|^{N+2s}} dz +
\int_{ \eta \le |z| \le L} \frac{2 - (1+z_N)_+^{2s-\theta}- (1-z_N)_+^{2s-\theta}}{|z|^{N+2s}} dz \\[1.4pc]
\displaystyle \qquad \qquad - 2 \Lambda C \int_{ |z| \le \eta} \frac{1}{|z|^{N-2(1-s)}} dz \le 0.
\end{array}
$$
We have also used that $\lim_{n\to +\infty} K(\mu_n d_n z)=1$ uniformly, by \eqref{continuidad}
and the boundedness of $\{\mu_n\}$. Letting now $\eta \to 0$ and then $L\to +\infty$, we have
$$
\int_{ \mathbb R^N} \frac{2 - (1+z_N)_+^{2s-\theta}- (1-z_N)_+^{2s-\theta}}{|z|^{N+2s}} dz \le 0.
$$
It is well-known, with the use of Fubini's theorem and a change of variables, that this
integral can be rewritten as a one-dimensional integral
\begin{equation}\label{contra-final}
\int_ \mathbb R \frac{2 - (1+t)_+^{2s-\theta}- (1-t)_+^{2s-\theta}}{|t|^{1+2s}} dt \le 0.
\end{equation}
We will see that this is impossible because of our assumption $\theta \in (s,2s)$. Indeed,
consider the function
$$
F(\tau) = \int_ \mathbb R \frac{ 2- (1+t)_+^\tau- (1-t)_+^\tau}{|t|^{1+2s}} dt, \quad \tau \in (0,2s),
$$
which is well-defined. We claim that $F \in C^\infty(0,2s)$ and it is strictly concave. In fact, observe that
for $k\in \mathbb N$, the candidate for the $k-$th derivative $F^{(k)}(\tau)$ is given by
$$
- \int_ \mathbb R \frac{(1+t)_+^\tau (\log(1+t))_+^k + (1-t)_+^\tau (\log(1-t))_+^k}{|t|^{1+2s}} dt.
$$
It is easily seen that this integral converges for every $k\ge 1$, since by Taylor's expansion for $t\sim 0$ we
deduce $(1+t)^\tau (\log(1+t))^k + (1-t)^\tau (\log(1-t))^k =O( t^2)$. Therefore it follows that
$F$ is $C^\infty$ in $(0,2s)$. To see that $F$ is strictly concave, just notice that
$$
F''(\tau)= - \int_\mathbb R \frac{ (1+t)_+^\tau (\log (1+t)_+ )^2+ (1-t)_+^\tau (\log (1-t)_+ )^2}{|t|^{1+2s}}
dt < 0.
$$
Finally, it is clear that $F(0)=0$. Moreover, since $v(x)=(x_+)^s$, $x\in \mathbb R$ verifies $(-\Delta)^s v=0$ in $\mathbb R_+$
(see for instance the introduction in \cite{CJS} or Proposition 3.1 in \cite{ROS}), we also deduce that $F(s)=0$.
By strict concavity we have $F(\tau)>0$ for $\tau\in (0,s)$, which clearly contradicts \eqref{contra-final} if
$\theta\in (s,2s)$. Therefore \eqref{contra-final} is not true and this concludes the proof of the lemma.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lema-existencia}]
By Lemma \ref{lema-barrera-1} with $\mu_0=1$, there exist $C_0>0$ and $\delta>0$
such that
\begin{equation}\label{extra3}
\mbox{$(-\Delta)^s_K d^{2s-\theta} \ge C_0 d^{-\theta}$ in $\Omega_\delta$.}
\end{equation}
Let us show
that it is possible to construct a supersolution of the problem
\begin{equation}\label{supersolucion}
\left\{
\begin{array}{ll}
(-\Delta)_K^s v=C_0 d^{-\theta} & \hbox{in } \Omega,\\[0.35pc]
\; \; v=0 & \hbox{in } \mathbb R^N\setminus \Omega,
\end{array}
\right.
\end{equation}
vanishing outside $\Omega$.
First of all, by Theorem 3.1 in \cite{FQ}, there exists a nonnegative
function $w\in C(\mathbb R^N)$ such that $(-\Delta)_K^s w = 1$ in $\Omega$,
with $w=0$ in $\mathbb R^N \setminus \Omega$.
We claim that $v= d^{2s-\theta} + t w$ is a supersolution of \eqref{supersolucion}
if $t>0$ is large enough. For this aim, observe that $(-\Delta)^s_K d^{2s-\theta} \ge -C$ in $\Omega \setminus
\Omega_\delta$, since $d$ is a $C^2$ function there. Therefore,
$$
\mbox{$(-\Delta)^s_K v\ge t-C \ge C_0 d^{-\theta}$
in $\Omega\setminus \Omega_\delta$}
$$
if $t$ is large enough. Since clearly $(-\Delta)^s_K v \ge C_0 d^{-\theta}$
in $\Omega_\delta$ as well, we see that $v$ is a supersolution of \eqref{supersolucion}, which vanishes
outside $\Omega$.
Now choose a sequence of smooth functions $\{\psi_n\}$ verifying $0\le \psi_n\le 1$, $\psi_n=1$ in
$\Omega \setminus \Omega_{2/n}$ and $\psi_n=0$ in $\Omega_{1/n}$. Define $f_n= f\psi_n$, and consider
the problem
\begin{equation}\label{perturbado}
\left\{
\begin{array}{ll}
(-\Delta)_K^s u= f_n & \hbox{in } \Omega,\\[0.35pc]
\; \; u=0 & \hbox{in } \mathbb R^N\setminus \Omega.
\end{array}
\right.
\end{equation}
Since $f_n \in C(\Omegab)$, we can use Theorem 3.1 in \cite{FQ} which gives a viscosity solution
$u_n\in C(\mathbb R^N)$ of \eqref{perturbado}.
On the other hand, $|f_n| \le |f| \le \| f \|_0^{(\theta)} d^{-\theta}$ in $\Omega$, so that the functions
$v_{\pm} = \pm C_0^{-1} \| f \|_0^{(\theta)} v$ are sub and supersolution of \eqref{perturbado}.
By comparison (cf. Theorem 5.2 in \cite{CS}), we obtain
$$
- C_0^{-1} \| f\| _0^{(\theta)} v \le u_n \le C_0^{-1} \| f\| _0^{(\theta)}v \qquad \hbox{in } \Omega.
$$
Now, this bound together with \eqref{est-ca}, Ascoli-Arzel\'a's theorem and a standard diagonal
argument allow us to obtain a subsequence, still denoted by $\{u_n\}$, and a function $u\in C(\Omega)$
such that $u_n\to u$ uniformly on compact sets of $\Omega$. In addition, $u$ verifies
\begin{equation}\label{eq-ult}
|u |\le C_0^{-1} \| f\| _0^{(\theta)}v \quad \hbox{in }\Omega.
\end{equation}
By Corollary 4.7 in \cite{CS}, we can pass to the limit in \eqref{perturbado}
to obtain that $u \in C(\mathbb{R}^{N})$ is a viscosity solution of \eqref{prob-lineal}.
Moreover inequality \eqref{eq-ult} implies that $|u |\le
C \| f\| _0^{(\theta)} d^{2s-\theta}$ in $\Omega\setminus\Omega_\delta$ for some $C>0$, so that,
by \eqref{prob-lineal}, \eqref{extra3} and the comparison principle, we obtain that
$$
|u| \le C \| f\|_0^{(\theta)} d^{2s-\theta} \quad \hbox{in }\Omega
$$
which shows \eqref{est-1}.
The uniqueness and the nonnegativity of $u$ when $f \ge 0$ are a consequence
of the maximum principle (again Theorem 5.2 in \cite{CS}). This concludes the proof.
\end{proof}
Our next estimate concerns the gradient of the solutions of \eqref{prob-lineal} when
$s>\frac{1}{2}$. The proof is more or less standard starting from \eqref{est-c1a}
(cf. \cite{GT}) but we include it for completeness.
\begin{lema}\label{lema-regularidad}
Assume $\Omega$ is a smooth bounded domain and $s>\frac{1}{2}$. There exists a constant $C_0$
which depends on $N,s, \lambda$ and $\Lambda$ but not on $\Omega$ such that, for every $\theta \in (s,2s)$
and $f\in C(\Omega)$ with $\| f \|_0^{(\theta)}<+\infty$ the unique solution
$u$ of \eqref{prob-lineal} verifies
\begin{equation}\label{est-adimensional}
\| \nabla u \|_0^{(\theta - 2s +1)} \le C_0 ( \|f \|_0^{(\theta)} + \|u \|_{0}^{(\theta - 2s)}).
\end{equation}
\end{lema}
\begin{proof}
By \eqref{est-c1a} with $R=1$ we know that if
$(-\Delta)^s_K u=f$ in $B_1$ then there exists a constant which depends on $N,s, \lambda$ and
$\Lambda$ such that $\| \nabla u\|_{L^\infty (B_{1/2})} \le C ( \| f\| _{L^\infty(B_1)} + \| u\|_{L^\infty(\mathbb R^N)})$.
By a simple scaling, it can be seen that if $(-\Delta)^s_K u=f$ in $\Omega$ and $B_R\subset \subset \Omega$ then
$$
R \| \nabla u\|_{L^\infty (B_{R/2})} \le C ( R^{2s} \| f\| _{L^\infty(B_R)} + \| u\|_{L^\infty(\mathbb R^N)}).
$$
Choose a point $x\in \Omega$. By applying the previous inequality in the ball $B=B_{d(x)/2}(x)$ and
multiplying by $d(x)^{\theta-2s}$ we arrive at
$$
d(x)^{\theta-2s+1} | \nabla u(x) | \le C \left( d(x)^\theta \| f\| _{L^\infty(B)} +
d(x)^{\theta-2s} \| u\|_{L^\infty(\mathbb R^N)}\right).
$$
Finally, notice that $\frac{d(x)}{2} < d(y) <\frac{3d(x)}{2}$ for every $y\in B$, so that $d(x)^\theta |f(y)|
\le 2^\theta d(y)^\theta |f(y)| \le 2^{2s} \| f \|_0^{(\theta)}$, this implying $d(x)^\theta \| f \|_{L^\infty(B)}
\le 2^{2s} \| f \|_0^{(\theta)}$. A similar inequality can be achieved for the term involving $\| u\|_{L^\infty(\mathbb R^N)}$.
After taking supremum, \eqref{est-adimensional} is obtained.
\end{proof}
Our next lemma is intended to take care of the constant in \eqref{est-1} when
we consider problem \varepsilonqref{prob-lineal} in expanding domains, since in general it depends on
$\Omega$. This is the key for the
scaling method to work properly in our setting. For a $C^2$ bounded domain $\Omega$, we take
$\xi \in \partial\Omega$, $\mu>0$ and let
$$
\mbox{$\Omega^\mu:=\{y\in \mathbb R^N:\ \xi +\mu y\in \Omega\}$.}
$$
It is clear then that
$d_\mu(y):={\rm dist}(y,\partial \Omega^\mu)=\mu^{-1} d(\xi +\mu y)$.
Let us explicitly
remark that the constant in \eqref{est-1} for the solution of
\eqref{prob-lineal} posed in $\Omega^\mu$
will depend then on the domain $\Omega$, but not on the dilation parameter $\mu$, as we show next.
\begin{lema}\label{lema-barrera-2}
Assume $\Omega$ is a $C^2$ bounded domain, $0<s<1$ and $K$ is a measurable function verifying \eqref{elipticidad}
and \eqref{continuidad}. For every $\theta \in (s,2s)$ and $\mu_0>0$, there exist $C_0,\delta>0$ such that
$$
(-\Delta)^s_{K_\mu} d_\mu ^{2s-\theta} \ge C_0 d_\mu^{-\theta} \quad \hbox{in } (\Omega^\mu)_\delta,
$$
if $0<\mu \le \mu_0$. Moreover, if $u$ verifies $(-\Delta)_{K_\mu}^s u \le C_1 d_\mu^{-\theta}$ in $\Omega^\mu$
for some $C_1>0$ with $u=0$ in $\mathbb R^N\setminus \Omega^\mu$, then
$$
u (x) \le C_2( C_1 +\|u \|_{L^\infty(\Omega^\mu)} )\; d_\mu ^{2s-\theta}
\quad \hbox{for } x \in (\Omega^\mu)_\delta,
$$
for some $C_2>0$ only depending on $s$, $\delta$, $\theta$ and $C_0$.
\end{lema}
\begin{proof}
The first part of the proof is similar to that of Lemma \ref{lema-barrera-1} but taking a little more care
in the estimates. By contradiction let us assume that there exist sequences
$\xi_n\in \partial \Omega$, $\mu_n \in (0,\mu_0]$ and
$$
\mbox{$x_n \in \Omega^n:=\{y\in \mathbb R^N:\ \xi_n + \mu_n y\in \Omega\}$},
$$
such that $d_n(x_n)\to 0$ and
$$
d_n(x_n)^{\theta} (-\Delta)^s_{K_{\mu_n}} d_n^{2s-\theta} (x_n) \le o(1).
$$
Here we have denoted
$$
\mbox{$d_n(y):={\rm dist}(y,\partial \Omega^n) = \mu_n^{-1} d(\xi_n+\mu_n y)$.}
$$
For $L>0$, we obtain as in Lemma \ref{lema-barrera-1}, letting $d_n=d_n(x_n)$
$$
\begin{array}{l}
\displaystyle \int_{|z| \ge L}
\frac{2 - \left(\frac{d_n(x_n+d_n z)}{d_n}\right)^{2s-\theta}- \left(\frac{d_n (x_n-d_n z)}{d_n}\right)^{2s-\theta}}
{|z|^{N+2s}} K(\mu_n d_n z) dz\\[1.4pc]
\quad \ge \displaystyle - 2 \Lambda \int_{ |z| \ge L} \frac{(1+|z|)^{2s-\theta}}{|z|^{N+2s}} dz.
\end{array}
$$
Moreover, we also have an equation like \eqref{taylor}. In fact taking into account that
$\| D^2 d_n \| = \mu_n \| D^2 d\|$ is bounded we have for $|z|\le \eta <1$:
$$
d_n (x_n \pm d_n z ) \le d_n \pm d_n \nabla d_n (x_n) z + C d_n^2 |z|^2,
$$
with a constant $C>0$ independent of $n$. Hence
$$
\begin{array}{l}
\displaystyle \int_{ |z| \le \eta }
\frac{2 - \left(\frac{d_n(x_n+d_n z)}{d_n}\right)^{2s-\theta}- \left(\frac{d_n(x_n-d_n z)}{d_n}\right)^{2s-\theta}}{|z|^{N+2s}}
K(\mu_n d_n z) dz\\[1.4pc]
\quad \ge \displaystyle - 2 \Lambda C \int_{ |z| \le \eta} \frac{1}{|z|^{N-2(1-s)}} dz.
\end{array}
$$
Now observe that $d_n(x_n)\to 0$ implies in particular $d(\xi_n+\mu_n x_n) \to 0$, so that
$|\nabla d(\xi_n+\mu_n x_n)|=1$ for large $n$ and then $|\nabla d_n (x_n)|=1$. As in \eqref{extra1},
passing to a subsequence we may assume that $\nabla d_n(x_n)\to e_N$. Then
$$
\frac{d_n(x_n \pm d_n z)}{d_n} \to (1\pm z_N)_+ \qquad \hbox{as } n \to +\infty,
$$
for $\eta \le |z| \le L$ and the proof of the first part concludes as in Lemma \ref{lema-barrera-1}.
Now let $u$ be a viscosity solution of
$$\left\{
\begin{array}{ll}
(-\Delta)_{K_\mu}^s u \le C_1 d_\mu^{-\theta} & \hbox{in } \Omega^\mu,\\[0.35pc]
\ \ u=0 & \hbox{in }\mathbb R^N\setminus \Omega^\mu.
\end{array}
\right.
$$
Choose $R>0$ and let $v=R d_\mu^{2s-\theta}$. Then
clearly
$$
\mbox{$(-\Delta)^s_{K_\mu} v \ge RC_0 d_\mu ^{-\theta}\ge C_1 d_\mu^{-\theta} \ge (-\Delta)^s_{K_\mu} u$
in $(\Omega^\mu)_\delta$,}
$$
if we choose $R>C_1 C_0^{-1}$. Moreover, $u=v=0$ in $\mathbb R^N\setminus \Omega^\mu$ and
$v \ge R \delta^{2s-\theta} \ge u$ in $\Omega^\mu \setminus (\Omega^\mu)_\delta$ if $R$ is chosen so
that $R \delta^{2s-\theta} \ge \|u\|_{L^\infty(\Omega^\mu)}$. Thus by comparison $u\le v$ in
$(\Omega^\mu)_\delta$, which gives the desired result, with, for instance $C_2=\delta^{\theta-2s} +C_0^{-1}$.
This concludes the proof.
\end{proof}
We close this section with a statement of the strong comparison principle for
the operator $(-\Delta)^s_K$, which will be frequently used throughout the
rest of the paper. We include a proof for completeness (cf. Lemma 12 in \cite{LL} for a similar proof).
\begin{lema}\label{PFM}
Let $K$ be a measurable function verifying \eqref{elipticidad} and assume $u\in C(\mathbb R^N)$,
$u\ge 0$ in $\mathbb R^N$ verifies $(-\Delta)^s_K u \ge 0$ in the viscosity sense in $\Omega$. Then
$u>0$ or $u\equiv 0$ in $\Omega$.
\end{lema}
\begin{proof}
Assume $u(x_0)=0$ for some $x_0\in \Omega$ but $u\not\equiv 0$ in $\Omega$. Choose a nonnegative test
function $\phi \in C^2(\mathbb R^N)$ such that $u\ge \phi$ in a neighborhood $U$ of $x_0$ with $\phi(x_0)=0$
and let
$$
\psi=\left\{
\begin{array}{ll}
\phi & \hbox{in } U\\
u & \hbox{in } \mathbb R^N\setminus U.
\end{array}
\right.
$$
Observe that $\psi$ can be taken to be nontrivial since $u$ is not identically zero, by diminishing $U$ if
necessary. Since $(-\Delta)^s_K u\ge 0$ in $\Omega$ in the viscosity sense, it follows that $(-\Delta)^s_K
\psi (x_0)\ge 0$. Taking into account that for a nonconstant $\psi$
we should have $(-\Delta)^s_K \psi < 0$ at a global minimum, we deduce that
$\psi$ is a constant function.
Moreover, since $\psi(x_0)=\phi(x_0)=0$ then
$\psi\equiv 0$ in $\mathbb R^N$, which is a contradiction.
Therefore if $u(x_0)=0$ for some $x_0\in \Omega$ we must have $u\equiv 0$ in $\Omega$, as was to be shown.
\end{proof}
\section{A priori bounds}
\setcounter{section}{3}
\setcounter{equation}{0}
In this section we will be concerned with our most important step: obtaining a priori
bounds for positive solutions of both problems \eqref{problema} and \eqref{problema-grad}.
We begin with problem \eqref{problema}, with the essential assumption of subcriticality of
$p$, that is equation \eqref{subcritico}, and assuming that $g$ verifies the growth restriction
\begin{equation}\label{crec-g-2}
|g(x,z)| \le C (1 + |z|^r), \quad x\in\Omega,\ z\in \mathbb R,
\end{equation}
where $C>0$ and $0<r<p$.
\begin{teorema}\label{cotas}
Assume $\Omega$ is a $C^2$ bounded domain and $K$ a measurable function verifying
\eqref{elipticidad} and \eqref{continuidad}. Suppose $p$ is such that \eqref{subcritico} holds
and $g$ verifies \eqref{crec-g-2}. Then there exists a constant $C>0$ such that for every positive
viscosity solution $u$ of \eqref{problema} we have
$$
\| u\|_{L^\infty (\Omega)} \le C.
$$
\end{teorema}
\begin{proof}
Assume on the contrary that there exists a sequence of positive solutions $\{u_k\}$ of \eqref{problema}
such that $M_k=\| u_k \|_{L^\infty(\Omega)} \to +\infty$. Let $x_k\in \Omega$ be points with
$u_k(x_k) =M_k $ and introduce the functions
$$
v_k(y)= \frac{u_k(x_k+\mu_k y)}{M_k}, \quad y\in \Omega^k,
$$
where $\mu_k=M_k^{-\frac{p-1}{2s}}\to 0$ and
$$
\mbox{$\Omega^k:=\{y\in \mathbb R^N:\ x_k+\mu_k y\in \Omega\}$.}
$$
Then $v_k$ is a function verifying $0< v_k \le 1$, $v_k(0)=1$ and
\begin{equation}\label{rescale-1}
(-\Delta)^s_{K_k} v_k = v_k^p + h_k \quad \hbox{in } \Omega^k
\end{equation}
where $K_k(y)=K(\mu_k y)$ and $h_k \in C(\Omega^k)$ verifies $|h_k|\le C M_k^{r-p}$.
By passing to subsequences, two situations may arise: either $d(x_k) \mu_k^{-1} \to +\infty$
or $d(x_k) \mu_k^{-1} \to d \ge 0$.
Assume the first case holds, so that $\Omega^k \to \mathbb R^N$ as $k\to +\infty$. Since the right hand
side in \eqref{rescale-1} is uniformly bounded and $v_k\le 1$, we may use
estimates \eqref{est-ca} with an application of Ascoli-Arzel\'a's theorem and a diagonal argument
to obtain that $v_k \to v$ locally uniformly in $\mathbb R^N$. Passing to the limit in
\eqref{rescale-1} and using that $K$ is continuous at zero with $K(0)=1$, we see that
$v$ solves $(-\Delta)^s v= v^p$ in $\mathbb R^N$ in the viscosity sense (use for instance Lemma 5 in \cite{CS2}).
By standard regularity (cf. for instance Proposition 2.8 in \cite{S})
we obtain $v\in C^{2s+\alpha}(\mathbb R^N)$ for some $\alpha \in (0,1)$. Moreover, since $v(0)=1$, the strong
maximum principle implies $v>0$. Then by bootstrapping using again Proposition 2.8 in \cite{S}
we would actually have $v\in C^\infty(\mathbb R^N)$. In particular we deduce that $v$ is a strong
solution of $(-\Delta)^s v=v^p$ in $\mathbb R^N$ in the sense of \cite{ZCCY}. However,
since $p<\frac{N+2s}{N-2s}$, this contradicts for instance Theorem 4 in \cite{ZCCY} (see also \cite{CLO1}).
If the second case holds then we may assume $x_k\to x_0\in \partial\Omega$. With no loss of generality
assume also $\nu (x_0)=-e_N$. In this case, rather than working
with the functions $v_k$, it is more convenient to deal with
$$
w_k(y)= \frac{u_k(\xi_k+\mu_k y)}{M_k}, \quad y\in D^k,
$$
where $\xi_k\in \partial\Omega$ is the projection of $x_k$ on $\partial\Omega$ and
\begin{equation}\label{Dk}
\mbox{$D^k:=\{y\in \mathbb R^N:\
\xi_k+\mu_k y \in \Omega\}$.}
\end{equation}
Observe that
\begin{equation}\label{cero}
0\in \partial D^k,
\end{equation}
and
$$\mbox{$D^k \to \mathbb R^N_+=\{y\in \mathbb R^N:\ y_N>0\}$ as $k\to +\infty$.}
$$
It also follows that $w_k$ verifies \eqref{rescale-1} in $D^{k}$
with a slightly different function $h_k$, but with the same bounds.
Moreover, setting
$$
y_k:=\frac{x_k-\xi_k}{\mu_k},
$$
so that $|y_k|= d(x_k)\mu_k^{-1}$, we see that
$w_k(y_k)=1$. We claim that $d=\lim_{k\to +\infty} d(x_k) \mu_k^{-1}>0$. This in particular guarantees that
by passing to a further subsequence $y_k\to y_0$, where $|y_0|=d>0$, thus $y_0$ is in the
interior of the half-space $\mathbb R^N_+$.
Let us show the claim. Observe that by \eqref{rescale-1}, and since $r<p$, we have
$$
(-\Delta)^{s}_{K_k}w_k\leq C\leq C_1 d_k^{-\theta} \quad \hbox{in } D^k
$$
for every $\theta \in (s,2s)$, where $d_k(y)={\rm dist}(y,\partial D^k)$.
By Lemma \ref{lema-barrera-2}, fixing any such $\theta$, there exist
constants $C_0>0$ and $\delta>0$ such that $w_k(y) \le C_0 d_k(y)^{2s-\theta}$ if $d_k(y) < \delta$.
In particular, since by \eqref{cero} $|y_k|\ge d_k(y_k)$,
if $d_k(y_k) <\delta$, then $1\le C_0 d_k(y_k)^{2s-\theta} \le C_0
|y_k|^{2s-\theta}$, which implies $|y_k|$ is bounded from
below so that $d>0$.
Now we can employ \eqref{est-ca} as above to obtain that $w_k\to w$ uniformly on
compact sets of $\mathbb R^N_+$, where $w$ verifies $0\le w \le 1$ in $\mathbb R^N_+$, $w(y_0)=1$ and
$w(y) \le C y_N^{2s-\theta}$ for $y_N <\delta$. Therefore $w\in C(\mathbb R^N)$ is a nonnegative, bounded
solution of
$$
\left\{
\begin{array}{ll}
(-\Delta)^s w = w^p & \hbox{in } \mathbb R^N_+,\\[0.25pc]
w=0 & \hbox{in } \mathbb R^N \setminus \mathbb R^N_+.
\end{array}
\right.
$$
Again by bootstrapping and the strong maximum principle we have $w\in C^\infty (\mathbb R^N_+)$, $w>0$.
Since $p<\frac{N+2s}{N-2s}<\frac{N-1+2s}{N-1-2s}$, this is a contradiction with Theorem 1.1
in \cite{QX} (cf. also Theorem 1.2 in \cite{FW}). This contradiction proves the theorem.
\end{proof}
We now turn to analyze the a priori bounds for solutions of problem \eqref{problema-grad}.
We have already remarked that due to the expected singularity of the gradient of the solutions
near the boundary we need to work in spaces with weights which take care of the singularity.
Thus we fix $\sigma \in (0,1)$ verifying
\begin{equation}\label{cond-sigma}
0<\sigma< 1-\frac{s}{t}<1
\end{equation}
and let
\begin{equation}\label{E}
E_\sigma=\{u\in C^1(\Omega): \ \| u\|_1^{(-\sigma)}<+\infty\},
\end{equation}
where $\| \cdot \|_1^{(-\sigma)}$
is given by \eqref{norma-c1} with $\theta=-\sigma$. As for the function $h$, we assume that it has a
prescribed growth at infinity: there exists $C^{0}>0$ such that for every $x\in\Omega$, $z\in\mathbb R$ and $\xi\in\mathbb R^N$,
\begin{equation}\label{crec-h-2}
|h(x,z,\xi)| \le C^0 (1 + |z|^r + |\xi|^t),
\end{equation}
where $0<r<p$ and $1<t<\frac{2sp}{p+2s-1}<2s$ (observe that there is no loss of generality in
assuming $t>1$). We recall that in the present situation we require the stronger
restriction \eqref{subserrin}
on the exponent $p$.
Then we can prove:
\begin{teorema}\label{cotas-grad}
Assume $\Omega$ is a $C^2$ bounded domain and $K$ a measurable function verifying
\eqref{elipticidad} and \eqref{continuidad}. Suppose that $s>\frac{1}{2}$, $p$ verifies \eqref{subserrin}
and $h$ is nonnegative and such that \eqref{crec-h-2}
holds. Then there exists a constant $C>0$ such that for every positive solution $u$ of \eqref{problema-grad}
in $E_\sigma$ with $\sigma$ satisfying \eqref{cond-sigma} we have
$$
\| u\|_1^{(-\sigma)} \le C.
$$
\end{teorema}
We prove the a priori bounds in two steps. In the first one we obtain rough bounds for all solutions of
the equation which are universal, in the spirit of \cite{PQS}. It is here where the restriction
\eqref{subserrin} comes in.
\begin{lema}\label{cotas-pqs}
Assume $\Omega$ is a $C^2$ (not necessarily bounded) domain and $K$ a measurable function verifying
\eqref{elipticidad} and \eqref{continuidad}. Suppose that $s>\frac{1}{2}$ and $p$ verifies
\eqref{subserrin}. Then there exists a positive constant
$C=C(N,s,p,r,t,C^0,\Omega)$
(where $r$, $t$ and $C^0$ are given in \eqref{crec-h-2})
such that for
every positive function $u\in C^1(\Omega)\cap L^\infty(\mathbb R^N)$ verifying $(-\Delta)^s_K
u= u^p +h(x,u,\nabla u)$ in the viscosity sense in $\Omega$, we have
$$
u(x) \le C (1+{\rm dist}(x,\partial \Omega)^{-\frac{2s}{p-1}}) ,\quad |\nabla u(x)| \le C (1+{\rm dist}(x,\partial\Omega)
^{-\frac{2s}{p-1}-1})
$$
for $x\in \Omega$.
\end{lema}
\begin{proof}
Assume on the contrary that there exist sequences of
positive functions $u_k\in C^1(\Omega)\cap L^\infty (\mathbb R^N)$ verifying $(-\Delta)^s_K u_k= u_k^p
+h(x,u_k,\nabla u_k)$ in $\Omega$ and points $y_k\in \Omega$ such that
\begin{equation}\label{hipo}
u_k(y_k)^\frac{p-1}{2s} + |\nabla u_k(y_k)|^\frac{p-1}{p+2s-1} > 2k\: (1+{\rm dist}(y_k,\partial \Omega)^{-1}).
\end{equation}
Denote $N_k(x)=u_k(x)^\frac{p-1}{2s} + |\nabla u_k(x)|^\frac{p-1}{p+2s-1}$, $x\in \Omega$. By Lemma
5.1 in \cite{PQS} (cf. also Remark 5.2 (b) there) there exists a sequence of points $x_k\in \Omega$ with the
property that $N_k(x_k) \ge N_k(y_k)$, $N_k(x_k)>2k\: {\rm dist}(x_k,\partial \Omega)^{-1}$ and
\begin{equation}\label{conjuntos}
\mbox{$N_k(z) \le 2 N_k(x_k)$ in $B(x_k, kN_k(x_k)^{-1})$.}
\end{equation}
Observe that, in particular, \eqref{hipo} implies that $N_k(x_k)\to +\infty$.
Let $\nu_k := N_k(x_k)^{-1}\to 0$ and
define
\begin{equation}\label{v_k}
v_k(y) := \nu_k^\frac{2s}{p-1} u_k (x_k+\nu _k y), \quad y \in B_k:=\{y\in \mathbb R^N:\ |y|<k\}.
\end{equation}
Then the functions $v_k$ verify $(-\Delta)^s_{K_k} v_k= v_k^p + h_k$ in $B_k$, where
$K_k(y) = K (\nu_k y)$ and
$$
h_k (y)=\nu_k ^{\frac{2sp}{p-1}} h(x_k+\nu_k y ,\nu_k^{-\frac{2s}{p-1}}
v_k(y),\nu_k^{-\frac{2s+p-1}{p-1}} \nabla v_k(y)).
$$
Since $h$ verifies \eqref{crec-h-2}, we have $| h_k| \le C_0 \nu_k^{\gamma}
(1+v_k^r+|\nabla v_k|^t)$ in $B_k$, where
$$
\gamma= \max\left\{ \frac{2s(p-r)}{p-1}, \frac{2ps-(2s+p-1)t}{p-1}\right\} >0.
$$
Moreover by \eqref{conjuntos} it follows that
\begin{equation}\label{eq1}
v_k(y)^\frac{p-1}{2s} + |\nabla v_k(y)|^\frac{p-1}{p+2s-1}\le 2, \quad y\in B_k .
\end{equation}
Also it is clear that
\begin{equation}\label{eq2}
v_k(0)^\frac{p-1}{2s} + |\nabla v_k(0)|^\frac{p-1}{p+2s-1} =1.
\end{equation}
Since $\nu_k\to 0$ and $v_k$ and $|\nabla v_k|$ are uniformly bounded in $B_k$, we see that
$ h_k$ is also uniformly bounded in $B_k$. We may then
use estimate \eqref{est-c1a} to obtain, again with the use of Ascoli-Arzel\'a's theorem
and a diagonal argument, that there exists a subsequence, still labeled $v_k$, such that
$v_k\to v$ in $C^1_{\rm loc}(\mathbb R^N)$ as $k\to +\infty$. Since $v(0)^\frac{p-1}{2s} + |\nabla
v(0)|^\frac{p-1}{p+2s-1} =1$, we see that $v$ is nontrivial.
Now let $w_k$ be the functions obtained by extending $v_k$ to be zero outside $B_k$. Then it
is easily seen that $(-\Delta)^s_{K_k} w_k\ge w_k^p$ in $B_k$. Passing to the limit using again Lemma 5 of \cite{CS2},
we arrive at $(-\Delta)^s v \ge v^p$ in $\mathbb R^N$, which contradicts Theorem 1.3 in \cite{FQ2} since
$p<\frac{N}{N-2s}$. This concludes the proof.
\end{proof}
\begin{obss}\label{comentario} {\rm \
\noindent (a) With a minor modification in the above proof, it can be seen that
the constants given by Lemma \ref{cotas-pqs} can be taken independent of the domain $\Omega$
(cf. the proof of Theorem 2.3 in \cite{PQS}).
\noindent (b) We expect Lemma \ref{cotas-pqs} to hold in the full range given by \eqref{subcritico}.
Unfortunately, this method of proof seems purely local and needs to be properly adapted to
deal with nonlocal equations. Observe that there is no information available for the functions
$v_k$ defined in \eqref{v_k} in $\Omega\setminus B_k$, which makes it difficult to pass to the limit appropriately in the
equation satisfied by $v_k$.
}\end{obss}
We now come to the proof of the a priori bounds for positive solutions of \eqref{problema-grad}.
\begin{proof}[Proof of Theorem \ref{cotas-grad}]
Assume that the conclusion of the theorem is not true. Then there exists a sequence of positive
solutions $u_k\in E_\sigma$ of \eqref{problema-grad} such
that $\| u_k \|_1^{(-\sigma)}\to +\infty$, where $\sigma$ satisfies \eqref{cond-sigma}. Define
$$
M_k(x)= d(x)^{-\sigma} u_k(x) + d(x)^{1-\sigma} |\nabla u_k(x)|.
$$
Now choose points $x_k\in \Omega$ such that $M_k(x_k) \ge \sup_\Omega
M_k -\frac{1}{k}$ (this supremum may not be achieved).
Observe that our assumption implies $M_k(x_k)\to +\infty$.
Let $\xi_k$ be a projection of $x_k$ on $\partial \Omega$ and introduce the functions:
$$
v_k(y) = \frac{u_k(\xi_k + \mu_k y)}{\mu_k^\sigma M_k(x_k)}, \quad y\in D^k,
$$
where $\mu_k=M_k(x_k)^{-\frac{p-1}{2s+\sigma(p-1)}}\to 0$ and $D^{k}$ is the set defined in \eqref{Dk}. It is not hard to see that
\begin{equation}\label{eq-rescalada}
\left\{
\begin{array}{ll}
(-\Delta)_{K_k}^s v_k = v_k^p + h_k & \hbox{in } D^k,\\[0.35pc]
\ \ v_k=0 & \hbox{in }\mathbb R^N \setminus D^k,
\end{array}
\right.
\end{equation}
where $K_k(y) = K (\mu_k y)$ and
$$
h_k (y)\hspace{-1mm}=\hspace{-1mm}M_k(x_k)^{-\frac{2sp}{2s+\sigma (p-1)}} h(\xi_k+\mu_k y ,M_k(x_k)^\frac{2s}{2s+\sigma(p-1)} v_k,
M_k(x_k)^\frac{2s+p-1}{2s+\sigma(p-1)} \nabla v_k).
$$
By assumption \eqref{crec-h-2} on $h$, it is readily seen that $h_k$ verifies the inequality
$|h_k|\le C M_k(x_k) ^{-\bar \gamma} (1+ v_k^r+ |\nabla v_k|^t)$ for
some positive constant $C$ independent of $k$, where
$$
\bar \gamma=\frac{2sp}{2s+\sigma(p-1)} -\frac{\max\{2sr, (2s+p-1)t\}}{2s+\sigma(p-1)}>0.
$$
Moreover, the functions $v_k$ verify
$$
\mu_k^\sigma d(\xi_k+\mu_k y)^{-\sigma} v_k(y)+ \mu_k^{\sigma-1} d(\xi_k+\mu_k y)^{1-\sigma} |\nabla v_k(y)|
=\frac{M_k(\xi_k+\mu_k y) }{M_k(x_k)}.
$$
Then, using that $\mu_k ^{-1} d(\xi_k+\mu_k y)={\rm dist}(y,\partial D^k)=:d_k(y)$ and the choice of the points
$x_k$, we obtain for large $k$
\begin{equation}\label{eq-normal-1}
d_k(y)^{-\sigma} v_k(y)+d_k(y)^{1-\sigma} |\nabla v_k(y)| \le 2 \quad \mbox{ in } D^k
\end{equation}
and
\begin{equation}\label{eq-normal-2}
d_k(y_k)^{-\sigma} v_k(y_k)+d_k(y_k)^{1-\sigma} |\nabla v_k(y_k)| =1,
\end{equation}
where, as in the proof of Theorem \ref{cotas}, $y_k :=\mu_k^{-1}(x_k-\xi_k)$.
Next, since $u_k$ solves \eqref{problema-grad}, we may use Lemma \ref{cotas-pqs}
to obtain that $M_k(x_k) \le C d(x_k)^{-\sigma}(1+d(x_k)^{-\frac{2s}{p-1}})$
for some positive constant independent of $k$, which implies $d(x_k) \mu_k^{-1}\le C$.
This bound immediately entails that (passing to subsequences) $x_k\to x_0\in \partial\Omega$ and
$|y_k|=d(x_k)\mu_k^{-1}\to d\ge 0$ (in particular the points $\xi_k$ are uniquely determined
at least for large $k$). Assuming that the outward
unit normal to $\partial \Omega$ at $x_0$ is $-e_N$, we also obtain then that $D^k \to \mathbb R^N_+$ as
$k \to +\infty$.
We claim that $d>0$. To show this, notice that from \eqref{eq-rescalada} and
\eqref{eq-normal-1} we have $(-\Delta)^s_{K_k}
v_k \le C d_k^{(\sigma-1)t}$ in $D^k$, for some constant $C$ not depending on $k$.
By our choice of $\sigma$ and $t$, we get that
\begin{equation}\label{cond-sigma2}
\sigma>\frac{t-2s}{t}.
\end{equation}
That is, we have
\begin{equation}\label{sigma3}
s<(1-\sigma)t<2s,
\end{equation}
so that Lemma \ref{lema-barrera-2} can
be applied to give $\delta>0$ and a positive constant $C$ such that
\begin{equation}\label{these1}
v_k(y) \le C d_k(y)^{2s+(\sigma-1) t}, \quad \hbox{when } d_k(y) <\delta.
\end{equation}
Moreover, since $1<t<2s$, \eqref{cond-sigma2} in particular implies that
\begin{equation}\label{sigma2}
\sigma>\frac{t-2s}{t-1},
\end{equation}
and, therefore, $-\sigma+2s+(\sigma-1)t =\sigma(t-1)+2s-t >0.$ Thus, by \eqref{eq-normal-1} we have
$$\mbox{$v_k (y) \le 2 d_k(y)^{\sigma}\le 2 \delta^{\sigma-2s-(\sigma-1)t} d_k(y)^{2s+(\sigma-1)t}$ when
$d_k(y)\ge \delta$.}$$
Hence $\| v_k\|_0^{(-2s-(\sigma-1)t)}$ is bounded.
We can then use Lemma \ref{lema-regularidad}, with $\theta=(1-\sigma)t$, to obtain that
\begin{equation}\label{these2}
|\nabla v_k(y)| \le C d_k(y)^{2s+(\sigma-1) t-1} \quad \hbox{in } D^k,
\end{equation}
where $C$ is also independent of $k$. Taking inequalities \eqref{these1} and \eqref{these2}
in \eqref{eq-normal-2}, we deduce
$$
1 \le C d_k(y_k) ^{-\sigma+2s+(\sigma-1)t},
$$
thus, by \eqref{sigma2} we see that $d_k (y_k)$ is bounded away from zero. Hence, by \eqref{cero},
$|y_k|$ also is, so that $d>0$, as claimed.
Finally, we can use \eqref{est-c1a} together with Ascoli-Arzel\'a's theorem and a
diagonal argument to obtain that $v_k \to v$ in $C^1_{\rm loc}(\mathbb R^N_+)$,
where by \eqref{eq-normal-2}, the function $v$ verifies $d^{-\sigma} v(y_0)+ d^{1-\sigma} |\nabla v(y_0)|=1$ for
some $y_0\in \mathbb R^N_+$, hence it is nontrivial and $v(y) \le C y_N ^{2s+(\sigma-1)t}$ if $0<y_N<\delta$.
Thus $v\in C(\mathbb R^N)$ and $v=0$ outside $\mathbb R^N_+$. Passing to the limit in \eqref{eq-rescalada} with
the aid of Lemma 5 in \cite{CS2} and using that $K$ is continuous at zero with $K(0)=1$, we obtain
$$
\left\{
\begin{array}{ll}
(-\Delta)^s v = v^p & \hbox{in } \mathbb R^N_+,\\[0.25pc]
v=0 & \hbox{in } \mathbb R^N \setminus \mathbb R^N_+.
\end{array}
\right.
$$
Using again bootstrapping and the strong maximum principle we have
$v>0$ and $v\in C^\infty(\mathbb R^N_+)$, therefore it is a classical solution. Moreover, by
Lemma \ref{cotas-pqs}, we also see that $v(y)\le C y_N^{-\frac{2s}{p-1}}$ in $\mathbb R^N_+$, so that
$v$ is bounded. This is a contradiction with Theorem 1.2 in \cite{FW}
(see also \cite{QX}), because
we are assuming $p<\frac{N}{N-2s} <\frac{N-1+2s}{N-1-2s}$.
The proof is therefore concluded.
\end{proof}
\section{Existence of solutions}
\setcounter{section}{4}
\setcounter{equation}{0}
This final section is devoted to the proof of our existence results, Theorems
\ref{th-1} and \ref{th-grad}. Both proofs are very similar, but
that of Theorem \ref{th-grad} is slightly more involved. Therefore we only show this one.
Thus we assume $s>\frac{1}{2}$. Fix $\sigma$ verifying \eqref{cond-sigma} and
consider the Banach space $E_\sigma$, defined in \eqref{E}, which is an ordered
Banach space with the
cone of nonnegative functions $P=\{u\in E_\sigma:\ u\ge 0 \hbox{ in }\Omega\}$. For the
sake of brevity, we will drop the subindex $\sigma$ throughout the rest of the section
and will denote $E$ and $\| \cdot \|$ for the space and its norm.
We will assume that $h$ is nonnegative and verifies the growth condition in the statement of
Theorem \ref{th-grad}:
\begin{equation}\label{hipo-h-2}
h(x,z,\xi) \le C (|z|^r +|\xi|^t), \quad x\in \Omega,\ z\in \mathbb R, \ \xi \in \mathbb R^N,
\end{equation}
where $1<r<p$ and $1<t<\frac{2sp}{2s+p-1}$. Observe that for every $v\in P$ we
have
\begin{equation}\label{h}
h(x,v(x),\nabla v(x)) \le C (\|v\|) d(x)^{(\sigma-1)t}.
\end{equation}
Moreover, by \eqref{sigma3} we may apply Lemma \ref{lema-existencia} to deduce that the problem
$$
\left\{
\begin{array}{ll}
(-\Delta)_K^s u = v^p + h(x,v,\nabla v) & \hbox{in }\Omega,\\[0.35pc]
\ \ u=0 & \hbox{in }\mathbb R^N \setminus \Omega,
\end{array}
\right.
$$
admits a unique nonnegative solution $u$, with $\|u \|_0^{(-\sigma)}<+\infty$. By Lemma \ref{lema-regularidad}
we also deduce $\| \nabla u \|_0^{(1-\sigma)}<+\infty$. Hence $u\in E$. In this way, we can
define an operator $T: P \to P$ by means of $u=T(v)$. It is clear that nonnegative solutions of \eqref{problema-grad}
in $E$ coincide with the fixed points of this operator.
We begin by showing a fundamental property of $T$.
\begin{lema}\label{lema-compacidad}
The operator $T: P \to P$ is compact.
\end{lema}
\begin{proof}
We show continuity first: let $\{u_n\} \subset P$ be such that $u_n\to u$ in $E$. In particular,
$u_n\to u$ and $\nabla u_n\to \nabla u$ uniformly on compact sets of $\Omega$, so that the continuity of
$h$ implies
\begin{equation}\label{conv-unif}
h(\cdot,u_n,\nabla u_n) \to h(\cdot,u,\nabla u) \hbox{ uniformly on compact sets of }\Omega.
\end{equation}
Moreover, since $u_n$ is bounded in $E$, similarly as in \eqref{h} we also have that
$h(\cdot,u_n,\nabla u_n) \le C d^{(\sigma-1)t}$ in $\Omega$, for a constant that does not depend on $n$
(and the same is true for $u$ after passing to the limit). This implies
\begin{equation}\label{claim!}
\sup_\Omega d^\theta |h(\cdot,u_n,\nabla u_n)- h(\cdot,u,\nabla u)| \to 0,
\end{equation}
for every $\theta>(1-\sigma)t>s$. Indeed, if we take $\varepsilon>0$ then
$$
d^\theta |h(\cdot,u_n,\nabla u_n)- h(\cdot,u,\nabla u)|\leq C d^{\theta-(1-\sigma)t}
\le C \delta^{\theta-(1-\sigma)t}\le \varepsilon,
$$
if $d\le \delta$, by choosing a small $\delta$. When $d\ge \delta$,
$$
d^\theta |h(\cdot,u_n,\nabla u_n)- h(\cdot,u,\nabla u)|\leq (\sup_\Omega d)^\theta
|h(\cdot,u_n,\nabla u_n)- h(\cdot,u,\nabla u)| \le \varepsilon,
$$
just by choosing $n\ge n_0$, by \eqref{conv-unif}. This shows \eqref{claim!}.
From Lemmas \ref{lema-existencia} and \ref{lema-regularidad} for every $(1-\sigma)t<\theta<2s$, we obtain
$$
\sup_\Omega d^{\theta-2s} |T(u_n)-T(u)| + d^{\theta-2s+1} |\nabla (T(u_n)-T(u))| \to 0.
$$
The desired conclusion follows by choosing $\theta$ such that
$$
(1-\sigma) t < \theta \le 2s-\sigma.
$$
This shows continuity.
To prove compactness, let $\{u_n\} \subset P$ be bounded. As we did before,
$h(\cdot,u_n,\nabla u_n)
\le C d^{(\sigma-1)t}$ in $\Omega$. By \eqref{est-c1a} we obtain that
for every $\Omega' \subset \subset \Omega$ the $C^{1,\beta}$ norm of $T(u_n)$ in $\Omega'$ is
bounded. Therefore, we may assume by passing to a subsequence that $T(u_n)\to v$
in $C^1_{\rm loc} (\Omega)$.
From Lemmas \ref{lema-existencia} and \ref{lema-regularidad} we deduce that $T(u_n) \le Cd^{(\sigma-1)t+2s}$,
$|\nabla T(u_n)| \le Cd^{(\sigma-1)t+2s-1}$ in $\Omega$, and the same estimates hold for
$v$ and $\nabla v$ by passing to the limit. Hence
$$
\sup_\Omega d^{-\sigma} |T(u_n)-v| + d^{1-\sigma} |\nabla (T(u_n)-v)| \to 0,
$$
which shows compactness. The proof is concluded.
\end{proof}
The proof of Theorem \ref{th-grad} relies on the use of topological degree, with the
aid of the bounds provided by Theorem \ref{cotas-grad}. The essential tool is the
following well-known result (see for instance Theorem 3.6.3 in \cite{Ch}).
\begin{teorema}\label{th-chang}
Suppose that $E$ is an ordered Banach space with positive cone $P$, and $U\subset P$ is an open
bounded set containing 0. Let $\rho>0$ be such that $B_\rho(0)\cap P\subset U$. Assume $T: U\to P$
is compact and satisfies
\begin{itemize}
\item[(a)] for every $\mu\in [0,1)$, we have $u\ne \mu T(u)$ for every $u \in P$ with $\| u \|=\rho$;
\item[(b)] there exists $\psi \in P\setminus \{0\}$ such that $u-T(u) \ne t \psi$, for every
$u\in \partial U$, for every $t\ge 0$.
\end{itemize}
Then $T$ has a fixed point in $U \setminus B_\rho(0)$.
\end{teorema}
The final ingredient in our proof is some knowledge on the principal eigenvalue for the
operator $(-\Delta)^s_K$. The natural definition of such an eigenvalue in our context
resembles that of \cite{BNV} for linear second order elliptic operators, that is:
\begin{equation}\label{eigenvalue}
\lambda_1 :=\sup\left\{ \lambda\in \mathbb R:
\begin{array}{cc}
\hbox{ there exists } u\in C(\mathbb R^N),\ u>0 \hbox{ in } \Omega, \hbox{ with } \\[0.25pc]
u=0 \hbox{ in } \mathbb R^N\setminus \Omega \hbox{ and } (-\Delta)^s_K u \ge \lambda u \hbox{ in } \Omega
\end{array}
\right\}.
\end{equation}
To the best of our knowledge, there are no results available for the eigenvalues
of $(-\Delta)^s_K$, although it seems likely that the first one will enjoy the usual
properties (see \cite{QS}).
For our purposes here, we only need to show the finiteness of $\lambda_1$:
\begin{lema}\label{lema-auto}
$\lambda_1<+\infty$.
\end{lema}
\begin{proof}
We begin by constructing a suitable subsolution.
The construction relies on a sort of ``implicit'' Hopf's principle (it is to be noted that Hopf's principle
is not well understood for general kernels $K$ verifying \eqref{elipticidad}; see for instance Lemma 7.3 in
\cite{RO} and the comments after it). However, a relaxed version is enough for our purposes.
Let $B'\subset \subset B\subset \subset \Omega$ and consider the unique solution $\phi$ of
$$
\left\{
\begin{array}{ll}
(-\Delta)_K^s \phi = 0 & \hbox{in } B\setminus B',\\[0.35pc]
\ \ \phi = 1 & \hbox{in } B',\\[0.25pc]
\ \ \phi = 0 & \hbox{in }\mathbb R^N \setminus B,
\end{array}
\right.
$$
given for instance by Theorem 3.1 in \cite{FQ}, and the unique viscosity solution of
$$
\left\{
\begin{array}{ll}
(-\Delta)_K^s v= \phi & \hbox{in } B,\\[0.35pc]
\ \ v = 0 & \hbox{in }\mathbb R^N \setminus B,
\end{array}
\right.
$$
given by the same theorem. By Lemma \ref{PFM} we have both $\phi>0$ and $v>0$ in $B$, so that
there exists $C_0>0$ such that $C_0 v \ge \phi$ in $B'$. Hence by comparison
$C_0 v \ge \phi$ in $\mathbb R^N$. In particular,
\begin{equation}\label{extra4_1}
\mbox{$(-\Delta)^s_K v \le C_0 v$ in $B$.}
\end{equation}
We claim that $\lambda_1\le C_0$. Indeed, if we assume $\lambda_1>C_0$, then
there exist $\lambda>C_0$ and a
positive function $u\in C(\mathbb R^N)$ vanishing outside $\Omega$ such that
\begin{equation}\label{extra4_2}
\mbox{$(-\Delta)^s_K u \ge \lambda u$ in $\Omega$.}
\end{equation}
Since $u>0$ in $\overline{B}$, the number
$$
\omega =\sup _B \frac{v}{u}
$$
is finite. Moreover, $\omega u \ge v$ in $\mathbb R^N$. Observe that, since we are assuming $\lambda>C_0$, by
\eqref{extra4_1} and \eqref{extra4_2} it follows that
$$
\left\{
\begin{array}{ll}
(-\Delta)^s_K (\omega u-v)\ge 0 & \hbox{in } B,\\[0.35pc]
\ \ \omega u-v > 0 & \hbox{in }\mathbb R^N \setminus B.
\end{array}
\right.
$$
Hence the strong maximum principle (Lemma \ref{PFM}) implies
$\omega u -v>0$ in $\overline{B}$. However this would imply $(\omega-\varepsilon) u >v$ in
$\overline{B}$ for small $\varepsilon$, contradicting the definition of $\omega$. Then $\lambda_1\le C_0$
and the lemma follows.
\end{proof}
Now we are in a position to prove Theorem \ref{th-grad}.
\begin{proof}[Proof of Theorem \ref{th-grad}]
As already remarked, we will show that Theorem \ref{th-chang} is applicable to the operator
$T$ in $P\subset E$.
Let us check first hypothesis (a) in Theorem \ref{th-chang}. Assume we have $u=\mu T(u)$
for some $\mu \in [0,1)$ and $u\in P$. This is equivalent to
$$
\left\{
\begin{array}{ll}
(-\Delta)_K^s u = \mu (u^p + h(x,u,\nabla u)) & \hbox{in }\Omega,\\[0.35pc]
\ \ u=0 & \hbox{in }\mathbb R^N \setminus \Omega.
\end{array}
\right.
$$
By our hypotheses on $h$ we get that the right hand side of the previous
equation can be bounded by
$$
\begin{array}{rl}
\mu (u^p+h(x,u,\nabla u)) \hspace{-2mm} & \le d^{\sigma p} \| u\| ^p + C_0( d^{\sigma r} \| u\|^r
+ d^{(\sigma-1)t} \| u\| ^t)\\[0.25pc]
& \le C d^{(\sigma-1) t} ( \| u\| ^p + \| u\|^r + \| u\| ^t).
\end{array}
$$
Therefore, by Lemmas \ref{lema-existencia} and \ref{lema-regularidad} and \eqref{sigma3}, we have $\| u \| \le C
( \| u\| ^p + \| u\|^r + \| u\| ^t)$. Since $p,r,t>1$, this implies that $\| u \| > \rho$ for some small
positive $\rho$. Thus there are no solutions of $u=\mu T(u)$ if $\| u \|=\rho$
and $\mu\in [0,1)$, and (a) follows.
To check (b), we take $\psi \in P$ to be the unique solution of the problem:
$$
\left\{
\begin{array}{ll}
(-\Delta)_K^s \psi = 1 & \hbox{in }\Omega,\\[0.35pc]
\ \ \psi = 0 & \hbox{in }\mathbb R^N \setminus \Omega
\end{array}
\right.
$$
given by Theorem 3.1 in \cite{FQ}.
We claim that there are no solutions in $P$ of the equation $u-T(u)=t \psi$ if $t$ is large enough.
For that purpose we note that this equation is equivalent to
\begin{equation}\label{problema-t}
\left\{
\begin{array}{ll}
(-\Delta)_K^s u = u^p + h(x,u,\nabla u)+t & \hbox{in }\Omega,\\[0.35pc]
\ \ u=0 & \hbox{in }\mathbb R^N \setminus \Omega.
\end{array}
\right.
\end{equation}
Fix $\mu> \lambda_1$, where $\lambda_1$ is given by \eqref{eigenvalue}. Using the nonnegativity of $h$,
and since $p>1$, there exists a positive constant $C$ such that $u^p + h(x,u,\nabla u)+t \ge \mu u - C +t$.
If $t\ge C$, then $(-\Delta)^s_K u \ge \mu u$ in $\Omega$, which is against the choice of $\mu$ and
the definition of $\lambda_1$. Therefore $t< C$, and \eqref{problema-t} does not admit
positive solutions in $E$ if $t$ is large enough.
Finally, since $h+t$ also verifies condition \eqref{crec-h-2} for $t\le C$,
we can apply Theorem \ref{cotas-grad} to obtain that the solutions of
\eqref{problema-t} are a priori bounded, that is, there exists $M > \rho$ such that
$\| u\| < M$ for every positive solution of \eqref{problema-t} with $t\ge 0$.
Thus Theorem \ref{th-chang} is applicable with $U=B_M(0)\cap P$ and the existence
of a solution in $P$ follows. This solution is positive by Lemma \ref{PFM}. The proof
is concluded.
\end{proof}
\noindent {\bf Acknowledgements.} B. B. was partially supported by a postdoctoral fellowship
given by Fundaci\'on Ram\'on Areces (Spain) and MTM2013-40846-P, MINECO.
L. D. P. was partially supported by PICT2012 0153 from ANPCyT (Argentina).
J. G-M and A. Q. were partially supported by Ministerio de Ciencia e
Innovaci\'on under grant MTM2011-27998 (Spain) and Conicyt MEC number 80130002.
A. Q. was also partially supported by Fondecyt Grant
No. 1151180 Programa Basal, CMM. U. de Chile and Millennium Nucleus
Center for Analysis of PDE NC130017.
\begin{thebibliography}{99}
\bibitem{AB98} {\sc G.Alberti, G.Bellettini}, {\em A nonlocal anisotropic model for phase transitions.
I. The optimal profile problem}. Math. Ann. {\bf 310} (3) (1998), 527--560.
\bibitem{AI} {\sc N. Alibaud, C. Imbert},
{\em Fractional semi--linear parabolic equations with unbounded data},
Trans. Amer. Math. Soc. {\bf 361} (2009), no. 5, 2527--2566.
\bibitem{A} {\sc D. Applebaum}, ``L\'evy Processes and Stochastic Calculus", 2nd ed, Cambridge
Studies in Advanced Mathematics {\bf 116}, Cambridge University Press, Cambridge, 2009.
\bibitem{BCI} {\sc G. Barles, E. Chasseigne, C. Imbert}, {\em On the Dirichlet problem for second-order
elliptic integro-differential equations}, Indiana Univ. Math. J. {\bf 57} (2008), 213--246.
\bibitem{barrios2}{\sc B. Barrios, E. Colorado, R. Servadei, F. Soria}, {\em A critical fractional equation with concave-convex nonlinearities}. To appear in Annales Henri Poincar\'e. DOI: 10.1016/j.anihpc.2014.04.003.
\bibitem{barrios4} {\sc B. Barrios, I. De Bonis, M. Medina, I. Peral}, {\em Fractional Laplacian and a singular nonlinearity}.
To appear in Open Mathematics.
\bibitem{barrios3}{\sc B. Barrios, M. Medina, I. Peral}, {\em Some remarks on the solvability of non local elliptic problems with the Hardy potential.} To appear in Comm. Contemp. Math. DOI: 10.1142/S0219199713500466.
\bibitem{BNV} {\sc H. Berestycki, L. Nirenberg, S. Varadhan}, {\em The principal eigenvalue and maximum
principle for second-order elliptic operators in general domains}. Comm. Pure Appl. Math. {\bf XLVII} (1994),
47--92.
\bibitem{Be} {\sc J. Bertoin}, ``L\'evy Processes", Cambridge Tracts in Mathematics, {\bf 121}.
Cambridge University Press, Cambridge, 1996.
\bibitem{BK2}{\sc K. Bogdan, T. Komorowski},
{\em Estimates of heat kernel of fractional Laplacian perturbed by gradient operators},
Comm. Math. Phys. {\bf 271} (2007), no. 1, 179--198.
\bibitem{BoG} {\sc J. P. Bouchaud, A. Georges}, {\em Anomalous diffusion in disordered media},
Statistical mechanics, models and physical applications, Physics reports {\bf 195} (1990).
\bibitem{BCPS} {\sc C. Br\"andle, E. Colorado, A. de Pablo, U. S\'anchez}, {\em A concave-convex elliptic
problem involving the fractional Laplacian}. Proc. Roy. Soc. Edinburgh Sect. A {\bf 143} (2013), no. 1, 39--71.
\bibitem{CSM05} {\sc X. Cabr\'e, J. Sola-Morales,} {\varepsilonm Layer solutions in a half-space for boundary reactions.} Comm. Pure Appl. Math.
{\bf 58} (12) (2005), 1678-1732.
\bibitem{CT} {\sc X. Cabr\'e, J. Tan}, {\varepsilonm Positive solutions of nonlinear problems involving the square
root of the Laplacian}. Adv. Math. {\bf 224} (2010), 2052--2093.
\bibitem{Caf79} {\sc L. Caffarelli}, {\varepsilonm Further regularity for the Signorini problem}. Comm. Partial Differential Equations {\bf 4}
(9) (1979), 1067-1075.
\bibitem{CJS} {\sc L. Caffarelli, J. M. Roquejoffre, Y. Sire}, {\varepsilonm Variational problems with free boundaries
for the fractional Laplacian}. J. Eur. Math. Soc. {\bf 12} (2010), no. 5, 1151--1179.
\bibitem{CS3} {\sc L. Caffarelli, L. Silvestre}, {\varepsilonm An extension problem related to the fractional
Laplacian}, Comm. in Partial Differential Equations {\bf 32} (2007), 1245--1260.
\bibitem{CS} {\sc L. Caffarelli, L. Silvestre}, {\varepsilonm Regularity theory for fully nonlinear integro
\-differential equations}. Comm. Pure Appl. Math. {\bf 62} (2009), no. 5, 597--638.
\bibitem{CS2} {\sc L. Caffarelli, L. Silvestre}, {\varepsilonm Regularity results for nonlocal equations by
approximation}. Arch. Rat Mech. Anal. {\bf 200} (2011), 59--88.
\bibitem{CaV} {\sc L. Caffarelli, L. Vasseur}, {\varepsilonm Drift diffusion equations with fractional diffusion and
the quasi-geostrophic equation}, Ann. of Math. (2) {\bf 171} (2010), no. 3, 1903--1930.
\bibitem{Ch} {\sc K. C. Chang}, ``Methods of nonlinear analysis". Monographs in Mathematics.
Springer-Verlag New York, 2005.
\bibitem{CL} {\sc H. A. Chang-Lara}, {\varepsilonm Regularity for fully non linear equations with non local drift}.
Preprint available at http://arxiv.org/abs/1210.4242
\bibitem{CV} {\sc H. Chen, L. V\'eron}, {\varepsilonm Semilinear fractional elliptic equations with gradient
nonlinearity involving measures}. J. Funct. Anal. {\bf 266} (2014), 5467--5492.
\bibitem{CLC} {\sc W. Chen, C. Li, Y. Li}, {\varepsilonm A direct blowing-up and rescaling argument on the
fractional Laplacian equation}. Preprint available at http://arxiv.org/abs/1506.00582
\bibitem{CLO1} {\sc W. Chen, C. Li, B. Ou}, {\varepsilonm Classification of solutions for an integral
equation}, Comm. Pure Appl. Math {\bf 59} (2006), 330--343.
\bibitem{CZ} {\sc W. Chen, J. Zhu}, {\varepsilonm Indefinite fractional elliptic problem and Liouville
theorems}. Preprint available at http://arxiv.org/abs/1404.1640
\bibitem{C} {\sc W. Choi}, {\varepsilonm On strongly indefinite systems involving the fractional Laplacian}.
Nonlinear Anal. TMA {\bf 120} (2015), 127--153.
\bibitem{CoT} {\sc R. Cont, P. Tankov}, ``Financial Modelling with Jump Processes", Chapman \&
Hall/CRC Financial Mathematics Series, Boca Raton, Fl, 2004.
\bibitem{Co} {\sc P. Constantin}, {\varepsilonm Euler equations, Navier-Stokes equations and turbulence}, in
``Mathematical Foundation of Turbulent Viscous Flows", Vol. {\bf 1871} of Lecture Notes in Math.,
Springer, Berlin, 2006.
\bibitem{NPV} {\sc E. di Nezza, G. Palatucci, E. Valdinoci}, {\varepsilonm Hitchhiker's guide to the fractional
Sobolev spaces}, Bull. Sci. Math. {\bf 136} (2012), no. 5, 521--573.
\bibitem{dfv} {\sc S. Dipierro, A. Figalli, E. Valdinoci}, {\varepsilonm Strongly nonlocal dislocation dynamics in crystals}, Comm. Partial
Differential Equations {\bf 39} (2014), no. 12, 2351--2387.
\bibitem{FW} {\sc M. M. Fall, T. Weth}, {\varepsilonm Monotonicity and nonexistence results for some
fractional elliptic problems in the half space}. To appear in Comm. Contemp. Math. Available at
http://arxiv.org/abs/1309.7230
\bibitem{FQ2} {\sc P. Felmer, A. Quaas}, {\varepsilonm Fundamental solutions and Liouville type theorems for
nonlinear integral operators}. Adv. Math. {\bf 226} (2011), 2712--2738.
\bibitem{FQ} {\sc P. Felmer, A. Quass}, {\varepsilonm Boundary blow up solutions for fractional elliptic equations}.
Asymptot. Anal. {\bf 78} (2012), no. 3, 123--144.
\bibitem{GS} {\sc B. Gidas, J. Spruck}, {\varepsilonm A priori bounds for positive solutions of nonlinear
elliptic equations}. Comm. Partial Differential Equations {\bf 6} (1981), 883--901.
\bibitem{GT} {\sc D. Gilbarg, N. S. Trudinger}, ``Elliptic Partial Differential
Equations of Second Order". Springer-Verlag, 1983.
\bibitem{GJL} {\sc P. Graczyk, T. Jakubowski, T. Luks}, {\varepsilonm Martin representation and Relative Fatou Theorem
for fractional Laplacian with a gradient perturbation}, Positivity {\bf 17} (2013), no. 4, 1043--1070.
\bibitem{K} {\sc D. Kriventsov}, {\varepsilonm $C^{1,\alpha}$ interior regularity for nonlinear nonlocal elliptic
equations with rough kernels}. Comm. Partial Differential Equations {\bf 38} (2013), no. 12, 2081
--2106.
\bibitem{Landkof} { \sc N. Landkof}, ``Foundations of modern potential theory".
Die Grundlehren der mathematischen Wissenschaften, Band 180. Springer-Verlag,
New York-Heidelberg, 1972.
\bibitem{LL} {\sc E. Lindgren, P. Lindqvist}, {\varepsilonm Fractional eigenvalues}, Calc. Var. Partial Differential
Equations {\bf 49} (2014), no. 1-2, 795--826.
\bibitem{PQS} {\sc P. Pol\'a\v cik, P. Quittner, P. Souplet}, {\varepsilonm Singularity and decay estimates in superlinear
problems via Liouville-type theorems, I: Elliptic equations and systems}. Duke Math. J.
{\bf 139} (2007), 555--579.
\bibitem{QS} {\sc A. Quaas, A. Salort}, work in progress.
\bibitem{QX} {\sc A. Quaas, A. Xia}, {\varepsilonm Liouville type theorems for nonlinear elliptic equations and
systems involving fractional Laplacian in the half space}. Calc. Var. Part. Diff. Eqns. {\bf 52} (2015),
641--659.
\bibitem{RO} {\sc X. Ros-Oton}, {\varepsilonm Nonlocal elliptic equations in bounded domains: a survey}.
Preprint available at http://arxiv.org/abs/1504.04099
\bibitem{ROS} {\sc X. Ros-Oton, J. Serra}, {\varepsilonm The Dirichlet problem for the fractional Laplacian:
regularity up to the boundary}. J. Math. Pures Appl. {\bf 101} (2014), 275--302.
\bibitem{savin_vald}{\sc O. Savin, E. Valdinoci}, {\varepsilonm Elliptic PDEs with fibered nonlinearities.} J. Geom. Anal. {\bf 19} (2009), no 2, 420--432.
\bibitem{SV2} {\sc R. Servadei, E. Valdinoci}, {\varepsilonm Variational methods for non-local operators
of elliptic type}. Discrete Cont. Dyn. Syst. {\bf 33} (2013), 2105--2137.
\bibitem{SV} {\sc R. Servadei, E. Valdinoci}, {\varepsilonm On the spectrum of two different fractional
operators}. Proc. Roy. Soc. Edinburgh Sect. A {\bf 144} (2014), 831--855.
\bibitem{servadeivaldinociBN}{ \sc R. Servadei, E. Valdinoci}, {\varepsilonm The Brezis-Nirenberg result for the fractional Laplacian.} Trans. Amer. Math. Soc. {\bf 367} (2015), 67--102.
\bibitem{servadeivaldinociBNLOW}{ \sc R. Servadei, E. Valdinoci}, {\varepsilonm A Brezis-Nirenberg result for non-local critical equations in low dimension.} Commun. Pure Appl. Anal. {\bf 12} (6) (2013), 2445--2464.
\bibitem{signorini}{\sc A. Signorini}, {\varepsilonm Questioni di elasticit\'{a} non linearizzata e semilinearizzata}, Rendiconti di
Matematica e delle sue applicazioni {\bf 18} (1959), 95--139.
\bibitem{S} {\sc L. Silvestre}, {\varepsilonm Regularity of the obstacle problem for a fractional power of the Laplace
operator}. Comm. Pure Appl. Math. {\bf 60} (2007), no. 1, 67--112.
\bibitem{S2} {\sc L. Silvestre}, {\varepsilonm On the differentiability of the solution to an equation with drift and
fractional diffusion}. Indiana Univ. Math. J. {\bf 61} (2012), no. 2, 557--584.
\bibitem{SVZ} {\sc L. Silvestre, V. Vicol, A. Zlato\v s}, {\varepsilonm On the loss of continuity for super-critical
drift-diffusion equations}. Arch. Ration. Mech. Anal. {\bf 207} (2013), no. 3, 845--877.
\bibitem{SV08b}{\sc Y. Sire, E. Valdinoci}, {\varepsilonm Fractional Laplacian phase transitions and boundary reactions: a geometric
inequality and a symmetry result.} J. Funct. Anal. {\bf 256} (6) (2009), 1842--1864.
\bibitem{Stein} {\sc E. M. Stein}, ``Singular integrals and differentiability
properties of functions", Princeton Mathematical Series, No. 30 Princeton
University Press, Princeton, N.J. 1970.
\bibitem{TZ} {\sc V. Tarasov, G. Zaslasvky}, {\varepsilonm Fractional dynamics of systems with long-range
interaction}, Comm. Nonl. Sci. Numer. Simul. {\bf 11} (2006), 885--889.
\bibitem{toland}{\sc J. Toland}, {\varepsilonm The Peierls-Nabarro and Benjamin-Ono equations.} J. Funct. Anal. {\bf 145} (1) (1997), 136--150.
\bibitem{W} {\sc J. Wang}, {\varepsilonm Sub-Markovian $C_{0}$-semigroups generated by fractional Laplacian
with gradient perturbation}, Integral Equations Operator Theory {\bf 76} (2013), no. 2, 151--161.
\bibitem{ZCCY} {\sc R. Zhuo, W. Chen, X. Cui, Z. Yuan}, {\varepsilonm A Liouville theorem for the fractional Laplacian}.
Preprint available at http://arxiv.org/abs/1401.7402
\varepsilonnd{thebibliography}
\varepsilonnd{document} |
\begin{document}
\title{Boundary layer for a non-Newtonian flow \\ over a rough surface}
\author{David G\'erard-Varet
\thanks{Institut de Math\'{e}matiques de Jussieu et Universit\'{e} Paris 7, 175 rue du Chevaleret, 75013 Paris France
({\tt gerard-varet@math.jussieu.fr})}
, Aneta Wr\'oblewska-Kami\'nska
\thanks{ Institute of Mathematics, Polish Academy of Sciences, ul. \'Sniadeckich 8, 00-956 Warszawa, Poland
({\tt awrob@impan.pl})}
}
\maketitle
\section{Introduction}
The general concern of this paper is the effect of rough walls on fluids. This effect is important at various scales. For instance, in the area of microfluidics, recent experimental works have emphasized the role of hydrophobic rough walls in the improvement of slipping properties of microchannels. Also, in geophysics, as far as large scale motions are concerned, topography or shore variations can be assimilated to roughness.
For high Reynolds number flows, an important issue is to understand how localized roughness triggers instabilities, and transition to turbulence. For laminar flows, the point is rather to understand how distributed roughness may have a macroscopic impact on the dynamics. More precisely, the hope is to be able to encode an averaged effect through an effective boundary condition at a smoothened wall. Such a boundary condition, called {\em a wall law}, avoids simulating the small-scale dynamics that takes place in a boundary layer in the vicinity of the rough surface.
The derivation of wall laws for laminar Newtonian flows has been much studied, since the pioneering works of Achdou, Pironneau and Valentin \cite{Achdou:1995, Achdou:1998}, or J\"ager and Mikeli\'c \cite{Mikelic2001,Jager:2003}. See also \cite{Luchini:1995,Amirat:2001a,GV2003,Bresch,Mikelic2013}. A natural mathematical approach of this problem is by homogenization techniques, the roughness being modeled by a small amplitude/high frequency oscillation. Typically, one considers a Navier-Stokes flow in a channel $\Omega^\eps$ with a rough bottom:
$$\Omega^\eps = \Omega \cup \Sigma_0 \cup R^\eps. $$
Precisely:
\begin{itemize}
\item $\Omega = (0,1)^2$ is the flat portion of the channel.
\item $R^\eps$ is the rough portion of the channel: it reads
$$ R^\eps = \{ x = (x_1,x_2), x_1 \in (0,1), 0 > x_2 > \eps \gamma(x_1/\eps) \}$$
with a bottom surface $\Gamma^\eps := \{x_2 = \eps \gamma(x_1/\eps) \}$ parametrized by $\eps \ll 1$. Function $\gamma = \gamma(y_1)$ is the {\em roughness pattern}.
\item Eventually, $\Sigma_0 := (0,1) \times \{0\}$ is the interface between the rough and flat part. It is the artificial boundary at which the wall law is set.
\end{itemize}
Of course, within such model, the goal is to understand the asymptotic behavior of the Navier-Stokes solution $u^\eps$ as $\eps \rightarrow 0$. Therefore, the starting point is a formal approximation of $u^\eps$ under the form
\begin{equation} \label{blexpansion}
u^\eps_{app}(x) = u^0(x) + \eps u^1(x) + \dots + u^0_{bl}(x/\eps) + \eps u^1_{bl}(x/\eps) + \dots .
\end{equation}
In this expansion, the $u^i = u^i(x)$ describe the large-scale part of the flow, whereas the $u^i_{bl} = u^i_{bl}(y)$ describe the boundary layer. The typical variable $y=x/\eps$ matches the small-scale variations induced by the roughness. In the case of homogeneous Dirichlet conditions at $\Gamma^\eps$, one can check formally that:
\begin{itemize}
\item $u^0$ is the solution of the Navier-Stokes equation in $\Omega$, with Dirichlet condition at $\Sigma_0$. \item $u^0_{bl} = 0$, whereas $u^1_{bl}$ satisfies a Stokes equation in variable $y$ in the boundary layer domain
$$\Omega_{bl} := \{y = (y_1, y_2), y_1 \in \R, y_2 > \gamma(y_1)\}.$$
\end{itemize}
The next step is to solve this boundary layer system, and show convergence of $u^1_{bl}$ as $y_2 \rightarrow +\infty$ to a constant field $u^\infty = (U^\infty, 0)$. This in turn determines the appropriate boundary condition for the large scale correction $u^1$. From there, considering the large scale part $u^0 + \eps u^1$, one can show that:
\begin{itemize}
\item The limit wall law is a homogeneous Dirichlet condition. Let us point out that this feature persists even starting from a microscopic pure slip condition, under some non-degeneracy of the roughness: \cite{Bucur:2008,Bucur:2012,BoDaGe}.
\item The $O(\eps)$ correction to this wall law is a slip condition of Navier type, with $O(\eps)$ slip length.
\end{itemize}
All these steps were completed in aforementioned articles, in the case of periodic roughness pattern $\gamma$ : $\gamma(y_1 + 1) = \gamma(y_1)$. Over the last years, the first author has extended this analysis to general patterns of roughness, with ergodicity properties (random stationary distribution of roughness, {\it etc}). We refer to \cite{BaGeVa2008, DGV:2008,GeVaMa2010}. See also \cite{DaGeVa2011} for some recent work on the same topic.
The purpose of the present paper is to extend the former analysis to non-Newtonian flows. This may have various sources of interest. One can think of engineering applications, for instance lubricants to which polymeric additives confer a shear thinning behavior. One can also think of glaciology: as the interaction of glaciers with the underlying rocks is unavailable, wall laws can help. From a mathematical point of view, such examples may be described by a power-law model. Hence, we consider a system of the following form:
\begin{equation} \label{EQ1}
\left\{
\begin{aligned}
-\dive S(Du) + \nabla p = e_1 & \quad \mbox{in} \: \Omega^\eps, \\
\dive u = 0 & \quad \mbox{in} \: \Omega^\eps, \\
u\vert_{\Gamma^\eps} = 0, \quad u\vert_{x_2 = 1} = 0&, \quad u \: \mbox{$1$-periodic in $x_1$}.
\end{aligned}
\right.
\end{equation}
As usual, $u = u(x) \in \R^2$ is the velocity field, $p = p(x) \in \R$ is the pressure. The source term $e_1$ at the right-hand side of the first equation corresponds to a constant pressure gradient $e_1 = (1,0)^t$ throughout the channel. Eventually, the left-hand side involves the stress tensor of the fluid. As mentioned above, it is taken of power-law type: $S : \R^{2\times 2}_{\rm sym} \to \R^{2\times 2}_{\rm sym}$ is given by
\begin{equation} \label{defstress}
S : \R^{2\times 2}_{\rm sym} \to \R^{2\times 2}_{\rm sym}, \quad S(A) = \nu |A|^{p-2}A, \quad \nu > 0, \quad 1 < p < +\infty ,
\end{equation}
where $|A| = (\sum_{i,j} a_{i,j}^2)^{1/2}$ is the usual euclidean norm of the matrix $A$. {\em For simplicity, we shall take $\nu = 1$}. Hence, $S(Du) = |Du|^{p-2} Du$, where we recall that $Du = \frac{1}{2} (\nabla u + (\nabla u)^t)$ is the symmetric part of the jacobian. Following classical terminology, the case $p < 2$ resp. $p > 2$ corresponds to {\em shear thinning} fluids, resp. {\em shear thickening} fluids. The limit case $p=2$ describes a Newtonian flow. Note that we complete the equation in system \eqref{EQ1} by a standard no-slip condition at the top and bottom boundary of the channel. For the sake of simplicity, we assume periodicity in the large scale horizontal variable $x_1$. Finally, we also make a simplifying periodicity assumption on the roughness pattern $\gamma$:
\begin{equation}
\mbox{$\gamma$ is $C^{2,\alpha}$ for some $\alpha >0$, has values in $(-1,0)$, and is $1$-periodic in $y_1$}.
\end{equation}
For every $\eps > 0$ and any value of $p$, the generalized Stokes system \eqref{EQ1} has a unique solution
$$ (u^\eps, p^\eps) \in W^{1,p}(\Omega^\eps) \times L^{p'}(\Omega^\eps)/\R .$$
The main point is to know about the asymptotic behavior of $u^\eps$, precisely to build some good approximate solution. With regards to the Newtonian case, we anticipate that this approximation will take a form close to \eqref{blexpansion}. Our plan is then:
\begin{itemize}
\item to derive the equations satisfied by the first terms of expansion \eqref{blexpansion}.
\item to solve these equations, and show convergence of the boundary layer term to a constant field away from the boundary.
\item to obtain error estimates for the difference between $u^\eps$ and $u^\eps_{app}$.
\item to derive from there appropriate wall laws.
\end{itemize}
This program will be more difficult to achieve for non-Newtonian fluids, in particular for the shear thinning case $p < 2$, notably as regards the study of the boundary layer equations on $u_{bl} \: := u^1_{bl}$. In short, these equations will be seen to read
$$ - \dive(S(A + D u_{bl})) + \nabla p = 0, \quad \dive u_{bl} = 0, \quad y \in \Omega_{bl} $$
for some explicit matrix $A$, together with periodicity condition in $y_1$ and a homogeneous Dirichlet condition at the bottom of $\Omega_{bl}$. Due to the nonlinearity of these equations and the fact that $A \neq 0$, the analysis will be much more difficult than in the Newtonian case, notably the proof of the so-called Saint-Venant estimates. We refer to section \ref{subsecstvenant} for all details.
Let us conclude this introduction by giving some references on related problems. In \cite{MarusicPaloka2000}, E. Maru\v{s}i\'c-Paloka considers power-law fluids with convective terms in infinite channels and pipes (the non-Newtonian analogue of the celebrated Leray's problem). After an appropriate change of unknown, the system studied in \cite{MarusicPaloka2000} bears some strong similarity to our boundary layer system. However, it is different at two levels : first, the analysis is restricted to the case $p > 2$. Second, our lateral periodicity condition in $y_1$ is replaced by a no-slip condition. This allows to use Poincar\'e's inequality in the transverse variable, and control zero order terms (in velocity $u$) by $\nabla u$, and then by $D u$ through the Korn inequality. It simplifies in this way the derivation of exponential convergence of the boundary layer solution (Saint-Venant estimates). The same simplification holds in the context of paper \cite{BoGiMa-Pa}, where the behaviour of a Carreau flow through a thin filter is analysed. The corrector describing the behaviour of the fluid near the filter is governed by a kind of boundary layer type system, in a slab that is infinite vertically in both directions. In this setting, one has $A = 0$, and the authors refer to \cite{MarusicPaloka2000} for well-posedness and qualitative behaviour. We also refer to the recent article \cite{Suarez-Grau_2015} dedicated to power-law fluids in thin domains, with Navier condition and anisotropic roughness (with a wavelength that is larger than the amplitude). In this setting, no boundary layer analysis is needed, and the author succeeds to describe the limit asymptotics by the unfolding method. Finally, we point out the very recent paper \cite{ChupinMartin2015}, where an Oldroyd fluid is considered in a rough channel. In this setting, no nonlinearity is associated to the boundary layer, which satisfies a Stokes problem.
\section{Boundary layer analysis}
From the Newtonian case, we expect the solution $(u^\eps, p^\eps)$ of \eqref{EQ1} to be approximated by
$$ u^\eps \approx u^0(x) + \eps u_{bl}(x/\eps), \quad p^\eps \approx p^0(x) + p_{bl}(x/\eps) ,$$
where
\begin{itemize}
\item $(u^0, p^0)$ describes the flow away from the boundary layer. We shall take $u^0 = 0$ and $p^0 = 0$ in the rough part $R^\eps$ of the channel.
\item $(u_{bl}, p_{bl}) = (u_{bl}, p_{bl})(y)$ is a boundary layer corrector defined on the slab
$$\Omega_{bl} := \{y = (y_1, y_2), y_1 \in \T, y_2 > \gamma(y_1)\},$$
where $\T$ is the torus $\R/\Z$. This torus corresponds implicitly to a periodic boundary condition in $y_1$, which is inherited from the periodicity of the roughness pattern $\gamma$.
We denote
$$ \Omega_{bl}^{\pm} \: := \: \Omega_{bl} \cap \{ \pm y_2 > 0 \} $$
its upper and lower parts, and
$$ \Gamma_{bl} := \{ y = (y_1, y_2), y_1 \in \T, y_2 = \gamma(y_1)\} $$
its bottom boundary. As the boundary layer corrector is supposed to be localized, we expect that
$$ \nabla u_{bl} \rightarrow 0 \quad \mbox{as $y_2 \rightarrow +\infty$}. $$
\end{itemize}
With this constraint in mind, we take $(u^0, p^0)$ to be the solution of
\begin{equation} \label{EQ2}
\left\{
\begin{aligned}
-\dive S(Du^0) + \nabla p^0 = e_1 & \quad \mbox{in} \: \Omega, \\
\dive u^0 = 0 & \quad \mbox{in} \: \Omega, \\
u^0\vert_{\Sigma_0} = 0, \quad u^0\vert_{x_2 = 1} = 0&, \quad u^0 \mbox{ $1$-periodic in $x_1$}.
\end{aligned}
\right.
\end{equation}
The solution is explicit and generalizes the Poiseuille flow. A simple calculation yields: for all $x \in \Omega$,
\begin{equation} \label{Poiseuille}
p^0(x) = 0, \quad u^0(x) = (U(x_2), 0), \quad U(x_2) = \frac{p-1}{p} \left(\sqrt{2}^{-\frac{p}{(p-1)}} - \sqrt{2}^{\frac{p}{(p-1)}} \left|x_2 - \frac{1}{2}\right|^{\frac{p}{p-1}} \right).
\end{equation}
We extend this solution to the whole rough channel by taking: $u^0 = 0, p^0 = 0$ in $R^\eps$.
This zero order approximation is clearly continuous across the interface $\Sigma_0$, but the associated stress is not: denoting
\begin{equation} \label{defA}
A \: := D(u^0)\vert_{y_2 = 0^+}\: = \frac{1}{2}\begin{pmatrix} 0 & U'(0) \\ U'(0) & 0 \end{pmatrix}, \quad \mbox{with $U'(0) = \sqrt{2}^{\frac{p-2}{p-1}}$}
\end{equation}
we obtain
$$ [ S(Du^0) n - p^0 n ]\vert_{\Sigma_0} = |A|^{p-2}A n = \left( \begin{smallmatrix} - \sqrt{2}^{-p} U'(0)^{p-1} \\ 0 \end{smallmatrix}\right) = \left( \begin{smallmatrix} - \frac{1}{2} \\ 0 \end{smallmatrix}\right) $$
with $n = - e_2 = -(0,1)^t$ and $[f] := f\vert_{x_2 = 0^+} - f\vert_{x_2 = 0^-}$.
This jump should be corrected by $u_{bl}$, so that the total approximation $u^0(x) + \eps u_{bl}(x/\eps)$ has no jump. This explains the amplitude $\eps$ of the boundary layer term, as its gradient will then be $O(1)$. By Taylor expansion $U(x_2) = U(\eps y_2) = U(0) + \eps U'(0) y_2 + \dots$ we get formally $D(u^0 + \eps u_{bl}(\cdot/\eps)) \approx A + D u_{bl}$, where the last symmetric gradient is with respect to variable $y$. We then derive the following boundary layer system:
\begin{equation} \label{BL1}
\left\{
\begin{aligned}
- \dive S(A + D u_{bl}) + \nabla p_{bl} & = 0 \quad \mbox{in} \: \Omega_{bl}^+, \\
- \dive S(D u_{bl}) + \nabla p_{bl} & = 0 \quad \mbox{in} \: \Omega_{bl}^-, \\
\dive u_{bl} & = 0 \quad \mbox{in} \: \Omega_{bl}^+ \cup \Omega_{bl}^-, \\
u_{bl}\vert_{\Gamma_{bl}} & = 0, \\
u_{bl}\vert_{y_2 = 0^+} - u_{bl}\vert_{y_2 = 0^-} & = 0,
\end{aligned}
\right.
\end{equation}
together with the jump condition
\begin{equation} \label{BL2}
\left( S( A + D u_{bl}) n - p_{bl} n \right) |_{y_2 = 0^+}
- \left(S(D u_{bl}) n - p_{bl} n\right) |_{y_2= 0^-} = 0, \quad n = (0,-1)^t.
\end{equation}
Let us recall that the periodic boundary condition in $y_1$ is encoded in the definition of the boundary layer domain. The rest of this section will be devoted to the well-posedness and qualitative properties of \eqref{BL1}-\eqref{BL2}. We shall give detailed proofs only for the more difficult case $p < 2$, and comment briefly on the case $p \ge 2$ at the end of the section. Our main results will be the following:
\begin{Theorem} {\bf (Well-posedness)} \label{thWP}
\noindent
For all $1 < p < 2$, \eqref{BL1}-\eqref{BL2} has a unique solution
$ (u_{bl},p_{bl}) \in W^{1,p}_{loc}(\Omega_{bl}) \times L^{p'}_{loc}(\Omega_{bl})/\R $
satisfying for any $M > |A|$:
$$D u_{bl} \, 1_{\{|D u_{bl}| \le M\}} \in L^2(\Omega_{bl}), \quad D u_{bl} \, 1_{\{|D u_{bl}| \ge M\}} \in L^p(\Omega_{bl}).$$
\noindent
For all $p \ge 2$, \eqref{BL1}-\eqref{BL2} has a unique solution
$ (u_{bl},p_{bl}) \in W^{1,p}_{loc}(\Omega_{bl}) \times L^{p'}_{loc}(\Omega_{bl})/\R $
s.t. $D u_{bl} \in L^p(\Omega_{bl}) \cap L^2(\Omega_{bl})$.
\end{Theorem}
\begin{Theorem} {\bf (Exponential convergence)} \label{thEC}
\noindent
For any $1 < p < +\infty$, the solution given by the previous theorem converges exponentially, in the sense that for some $C, \delta > 0$
$$ | u_{bl}(y) - u^\infty | \le C e^{-\delta y_2} \quad \forall \: y \in \Omega_{bl}^+,$$
where $u^\infty= (U^\infty, 0)$ is some constant horizontal vector field.
\end{Theorem}
\subsection{Well-posedness} \label{sectionWP}
\subsubsection*{A priori estimates}
We focus on the case $1 < p < 2$, and provide the {\it a priori} estimates on which the well-posedness is based. The easier case $p \ge 2$ is discussed at the end of the paragraph. As $A$ is a constant matrix, we have from \eqref{BL1}:
$$ - \dive S(A + D u_{bl}) + \dive S(A) + \nabla p_{bl} = 0 \quad \mbox{in} \: \Omega_{bl}^+, \quad - \dive S(D u_{bl}) + \nabla p_{bl} = 0 \quad \mbox{in} \: \Omega_{bl}^-. $$
We multiply the two equations by $D u_{bl}$ and integrate over $\Omega_{bl}^+$ and
$\Omega_{bl}^-$ respectively. After integrations by parts, accounting for the jump conditions at $y_2 = 0$, we get
\begin{equation} \label{variationalBL}
\int_{\Omega_{bl}^+} (S(A + Du_{bl}) - S(A)) : D u_{bl} \dy + \int_{\Omega_{bl}^-} S(D u_{bl}) : D u_{bl} \dy = -\int_{y_2 = 0} S(A)n \cdot u_{bl} {\rm\,d}S.
\end{equation}
The right-hand side is controlled using successively Poincar\'e and Korn inequalities (for the Korn inequality, see the appendix):
\begin{equation}
|\int_{y_2 = 0} S(A)n \cdot u_{bl} {\rm\,d}S | \le C \| u_{bl} \|_{L^p(\{ y_2 = 0\})} \le C' \| \nabla u_{bl} \|_{L^p(\Omega_{bl}^-)} \le C'' \| D u_{bl} \|_{L^p(\Omega_{bl}^-)} .
\end{equation}
As regards the left-hand side, we rely on the following vector inequality, established in \cite[p74, eq. (VII)]{Lind}: for all $1 < p \le 2$, for all vectors $a,b$
\begin{equation} \label{vectorinequality}
( |b|^{p-2}b - |a|^{p-2} a \: | \: b-a) \: \ge \: (p-1) |b-a|^2 \int_0^1 |a + t(b-a)|^{p-2} dt.
\end{equation}
In particular, for any $M > 0$, if $|b-a| \le M$, one has
\begin{equation} \label{ineq1}
( |b|^{p-2}b - |a|^{p-2} a \: | \: b-a) \: \ge \: \frac{p-1}{(|a| + M)^{2-p}} |b-a|^2 ,
\end{equation}
whereas if $|b-a| > M > |a|$, we get
\begin{equation} \label{ineq2}
( |b|^{p-2}b - |a|^{p-2} a \: | \: b-a) \:
\ge (p-1) |b-a|^2 \int_{\frac{|a|}{|b-a|}}^1 \left(2 t |b-a|\right)^{p-2} dt \ge 2^{p-3} \left(1 - (|a|/M)^{p-1}\right) |b-a|^p.
\end{equation}
We then apply such inequalities to \eqref{variationalBL}, taking $a = A$, $b = A + Du_{bl}$. For $M > |A|$, there exists $c$ dependent on $M$ such that
$$
\int_{\Omega_{bl}^+} (S(A + Du_{bl}) - S(A)) : D u_{bl} \dy \ge c \int_{\Omega_{bl}^+} \bbbone_{\{ |Du_{bl}| \le M \}} |Du_{bl}|^2 \dy \: + \: \int_{\Omega_{bl}^+} \bbbone_{\{ |Du_{bl}| > M \}} |D u_{bl}|^p \dy ,
$$
so that for some $C$ dependent on $M$
\begin{equation*}
\int_{\Omega_{bl}^+}| \bbbone_{\{ |Du_{bl}| \le M \}} Du_{bl}|^2 \dy \: + \: \int_{\Omega_{bl}^+} |\bbbone_{\{ |Du_{bl}| > M \}} D u_{bl}|^p \dy + \int_{\Omega_{bl}^-} | D u_{bl}|^p \dy \: \le \: C \, \| D u_{bl} \|_{L^p(\Omega_{bl}^-)} .
\end{equation*}
Hence, still for some $C$ dependent on $M$:
\begin{equation} \label{aprioriestimate}
\int_{\Omega_{bl}^+} 1_{\{ |Du_{bl}| \le M \}} |Du_{bl}|^2 \dy \: + \: \int_{\Omega_{bl}^+} 1_{\{ |Du_{bl}| > M \}} |D u_{bl}|^p \dy
+ \int_{\Omega_{bl}^-} | D u_{bl}|^p \dy \: \le \: C .
\end{equation}
This is the {\it a priori} estimate on which Theorem \ref{thWP} can be established (for $p \in ]1,2]$). Note that this inequality implies that for any height $h$,
$$ \| Du_{bl} \|_{L^p(\Omega_{bl} \cap \{y_2 \le h \})} \le C_h $$
(bounding the $L^p$ norm by the $L^2$ norm on a bounded set). Combining with Poincar\'e and Korn inequalities, we obtain that $u_{bl}$ belongs to $W^{1,p}_{loc}(\Omega_{bl})$.
In the case $p\geq 2$, we can directly use the following inequality, which holds for all $a,\ b \in \R^n$:
\begin{equation}\label{abp_1}
| a - b |^p 2^{2-p} \leq 2^{-1} \left( |b|^{p-2} + |a|^{p-2}\right) |b-a|^{2} \leq \left\langle |a|^{p-2} a - |b|^{p-2} b, a-b \right\rangle .
\end{equation}
It provides both an $L^2$ and $L^p$ control of the symmetric gradient of the solution. Indeed, taking $a = \tA + \tD_y \ub$, $b= \tA$ and using \eqref{variationalBL} we get the following {\it a priori} estimates for $p\geq 2$
\begin{equation}\label{apesDup}
\begin{split}
& 2^{2-p} \int_{\Omega_{bl}^{+}} | \tD \ub |^p \dy + \int_{\Omega_{bl}^{-}} | \tD \ub |^p \dy +
2^{-1} |A|^{p-2} \int_{\Omega_{bl}^+} | \tD \ub |^2 \dy \: \\
& {\leq} \:
\int_{\Omega_{bl}^{+}} \left( S(A+ Du_{bl}) - S(A) \right) : D \ub \dy + \int_{\Omega_{bl}^{-}} S(D u_{bl} ) : D \ub \dy \\
& = \: - \int_{\Sigma_0} S(A) n \cdot u_{bl} {\rm \, d}S \\
& \leq \: c(\alpha) \| S(A) \|^{p'}_{L^{p'}(\Sigma_0)} + \alpha \| \ub \|^{p}_{L^p(\Sigma_0)}
\leq \: c(\alpha) \| S(A) \|^{p'}_{L^{p'}(\Sigma_0)}
+ \alpha C_\Gamma \| \nablabla \ub \|^{p}_{L^p(\Omega_{bl}^{-})}
\\ &
\leq c(\alpha) \| S(A) \|^{p'}_{L^{p'}(\Sigma_0)}
+ \alpha C_\Gamma C_K\| \tD \ub \|^{p}_{L^p(\Omega_{bl}^{-})} ,
\end{split}
\end{equation}
where the trace theorem, the Poincar\'e inequality and the Korn inequality were employed.
By choosing the coefficient $\alpha$ small enough, and by the imbedding of $L^p(\Omega_{bl}^-)$ in $L^2(\Omega_{bl}^-)$, \eqref{apesDup} provides
\begin{equation}\label{es:Dup}
\int_{\Omega_{bl}} |\tD \ub|^{p} + |\tD \ub|^{2} \dy \leq C \| S(A) \|^{p'}_{L^{p'}(\Sigma_0)} < \infty .
\end{equation}
Eventually, by Korn and Poincar\'e inequalities: $\ub \in W^{1,p}(\Omega_{bl})$ for $2\leq p < \infty$.
\subsubsection*{Construction scheme for the solution}
We briefly explain how to construct a solution satisfying the above estimates. We restrict to the most difficult case $p \in ]1,2]$.
There are two steps:
{\em Step 1}: we solve the same equations, but {\em in the bounded domain $\Omega_{bl,n} = \Omega_{bl} \cap \{ y_2 < n \}$}, with a Dirichlet boundary condition at the top. As
$\Omega_{bl,n}$ is bounded, the imbedding of $W^{1,p}$ in $L^p$ is compact, so that a solution $u_{bl,n}$ can be built in a standard way. Namely, one can construct a sequence of Galerkin approximations $u_{bl,n,m}$ by Schauder's fixed point theorem. Then, as the estimate \eqref{aprioriestimate} holds for $u_{bl,n,m}$ uniformly in $m$ and $n$, the sequence $D u_{bl,n,m}$ is bounded in $L^p(\Omega_{bl,n})$ uniformly in $m$. Sending $m$ to infinity yields a solution $u_{bl,n}$, the convergence of the nonlinear stress tensor follows from Minty's trick. Note that one can then perform on $u_{bl,n}$ the manipulations of the previous paragraph, so that it satisfies \eqref{aprioriestimate} uniformly in $n$.
{\em Step 2}: we let $n$ go to infinity. We first extend $u_{bl,n}$ by $0$ for $y_2 > n$, and fix $M > |A|$. From the uniform estimate \eqref{aprioriestimate}, we get easily the following convergences (up to a subsequence):
\begin{equation}
\begin{aligned}
& u_{bl,n} \rightarrow u_{bl} \mbox{ weakly in } W^{1,p}_{loc}(\Omega_{bl}), \\
& D u_{bl,n} \rightarrow D u_{bl} \mbox{ weakly in } L^p(\Omega^-_{bl}), \\
& D u_{bl,n} {\bbbone}_{|Du_{bl,n}| < M}\rightarrow V_1 \mbox{ weakly in } L^2(\Omega^+_{bl}), \quad \mbox{ weakly-* in } L^\infty(\Omega_{bl}^+), \\
& D u_{bl,n} {\bbbone}_{|D u_{bl,n}| \ge M}\rightarrow V_2 \mbox{ weakly in } L^p(\Omega^+_{bl}).
\end{aligned}
\end{equation}
Of course, $D u_{bl} = V_1 + V_2$ in $\Omega_{bl}^+$. A key point is that
$$\mbox{$S(A + D u_{bl,n}) - S(A)$ is bounded uniformly in $n$ in $(L^{p}(\Omega^+_{bl}))' = L^{p'}(\Omega^+_{bl})$ and in $\left(L^2(\Omega^+_{bl}) \cap L^\infty(\Omega_{bl}^+)\right)'$.}$$
and converges weakly-* to some $S^+$ in that space. To establish this uniform bound, we treat separately
$$ S_{n,1} \: := \: (S(A + D u_{bl,n}) - S(A)) {\bbbone}_{|Du_{bl,n}| < M}, \quad S_{n,2} \: := \: (S(A + D u_{bl,n}) - S(A)) {\bbbone}_{|Du_{bl,n}| \ge M}. $$
\begin{itemize}
\item For $S_{n,1}$, we use the inequality \eqref{ineq3}. It gives $|S_{n,1}| \le C |D u_{bl,n}| {\bbbone}_{|Du_{bl,n}| < M}$,
which provides a uniform bound in $L^{2} \cap L^\infty$, and so in particular in $L^{p'}$ and in $L^2$.
\item For $S_{n,2}$, we use first that $|S_{n,2}| \le C |D u_{bl,n}|^{p-1} {\bbbone}_{|D u_{bl,n}| \ge M}$, so that it is uniformly bounded in $L^{p'}$. We use then \eqref{ineq3}, so that
$ |S_{n,2}| \le C |D u_{bl,n}| {\bbbone}_{|D u_{bl,n}| \ge M}$, which yields a uniform bound in $L^p$, in particular in $(L^2 \cap L^\infty)'$ ($p \in ]1,2]$).
\end{itemize}
From there, standard manipulations give
$$ \int_{\Omega_{bl}^+} (S(A+ D u_{bl,n}) - S(A)) : D u_{bl,n} \rightarrow \int_{\Omega_{bl}^+} S^+ : (V_1 + V_2) = \int_{\Omega_{bl}^+} S^+ : D u_{bl} $$
One has even more directly
$$ \int_{\Omega_{bl}^-} S(D u_{bl,n}) : D u_{bl,n} \rightarrow \int_{\Omega_{bl}^-} S^- : D u_{bl} $$
and one concludes by Minty's trick that $S^+ = S(A+D u_{bl}) - S(A)$, $\: S^- = S(D u_{bl})$. It follows that $u_{bl}$ satisfies \eqref{BL1}-\eqref{BL2} in a weak sense. Finally, one can perform on $u_{bl}$ the manipulations of the previous paragraph, so that it satisfies \eqref{aprioriestimate}.
\subsubsection*{Uniqueness}
Let $u_{bl}^1$ and $u_{bl}^2$ be weak solutions of \eqref{BL1}-\eqref{BL2}, that is satisfying the variational formulation
\begin{equation} \label{VF}
\int_{\Omega_{bl}^+} S(A+ D u_{bl}^i) : D \varphi \: + \: \int_{\Omega_{bl}^-} S(D u_{bl}^i) : D \varphi = -\int_{y_2 = 0} S(A)n \cdot \varphi {\rm\,d}S, \quad i=1,2
\end{equation}
for all smooth divergence free fields $\varphi \in C^\infty_c(\Omega_{bl})$.
The point is then to replace $\varphi$ by $u_{bl}^1 - u_{bl}^2$, to obtain
\begin{equation} \label{zeroidentity}
\int_{\Omega_{bl}^+} \left( S(A+ D u_{bl}^1) - S(A + D u_{bl}^2) \right) : D (u_{bl}^1 - u_{bl}^2) \: + \: \int_{\Omega_{bl}^-} \left( S(D u_{bl}^1) - S(D u_{bl}^2) \right) : D (u_{bl}^1 - u_{bl}^2) = 0 . \end{equation}
Rigorously, one constructs by convolution a sequence $\varphi_n$ such that $D \varphi_n$ converges appropriately to $D u_{bl}^1 - D u_{bl}^2$. In the case $p < 2$, the convergence holds in
$(L^2(\Omega_{bl}^+) \cap L^\infty(\Omega_{bl}^+)) + L^p(\Omega_{bl}^+)$, respectively in $L^p(\Omega_{bl}^-)$. One can pass to the limit as $n$ goes to infinity because
$$ S(A+D u_{bl}^1) - S(A+D u_{bl}^2) = \left( S(A+D u_{bl}^1) - S(A) \right) + \left( S(A) - S(A+D u_{bl}^2) \right), $$
respectively $S(D u_{bl}^1) - S(D u_{bl}^2)$, belongs to the dual space: see the arguments of the construction scheme of section \ref{sectionWP}.
Eventually, by strict convexity of $M \rightarrow |M|^p$ ($p > 1$), \eqref{zeroidentity} implies that $D u_{bl}^1 = D u_{bl}^{2}$. This implies that $u_{bl}^1 - u_{bl}^2$ is a constant (dimension is $2$), and due to the zero boundary condition at $\pa \Omega_{bl}$, we get $u_{bl}^1 = u_{bl}^2$.
\subsection{Saint-Venant estimate} \label{subsecstvenant}
We focus in this paragraph on the asymptotic behaviour of $u_{bl}$ as $y_2$ goes to infinity. The point is to show exponential convergence of $u_{bl}$ to a constant field. At first, we can use interior regularity results for the generalized Stokes equation in two dimensions. In particular, pondering on the results of \cite{Wolf} for $p < 2$, and \cite{Kaplicky2002} for $p \ge 2$, we have :
\begin{Lemma} \label{lemma_unifbound}
The solution built in Theorem \ref{thWP} satisfies: $u_{bl}$ has $C^{1,\alpha}$ regularity over $\Omega_{bl} \cap \{ y_2 > 1\}$ for some $0 < \alpha < 1$. In particular, $\nabla u_{bl}$ is bounded uniformly over $\Omega_{bl} \cap \{ y_2 > 1\}$.
\end{Lemma}
{\em Proof}. Let $0 \le t < s$. We define $\Omega_{bl}^{t,s} \: := \: \Omega_{bl} \cap \{t < y_2 \le s\}$. Note that $\Omega_{bl} \cap \{y_2 > 1 \} = \cup_{t \in \N_*} \Omega_{bl}^{t,t+1}$. Moreover, from the {\it a priori estimate} \eqref{aprioriestimate} or \eqref{es:Dup}, we deduce easily that
\begin{equation} \label{uniformLp}
\| D u_{bl} \|_{L^p(\Omega_{bl}^{t,t+2})} \le C
\end{equation}
for all $t > 0$, for a constant $C$ that does not depend on $t$. We then introduce:
\begin{equation*}
v_t \: := \: 2 A \left(0,y_2 - (t+\frac{1}{2})\right) + u_{bl} \: - \frac{1}{2} \: \int_{\Omega_{bl}^{t-\frac{1}{2},t+\frac{3}{2}}} u_{bl} \dy, \quad y \in \Omega_{bl}^{t-\frac{1}{2},t+\frac{3}{2}}, \quad \forall t \in \N_*.
\end{equation*}
From \eqref{BL1}:
$$ -\dive(S(D v_t)) + \nablabla p_{bl} = 0, \quad \dive v_t= 0 \quad \mbox{ in } \: \Omega_{bl}^{t-1/2,t+3/2}, \quad \forall t \in \N_*. $$
Moreover, we get for some $C$ independent of $t$:
\begin{equation} \label{uniformLpv}
\| v_t \|_{W^{1,p}(\Omega_{bl}^{t-1/2,t+3/2})} \le C \quad \forall t \in \N_*.
\end{equation}
Note that this $W^{1,p}$ control follows from \eqref{uniformLp}: indeed, one can apply the Poincar\'e inequality for functions with zero mean, and then the Korn inequality. One can then ponder on the interior regularity results of articles \cite{Wolf} and \cite{Kaplicky2002}, depending on the value of $p$: $v_{t}$ has $C^{1,\alpha}$ regularity over $\Omega_{bl}^{t,t+1}$ for some $\alpha \in (0,1)$ (independent of $t$): for some $C'$,
$$\| v_t \|_{C^{1,\alpha}(\Omega_{bl}^{t,t+1})} \le C', \quad \mbox{and in particular} \quad \| \nabla v_t \|_{L^\infty(\Omega_{bl}^{t,t+1})} \le C' \quad \forall t \in \N_*.$$
Going back to $u_{bl}$ concludes the proof of the lemma.
We are now ready to establish a keypoint in the proof of Theorem \ref{thEC}, called a
{\it Saint-Venant estimate}: namely, we show that the energy of the solution located above $y_2 = t$ decays exponentially with $t$. In our context, a good energy is
$$ E(t) \: := \: \int_{\{y_2 > t\}} | \nabla u_{bl} |^2 \dy $$
for $t > 1$. Indeed, from Lemma \ref{lemma_unifbound}, there exists $M$ such that $|D u_{bl}| \le M$ for all $y$ with $y_2 > 1$. In particular, in the case $p < 2$, when localized above $y_2 =1$, the energy functional that appears at the left hand-side of \eqref{aprioriestimate} only involves the $L^2$ norm of the symmetric gradient (or of the gradient by the homogeneous Korn inequality, {\it cf} the appendix). Hence, $\nabla u_{bl} \in L^2(\Omega_{bl} \cap \{ y_2 > 1 \})$. The same holds for $p \ge 2$, thanks to \eqref{es:Dup}.
\begin{Proposition} \label{expdecay}
There exists $C,\delta > 0$, such that $E(t) \le C \exp(-\delta t)$.
\end{Proposition}
{\em Proof}. Let $t > 1$, $\Omega_{bl}^t := \Omega_{bl} \cap \{ y_2 > t \}$. Let $M$ such that $|D u_{bl}|$ is bounded by $M$ over $\Omega_{bl}^1$, which exists due to Lemma~\ref{lemma_unifbound}. As explained just above, one has
$ \int_{\Omega_{bl}^1 } |D u_{bl}|^2 < +\infty$, and by Korn inequality $E(1)$ is finite. In particular, $E(t)$ goes to zero as $t \rightarrow +\infty$ and the point is to quantify the speed of convergence. By the use of inequality \eqref{ineq1} (with $a = A$, $b = A + D u_{bl}$), we find
\begin{equation} \label{boundE}
\begin{aligned}
E(t) \le C \int_{\Omega_{bl}^t} |D u_{bl}|^2 \dy & \le
C' \int_{\Omega_{bl}^t}
\left( |A + D u_{bl}|^{p-2}(A + D u_{bl}) - | A |^{p-2} A \right): D u_{bl} \dy
\\ &
\le C' \lim\limits_{n \to \infty } \int_{\Omega_{bl}}
\left( |A + D u_{bl}|^{p-2}( A + D u_{bl}) - |A |^{p-2} A \right): D u_{bl} \, \chi_n (y_2) \dy
\end{aligned}
\end{equation}
for a smooth $\chi_n$ with values in $[0,1]$ such that $\chi_n = 1$ over $[t,t+n]$, $\chi_n=0$ outside $[t-1, t+n+1]$, and $|\chi'_n| \le 2$. Then, we integrate by parts the right-hand side, taking into account the first equation in \eqref{BL1}. We write
\begin{align}
\nonumber
& \int_{\Omega_{bl}}
\left( |A + D u_{bl}|^{p-2}( A + D u_{bl}) - |A |^{p-2} A \right): D u_{bl} \, \chi_n (y_2) \dy \\
\nonumber
= & - \int_{\Omega_{bl}} \nabla p_{bl} \cdot u_{bl} \chi_n(y_2) \dy - \int_{\Omega_{bl}} \left( (S(A + D u_{bl}) - S(A)) \left( \begin{smallmatrix} 0 \\ \chi'_n \end{smallmatrix} \right) \right) \cdot u_{bl} \dy \\
\label{I1I2}
= &\int_{\Omega_{bl}} \left( \left(S(A) - S(A+D u_{bl})\right) \left( \begin{smallmatrix} 0 \\ \chi'_n \end{smallmatrix} \right) \right) \cdot u_{bl} \dy + \int_{\Omega_{bl}} p_{bl} \chi'_n u_{bl,2} \dy \: := \: I_1 + I_2.
\end{align}
To estimate $I_1$ and $I_2$, we shall make use of simple vector inequalities. Namely:
\begin{equation} \label{ineq3}
\mbox{for all $p \in ]1,2]$, for all vectors $a,b$, $a\neq 0$,} \quad | |b|^{p-2} b - |a|^{p-2} a | \: \le \: C_{p,a} \, |b-a| ,
\end{equation}
whereas
\begin{equation} \label{ineq3duo}
\mbox{for all $p > 2$, for all vectors $a,b$, $|b| \le M$}, \quad | |b|^{p-2} b - |a|^{p-2} a | \: \le \: C_{p,a,M} \, |b-a| .
\end{equation}
The latter is a simple application of the finite increment inequality. As regards the former, we distinguish between two cases:
\begin{itemize}
\item If $|b-a| < \frac{|a|}{2}$, it follows from the finite increments inequality.
\item If $|b-a| \ge \frac{|a|}{2}$, we simply write
\begin{align*}
| |b|^{p-2} b - |a|^{p-2} a | \: & \le \: |b|^{p-1} + |a|^{p-1} \: \le \: (3^{p-1} + 2^{p-1}) |b-a|^{p-1} \le \: (3^{p-1} + 2^{p-1}) (\frac{|a|}{2})^{p-2} |b-a|
\end{align*}
using that $\left(\frac{2 |b-a|}{|a|}\right)^{p-1} \le \left(\frac{2 |b-a|}{|a|}\right)$ for $1 < p \le 2$.
\end{itemize}
We shall also make use of the following:
\begin{Lemma} \label{lem_averages} For any height $t > 0$
\begin{description}
\item[i)] $\int_{\{y_2 =t\}} u_{bl,2} = 0$.
\item[ii)] $\int_{\{y_2=t\}} (S(A+D u_{bl}) - S(A)) \left( \begin{smallmatrix} 0 \\ 1 \end{smallmatrix} \right) \cdot \left( \begin{smallmatrix} 1 \\ 0 \end{smallmatrix} \right) = 0$.
\end{description}
\end{Lemma}
{\em Proof of the lemma}.
\noindent
i) The integration of the divergence-free condition over $\Omega_{bl}^{0,t}$ leads to
\begin{align*}
0 = \int_{\Omega_{bl}^{0,t}} \dive u_{bl} & = \int_{\{y_2 = t \}} u_{bl,2} - \int_{\{y_2 = 0^+ \}}u_{bl,2} = \int_{\{y_2 = t \}} u_{bl,2} - \int_{\{y_2 = 0^- \}} u_{bl,2} \\
& = \int_{\{y_2 = t \}} u_{bl,2} - \int_{\Omega_{bl}^{-}} \dive u_{bl} + \int_{\Gamma_{bl}} u_{bl} \cdot n = \int_{\{y_2 = t \}} u_{bl,2} ,
\end{align*}
where the second and fourth equalities come respectively from the no-jump condition of $u_{bl}$ at $y_2 = 0$ and the Dirichlet condition at $\Gamma_{bl}$.
\noindent
ii) By integration of the first equation in \eqref{BL1} over $\Omega_{bl}^{0,t}$ we get:
$$ \int_{y_2 = t} (S(A+D u_{bl}) - S(A) - p_{bl} Id) \left( \begin{smallmatrix} 0 \\ 1 \end{smallmatrix} \right) =
\int_{y_2 = 0^+} (S(A+D u_{bl}) - S(A) - p_{bl} Id) \left( \begin{smallmatrix} 0 \\ 1 \end{smallmatrix} \right). $$
In particular, the quantity
$$ I := \int_{y_2 = t} (S(A+D u_{bl}) - S(A) - p_{bl} Id) \left( \begin{smallmatrix} 0 \\ 1 \end{smallmatrix} \right) \cdot \left( \begin{smallmatrix} 1 \\ 0 \end{smallmatrix} \right) =
\int_{y_2 = t} ( S(A+D u_{bl}) - S(A)) \left( \begin{smallmatrix} 0 \\ 1 \end{smallmatrix} \right) \cdot \left( \begin{smallmatrix} 1 \\ 0 \end{smallmatrix} \right) $$
is independent of the variable $t$. To show that it is zero, we apply inequality \eqref{ineq3} or \eqref{ineq3duo} with $a = A$ and $b = A + D u_{bl}$, so that
$$I^2 \: \le \: C \left( \int_{\{y_2=t\}} |D u_{bl}| \right)^2 \: \le \: C' \int_{\{y_2=t\}} |D u_{bl}|^2 $$
($C$ is bounded by Lemma~\ref{lemma_unifbound}). As $D u_{bl}$ belongs to $L^2(\Omega^1_{bl})$, there exists a sequence $t_n$ such that
$ \int_{\{y_2=t_n\}} |D u_{bl}|^2 \rightarrow 0$ as $n \rightarrow +\infty$. It follows that $I = 0$.
This concludes the proof of the Lemma.
We can now come back to the treatment of $I_1$ and $I_2$.
\begin{itemize}
\item Treatment of $I_1$.
\end{itemize}
We note that $\chi'_n$ is supported in $[t-1,t] \cup [t+n,t+n+1]$. By Lemma \ref{lem_averages} we can write
\begin{align}\label{I1a}
I_1 & = \int_{\Omega_{bl}^{t-1,t}} \left( \left(S(A) - S(A+D u_{bl})\right) \left( \begin{smallmatrix} 0 \\ \chi'_n \end{smallmatrix} \right) \right) \cdot (u_{bl} - \overline{c}) \\
& + \: \int_{\Omega_{bl}^{t+n,t+n+1}} \left( \left(S(A) - S(A+D u_{bl})\right) \left( \begin{smallmatrix} 0 \\ \chi'_n \end{smallmatrix} \right) \right) \cdot (u_{bl} - \overline{c}_n) \: := I_{1,1} + I_{1,2} ,
\end{align}
where
\begin{equation}\label{mean}
\overline{c} := \Xint-_{\Omega_{bl}^{t-1,t}} u_{bl} =
\left( \Xint-_{\Omega_{bl}^{t-1,t}} u_{bl,1} , 0 \right) \quad \mbox{ and } \quad
\overline{c}_n := \Xint-_{\Omega_{bl}^{t+n,t+n+1}} u_{bl} =
\left( \Xint-_{\Omega_{bl}^{t+n,t+n+1}} u_{bl,1} , 0 \right).
\end{equation}
Again, we apply inequality \eqref{ineq3} or \eqref{ineq3duo} to find
$$ I_{1,1} \le C \int_{\Omega_{bl}^{t-1,t}} | D u_{bl} | \, |u_{bl} - \overline{c}| $$
and by the Poincar\'e inequality for functions with zero mean, we easily deduce that
$$ I_{1,1} \le C' \int_{\Omega_{bl}^{t-1,t}} | \nabla u_{bl} |^2 = C' \left( E(t-1) - E(t) \right). $$
An upper bound on $I_{1,2}$ can be derived in the same way:
$$ I_{1,2} \le C' (E(t+n) - E(t+n+1)) ,$$
where the right-hand side goes to zero as $n \rightarrow +\infty$ since $E(t') \to 0$ as $t'\to \infty$. Eventually:
\begin{equation} \label{estimI1}
\limsup_{n \rightarrow +\infty} I_{1} \: \le \: C \left(E(t-1) - E(t) \right).
\end{equation}
\begin{itemize}
\item Treatment of $I_2$.
\end{itemize}
We can again use the decomposition
\begin{equation}\label{I2a}
I_2 = \int_{\Omega_{bl}^{t-1,t}} p_{bl} \chi'_n u_{bl,2} \: + \: \int_{\Omega_{bl}^{t+n,t+n+1}} p_{bl} \chi'_n u_{bl,2} \: := \: I_{2,1} + I_{2,2}.
\end{equation}
From Lemma \ref{lem_averages} i), we infer that
$$ \int_{\Omega_{bl}^{t-1,t}} \chi'_n(y_2) u_{bl,2}(y) \, \dy = 0. $$
By standard results, there exists $w \in H^1_0(\Omega_{bl}^{t-1,t})$ satisfying $ \dive w(y) = \chi'_n(y_2) u_{bl,2}(y), \quad y \in \Omega_{bl}^{t-1,t}$, and the estimate
$$\| w \|_{H^1(\Omega_{bl}^{t-1,t})} \le C \| \chi'_n(y_2) u_{bl,2}(y) \|_{L^2(\Omega_{bl}^{t-1,t})} \le C' \| u_{bl,2} \|_{L^2(\Omega_{bl}^{t-1,t})}, $$
for constants $C,C'$ that do not depend on $t$. As $w$ is zero at the boundary:
$$ I_{2,1} = \int_{\Omega_{bl}^{t-1,t}} p_{bl} \dive w = - \int_{\Omega_{bl}^{t-1,t}} \nabla p_{bl} \cdot w = \int_{\Omega_{bl}^{t-1,t}} (S(A+D u_{bl}) - S(A)) \cdot \nabla w $$
where the last equality comes from \eqref{BL1}. We find as before ({\it cf} \eqref{ineq3} or \eqref{ineq3duo}):
\begin{align*}
|I_{2,1}| & \le C \int_{\Omega_{bl}^{t-1,t}} | D u_{bl} | |\nabla w| \le C \| D u_{bl} \|_{L^2(\Omega_{bl}^{t-1,t})} \| \nabla w \|_{L^2(\Omega_{bl}^{t-1,t})} \\
& \le C' \| D u_{bl} \|_{L^2(\Omega_{bl}^{t-1,t})} \, \|u_{bl,2} \|_{L^2(\Omega_{bl}^{t-1,t})} \le C'' \| \nabla u_{bl} \|_{L^2(\Omega_{bl}^{t-1,t})}^2
\end{align*}
where we have controlled the $L^2$ norm of $u_{bl,2}$ by the $L^2$ norm of its gradient (we recall that $u_{bl,2}$ has zero mean). A similar treatment can be performed with $I_{2,2}$, so that $I_{2,1} \le C (E(t-1) - E(t))$, $\: I_{2,2} \le C ( E(t+n) - E(t+n+1))$ and
\begin{equation} \label{estimI2}
\limsup_{n \rightarrow +\infty} I_{2} \: \le \: C \left(E(t-1) - E(t) \right).
\end{equation}
Finally, combining \eqref{boundE}, \eqref{I1I2}, \eqref{estimI1} and \eqref{estimI2}, we get
$$ E(t) \le C (E(t-1) - E(t)) $$
for some $C > 0$. It is well-known that this kind of differential inequality implies the exponential decay of Proposition \ref{expdecay} (see the appendix). The proof of the Proposition is therefore complete. We have now all the ingredients to show Theorem \ref{thEC}.
{\em Proof of Theorem \ref{thEC}}.
Thanks to the regularity Lemma \ref{lemma_unifbound}, we know that $\nabla u_{bl}$ is uniformly bounded over $\Omega_{bl}^1$, and belongs to $L^2(\Omega_{bl}^1)$. Of course, this implies that
$\nabla u_{bl}$ belongs to $L^q(\Omega_{bl}^1)$ for all $q \in [2,+\infty]$. More precisely, combining the $L^\infty$ bound with the $L^2$ exponential decay of Proposition \ref{expdecay}, we have that
\begin{equation} \label{expdecayLq}
\| \nabla u_{bl} \|_{L^q(\Omega_{bl}^t)} \le C \exp(-\delta t)
\end{equation}
(for some $C$ and $\delta$ depending on $q$). This exponential decay extends straightforwardly to all $1 \le q < +\infty$. Let us now fix $q > 2$. To understand the behavior of $u_{bl}$ itself, we write the Sobolev inequality: for all $y$ and $y' \in B(y,r)$,
\begin{equation} \label{Sobolevineq}
|u(y') - u(y)| \: \le \: C r^{1-\frac{2}{q}} \left( \int_{B(y,2r)} |\nabla u(z)|^q dz \right)^{1/q}.
\end{equation}
We deduce from there that: for all $y_2 > 2$, for all $s \ge 0$,
\begin{align*}
& |u_{bl}(y_1,y_2+s) - u_{bl}(y_1,y_2)| \\
& \le |u_{bl}(y_1,y_2+s) - u_{bl}(y_1,y_2+ \lfloor s \rfloor) | + \sum_{k=0}^{\lfloor s \rfloor-1} |u_{bl}(y_1,y_2+k+1) - u_{bl}(y_1,y_2+k)| \\
& \le C \left( \| \nabla u_{bl} \|_{L^q(B((y_1,y_2+s)^t,1))} + \sum_{k=0}^{\lfloor s \rfloor-1} \| \nabla u_{bl} \|_{L^q(B((y_1,y_2+k)^t,1))} \right) \\
& \le C' \left( e^{-\delta(y_2+s)} + \sum_{k=0}^{\lfloor s \rfloor-1} e^{-\delta (y_2 + k)} \right)
\end{align*}
where the last inequality comes from \eqref{Sobolevineq}. This implies that $u_{bl}$ satisfies the Cauchy criterion uniformly in $y_1$, and thus converges uniformly in $y_1$ to some $u^\infty = u^\infty(y_1)$ as $y_2 \rightarrow +\infty$. To show that $u^\infty$ is a constant field, we rely again on \eqref{Sobolevineq}, which yields for all $|y_1 - y'_1| \le 1$:
$$ |u_{bl}(y_1,y_2) - u_{bl}(y'_1, y_2) | \le C |y_1 - y'_1|^{1 - \frac{2}{q}} \| \nabla u_{bl} \|_{L^q(B((y_1,y_2)^t,1))} \le C' e^{-\delta y_2}. $$
Sending $y_2$ to infinity gives: $u^\infty(y_1) = u^\infty(y'_1)$. Finally, the fact that $u^\infty$ is a horizontal vector field follows from Lemma \ref{lem_averages}, point i). This concludes the proof of the Theorem~\ref{thEC}.
Eventually, for later purposes, we state
\begin{Corollary} {\bf (higher order exponential decay)} \label{higherorder}
\begin{itemize}
\item There exists $\alpha \in (0,1)$, such that for all $s \in [0,\alpha)$, for all $1 \leq q < +\infty$, one can find $C$ and $\delta > 0$ with
$$ \| u_{bl} - u^\infty \|_{W^{s+1,q}(\Omega_{bl}^t)} \le C \exp(-\delta t), \quad \forall t \ge 1. $$
\item There exists $\alpha \in (0,1)$, such that for all $s \in [0,\alpha)$, for all $1 \leq q < +\infty$, one can find $C$ and $\delta > 0$ with
$$ \| p_{bl} - p^t \|_{W^{s,q}(\Omega_{bl}^{t,t+1})} \: \le \: C \exp(-\delta t), \quad \forall t \ge 1, \quad \mbox{for some constant $p^t$}.$$
\end{itemize}
\end{Corollary}
{\em Proof of the corollary}. It was established above that
$$ |u_{bl}(y_1,y_2+s) - u_{bl}(y_1,y_2)| \le C' \left( e^{-\delta'(y_2+s)} + \sum_{k=0}^{\lfloor s \rfloor -1} e^{-\delta' (y_2 + k)}\right) . $$
for some $C'$ and $\delta' > 0$. From there, after sending $s$ to infinity, it is easily deduced that
$$ \| u_{bl} - u^\infty \|_{L^q(\Omega_{bl}^t)} \le C \exp(-\delta t) .$$
It then remains to control the $W^{s,q}$ norm of $\nabla u_{bl}$. This control comes from the $C^{0,\alpha}$ uniform bound on $\nabla u_{bl}$ over $\Omega_{bl}^1$, see Lemma \ref{lemma_unifbound}. By Sobolev imbedding, it follows that
$$ \| \nabla u_{bl} \|_{W^{s,q}(\Omega_{bl}^{t,t+1})} \le C, \quad \forall s \in [0,\alpha), \forall 1\leq q < +\infty $$
uniformly in $t$. Interpolating this bound with the bound
$\| \nabla u_{bl} \|_{L^q(\Omega_{bl}^{t,t+1})} \le C' \exp(-\delta' t)$ previously seen, we get
$$ \| \nabla u_{bl} \|_{W^{s,q}(\Omega_{bl}^{t,t+1})} \le C'' \exp(-\delta'' t), \quad \forall s \in [0,\alpha), \forall 1\leq q < +\infty .$$
The first inequality of the Corollary follows.
The second inequality, on the pressure $p_{bl}$, is derived from the one on $u_{bl}$. This derivation is somehow standard, and we do not detail it for the sake of brevity.
\section{Error estimates, wall Laws}
\subsection{Approximation by the Poiseuille flow.}
We now go back to our primitive system \eqref{EQ1}. A standard estimate on $u^\eps$ leads to
$$ \int_{\Omega^\eps} |D u^\eps|^p \: \le \: \int_{\Omega^\eps} e_1 \cdot u^\eps. $$
The Korn inequality implies that
$$ \int_{\Omega^\eps} |\nabla u^\eps|^p \: \le \: C \int_{\Omega^\eps} |D u^\eps|^p $$
for a constant $C$ independent of $\eps$: indeed, one can extend $u^\eps$ by $0$ for $x_2 < \eps \gamma(x_1/\eps)$ and apply the inequality on the square $\T \times [-1,1]$, {\it cf} the appendix.
Also, by the Poincar\'e inequality:
$$ | \int_{\Omega^\eps} e_1 \cdot u^\eps | \le C \| u^\eps \|_{L^p(\Omega^\eps)} \le C' \| \nabla u^\eps \|_{L^p(\Omega^\eps)}. $$
We find that
\begin{equation} \label{basic_estimate}
\| u^\eps \|_{W^{1,p}(\Omega^\eps)} \le C.
\end{equation} In particular, it provides strong convergence of $u^\eps$ in $L^p$ by the Rellich theorem (up to a subsequence). As can be easily guessed, the limit of $u^\eps$ in $\Omega$ is the generalized Poiseuille flow $u^0$. One can even obtain an error estimate by a direct energy estimate of the difference (extending $u^0$ and $p^0$ by zero in $R^\eps$). We focus on the case $1 < p \le 2$, and comment briefly the easier case $p \ge 2$ afterwards. We write $\ue = \uz + \we$ and $p^\ep = p^0 + q^\ep$. We find, taking into account \eqref{EQ2}:
\begin{equation}\label{EQ4}
\begin{split}
- \Div \bS(\tD \uz + \tD \we) + \Div \bS(\tD \uz) + \nablabla q^\ep & = {\bbbone}_{R^\eps} e_1 \quad \mbox{ in } \Omega^\ep \setminus \Sigma_0 ,\\
\Div \we & = 0 \quad \mbox{ in } \Omega^\ep , \\
\we & = 0 \quad \mbox{ on } \Gamma^\ep \cup \Sigma_1 , \\
\we & \mbox{ is periodic in } x_1 \mbox{ with period } 1 , \\
[\we]|_{\Sigma_0} = 0, \quad [\tS ( \tD \uz + \tD \we ) {n} - \tS ( \tD \uz ) {n} - q^\ep n ]|_{\Sigma_0} & = -\tS ( \tD \uz) {n}|_{x_2=0^+} .
\end{split}
\end{equation}
In particular, performing an energy estimate and distinguishing between $\Omega$ and $R^\eps$, we find
\begin{equation}\label{weakEQ4}
\int_{\Omega} \left( \tS( \tD \uz + \tD \we) - \tS( \tD \uz) \right) : \tD \we + \int_{R^\ep} \tS( \tD \we ) : \tD \we
= - \int_{\Sigma_0} \tS (\tD \uz ){n}\vert_{x_2 = 0^+} \cdot \we {\rm d}S + \int_{R^\eps} e_1 \cdot \we
\end{equation}
Relying on inequalities \eqref{ineq1}-\eqref{ineq2}, we get for any $M > \| Du^0 \|_{L^\infty}$:
\begin{multline}\label{zeroap1}
\| D \we \|_{L^p(\Omega \cap \{ |D \we| \ge M\})}^{p} + \| D \we \|_{L^2(\Omega \cap \{ |D \we| \le M\})}^{2} + \| D \we \|_{L^p(R^\eps)}^{p} \\
\le C \left( \left| \int_{\Sigma_0} \bS(\tD \uz) {n} \cdot \we {\rm\,d}S \right| + \left| \int_{R^\eps} e_1 \cdot \we \right| \right)
\end{multline}
Then by the H\"older inequality and by Proposition~\ref{rescaledTracePoincare} in the appendix, we have that
\begin{equation}\label{es1}
| \int_{R^\ep} e_1 \cdot \we | \leq \eps^{\frac{p-1}{p}} \| \we \|_{L^p(R^\eps)}
\leq C \eps^{1 + \frac{p-1}{p}} \| \nablabla \we \|_{L^p(R^\ep)} .
\end{equation}
Next, since $D \uz$ is given explicitly and uniformly bounded, the Proposition~\ref{rescaledTracePoincare} provides
\begin{equation}\label{es2}
\begin{split}
& | \int_{\Sigma_0} \tS(\tD \uz) {n}\vert_{x_2 = 0^+} \cdot \we {\rm\,d}S |
\leq C \| \we \|_{L^{p}(\Sigma_0)}
\leq C' \ep^{\frac{p-1}{p}} \| \nablabla \we \|_{L^{p}(R^\ep)} .
\end{split}
\end{equation}
Note that, as $\we$ is zero at the lower boundary of the channel, we can extend it by $0$ below $R^\eps$ and apply Korn inequalities in a strip (see the appendix). We find
$$ \| \nablabla \we \|_{L^p(R^\ep)} \le C \| D \we \|_{L^p(R^\ep)} $$
for some constant $C > 0$ independent of $\eps$. Summarising, we get
\begin{equation*}
\| D \we \|_{L^p(\Omega \cap \{ |D \we| \ge M\})}^{p} + \| D \we \|_{L^2(\Omega \cap \{ |D \we| \le M\})}^{2} + \| D \we \|_{L^p(R^\eps)}^{p}
\le C \ep^{\frac{p-1}{p}} \| D \we \|_{L^p(R^\ep)}
\end{equation*}
and consequently
\begin{equation}\label{zeroap2}
\| D \we \|_{L^p(\Omega \cap \{ |D \we| \ge M\})}^{p} + \| D \we \|_{L^2(\Omega \cap \{ |D \we| \le M\})}^{2} + \| D \we \|_{L^p(R^\eps)}^{p}
\le C \ep
\end{equation}
In the case $p \ge 2$ one needs to use \eqref{abp_1} instead of \eqref{ineq1}-\eqref{ineq2}, which yields
\begin{equation} \label{zeroap3}
\| D \we \|_{L^p(\Omega^\eps)} \le C \eps^{\frac{1}{p}}, \quad p \in [2, \infty).
\end{equation}
\subsection{Construction of a refined approximation}
The aim of this section is to design a better approximation of the exact solution $u^\eps$ of \eqref{EQ1}. This approximation will of course involve the boundary layer profile $u_{bl}$ studied in the previous section. Consequences of this approximation in terms of wall laws will be discussed in paragraph \ref{parag_wall_laws}.
From the previous paragraph, we know that the Poiseuille flow $u^0$ is the limit of $u^\eps$ in $W^{1,p}(\Omega)$.
However, the extension of $u^0$ by $0$ in the rough part of the channel was responsible for a jump of the stress tensor at $\Sigma_0$. This jump was the main limitation of the error estimates \eqref{zeroap2}-\eqref{zeroap3}, and the reason for the introduction of the boundary layer term $u_{bl}$. Hence, we hope to have a better approximation replacing $u^0$ by $u^0 + \eps u_{bl}(\cdot/\eps)$. Actually, one can still improve the approximation, accounting for the so-called boundary layer tail $u^\infty$. More precisely, {\em in the Newtonian case}, a good idea is to replace $u^0$ by the solution $u^{0,\eps}$ of the Couette problem:
$$ -\Delta u^{0,\eps} + \nabla p^{0,\eps} = 0, \quad \dive u^{0,\eps} = 0, \quad u^{0,\eps}\vert_{\Sigma_0} = \eps u^\infty, \quad u^{0,\eps}\vert_{x_2 = 1} = 0. $$
One then defines:
$$ u^{\eps} = u^{0,\eps} + \eps (u_{bl}(\cdot/\eps) - u^\infty) + r^\eps \: \mbox{ in $\Omega$}, \quad u^\eps = \eps u_{bl}(\cdot/\eps) \: \mbox{ in $R^\eps$},$$
where $r^\eps$ is a small divergence-free remainder correcting the $O(\exp(-\delta/\eps))$ trace of $u_{bl} - u^\infty$ at $\{x_2 = 1 \}$.
However, for technical reasons, the above approximation is not so successful in our context, so that we need to modify it a little. We proceed as follows. Let $N$ a large constant to be fixed later. We introduce:
$$ \Omega^{\eps}_N := \Omega^\eps \cap \{x_2 > N \eps |\ln \eps| \}, \quad \Omega^{\eps}_{0,N} = \Omega^\eps \cap \{ 0 < x_2 < N \eps |\ln \eps| \}, \quad \mbox{and} \quad \Sigma_N = \Pi \times \{ x_2 = N \eps |\ln \eps| \}. $$
First, we introduce the solution $u^{0,\eps}$ of
\begin{equation} \label{u0eps}
\left\{
\begin{aligned}
- \dive S(D u^{0,\eps}) + \nabla p^{0,\eps} & = e_1, \quad x \in \Omega^\eps_N, \\
\dive u^{0,\eps} & = 0, \quad x \in \Omega^\eps_N, \\
u^{0,\eps}\vert_{\Sigma_N} & = \left( x \rightarrow \left( \begin{smallmatrix} U'(0) x_2 \\ 0 \end{smallmatrix} \right) + \eps u^\infty \right)\vert_{\Sigma_N}, \\
u^{0,\eps}\vert_{\{ x_2 = 1 \}} & = 0.
\end{aligned}
\right.
\end{equation}
As for the generalized Poiseuille flow, the pressure $p^{0,\eps}$ is zero, and one has an explicit expression for $u^{0,\eps} = (U^\eps(x_2),0)$. In particular, one can check that
\begin{equation} \label{explicit1}
U^\eps(x_2) = \beta(\eps) - \frac{(\sqrt{2})^{p'}}{p'} \left| \frac{1}{2} + \alpha(\eps) -x_2 \right|^{p'},
\end{equation}
where $\alpha(\eps)$ satisfies the equation ($x_{2,N} := N \eps |\ln \eps|$)
\begin{equation} \label{explicit2}
-\frac{1}{p'}(\sqrt{2})^{p'} \left( \left| \frac{1}{2} + \alpha(\eps) - x_{2,N} \right|^{p'} - \left| \frac{1}{2} - \alpha(\eps) \right|^{p'} \right) = U'(0) x_{2,N} + \eps U^\infty
\end{equation}
and
$$ \beta(\eps) = \frac{(\sqrt{2})^{p'}}{p'} \left| \frac{1}{2} - \alpha(\eps)\right|^{p'} . $$
By the Taylor expansion, we find that
\begin{equation} \label{explicit3}
\alpha(\eps) = - \sqrt{2}^{p'-4} \eps U^\infty + O(\eps^2 |\ln \eps|^2).
\end{equation}
This will be used later.
Then, we consider the Bogovski problem
\begin{equation} \label{Bogov}
\left\{
\begin{aligned}
\dive r^\eps & = 0 \quad \mbox{in} \: \Omega^\eps_{N}, \\
r^\eps\vert_{\Sigma_N} & = \eps (u_{bl}(\cdot/\eps) - u^\infty)\vert_{\Sigma_N}, \\
r^\eps\vert_{\{x_2 = 1\}} & = 0.
\end{aligned}
\right.
\end{equation}
Since $u^\infty = (U^\infty,0)$, note that
$$ \int_{\Sigma_N} \eps (u_{bl}(\cdot/\eps) - u^\infty) \cdot e_2 = \int_{\Omega^\eps_N \cup \overline{R^\eps}} {\rm div}_y u_{bl}(\cdot/\eps) = 0. $$
Hence, the compatibility condition for solvability of \eqref{Bogov} is fulfilled: there exists a solution
$r^\eps$ satisfying
$$ \| r^\eps \|_{W^{1,p}(\Omega^\eps_N)} \le C \eps \| u_{bl}(\cdot/\eps) - u^\infty \|_{W^{1-\frac{1}{p},p}(\Sigma_N)}. $$
Using the first estimate of Corollary \ref{higherorder}, we find
\begin{equation} \label{estimreps}
\| r^\eps \|_{W^{1,p}(\Omega^\eps_N)} \le C \eps^{\frac{1}{p}} \exp(-\delta N |\ln \eps|).
\end{equation}
Finally, we define the approximation $(u^\eps_{app}, p^\eps_{app})$ by the formula
\begin{equation} \label{uepsapp}
u^\eps_{app}(x) = \left\{
\begin{aligned}
& u^{0,\eps}(x) + r^\eps(x) \quad x \in \Omega^\eps_N ,\\
& \left( \begin{smallmatrix} U'(0)x_2 \\ 0 \end{smallmatrix} \right) + \eps u_{bl}(x/\eps), \quad x \in \Omega^\eps_{0,N}, \\
& \eps u_{bl}(x/\eps), \quad x \in R^\eps ,
\end{aligned}
\right.
\end{equation}
whereas
\begin{equation}
p^\eps_{app}(x) = \left\{
\begin{aligned}
& 0 \quad x \in \Omega^\eps_N ,\\
& p_{bl}(x/\eps) \quad \quad x \in \Omega^\eps_{0,N} \cup R^\eps.
\end{aligned}
\right.
\end{equation}
With such a choice:
$$u^\eps_{app}\vert_{\partial \Omega^\eps} = 0, \quad \dive u^\eps_{app} = 0 \quad \mbox{over $\Omega^\eps_N \cup \Omega^\eps_{0,N} \cup R^\eps$}.$$
Moreover, $u^\eps_{app}$ has zero jump at the interfaces $\Sigma_0$ and $\Sigma_N$:
$$ [u^\eps_{app}]\vert_{\Sigma_0} = 0, \quad [u^\eps_{app}]\vert_{\Sigma_N} = 0. $$
Still, the stress tensor has a jump. More precisely, we find
\begin{equation} \label{stressjump}
\begin{aligned}
\left[S(D u^\eps_{app})n - p^\eps_{app}n\right]\vert_{\Sigma_0} & = 0, \\
\left[S(D u^\eps_{app})n - p^\eps_{app}n\right]\vert_{\Sigma_N} & = \left( S(D u^{0,\eps} + D r^\eps)\vert_{\{x_2 = (N \eps |\ln \eps|)^+\}} - S(A + Du_{bl}(\cdot/\eps))\vert_{\{x_2 = (N \eps |\ln \eps|)^-\}}\right) e_2 \\
& - p_{bl}(\cdot/\eps) \vert_{\{x_2 = (N \eps |\ln \eps|)^-\}} e_2 .
\end{aligned}
\end{equation}
The next step is to obtain error estimates on $u^\eps - u^\eps_{app}$.
\subsection{Error estimates}
We prove here:
\begin{Theorem} {\bf (Error estimates)} \label{thmerror}
\begin{itemize}
\item For $1 < p \le 2$, there exists $C$ such that
$$ \| u^\eps - u^\eps_{app} \|_{W^{1,p}(\Omega^\eps)} \le C (\eps |\ln \eps|)^{1+\frac{1}{p'}} .$$
\item For $p \ge 2$, there exists $C$ such that
$$ \| u^\eps - u^\eps_{app} \|_{W^{1,p}(\Omega^\eps)} \le C (\eps |\ln \eps|)^{\frac{1}{p-1}+\frac{1}{p}} . $$
\end{itemize}
\end{Theorem}
\begin{Remark}
A more careful treatment would allow one to get rid of the $\ln$ factor in the last estimate ($p \ge 2$). We do not detail this point here, as we prefer to provide a unified treatment. Also, we recall that the shear thinning case ($1 < p \le 2$) has a much broader range of applications. More comments will be made on the estimates in the last paragraph \ref{parag_wall_laws}.
\end{Remark}
{\em Proof of the theorem.} We write $v^\eps = u^\eps - u^\eps_{app}$, $q^\eps = p^\eps - p^\eps_{app}$. We start from the equation
\begin{equation} \label{eq_error}
-\dive S(D u^\eps) + \dive S(D u^\eps_{app}) + \nabla q^\eps = e_1 + \dive S(D u^\eps_{app}) + \nabla p^\eps_{app} := F^\eps
\end{equation}
satisfied in $\Omega^\eps \setminus (\Sigma_0 \cup \Sigma_N)$.
A quick computation shows that
\begin{equation*}
F^\eps =
\left\{
\begin{aligned}
& \dive \left( S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps}) \right), \quad x \in \Omega^\eps_N, \\
& e_1, \quad x \in \Omega^\eps_{0,N} \cup R^\eps.
\end{aligned}
\right.
\end{equation*}
Defining
$$ \langle F^\eps, v^\eps \rangle := \int_{\Omega^\eps_N} F^\eps \cdot v^\eps + \int_{\Omega^\eps_{0,N}} F^\eps \cdot v^\eps + \int_{R^\eps} F^\eps \cdot v^\eps $$
we get:
$$ | \langle F^\eps, v^\eps \rangle | \le \alpha_\eps \| \nabla v^\eps \|_{L^p(\Omega^\eps_N)} + \beta_\eps \| v^\eps \|_{L^p(\Sigma_N)} + \| v^\eps \|_{L^1(\Omega^\eps \setminus \Omega^\eps_N)} $$
where
$$ \alpha_\eps := \| S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps}) \|_{L^{p'}(\Omega^\eps_N)}, \quad \beta_\eps := \| \left(S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps})\right)e_2 \|_{L^{p'}(\Sigma_N)}. $$
We then use the inequalities
\begin{equation} \label{poincarelike}
\begin{aligned}
\| v^\eps \|_{L^p(\Sigma_N)} \: \le \: C (\eps |\ln \eps|)^{1/p'} \| \nabla v^\eps \|_{L^p(\Omega^\eps)}, \\
\| v^\eps \|_{L^1(\Omega^\eps \setminus \Omega^\eps_N)} \le C \eps^{\frac{1}{p'}} \| v^\eps \|_{L^p(\Omega^\eps \setminus \Omega^\eps_N)} \: \le \: C \eps^{\frac{1}{p'}} (\eps |\ln \eps|) \| \nabla v^\eps \|_{L^p(\Omega^\eps)}
\end{aligned}
\end{equation}
(see the appendix for similar ones). We end up with
\begin{equation}
| \langle F^\eps, v^\eps \rangle | \le C \left( \alpha_\eps + \beta_\eps (\eps |\ln \eps|)^{1/p'} + \eps^{\frac{1}{p'}} (\eps |\ln \eps|) \right) \| \nabla v^\eps \|_{L^p(\Omega^\eps)} .
\end{equation}
Back to \eqref{eq_error}, after multiplication by $v^\eps$ and integration over $\Omega^\eps$, we find:
\begin{equation*}
\begin{aligned}
& \int_{\Omega^\eps} \left( S(D u^\eps) - S(Du^\eps_{app}) \right) :\nabla v^\eps \\
& \le C \left( \alpha_\eps + \beta_\eps (\eps |\ln \eps|)^{1/p'} + \eps^{\frac{1}{p'}} (\eps |\ln \eps|) \right) \| \nabla v^\eps \|_{L^p(\Omega^\eps)} + \int_{\Sigma_N} \left( [S(D u^\eps_{app}) e_2]\vert_{\Sigma_N} \cdot v^\eps - [p^\eps_{app}]\vert_{\Sigma_N} v^\eps_2 \right).
\end{aligned}
\end{equation*}
Let $p^{\eps,N}$ be a constant to be fixed later. As $v^\eps$ is divergence-free and zero at $\Gamma^\eps$, its flux through $\Sigma_N$ is zero: $\int_{\Sigma_N} v^\eps_2 = 0$. Hence, we can add $p^{\eps,N}$ to the pressure jump $[p^\eps_{app}]\vert_{\Sigma_N}$ without changing the surface integral. We get:
\begin{equation} \label{final_estimate}
\begin{aligned}
& \int_{\Omega^\eps} \left( S(D u^\eps) - S(Du^\eps_{app}) \right) : \nabla v^\eps \\
& \le C \left( \alpha_\eps + \beta_\eps (\eps |\ln \eps|)^{1/p'} + \eps^{\frac{1}{p'}} (\eps |\ln \eps|) \right) \| \nabla v^\eps \|_{L^p(\Omega^\eps)} + \int_{\Sigma_N} \left( [S(D u^\eps_{app}) e_2]\vert_{\Sigma_N} \cdot v^\eps - ([p^\eps_{app}]\vert_{\Sigma_N} - p^{\eps,N}) v^\eps_2 \right) \\
& \le \left( \alpha_\eps + \beta_\eps (\eps |\ln \eps|)^{1/p'} + \eps^{\frac{1}{p'}} (\eps |\ln \eps|) \right) \| \nabla v^\eps \|_{L^p(\Omega^\eps)} + \gamma_\eps \| v^\eps \|_{L^p(\Sigma_N)} \\
& \le C \left( \alpha_\eps + (\beta_\eps + \gamma_\eps) (\eps |\ln \eps|)^{1/p'} + \eps^{\frac{1}{p'}} (\eps |\ln \eps|) \right) \| \nabla v^\eps \|_{L^p(\Omega^\eps)} ,
\end{aligned}
\end{equation}
where
$$ \gamma_\eps := \bigl\| [S(D u^\eps_{app}) e_2]\vert_{\Sigma_N} - \bigl([p^\eps_{app}]\vert_{\Sigma_N} - p^{\eps,N}\bigr) e_2 \bigr\|_{L^{p'}(\Sigma_N)}.
$$
Note that we used again the first bound in \eqref{poincarelike} to go from the third to the fourth inequality.
\begin{Lemma} \label{bounds}
For $N$ large enough, and a good choice of $p^{\eps,N}$ there exists $C = C(N)$ such that
$$ \alpha_\eps \le C \eps^{10}, \quad \beta_\eps \le C \eps^{10}, \quad \gamma_\eps \le C \eps |\ln \eps|. $$
\end{Lemma}
Let us temporarily admit this lemma. Then, we can conclude the proof of the error estimates:
\begin{itemize}
\item In the case $1 < p \le 2$, we rely on the inequality established in \cite[Proposition~5.2]{GM1975}: for all $p \in ]1,2]$, there exists $c$ such that for all $u,u' \in W_0^{1,p}(\Omega^\eps)$
$$ \int_{\Omega^\eps} \left( S(D u) - S(D u') \right) \cdot \nabla (u - u') \ge c \frac{\| Du - Du' \|^2_{L^p(\Omega^\eps)}}{(\| D u \|_{L^p(\Omega^\eps)} + \| D u' \|_{L^p(\Omega^\eps)})^{2-p}} $$
We use this inequality with $u = u^\eps$, $u' = u^\eps_{app}$. With the estimate \eqref{basic_estimate} and the Korn inequality in mind, we obtain
$$ \int_{\Omega^\eps} \left( S(D u^\eps) - S(Du^\eps_{app}) \right) \cdot \nabla v^\eps \ge c \| \nabla v^\eps \|_{L^p}^2. $$
Combining this lower bound with the upper bounds on $\alpha_\eps, \beta_\eps, \gamma_\eps$ given by the lemma, we deduce from \eqref{final_estimate} the first error estimate in Theorem \ref{thmerror}.
\item In the case $2 \le p$, we use the easier inequality
$$ \int_{\Omega^\eps} \left( S(D u) - S(D u') \right) \cdot \nabla (u - u') \ge c \| D u - D u' \|_{L^p(\Omega^\eps)}^p, $$ so that
$$ \int_{\Omega^\eps} \left( S(D u^\eps) - S(Du^\eps_{app}) \right) \cdot \nabla v^\eps \ge c \| \nabla v^\eps \|_{L^p(\Omega^\eps)}^p. $$
The second error estimate from Theorem \ref{thmerror} follows.
\end{itemize}
The final step is to establish the bounds of Lemma \ref{bounds}.
{\em Bound on $\alpha_\eps$ and $\beta_\eps$}. From Corollary \ref{higherorder} and the trace theorem, we deduce that
\begin{equation} \label{traceubl}
\| u_{bl}(\cdot/\eps) - u^\infty \|_{W^{1+s-\frac{1}{q},q}(\{ x_2 = t \})} \le C \eps^{\frac{1}{q}-s-1}\exp(-\delta t/\eps)
\end{equation}
for some $s < \alpha$ (where $\alpha \in (0,1)$) and any $q > \frac{1}{s}$. Let $q > \max(p', \frac{2}{s})$. The solution $r^\eps$ of \eqref{Bogov} satisfies:
$r^\eps \in W^{1+s,q}(\Omega^\eps_N)$ with
\begin{equation*}
\| r^\eps \|_{W^{1+s,q}(\Omega^\eps_N)} \le C \eps^{\frac{1}{q}-s} \exp(-N\delta|\ln \eps|)
\end{equation*}
so that by Sobolev imbedding
\begin{equation} \label{estimreps2}
\| D r^\eps \|_{L^\infty(\Sigma_N)} + \| D r^\eps \|_{L^q(\Sigma_N)} + \| D r^\eps \|_{L^\infty(\Omega^\eps_N)} \le C \| D r^\eps \|_{W^{s,q}(\Omega^\eps_N)} \le C \eps^{\frac{1}{q}-s} \exp(-N\delta|\ln \eps|)
\end{equation}
This last inequality allows to evaluate $\beta_\eps$. Indeed, for $x \in \Sigma_N$, $C \ge |Du^{0,\eps}(x)| \ge c > 0$ uniformly in $x$. We can then use the upper bound \eqref{ineq3} for $p < 2$, or \eqref{ineq3duo} for $p \ge 2$, to obtain
\begin{equation}\label{betaep}
\beta_\eps \le C \| D r^\eps \|_{L^{p'}(\Sigma_N)} \le C \| D r^\eps \|_{L^q(\Sigma_N)} \le C' \eps^{\frac{1}{q}-s} \exp(-N\delta|\ln \eps|) \le C' \eps^{10}\end{equation}
for $N$ large enough.
To treat $\alpha_\eps$, we still have to pay attention to the cancellation of $D u^{0,\eps}$. Indeed, from the explicit expression of $u^{0,\eps}$, we know that there is some $x_2(\eps) \sim \frac{1}{2}$ at which $D u^{0,\eps}\vert_{x_2 = x_2(\eps)} = 0$. Namely, we write
\begin{align*}
& \int_{\Omega^\eps_N} |S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps})|^{p'} \\
& = \int_{\{x \in \Omega^\eps_N,\, | x_2 - x_2(\eps) | \le \eps^{10 p'}\}} |S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps})|^{p'}
+ \int_{\{x\in \Omega^\eps_N,\, | x_2 - x_2(\eps) | \ge \eps^{10 p'}\}} |S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps})|^{p'} \\
& := I_1 + I_2.
\end{align*}
The first integral is bounded by
$$ I_1 \le C \int_{\{x\in \Omega^\eps_N,\, | x_2 - x_2(\eps) | \le \eps^{10 p'}\}} | D u^{0,\eps} |^p + | D r^\eps |^p \le C \eps^{10 p'}, $$
where we have used the uniform bound satisfied by $D u^{0,\eps}$ and $D r^\eps$ over $\Omega^\eps_N$, see \eqref{estimreps2}.
For the second integral, we can distinguish between $p < 2$ and $p \ge 2$. For $p < 2$, see \eqref{ineq3} and its proof, we get
\begin{equation}\label{I_2}
I_2 \le C \int_{\{x\in \Omega^\eps_N,\, | x_2 - x_2(\eps) | \ge \eps^{10 p'}\}} |D u^{0,\eps}|^{(p-2)p'} |D r^\eps|^{p'}
\le C' \eps^{-M} \exp(-\delta' N|\ln \eps|)
\end{equation}
for some $M, C', \delta' > 0$, see \eqref{estimreps2}. In the case $p \ge 2$, as $D u^{0,\eps}$ and $D r^\eps$ are uniformly bounded, we derive a similar inequality by \eqref{ineq3duo}. In both cases, taking $N$ large enough, we obtain $I_2 \le C'' \eps^{10 p'}$, to end up with $\alpha_\eps \le C \eps^{10}$.
{\em Bound on $\gamma_\eps$}. We have
\begin{align*}
\gamma_\eps &
\le \| \left(S(D u^{0,\eps} + D r^\eps) - S(D u^{0,\eps})\right) e_2 \|_{L^{p'}(\Sigma_N)}
+ \| \left(S(D u^{0,\eps}) - S(A)\right) e_2 \|_{L^{p'}(\Sigma_N)}
\\
& + \| (S(A) - S(A + D u_{bl}(\cdot/\eps))) e_2 \|_{L^{p'}(\Sigma_N)} + \| p_{bl}(\cdot/\eps) - p^{\eps,N} \|_{L^{p'}(\Sigma_N)} .
\end{align*}
The first term is $\beta_\eps$, so $O(\eps^{10})$ by previous calculations. The third term can be treated similarly to $\beta_\eps$. As $A \neq 0$, \eqref{ineq3} implies that
\begin{equation}\label{I_3bis}
\| (S(A) - S(A + D u_{bl}(\cdot/\eps))) e_2 \|_{L^{p'}(\Sigma_N)} \le C \| D u_{bl}(\cdot/\eps) \|_{L^{p'}(\Sigma_N)} \le C' \exp(-\delta' N |\ln \eps|) ,
\end{equation}
where the last inequality can be deduced from \eqref{traceubl}. It is again $O(\eps^{10})$ for $N$ large enough. For the second term of the right-hand side, we rely on the explicit expression of $u^{0,\eps}$. On the basis of \eqref{explicit1}-\eqref{explicit3}, we find that
$$ D(u^{0,\eps})\vert_{\Sigma_N} = A + O(\eps |\ln \eps|) $$
resulting in
$$ \| \left(S(D u^{0,\eps}) - S(A)\right) e_2 \|_{L^{p'}(\Sigma_N)} \le C \eps |\ln \eps|. $$
Finally, to handle the pressure term, we use the second term of Corollary \ref{higherorder}, which implies
$$ \| p_{bl} - p^t \|_{L^q(\{ y_2 = t\})} \: \le \: C \exp(-\delta t) \quad \mbox{for some constant $p^t$}.$$
We take $t = N |\ln \eps|$ and $p^{\eps,N} = p^t$ to get
$$ \| p_{bl}(\cdot/\eps) - p^{\eps,N} \|_{L^{p'}(\Sigma_N)} \le C' \exp(-\delta' N |\ln \eps|). $$
Taking $N$ large enough, we can make this term negligible, say $O(\eps^{10})$. Gathering all contributions, we obtain $ \gamma_\eps \le C \eps |\ln \eps|$ as stated.
\subsection{Comment on possible wall laws} \label{parag_wall_laws}
On the basis of the previous error estimates, we can now discuss the appropriate wall laws for a non-Newtonian flow above a rough wall. We focus here again on the shear thinning case ($1 < p \le 2$).
We first notice that the field $u^\eps_{app}$ (see \eqref{uepsapp}) involves in a crucial way the solution $u^{0,\eps}$ of \eqref{u0eps}. Indeed, we know from \eqref{estimreps} that the contribution of $r^\eps$ in $W^{1,p}(\Omega^\eps_N)$ is very small for $N$ large enough. Hence, the error estimate of Theorem \ref{thmerror} implies that
$$ \| u^\eps - u^{0,\eps} \|_{W^{1,p}(\Omega^\eps_N)} = O((\eps |\ln \eps|)^{1+\frac{1}{p'}}) . $$
In other words, away from the boundary layer, $u^\eps$ is well approximated by $u^{0,\eps}$, with a power of $\eps$ strictly bigger than $1$. Although such an estimate is unlikely to be optimal, it is enough to emphasize the role of the boundary layer tail $u^\infty$. Namely, the addition of the term $\eps u^\infty$ in the Dirichlet condition for $u^{0,\eps}$ (see the third line of \eqref{u0eps}) allows to go beyond a $O(\eps)$ error estimate.
{\it A contrario}, the generalized Poiseuille flow $u^0$ leads to a $O(\eps)$ error only (away from the boundary layer). Notably,
\begin{equation} \label{estimu0}
\| u^\eps - u^0 \|_{W^{1,p}(\Omega^\eps_N)} \ge \| u^{0,\eps} - u^0 \|_{W^{1,p}(\Omega^\eps_N)} - \| u^\eps - u^{0,\eps} \|_{W^{1,p}(\Omega^\eps_N)} \ge c \eps - o(\eps) \ge c' \eps ,
\end{equation}
where the lower bound for $u^{0,\eps} - u^0$ is obtained using the explicit expressions.
Let us further notice that instead of considering $u^{0,\eps}$, we could consider the solution
$u^0_\eps$ of
\begin{equation} \label{u0epsbis}
\left\{
\begin{aligned}
- \dive S(D u^0_\eps) + \nabla p^0_\eps & = e_1, \quad x \in \Omega^\eps_N, \\
\dive u^0_\eps & = 0, \quad x \in \Omega^\eps_N, \\
u^0_\eps \vert_{\Sigma_0} & = \eps u^\infty, \\
u^0_\eps\vert_{\{ x_2 = 1 \}} & = 0.
\end{aligned}
\right.
\end{equation}
It reads $u^0_\eps = (U_\eps, 0)$ with
$$ U_\eps(x_2) = \beta'(\eps) - \frac{(\sqrt{2})^{p'}}{p'} \left| \frac{1}{2} + \alpha'(\eps) -x_2 \right|^{p'} $$
for $\alpha'$ and $\beta'$ satisfying
$$
-\frac{1}{p'}(\sqrt{2})^{p'} \left( \left| \frac{1}{2} + \alpha'(\eps) \right|^{p'} - \left| \frac{1}{2} - \alpha'(\eps) \right|^{p'} \right) = \eps u^{\infty}_1
\quad
\mbox{ and }
\quad
\beta'(\eps) = \frac{(\sqrt{2})^{p'}}{p'} \left| \frac{1}{2} - \alpha'(\eps)\right|^{p'}. $$
We can compare directly these expressions to \eqref{explicit1}-\eqref{explicit2} and deduce that
$$ \| u^{0,\eps} - u^0_\eps \|_{W^{1,p}(\Omega^\eps_N)} = O(\eps |\ln \eps|), $$
which in turn implies that
\begin{equation} \label{estimu0eps}
\| u^\eps - u^0_\eps \|_{W^{1,p}(\Omega^\eps_N)} = O(\eps |\ln \eps|).
\end{equation}
Hence, in view of \eqref{estimu0} and \eqref{estimu0eps}, we distinguish between two approximations (outside the boundary layer):
\begin{itemize}
\item A crude approximation, involving the generalized Poiseuille flow $u^0$.
\item A refined approximation, involving $u^0_\eps$.
\end{itemize}
The first choice corresponds to the Dirichlet wall law $u\vert_{\Sigma_0} = 0$, and neglects the role of the roughness. The second choice takes it into account through the inhomogeneous Dirichlet condition:
$u\vert_{\Sigma_0} = \eps u^\infty = \eps (U^\infty,0)$. Note that this last boundary condition can be expressed as a wall law, although slightly abstract. Indeed, $U^\infty$ can be seen as a function of the tangential shear $(D(u^0)n)_\tau\vert_{\Sigma_0} = \pa_2 u^0_1\vert_{\Sigma_0} = U'(0)$, through the mapping
$$ U'(0) \: \rightarrow \: A := \left( \begin{smallmatrix} 0 & U'(0) \\ U'(0) & 0 \end{smallmatrix} \right) \: \rightarrow \: u_{bl} \:\: \mbox{solution of \eqref{BL1}-\eqref{BL2}} \: \rightarrow \: U^\infty = \lim_{y_2 \rightarrow +\infty} u_{bl,1}. $$
Denoting by ${\cal F}$ this application, we write
$$(u^0_\eps)_\tau \vert_{\Sigma_0} = \eps {\cal F}((D(u^0)n)_\tau\vert_{\Sigma_0}) \approx \eps {\cal F}((D(u^0_\eps)n)_\tau\vert_{\Sigma_0})$$
whereas $ (u^0_\eps)_n = 0$. This provides the following refined wall law :
$$ u_n\vert_{\Sigma_0} = 0, \quad u_\tau\vert_{\Sigma_0} = \eps {\cal F}\bigl((D(u) n)_\tau\vert_{\Sigma_0}\bigr). $$
This wall law generalizes the Navier wall law derived in the Newtonian case, where ${\cal F}$ is simply linear. Of course, it is not very explicit as it involves the nonlinear system \eqref{BL1}-\eqref{BL2}.
More studies will be necessary to obtain qualitative properties of the function ${\cal F}$, leading to a more effective boundary condition.
{\bf Acknowledgements: }
The work of AWK is partially supported by Grant of National Science Center Sonata, No 2013/09/D/ST1/03692.
\section{Appendix : A few functional inequalities}
\begin{Proposition} {\bf (Korn inequality)}
Let $S_a := \T \times (a, a+1)$, $a \in \R$. For all $1 < p < +\infty$, there exists $C > 0$ such that: for all $a \in \R$, for all $u \in W^{1,p}(S_a)$,
\begin{equation} \label{Korn1}
\| \nabla u \|_{L^p(S_a)} \: \le \: C \| D u \|_{L^p(S_a)}.
\end{equation}
\end{Proposition}
{\em Proof.} Without loss of generality, we can show the inequality for $a = 0$: the independence of the constant $C$ with respect to $a$ follows from invariance by translation. Let us point out that the key point of the proposition is that the inequality is homogeneous. Indeed, it is well-known that the inhomogeneous Korn inequality
\begin{equation} \label{Korn2}
\| \nabla u \|_{L^p(S_0)} \: \le \: C' \left( \| D u \|_{L^p(S_0)} + \| u \|_{L^p(S_0)}\right)
\end{equation}
holds. To prove the homogeneous one, we use reductio ad absurdum: if \eqref{Korn1} fails, there exists a sequence $u_n$ in $W^{1,p}(S_0)$ such that
\begin{equation} \label{absurd}
\| \nabla u_n \|_{L^p(S_0)} \: \ge \: n \| D u_n \|_{L^p(S_0)}.
\end{equation}
Up to replacing $u_n$ by $u'_n := (u_n - \int_{S_0} u_n)/\| u_n\|_{L^p}$, we can further assume that
$$ \| u_n \|_{L^p} = 1, \quad \int_{S_0} u_n = 0.$$
Combining \eqref{Korn2} and \eqref{absurd}, we deduce that $1 \ge \frac{n - C'}{C'} \| D(u_n) \|_{L^p}$ which shows that $D(u_n)$ converges to zero in $L^p$. Using again \eqref{Korn2}, we infer that $(u_n)$ is bounded in $W^{1,p}$, so that up to a subsequence it converges weakly to some $u \in W^{1,p}$, with strong convergence in $L^p$ by Rellich Theorem. We have in particular
\begin{equation} \label{contradiction}
\| u \|_{L^p} = \lim_n \| u_n \|_{L^p} = 1, \quad \int_{S_0} u = \lim_n \int_{S_0} u_n = 0.
\end{equation}
Moreover, as $D(u_n)$ goes to zero, we get $D(u) = 0$. This implies that $u$ must be a constant (dimension is 2), which makes the two statements of \eqref{contradiction} contradictory.
\begin{Corollary}
Let $H_a := \T \times (a, + \infty)$.
For all $1 < p < +\infty$, there exists $C > 0$ such that: for all $a \in \R$, for all $u \in W^{1,p}(H_a)$,
\begin{equation*}
\| \nabla u \|_{L^p(H_a)} \: \le \: C \| D u \|_{L^p(H_a)}.
\end{equation*}
\end{Corollary}
{\em Proof.} From the previous inequality, we get for all $n \in \N$:
$$ \int_{S_{a+n}} | \nabla u |^p \: \le C \: \int_{S_{a+n}} | D u |^p. $$
The result follows by summing over $n$.
\begin{Corollary}
Let $1 < p < +\infty$. There exists $C > 0$, such that for all $u \in W^{1,p}(\Omega_{bl}^-)$, resp. $u \in W^{1,p}(\Omega_{bl})$, satisfying $u\vert_{\Gamma_{bl}} = 0$, one has
$$ \| \nabla u \|_{L^p(\Omega_{bl}^-)} \le C \| D u \|_{L^p(\Omega_{bl}^-)}, \quad \mbox{resp.} \: \| \nabla u \|_{L^p(\Omega_{bl})} \le C \| D u \|_{L^p(\Omega_{bl})}. $$
\end{Corollary}
{\em Proof}. One can extend $u$ by $0$ for all $y$ with $-1 < y_2 < \gamma(y_1)$, and apply the previous inequality on $S_{-1}$, resp. $H_{-1}$.
\begin{Proposition}[Rescaled trace and Poincar\'e inequalities]\label{rescaledTracePoincare}
Let $\varphi \in W^{1,p}(R^\ep)$. We have
\begin{equation}\label{IQ1}
\| \varphi \|_{L^{p}(\Sigma)} \leq C \ep^{\frac{1}{p'}} \| \nabla_x \varphi \|_{L^p(R^\ep)} ,
\end{equation}
\begin{equation}\label{IQ2}
\| \varphi \|_{L^p(R^\ep)} \leq C \ep \| \nabla_x \varphi \|_{L^p(R^\ep)} .
\end{equation}
\end{Proposition}
{\em Proof.} Let $\tilde\varphi(y) = \varphi(\ep y)$, where $y\in S_k = S + (k,-1)$ (a rescaled single cell of rough layer).
Then $\tilde \varphi \in W^{1,p}(S_k)$ for all $k\in \N$, and $\tilde \varphi = 0$ on $\Gamma$. By the trace theorem and the Poincar\'e
inequality: for all $p \in [1,\infty )$
$$\int_{S_k \cap \{y_2 = 0 \}} | \tilde\varphi(\bar{y},0)|^p {\rm\,d}\bar{y} \leq C \int_{S_k} |\nabla_y \tilde \varphi |^p {\rm\,d}y .$$
A change of variables provides
$$\int_{\ep S_k \cap \{x_2 = 0 \}} | \varphi(\bar{x},0)|^p \ep^{-1} {\rm\,d}\bar{x}
\leq C \int_{\ep S_k} \ep^p |\nabla_x \varphi(x) |^p \ep^{-2} {\rm\,d}x .$$
Summing over $k$ we obtain
$$\left( \int_{\Sigma} | \varphi(\bar{x},0) |^p {\rm\, d} \bar{x} \right)^{\frac{1}{p}}
\leq C \ep^{\frac{p-1}{p}} \left( \int_{R^\ep} | \nabla_{x} \varphi (x) |^p \dx \right)^{\frac{1}{p}}
$$
and \eqref{IQ1} is proved. The inequality \eqref{IQ2} is proved in the same way, as a consequence of the (one-dimensional) Poincar\'e inequality.
$\Box $
\end{document} |
\begin{document}
\title{A Simple Method for Detecting Interactions between a Treatment
and a Large Number of Covariates}
\author{
{\sc Lu Tian}
\thanks{Depts. of Health, Research \& Policy,
94305, lutian@stanford.edu}\\
{\sc Ash A Alizadeh}
\thanks{Dept. of Medicine, Stanford University.
94305, arasha@stanford.edu}\\
{\sc Andrew J Gentles}
\thanks{Integrative Cancer Biology Program, Stanford University.
andrewg@stanford.edu}\\
and\\
{\sc Robert Tibshirani}\thanks{Depts. of Health, Research \&
Policy, and Statistics,
Stanford University, tibs@stanford.edu}
}
\maketitle
\begin{abstract}
We consider a setting in which we have a treatment and a large number of covariates
for a set of observations,
and wish to model their relationship with an outcome of interest.
We propose a simple method for modeling interactions between the
treatment and covariates.
The idea is to modify the covariate in a simple way, and then fit a standard model
using the modified covariates and no main effects.
We show that coupled with an efficiency augmentation procedure, this method produces valid inferences in a variety of settings.
It can be useful for personalized medicine: determining from a large set of biomarkers
the subset of patients that can potentially
benefit from a treatment.
We apply the method to both simulated datasets and gene expression studies of cancer.
The modified data can be used for other purposes, for example
large scale hypothesis testing for determining which of a set of
covariates interact with a treatment variable.
\end{abstract}
\section{Introduction}
\label{sec:intro}
To develop strategies for personalized medicine, it is important to identify the
treatment and covariate interactions in the setting of randomized clinical
trial \citep{RS:08}. To confirm and quantify the treatment effect is often the primary
objective of a randomized clinical trial. Although important, the final
result (positive or negative) of a randomized trial is a conclusion
with respect to the average treatment effect on the entire study population.
For example, a treatment may be no better than the placebo in the overall study population, but it may be better for a subset of patients.
Identifying the treatment and covariate interactions may provide
valuable information for determining this subgroup of patients.
In practice, there are two commonly used approaches to characterize the potential treatment
and covariate interactions. First, a panel of simple patient subgroup analyses,
where the treatment and control arms are compared in different patients
subgroups defined a priori, such as male, female, diabetic and non-diabetic patients, may be performed following the main comparison. Such an exploratory
approach mainly focusses on simple interactions between treatment and one
dichotomized covariate. However, it will often suffer from false positive
findings due to multiple testing and will not find complicated
treatment and covariate interactions.
In a more rigorous analytic approach, the treatment and covariates interactions
can be
examined in a multivariate regression analysis where the product of the
binary treatment indicator and a set of baseline covariates are included
in the regression model. Recent breakthroughs in biotechnology
make a vast amount of data available for exploring potential
interaction effects with the treatment and assisting in the optimal treatment
selection for individual patients. However, it is very difficult to detect
the interactions between treatment and high dimensional covariates via
direct multivariate regression modeling. Appropriate variable selection
methods such as Lasso are needed to reduce the number of
covariates having interaction with the treatment. The presence of main
effects, which often have a bigger effect on the outcome than the treatment
interactions, further compounds the
difficulties in dimension reduction since a subset of variables need
to be selected for modeling the main effect as well.
Recently, \citet{BR04} formalized the subpopulation treatment effect pattern plot (STEPP) for characterizing interactions between the treatment and continuous covariates. \citet{SRZ:07} proposed an efficient algorithm for multivariate model-building with flexible fractional polynomials interactions (MFPI) and compared the empirical performance of MFPI with STEPP. \citet{Su:08} proposed the classification and regression tree method
to explore the covariates and treatment interactions in survival analysis. \citet{TT2009}
proposed an efficient algorithm to construct an index score, the sum
of selected dichotomized covariates, to stratify the patient population
according to the treatment effect. In a more recent work, \citet{ZZRK:12} proposed a novel approach to directly estimate the optimal treatment selection rule via maximizing the expected clinical utility, which is equivalent to a weighted classification problem. There is also a rich Bayesian literature on flexibly modeling nonlinear and nonadditive/interaction relationships between covariates and responses \citep{Le:95,CGM:98, Gu:00, chen12}. However, most of these existing methods, except the one proposed by \cite{ZZRK:12},
are not designed to deal with high-dimensional covariates.
In this paper,
we propose a simple approach to estimate the covariates and treatment
interactions without the need for modeling main effects.
The idea is simple, and in a sense, obvious. We simply code the treatment
variable as $\pm 1$ and then include the products
of this variable with centered versions of each covariate in the regression model.
Figure \ref{fig:MMsurv} gives a preview of the results of our method.
The data consist of gene expression measurements from multiple myeloma patients,
who were randomized to one of two treatments.
Our proposed method constructs a numerical gene score on a training set to
reveal gene expression- treatment interactions. The panels show the
estimated survival curves for patients in a
separate test set, overall and stratified by
the score. Although there is no significant survival difference between
the treatments overall, we see that patients with medium and high gene scores
have better survival with treatment PS341 than those with Doxorubicin.
\begin{figure}
\caption{\em Example of the modified covariate approach,
applied to gene expression data from multiple myeloma patients
who were given one of two treatments in a randomized trial.
Our procedure constructed a gene score based on 20 genes, to detect
gene expression- treatment interactions. The numerical score was constructed on a training set,
and then categorized into low, medium and high. The panels show the
survival curves for a separate test set, overall and stratified by
the score.}
\label{fig:MMsurv}
\end{figure}
In section \ref{sec:proposed},
we describe the methods for continuous, binary as well as survival type
of outcomes. We also establish a simple causal interpretation of
the proposed method in several cases. In section 3, the finite sample
performance of the proposed method has been investigated via extensive
numerical study. In section 4, we apply the proposed method to a real
data example about the Tamoxifen treatment for breast cancer patients. Finally, potential extensions and applications
of the method are discussed in section 5.
\section{The proposed method}
\label{sec:proposed}
In the following, we let $T=\pm 1$ be the binary treatment indicator and $Y^{(1)}$ and $Y^{(-1)}$ be the potential outcome if the patient received treatment $T=1$ and $-1$, respectively. We only observe $Y=Y^{(T)},$ $T$ and $\mathbf{Z}$, a $q-$dimensional baseline covariate vector. We assume that the observed data consist
of $N$ independent and identically distributed copies of $(Y, T, \mathbf{Z}),$
$\{(Y_i, T_i, \mathbf{Z}_i), i=1, \cdots, N\}.$ Furthermore, we let $\mathbf{W}(\cdot): R^q \rightarrow R^p$ be a $p$ dimensional functions of baseline covariates $\mathbf{Z}$ and always include an intercept. We denote $\mathbf{W}(\mathbf{Z}_i)$ by $\mathbf{W}_i$ in the rest of the paper. Here the dimension of $\mathbf{W}_i$ could be large relative to the sample size $N.$
For simplicity, we assume that
$\mbox{Prob}(T=1)=\mbox{Prob}(T=-1)=1/2.$
\subsection{Continuous response model}
When $Y$ is continuous response, a simple multivariate linear regression
model for characterizing the interaction between treatment and covariates is
\begin{equation} Y=\beta_0'\mathbf{W}(\mathbf{Z})+\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{Z}) \cdot T/2+\epsilon,
\label{multlinear} \end{equation} where $\epsilon$ is the mean zero
random error. In this simple model, the interaction
term $\boldsymbol{\gamma}_0'\mathbf{W}(Z) \cdot T$ models the heterogeneous treatment effect
across the population and the linear combination of $\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{Z})$
can be used for identifying the subgroup of patients who may or may not
benefit from the treatment. Specifically, under model (\ref{multlinear}),
we have
\begin{eqnarray*}
\Delta(\mathbf{z})&=&{\rm E}(Y^{(1)}-Y^{(-1)}|\mathbf{Z}=\mathbf{z})\\
&=&{\rm E}(Y|T=1, \mathbf{Z}=\mathbf{z})-{\rm E}(Y|T=-1, \mathbf{Z}=\mathbf{z})\\
&=& \boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{z}),
\end{eqnarray*} i.e.,
$\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{z})$ measures the causal treatment effect for patients with
baseline covariate $\mathbf{Z}.$ With observed data, $\boldsymbol{\gamma}_0$ can be
estimated along with $\beta_0$ via the ordinary least squares method.
On the other hand, noting
the relationship that $$ {\rm E}(2YT|\mathbf{Z}=\mathbf{z})=\Delta(\mathbf{z}),$$ one may
estimate $\boldsymbol{\gamma}_0$ by directly minimizing \begin{equation} N^{-1}\sum_{i=1}^N
(2Y_iT_i-\boldsymbol{\gamma}'\mathbf{W}_i)^2. \label{causalobj} \end{equation}
We call this the {\em modified outcome} method, where $2YT$ can be viewed as the {\em modified outcome}; this approach was first proposed in the Ph.D.\ thesis of James Sinovitch, Harvard University.
Under the simple linear model (\ref{multlinear}), both estimators are consistent for $\boldsymbol{\gamma}_0,$ and the full least squares
approach in general is more efficient than the modified outcome method.
In practice, the simple multivariate linear regression model often
is just a working model approximating the complicated underlying
probabilistic relationship between the treatment, baseline
covariates and outcome variables. It comes as a surprise that even when model (\ref{multlinear})
is misspecified, multivariate linear regression and modified outcome estimators still converge to the same deterministic limit $\boldsymbol{\gamma}^*$ and furthermore $\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*$ is still a sensible estimator for the interaction effect in the sense that it seeks the ``best'' function of $\mathbf{z}$ in a functional space ${\cal F}$ to approximate $\Delta(\mathbf{z})$ by solving the optimization problem:
$$ \min_f {\rm E} \{\Delta(\mathbf{Z})-f(\mathbf{Z})\}^2,$$
$$\mbox{subject to } f\in {\cal F}=\{\boldsymbol{\gamma}'\mathbf{W}(\mathbf{z})| \boldsymbol{\gamma}\in R^p\},$$
where the expectation is with respect to $\mathbf{Z}.$
\subsection{The Modified Covariate Method}
The modified outcomes estimator defined above is useful for the Gaussian case,
but does not generalize easily to more complicated models.
Hence we propose a new estimator which is equivalent to
the modified outcomes approach in the Gaussian case and
extends easily to other models.
This is the main proposal of this paper.
We consider the simple working model \begin{equation}
Y=\alpha_0+\boldsymbol{\gamma}_0'\frac{\mathbf{W}(\mathbf{Z})\cdot T}{2}+\epsilon,
\label{proposal} \end{equation} where $\epsilon$ is the mean zero
random error. Based on model (\ref{proposal}), we propose the {\em modified covariate} estimator $\hat{\boldsymbol{\gamma}}$
as the minimizer of
\begin{eqnarray}
\frac{1}{N}\sum_{i=1}^N \left(Y_i-\boldsymbol{\gamma}'\frac{\mathbf{W}_i\cdot
T_i}{2}\right)^2.
\label{eqn:modcov}
\end{eqnarray}
The fact that we can directly estimate $\boldsymbol{\gamma}_0$ in model (\ref{proposal}) without considering the intercept $\alpha_0$ is due to the orthogonality between $\mathbf{W}({\mathbf{Z}}_i)\cdot T_i$ and the intercept, which is the consequence of the randomization.
That is, we simply multiply each component of $\mathbf{W}_i$ by one-half the treatment assignment indicator ($=\pm 1$) and perform a regular linear regression.
Now since
$$\frac{1}{N}\sum_{i=1}^N \left\{Y_i-\boldsymbol{\gamma}'\frac{\mathbf{W}_i\cdot
T_i}{2}\right\}^2=\frac{1}{4N}\sum_{i=1}^N
\left\{2Y_iT_i-\boldsymbol{\gamma}'\mathbf{W}_i\right\}^2,$$
the modified outcome and modified covariate estimates
are identical and share the same causal interpretation for the simple Gaussian model. Operationally, we can omit the intercept and perform a simple linear regression with the modified covariates. In general, we propose the following modified covariate approach
$$~$$
\fbox{
\begin{minipage}{\textwidth}
\begin{enumerate}
\item Modify the covariate
$$\mathbf{Z}_i \rightarrow \mathbf{W}_i=\mathbf{W}(\mathbf{Z}_i) \rightarrow \mathbf{W}_i^*=\mathbf{W}_i\cdot T_i/2$$
\item Perform appropriate regression
\begin{equation}
Y \sim \boldsymbol{\gamma}_0'\mathbf{W}^*
\label{eqn:proposal}
\end{equation}
based on the modified observations
\begin{eqnarray}
(\mathbf{W}^*_i, Y_i)=\{(\mathbf{W}_i\cdot T_i)/2, Y_i\}, i=1,2,\ldots N.
\label{eqn:moddata}
\end{eqnarray}
\item $\hat{\boldsymbol{\gamma}}'\mathbf{W}(\mathbf{z})$ can be used to stratify patients for individualized treatment selection.
\end{enumerate}
\end{minipage}
}
$$~$$
Figure \ref{fig:example} illustrates how the modified covariate method works
for a single covariate $Z$,
in two treatment groups.
The raw data is shown on the left, and the data with modified covariate
is shown on the right.
The slope of the regression line computed in the right panel estimates the
treatment-covariate interaction.
\begin{figure}
\caption{\em Example of the modified covariate approach.
The raw data is shown on the left, consisting of a single covariate
$Z$ and a treatment $T=-1$ or $1$.
The treatment-covariate interaction has slope $\gamma$ approximately
equal to 1.
On the right panel we have plotted the response against $Z\cdot T/2$.
The regression line computed in the right panel estimates the treatment effect for each given value of the covariate $Z.$}
\label{fig:example}
\end{figure}
The advantage of this new approach
is twofold: it avoids having to directly model the main effects and it has a
causal interpretation for the resulting estimator regardless of the adequacy of the assumed working model (\ref{proposal}).
Furthermore, unlike the modified outcome method, it is straightforward to generalize the new approach to other types of outcomes.
\subsection{Binary Responses} When $Y$ is a binary response, in the
same spirit as the continuous outcome case, we propose to fit
a multivariate logistic regression model with modified covariates $\mathbf{W}^*=\mathbf{W}(\mathbf{Z})\cdot T/2$ generalized from (\ref{eqn:proposal}):
\begin{equation} \mbox{Prob}(Y=1|\mathbf{Z},
T)=\frac{\exp(\boldsymbol{\gamma}_0' \mathbf{W}^*)}{1+\exp(\boldsymbol{\gamma}_0'
\mathbf{W}^*)}. \label{propbinary} \end{equation}
Note that if model (\ref{propbinary}) is correctly specified,
then
\begin{eqnarray*}
\Delta(\mathbf{z})&=&{\rm Prob}(Y^{(1)}=1|\mathbf{Z}=\mathbf{z})-{\rm Prob}(Y^{(-1)}=1|\mathbf{Z}=\mathbf{z})\\
&=&{\rm Prob}(Y=1|T=1, \mathbf{Z}=\mathbf{z})-{\rm Prob}(Y=1 |T=-1, \mathbf{Z}=\mathbf{z})\\
&=& \frac{\exp\{\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{z})/2\}-1}{\exp\{\boldsymbol{\gamma}_0' \mathbf{W}(\mathbf{z})/2\}+1},
\end{eqnarray*}
and thus $\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{z})$ has an appropriate causal interpretation.
However, even when model (\ref{propbinary}) is not correctly specified,
we still can estimate $\boldsymbol{\gamma}_0$ by treating (\ref{propbinary}) as a working model.
In general, the maximum likelihood estimator (MLE) of the working model converges
to a deterministic limit $\boldsymbol{\gamma}^*$ and $\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2$ can be viewed as the solution to the following optimization problem
$$ \mbox{max}_f \mbox{E} \left\{ Yf(\mathbf{Z})T-\log(1+e^{f(\mathbf{Z})T})\right\} $$
$$ \mbox{subject to } f\in {\cal F}=\{\gamma'\mathbf{W}(\mathbf{z})/2| \boldsymbol{\gamma}\in R^p \},$$
where the expectation is with respect to $(Y, T, \mathbf{Z}).$
Therefore, when $\mathbf{W}(\mathbf{z})$ forms a ``rich'' set of basis functions, $\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2$ is an approximation to the maximizer of $\mbox{E} \left\{ Yf(\mathbf{Z})T-\log(1+e^{f(\mathbf{Z})T})\right\}.$ In the appendix, we show that the latter can be represented as
$$f^*(\mathbf{z})=\log\left\{\frac{1+\Delta(\mathbf{z})}{1-\Delta(\mathbf{z})} \right\}$$ under very general assumptions. Therefore,
$$\hat{\Delta}(\mathbf{z})=\frac{\exp\{\hat{\boldsymbol{\gamma}}'\mathbf{W}(\mathbf{z})/2\}-1}{\exp\{\hat{\boldsymbol{\gamma}}'\mathbf{W}(\mathbf{z})/2\}+1}$$ may serve as an estimate for the covariate-specific treatment effect and be used to stratify the patient population, regardless of the validity of the working model assumptions.
As described above, the MLE from the working model (\ref{propbinary}) can always be used to construct a surrogate to the personalized treatment effect measured by the ``risk difference''
$$\Delta(\mathbf{z})=\mbox{E}(Y^{(1)}-Y^{(-1)}|\mathbf{Z}=\mathbf{z}).$$
On the other hand, different measures for individualized treatment effects such as relative risk may also be of interest. For example, if we consider an alternative approach for fitting the logistic regression working model (\ref{propbinary}) by letting
$$\hat{\boldsymbol{\gamma}}=\mbox{argmax}_{\boldsymbol{\gamma}} \sum_{i=1}^n \left\{(1-Y_i)\boldsymbol{\gamma}'\mathbf{W}^*-Y_ie^{-\boldsymbol{\gamma}'\mathbf{W}^*_i}\right\},$$
then $\hat{\boldsymbol{\gamma}}$ converges to a deterministic limit $\tilde{\boldsymbol{\gamma}}^{*}$ and $\mathbf{W}(\mathbf{z})'\tilde{\boldsymbol{\gamma}}^{*}/2$ can be viewed as an approximation to $\log\{\tilde{\Delta}(\mathbf{z})\},$ where
$$ \tilde{\Delta}(\mathbf{z})=\frac{\mbox{Prob}(Y^{(1)}=1|\mathbf{Z}=\mathbf{z})}{\mbox{Prob}(Y^{(-1)}=1|\mathbf{Z}=\mathbf{z})},$$
which measures the treatment effect based on ``relative risk'' rather than ``risk difference''. The detailed justification is given in Appendix 6.1.
\subsection{Survival Responses} When the outcome variable is survival
time, we often do not observe the exact outcome for every subject
in a clinical study due to incomplete follow-up. In this case,
we assume that the outcome $Y$ is a pair of random variables $(X,
\delta)=\{\tilde{X}\wedge C, I(\tilde{X}<C)\},$ where $\tilde{X}$ is the
survival time of primary interest, $C$ is the censoring time and $\delta$
is the censoring indicator.
Firstly, we propose to fit a Cox regression model
\begin{equation} \lambda(t|\mathbf{Z}, T)=\lambda_0(t)e^{\boldsymbol{\gamma}'\mathbf{W}^*} \label{propsurv2} \end{equation}
where $\lambda(t|\cdot)$ is
the hazard function for survival time $\tilde{X}$ and $\lambda_0(\cdot)$
is a baseline hazard function free of $\mathbf{Z}$ and $T.$
When model (\ref{propsurv2}) is correctly specified,
\begin{eqnarray*}
\Delta(\mathbf{z})&=&\log\left[\frac{\mbox{E} \{\Lambda_0(\tilde{X}^{(1)})| \mathbf{Z}=\mathbf{z}\}}{\mbox{E}\{\Lambda_0(\tilde{X}^{(-1)})|\mathbf{Z}=\mathbf{z}\}}\right]\\
&=&\log\left[\frac{\mbox{E} \{\Lambda_0(\tilde{X})| T=1, \mathbf{Z}=\mathbf{z}\}}{\mbox{E}\{\Lambda_0(\tilde{X})| T=-1, \mathbf{Z}=\mathbf{z}\}}\right]\\
&=& -\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{z})
\end{eqnarray*}
and $\boldsymbol{\gamma}_0'\mathbf{W}(\mathbf{z})$ can be used to stratify patient population according to $\Delta(\mathbf{z}),$
where $\Lambda_0(t)=\int_0^t \lambda_0(u)du$ is a monotone increasing function (the baseline cumulative hazard function).
Under the proportional hazards assumption, the maximum partial likelihood estimator $\hat{\boldsymbol{\gamma}}$ is a consistent estimator for $\boldsymbol{\gamma}_0$ and semiparametric efficient.
Moreover, even when model (\ref{propsurv2}) is misspecified,
we still can ``estimate'' $\boldsymbol{\gamma}_0$ by maximizing the partial likelihood
function. In general, the resulting estimator, $\hat{\boldsymbol{\gamma}},$ converges to a deterministic limit
$\boldsymbol{\gamma}^*$, which is the root of a limiting score equation \citep{Lin:Wei:1989}.
More generally, $\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2$ can be viewed as the solution of the optimization problem
$$ \max_f \mbox{E} \int_0^\tau \left[f(\mathbf{Z})T- \log\{\sum_{j=1}^N e^{f(\mathbf{Z})T}I(\tilde{X}\ge u) \} \right]d N(u)$$
$$ \mbox{subject to } f\in {\cal F}=\{\boldsymbol{\gamma}'\mathbf{W}(\mathbf{z})/2| \boldsymbol{\gamma} \in R^p\},$$
where $N(t)=I(\tilde{X}\le t)\delta$ and the expectation is with respect to $(Y, T, \mathbf{Z}).$ Therefore, $\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2$ can be viewed as an approximation to
$$f^*(\mathbf{z})=\mbox{argmax}_f \mbox{E} \int_0^\tau \left[f(\mathbf{Z})T- \log\{\sum_{j=1}^N e^{f(\mathbf{Z})T}I(\tilde{X}\ge u) \} \right]d N(u).$$
In Appendix 6.1, we show that the maximizer $f^*$ satisfies
$$ e^{f^*(\mathbf{z})}\mbox{E}\{\Lambda^*(\tilde{X}^{(1)})|\mathbf{Z}=\mathbf{z}\}-e^{-f^*(\mathbf{z})}\mbox{E}\{\Lambda^*(\tilde{X}^{(-1)})|\mathbf{Z}=\mathbf{z}\}=\mbox{E}(\Delta^{(1)}|\mathbf{Z}=\mathbf{z})-\mbox{E}(\Delta^{(-1)}|\mathbf{Z}=\mathbf{z})$$
for a monotone increasing function $\Lambda^*(u).$ Thus, when censoring rates are balanced between two arms,
$$f^*(\mathbf{z})\approx -\frac{1}{2}\log \left[ \frac{\mbox{E} \{\Lambda^*(\tilde{X}^{(1)})|\mathbf{Z}=\mathbf{z}\}}{\mbox{E}\{\Lambda^*(\tilde{X}^{(-1)})|\mathbf{Z}=\mathbf{z}\}}\right]$$
can be used for characterizing the covariate-specific treatment effect and stratifying the patient population even when the working model (\ref{propsurv2}) is misspecified.
\subsection{Regularization for high dimensional data}
When the dimension of $\mathbf{W}^*$, $p,$ is high, we can
easily apply appropriate variable selection procedures based
on the corresponding working model. For example, $L_1$ penalized
(Lasso) estimators proposed by \citet{Ti96} can be
directly applied to the modified data (\ref{eqn:moddata}). In general,
one may estimate $\boldsymbol{\gamma}$ by minimizing
\begin{equation} \frac{1}{N}\sum_{i=1}^N l(Y_i, \boldsymbol{\gamma}'\mathbf{W}^*_i)+\lambda_0\sum_{j=1}^p |\gamma_{j}|,
\label{eqn:lasso}
\end{equation}
where
$$
l(Y_i, \boldsymbol{\gamma}'\mathbf{W}^*_i)=\begin{cases} \frac{1}{2}(Y_i-\boldsymbol{\gamma}'\mathbf{W}^*_i)^2 &\mbox{ for continuous response}\\
-\{Y_i\boldsymbol{\gamma}'\mathbf{W}^*_i-\log(1+e^{\boldsymbol{\gamma}'\mathbf{W}^*_i})\} &\mbox{ for binary response}\\
-\left[\boldsymbol{\gamma}'\mathbf{W}^*_i- \log\{\sum_{j=1}^N e^{\boldsymbol{\gamma}'\mathbf{W}^*_j}I(X_j\ge X_i) \} \right]\Delta_i &\mbox{ for survival response}
\end{cases}. $$
It might be reasonable to suppose that the covariates interacting with the treatment will more likely
be the ones exhibiting important main effects themselves. Therefore,
one could also apply the adaptive Lasso procedure \citep{Zou2006a}
with feature weights $\hat{w}_j$ proportional to the reciprocal of the univariate ``association strength''
between the outcome $Y$ and the $j$th component of $\mathbf{W}(\mathbf{Z}).$
Specifically, one may modify the penalty in (\ref{eqn:lasso}) as
\begin{equation} \lambda_0\sum_{j=1}^p\frac{|\gamma_{j}|}{\hat{w}_j},
\label{eqn:adaplasso}
\end{equation}
where $\hat{w}_j=|\hat{\theta}_j|^{-1}$ or $(|\hat{\theta}_{j(-1)}|+|\hat{\theta}_{j1}|)^{-1},$ and $\hat{\theta}_{j1},$ $\hat{\theta}_{j(-1)}$ and $\hat{\theta}_{j}$ are the estimated regression coefficients of the $j$th component of $\mathbf{W}(\mathbf{Z})$ in appropriate univariate regression analysis with observations from the group $T=1$ only, from the group $T=-1$ only, and from both groups, respectively. Other regularization methods such as elastic net may also be used \citep{Zou:Hastie:2005}.
Interestingly, one can treat the modified data (\ref{eqn:moddata})
just as generic data and hence couple it with other statistical learning techniques. For example, one can apply a classifier such as prediction analysis of microarrays (PAM) to the modified data for the purpose of finding subgroup of samples in which the treatment effect is large. We also can do large scale hypothesis testing
on the modified data to determine which gene-treatment interactions have a significant
effect on the outcome.
\subsection{Efficiency Augmentation}
When the models (\ref{eqn:proposal}, \ref{propbinary} and \ref{propsurv2}) with modified covariates are correctly specified, the MLE for $\boldsymbol{\gamma}^*$ is asymptotically the most efficient estimator. However, when the models are treated as working models subject to mis-specification, a more efficient estimator can be obtained for estimating the same deterministic limit $\boldsymbol{\gamma}^*.$ To this end, note that in general $\hat{\boldsymbol{\gamma}}$ is defined as the minimizer of an objective function motivated from a working model:
\begin{equation}
\hat{\boldsymbol{\gamma}}=\mbox{argmin}_{\boldsymbol{\gamma}} \frac{1}{N}\sum_{i=1}^N l(Y_i, \boldsymbol{\gamma}'\mathbf{W}^*_i).
\label{obj}
\end{equation}
Noting that for any function $\mathbf{a}(\mathbf{z}): R^q \rightarrow R^p,$ $E\{T_i \mathbf{a}(\mathbf{Z}_i)\}=0$ due to randomization, the minimizer of the augmented objective function
$$\frac{1}{N}\sum_{i=1}^N \left\{ l(Y_i, \boldsymbol{\gamma}'\mathbf{W}^*_i)-T_i \mathbf{a}(\mathbf{Z}_i)'\boldsymbol{\gamma} \right\} $$
converges to the same limit as $\hat{\boldsymbol{\gamma}},$ when $N \rightarrow \infty.$ Furthermore, by selecting an optimal augmentation term $\mathbf{a}_0(\cdot)$, the minimizer of the augmented objective function can have smaller variance than that of the minimizer of the original objective function.
In appendix 6.2, we show that $$\mathbf{a}_0(\mathbf{z})=-\frac{1}{2}\mathbf{W}(\mathbf{z}) \mbox{E}(Y|\mathbf{Z}=\mathbf{z})$$
and $$\mathbf{a}_0(\mathbf{z})=-\frac{1}{2}\mathbf{W}(\mathbf{z}) \{\mbox{E}(Y|\mathbf{Z}=\mathbf{z})-0.5\}$$ are optimal choices for continuous and binary responses, respectively. Therefore, we propose the following two-step procedure for estimating $\boldsymbol{\gamma}^*:$
$$~$$
\fbox{
\begin{minipage}{\textwidth}
\begin{enumerate}
\item Estimate the optimal $\mathbf{a}_0(\mathbf{z}):$
\begin{enumerate}
\item For continuous response, fit the linear regression model $E(Y|\mathbf{Z})=\xi'B(\mathbf{Z})$ for appropriate function $B(\mathbf{Z})$ with OLS. Appropriate regularization will be used if the dimension of $B(\mathbf{Z})$ is high. Let
$$\hat{\mathbf{a}}(\mathbf{z})=-\frac{1}{2}\mathbf{W}(\mathbf{z})\times \hat{\xi}'B(\mathbf{z}).$$
\item For binary response, fit the logistic regression model $\mbox{logit}\{\mbox{Prob}(Y=1|\mathbf{Z})\}=\xi'B(\mathbf{Z})$ for appropriate function $B(\mathbf{Z})$ by maximizing the likelihood function. Appropriate regularization will be used if the dimension of $B(\mathbf{Z})$ is high. Let
$$\hat{\mathbf{a}}(\mathbf{z})=-\frac{1}{2}\mathbf{W}(\mathbf{z}) \times \left\{\frac{e^{\hat{\xi}'B(\mathbf{z})}}{1+e^{\hat{\xi}'B(\mathbf{z})}}-\frac{1}{2}\right\}.$$
\end{enumerate}
Here $B(\mathbf{z})=\{B_1(\mathbf{z}), \cdots, B_S(\mathbf{z})\}$ and $B_k(\mathbf{z}): R^q \rightarrow R^1 $ is selected basis function.
\item Estimate $\boldsymbol{\gamma}^*$
\begin{enumerate}
\item For continuous response, we minimize $$\frac{1}{N}\sum_{i=1}^N \left\{\frac{1}{2}(Y_i-\boldsymbol{\gamma}'\mathbf{W}^*_i)^2-\boldsymbol{\gamma}'\hat{\mathbf{a}}(\mathbf{Z}_i)T_i\right\}$$
with appropriate regularization if needed.
\item For binary response, we minimize $$\frac{1}{N}\sum_{i=1}^N \left[-\{Y_i\boldsymbol{\gamma}'\mathbf{W}^*_i-\log(1+e^{\boldsymbol{\gamma}'\mathbf{W}^*_i})\}-\boldsymbol{\gamma}'\hat{\mathbf{a}}(\mathbf{Z}_i)T_i\right]$$
with appropriate regularization if needed.
\end{enumerate}
\end{enumerate}
\end{minipage}
}
$$~$$
For survival outcome, the log-partial likelihood function is not a simple sum of i.i.d terms. However, in Appendix 6.2 we show that the optimal choice of $\mathbf{a}(\mathbf{z})$ is
$$\mathbf{a}_0(\mathbf{z})=-\frac{1}{2}\left[\frac{1}{2}\mathbf{W}(\mathbf{z})\left\{G_1(\tau; \mathbf{z})+G_2(\tau; \mathbf{z})\right\}-\int_0^\tau \mathbf{R}(u) \{G_1(du; \mathbf{z})-G_2(du; \mathbf{z})\}\right],$$
where $G_1(u; \mathbf{z})=E\{M(u)|\mathbf{Z}=\mathbf{z}, T=1\},$
$G_2(u;\mathbf{z})=E\{M(u)|\mathbf{Z}=\mathbf{z}, T=-1\},$
$$M(t, \mathbf{W}^*, \boldsymbol{\gamma}^*)=N (t)-\int_0^t \frac{I(X \ge u) e^{\boldsymbol{\gamma}'\mathbf{W}^*} d {\rm E}\{N(u)\}}{{\rm E}\{e^{\boldsymbol{\gamma}'\mathbf{W}^*}I(X\ge u)\}}$$
and
$$\mathbf{R}(u; \boldsymbol{\gamma}^*)=\frac{{\rm E}\{\mathbf{W}^*e^{\boldsymbol{\gamma}'\mathbf{W}^*}I(X\ge u)\}}{{\rm E}\{e^{\boldsymbol{\gamma}'\mathbf{W}^*}I(X\ge u)\}}.$$
Unfortunately, $\mathbf{a}_0(\mathbf{z})$
depends on the unknown parameter $\boldsymbol{\gamma}^*.$ On the other hand, in the
high-dimensional case, the interaction effect is usually small and
it is not unreasonable to assume that $\boldsymbol{\gamma}^*\approx 0.$ Furthermore,
if the censoring patterns are similar in both arms, we have $G_1(u,
\mathbf{z})\approx G_2(u, \mathbf{z}).$ Using these two approximations, we can
simplify the optimal augmentation term as
$$\mathbf{a}_0(\mathbf{z})=-\frac{1}{4}\mathbf{W}(\mathbf{z})\left\{G_1(\tau; \mathbf{z})+G_2(\tau; \mathbf{z})\right\}=-\frac{1}{2}\mathbf{W}(\mathbf{z}) \times \mbox{E}\{M(\tau)|\mathbf{Z}=\mathbf{z}\}$$
where
$$M(t)=N (t)-\int_0^t \frac{I(X \ge u) d {\rm E}\{N(u)\}}{{\rm E}\{I(X\ge
u)\}}.$$
Therefore, we propose to employ the following approach for implementing the efficiency augmentation procedure:\\
\fbox{
\begin{minipage}{\textwidth}
\begin{enumerate}
\item Calculate
$$\hat{M}_i(\tau)=N_i(\tau)-\int_0^{\tau}\frac{I(X_i\ge u) d \{\sum_{j=1}^N N_j(u)\}}{\sum_{j=1}^N I(X_j\ge u)}$$
for $i=1, \cdots, N$ and fit the linear regression model $E(\hat{M}(t)|\mathbf{Z})=\xi'B(\mathbf{Z})$ for appropriate function $B(\mathbf{Z})$ with OLS and appropriate regularization if needed. Let
$$\hat{\mathbf{a}}(\mathbf{z})=-\frac{1}{2}\mathbf{W}(\mathbf{z})\times \hat{\xi}'B(\mathbf{z}).$$
\item Estimate $\boldsymbol{\gamma}^*$ by minimizing
$$\frac{1}{N}\sum_{i=1}^N \left(-\left[\boldsymbol{\gamma}'\mathbf{W}^*_i- \log \{\sum_{j=1}^N e^{\boldsymbol{\gamma}'\mathbf{W}^*_j}I(X_j\ge X_i)\} \right]\Delta_i-\boldsymbol{\gamma}'\hat{\mathbf{a}}(\mathbf{Z}_i)T_i\right)$$
with appropriate penalization if needed.
\end{enumerate}
\end{minipage}
}
$$~$$
\noindent
{\bf Remark 1}
When the response is continuous, the efficient augmentation estimator is the minimizer of
\begin{align*}
& \sum_{i=1}^N
\left[\frac{1}{2}\left\{Y_i-\frac{1}{2}\boldsymbol{\gamma}'\mathbf{W}(\mathbf{Z}_i)T_i\right\}^2-\boldsymbol{\gamma}'\hat{\mathbf{a}}(\mathbf{Z}_i)T_i \right] \\
=&
\sum_{i=1}^N
\frac{1}{2}\left\{Y_i-\hat{\xi}'B(\mathbf{Z}_i)-\frac{1}{2}\boldsymbol{\gamma}'\mathbf{W}(\mathbf{Z}_i)T_i \right\}^2+\mbox{constant}.
\end{align*}
This equivalence implies that this efficiency augmentation
procedures is asymptotically equivalent to that based on a simple
multivariate regression with main effect $\hat{\xi}'B(\mathbf{Z}_i)$ and
interaction $\boldsymbol{\gamma}'\mathbf{W}(\mathbf{Z})\cdot T.$ This is not a surprise. As we pointed out in section 2.1, the choice of the main effect in the linear regression does not affect the asymptotic consistency of estimating the interactions. On the other hand, a good choice of the main effect model can help to estimate the interaction, i.e., the personalized treatment effect, more accurately.
Another consequence is that one may directly use the same algorithm
solving standard optimization problem to obtain the augmented estimator when lasso
penalty is used. For binary or survival response, the
augmented estimator under lasso regularization can be obtained with a slightly modified algorithm designed for lasso optimization as well. The detailed algorithm is given in Appendix 6.3.
$$ $$
\noindent
{\bf Remark 2}
For nonlinear models such as logistic and Cox regressions, the augmentation method is NOT equivalent to the full regression approach including main effect and interaction terms. In those cases, different specifications of the main effects in the regression model result in asymptotically different estimates for the interaction term, which, unlike the proposed modified covariate estimator, in general cannot be interpreted as the personalized treatment effect.
$$ $$
\noindent
{\bf Remark 3}
With binary response, the estimating equation targeting on approximating the relative risk is
$$\sum_{i=1}^N \mathbf{W}^*_i\{(1-Y_i)-Y_ie^{-\boldsymbol{\gamma}'\mathbf{W}^*_i}\}$$
and the optimal augmentation term $a_0(\mathbf{z})$ can be approximated by
$$-\frac{1}{2} \mathbf{W}(\mathbf{z})\left \{\mbox{E}(Y|\mathbf{Z}=\mathbf{z})- \frac{1}{2}\right\}$$
when $\boldsymbol{\gamma}^* \approx 0.$ The efficiency augmentation algorithm can be carried out accordingly.
$$ $$
\noindent
{\bf Remark 4}
A similar technique can also be used for improving other estimators such as that proposed by \cite{ZZRK:12}, where the surrogate objective function for the weighted mis-classification error can be written in the form of (\ref{obj}) as well. The optimal function $\mathbf{a}_0(\mathbf{z})$ needs to be derived case by case.
\section{Numerical Studies}
\label{sec:numerical}
In this section, we perform an extensive
numerical study to investigate the finite sample performance of the proposed method in various settings: the treatment may or may not have a marginal main effect between two groups; the personalized treatment effect may depend on complicated functions of covariates such as interactions among covariates; the regression model for detecting the interaction may or may not be correctly specified. Due to the limitation of space, we only present simulation results from selected representative cases. The results for other scenarios are similar to those presented.
\subsection{Continuous responses}
For continuous responses, we generated $N$ independent Gaussian samples from the regression model
\begin{eqnarray}
Y&=&\sum_{j=1}^p \beta_j Z_j + \sum_{j=1}^p \gamma_j Z_j T+\sigma_0\cdot \epsilon,
\label{eqn:simmodel}
\end{eqnarray}
where the covariate $(Z_1, \cdots, Z_p)$ follows a mean zero multivariate normal distribution with a compound symmetric variance-covariance matrix, $(1-\rho)\mathbf{I}_p+\rho \mathbf{1}'\mathbf{1},$ and $\epsilon\sim N(0, 1).$ We let $(\gamma_1, \gamma_2, \gamma_3, \gamma_4, \gamma_5, \cdots, \gamma_p)=(1/2, -1/2, 1/2, -1/2, 0, \cdots, 0),$ $\sigma_0=\sqrt{2},$ $N=100,$ and $p=50$ and $1000$ representing high and low dimensional cases, respectively. The treatment $T$ was generated as $\pm 1$ with equal probability at random. We consider four sets of simulations:
\begin{enumerate}
\item $\beta_j=(-1)^{j+1}I(3\le j\le 10)/4$ and $\rho=0;$
\item $\beta_j=(-1)^{j+1}I(3\le j\le 10)/4$ and $\rho=0.5;$
\item $\beta_j=(-1)^{j+1} I(3\le j\le 10)/2$ and $\rho=0;$
\item $\beta_j=(-1)^{j+1}I(3\le j\le 10)/2$ and $\rho=0.5.$
\end{enumerate}
Settings 1 and 2 present a relatively moderate main effect, where the variability in the response attributable to the main effect is the same as that attributable to the interaction. Settings 3 and 4 represent a relatively big main effect, where the variability in the response attributable to the main effect is twice as big as that attributable to the interaction.
For each of the simulated data set, we implemented three methods:
\begin{itemize}
\item {\em full regression:} The first method is to fit a multivariate linear regression with full main effect
and covariate/treatment interaction terms, i.e., the dimension of the
covariate matrix was $2(p+1)$. The Lasso was used to select the variables.
\item {\em new:} The second method is to fit a multivariate linear regression with the modified covariate $\mathbf{W}^*=(1, \mathbf{Z})'\cdot T/2$ as the covariates, i.e., the dimension of the covariate matrix is $p+1.$ Again, the Lasso is used for selecting variables having treatment interaction.
\item {\em new/augmented:} the proposed method with efficiency augmentation, where $\mbox{E}(Y|\mathbf{Z})$ is estimated with the lasso-regularized ordinary least squares method and $B(\mathbf{z})=\mathbf{z}.$
\end{itemize}
For all three methods, we selected the Lasso penalty parameter via 20-fold cross-validation. To evaluate the performance of the resulting score measuring the individualized treatment effect, we estimated the Spearman's rank correlation coefficient between the estimated score and the ``true'' treatment effect $$\Delta(\mathbf{Z})=\mbox{E}(Y^{(1)}-Y^{(-1)}|\mathbf{Z})=(Z_1-Z_2+Z_3-Z_4)$$
in an independently generated set with a sample size of 10000.
Based on 500 sets of simulations, we plotted the boxplots of the rank correlation coefficients between the estimated scores $\hat{\boldsymbol{\gamma}}'\mathbf{Z}$ and $\Delta(\mathbf{Z})$ under simulation settings (1), (2), (3) and (4) in top left, top right, bottom left and bottom right panels of Figure \ref{fig:gauss}, respectively. When the main effect is moderate and covariates are independent (setting 1), the performance of the proposed method is better than that of the full regression approach. However, when the main effect is relatively big compared to interactions (settings 3 and 4), the proposed method is unable to estimate the correct individualized treatment effect well and is actually inferior to the simple regression method. On the other hand, the performance of the ``new/augmented'' is the best or nears the best across all the four settings and is sometimes substantially better than its competitors.
\begin{figure}
\caption{\em Boxplots for the correlation coefficients between the estimated score and true treatment effect
with three different methods applied to continuous outcomes. The empty and filled boxes represent low and high dimensional ($p=50$ and $p=1000$) cases, respectively.
Left upper panel: moderate main effect and independent covariates; right upper panel: moderate main effect and correlated covariates; left lower panel: big main effect and independent covariates; right lower panel: big main effect and correlated covariates.}
\label{fig:gauss}
\end{figure}
\subsection{Binary responses}
For binary responses, we used the same simulation design as that for the continuous response. Specifically, we generated $N$ independent binary samples from the regression model
\begin{eqnarray}
Y&=& I\left(\sum_{j=1}^p \beta_j Z_j + \sum_{j=1}^p \gamma_j Z_j T+\sigma_0\cdot \epsilon \ge 0\right),
\label{eqn:binmodel1}
\end{eqnarray}
where all the model parameters were the same as those in the case of continuous response. Note that the logistic regression model is misspecified under the chosen simulation design. We also considered the same four settings with different combinations of $\beta_j$ and $\rho.$ For each of the simulated data sets, we implemented three methods:
\begin{enumerate}
\item {\em full regression:} The first method is to fit a multivariate logistic regression with full main effect
and covariate/treatment interaction terms, i.e., the dimension of the
covariate matrix was $2(p+1)$. The Lasso was used to select the variables.
\item {\em new:} The second method is to fit a multivariate logistic regression (without intercept) with the modified covariate $\mathbf{W}^*=(1, \mathbf{Z})'\cdot T/2$ as the covariates. Again, the Lasso was used for selecting variables having
treatment interaction.
\item {\em new/augmented:} the proposed method with efficiency augmentation, where $\mbox{E}(Y|\mathbf{Z})$ is estimated with Lasso-penalized logistic regression.
\end{enumerate}
To evaluate the performance of the resulting score measuring the individualized treatment effect, we estimated the Spearman's rank correlation coefficient between the estimated score and the ``true'' treatment effect
\begin{align*}
\Delta(\mathbf{Z})&=\mbox{E}(Y^{(1)}-Y^{(-1)}|\mathbf{Z})\\
&=\Phi\left(\frac{\sum_{j=1}^p(\beta_j+\gamma_j) Z_j}{\sigma_0}\right)-\Phi\left(\frac{\sum_{j=1}^p(\beta_j-\gamma_j) Z_j}{\sigma_0}\right)
\end{align*}
where $\Phi$ was the cumulative distribution function of standard normal. Although the scores measuring the interaction from the first and second/third methods were different even when the sample size goes to infinity, the rank correlation coefficients put them on the same footing in comparing performances.
In top left, top right, bottom left and bottom right panels of Figure \ref{fig:bin}, we plotted the boxplots of the correlation coefficients between the estimated scores $\hat{\boldsymbol{\gamma}}'\mathbf{Z}$ and $\Delta(\mathbf{Z})$ under simulation settings (1), (2), (3) and (4), respectively. The patterns are similar to those for the continuous response. The ``new/augmented method'' performed the best or close to the best in all the four settings. The efficiency gain of the augmented method in setting 4, where the main effect was relatively big and covariates were correlated, was more significant than that in other settings.
In an additional simulation study, we also evaluated the empirical performance of the generalized modified covariate approach with the nearest shrunken centroid classifier. In one set of the simulation, the binary response is simulated from model (\ref{eqn:binmodel1}) with $p=50$, $n=200$, $\beta_j=I(j\le 20)/2,$ $\gamma_j=I(j\le 4)/2$ and $\sigma_0=\sqrt{2}.$ Here the first four predictors have covariate treatment interaction. We applied the nearest shrunken centroid classifier \citep{THNC2002} to the modified data (\ref{eqn:moddata}) with the shrinkage parameter selected via 10 fold cross-validation. This produced a posterior probability estimator for $\{Y=1\}.$ We then applied this estimated posterior probability, as an interaction score, to an independently generated test set of size 400. We dichotomized the observations in the test set into high and low score groups according to the median value and calculated the differences between two treatment arms in high and low score groups separately. With 100 replications, the boxplots of the differences in high and low score groups were shown in the right panel of Figure \ref{fig:logist}. For comparison purposes, the empirical differences between two arms in high and low score groups determined by the true interaction score $\sum_{j=1}^p \gamma_jZ_j$ were shown in the left panel of Figure \ref{fig:logist}. It can be seen that the modified covariate approach, coupled with the nearest shrunken centroid classifier, provided reasonable stratification for differentiating the treatment effect.
\begin{figure}
\caption{\em Boxplots for the correlation coefficients between the estimated score and true treatment effect
with three different methods applied to binary outcomes. The empty and filled boxes represent low and high dimensional ($p=50$ and $p=1000$) cases, respectively.
Left upper panel: moderate main effect and independent covariates; right upper panel: moderate main effect and correlated covariates; left lower panel: big main effect and independent covariates; right lower panel: big main effect and correlated covariates.}
\label{fig:bin}
\end{figure}
\begin{figure}
\caption{Left panel: the boxplots for the group differences in $Y$ in subgroups stratified by the optimal score; right panel: the boxplots for the group differences in $Y$ in subgroups stratified by posterior probability based on the independently trained nearest shrunken centroid classifier.}
\label{fig:logist}
\end{figure}
\subsection{Survival Responses}
For survival responses, we used the same simulation design as that for the continuous and binary responses. Specifically, we generated $N$ independent survival time from the regression model
\begin{eqnarray}
\tilde{X}&=& \exp\left(\sum_{j=1}^p \beta_j Z_j + \sum_{j=1}^p \gamma_j Z_j T+\sigma_0\cdot \epsilon\right),
\label{eqn:survmodel1}
\end{eqnarray}
where all the model parameters were the same as in the previous subsections. The censoring time was generated from a uniform distribution $U(0, \xi_0),$ where $\xi_0$ was selected to induce a 25\% censoring rate. For each of the simulated data sets, we implemented three methods:
\begin{enumerate}
\item {\em full regression:} The first method was to fit a multivariate Cox regression with full main effect
and covariate/treatment interaction terms, i.e., the dimension of the
covariate matrix was $2p+2$. The Lasso was used to select the variables.
\item {\em new:} The second method was to fit a multivariate Cox regression with the modified covariate $\mathbf{W}^*=(1, \mathbf{Z})'\cdot T/2$ as the covariates. Again, the Lasso was used for selecting variables having
treatment interaction.
\item {\em new/augmented:} the proposed method with efficiency augmentation. To model the $\mbox{E}\{M(\tau)|\mathbf{Z}\}$, we used linear regression with lasso regularization method.
\end{enumerate}
To evaluate the performance of the resulting score measuring the individualized treatment effect, we estimated Spearman's rank correlation coefficient between the estimated score and the ``true'' treatment effect based on the survival probability at $t_0=5$:
\begin{align*}
\Delta(\mathbf{Z})&=\mbox{Prob}(\tilde{X}^{(1)}\ge t_0|\mathbf{Z})-\mbox{Prob}(\tilde{X}^{(-1)}\ge t_0|\mathbf{Z})\\
&=\Phi\left(\frac{\sum_{j=1}^p(\beta_j+\gamma_j) Z_j-\log(t_0)}{\sigma_0}\right)-\Phi\left(\frac{\sum_{j=1}^p(\beta_j-\gamma_j) Z_j-\log(t_0)}{\sigma_0}\right).
\end{align*}
In top left, top right, bottom left and bottom right panels of Figure \ref{fig:surv}, we plotted the boxplots of the correlation coefficients between the estimated scores $\hat{\boldsymbol{\gamma}}'\mathbf{Z}$ and $\Delta(\mathbf{Z})$ under simulation settings, (1), (2), (3) and (4), respectively. The patterns were similar to those for the continuous and binary responses and confirmed our findings that the ``efficiency-augmented method'' performed the best among the three methods in general.
\begin{figure}
\caption{\em Boxplots for the correlation coefficients between the estimated score and true treatment effect
with three different methods applied to survival outcomes. The empty and filled boxes represent low and high dimensional ($p=50$ and $p=1000$) cases, respectively.
Left upper panel: moderate main effect and independent covariates; right upper panel: moderate main effect and correlated covariates; left lower panel: big main effect and independent covariates; right lower panel: big main effect and correlated covariates.}
\label{fig:surv}
\end{figure}
\section{Examples}
\label{sect:example}
It has been known that the breast cancer can be classified into different subtypes using gene expression profile and the effective treatment may be different for different subtypes of the disease \citep{Loi:2007}. In this section, we apply the proposed method to study the potential interactions between gene expression levels and Tamoxifen treatment in the breast cancer patients.
The data set consists of 414 patients in the cohort GSE6532 collected by \cite{Loi:2007} for the purpose of characterizing ER-positive subtypes with gene expression profiles. The dataset, including demographic information and gene expression levels, can be downloaded from the website {\it www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE6532}. Excluding patients with incomplete information, there are 268 and 125 patients receiving Tamoxifen and alternative treatments, respectively. In addition to the routine demographic information, we have $44,928$ gene expression measurements for each of the 393 patients. The outcome of primary interest here is the distant metastasis free survival time, which is subject to right censoring due to incomplete follow-up. The metastasis free survival times in the two treatment groups are not statistically different with a two-sided $p$ value of 0.59 based on the log-rank test (Figure \ref{example0}). The goal of the analysis is to construct a score using gene expression levels for identifying a subgroup of patients who may or may not benefit from the Tamoxifen treatment in terms of the distant metastasis free survival. To this end, we select the first 90 patients in the Tamoxifen arm and an equal number of patients in the alternative treatment arm to form the training set and reserve the remaining 213 patients as the independent validation set. In selecting the training and validation sets, we use the original order of the observations in the dataset without additional sorting to ensure an objective analysis.
We first identify 5,000 genes with the highest empirical variances and then construct an interaction score by fitting the Lasso penalized Cox regression model with modified covariates based on the 5,000 genes in the training set. The Lasso penalty parameter is selected via 20-fold cross-validation. The resulting interaction score is a linear combination of expression levels of seven genes. Here, a low interaction score favors Tamoxifen treatment. We apply the gene score to classify the patients in the validation set into high and low score groups according to whether her score is greater than the median level. In the high score group, the distant metastasis free survival time in the Tamoxifen group is shorter than that in the alternative group with an estimated hazard ratio of 3.52 for Tamoxifen versus non-Tamoxifen treatment group (logrank test $p=0.064$). In the low score group, the distant metastasis free survival time in the Tamoxifen group is longer than that in the alternative group with an estimated hazard ratio of 0.694 ($p=0.421$). The estimated survival functions of both treatment groups are plotted in the upper panels of Figure~\ref{example1}. The interaction between constructed gene score and treatment is statistically significant in the multivariate Cox regression based on the validation set ($p=0.004$).
Furthermore, we implement the efficiency augmentation method and obtain a new score, which is based on expression levels of eight genes. Again, we classify the patients in the validation set into high and low score groups based on the constructed gene score. In the high score group, the distant metastasis free survival time in the Tamoxifen group is shorter than that in the alternative group with a $p$ value of 0.158. The estimated hazard ratio is 2.29 for Tamoxifen versus non-Tamoxifen treatment group. In the low score group, the distant metastasis free survival time in the Tamoxifen group is longer than that in the alternative group with an estimated hazard ratio of 0.828. The $p$ value from the logrank test is not significant ($p=0.697$). The estimated survival functions of both treatment groups are plotted in the middle panels of Figure~\ref{example1}. The separation is slightly worse than that based on the gene score constructed without augmentation. The interaction between constructed gene score and treatment is also statistically significant at 0.05 level ($p=0.025$).
For comparison purposes, we also fit a multivariate Cox regression model with treatment, the gene expression levels, and all treatment-gene interactions as the covariates. The Lasso penalty parameter is selected via 20-fold cross-validation. The resulting gene score is based on a single gene from the estimated treatment-gene interaction term of the Cox model. However, the interaction score fails to stratify the population according to the treatment effect in the validation set. The results are shown in the lower panel of Figure~\ref{example1}. The interaction between the constructed gene score and treatment is not statistically significant ($p=0.29$).
To further objectively examine the performance of the proposal in this data set, we randomly split the data into training and validation sets and construct the score measuring individualized treatment effect in the training sets with three methods: ``new'', ``new/augmented'' and ``full regression''. Patients in the test set are then stratified into high and low score groups. We calculate the difference in log hazard ratio for Tamoxifen versus non-Tamoxifen treatment between high and low score groups. A positive number indicates that women in the low score group benefited more from Tamoxifen treatment than those in the high score group as the model indicates. In Figure~\ref{example2}, we plot the boxplot of the differences in the log hazard ratio based on 100 random splits. To speed up the computation, all scores are constructed using only 2500 genes with top empirical variances. The results indicate that the proposed and the corresponding augmented methods tend to perform better than the common full regression method and this observation is consistent with our previous findings based on simulation studies.
As a limitation of this example, the treatment is not randomly assigned to the patients as in a standard randomized clinical trial. Therefore, the results need to be interpreted with caution. In addition, the sample size is limited and further verification of the constructed gene score with independent data sets is needed.
\begin{figure}
\caption{Survival functions of the Tamoxifen and alternative treatment groups in 393 breast cancer patients. red line, Tamoxifen treatment group; black line, alternative treatment group }
\label{example0}
\end{figure}
\begin{figure}
\caption{Survival functions of the Tamoxifen and alternative treatment groups stratified by the interaction score in the test sets: red line, Tamoxifen treatment group; black line, alternative treatment group. Upper panels: the score based on the ``new'' method; middle panels: the score based on ``new/augmentated'' method; lower panel: the score is based on ``full regression'' method. }
\label{example1}
\end{figure}
\begin{figure}
\caption{Boxplots for differences in log(hazard ratio) between high and low risk groups based on 100 random splits on GSE6532. A big positive number indicates high quality of the constructed score in stratifying patients according to the individualized treatment effect.}
\label{example2}
\end{figure}
\section{Discussion} In this paper we have proposed a simple method to explore the potential
interactions between treatment and a set of high dimensional covariates. The
general idea is to use $\mathbf{W}(\mathbf{Z})\cdot T/2$ as new covariates
in a regression or generalized regression model to predict the outcome variable.
The resulting linear
combination $\hat{\boldsymbol{\gamma}}'\mathbf{W}(\mathbf{Z})$ is then used to stratify the patient
population. A simple efficiency augmentation procedure can be used to improve the performance of the method.
The proposed method can be used in much broader
way. For example, after creating the modified covariates $\mathbf{W}(\mathbf{Z})\cdot T/2$, other
data mining techniques such as PAM and support vector machines can also be used to link the new covariates with the outcomes \citep{Fr91, pam2003, hastie06:_discus_suppor_vector_machin_applic}. Most
dimension reduction methods in the literature can be readily adapted
to handle the potentially high dimensional covariates. For univariate
analysis, we also may perform large scale hypothesis testing on the
modified data, to identify a list of covariates having interaction with the treatment; one could for example directly use the
Significance Analysis of Microarrays (SAM) method \citep{SAM02} for this purpose. Extensions in these
directions are promising and warrant further research.
Lastly, the proposed method can also be used to analyze data from observational studies.
However, the constructed interaction score may lose the corresponding causal interpretation. On the other hand, if a reasonable propensity score model is available, then we can still implement the modified covariate approach on matched or reweighted data such that the resulting score still retains
the appropriate causal interpretation \citep{Rosenbaum:Rubin:1983}.
\section{Appendix}
\subsection{Justification of the objective function based on the working model}
Under the linear working model for continuous response, we have
$$ \mbox{E} \{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}= \frac{1}{2}\left[\mbox{E}\{ (Y^{(1)})^2|\mathbf{Z}\}-2m_1(\mathbf{Z})f(\mathbf{Z})+f(\mathbf{Z})^2\right]$$
and
$$ \mbox{E} \{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\}=\frac{1}{2}\left[\mbox{E}\{ (Y^{(-1)})^2|\mathbf{Z}\}+2m_{-1}(\mathbf{Z})f(\mathbf{Z})+f(\mathbf{Z})^2\right],$$
where $m_t(\mathbf{z})=\mbox{E}(Y^{(t)}|\mathbf{Z}=\mathbf{z})$ for $t=1$ and -1. Therefore
\begin{align*}
{\cal L}(f)=&\mbox{E}\{l(Y, f(\mathbf{Z})T)\}\\
=&\mbox{E}_{\mathbf{Z}}\left[\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}+\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\} \right]\\
=&\mbox{E}_{\mathbf{Z}}\left(\left[\frac{1}{2}\{m_1(\mathbf{Z})-m_{-1}(\mathbf{Z})\}-f(\mathbf{Z})\right]^2 \right)+\mbox{constant}.
\end{align*}
Therefore, the minimizer of this objective function is
$$f^*(\mathbf{z})=\frac{1}{2}\{m_1(\mathbf{z})-m_{-1}(\mathbf{z})\}=\frac{1}{2}\Delta(\mathbf{z})$$ for all $\mathbf{z} \in \mbox{Support of } \mathbf{Z}.$
$$ $$
Under the logistic working model for binary response, we have
$$ \mbox{E}\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}= m_1(\mathbf{Z})f(\mathbf{Z})-\log(1+e^{f(\mathbf{Z})}),$$
and
$$ \mbox{E}\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\}=-m_{-1}(\mathbf{Z})f(\mathbf{Z})-\log(1+e^{-f(\mathbf{Z})}).$$
Thus
\begin{align*}
{\cal L}(f)=& \mbox{E}\{l(Y, f(\mathbf{Z})T)\}\\
=&\mbox{E}_{\mathbf{Z}}\left[\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}+\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\} \right]\\
=&\frac{1}{2}\mbox{E}_{\mathbf{Z}}\left[\Delta(\mathbf{Z})f(\mathbf{Z})-\log(1+e^{f(\mathbf{Z})})-\log(1+e^{-f(\mathbf{Z})}) \right].
\end{align*}
Therefore
$$\frac{\partial {\cal L}(f)}{\partial f}=\frac{1}{2} \mbox{E}_{\mathbf{Z}}\left[\Delta(\mathbf{Z})-\frac{1-e^{f(\mathbf{Z})}}{1+e^{f(\mathbf{Z})}} \right],$$
which implies that the minimizer of ${\cal L}(f)$ is
$$f^*(\mathbf{z})=\log\frac{1-\Delta(\mathbf{z})}{1+\Delta(\mathbf{z})}$$
for all $\mathbf{z} \in \mbox{Support of } \mathbf{Z}$ or equivalently
$$\Delta(\mathbf{z})=\frac{1-e^{f^*(\mathbf{z})}}{1+e^{f^*(\mathbf{z})}}.$$
Alternatively, under the logistic working model with binary response, we may focus on the objective function
$$\tilde{l}(Y, f(\mathbf{Z})T)=(1-Y)f(\mathbf{Z})T-Ye^{-f(\mathbf{Z})T}.$$
Therefore
$$ \mbox{E}\{\tilde{l}(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}= \{1-m_1(\mathbf{Z})\}f(\mathbf{Z})-m_1(\mathbf{Z})e^{-f(\mathbf{Z})},$$
and
$$ \mbox{E}\{\tilde{l}(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\}=-\{1-m_{-1}(\mathbf{Z})\}f(\mathbf{Z})-m_{-1}(\mathbf{Z}) e^{f(\mathbf{Z})}.$$
Thus
\begin{align*}
{\cal L}(f)=& \mbox{E}\{\tilde{l}(Y, f(\mathbf{Z})T)\}\\
=&\mbox{E}_{\mathbf{Z}}\left[\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}+\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\} \right]\\
=&\mbox{E}_{\mathbf{Z}}\left[\frac{1}{2}\{m_1(\mathbf{Z})-m_{-1}(\mathbf{Z})\}f(\mathbf{Z})-\frac{1}{2}m_1(\mathbf{Z})e^{-f(\mathbf{Z})}-m_{-1}(\mathbf{Z}) e^{f(\mathbf{Z})} \right]
\end{align*}
Therefore
$$\frac{\partial {\cal L}(f)}{\partial f}=\frac{1}{2} \mbox{E}_{\mathbf{Z}}\left[\{m_1(\mathbf{Z})-m_{-1}(\mathbf{Z})\}+m_1(\mathbf{Z})e^{-f(\mathbf{Z})}-m_{-1}(\mathbf{Z}) e^{f(\mathbf{Z})} \right]$$
which implies that the minimizer of ${\cal L}(f)$ is
$$f^*(\mathbf{z})=\log\frac{m_{1}(\mathbf{z})}{m_{-1}(\mathbf{z})}$$
for all $\mathbf{z} \in \mbox{Support of } \mathbf{Z}.$
$$ $$
Under the Cox working model for survival outcome, we have
\begin{align*}
\mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T\}=& \mbox{E}_Y \left(\int_0^\tau \left[ Tf(\mathbf{Z})-\log\{\mbox{E}(e^{Tf(\mathbf{Z})}I(\tilde{X}\ge u))\}\right]d N(u)|\mathbf{Z}, T\right)\\
=& \int_0^\tau \left[ f(\mathbf{Z})-\log\{\mbox{E}(e^{Tf(\mathbf{Z})}I(\tilde{X}\ge u))\}\right] \mbox{E} \left\{I(\tilde{X} \ge u)|\mathbf{Z}, T\right\}\lambda_T(u; \mathbf{Z})du
\end{align*}
where $\lambda_t(u; \mathbf{Z})$ is the hazard function for $\tilde{X}^{(t)}$ given $\mathbf{Z}$ for $t=1/-1.$ Since
$$ {\cal L}(f)= \mbox{E}_{\mathbf{Z}}\left[\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=1\}+\frac{1}{2} \mbox{E}_Y\{l(Y, f(\mathbf{Z})T)|\mathbf{Z}, T=-1\} \right] $$
\begin{align*}
\frac{\partial {\cal L}(f)}{\partial f}=&\frac{1}{2}\mbox{E}\int_0^\tau \biggl\{ I(\tilde{X}^{(1)}\ge u)\lambda_1(u; \mathbf{Z})-I(\tilde{X}^{(-1)}\ge u)\lambda_{-1}(u; \mathbf{Z})\\
&-e^{f(\mathbf{Z})}I(\tilde{X}^{(1)}\ge u)\Lambda(u; f)+e^{-f(\mathbf{Z})}I(\tilde{X}^{(-1)}\ge u)\Lambda(u; f) \biggr\}du,
\end{align*}
where $$\Lambda(t; f)=\frac{\mbox{E}[I(\tilde{X} \ge u)\{\lambda_1(u; \mathbf{Z})+\lambda_{-1}(u; \mathbf{Z})\}]}{\mbox{E}\{e^{Tf(\mathbf{Z})}I(\tilde{X}\ge u)\}}.$$
Setting the derivative at zero, the minimizer $f^*(\mathbf{z})$ satisfies
\begin{align*}
&e^{f^*(\mathbf{z})}\mbox{E}\{\Lambda^*(\tilde{X}^{(1)})|\mathbf{Z}=\mathbf{z}\}-e^{-f^*(\mathbf{z})}\mbox{E}\{\Lambda^*(\tilde{X}^{(-1)})|\mathbf{Z}=\mathbf{z}\}\\
=& \mbox{Prob}(C^{(1)}>X^{(1)}|\mathbf{Z}=\mathbf{z})-\mbox{Prob}(C^{(-1)}>X^{(-1)}|\mathbf{Z}=\mathbf{z})
\end{align*}
for all $\mathbf{z} \in \mbox{Support of } \mathbf{Z},$ where $\Lambda^*(u)=\Lambda(u, f^*).$ When censoring rates are the same in two arms for all given $\mathbf{z},$
$$ f^*(z)=-\frac{1}{2}\log \left[ \frac{E \{\Lambda^*(\tilde{X}^{(1)})|\mathbf{Z}=\mathbf{z}\}}{E\{\Lambda^*(\tilde{X}^{(-1)})|\mathbf{Z}=\mathbf{z}\}}\right]$$
\subsection{Justification of the optimal $a_0(\mathbf{z})$ in the efficient augmentation}
Let $S(y, \mathbf{w}^*, \boldsymbol{\gamma})$ be the derivative of the objective function $l(y, \boldsymbol{\gamma}'\mathbf{w}^*)$ with respect to $\boldsymbol{\gamma}.$ $\hat{\boldsymbol{\gamma}}$ is the root to an estimating equation
$$Q(\boldsymbol{\gamma})=N^{-1}\sum_{i=1}^N S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma})=0.$$
Similarly, the augmented estimator $\hat{\boldsymbol{\gamma}}_a$ can be viewed as the root of the estimating equation
$$Q_a(\boldsymbol{\gamma})=N^{-1}\sum_{i=1}^N \left\{S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma})-T_i\cdot \mathbf{a}(\mathbf{Z}_i)\right\}=0,$$
Since $E\{T_i \cdot \mathbf{a}(\mathbf{Z}_i)\}=0$ due to randomization, the solution of the augmented estimating equation always converges to the $\boldsymbol{\gamma}^*$ in probability.
It is straightforward to show that
$$\hat{\boldsymbol{\gamma}}-\boldsymbol{\gamma}^*=N^{-1}A_0^{-1}\sum_{i=1}^N S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma}^*)+o_P(N^{-1})$$
and
$$\hat{\boldsymbol{\gamma}}_a-\boldsymbol{\gamma}^*=N^{-1}A_0^{-1}\sum_{i=1}^N \{S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma}^*)-T_i \mathbf{a}(\mathbf{Z}_i)\}+o_P(N^{-1})$$
where $A_0$ is the derivative of $\mbox{E}\{S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma})\}$ with respect to $\boldsymbol{\gamma}$ at $\boldsymbol{\gamma}=\boldsymbol{\gamma}^*.$ Selecting the optimal $\mathbf{a}(\mathbf{z})$ is equivalent to
minimizing the variance of $\{S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma}^*)-T_i \mathbf{a}(\mathbf{Z}_i)\}.$
Noting that
$$\mbox{E}\left[\{S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma}^*)-T_i \mathbf{a}(\mathbf{Z}_i)\}^{\otimes 2}\right]=\mbox{E}\left[\{S(Y_i, \mathbf{W}_i^*, \boldsymbol{\gamma}^*)-T_i \mathbf{a}_0(\mathbf{Z}_i)\}^{\otimes 2}\right]+\mbox{E}[\{\mathbf{a}(\mathbf{Z}_i)-\mathbf{a}_0(\mathbf{Z}_i)\}^{\otimes 2}],$$
where $\mathbf{a}_0(\mathbf{z})$ satisfies the equation
$$ E \left[\{S(Y, \mathbf{W}^*, \boldsymbol{\gamma}^*)-T \mathbf{a}_0(\mathbf{Z})\} T \eta(\mathbf{Z})\right]=0$$
for any function $\eta(\cdot)$, $\mathbf{a}_0(\cdot)$ is the optimal augmentation term minimizing the variance of $\hat{\boldsymbol{\gamma}}_a.$
Since $\mathbf{a}_0(\cdot)$ is the root of the equation
$$ E\left[\{S(Y, \mathbf{W}^*, \boldsymbol{\gamma}^*)-T \mathbf{a}_0(\mathbf{Z})\}'T \biggm| \mathbf{Z} \right]=0,$$
$$\mathbf{a}_0(\mathbf{z})=\frac{1}{2}\left[\mbox{E}\{S(Y, \mathbf{W}(\mathbf{z})/2, \boldsymbol{\gamma}^*)|\mathbf{Z}=\mathbf{z}, T=1\}- \mbox{E}\{S(Y, -\mathbf{W}(\mathbf{z})/2, \boldsymbol{\gamma}^*)|\mathbf{Z}=\mathbf{z}, T=-1\}\right].$$
$$~~$$
For continuous response, $$S(Y, \boldsymbol{\gamma}'\mathbf{W}^*)=-\frac{1}{2}T\mathbf{W}(\mathbf{Z})\left\{Y-\frac{1}{2}T\mathbf{W}(\mathbf{Z})'\boldsymbol{\gamma} \right\}$$ and
\begin{align*}
a_0(\mathbf{z})=&\frac{1}{2}\left(\mbox{E}[-\mathbf{W}(\mathbf{z})\{Y-\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2\}/2|T=1, \mathbf{Z}=\mathbf{z}]-\mbox{E}[\mathbf{W}(\mathbf{z})\{Y+\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2\}/2|T=-1, \mathbf{Z}=\mathbf{z}]\right)\\
=& -\mathbf{W}(\mathbf{z})\left\{\frac{1}{4}\mbox{E}(Y|T=1, \mathbf{Z}=\mathbf{z})+\frac{1}{4}\mbox{E}(Y|T=-1, \mathbf{Z}=\mathbf{z})\right\}\\
=& -\frac{1}{2}\mathbf{W}(\mathbf{z})\mbox{E}(Y|\mathbf{Z}=\mathbf{z})
\end{align*}
$$~$$
For binary response,
$$S(Y, \boldsymbol{\gamma}'\mathbf{W}^*)=-\frac{1}{2}\mathbf{W}(\mathbf{Z})T \left\{Y-\frac{e^{T\mathbf{W}(\mathbf{Z})'\boldsymbol{\gamma}/2}}{1+e^{T\mathbf{W}(\mathbf{Z})'\boldsymbol{\gamma}/2}}\right\}$$ and
\begin{align*}
a_0(\mathbf{z})=& -\frac{1}{4}\mathbf{W}(\mathbf{z})\left[\mbox{E} \left\{Y-\frac{e^{\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}}{1+e^{\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}}\biggm| T=1, \mathbf{Z}=\mathbf{z}\right\}+\mbox{E} \left\{Y-\frac{e^{-\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}}{1+e^{-\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}} \biggm| T=-1, \mathbf{Z}=\mathbf{z}\right\}\right]\\
=& -\frac{1}{4} \mathbf{W}(\mathbf{z})\left\{\mbox{E}(Y|T=1, \mathbf{Z}=\mathbf{z})+\mbox{E}(Y|T=-1, \mathbf{Z}=\mathbf{z})-\left(\frac{e^{\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}}{1+e^{\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}}+\frac{e^{-\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}}{1+e^{-\mathbf{W}(\mathbf{z})'\boldsymbol{\gamma}^*/2}} \right)\right\}\\
=& -\frac{1}{2}\mathbf{W}(\mathbf{z})\left\{\mbox{E}(Y|\mathbf{Z}=\mathbf{z})-\frac{1}{2}\right\}
\end{align*}
$$~$$
For survival response, the estimating equation based on the partial likelihood function is asymptotically equivalent to the estimating equation $N^{-1}\sum_{i=1}^N S(Y_i, \mathbf{W}^*_i, \boldsymbol{\gamma})=0,$ where
$$S(Y, \mathbf{W}^*, \boldsymbol{\gamma})=-\int_0^\tau \left[\mathbf{W}^*-\mathbf{R}(u; \boldsymbol{\gamma}^*)\right] M(du, \mathbf{W}^*, \boldsymbol{\gamma}^*).$$
Thus, $$\mathbf{a}_0(\mathbf{z})=-\frac{1}{2}\left[\frac{1}{2}\mathbf{W}(\mathbf{z})\left\{G_1(\tau; \mathbf{z})+G_2(\tau; \mathbf{z})\right\}-\int_0^\tau \mathbf{R}(u) \{G_1(du; \mathbf{z})-G_2(du; \mathbf{z})\}\right],$$
\subsection{Lasso algorithm in the efficient augmentation}
In general, the augmentation term is in the form of
$a_0(\mathbf{Z}_i)=\mathbf{W}(\mathbf{Z}_i)'\hat{r}(\mathbf{Z}_i),$
where $\hat{r}(\mathbf{Z}_i)$ is a simple scalar. The lasso regularized objective function can be written as
$$\frac{1}{N}\sum_{i=1}^N \left\{ l(Y_i, \boldsymbol{\gamma}' \mathbf{W}_i^*)-\boldsymbol{\gamma}' \mathbf{W}_i^*\hat{r}(\mathbf{Z}_i)\right\}+\lambda |\boldsymbol{\gamma}|.$$
In general, this lasso problem can be solved iteratively. For example, when $l(\cdot)$ is the log-likelihood function of the logistic regression model, then
we may update $\hat{\boldsymbol{\gamma}}$ iteratively by solving the standard OLS-lasso problem
$$\frac{1}{N}\sum_{i=1}^N \hat{w}_i(\hat{z}_i-\boldsymbol{\gamma}'\mathbf{W}_i^*)^2 +\lambda|\boldsymbol{\gamma}|$$
where $\hat{\boldsymbol{\gamma}}$ is the current estimator for $\boldsymbol{\gamma},$
$$\hat{z}_i=\hat{\boldsymbol{\gamma}}'\mathbf{W}_i^*+\hat{w}_i^{-1}\{Y_i-\hat{p}_i-\hat{r}(\mathbf{Z}_i)\}, ~~~~~\hat{w}_i=\hat{p}_i(1-\hat{p}_i)$$
and
$$\hat{p}_i=\frac{\exp\{\boldsymbol{\gamma}'\mathbf{W}_i^*\}}{1+\exp\{\boldsymbol{\gamma}'\mathbf{W}_i^*\}}.$$
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
Suppose an $n \times d$ design matrix in a linear regression problem is given,
but the response for each point is hidden unless explicitly requested.
The goal is to sample only a small number $k \ll n$ of the responses,
and then produce a weight vector whose sum of squares loss over \emph{all} points is at most $1+\epsilon$ times the minimum.
When $k$ is very small (e.g., $k=d$), jointly sampling diverse subsets of
points is crucial. One such method called \emph{volume sampling} has
a unique and desirable property that the weight vector it produces is an unbiased
estimate of the optimum. It is therefore natural to ask if this method
offers the optimal unbiased estimate in terms of the number of
responses $k$ needed to achieve a $1+\epsilon$ loss approximation.
Surprisingly we show that volume sampling can have poor behavior when
we require a very accurate approximation -- indeed worse than some
i.i.d.~sampling techniques whose estimates are biased, such as
\emph{leverage score sampling}.
We then develop a new rescaled variant of volume sampling that
produces an unbiased estimate which avoids
this bad behavior and has at least as good a tail bound as leverage
score sampling: sample size $k=O(d\log d + d/\epsilon)$ suffices to
guarantee total loss at most $1+\epsilon$ times the minimum
with high probability. Thus, we improve on the best previously known
sample size for an unbiased estimator, $k=O(d^2/\epsilon)$.
Our rescaling procedure leads to a new efficient algorithm
for volume sampling which is based
on a \emph{determinantal rejection sampling} technique with
potentially broader applications to determinantal point processes.
Other contributions include introducing the
combinatorics needed for rescaled volume sampling and developing tail
bounds for sums of dependent random matrices which arise in the
process.
\if 0
A standard approach to this problem is to use i.i.d.~\emph{leverage score sampling},
but this approach is known to perform poorly when $k$ is small (e.g., $k = d$);
in such cases, it is dominated by \emph{volume sampling}, a joint sampling method that explicitly promotes diversity.
Also, volume sampling based methods often lead to unbiased estimators whereas leverage scores give biased estimators.
Surprisingly we show that volume sampling can have poor behavior for
large $k$---indeed worse than leverage score sampling.
We then develop a new rescaled variant of volume sampling that avoids this bad behavior
and has at least as good a tail bound as leverage score sampling:
sample size $k=O(d\log d + d/\epsilon)$ suffices to
guarantee total loss at most $1+\epsilon$ times the minimum
with high probability. Moreover, the resulting estimator is unbiased.
The main technical contribution is developing the
combinatorics of this new variant of volume sampling (which samples with replacement).
Our new techniques also lead to drastically improved running times for
volume sampling.
\fi
\end{abstract}
\section{Introduction}
\label{s:intro}
Consider a linear regression problem where the input points
in $\mathbb R^d$ are provided, but the associated response for each point is
withheld unless explicitly requested. The goal is to
sample the responses for just a small subset of inputs,
and then produce a weight vector whose total square loss
on all $n$ points is at most $1+\epsilon$ times that of the
optimum.\footnote{The total loss of the algorithm being
at most $1+\epsilon$ times the loss of the optimum can be rewritten
as the regret being at most $\epsilon$ times the optimum.}
This scenario is relevant in many applications where
data points are cheap to obtain but responses are expensive.
Surprisingly, with the aid of having all input points available,
such multiplicative loss bounds are achievable
without any range dependence on the points or responses common in
on-line learning \citep[see, e.g.,][]{onlineregr}.
A natural and intuitive approach to this problem is
\emph{volume sampling}, since it
prefers ``diverse'' sets of points that will likely result in a
weight vector with low total loss, regardless of what the
corresponding responses turn out to be~\citep{unbiased-estimates}. Volume sampling is closely related
to optimal design criteria~\citep{optimal-design-book,dual-volume-sampling},
which are appropriate under statistical models of the responses;
here we study a worst-case setting where the algorithm must
use randomization to guard itself against worst-case responses.
Volume sampling and related determinantal point processes are employed in many
machine learning and statistical contexts, including linear
regression~\citep{dual-volume-sampling,unbiased-estimates,regularized-volume-sampling},
clustering and matrix
approximation~\citep{pca-volume-sampling,efficient-volume-sampling,avron-boutsidis13},
summarization and information retrieval~\citep{dpp,k-dpp,dpp-shopping}, and
fairness~\citep{celis2016fair,celis2018fair}. The availability of fast
algorithms for volume sampling~\citep{dual-volume-sampling,unbiased-estimates}
has made it an important technique in the algorithmic toolbox alongside
i.i.d.~leverage score sampling~\citep{drineas2006sampling} and spectral
sparsification~\citep{batson2012twice,lee2015constructing}.
It is therefore surprising that using volume sampling in the context of linear
regression, as suggested in previous
works~{n-d\choose s-d}itep{unbiased-estimates,dual-volume-sampling}, may lead to suboptimal
performance. We construct an example in which, even after sampling up to half
of the responses, the loss of the weight vector from volume sampling is a fixed
factor ${>}1$ larger than the minimum loss. Indeed,
this poor behavior arises because for any sample size ${>}d$, the marginal
probabilities from volume sampling are a mixture of uniform probabilities and
leverage score probabilities, and uniform sampling is well-known to be
suboptimal when the leverage scores are highly non-uniform.
\begin{wrapfigure}{r}{0.45\textwidth}
\vspace{-1.2cm}
\begin{center}
\includegraphics[width=.48\textwidth]{figs/cpusmall_scale.eps}
\captionof{figure}{Plots of the total loss for the sampling
methods (averaged over 100 runs) versus sample
size (shading is standard error) for a libsvm
dataset \textit{cpusmall} \cite{libsvm}.}
\label{f:lb}
\end{center}
\vspace{-.6cm}
\end{wrapfigure}
A possible recourse is to abandon volume sampling in favor of leverage score
sampling~\citep{drineas2006sampling,woodruff2014sketching}. However, all
i.i.d.~sampling methods, including leverage score sampling, suffer from a
coupon collector problem that prevents their effective use at small sample
sizes~\citep{regularized-volume-sampling}. Moreover, the resulting weight
vectors are biased (regarded as estimators for the least
squares solution using all responses), which is a nuisance when averaging
multiple solutions (e.g., as produced in distributed settings). In contrast,
volume sampling offers multiplicative loss bounds even with sample sizes as small as $d$
and it is the \textit{only} known non-trivial method that gives unbiased weight vectors~\citep{unbiased-estimates}.
We develop a new solution, called \emph{leveraged volume sampling}, that
retains the aforementioned benefits of volume sampling while avoiding its
flaws. Specifically, we propose a variant of volume sampling based on rescaling
the input points to ``correct'' the resulting marginals. On the
algorithmic side, this leads to
a new \textit{determinantal rejection sampling} procedure which offers significant
computational advantages over existing volume sampling algorithms,
while at the same time being strikingly simple to implement.
We prove that this new sampling scheme retains the benefits
of volume sampling (like unbiasedness) but avoids the bad behavior demonstrated
in our lower bound example. Along the way, we prove a new generalization of the
Cauchy--Binet formula, which is needed for the rejection sampling denominator.
Finally, we develop a new method for proving matrix tail bounds for leveraged
volume sampling. Our analysis shows that the unbiased
least-squares estimator constructed this way achieves a $1+\epsilon$
approximation factor from a sample of size $O(d \log d + d/\epsilon)$,
addressing an open question
posed by \cite{unbiased-estimates}.
\if 0
Leverage score sampling~{n-d\choose s-d}itep{drineas2006sampling} is a
well-known approach to this problem, and there is an
established methodology~{n-d\choose s-d}itep{woodruff2014sketching} for
proving $1+\mathbf epsilon$ multiplicative loss bounds using
matrix Chernoff bounds~{n-d\choose s-d}itep{matrix-tail-bounds}. However,
non-i.i.d.~joint sampling approaches such as volume
sampling, which chooses subsets of $k$ points based on the
squared volume of the spanned parallelepiped, often yield
better results for small sample sizes---see Figure~\mathbf mathbf ref{f:lb}.
Volume sampling explicitly promotes diverse samples,
thereby avoiding a coupon collector problem that plagues all i.i.d.~sampling methods~{n-d\choose s-d}itep{regularized-volume-sampling}.
The other main advantage of volume sampling is that optimum
the weight vector for the subset of points for which
responses where obtained is an unbiased
estimator of the optimum weight vector for all points and responses.
This unbiasedness holds for all sets of inputs and responses
(i.e. it does not require any noise assumptions.
Since mixtures of unbiased estimators remain unbiased and
therefore unbiasedness is a very useful property when
larger estimators are build from smaller ones.
Ideally we want simply use volume sampling with larger
sample sizes for obtaining $1+\mathbf epsilon$ multiplicative loss
bounds while retaining the good properties of volume sampling.
Surprisingly, we are able to show that volume sampling can have bad
behavior for large sample sizes---even
worse than that of leverage score sampling (also as seen in Figure \mathbf mathbf ref{f:lb}).
Leverage scores are the marginals of size $d$ volume sampling.
However if the sample size $k$ of volume sampling is larger
than $d$, then the marginals are a mixture of the uniform distribution
and leverage scores, which can lead to bad behavior:
We prove that, even after sampling up to half of the responses, the loss from volume sampling can be a fixed factor ${>}1$ larger than the minimum loss.
We circumvent this deficiency as follows:
(i) We rescale the inputs by the inverse leverage scores
(after rescaling the leverage scores are uniform)
and modify volume sampling by sampling with replacement
instead of without replacement.
The resulting new variant called ``leveraged volume sampling'' now
has uniform marginals and this avoids the bad behavior.
(ii) We develop a method for proving
matrix tail bounds for leveraged volume sampling.
Our new analysis method shows that for leveraged volume
sampling, sample size $O(d \log d + d/\mathbf epsilon)$ suffices to
guarantee the $1+\mathbf epsilon$ approximation factor, with high probability.
Moreover the produced weight vector is unbiased.
\fi
\paragraph{Experiments.}
Figure~\ref{f:lb} presents experimental evidence on a benchmark dataset
(\textit{cpusmall} from the libsvm collection \cite{libsvm}) that the
potential bad behavior of volume sampling proven in our lower bound
does occur in practice. Appendix
\ref{sec:experiments} shows more
datasets and a detailed discussion of the experiments. In summary,
leveraged volume sampling avoids the bad behavior of standard volume
sampling, and performs considerably better than leverage score
sampling, especially for small sample sizes $k$.
\paragraph{Related work.}
Despite the ubiquity of volume sampling in many contexts already mentioned above, it has only recently been analyzed for linear regression.
Focusing on small sample sizes, \citet{unbiased-estimates} proved multiplicative bounds for the expected loss of size $k=d$ volume sampling.
Because the estimators produced by volume sampling are unbiased, averaging a number of such estimators produced an estimator based on a sample of size $k = O(d^2/\epsilon)$ with expected loss at most $1+\epsilon$ times the optimum.
It was shown in \cite{regularized-volume-sampling} that if the
responses are assumed to be linear functions of the input
points plus white noise, then size $k =
O(d/\epsilon)$ volume sampling suffices for obtaining the same
expected bounds. These noise assumptions on the response vector are
also central to the task of
A-optimal design, where volume sampling is a key technique
\citep{optimal-design-book,symmetric-polynomials,tractable-experimental-design,proportional-volume-sampling}.
All of these previous results were concerned with bounds that hold in expectation; it is natural to ask if similar (or better) bounds can also be shown to hold with high probability, without noise assumptions.
Concentration bounds for volume sampling and other strong
Rayleigh measures were studied in
\cite{pemantle2014concentration}, but these results are
not sufficient to obtain the tail bounds for volume sampling.
Other techniques applicable to our linear regression
problem include leverage score
sampling~\citep{drineas2006sampling} and spectral
sparsification~\citep{batson2012twice,lee2015constructing}.
Leverage score sampling is an i.i.d.~sampling procedure which achieves
tail bounds matching the ones we obtain here for leveraged volume
sampling; however, it produces biased weight vectors,
and experimental results (see \cite{regularized-volume-sampling} and Appendix
\ref{sec:experiments}) show that it has weaker performance for small
sample sizes.
A different and more elaborate
sampling technique based on spectral
sparsification~\citep{batson2012twice,lee2015constructing}
was recently shown to be effective for linear
regression~\citep{chen2017condition}; however, this method
also does not
produce unbiased estimates, which is a primary concern of this paper
and desirable in many settings. Unbiasedness seems to
require delicate control of the sampling probabilities, which we achieve using determinantal rejection sampling.
\if 0
Other techniques applicable to our linear regression
problem include leverage score
sampling~{n-d\choose s-d}itep{drineas2006sampling} and spectral
sparsification~{n-d\choose s-d}itep{batson2012twice,lee2015constructing}.
Leverage score sampling requires sample size $k = O(d \log
d + d/\mathbf epsilon)$ to achieve loss $1+\mathbf epsilon$ times the
optimum (with high probability). This matches the tail
bound we achieve with leveraged volume sampling. A
different more elaborate and time intensive
sampling technique based on spectral sparsification~{n-d\choose s-d}itep{batson2012twice,lee2015constructing} was recently proposed for this problem, and requires a smaller sample size $k = O(d/\mathbf epsilon)$ to achieve the same guarantee~{n-d\choose s-d}itep{chen2017condition}. However, neither of these techniques is guaranteed to produce unbiased estimates, which is desirable in many settings. Achieving unbiasedness seems to require more delicate control over the sampling probabilities.
Leverage score sampling \mathbf mnote{need to say somewhere that
same tail bound but biased} is a well-known technique in the literature on randomized algorithms for numerical linear algebra; see the monographs {n-d\choose s-d}ite{randomized-matrix-algorithms} and~{n-d\choose s-d}ite{woodruff2014sketching} for overviews of this rich area. Other techniques used in this literature improve on leverage score sampling by reducing the overall computational cost of sampling~{n-d\choose s-d}itep[e.g.,][]{regression-input-sparsity-time,sarlos-sketching}. In our setting, it is the number of responses observed (and hence the total number of points selected) that is the primary bottleneck. Thus certain techniques, such as applying random rotations to the design matrix and response vector, are not beneficial for us.
A different technique\mathbf mnote{Need of ref Eric instead. Also based on
BSS} known as \mathbf emph{BSS sampling}~{n-d\choose s-d}itep[after][]{batson2012twice} can also reduce the sample size relative to leverage score sampling~{n-d\choose s-d}itep[see][]{coresets-regression}.
However, its use in the context of linear regression again
depends on the entire response
vector~{n-d\choose s-d}itep[Lemma C.28]{song2017relative}.
The most closely related setting analyzed in the on-line learning
literature for linear regression is the minimax regret
analysis of {n-d\choose s-d}ite{minimax-linear-regression}.
They also are given all input points but need to predict
the responses in a fixed order whereas in the model considered
here, the learner is allowed to sample the responses before
producing predictions for all points. Curiously enough,
ordered prediction requires a range restriction on the responses whereas in
our setup there is no restriction on the responses.
\fi
\paragraph{Outline and contributions.}
We set up our task of subsampling for linear regression in the next section
and present our lower bound for standard volume sampling.
A new variant of rescaled volume sampling is introduced
in Section \ref{s:resc}. We develop techniques for
proving matrix expectation formulas for this variant
which show that for any rescaling
the weight vector produced for the subproblem is unbiased.
Next, we show that when rescaling with leverage scores,
a new algorithm based on rejection sampling
is surprisingly efficient (Section \ref{s:alg}):
Other than the preprocessing step of computing
leverage scores, the runtime does not depend on $n$
(a major improvement over existing volume sampling algorithms).
Then, in Section \ref{s:tail} we prove
multiplicative loss bounds for leveraged volume sampling
by establishing two important properties which are hard to prove for
joint sampling procedures.
We conclude in Section \ref{s:open} with an open problem
and with a discussion of how rescaling with approximate
leverage scores gives further time improvements for
constructing an unbiased estimator.
\section{Volume sampling for linear regression}
\label{s:versus}
In this section, we describe our linear regression setting, and review
the guarantees that standard volume sampling offers in this
context. Then, we present a surprising lower bound which shows that
under worst-case data, this method can exhibit undesirable behavior.
\subsection{Setting}
\label{s:setting}
Suppose the learner is given $n$ input vectors $\mathbf x_1,\dotsc,\mathbf x_n\in\mathbb R^d$, which
are arranged as the rows of an $n\times d$ input matrix $\mathbf X$.
Each input vector $\mathbf x_i$ has an associated response variable
$y_i\in \mathbb R$ from the response vector $\mathbf y\in\mathbb R^n$. The goal of the
learner is to find a weight vector $\mathbf w\in \mathbb R^d$ that minimizes the
square loss:
\begin{align*}
\mathbf w^*\defeq \argmin_{\mathbf w\in\mathbb R^d} L(\mathbf w),
\;\;\text{where}\; L(\mathbf w)\defeq \sum_{i=1}^n
(\mathbf x_i^\top\mathbf w-y_i)^2=\|\mathbf X\mathbf w - \mathbf y\|^2.
\end{align*}
Given both matrix $\mathbf X$ and vector $\mathbf y$, the least squares solution can be
directly computed as $\mathbf w^* = \mathbf X^+\mathbf y$, where $\mathbf X^+$ is the
pseudo-inverse. Throughout the paper we assume w.l.o.g.~that
$\mathbf X$ has (full) rank $d$.%
\footnote{Otherwise just reduce $\mathbf X$ to a subset of independent columns.
Also assume $\mathbf X$ has no rows of all zeros
(every weight vector has the same loss on such rows, so
they can be removed).}
In our setting, the learner is only given the input
matrix $\mathbf X$, while the response vector $\mathbf y$ remains hidden.
The learner is allowed to select a
subset $S$ of row indices in $[n] = \{1,\dotsc,n\}$ for which the corresponding responses
$y_i$ are revealed. The learner constructs an estimate
$\widehat{\mathbf w}$ of $\mathbf w^*$
using matrix $\mathbf X$ and the partial vector of observed responses.
The learner is evaluated by the loss over all rows
of $\mathbf X$ (including the ones with unobserved responses), and the goal is to
obtain a multiplicative loss bound, i.e., that for some $\epsilon>0$,
\begin{align*}
L(\widehat{\mathbf w})\leq (1+\epsilon)\,L(\mathbf w^*).
\end{align*}
\subsection{Standard volume sampling}
Given $\mathbf X\in\mathbb R^{n\times d}$ and a size $k\ge d$,
standard volume sampling jointly chooses a set $S$
of $k$ indices in $[n]$ with probability
\begin{align*}
\Pr(S) = \frac{\det(\mathbf X_S^\top\mathbf X_S)}{\binom{n-d}{k-d}\det(\mathbf X^\top\mathbf X)},
\end{align*}
where $\mathbf X_S$ is the submatrix of the rows from $\mathbf X$
indexed by the set $S$. The learner then obtains the responses $y_i$, for $i\in S$,
and uses the optimum solution $\mathbf w_S^*=(\mathbf X_S)^+\mathbf y_S$
for the subproblem $(\mathbf X_S,\mathbf y_S)$ as its weight vector. The sampling
procedure can be performed using \emph{reverse iterative sampling} (shown on the
right), which, if carefully implemented, takes $O(nd^2)$ time (see
\cite{unbiased-estimates,regularized-volume-sampling}).
\begin{wrapfigure}{R}{0.31\textwidth}
\renewcommand{\thealgorithm}{}
\ifisarxiv
\vspace{-3mm}
\else
\vspace{-6mm}
\fi
\hspace{-2mm}
\begin{minipage}{0.31\textwidth}
\floatname{algorithm}{}
\begin{algorithm}[H]
{\fontsize{8}{8}\selectfont
\caption{\bfseries \small Reverse iterative sampling}
\begin{algorithmic}[0]
\STATE VolumeSample$(\mathbf X,\,k)\!:$
\STATE \quad$S \leftarrow [n]$
\vspace{1mm}
\STATE \quad{\bfseries while} $|S|>k$
\vspace{-1.5mm}
\STATE \quad\quad $\forall_{i\in S}\!:q_i\!\leftarrow\!
\frac{\det(\mathbf X_{S\backslash i}^\top\!\mathbf X_{S\backslash i})}{\det(\mathbf X_S^\top\mathbf X_S)}$
\STATE \quad\quad Sample $i\propto q_i$ out of $S$
\vspace{1mm}
\STATE \quad\quad $S\leftarrow S \backslash \{i\}$
\STATE \quad{\bfseries end}
\RETURN $S$
\end{algorithmic}
}
\end{algorithm}
\end{minipage}
\end{wrapfigure}
The key property (unique to volume sampling) is
that the subsampled estimator $\mathbf w_S^*$ is unbiased, i.e.,
\iffalse
Given $\mathbf X\in\mathbb R^{n\times d}$ and a size $k\ge d$,
standard volume sampling jointly chooses a set $S$
of $k$ indices in $[n]$ with probability
\mathbf mathbf begin{align*}
\mathbf Pr(S) = \mathbf frac{\det(\mathbf X_S^\top\mathbf X_S)}{{n-d{n-d\choose s-d}hoose k-d}\det(\mathbf X^\top\mathbf X)},
\mathbf end{align*}
where $\mathbf X_S$ denotes a matrix constructed by selecting rows from $\mathbf X$
indexed by the set $S$. The learner then obtains the responses $y_i$, for $i\in S$,
and uses the optimum solution $\mathbf mathbf w_S^*=(\mathbf X_S)^+\mathbf mathbf y_S$
for the subproblem $(\mathbf X_S,\mathbf mathbf y_S)$ as its weight vector. The sampling
procedure can be performed using an reverse iterative sampling (shown on the
right), which, if carefully implemented, takes $O(nd^2)$ time (see
{n-d\choose s-d}ite{unbiased-estimates} and {n-d\choose s-d}ite{regularized-volume-sampling}).
The key property (unique to volume sampling) is
that the subsampled estimator $\mathbf mathbf w_S^*$ is unbiased, i.e.
\fi
\begin{align*}
\mathbb E[\mathbf w_S^*] = \mathbf w^*, \quad \text{where}\quad \mathbf w^* = \argmin_{\mathbf w} L(\mathbf w).
\end{align*}
As discussed in \cite{unbiased-estimates}, this property has important
practical implications in distributed settings:
Mixtures of unbiased estimators remain unbiased (and can
conveniently be used to reduce variance).
Also, if the rows of $\mathbf X$ are in general position, then
for volume sampling
\vspace{-1mm}
\begin{align}
\mathbb E\big[(\mathbf X_S^\top\mathbf X_S)^{-1}\big] =
\frac{n-d+1}{k-d+1}\,(\mathbf X^\top\mathbf X)^{-1}.
\label{eq:square-inverse}
\end{align}
This is important because in A-optimal design bounding
$\mathrm{tr}((\mathbf X_S^\top\mathbf X_S)^{-1})$ is the main concern.
Given these direct connections of volume sampling to linear
regression, it is natural to ask whether this distribution
achieves a loss bound of $(1+\epsilon)$ times the optimum for
small sample sizes $k$.
\iffalse
If we consider a
collection of independently volume sampled sets $S_1,S_2,\dots$, then
the corresponding estimators can be effectively combined via averaging:
\mathbf mathbf begin{align*}
\mathbf frac{1}{t}\sum_{i=1}^t\mathbf mathbf w_{S_i}^*\mathbf underset{t\mathbf mathbf rightarrow\infty}{\longrightarrow} \mathbf mathbf w^*,
\mathbf end{align*}
where the limit is in probability. The above properties hold for an
arbitrary matrix $\mathbf X$ and vector $\mathbf mathbf y$. If we assume that the response
vector $\mathbf mathbf y$ is generated by a linear transformation plus i.i.d.~mean
zero noise, volume sampling offers additional
strong guarantees on the variance of $\mathbf mathbf w_S^*$, also referred to as the
A-optimality criterion in experimental design (see,
e.g. {n-d\choose s-d}ite{regularized-volume-sampling}), through the following
positive definite matrix inequality
\mathbf mathbf begin{align}
\overbrace{\mathbb E\mathbf mathbf big[(\mathbf X_S^\top\mathbf X_S)^{-1}\mathbf mathbf big]}^{\mathbf Var[\mathbf mathbf w_S^*]} \mathbf mathbf preceq
\mathbf frac{n\!-\!d\!+\!1}{k\!-\!d\!+\!1}\overbrace{(\mathbf X^\top\mathbf X)^{-1}}^{\mathbf Var[\mathbf mathbf w^*]}.
\label{eq:square-inverse}
\mathbf end{align}
\mathbf mathbf begin{align*}
\text{If}\quad\mathbf mathbf y=\mathbf X\mathbf mathbf bar{\mathbf mathbf w}+\mathbf mathbf xi,\quad\mathbf mathbf big(\mathbb E[\mathbf mathbf xi]=\mathbf mathbf zero\text{ and }
\mathbf Var[\mathbf mathbf xi]=\sigma^2\mathbf I\mathbf mathbf big),\quad\text{then}\quad \mathbf Var[\mathbf mathbf w_S^*]=\mathbf frac{n\!-\!d\!+\!1}{k\!-\!d\!+\!1}\mathbf Var[\mathbf mathbf w^*].
\mathbf end{align*}
\fi
\subsection{Lower bound for standard volume sampling}
\label{s:lower}
We show that standard volume sampling cannot
guarantee $1+\epsilon$ multiplicative loss bounds on some instances, unless over half of the rows are chosen to be
in the subsample.
\if 0
In this section we construct a linear regression problem
where after sampling $n/2$ of the rows with
standard volume sampling, the expected loss of
the subsampled solution is at least 1.5 times the loss of the best.
We also show that the same lower bound on the loss of the subsampled
estimate holds with probability at least $1/4$.
So, for this type of volume sampling, no good multiplicative loss bounds
are possible.
\fi
\begin{theorem}
\label{t:lower}
Let $(\mathbf X,\mathbf y)$ be an $n\times d$ least squares problem, such that
\begin{align*}
\mathbf X=\begin{pmatrix}
&\multirow{1}{*}{$\mathbf I_{d\times d}$}&\\
\hline
&\gamma\,\mathbf I_{d\times d}&\\
\hline
&\vdots&\\
\hline
&\gamma\,\mathbf I_{d\times d}&
\end{pmatrix} , \quad\mathbf y=
\begin{pmatrix}
\multirow{1}{*}{$\mathbf 1_d$}\\
\hline
\mathbf 0_d\\
\hline
\vdots\\
\hline
\mathbf 0_d\end{pmatrix},\qquad \text{where}\quad \gamma>0.
\end{align*}
Let $\mathbf w_S^*=(\mathbf X_S)^+\mathbf y_S$ be
obtained from size $k$ volume sampling for $(\mathbf X,\mathbf y)$. Then,
\begin{align}
\lim_{\gamma\rightarrow 0}\frac{\mathbb E[L(\mathbf w_S^*)]}{L(\mathbf w^*)} \, \geq\,
1 + \frac{n-k}{n-d},\label{eq:L1}
\end{align}
and there is a $\gamma>0$ such that for any $k\leq \frac{n}{2}$,
\begin{align}
\Pr\bigg(L(\mathbf w_S^*) \geq \Big(1+\frac{1}{2}\Big)L(\mathbf w^*)\bigg) >
\frac{1}{4}.\label{eq:L2}
\end{align}
\end{theorem}
\textbf{Proof }\
In Appendix \ref{a:lower} we show part \eqref{eq:L1}, and that for the chosen
$(\mathbf X,\mathbf y)$ we have $L(\mathbf w^*)\!=\!\sum_{i=1}^d \! (1\!-\!l_i)$ (see
\eqref{eq:lb-loss}), where $l_i=\mathbf x_i^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf x_i$ is the
$i$-th leverage score of $\mathbf X$. Here, we show \eqref{eq:L2}.
The marginal probability of the $i$-th row under volume sampling
(as given by \cite{unbiased-estimates-journal}) is
\begin{align}
\Pr(i\in S) = \theta \ l_i + (1-\theta) \ 1
= 1 - \theta \ (1-l_i),\;
\text{ where }\theta= \frac{n-k}{n-d}.\label{eq:marginal}
\end{align}
Next, we bound the probability that all of the first $d$ input vectors
were selected by volume sampling:
\begin{align*}
\Pr\big([d]\subseteq S\big)&
\overset{(*)}{\leq} \prod_{i=1}^d \Pr(i\in S)
=\prod_{i=1}^d\Big(1- \frac{n-k}{n-d}\,(1-l_i)\Big)
\leq \exp\Big(-\frac{n-k}{n-d}\!\overbrace{L(\mathbf w^*)}^{\sum_{i=1}^d (1-l_i)}\!\!\Big),
\end{align*}
where $(*)$ follows from the negative association of volume sampling (see \cite{dual-volume-sampling}).
If for some $i\in[d]$ we have $i\not\in S$, then $L(\mathbf w_S^*)\geq
1$. So for $\gamma$ such that
$L(\mathbf w^*)=\frac{2}{3}$ and any $k\leq \frac{n}{2}$:
\begin{align*}
\Pr\bigg(L(\mathbf w_S^*)\geq
\Big(1+\frac{1}{2}\Big)\overbrace{L(\mathbf w^*)}^{2/3}\bigg)
&\geq 1 - \exp\Big(\!-\frac{n-k}{n-d}\cdot\frac{2}{3}\Big)
\geq 1 - \exp\Big(\!-\frac{1}{2}\cdot\frac{2}{3}\Big)> \frac{1}{4}.
\hspace{1cm}\BlackBox
\end{align*}
Note that this lower bound only makes use of the negative
association of volume sampling and the form of the
marginals. However, the tail bounds we prove in Section
\ref{s:tail} rely on more subtle properties of volume
sampling. We begin by creating a variant of volume sampling with
rescaled marginals.
\section{Rescaled volume sampling} \label{s:resc}
Given any size $k\geq d$, our goal is to
jointly sample
$k$ row indices $\pi_1,\dots,\pi_k$ with replacement
(instead of a \emph{subset} $S$ of $[n]$ of size $k$, we get a
\emph{sequence} $\pi\in[n]^k$).
The second difference to standard volume sampling is that
we rescale the $i$-th row (and response) by
$\frac{1}{\sqrt{q_i}}$, where
$q = (q_1,\dots,q_n)$ is any discrete distribution over the set of
row indices $[n]$, such that $\sum_{i=1}^nq_i=1$ and $q_i>0$ for all $i\in[n]$.
We now define $q$-rescaled size $k$ volume sampling as a
joint sampling distribution over $\pi\in[n]^k$, s.t.
\begin{align}
\text{$q$-rescaled size $k$ volume sampling:}
\qquad\Pr(\pi) \;\sim \;
\det\Big(\sum_{i=1}^k \frac{1}{q_{\pi_i}}\mathbf x_{\pi_i}\mathbf x_{\pi_i}^\top\Big)
\;\;
\prod_{i=1}^kq_{\pi_i}.\qquad\qquad
\label{eq:sampling}
\end{align}
Using the following rescaling matrix
$\mathbf Q_\pi\defeq\sum_{i=1}^{|\pi|}\frac1{q_{\pi_i}}\mathbf e_{\pi_i}\mathbf e_{\pi_i}^\top\ \in\mathbb R^{n\times
n},$
we rewrite the determinant as $\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)$.
As in standard volume sampling, the normalization factor in
rescaled volume sampling can be given in a closed form
through a novel extension of the Cauchy--Binet formula (proof in Appendix \ref{sec:cauchy-binet-proof}).
\begin{proposition}\label{p:cauchy-binet}
For any $\mathbf X\in\mathbb R^{n\times d}$, $k\geq d$ and $q_1,\dots,q_n>0$, such that $\sum_{i=1}^nq_i=1$,
we have
\begin{align*}
\sum_{\pi\in[n]^k}\!\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)
\,\prod_{i=1}^kq_{\pi_i} = k(k\!-\!1)\cdots(k\!-\!d\!+\!1)\,\det(\mathbf X^\top\mathbf X).
\end{align*}
\end{proposition}
Given a matrix $\mathbf X\in\mathbb R^{n\times d}$, vector $\mathbf y\in\mathbb R^n$ and a
sequence $\pi\in[n]^k$, we are interested in a least-squares problem
$(\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf X, \mathbf Q_\pi^{\sfrac{1}{2}}\mathbf y)$, which selects instances indexed by $\pi$,
and rescales each of them by the corresponding $1/\!\sqrt{q_i}$. This
leads to a natural subsampled least squares estimator
\begin{align*}
\mathbf w_\pi^*=\argmin_{\mathbf w}\sum_{i=1}^k\frac{1}{q_{\pi_i}}\big(\mathbf x_{\pi_i}^\top\mathbf w-y_{\pi_i}\big)^2
= (\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf X)^+\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf y.
\end{align*}
The key property of standard volume sampling
is that the subsampled least-squares estimator is unbiased.
Surprisingly, this property is retained for any $q$-rescaled volume
sampling (proof in Section \ref{sec:unbiasedness-proof}).
As we shall see, this will give us great leeway for choosing
$q$ to optimize our algorithms.
\begin{theorem}\label{t:unbiasedness}
Given a full rank $\mathbf X\in\mathbb R^{n\times d}$ and a response vector
$\mathbf y\in\mathbb R^n$, for any $q$ as above, if $\pi$ is sampled according to
\eqref{eq:sampling}, then
\begin{align*}
\mathbb E[\mathbf w_\pi^*] =\mathbf w^*,\quad\text{where}\quad
\mathbf w^*=\argmin_{\mathbf w}\|\mathbf X\mathbf w-\mathbf y\|^2.
\end{align*}
\end{theorem}
The matrix formula \eqref{eq:square-inverse}, discussed in Section
\ref{s:versus} for standard volume sampling, has a
natural extension to any rescaled volume sampling, turning here into an
inequality (proof in Appendix \ref{sec:square-inverse-proof}).
\begin{theorem}\label{t:square-inverse}
Given a full rank $\mathbf X\in\mathbb R^{n\times d}$ and any $q$ as above, if $\pi$ is sampled according to \eqref{eq:sampling}, then
\begin{align*}
\mathbb E\big[(\mathbf X^\top\mathbf Q_\pi\mathbf X)^{-1}\big]
\preceq \frac{1}{k\!-\!d\!+\!1}(\mathbf X^\top\mathbf X)^{-1}.
\end{align*}
\end{theorem}
\subsection{Proof of Theorem \ref{t:unbiasedness}}
\label{sec:unbiasedness-proof}
We show that the least-squares estimator
$\mathbf w_\pi^* = (\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf X)^+\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf y$
produced from any $q$-rescaled volume sampling is unbiased,
illustrating a proof technique which is also useful for showing
Theorem \ref{t:square-inverse}, as well as Propositions
\ref{p:cauchy-binet} and \ref{p:marginals}. The key idea is to apply
the pseudo-inverse expectation formula for standard volume sampling
(see, e.g., \cite{unbiased-estimates}) first on the
subsampled estimator $\mathbf w_\pi^*$, and then again on the full estimator
$\mathbf w^*$. In the first step, this formula states:
\begin{align*}
\overbrace{(\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf X)^+\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf y}^{\mathbf w_\pi^*}=
\sum_{S\in\sets{k}{d}}
\frac{\det(\mathbf X^\top\mathbf Q_{\pi_S}\mathbf X)}{\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)} \overbrace{(\mathbf Q_{\pi_S}^{\sfrac{1}{2}}\mathbf X)^+\mathbf Q_{\pi_S}^{\sfrac{1}{2}}\mathbf y}^{\mathbf w_{\pi_S}^*},
\end{align*}
\vspace{-6mm}
where $\binom{[k]}{d} \defeq \{S\!\subseteq\! \{1,\dots,k\}:\ |S|\!=\!d\}$ and $\pi_S$ denotes a subsequence of $\pi$ indexed by the elements
of set $S$. Note that since $S$ is of size $d$, we can decompose
the determinant:
\begin{align*}
\det(\mathbf X^\top\mathbf Q_{\pi_S}\mathbf X) = \det(\mathbf X_{\pi_S})^2\,\prod_{i\in S}\frac{1}{q_{\pi_i}}.
\end{align*}
Whenever this determinant is non-zero, $\mathbf w_{\pi_S}^*$ is the
exact solution of a system of $d$ linear equations:
\mathbf mathbf begin{align*}
\mathbf frac{1}{\sqrt{q_{\mathbf mathbf pi_i}}}\mathbf mathbf x_{\mathbf mathbf pi_i}^\top\mathbf mathbf w=\mathbf frac{1}{\sqrt{q_{\mathbf mathbf pi_i}}}y_{\mathbf mathbf pi_i},\qquad\text{for}\quad
i\in S.
\mathbf end{align*}
Thus, the rescaling of each equation by $\mathbf frac{1}{\sqrt{q_{\mathbf mathbf pi_i}}}$
cancels out, and we can simply write
$\mathbf mathbf w_{\mathbf mathbf pi_S}^*=(\mathbf X_{\mathbf mathbf pi_S})^+\mathbf mathbf y_{\mathbf mathbf pi_S}$. Note that this is not the case for
sets larger than $d$ whenever the optimum solution incurs positive loss.
We now proceed with summing over all $\mathbf mathbf pi\in[n]^k$. Following Proposition
\mathbf mathbf ref{p:cauchy-binet}, we define the normalization constant as
$Z=d!{k{n-d\choose s-d}hoose d}\det(\mathbf X^\top\mathbf X)$, and obtain:
\begin{align*}
Z\,\mathbb E[\mathbf w_\pi^*]&=\!\!\!\sum_{\pi\in[n]^k}\!\!\bigg(\prod_{i=1}^kq_{\pi_i}\!\bigg)\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)\,\mathbf w_\pi^*
=\!\!\! \sum_{\pi\in[n]^k}\sum_{S\in\sets{k}{d}}
\!\bigg(\prod_{i\in[k]\backslash S}\!\!q_{\pi_i}\bigg) \det(\mathbf X_{\pi_S})^2 (\mathbf X_{\pi_S})^+\mathbf y_{\pi_S}\\
&\overset{(1)}{=}{k\choose d}\sum_{\bar{\pi}\in[n]^d}\det(\mathbf X_{\bar{\pi}})^2 (\mathbf X_{\bar{\pi}})^+\mathbf y_{\bar{\pi}}
\sum_{\tilde{\pi}\in[n]^{k-d}}\prod_{i=1}^{k-d}q_{\tilde{\pi}_i}\\
&\overset{(2)}{=}{k\choose d}d! \sum_{S\in\sets{n}{d}}
\det(\mathbf X_S)^2 (\mathbf X_S)^+\mathbf y_S\
\bigg(\sum_{i=1}^nq_i\bigg)^{k-d}\
\overset{(3)}{=}\overbrace{{k\choose d} d!\det(\mathbf X^\top\mathbf X)}^{Z}\,\mathbf w^*.
\end{align*}
Note that in $(1)$ we separate $\pi$ into two parts (subset $S$ and its
complement, $[k]\backslash S$) and sum over them separately.
The binomial coefficient ${k\choose d}$ counts the number of ways that $S$
can be ``placed into'' the sequence $\pi$. In $(2)$ we observe that
whenever $\bar{\pi}$ has repetitions, determinant $\det(\mathbf X_{\bar{\pi}})$ is
zero, so we can switch to summing over sets. Finally, $(3)$ again uses the
standard size $d$ volume sampling unbiasedness formula, now for the least-squares
problem $(\mathbf X,\mathbf y)$, and the fact that the $q_i$'s sum to 1.
\section{Leveraged volume sampling: a natural rescaling} \label{s:alg}
\begin{wrapfigure}{R}{0.44\textwidth}
\renewcommand{\thealgorithm}{}
\vspace{-8mm}
\begin{minipage}{0.44\textwidth}
\floatname{algorithm}{}
\begin{algorithm}[H]
{\fontsize{8}{8}\selectfont
\caption{\bfseries\small Determinantal rejection sampling}
\begin{algorithmic}[1]
\vspace{-1mm}
\STATE \textbf{Input:} $\mathbf X\!\in\!\mathbb R^{n\times d},
q=(\frac{l_1}{d},\dots,\frac{l_n}{d}), k\geq d$
\vspace{-1mm}
\STATE $s \leftarrow \max\{k,\,4d^2\}$
\STATE \textbf{repeat}
\vspace{1mm}
\STATE \quad Sample $\pi_1,\dots,\pi_{s}$ i.i.d. $\sim
(q_1,\dots,q_n)$
\STATE \quad Sample $\textit{Accept} \sim \text{Bernoulli}\Big(\frac{\det(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf X^\top\mathbf X)}\Big)$
\vspace{-1mm}
\STATE \textbf{until} $\textit{Accept}=\text{true}$
\STATE $S\leftarrow$
VolumeSample$\big((\mathbf Q_{[1..n]}^{\sfrac{1}{2}}\mathbf X)_\pi,k\big)$
\vspace{-1mm}
\RETURN $\pi_S$
\end{algorithmic}
}
\end{algorithm}
\end{minipage}
\vspace{-0.8cm}
\end{wrapfigure}
Rescaled volume sampling can be viewed as selecting a
sequence $\pi$ of $k$ rank-1 covariates from the covariance matrix
$\mathbf X^\top\mathbf X = \sum_{i=1}^n\mathbf x_i\mathbf x_i^\top$.
If $\pi_1,\dots,\pi_k$ are sampled i.i.d. from $q$, i.e.
$\Pr(\pi)=\prod_{i=1}^k q_{\pi_i}$, then matrix
$\frac{1}{k}\mathbf X^\top\mathbf Q_\pi\mathbf X$ is an unbiased estimator of the
covariance matrix because $\mathbb E[q_{\pi_i}^{-1} \mathbf x_{\pi_i}\mathbf x_{\pi_i}^\top]=\mathbf X^\top\mathbf X$.
In rescaled volume sampling \eqref{eq:sampling},
$\Pr(\pi)\sim$
$\big(\prod_{i=1}^k q_{\pi_i}\big)
\frac{\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf X^\top\mathbf X)}$,
and the latter volume
ratio introduces a bias to that estimator.
However, we show that this bias vanishes when $q$ is exactly
proportional to the leverage scores (proof in Appendix \ref{sec:marginals-proof}).
\begin{proposition}\label{p:marginals}
For any $q$ and $\mathbf X$ as before, if $\pi\in[n]^k$ is sampled according to
\eqref{eq:sampling}, then
\begin{align*}
\mathbb E[\mathbf Q_\pi] = (k\!-\!d)\,\mathbf I + \mathrm{diag}\Big(\frac{l_1}{q_1},\dots,\frac{l_n}{q_n}\Big),
\quad\text{where}\quad l_i\defeq\mathbf x_i^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf x_i.
\end{align*}
In particular,
$\mathbb E[\frac{1}{k}\mathbf X^\top\mathbf Q_\pi\mathbf X]
=\mathbf X^\top\mathbb E[\frac{1}{k}\mathbf Q_\pi]\mathbf X
=\mathbf X^\top\mathbf X$ if and only if
$q_i=\frac{l_i}{d}>0$ for all $i\in[n]$.
\end{proposition}
This special rescaling, which we call \emph{leveraged volume sampling},
has other remarkable properties. Most importantly, it leads to a
simple and efficient algorithm we call \emph{determinantal rejection sampling}: Repeatedly
sample $O(d^2)$ indices $\pi_1,\dots,\pi_s$ i.i.d. from
$q=(\frac{l_1}{d},\dots,\frac{l_n}{d})$, and accept the sample with
probability proportional to its volume ratio.
Having obtained a sample,
we can further reduce its size via reverse iterative sampling.
We show next that this procedure not only returns a
$q$-rescaled volume sample, but also, exploiting the fact
that $q$ is proportional to the leverage scores,
it requires (surprisingly) only a constant number of
iterations of rejection sampling with high probability.
\begin{theorem}\label{t:algorithm}
Given the leverage score distribution
$q=(\frac{l_1}{d},\dots,\frac{l_n}{d})$ and the determinant
$\det(\mathbf X^\top\mathbf X)$ for matrix $\mathbf X\in\mathbb R^{n\times d}$, determinantal
rejection sampling returns sequence $\pi_S$
distributed according to leveraged volume sampling, and w.p. at
least $1\!-\!\delta$ finishes in time $O((d^2\!+\!k)d^2\ln(\frac{1}{\delta}))$.
\end{theorem}
\begin{proof}
We use a composition property of rescaled volume sampling
(proof in Appendix \ref{sec:composition-proof}):
\begin{lemma}\label{l:composition}
Consider the following sampling procedure, for $s>k$:
\begin{align*}
\pi\ &\overset{s}{\sim}\quad\mathbf X &&\text{($q$-rescaled size $s$ volume sampling)},\\
S\ &\overset{k}{\sim} \
\begin{pmatrix}
\frac{1}{\sqrt{q_{\pi_1}}}\mathbf x_{\pi_1}^\top\\
\dots\\
\frac{1}{\sqrt{q_{\pi_s}}}\mathbf x_{\pi_s}^\top\end{pmatrix}=
\big(\mathbf Q_{[1..n]}^{\sfrac12}\mathbf X\big)_\pi&& \text{(standard size $k$ volume sampling)}.
\end{align*}
Then $\pi_S$ is distributed according
to $q$-rescaled size $k$ volume sampling from $\mathbf X$.
\end{lemma}
First, we show that the rejection sampling probability in line 5 of
the algorithm is bounded by $1$:
\begin{align*}
\frac{\det(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf X^\top\mathbf X)}&
=\det\Big(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X(\mathbf X^\top\mathbf X)^{-1}\Big)
\overset{(*)}{\leq}
\bigg(\frac{1}{d}\mathrm{tr}\Big(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X(\mathbf X^\top\mathbf X)^{-1}\Big)\bigg)^{\!d}\\
&=
\Big(\frac{1}{ds}\mathrm{tr}\big(\mathbf Q_\pi\mathbf X(\mathbf X^\top\mathbf X)^{-1}\mathbf X^\top\big)\Big)^d
=\Big(\frac{1}{ds}\sum_{i=1}^s\frac{d}{l_i}\mathbf x_i^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf x_i\Big)^d=1,
\end{align*}
where $(*)$ follows from the geometric-arithmetic mean
inequality for the eigenvalues of the underlying matrix. This shows
that sequence $\pi$ is drawn according to $q$-rescaled volume sampling
of size $s$. Now, Lemma \ref{l:composition} implies correctness of the algorithm.
Next,
we use Proposition \ref{p:cauchy-binet} to compute the expected value
of the acceptance probability from line 5 under the i.i.d. sampling of line 4:
\begin{align*}
\sum_{\pi\in[n]^s}\bigg(\prod_{i=1}^sq_{\pi_i}\bigg)\frac{\det(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf X^\top\mathbf X)}
&=\frac{s(s\!-\!1)\dots(s\!-\!d\!+\!1)}{s^d}\geq
\Big(1-\frac{d}{s}\Big)^d\geq 1 - \frac{d^2}{s} \geq \frac{3}{4},
\end{align*}
where we also used Bernoulli's inequality and the fact that $s\geq
4d^2$ (see line 2). Since the expected value of the acceptance probability is at
least $\frac{3}{4}$, an easy application of Markov's inequality shows
that at each trial there is at least a 50\% chance of it being above
$\frac{1}{2}$. So, the probability of at least $r$ trials occurring is
less than $(1-\frac{1}{4})^r$. Note that the computational cost of one
trial is no more than the cost of the SVD decomposition of matrix $\mathbf X^\top\mathbf Q_\pi\mathbf X$ (for
computing the determinant), which is $O(sd^2)$. The cost of
reverse iterative sampling (line 7) is also $O(sd^2)$ with high probability (as shown by
\cite{regularized-volume-sampling}). Thus, the overall runtime is
$O((d^2+k)d^2r)$, where $r\leq\ln(\frac{1}{\delta})/\ln(\frac{4}{3})$ w.p. at least $1-\delta$.
\end{proof}
\subsection{Tail bounds for leveraged volume sampling}
\label{s:tail}
An analysis of leverage score sampling, essentially following
\citep[Section 2]{woodruff2014sketching}
\citep[which in turn draws from][]{sarlos-sketching},
highlights two basic sufficient conditions on the
(random) subsampling matrix $\mathbf Q_\pi$ that lead to
multiplicative tail bounds for $L(\mathbf w_\pi^*)$.
It is convenient to shift to an orthogonalization of the linear regression task $(\mathbf X,\mathbf y)$
by replacing matrix $\mathbf X$ with a matrix
$\mathbf U=\mathbf X(\mathbf X^\top\mathbf X)^{-\sfrac12}\in\mathbb R^{n\times d}$. It is easy to
check that the columns of $\mathbf U$ have unit length and are
orthogonal, i.e., $\mathbf U^\top\mathbf U=\mathbf I$.
Now, $\mathbf v^*=\mathbf U^\top\mathbf y$ is the least-squares solution for the
orthogonal problem $(\mathbf U,\mathbf y)$,
and the prediction vector $\mathbf U\mathbf v^*=\mathbf U\mathbf U^\top\mathbf y$ for $(\mathbf U,\mathbf y)$ is the same as
the prediction vector $\mathbf X\mathbf w^*=\mathbf X(\mathbf X^\top\mathbf X)^{-1}\mathbf X^\top\mathbf y$
for the original problem $(\mathbf X,\mathbf y)$.
The same property holds for the subsampled estimators, i.e.,
$\mathbf U\mathbf v_\pi^*=\mathbf X\mathbf w_\pi^*$, where $\mathbf v_\pi^*=
(\mathbf Q_\pi^{\sfrac{1}{2}}\mathbf U)^+\mathbf Q_\pi^{\sfrac{1}{2}} \,\mathbf y$.
Volume sampling probabilities are also preserved under this transformation,
so w.l.o.g. we can work with the orthogonal problem.
Now $L(\mathbf v_\pi^*)$ can be rewritten as
\begin{align}
L(\mathbf v_\pi^*) =\|\mathbf U\mathbf v_\pi^*-\mathbf y\|^2\overset{(1)}{=}
\|\mathbf U\mathbf v^*-\mathbf y\|^2
+
\|\mathbf U(\mathbf v_\pi^*-\mathbf v^*)\|^2
\overset{(2)}{=}
L(\mathbf v^*) + \|\mathbf v_\pi^*-\mathbf v^*\|^2 ,
\label{e:pyth}
\end{align}
where $(1)$ follows via the Pythagorean theorem from the fact that
$\mathbf U(\mathbf v_\pi^*-\mathbf v^*)$ lies in the column span of $\mathbf U$ and
the residual vector $\mathbf r=\mathbf U\mathbf v^*-\mathbf y$ is orthogonal to all columns of $\mathbf U$,
and $(2)$ follows from $\mathbf U^\top\mathbf U=\mathbf I$.
By the definition of $\mathbf v_\pi^*$, we can write
$\|\mathbf v_\pi^*-\mathbf v^*\|^2$ as follows:
\begin{align}
\|\mathbf v_\pi^*-\mathbf v^*\| =
\|(\mathbf U^\top\mathbf Q_\pi\mathbf U)^{-1}\;\;\mathbf U^\top\mathbf Q_\pi(\mathbf y-\mathbf U\mathbf v^*)\|
\leq \|\underset{d\times d}{(\mathbf U^\top\mathbf Q_\pi\mathbf U)^{-1}}\|\,
\|\underset{d\times 1}{\vphantom{(\mathbf U^\top\mathbf Q_\pi\mathbf U)^{-1}}\mathbf U^\top\mathbf Q_\pi\,\mathbf r}\|,
\label{e:prod}
\end{align}
where $\|\mathbf A\|$ denotes the matrix 2-norm
(i.e., the largest singular value) of $\mathbf A$; when $\mathbf A$ is a
vector, then $\|\mathbf A\|$ is its Euclidean norm.
This breaks our task down to showing two key properties:
\begin{enumerate}
\item \textit{Matrix multiplication:}\quad Upper bounding the Euclidean norm $\|\mathbf U^\top\mathbf Q_\pi\,\mathbf r\|$,
\item \textit{Subspace embedding:}\quad Upper bounding the matrix 2-norm $\|(\mathbf U^\top\mathbf Q_\pi\mathbf U)^{-1}\|$.
\end{enumerate}
We start with a theorem that implies
strong guarantees for approximate matrix multiplication with leveraged
volume sampling. Unlike with i.i.d. sampling, this result requires
controlling the pairwise dependence
between indices selected under rescaled volume sampling. Its proof is
an interesting application of a classical Hadamard matrix
product inequality from
\cite{hadamard-product-inequality} (proof in Appendix~\ref{sec:multiplication-proof}).
\begin{theorem}\label{t:multiplication}
Let $\mathbf U\in\mathbb R^{n\times d}$ be a matrix s.t. $\mathbf U^\top\mathbf U=\mathbf I$.
If sequence $\pi\in[n]^k$ is selected using leveraged volume
sampling of size $k\geq \frac{2d}{\epsilon}$, then
for any $\mathbf r\in\mathbb R^n$,
\begin{align*}
\mathbb E\bigg[\Big\|\frac{1}{k}\mathbf U^\top\mathbf Q_\pi\mathbf r - \mathbf U^\top\mathbf r\Big\|^2\bigg] \leq
\epsilon\, \|\mathbf r\|^2.
\end{align*}
\end{theorem}
Next, we turn to the subspace embedding property. The
following result is remarkable because
standard matrix tail bounds used to prove this property for leverage score
sampling are not applicable to volume sampling. In fact, obtaining
matrix Chernoff bounds for negatively associated joint distributions
like volume sampling is an active area of research, as discussed in
\cite{harvey2014pipage}. We address this challenge by
defining a coupling procedure for volume sampling and
uniform sampling without replacement, which leads to a curious
reduction argument described in Appendix \ref{sec:spectral-proof}.
\begin{theorem}
\label{t:spectral}
Let $\mathbf U\in\mathbb R^{n\times d}$ be a matrix s.t. $\mathbf U^\top\mathbf U=\mathbf I$. There is an
absolute constant $C$, s.t. if sequence $\pi\in[n]^k$ is selected
using leveraged volume sampling of size $k\geq
C\,d\ln(\frac{d}{\delta})$, then
\begin{align*}
\Pr\bigg(\lambda_{\min}\Big(\frac{1}{k}\mathbf U^\top\mathbf Q_\pi\mathbf U\Big)\leq
\frac{1}{8}\bigg) \leq \delta.
\end{align*}
\end{theorem}
Theorems \ref{t:multiplication} and \ref{t:spectral} imply
that the unbiased estimator $\mathbf w_\pi^*$ produced from leveraged volume
sampling achieves multiplicative tail bounds with sample size
$k=O(d\log d + d/\epsilon)$.
\begin{corollary}
Let $\mathbf X\in\mathbb R^{n\times d}$ be a full rank matrix. There is an absolute constant
$C$, s.t.~if sequence $\pi\in[n]^k$ is selected using leveraged volume
sampling of size $k\geq C\,\big(d\ln(\frac{d}{\delta}) +
\frac{d}{\epsilon\delta}\big)$, then for estimator
\begin{align*}
\mathbf w_\pi^* = \argmin_{\mathbf w} \|\mathbf Q_\pi^{\sfrac{1}{2}}(\mathbf X\mathbf w - \mathbf y)\|^2,
\end{align*}
we have $L(\mathbf w_\pi^*)\leq (1+\epsilon)\,L(\mathbf w^*)$ with probability at least $1-\delta$.
\end{corollary}
\textbf{Proof} \ Let
$\mathbf U=\mathbf X(\mathbf X^\top\mathbf X)^{-\sfrac12}$.
Combining Theorem \ref{t:multiplication} with
Markov's inequality, we have that for large enough $C$,
$\|\mathbf U^\top\mathbf Q_\pi\,\mathbf r\|^2\leq \epsilon\,\frac{k^2}{8^2}\|\mathbf r\|^2$ w.h.p., where
$\mathbf r=\mathbf y-\mathbf U\mathbf v^*$. Finally, following (\ref{e:pyth}) and
(\ref{e:prod}) above,
we have that w.h.p.
\begin{align*}
L(\mathbf w_\pi^*) &\leq L(\mathbf w^*) +
\|(\mathbf U^\top\mathbf Q_\pi\mathbf U)^{-1}\|^2\,\|\mathbf U^\top\mathbf Q_\pi\,\mathbf r\|^2
\leq L(\mathbf w^*) + \frac{8^2}{k^2}\,\epsilon\frac{k^2}{8^2}\,\|\mathbf r\|^2
=(1+\epsilon)\,L(\mathbf w^*).\hspace{0.2cm} \BlackBox
\end{align*}
\section{Conclusion} \label{s:open}
We developed a new variant of volume sampling which produces the first
known unbiased subsampled least-squares estimator with strong multiplicative
loss bounds. In the process, we proved a novel extension of the
Cauchy-Binet formula, as well as other fundamental combinatorial equalities.
Moreover, we proposed an efficient algorithm called determinantal
rejection sampling,
which is to our knowledge the first joint determinantal sampling
procedure that (after an initial $O(nd^2)$ preprocessing
step for computing leverage scores) produces its $k$ samples in time
$\widetilde{O}((d^2\!+\!k)d^2)$, independent of the data size $n$.
When $n$ is very large, the preprocessing time can be reduced to
$\widetilde{O}(nd + d^5)$ by rescaling
with sufficiently accurate approximations of the leverage
scores. Surprisingly, the estimator
stays unbiased and the loss bound still holds with only
slightly revised constants.
For the sake of clarity, we presented the algorithm based
on rescaling with exact leverage scores in the main body of the paper.
However, we outline the changes needed when using approximate
leverage scores in Appendix \ref{sec:fast-alg}.
In this paper we focused on tail bounds. However, we conjecture that
expected bounds of the form $\mathbb E [L(\mathbf w_\pi^*)] \le (1+\epsilon)
L(\mathbf w^*)$ also hold for a variant of volume sampling of size $O(\frac{d}{\epsilon})$.
\bibliographystyle{plain}
\bibliography{pap}
\clearpage
\newpage
\appendix
\section{Proof of part \eqref{eq:L1} from Theorem~\ref{t:lower}}
\label{a:lower}
First, let us calculate $L(\mathbf w^*)$. Observe that
\vspace{-2mm}
\begin{align*}
(\mathbf X^\top\mathbf X)^{-1} &= \overbrace{\Big(1 +
\frac{n-d}{d}\gamma^2\Big)^{-1}}^{c}\ \mathbf I,\\
\text{and}\quad \mathbf w^* &= c\,\mathbf X^\top\mathbf y = c\,\mathbf 1_d.
\end{align*}
The loss $L(\mathbf w)$ of any $\mathbf w \in \mathbb R^d$ can be decomposed as $L(\mathbf w) = \sum_{i=1}^d L_i(\mathbf w)$, where $L_i(\mathbf w)$ is the
total loss incurred on all input vectors $\mathbf e_i$ or $\gamma\mathbf e_i$:
\vspace{-4mm}
\begin{align*}
L_i(\mathbf w^*) = (1-c)^2 +
\overbrace{\frac{n-d}{d}\gamma^2}^{\frac{1}{c}-1}\, c^2
= 1-c.
\end{align*}
Note that the $i$-th leverage score of $\mathbf X$ is equal to $l_i=\mathbf x_i^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf x_i=c$, so we obtain that
\begin{align}
L(\mathbf w^*)=d\,(1-c) = \sum_{i=1}^d(1-l_i).\label{eq:lb-loss}
\end{align}
Next, we compute $L(\mathbf w_S^*)$. Suppose that
$S\subseteq\{1..n\}$ is produced by size $k$ standard volume
sampling. Note that if for some $1\le i\le d$ we have $i\not\in S$,
then $(\mathbf w_S^*)_i=0$ and therefore $L_i(\mathbf w_S^*)=1$.
Moreover, denoting $b_i\defeq \mathbf 1_{[i\in S]}$,
\begin{align*}
(\mathbf X_S^\top\mathbf X_S)^{-1} &\!\succeq\! (\mathbf X^\top\mathbf X)^{-1}\!=\!c\,\mathbf I,
\;\;\text{and}\;\; \mathbf X_S^\top\mathbf y_S \!=\! (b_1,\ldots,b_d)^\top\!,
\end{align*}
so if $i\in S$, then $(\mathbf w_S^*)_i\geq c$ and
\begin{align*}
L_i(\mathbf w_S^*) \geq \frac{n-d}{d} \,\gamma^2\,c^2
= \Big(\frac{1}{c}-1\Big) c^2 = c\,L_i(\mathbf w^*).
\end{align*}
Putting the cases of $i\in S$ and $i\not\in S$ together, we get
\begin{align*}
L_i(\mathbf w_S^*) &\geq c\,L_i(\mathbf w^*) + (1-c\,L_i(\mathbf w^*))\,(1-b_i)\\
&\geq c\,L_i(\mathbf w^*) + c^2(1-b_i).
\end{align*}
Applying the marginal probability formula for volume sampling
(see \eqref{eq:marginal}), we note that
\begin{align}
\mathbb E[1-b_i] &= 1-\Pr(i\in S) = \frac{n-k}{n-d}\,(1-c) = \frac{n-k}{n-d}\,L_i(\mathbf w^*).
\label{e:1mpi}
\end{align}
Taking expectation over $L_i(\mathbf w_S^*)$ and summing the components over $i \in [d]$, we get
\begin{align*}
\mathbb E[L(\mathbf w_S^*)] \geq L(\mathbf w^*)\Big(c+c^2\frac{n-k}{n-d}\Big).
\end{align*}
Note that as $\gamma\rightarrow 0$, we have $c\rightarrow 1$, thus showing \eqref{eq:L1}.
\section{Properties of rescaled volume sampling}
We give proofs of the properties of rescaled volume
sampling which hold for any rescaling distribution $q$. In this
section, we will use $Z=d!{k\choose d}\det(\mathbf X^\top\mathbf X)$ as the
normalization constant for rescaled volume sampling.
\subsection{Proof of Proposition \ref{p:cauchy-binet}}
\label{sec:cauchy-binet-proof}
First, we apply the Cauchy-Binet formula to
the determinant term specified by a fixed sequence $\pi\in[n]^k$:
\begin{align*}
\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)= \sum_{S\in\sets{k}{d}}
\det(\mathbf X^\top\mathbf Q_{\pi_S}\mathbf X) = \sum_{S\in\sets{k}{d}}
\det(\mathbf X_{\pi_S})^2 \prod_{i\in S}\frac{1}{q_{\pi_i}}.
\end{align*}
Next, we compute the sum, using the above identity:
\begin{align*}
\sum_{\pi\in[n]^k}\!\!\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)\prod_{i=1}^kq_{\pi_i}
&=\sum_{\pi\in[n]^k}\sum_{S\in\sets{k}{d}}\det(\mathbf X_{\pi_S})^2
\prod_{i\in[k]\backslash S}q_{\pi_i}\\
&={k\choose d}\!\!\sum_{\bar{\pi}\in[n]^d}\!\!\det(\mathbf X_{\bar{\pi}})^2\!\!
\sum_{\tilde{\pi}\in[n]^{k\!-\!d}}\prod_{i=1}^{k-d}q_{\tilde{\pi}_i}\\
&={k\choose d}\!\!\sum_{\bar{\pi}\in[n]^d}\!\!
\det(\mathbf X_{\bar{\pi}})^2\ \Big(\sum_{i=1}^nq_i\Big)^{k-d}\\
&={k\choose d}d!\sum_{S\in\sets{n}{d}}\det(\mathbf X_S)^2
=k(k\!-\!1)\dots(k\!-\!d\!+\!1)\det(\mathbf X^\top\mathbf X),
\end{align*}
where the steps closely follow the corresponding derivation for Theorem
\ref{t:unbiasedness}, given in Section \ref{sec:unbiasedness-proof}.
\subsection{Proof of Theorem \ref{t:square-inverse}}
\label{sec:square-inverse-proof}
We will prove that for any vector $\mathbf v\in\mathbb R^d$,
\begin{align*}
\mathbb E\big[\mathbf v^\top(\mathbf X^\top\mathbf Q_\pi\mathbf X)^{-1}\mathbf v\big] \leq \frac{\mathbf v^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf v}{k\!-\!d\!+\!1},
\end{align*}
which immediately implies the corresponding matrix inequality.
First, we use Sylvester's formula, which holds whenever
a matrix $\mathbf A\in\mathbb R^{d\times d}$ is full rank:
\begin{align*}
\det(\mathbf A+\mathbf v\mathbf v^\top) = \det(\mathbf A)\,
\big(1+\mathbf v^\top \mathbf A^{-1}\mathbf v\big).
\end{align*}
Note that whenever the matrix is not full rank, its determinant is
$0$ (in which case we avoid computing the matrix inverse), so we have for any $\pi\in[n]^k$:
\begin{align*}
\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)\ \mathbf v^\top (\mathbf X^\top\mathbf Q_\pi\mathbf X)^{-1}\mathbf v
&\leq \det(\mathbf X^\top\mathbf Q_\pi\mathbf X+\mathbf v\mathbf v^\top) - \det(\mathbf X^\top\mathbf Q_\pi\mathbf X)\\
&\overset{(*)}{=}
\sum_{S\in\sets{k}{d\!-\!1}}\det(\mathbf X_{\pi_S}^\top\mathbf X_{\pi_S}+\mathbf v\mathbf v^\top)\prod_{i\in S}\frac{1}{q_{\pi_i}},
\end{align*}
where $(*)$ follows from applying the Cauchy-Binet formula to both of
the determinants, and cancelling out common terms. Next, we proceed
in a standard fashion, summing over all $\pi\in[n]^k$:
\begin{align*}
Z\ \mathbb E\big[
\mathbf v^\top (\mathbf X^\top\mathbf Q_\pi\mathbf X)^{-1}\mathbf v\big]
&=\sum_{\pi\in[n]^k}\!\! \mathbf v^\top
(\mathbf X^\top\mathbf Q_\pi\mathbf X)^{-1}\mathbf v\det(\mathbf X^\top\mathbf Q_\pi\mathbf X)\prod_{i=1}^kq_{\pi_i}\\
&\leq
\sum_{\pi\in[n]^k}\sum_{S\in\sets{k}{d\!-\!1}}\
\!\!\!\det(\mathbf X_{\pi_S}^\top\mathbf X_{\pi_S}+\mathbf v\mathbf v^\top)
\prod_{i\in [k]\backslash S}q_{\pi_i}\\
&= {k\choose d\!-\!1} \sum_{\bar{\pi}\in[n]^{d-1}}
\!\!\!\det(\mathbf X_{\bar{\pi}}^\top\mathbf X_{\bar{\pi}}+\mathbf v\mathbf v^\top)
\sum_{\tilde{\pi}\in[n]^{k-d+1}}\prod_{i=1}^{k-d+1}q_{\tilde{\pi}_i}\\
&= {k\choose d\!-\!1}(d\!-\!1)! \sum_{S\in\sets{n}{d\!-\!1}}
\!\!\!\det(\mathbf X_S^\top\mathbf X_S+\mathbf v\mathbf v^\top)\\
&=\frac{d!{k\choose
d}}{k\!-\!d\!+\!1}\big(\det(\mathbf X^\top\mathbf X+\mathbf v\mathbf v^\top)-\det(\mathbf X^\top\mathbf X)\big)
=Z\,\frac{\mathbf v^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf v}{k\!-\!d\!+\!1}.
\end{align*}
\subsection{Proof of Proposition \ref{p:marginals}}
\label{sec:marginals-proof}
First, we compute the marginal probability of a fixed element of
sequence $\pi$ containing a particular index $i\in[n]$ under
$q$-rescaled volume sampling:
\begin{align*}
Z&\ \Pr(\pi_k\!=\!i) = \sum_{\pi\in[n]^{k-1}}\,\det(\mathbf X^\top\mathbf Q_{[\pi,i]}\mathbf X)
\ q_i\,\prod_{t=1}^{k-1}q_{\pi_t}\\
&=\underbrace{q_i\!\!\!\sum_{\pi\in[n]^{k\!-\!1}}\sum_{S\in\sets{k\!-\!1}{d}}\!\!\!\det(\mathbf X_{\pi_S})^2
\!\!\! \prod_{t\in[k\!-\!1]\backslash S}\!\!\!q_{\pi_t}}_{T_1} +
\!\underbrace{\sum_{\pi\in[n]^{k\!-\!1}}\sum_{S\in\sets{k\!-\!1}{d\!-\!1}}
\!\!\!\det(\mathbf X_{\pi_S}^\top\mathbf X_{\pi_S} + \mathbf x_i\mathbf x_i^\top) \!\!\!
\prod_{t\in[k\!-\!1]\backslash S}\!\!\!q_{\pi_t}}_{T_2},
\end{align*}
where the first term can be computed by following the derivation in
Appendix \ref{sec:cauchy-binet-proof}, obtaining $T_1 =
q_i\frac{k-d}{k}\,Z$, and the second term is derived as in Appendix
\ref{sec:square-inverse-proof}, obtaining
$T_2=\frac{l_i}{k}\,Z$. Putting this together, we get
\begin{align*}
\Pr(\pi_k\!=\!i)=\frac{1}{k}\big((k\!-\!d)\,q_i + l_i\big).
\end{align*}
Note that by symmetry this applies to any element of the sequence. We
can now easily compute the desired expectation:
\begin{align*}
\mathbb E\big[(\mathbf Q_\pi)_{ii}\big] =\frac{1}{q_i} \sum_{t=1}^k\Pr(\pi_t\!=\!i)
= (k\!-\!d) + \frac{l_i}{q_i}.
\end{align*}
\subsection{Proof of Lemma \ref{l:composition}}
\label{sec:composition-proof}
The first step of the reverse iterative sampling procedure
described in Section \ref{s:versus} involves removing one row from the
given matrix with probability proportional to the squared volume of the
remaining submatrix:
\begin{align*}
\forall_{i\in S}\qquad \Pr(i\,|\,\boldsymbol\pi_S)=
\frac{\det(\mathbf X^\top\mathbf Q_{\boldsymbol\pi_{S\backslash i}}\mathbf X)}
{(|S|-d)\det(\mathbf X^\top\mathbf Q_{\boldsymbol\pi}\mathbf X)}.
\end{align*}
Suppose that $k=s-1$ and let
$\tilde{\boldsymbol\pi}=\boldsymbol\pi_S\in[n]^{s-1}$ denote the sequence obtained
after performing one step of the row-removal procedure. Then,
\begin{align*}
\Pr({\tilde{\boldsymbol\pi}})&=\sum_{i=1}^n\ s\
\overbrace{\Pr(i\,|\,[{\tilde{\boldsymbol\pi}},i])}^{
\text{removing one row}}\quad
\overbrace{\Pr([{\tilde{\boldsymbol\pi}},i])}^{\text{rescaled sampling}} \\
&= \sum_{i=1}^n \ s\
\frac{\det(\mathbf X^\top\mathbf Q_{\tilde{\boldsymbol\pi}}\mathbf X)}{(s\!-\!d)\det(\mathbf X^\top\mathbf Q_{[{\tilde{\boldsymbol\pi}},i]}\mathbf X)}\
\frac{\det(\mathbf X^\top\mathbf Q_{[{\tilde{\boldsymbol\pi}},i]}\mathbf X)\,(\prod_{j=1}^{s-1}q_{\tilde\pi_j})\, q_i}{\frac{s!}{(s-d)!}\det(\mathbf X^\top\mathbf X)}\\
&=\frac{\det(\mathbf X^\top\mathbf Q_{{\tilde{\boldsymbol\pi}}}\mathbf X)
(\prod_{j=1}^{s-1}q_{\tilde\pi_j})}{\frac{s-d}{s}\frac{s!}{(s-d)!}\det(\mathbf X^\top\mathbf X)}
\sum_{i=1}^nq_i = \frac{\det(\mathbf X^\top\mathbf Q_{{\tilde{\boldsymbol\pi}}}\mathbf X)\, (\prod_{j=1}^{s-1}q_{\tilde\pi_j}) }{\frac{(s-1)!}{(s-1-d)!}\det(\mathbf X^\top\mathbf X)},
\end{align*}
where the factor $s$ next to the sum counts the number of ways to place index $i$
into the sequence $\tilde{\boldsymbol\pi}$. Thus, by induction, for any $k<s$ the
algorithm correctly samples from $q$-rescaled volume sampling.
\section{Proof of Theorem \ref{t:multiplication}}
\label{sec:multiplication-proof}
We rewrite the expected square norm as:
\begin{align*}
\mathbb E\bigg[\Big\|\frac{1}{k}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf r - \mathbf U^\top\mathbf r\Big\|^2\bigg]
&= \mathbb E\bigg[\Big\|\mathbf U^\top\!\Big(\frac{1}{k}\mathbf Q_{\boldsymbol\pi}\!-\!\mathbf I\Big)\mathbf r\Big\|^2\bigg]
=\mathbb E\bigg[\mathbf r^\top\Big(\frac{1}{k}\mathbf Q_{\boldsymbol\pi}\!-\!\mathbf I\Big)\mathbf U\mathbf U^\top\!
\Big(\frac{1}{k}\mathbf Q_{\boldsymbol\pi}\!-\!\mathbf I\Big)\mathbf r\bigg]\\
&=\mathbf r^\top\ \mathbb E\bigg[ \Big(\frac{1}{k}\mathbf Q_{\boldsymbol\pi}\!-\!\mathbf I\Big)\mathbf U\mathbf U^\top\!
\Big(\frac{1}{k}\mathbf Q_{\boldsymbol\pi}\!-\!\mathbf I\Big)\bigg]\ \mathbf r\\
&\le \lambda_{\max}\Big(\underbrace{ \big(\mathbb E[(z_i\!-\!1)(z_j\!-\!1)]\,\mathbf u_i^\top\mathbf u_j\big)_{ij}}_{\mathbf M}\Big)\,\|\mathbf r\|^2,
\quad\text{where } z_i=\frac{1}{k}(\mathbf Q_{\boldsymbol\pi})_{ii}.
\end{align*}
It remains to bound $\lambda_{\max}(\mathbf M)$. By Proposition
\ref{p:marginals}, for leveraged volume sampling $\mathbb E[(\mathbf Q_{\boldsymbol\pi})_{ii}]=k$, so
\begin{align*}
\mathbb E[(z_i\!-\!1)(z_j\!-\!1)] = \frac{1}{k^2}\Big(\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}(\mathbf Q_{\boldsymbol\pi})_{jj}\big] -
\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\big]\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{jj}\big]\Big) =
\frac{1}{k^2}\,\mathrm{cov}\big[(\mathbf Q_{\boldsymbol\pi})_{ii},\,(\mathbf Q_{\boldsymbol\pi})_{jj}\big].
\end{align*}
For rescaled volume sampling this covariance is given in the
following lemma, proven in Appendix \ref{a:pairwise-formula}.
\begin{lemma}\label{l:pairwise-formula}
For any $\mathbf X$ and $q$, if sequence $\boldsymbol\pi\in[n]^k$ is sampled from
$q$-rescaled volume sampling, then
\begin{align*}
\mathrm{cov}\big[(\mathbf Q_{\boldsymbol\pi})_{ii},\,(\mathbf Q_{\boldsymbol\pi})_{jj}\big] =\mathbf 1_{i=j} \frac{1}{q_i}\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\big]
- (k\!-\!d) -
\frac{(\mathbf x_i^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf x_j)^2}{q_iq_j}.
\end{align*}
\end{lemma}
Since $\|\mathbf u_i\|^2=l_i=dq_i$ and
$\mathbf u_i^\top(\mathbf U^\top\mathbf U)^{-1}\mathbf u_j=\mathbf u_i^\top\mathbf u_j$, we can express matrix $\mathbf M$ as follows:
\begin{align*}
\mathbf M = \mathrm{diag}\Big(\frac{d\ \mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\big]}{ \|\mathbf u_i\|^2k^2}\|\mathbf u_i\|^2\Big)_{i=1}^n
-\frac{k\!-\!d}{k^2}\mathbf U\mathbf U^\top
- \frac{d^2}{k^2}\bigg(\frac{(\mathbf u_i^\top\mathbf u_j)^3}{\|\mathbf u_i\|^2\|\mathbf u_j\|^2}\bigg)_{ij}.
\end{align*}
The first term simplifies to $\frac{d}{k}\mathbf I$, and the second term is
negative semi-definite, so
\begin{align*}
\lambda_{\max}(\mathbf M) \leq \frac{d}{k} + \frac{d^2}{k^2}
\bigg\|\bigg(\frac{(\mathbf u_i^\top\mathbf u_j)^3}{\|\mathbf u_i\|^2\|\mathbf u_j\|^2}\bigg)_{ij}\bigg\|.
\end{align*}
Finally, we decompose the last term into a Hadamard product
of matrices, and apply a classical inequality by
\cite{hadamard-product-inequality} (the symbol ``$\circ$'' denotes
the Hadamard matrix product):
\begin{align*}
\bigg\|\bigg(
\frac{(\mathbf u_i^\top\mathbf u_j)^3}{\|\mathbf u_i\|^2\|\mathbf u_j\|^2}\bigg)_{\!ij}\bigg\|
\quad&=\quad
\bigg\|\bigg(
\frac{\mathbf u_i^\top\mathbf u_j}{\|\mathbf u_i\|\,\|\mathbf u_j\|}
\bigg)_{\!ij}
\circ\bigg(
\frac{(\mathbf u_i^\top\mathbf u_j)^2}{\|\mathbf u_i\|\|\mathbf u_j\|}
\bigg)_{\!ij}\bigg\|\\
&\leq \quad\bigg\|\bigg(\frac{(\mathbf u_i^\top\mathbf u_j)^2}{\|\mathbf u_i\|\|\mathbf u_j\|}
\bigg)_{\!ij}\bigg\|\quad=\quad
\bigg\|\bigg(
\frac{\mathbf u_i^\top\mathbf u_j}{\|\mathbf u_i\|\,\|\mathbf u_j\|}
\bigg)_{\!ij}
\circ\mathbf U\mathbf U^\top\bigg\|\\
&\leq \quad\|\mathbf U\mathbf U^\top\|\ =\ 1.
\end{align*}
Thus, we conclude that
$\mathbb E[\|\frac{1}{k}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf r-\mathbf U^\top\mathbf r\|^2]\leq
(\frac{d}{k}+\frac{d^2}{k^2})\|\mathbf r\|^2$, completing the proof.
\subsection{Proof of Lemma~\ref{l:pairwise-formula}}
\label{a:pairwise-formula}
We compute the marginal probability of two elements in the
sequence $\boldsymbol\pi$ having particular values $i,j\in[n]$:
\begin{align*}
Z\,\Pr\big((\pi_{k-1}\!=\!i)\wedge(\pi_k\!=\!j)\big) &=
\sum_{\boldsymbol\pi\in[n]^{k-2}}\sum_{S\in\sets{k}{d}}\det(\mathbf X_{[\boldsymbol\pi,i,j]_S}^\top\mathbf X_{[\boldsymbol\pi,i,j]_S})
\prod_{t\in[k]\backslash S}q_{[\boldsymbol\pi,i,j]_t}.
\end{align*}
We partition the set $\binom{[k]}{d}$ of all subsets of size $d$ into
four groups, and summing separately over each of the groups, we have
\begin{align*}
Z\,\Pr\big((\pi_{k-1}\!=\!i)\wedge(\pi_k\!=\!j)\big) = T_{00} + T_{01}
+ T_{10} + T_{11},\qquad\text{where:}
\end{align*}
\begin{enumerate}
\item Let $G_{00} = \{S\!\in\! \binom{[k]}{d}:\ k\!-\!1\!\not\in \!S,\
k\!\not\in\! S\}$; following the
derivation in Appendix \ref{sec:cauchy-binet-proof},
\begin{align*}
T_{00} =
q_i\,q_j\sum_{\boldsymbol\pi\in[n]^{k-2}}\sum_{S\in G_{00}}
\det(\mathbf X_{\boldsymbol\pi_S})^2
\prod_{t\in[k\!-\!2]\backslash S}q_{\pi_t} = q_i\,q_j\frac{(k\!-\!d\!-\!1)(k\!-\!d)}{(k\!-\!1)\,k}\,Z.
\end{align*}
\item Let $G_{10} = \{S\!\in\! \binom{[k]}{d}:\ k\!-\!1\!\in \!S,\
k\!\not\in\! S\}$; following the
derivation in Appendix \ref{sec:square-inverse-proof},
\begin{align*}
T_{10} =
q_j\sum_{\boldsymbol\pi\in[n]^{k-1}}\sum_{S\in G_{10}}
\det(\mathbf X_{[\boldsymbol\pi,i]_S})^2
\prod_{t\in[k\!-\!1]\backslash S}q_{[\boldsymbol\pi,i]_t} = l_i\,q_j\frac{(k\!-\!d)}{(k\!-\!1)\,k}\,Z.
\end{align*}
\item Let $G_{01} = \{S\!\in\! \binom{[k]}{d}:\ k\!-\!1\!\not\in \!S,\ k\!\in\!
S\}$; by symmetry, $T_{01} =
l_j\,q_i\frac{(k-d)}{(k-1)\,k}\,Z$.
\item Let $G_{11} = \{S\!\in\! \binom{[k]}{d}:\ k\!-\!1\!\in \!S,\
k\!\in\! S\}$; the last term is
\begin{align*}
\hspace{-1cm}T_{11} &=
\sum_{\boldsymbol\pi\in[n]^{k-1}}\sum_{S\in G_{11}}
\det(\mathbf X_{[\boldsymbol\pi,i,j]_S})^2
\prod_{t\in[k]\backslash S}q_{[\boldsymbol\pi,i,j]_t} \\
&=\binom{k\!-\!2}{d\!-\!2}\sum_{\boldsymbol\pi\in[n]^{d-2}}\det(\mathbf X_{[\boldsymbol\pi,i,j]})^2\\
&=\binom{k\!-\!2}{d\!-\!2}(d\!-\!2)!\,
\big(\det(\mathbf X^\top\mathbf X) -
\det(\mathbf X_{-i}^\top\mathbf X_{-i}) - \det(\mathbf X_{-j}^\top\mathbf X_{-j})+
\det(\mathbf X_{-i,j}^\top\mathbf X_{-i,j}) \big)\\
&\overset{(*)}{=}\frac{d!\binom{k}{d}}{k(k\!-\!1)}\det(\mathbf X^\top\mathbf X)\Big(1
-\!\!\underbrace{(1\!-\!l_i)}_{\frac{\det(\mathbf X_{-i}^\top\mathbf X_{-i})}{\det(\mathbf X^\top\mathbf X)}}
\!\! -\!\!
\underbrace{(1\!-\!l_j)}_{\frac{\det(\mathbf X_{-j}^\top\mathbf X_{-j})}{\det(\mathbf X^\top\mathbf X)}}
\!\! +
\underbrace{(1\!-\!l_i)(1\!-\!l_j) -l_{ij}^2}_{\frac{\det(\mathbf X_{-i,j}^\top\mathbf X_{-i,j})}{\det(\mathbf X^\top\mathbf X)}}\Big)\\[-2mm]
&=\frac{Z}{k(k\!-\!1)}\big(l_il_j - l_{ij}^2\big),
\end{align*}
\end{enumerate}
where $l_{ij}=\mathbf x_i^\top(\mathbf X^\top\mathbf X)^{-1}\mathbf x_j$, and $(*)$ follows from
repeated application of Sylvester's determinant formula (as in
Appendix \ref{sec:square-inverse-proof}). Putting it all together, we
can now compute the expectation for $i\neq j$:
\begin{align*}
\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\,(\mathbf Q_{\boldsymbol\pi})_{jj}\big] &=
\frac{1}{q_i\,q_j}\sum_{t_1=1}^k\sum_{t_2=1}^k
\Pr\big((\pi_{t_1}\!=\!i)\wedge(\pi_{t_2}\!=\!j)\big)\\
&=\frac{k(k\!-\!1)}{q_i\,q_j} \overbrace{\Pr\big((\pi_{k-1}\!=\!i)\wedge(\pi_k\!=\!j)\big)}
^{\frac{1}{Z}(T_{00}+T_{10}+T_{01}+T_{11})}\\
&= (k\!-\!d\!-\!1)(k\!-\!d) + (k\!-\!d)\frac{l_i}{q_i} +
(k\!-\!d)\frac{l_j}{q_j} + \frac{l_il_j}{q_i\,q_j} - \frac{l_{ij}^2}{q_i\,q_j}\\
&=\Big((k\!-\!d)+\frac{l_i}{q_i}\Big)\Big((k\!-\!d) +
\frac{l_j}{q_j}\Big) - (k\!-\!d) - \frac{l_{ij}^2}{q_i\,q_j}\\
&=\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\big]\,\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{jj}\big] -(k\!-\!d) - \frac{l_{ij}^2}{q_iq_j}.
\end{align*}
Finally, if $i=j$, then
\begin{align*}
\mathbb E[(\mathbf Q_{\boldsymbol\pi})_{ii}\,(\mathbf Q_{\boldsymbol\pi})_{ii}] &= \frac{1}{q_i^2}\sum_{t_1=1}^k\sum_{t_2=1}^k\Pr(\pi_{t_1}\!=\!i\,
\wedge\, \pi_{t_2}\!=\!i) \\
&= \frac{k(k\!-\!1)}{q_i^2} \,\Pr(\pi_{k-1}\!=\!i\, \wedge\,
\pi_{k}\!=\!i) + \frac{k}{q_i^2}\, \Pr(\pi_k\!=\!i)\\
&=\big(\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\big]\big)^2 -(k\!-\!d) -
\frac{l_i^2}{q_i^2} +\frac{1}{q_i}\mathbb E\big[(\mathbf Q_{\boldsymbol\pi})_{ii}\big].
\end{align*}
\section{Proof of Theorem \ref{t:spectral}}
\label{sec:spectral-proof}
We break the sampling procedure down into two stages. First, we perform leveraged volume
sampling of a sequence $\boldsymbol\pi\in[n]^{m}$ of size $m\geq C_0 d^2/\delta$,
then we perform standard volume
sampling of size $k$ from the matrix $(\mathbf Q_{[1..n]}^{\sfrac{1}{2}}\mathbf U)_{\boldsymbol\pi}$. Since
rescaled volume sampling is closed under this
subsampling (Lemma \ref{l:composition}),
this procedure is equivalent to size-$k$ leveraged volume sampling
from $\mathbf U$. To show that the first stage satisfies the subspace
embedding condition, we simply use the bound from Theorem
\ref{t:multiplication} (see details in Appendix \ref{a:overestimate}):
\begin{lemma}\label{l:overestimate}
There is an absolute constant $C_0$, s.t.~if sequence $\boldsymbol\pi\in[n]^m$ is
generated via leveraged volume sampling of size $m$ at least
$C_0\,d^2/\delta$ from $\mathbf U$, then
\begin{align*}
\Pr\bigg(\lambda_{\min}\Big(\frac{1}{m}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U\Big) \leq
\frac{1}{2}\bigg)\leq \delta.
\end{align*}
\end{lemma}
The size of $m$ is much larger than what we claim is sufficient.
However, we use it to achieve a tighter bound in the second stage.
To obtain substantially smaller sample sizes for subspace embedding than what Theorem~\ref{t:multiplication} can deliver, it is standard to use tail bounds for
sums of independent matrices. However, applying these results to joint
sampling is a challenging task. Interestingly,
\cite{dual-volume-sampling} showed that volume sampling is a strongly
Rayleigh measure, implying that the sampled vectors are negatively
correlated. This guarantee is sufficient to show tail bounds for
real-valued random variables \citep[see,
e.g.,][]{pemantle2014concentration};
however, it has proven challenging
in the matrix case, as discussed by \cite{harvey2014pipage}. One
notable exception is uniform sampling without replacement, which is a
negatively correlated joint distribution. A reduction argument originally proposed
by \cite{hoeffding-with-replacement}, but presented in this context by
\cite{uniform-matrix-sampling}, shows that uniform sampling without
replacement offers the same tail bounds as i.i.d.~uniform sampling.
\begin{lemma}\label{l:without-replacement}
Assume that $\lambda_{\min}\big(\frac{1}{m}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U\big)\geq
\frac{1}{2}$. Suppose that $T$ is a set of fixed size
sampled uniformly without replacement from $[m]$. There is a constant
$C_1$ s.t.~if $|T|\ge C_1\,d\ln(d/\delta)$, then
\begin{align*}
\Pr\Big(\lambda_{\min}\Big(\frac{1}{|T|}\mathbf U^\top\mathbf Q_{\boldsymbol\pi_T}\mathbf U\Big) \leq
\frac{1}{4}\Big) \leq \delta.
\end{align*}
\end{lemma}
\vspace{-1mm}
The proof of Lemma \ref{l:without-replacement} (given in Appendix
\ref{a:without-replacement}) is a straightforward application of the
argument given by \cite{uniform-matrix-sampling}. We now propose a
different reduction argument showing that a subspace
embedding guarantee for uniform sampling without replacement leads to
a similar guarantee for volume sampling. We achieve this by exploiting a
volume sampling algorithm proposed recently by
\cite{regularized-volume-sampling}, shown in Algorithm
\ref{alg:volume}, which is a modification of the reverse iterative
sampling procedure introduced in \cite{unbiased-estimates}. This
procedure relies on iteratively removing elements from the set $S$
until we are left with $k$ elements. Specifically, at each step, we
sample an index $i$ from the conditional distribution $i\sim \Pr(i\,|\,S)=(1-\mathbf u_i^\top(\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U)^{-1}\mathbf u_i)/(|S|-d)$. Crucially for us, each step
proceeds via rejection sampling with a uniform proposal distribution.
We can easily modify the algorithm so that the
samples from the proposal distribution are used to construct a uniformly
sampled set $T$, as shown in Algorithm \ref{alg:coupled}. Note that
the sets $S$ returned by both algorithms are identically distributed, and
furthermore, $T$ is a subset of $S$, because every index taken out of
$S$ is also taken out of $T$.
\noindent\begin{minipage}{\textwidth}
\centering
\begin{minipage}{.45\textwidth}
\small
\centering
\captionof{algorithm}{Volume sampling}
\label{alg:volume}
\vspace{-.3cm}
\begin{algorithmic}[1]
\STATE $S \leftarrow [m]$
\STATE {\bfseries while} $|S|>k$
\STATE \quad \textbf{repeat}
\STATE \quad\quad Sample $i$ unif.\ out of $S$
\STATE \quad\quad $q \leftarrow 1-\mathbf u_i^\top (\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U)^{-1}\mathbf u_i$
\STATE \quad\quad Sample $\textit{Accept} \sim \text{Bernoulli}(q)$
\STATE \quad \textbf{until} $\textit{Accept}=\text{true}$
\STATE \quad $S\leftarrow S \backslash \{i\}$
\STATE {\bfseries end}
\RETURN $S$
\end{algorithmic}
\end{minipage}
\begin{minipage}{.45\textwidth}
\small
\centering \vspace{5 mm}
\captionof{algorithm}{Coupled sampling}
\label{alg:coupled}
\vspace{-.3cm}
\begin{algorithmic}[1]
\STATE $S,T \leftarrow [m]$
\STATE {\bfseries while} $|S|>k$
\STATE \quad Sample $i$ unif.\ out of $[m]$
\STATE \quad $T \leftarrow T - \{i\}$
\STATE \quad {\bfseries if} $i\in S$
\STATE \quad\quad $q \leftarrow 1-\mathbf u_i^\top
(\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U)^{-1}\mathbf u_i$
\STATE \quad\quad Sample $\textit{Accept} \sim \text{Bernoulli}(q)$
\STATE \quad\quad {\bfseries if} $\textit{Accept}=\text{true}$,\quad $S\leftarrow S\backslash\{i\}$ {\bfseries end}
\STATE \quad{\bfseries end}
\STATE {\bfseries end}
\RETURN $S,T$
\end{algorithmic}
\end{minipage}
\vspace{5mm}
\end{minipage}
By Lemma \ref{l:without-replacement}, if the size of $T$ is at least $ C_1\,d\ln(d/\delta)$, then this set
offers a subspace embedding guarantee. Next, we will show that in
fact set $T$ is not much smaller than $S$, implying that the same
guarantee holds for $S$. Specifically, we will show that $|S \setminus T|=
O(d\ln(d/\delta))$. Note that it suffices to bound the number of times
that a uniform sample is rejected by drawing $\textit{Accept}=\text{false}$ in line 7 of
Algorithm \ref{alg:coupled}. Denote this number by $R$. Note that
$R=\sum_{t=k+1}^m R_t$, where $m=|\boldsymbol\pi|$ and $R_t$ is the number of times
that $\textit{Accept}=\text{false}$ was
sampled while the size of set $S$ was $t$. The variables $R_t$
are independent, and each is
distributed according to the geometric distribution (number of
failures until success), with success probability
\begin{align*}
r_t = \frac{1}{t}\sum_{i\in S}
\big(1-\mathbf u_i^\top(\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U)^{-1}\mathbf u_i\big)
= \frac{1}{t}\Big(t-\mathrm{tr}\big((\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U)^{-1}\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U \big) \Big)
=\frac{t-d}{t}.
\end{align*}
\vspace{-.2cm}
Now, as long as $\frac{m-d}{k-d}\leq C_0\,d^2/\delta$, we can bound the
expected value of $R$ as follows:
\begin{align*}
\mathbb E[R] &=\!\sum_{t=k+1}^m\!\mathbb E[R_t]=\!\!\sum_{t=k+1}^m\!\!\Big(\frac{t}{t-d}-1\Big)
=d\!\!\sum_{t=k-d+1}^{m-d}\frac{1}{t} \leq d\,\ln\!\Big(\frac{m-d}{k-d}\Big)\leq C_2\,d\ln(d/\delta).
\end{align*}
In this step, we made use of the first-stage sampling, guaranteeing that
the term under the logarithm is bounded. Next, we show that the
upper tail of $R$ decays very rapidly given a sufficiently large gap
between $m$ and $k$ (proof in Appendix \ref{a:geometric-tail}):
\begin{lemma}\label{l:geometric-tail}
Let $R_t\sim \operatorname{Geom}(\frac{t-d}{t})$ be a sequence of independent geometrically
distributed random variables (number of failures until success). Then,
for any $d<k<m$ and $a>1$,
\begin{align*}
\Pr\big(R \geq a\ \mathbb E[R]\big) \leq
\text{e}^{\frac{a}{2}}\,\Big(\frac{k-d}{m-d}\Big)^{\frac{a}{2}-1}\quad
\text{for}\quad R=\sum_{t=k+1}^m R_t.
\end{align*}
\end{lemma}
Let $a=4$ in Lemma \ref{l:geometric-tail}. Setting $C = C_1+2a\,C_2$,
for any $k\geq C\,d\ln(d/\delta)$, using $m=\max\{C_0\,\frac{d^2}{\delta},\
d+\text{e}^2\frac{k}{\delta}\}$, we obtain that
\begin{align*}
R&\leq a\,C_2\, d\ln(d/\delta)\leq k/2,
\quad\text{w.p.}\quad
\geq 1- \text{e}^2\,\frac{k-d}{m-d}\geq 1 -\delta,
\end{align*}
showing that $|T|\geq k-R\geq C_1\, d\ln(d/\delta)$ and $k\leq
2|T|$.
Therefore, by Lemmas \ref{l:overestimate},
\ref{l:without-replacement} and
\ref{l:geometric-tail}, there is a $1-3\delta$ probability event in which
\[
\lambda_{\min}\Big(\frac{1}{|T|}\mathbf U^\top\mathbf Q_{\boldsymbol\pi_T}\mathbf U\Big)
\geq \frac14 \quad\text{and}\quad k \leq 2|T| .
\]
In this same event,
\[
\lambda_{\min}\Big(\frac{1}{k}\mathbf U^\top\mathbf Q_{\boldsymbol\pi_S}\mathbf U\Big)
\geq
\lambda_{\min}\Big(\frac{1}{k}\mathbf U^\top\mathbf Q_{\boldsymbol\pi_T}\mathbf U\Big)
\geq
\lambda_{\min}\Big(\frac{1}{2|T|}\mathbf U^\top\mathbf Q_{\boldsymbol\pi_T}\mathbf U\Big)
\geq
\frac12 \cdot \frac14 = \frac18
,
\]
which completes the proof of Theorem \ref{t:spectral}.
\subsection{Proof of Lemma \ref{l:overestimate}}
\label{a:overestimate}
Replacing the vector $\mathbf r$ in Theorem \ref{t:multiplication} with each
column of matrix $\mathbf U$, we obtain that for $m\geq C\,\frac{d}{\epsilon}$,
\begin{align*}
\mathbb E\big[\|\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U - \mathbf U^\top\mathbf U\|_F^2\big]\leq \epsilon\,\|\mathbf U\|_F^2 =
\epsilon\,d.
\end{align*}
We bound the 2-norm by the Frobenius norm and use Markov's inequality,
showing that w.p.\ $\geq 1-\delta$,
\begin{align*}
\|\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U-\mathbf I\|\leq \|\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U - \mathbf I\|_F\leq
\sqrt{\epsilon\, d/\delta}.
\end{align*}
Setting $\epsilon=\frac{\delta}{4d}$, for
$m\geq C_0\,d^2/\delta$, the above inequality implies that
\begin{align*}
\lambda_{\min}\Big(\frac{1}{m}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U\Big) \geq \frac{1}{2}.
\end{align*}
\subsection{Proof of Lemma \ref{l:without-replacement}}
\label{a:without-replacement}
Let $\boldsymbol\pi$ denote the sequence of $m$ indices selected by volume sampling in
the first stage. Suppose that $i_1,\dots,i_k$ are independent uniformly sampled indices
from $[m]$, and let $j_1,\dots,j_k$ be indices sampled uniformly
without replacement from $[m]$. We define matrices
\begin{align*}
\mathbf Z\defeq \sum_{t=1}^k\overbrace{\frac{1}{kq_{i_t}}\mathbf u_{i_t}\mathbf u_{i_t}^\top}^{\mathbf Z_t},\quad\text{and}\quad
\bar{\mathbf Z}\defeq\sum_{t=1}^k\overbrace{\frac{1}{kq_{j_t}}\mathbf u_{j_t}\mathbf u_{j_t}^\top}^{\bar{\mathbf Z}_t}.
\end{align*}
Note that $\|\mathbf Z_t\|=\frac{d}{k\,l_{i_t}}\|\mathbf u_{i_t}\|^2=\frac{d}{k}$
and, similarly, $\|\bar{\mathbf Z}_t\|= \frac{d}{k}$. Moreover,
\begin{align*}
\mathbb E[\mathbf Z] =
\sum_{t=1}^k\bigg[\frac{1}{m}\sum_{i=1}^m\frac{1}{kq_i}\mathbf u_i\mathbf u_i^\top\bigg] = k\
\frac{1}{k}\frac{1}{m}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U = \frac{1}{m}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U.
\end{align*}
Combining Chernoff's inequality with the reduction argument described
in \cite{uniform-matrix-sampling}, for any $\lambda$ and $\theta>0$,
\begin{align*}
\Pr\big(\lambda_{\max}(-\bar{\mathbf Z})\geq
\lambda\big)\leq \text{e}^{-\theta \lambda}\
\mathbb E\Big[\mathrm{tr}\big(\exp(\theta (-\bar{\mathbf Z}))\big)\Big]
\leq
\text{e}^{-\theta \lambda}\
\mathbb E\Big[\mathrm{tr}\big(\exp(\theta (-\mathbf Z))\big)\Big].
\end{align*}
Using the matrix Chernoff bound of \cite{matrix-tail-bounds} applied to
$-\mathbf Z_1,\dots,-\mathbf Z_k$ with an appropriate $\theta$, we have
\begin{align*}
\text{e}^{-\theta \lambda}\
\mathbb E\Big[\mathrm{tr}\big(\exp(\theta (-\mathbf Z))\big)\Big]\leq d\
\exp\Big(-\frac{k}{16d}\Big),\quad
\text{for}\quad \lambda = \frac{1}{2}\,\lambda_{\max}\Big(-\frac{1}{m}\mathbf U^\top\mathbf Q_{\boldsymbol\pi}\mathbf U\Big)\leq-\frac{1}{4}.
\end{align*}
Thus, there is a constant $C_1$ such that for $k\geq C_1\,d\ln(d/\delta)$,
w.p.~at least $1-\delta$ we have $\lambda_{\min}(\bar{\mathbf Z})\geq
\frac{1}{4}$.
\subsection{Proof of Lemma \ref{l:geometric-tail}}
\label{a:geometric-tail}
We compute the moment generating function of the variable
$R_t\sim\operatorname{Geom}(r_t)$, where $r_t=\frac{t-d}{t}$:
\begin{align*}
\mathbb E\big[\text{e}^{\theta R_t}\big] =
\frac{r_t}{1-(1-r_t)\text{e}^{\theta}}=
\frac{\frac{t-d}{t}}{1-\frac{d}{t}\,\text{e}^{\theta}} = \frac{t-d}{t-d\,\text{e}^{\theta}}.
\end{align*}
Setting $\theta=\frac{1}{2d}$, we observe that $d\text{e}^{\theta}\leq
d+1$, and so $\mathbb E[\text{e}^{\theta R_t}]\leq
\frac{t-d}{t-d-1}$. Letting $\mu=\mathbb E[R]$,
for any $a>1$, using Markov's inequality we have
\begin{align*}
\Pr(R\geq a\mu)\leq \text{e}^{-a\theta\mu}\,\mathbb E\big[\text{e}^{\theta
R}\big]
\leq \text{e}^{-a\theta\mu}\prod_{t=k+1}^m\frac{t-d}{t-d-1}=
\text{e}^{-a\theta\mu}\,\frac{m-d}{k-d}.
\end{align*}
Note that using bounds on the harmonic series we can estimate the
mean:
\begin{align*}
\mu &= d\!\!\sum_{t=k-d+1}^{m-d}\frac{1}{t}\geq d\, (\ln(m-d) -
\ln(k-d)-1)= d\,\ln\Big(\frac{m-d}{k-d}\Big) - d,\\
\text{so}\quad \text{e}^{-a\theta\mu} &
\leq \text{e}^{a/2}\,\exp\bigg(-\frac{a}{2}\ln\Big(\frac{m-d}{k-d}\Big)\bigg)=
\text{e}^{a/2}\,\Big(\frac{m-d}{k-d}\Big)^{-a/2}.
\end{align*}
Putting the two inequalities together, we obtain the desired tail bound.
\section{Experiments}
\label{sec:experiments}
We present experiments comparing leveraged volume sampling to standard
volume sampling and to leverage score sampling, in terms of the
total square loss suffered by the subsampled least-squares
estimator. The three estimators can be summarized as follows:
\begin{align*}
\textit{volume sampling:} \quad\mathbf w_S^* &= (\mathbf X_S)^+\mathbf y_S,&\Pr(S)&\sim
\det(\mathbf X_S^\top\mathbf X_S),
\quad S\in \binom{[n]}{k};\\
\textit{leverage score sampling:}\quad
\mathbf w_{\boldsymbol\pi}^* &=(\mathbf Q_{\boldsymbol\pi}^{\sfrac12}\mathbf X)^+\mathbf Q_{\boldsymbol\pi}^{\sfrac12}\mathbf y,
&\Pr(\boldsymbol\pi) &= \prod_{i=1}^k\frac{l_{\pi_i}}{d},\qquad\qquad\boldsymbol\pi\in[n]^k;\\
\textit{leveraged volume sampling:}\quad
\mathbf w_{\boldsymbol\pi}^* &=(\mathbf Q_{\boldsymbol\pi}^{\sfrac12}\mathbf X)^+\mathbf Q_{\boldsymbol\pi}^{\sfrac12}\mathbf y,
&\Pr(\boldsymbol\pi) &\sim \det(\mathbf X^\top\mathbf Q_{\boldsymbol\pi}\mathbf X)\prod_{i=1}^k\frac{l_{\pi_i}}{d} .
\end{align*}
Both volume sampling-based estimators are unbiased; however,
the leverage score sampling estimator is not. Recall that
$\mathbf Q_{\boldsymbol\pi}=\sum_{i=1}^{|\boldsymbol\pi|}q_{\pi_i}^{-1}\mathbf e_{\pi_i}\mathbf e_{\pi_i}^\top$ is the selection
and rescaling matrix as defined for
$q$-rescaled volume sampling with $q_i=\frac{l_i}{d}$. For each
estimator we plot its average total loss,
i.e., $\frac{1}{n}\|\mathbf X\mathbf w-\mathbf y\|^2$, for a range of sample sizes $k$,
contrasted with the loss of the best least-squares estimator $\mathbf w^*$
computed from all the data.
\begin{wrapfigure}{l}{0.45\textwidth}
\begin{tabular}{c|c|c}
Dataset & Instances ($n$) & Features ($d$) \\
\hline
\textit{bodyfat} & 252& 14\\
\textit{housing} &506& 13\\
\textit{mg} & 1,385 & 21\\
\textit{abalone} & 4,177 & 36 \\
\textit{cpusmall} & 8,192 &12\\
\textit{cadata} & 20,640&8\\
\textit{MSD} &463,715&90
\end{tabular}
\captionof{table}{Libsvm regression datasets \cite{libsvm} (to
increase the dimensionality of \textit{mg} and \textit{abalone}, we
expanded features to all degree-2 monomials, and removed redundant
ones).}
\label{tab:datasets}
\end{wrapfigure}
Plots shown in Figures \ref{f:lb} and \ref{fig:experiments} were
averaged over 100 runs, with the shaded area representing the standard error
of the mean. We used seven benchmark datasets from the libsvm
repository \cite{libsvm} (six in this section and one in Section
\ref{s:intro}), whose dimensions are given in Table
\ref{tab:datasets}. The results confirm that leveraged volume sampling
is as good as or better than either of the baselines for any sample size
$k$. We can see that in some of the examples standard volume sampling
exhibits bad behavior for larger sample sizes, as suggested by the
lower bound of Theorem \ref{t:lower} (especially noticeable on the
\textit{bodyfat} and \textit{cpusmall} datasets). On the other hand, leverage
score sampling exhibits poor performance for small sample sizes due to
the coupon collector problem, which is most noticeable for the
\textit{abalone} dataset, where we can see a very sharp transition
after which leverage score sampling becomes effective. Neither of the
variants of volume sampling suffers from this issue.
\begin{figure}
\includegraphics[width=0.5\textwidth]{figs/bodyfat_scale}\nobreak
\includegraphics[width=0.5\textwidth]{figs/housing_scale}
\includegraphics[width=0.5\textwidth]{figs/mg_exp}\nobreak
\includegraphics[width=0.5\textwidth]{figs/abalone_exp}
\includegraphics[width=0.5\textwidth]{figs/cadata}\nobreak
\includegraphics[width=0.5\textwidth]{figs/msd}
\caption{Comparison of the loss of the subsampled estimator when
using \textit{leveraged volume sampling} vs.\ using \textit{leverage score sampling} and
standard \textit{volume sampling} on six datasets.}
\label{fig:experiments}
\end{figure}
\section{Faster algorithm via approximate leverage scores}
\label{sec:fast-alg}
\begin{wrapfigure}{r}{0.4\textwidth}
\renewcommand{\thealgorithm}{}
\vspace{-9mm}
\begin{minipage}{0.4\textwidth}
\floatname{algorithm}{}
\begin{algorithm}[H]
{\fontsize{8}{8}\selectfont
\caption{\bf \small Fast leveraged volume sampling}
\begin{algorithmic}[0]
\STATE \textbf{Input:} $\mathbf X\!\in\!\mathbb R^{n\times d},\, k\geq
d,\,\epsilon\geq 0$\\[1mm]
\STATE Compute $\mathbf A = (1\pm \epsilon) \,\mathbf X^\top\mathbf X$
\STATE Compute $\tilde{l}_i=(1\pm \frac12)\, l_i\quad \forall_{i\in[n]}$
\STATE $s \leftarrow \max\{k,\,8d^2\}$
\STATE \textbf{repeat}
\STATE \quad $\pi \leftarrow$ empty sequence
\STATE \quad\textbf{while} $|\pi|<s$
\STATE \quad\quad Sample $i\ \sim\
(\tilde{l}_1,\dots,\tilde{l}_n)$
\STATE \quad\quad $a\sim
\text{Bernoulli}\Big((1\!-\!\epsilon)\frac{\mathbf x_i^\top\mathbf A^{-1}\mathbf x_i}{2\tilde{l}_i}\Big)$
\STATE \quad\quad\textbf{if} $a=\text{true}$,\quad\textbf{then}\quad $\pi \leftarrow
[\pi, i]$
\STATE \quad\textbf{end}\\[1mm]
\STATE \quad $\mathbf Q_\pi\leftarrow \sum_{j=1}^s d\,
(\mathbf x_{\pi_j}^\top\mathbf A^{-1}\mathbf x_{\pi_j})^{-1}\mathbf e_{\pi_j}\mathbf e_{\pi_j}^\top$
\vspace{-1mm}
\STATE \quad Sample $\textit{Acc}\sim
\text{Bernoulli}\Big(\frac{\det(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf A)}\Big)$
\vspace{-1mm}
\STATE \textbf{until} $\textit{Acc}=\text{true}$
\STATE $S\leftarrow$ VolumeSample$\big((\mathbf Q_{[1..n]}^{\sfrac{1}{2}}\mathbf X)_\pi,k\big)$
\RETURN $\pi_S$
\end{algorithmic}
}
\end{algorithm}
\end{minipage}
\vspace{-5mm}
\end{wrapfigure}
In some settings, the primary computational cost of deploying leveraged volume
sampling is the preprocessing cost of computing exact leverage
scores for matrix $\mathbf X\in\mathbb R^{n\times d}$, which takes $O(nd^2)$. There
is a large body of work dedicated to fast estimation of leverage
scores (see, e.g., \cite{fast-leverage-scores,randomized-matrix-algorithms}),
and in this section we examine how these approaches can be
utilized to make leveraged volume sampling more efficient. The key
challenge here is to show that the determinantal rejection sampling
step remains effective when distribution $q$ consists of approximate
leverage scores. Our strategy, which is described in the algorithm
\textit{fast leveraged volume sampling}, will be to compute an
approximate covariance matrix $\mathbf A=(1\pm\epsilon)\mathbf X^\top\mathbf X$ and use it
to compute the rescaling distribution $q_i\sim
\mathbf x_i^\top\mathbf A^{-1}\mathbf x_i$. As we see in the lemma below, for sufficiently small
$\epsilon$, this rescaling still retains the runtime guarantee of
determinantal rejection sampling from Theorem \ref{t:algorithm}.\\
\begin{lemma}\label{l:fast-rejection}
Let $\mathbf X\in\mathbb R^{n\times d}$ be a full rank matrix, and suppose that matrix $\mathbf A\in\mathbb R^{d\times d}$ satisfies
\begin{align*}
(1-\epsilon)\,\mathbf X^\top\mathbf X\preceq \mathbf A\preceq
(1+\epsilon)\,\mathbf X^\top\mathbf X,\quad \text{where}\quad \frac{\epsilon}{1-\epsilon}\leq\frac{1}{16d}.
\end{align*}
Let $\pi_1,\dots,\pi_s$ be sampled i.i.d.
$\sim(\hat{l}_1,\dots,\hat{l}_n)$, where
$\hat{l}_i=\mathbf x_i^\top\mathbf A^{-1}\mathbf x_i$. If $s\geq 8d^2$, then
\begin{align*}
\text{for}\quad\mathbf Q_\pi=\sum_{j=1}^s\frac{d}{\hat{l}_{\pi_j}}\mathbf e_{\pi_j}\mathbf e_{\pi_j}^\top,\qquad
\frac{\det(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf A)}\leq 1\quad \text{and}\quad\mathbb E\bigg[\frac{\det(\frac{1}{s}\mathbf X^\top\mathbf Q_\pi\mathbf X)}{\det(\mathbf A)}\bigg]\geq \frac{3}{4}.
\end{align*}
\end{lemma}
Proof of Lemma \ref{l:fast-rejection} follows along the same lines as
the proof of Theorem \ref{t:algorithm}. We can compute matrix $\mathbf A^{-1}$
efficiently in time $\widetilde{O}(nd + d^3/\epsilon^2)$ using a sketching
technique called the Fast Johnson--Lindenstrauss Transform~\cite{ailon2009fast}, as described in
\cite{fast-leverage-scores}. However, the cost of computing the entire
rescaling distribution is still $O(nd^2)$. Standard techniques
circumvent this issue by performing a second matrix sketch. We cannot
afford to do that while at the same time preserving the sufficient
quality of leverage score estimates needed for leveraged volume
sampling. Instead, we first compute weak estimates
$\tilde{l}_i=(1\pm\frac{1}{2})l_i$ in time $\widetilde{O}(nd+d^3)$ as
in \cite{fast-leverage-scores}, then use rejection sampling to sample
from the more accurate leverage score distribution, and finally compute the
correct rescaling coefficients just for the obtained sample. Note that
having produced matrix $\mathbf A^{-1}$, computing a single leverage score
estimate $\hat{l}_i$ takes $O(d^2)$. The proposed algorithm with high
probability only has to compute $O(s)$ such estimates, which
introduces an additional cost of $O(sd^2) = O((k+d^2)\,d^2)$. Thus, as long as
$k=O(d^3)$, the dominant cost of the overall procedure still comes from
the estimation of matrix $\mathbf A$, which
takes $\widetilde{O}(nd+d^5)$ when $\epsilon$ is chosen as in Lemma
\ref{l:fast-rejection}.
It is worth noting that \textit{fast leveraged volume sampling} is
a valid $q$-rescaled volume sampling distribution (and not an
approximation of one), so the least-squares estimators it produces
are exactly unbiased. Moreover, proofs of Theorems \ref{t:multiplication} and
\ref{t:spectral} can be straightforwardly extended to the setting
where $q$ is constructed from approximate leverage scores, so our loss
bounds also hold in this case.
\end{document}
\begin{document}
\publicationdetails{21}{2019}{1}{4}{4949}
\title{On Weakly Distinguishing Graph Polynomials}
\begin{abstract}
A univariate graph polynomial \begin{math} P(G;X) \end{math} is weakly distinguishing if for almost all finite graphs
\begin{math}G\end{math} there is a finite graph \begin{math}H\end{math} with \begin{math}P(G;X)=P(H;X)\end{math}.
We show that the clique polynomial and the independence
polynomial are weakly distinguishing. Furthermore,
we show that generating functions of induced subgraphs with property \begin{math}C\end{math} are weakly distinguishing
provided that \begin{math}C\end{math} is of bounded degeneracy or treewidth. The same holds for the harmonious chromatic polynomial.
\end{abstract}
\section{Introduction and Outline}
Throughout this paper we consider only simple (i.e. finite, undirected loopless graphs without parallel edges), vertex labelled graphs.
Let \begin{math}P\end{math} be a graph polynomial.
A graph \begin{math}G\end{math} is {\em \begin{math}P\end{math}-unique} if every graph \begin{math}H\end{math} with \begin{math}P(G;X)=P(H;X)\end{math} is isomorphic to \begin{math}G\end{math}.
A graph {\em \begin{math}H\end{math} is a \begin{math}P\end{math}-mate} of \begin{math}G\end{math}
if \begin{math}P(G;X)=P(H;X)\end{math} but \begin{math}H\end{math} is not isomorphic to \begin{math}G\end{math}.
In \cite{noy2003graphs} \begin{math}P\end{math}-unique graphs are studied for the Tutte polynomial \begin{math}T(G;X,Y)\end{math},
the chromatic polynomial \begin{math}\chi(G;X)\end{math}, the matching polynomial \begin{math}m(G;X)\end{math} and the
characteristic polynomial \begin{math}char(P;X)\end{math}.
A statement holds for almost all graphs if the proportion of graphs of order \begin{math}n\end{math}
for which it holds, tends to \begin{math}1\end{math}, when
\begin{math}n\end{math} tends to infinity.
A graph polynomial \begin{math}P\end{math} is
{\em almost complete} if almost all graphs \begin{math}G\end{math} are \begin{math}P\end{math}-unique, and it is
{\em weakly distinguishing} if almost all graphs \begin{math}G\end{math} have a \begin{math}P\end{math}-mate.
In \cite{bollobas2000contraction} it is conjectured that almost all graphs are \begin{math}\chi\end{math}-unique and \begin{math}T\end{math}-unique,
in other words, both \begin{math}\chi(G;X)\end{math} and \begin{math}T(G;X,Y)\end{math} are almost complete.
There are plenty of trivial graph polynomials which are weakly distinguishing, like
\begin{math}X^{|V(G)|}\end{math} or \begin{math}X^{|E(G)|}\end{math}. However, one might expect that the prominent graph polynomials
from the literature are not weakly distinguishing.
Here we show that various non-trivial graph polynomials are still weakly distinguishing.
The degree polynomial \begin{math}Deg(G;x)\end{math} of a graph \begin{math}G\end{math}
is the generating function of the degree sequence of $G$.
A graph \begin{math}G\end{math} is \begin{math}Deg\end{math}-unique, also called in the literature
a {\em unigraph},
if it is determined by its degree sequence.
An updated discussion on how to
recognize unigraphs can be found in \cite{borri2009recognition}.
A simple counting argument gives:
\begin{theorem}
\label{theorem deg}
Almost all graphs \begin{math}G\end{math} have a \begin{math}Deg\end{math}-mate.
\end{theorem}
The {\em Independence and Clique polynomials} of a graph \begin{math}G=(V(G),E(G))\end{math} contain much information about $G$.
Both were first studied in \cite{hoede1994clique}. For a more recent survey on the independence polynomial see \cite{levit2005independence}.
\begin{theorem}
\label{theorem clique ind}
The independence and clique polynomials are weakly distinguishing.
\end{theorem}
The proof uses estimates for the independence number \begin{math}\alpha(G)\end{math} and the clique number \begin{math}\omega(G)\end{math}
for random graphs (see \cite{bollobas1976cliques} and \cite{frieze1990independence}), together with a counting argument.\\
This theorem can be generalized:
\begin{definition}
\label{definition ind function}
Let \begin{math}\mathcal{C}\end{math} be a graph property.
We say that a function \begin{math}f:\mathbb{N} \rightarrow \mathbb{N}\end{math} is an
{\em independence (clique) function for \begin{math}\mathcal{C}\end{math}}
if for every graph \begin{math}G\in \mathcal{C}\end{math}, the graph \begin{math}G\end{math}
has an independent set (clique) of size \begin{math}f(|V(G)|)\end{math}.
\end{definition}
Denote by \begin{math}\hat{\mathcal{C}}\end{math} the class of complement graphs \begin{math}\bar{G}\end{math} of graphs
\begin{math}G \in \mathcal{C}\end{math}, and \begin{math}P_{\mathcal{C}}(G;X) = \sum_{A \subset V(G): G[A] \in \mathcal{C}} X^{|A|}\end{math}, and
\begin{math}P_{\hat{\mathcal{C}}}(G;X) = \sum_{A \subset V(G): G[A] \in \hat{\mathcal{C}}} X^{|A|}\end{math}.
\begin{theorem}
\label{theorem classes}
Let \begin{math}Q\end{math} be a graph property that has an independence or a clique function
\begin{math}f\end{math} that satisfies that for all \begin{math}n\in \mathbb{N}\end{math} ,
\begin{math}f(n)\geq n/a\end{math} for some fixed \begin{math}a\in \mathbb{N}\end{math} . Then \begin{math}P_Q\end{math} is weakly distinguishing.
\end{theorem}
This applies to the following cases:
\begin{itemize}
\item A graph \begin{math}G\end{math} is \begin{math}k\end{math}-degenerate if every induced subgraph of \begin{math}G\end{math} has a vertex of degree at most \begin{math}k\end{math}.
It is easy to see that
every \begin{math}k\end{math}-degenerate graph \begin{math}G\end{math} of order \begin{math}n\end{math} has an independent set of size \begin{math}{\left\lceil \frac{n}{k+1} \right\rceil}\end{math}.
\item Among the \begin{math}k\end{math}-degenerate graphs we find the graphs of treewidth at most \begin{math}k\end{math}, graphs of degree at most \begin{math}k\end{math},
and planar graphs.
\item A \begin{math}k\end{math}-colourable graph \begin{math}G\end{math} of order \begin{math}n\end{math} has an independent set of size at least \begin{math}{\left\lceil \frac{n}{k} \right\rceil}\end{math}.
\item Let \begin{math}\mathcal{C}\end{math} be a graph property. A function \begin{math}\gamma: V(G) \rightarrow [k]\end{math} is a {\em \begin{math}\mathcal{C}\end{math}-colouring} if every
color class induces a graph in \begin{math}\mathcal{C}\end{math}.
Such colourings were studied in \cite{Gutman83Generalizations}.
If we assume that \begin{math}\mathcal{C}\end{math} has an independence (clique) function \begin{math}g(n)\end{math}, then
the graphs which are \begin{math}\mathcal{C}\end{math}-colorable with at most \begin{math}k\end{math} colors have an independence (clique) function
\begin{math}f(n) = {\left\lceil \frac{g(n)}{k} \right\rceil}\end{math}.
\end{itemize}
Therefore, for \begin{math}\mathcal{C}\end{math} one of the properties above, the graph polynomials
\begin{math}P_{\mathcal{C}}(G;X)\end{math}
are all weakly distinguishing.
A harmonious colouring of \begin{math}G\end{math} with at most \begin{math}k\end{math} colors is a proper colouring of \begin{math}G\end{math} such that every pair of colors
occurs at most once along an edge.
Let \begin{math}\chi_{harm}(G;k)\end{math} count the number of harmonious colourings of \begin{math}G\end{math}.
It was observed in \cite{makowsky2006polynomial} that \begin{math}\chi_{harm}(G;k)\end{math} is a polynomial in \begin{math}k\end{math}.
\begin{theorem}
\label{theorem harmonious}
Almost all graphs \begin{math}G\end{math} have a \begin{math}\chi_{harm}\end{math}-mate.
\end{theorem}
The status of \begin{math}P\end{math}-uniqueness remains open for \begin{math}T(G;X,Y), \chi(G;X), m(G;X)\end{math} and \begin{math}char(G;X)\end{math}.
\section{Preliminaries}
Let \begin{math}G=(V,E)\end{math} be a graph.
Denote by \begin{math}\mathcal{G}(n)\end{math} the set of all non-isomorphic graphs with \begin{math}n\end{math} vertices,
and by \begin{math}\mathcal{G}\end{math} the set of all non-isomorphic graphs.
\begin{fact}[\cite{harary2014graphical}]
\begin{math}|\mathcal{G}(n)|\approx \frac{2^{{n \choose 2}}}{n!} \end{math} for a sufficiently large \begin{math}n\end{math}.
\end{fact}
Let \begin{math}P:\mathcal{G} \rightarrow \mathbb{Z}[x]\end{math} be a graph polynomial.
We say that two non-isomorphic graphs \begin{math}G\end{math} and \begin{math}H\end{math} are \begin{math}P\end{math}-mates if \begin{math}P(G)=P(H)\end{math}. \\
Denote by \begin{math}U_P(n)\end{math} the set of \begin{math}P\end{math}-unique graphs with \begin{math}n\end{math} vertices,
by \begin{math}\beta_P(n)\end{math} the number of polynomials in \begin{math}\mathbb{Z}[x]\end{math}
such that there is a graph of order less than or equal to \begin{math}n\end{math} that maps to that polynomial,
and by \begin{math}\beta_{P,\mathcal{C}}(n)\end{math} the number of polynomials in \begin{math}\mathbb{Z}[x]\end{math}
such that there is a graph of order less than or equal to \begin{math}n\end{math} in \begin{math}\mathcal{C}\end{math} that maps to that polynomial.\\
We denote by \begin{math}K_n\end{math} the clique of size \begin{math}n\end{math},
and by \begin{math}I_n\end{math} the edgeless graph of size \begin{math}n\end{math}.\\
Let \begin{math}G\end{math} be a graph, and \begin{math}A\subseteq V(G)\end{math}. The induced subgraph of \begin{math}A\end{math} in \begin{math}G\end{math},
denoted \begin{math}G[A]\end{math}, is the graph with vertex set \begin{math}A\end{math}, and for \begin{math}v,u \in A\end{math}, \begin{math}(u,v)\in E(G[A])\end{math} iff \begin{math}(u,v) \in E(G)\end{math}.
\begin{definition}
For a graph polynomial \begin{math}P\end{math}, we say \begin{math}P\end{math} is weakly distinguishing if
\begin{math}\lim_{n \rightarrow \infty} \frac{|U_P(n)|}{|\mathcal{G}(n)|}=0\end{math}.
For a family of graphs \begin{math}\mathcal{C}\end{math} we say that \begin{math}P\end{math} is weakly distinguishing on
\begin{math}\mathcal{C}\end{math} if \begin{math}\lim_{n \rightarrow \infty} \frac{|U_P(n)\cap \mathcal{C}|}{|\mathcal{G}(n) \cap \mathcal{C}|}=0\end{math}.
\end{definition}
We wish to consider a particular type of graph polynomials:
\begin{definition}
Let \begin{math}Q\end{math} be a graph property. For all graphs \begin{math}G\end{math}, define \begin{math}P_Q(G;x)=\sum_{A \subset V(G): G[A] \in Q} X^{|A|}\end{math}.
\end{definition}
\section{The Degree Polynomial}
\begin{definition}
For a graph \begin{math}G=(V,E)\end{math} of order \begin{math}n\end{math} and \begin{math}v \in V\end{math}, denote by \begin{math}deg(v)\end{math} the degree of \begin{math}v\end{math}.
Define the Degree polynomial of \begin{math}G\end{math} to be \begin{math}Deg(G,x)=\sum_{v\in V} x^{deg(v)}\end{math}.
\end{definition}
Note that the degree of a vertex is bounded above by \begin{math}n-1\end{math},
so the degree of the polynomial \begin{math}Deg(G,x)\end{math} is at most \begin{math}n-1\end{math}. For every \begin{math}0\leq i\leq n-1\end{math},
the coefficient of \begin{math}x^i\end{math} in \begin{math}Deg(G,x)\end{math} is an integer number between \begin{math}0\end{math} and \begin{math}n\end{math}. Thus, we get
\begin{displaymath}\beta_{Deg}(n)\leq (n+1)^{n-1}\leq (n+1)^n\end{displaymath}
Now we are ready to prove theorem \ref{theorem deg}:\\
{\bf Theorem \ref{theorem deg}}: Almost all graphs \begin{math}G\end{math} have a \begin{math}Deg\end{math}-mate.\\
\begin{proof}
Let \begin{math}G=(V,E)\end{math} be a graph with \begin{math}|V(G)|=n\end{math}.
We now evaluate:
\begin{math}\\ \\
\begin{aligned}
\lim_{n\rightarrow \infty} \dfrac{|U_{Deg}(n)|}{|\mathcal{G}(n)|}\leq \lim_{n\rightarrow \infty}
\dfrac{\beta_{Deg}(n)}{|\mathcal{G}(n)|}\leq \lim_{n\rightarrow \infty} \dfrac{(n+1)^n}{|\mathcal{G}(n)|} =
\lim_{n\rightarrow \infty} \dfrac{(n+1)^n n!}{2^{n(n-1)/2}}
\\ \leq \lim_{n\rightarrow \infty} \dfrac{(n+1)^n \cdot (n+1)^n}{2^{n(n-1)/2}}= \lim_{n\rightarrow \infty} \dfrac{(n+1)^{2n}}{2^{n(n-1)/2}}=0
\end{aligned}\\ \\
\end{math}
\end{proof}
\section{A General Method for Proving Graph Polynomials are Weakly Distinguishing}
We wish to apply the same idea used in proving the degree polynomial is weakly distinguishing
to a large class of graph polynomials. We start with some lemmas.
First, we show that if a graph polynomial \begin{math}P\end{math} is weakly distinguishing on a
large subset of \begin{math}\mathcal{G}\end{math}, it is weakly distinguishing:
\begin{lemma}
\label{lemma 1}
Let \begin{math}P\end{math} be a graph polynomial and \begin{math}\mathcal{C}\end{math} a family of graphs such that \begin{math}\lim_{n\rightarrow \infty} |\mathcal{C}(n)|/|\mathcal{G}(n)| =1\end{math}.
If \begin{math}\lim_{n\rightarrow \infty} |U_P(n) \cap \mathcal{C}|/|\mathcal{G}(n)|=0\end{math} then \begin{math}P\end{math} is weakly distinguishing.
\end{lemma}
\begin{proof}
\begin{displaymath}
\dfrac{|U_P(n)|}{|\mathcal{G}(n)|}=
\dfrac{|U_P(n)\cap \mathcal{C}|+|U_P(n)\cap (\mathcal{G}(n)-\mathcal{C})|}{|\mathcal{G}(n)|}=
\dfrac{|U_P(n)\cap \mathcal{C}|}{|\mathcal{G}(n)|}+\dfrac{|U_P(n)\cap (\mathcal{G}(n)-\mathcal{C})|}{|\mathcal{G}(n)|}
\end{displaymath}
When taking the limit, note that the left term in the sum converges to 0 by assumption, so it remains to evaluate:
\begin{displaymath}
\lim_{n \rightarrow \infty} \dfrac{|U_P(n)\cap (\mathcal{G}-\mathcal{C})|}{|\mathcal{G}(n)|} \leq
\lim_{n \rightarrow \infty} \dfrac{|\mathcal{G}(n)-\mathcal{C}|}{|\mathcal{G}(n)|}=0
\end{displaymath}
\end{proof}
\begin{lemma}
\label{lemma 2}
Let \begin{math}f:\mathbb{N}\rightarrow \mathbb{R}\end{math}. If \begin{math}f(n)\leq (\log n)^{O(1)}\end{math} , then asymptotically
\begin{displaymath}{n \choose f(n)}^{f(n)} \leq (\dfrac{n}{f(n)})^{f(n)+nf(n)}\dfrac{1}{(2 \pi)^{f(n)/2}\cdot n^{f(n)/2}}\end{displaymath}
\end{lemma}
\begin{proof}
By applying the Stirling approximation \begin{math}k!=\sqrt{2 \pi k }(\frac{k}{e})^k\end{math} we evaluate:
\begin{math}\\
\begin{aligned}
{n \choose f(n)}^{f(n)}=(\dfrac{n!}{f(n)!(n-f(n))!})^{f(n)} \\ \\
\approx (\dfrac{\sqrt{2 \pi n}(n/e)^n}{\sqrt{2 \pi f(n)}(f(n)/e)^{f(n)}
\sqrt{2 \pi (n-f(n))}((n-f(n))/e)^{(n-f(n))}})^{f(n)} \\ \\
=(\dfrac{n^{n}}{(f(n))^{f(n)}(n-f(n))^{n-f(n)}} \cdot \dfrac{\sqrt{n}}{\sqrt{2 \pi f(n) (n-f(n))}})^{f(n)}
\\ \\ \leq (\dfrac{n}{2 \pi f(n)\cdot f(n)})^{f(n)/2}\cdot \dfrac{n^{nf(n)}}{f(n)^{nf(n)}}
\end{aligned}\\\\
\end{math}
where the inequality is due to \begin{math}f(n) \leq n-f(n)\end{math} for a sufficiently large \begin{math}n\end{math} .
\begin{math}\\\\
\begin{aligned}
(\dfrac{n}{2 \pi f(n)\cdot f(n)})^{f(n)/2}\cdot \dfrac{n^{nf(n)}}{f(n)^{nf(n)}}=
(\dfrac{n}{f(n)})^{f(n)+nf(n)}\dfrac{1}{(2 \pi)^{f(n)/2}\cdot n^{f(n)/2}}
\end{aligned}\\\\
\end{math}
\end{proof}
Our main tool for proving graph polynomials are weakly distinguishing is theorem \ref{theorem classes}, which provides a sufficient condition for a polynomial \begin{math}P_Q\end{math} to be weakly distinguishing. This condition is given in terms of independence and clique functions (see definition \ref{definition ind function}). We will prove theorem \ref{theorem classes} using the following theorems:
\begin{theorem}[Frieze \cite{frieze1990independence}]
\label{theorem frieze independent}
For a graph \begin{math}G\end{math} , denote by \begin{math}\alpha(G)\end{math} the size of the largest independent set of vertices in \begin{math}G\end{math} .
Then for almost all graphs of order \begin{math}n\end{math} , \begin{math}\alpha(G) \approx 4 \log \frac{n}{2}\end{math}
\end{theorem}
\begin{theorem}[Erd\"os and Bollob\'as \cite{bollobas1976cliques}]
\label{theorem bollobas cliques}
For almost all graphs \begin{math}G\end{math} of order \begin{math}n\end{math}, \begin{math}\omega(G) \approx \frac{2}{\log 2}\cdot \log n\end{math}
\end{theorem}
We are now ready to prove theorem \ref{theorem classes}:\\
{\bf Theorem \ref{theorem classes}:} Let \begin{math}Q\end{math} be a graph property that has an independence or a clique function
\begin{math}f\end{math} that satisfies that for all \begin{math}n\in \mathbb{N}\end{math} ,
\begin{math}f(n)\geq n/a\end{math} for some fixed \begin{math}a\in \mathbb{N}\end{math} . Then \begin{math}P_Q\end{math} is weakly distinguishing.\\
\begin{proof}
Assume \begin{math}f\end{math} is an independence function.
Set \begin{math}\epsilon=1/10\end{math} and let \begin{math}\mathcal{C}=\{G:\alpha(G)\leq 4 \log \frac{n}{2}+\epsilon\}\end{math} .
By theorem \ref{theorem frieze independent}, almost all graphs are in \begin{math}\mathcal{C}\end{math}.
Note that if \begin{math}G\in\mathcal{C}\end{math} , and \begin{math}H\end{math} is an induced subgraph of \begin{math}G\end{math} with \begin{math}H\in Q\end{math},
then there is an independent set of size \begin{math}\frac{|V(H)|}{a}\end{math} in \begin{math}H\end{math}, and hence in \begin{math}G\end{math},
and so \begin{math}|V(H)|\leq 4 \log \frac{n}{2}+\epsilon\end{math}.\\
This implies that
\begin{math}P_Q(G,x)=\sum_{k=1}^{4 \log \frac{|V(G)|}{2}+\epsilon} b_kx^k\end{math}
with \begin{math}0\leq b_k \leq {n \choose k}\end{math} for all \begin{math}k\end{math}, and so
\begin{displaymath}\beta_{P_Q,\mathcal{C}}(n)\leq {n \choose 4 \log \frac{|V(G)|}{2}+\epsilon}^{4 \log \frac{|V(G)|}{2}+\epsilon}\end{displaymath}
hence by lemmas \ref{lemma 1} and \ref{lemma 2}, \begin{math}P_Q\end{math} is weakly distinguishing.\\
If \begin{math}f\end{math} is a clique function, the proof is similar using theorem \ref{theorem bollobas cliques}.
\end{proof}
\section{Applications of the Method}
\subsection{The Clique and Independence Polynomials}
\begin{definition}
Let \begin{math}G\end{math} be a graph. For \begin{math}i\in \mathbb{N} \end{math}, denote \begin{math}c_i(G)=|\{A\subseteq V(G):G[A]\cong K_i\}|\end{math}.
The clique polynomial of \begin{math}G\end{math}, \begin{math}Cl(G,x)\end{math} is defined to be \begin{math}Cl(G,x)=1+\sum_{i=1}^\infty c_i(G)x^i\end{math}.
Note that this is a graph polynomial, and that the sum in the definition is finite.
The clique number of \begin{math}G\end{math}, denoted \begin{math}\omega(G)\end{math}, is the degree of the clique polynomial
(i.e. this is the size of the largest clique subgraph of \begin{math}G\end{math}).
\end{definition}
\begin{definition}
Let \begin{math}G\end{math} be a graph. For \begin{math}i\in \mathbb{N}\end{math}, denote \begin{math}s_i(G)=|\{A\subseteq V(G):G[A]\cong I_i\}|\end{math}.
The independence polynomial of \begin{math}G\end{math}, \begin{math}Ind(G,x)\end{math} is defined to be \begin{math}Ind(G,x)=1+\sum_{i=1}^\infty s_i(G)x^i\end{math}.
Note that this is a graph polynomial, and that the sum in the definition is finite.
The independence number of \begin{math}G\end{math}, denoted \begin{math}\alpha(G)\end{math}, is the degree of the independence polynomial
(i.e. this is the size of the largest independent set in \begin{math}G\end{math}).
\end{definition}
Theorem \ref{theorem clique ind} is now a direct corollary of theorem \ref{theorem classes}:\\
{\bf Theorem \ref{theorem clique ind}}: Almost all graphs \begin{math}G\end{math} have an \begin{math}Ind\end{math}-mate and a \begin{math}Cl\end{math}-mate.\\
\begin{proof}
For the independence polynomial, note that \begin{math}Ind(G,x)=P_Q(G;x)\end{math}, were \begin{math}Q\end{math} is the property consisting of edgeless graphs. Note that the identity function on \begin{math}\mathbb{N} \end{math} is an independence function for \begin{math}Q\end{math}, and clearly it satisfies the condition in theorem \ref{theorem classes} for \begin{math}a=1\end{math}. Hence the independence polynomial is weakly distinguishing.\\
Similarly, for the clique polynomial note that \begin{math}Cl(G,x)=P_Q(G;x)\end{math}, were \begin{math}Q\end{math} is the property of complete graphs. Note that the identity function on \begin{math}\mathbb{N} \end{math} is a clique function for \begin{math}Q\end{math}, and clearly it satisfies the condition in theorem \ref{theorem classes} for \begin{math}a=1\end{math}. Hence the clique polynomial is weakly distinguishing.
\end{proof}
\subsection{Generating Functions}
Theorem \ref{theorem classes} can be applied to many graph classes to produce weakly distinguishing graph polynomials.
Of particular interest are \begin{math}k\end{math}-degenerate classes and amongst them classes of bounded treewidth.
For a graph \begin{math}G\end{math}, and \begin{math}v\in V(G)\end{math} denote by
\begin{math}N_G(v)\end{math} the closed neighbourhood of \begin{math}v\end{math} in \begin{math}G\end{math}, i.e.
\begin{math}N_G(v)=\{v\}\cup \{u\in V(G): \{v,u\}\in E(G)\}\end{math}
\begin{definition}
For \begin{math}k\in \mathbb{N}\end{math}, a graph \begin{math}G\end{math} is said to be \begin{math}k\end{math}-degenerate if every induced subgraph of \begin{math}G\end{math} has a vertex of degree at most \begin{math}k\end{math}.
\end{definition}
The following propositions \ref{theorem 10}, \ref{theorem 12} and lemma \ref{lemma 11} are well known results about degenerate graphs and treewidth. For completeness, we include their proofs:
\begin{proposition}
\label{theorem 10}
A graph \begin{math}G=(V,E)\end{math} is \begin{math}k\end{math}-degenerate if and only if there is an enumeration \begin{math}\{v_1,v_2,...,v_n\}=V\end{math} such that for all \begin{math}1\leq i \leq n\end{math} the degree of \begin{math}v_i\end{math}
in the subgraph of \begin{math}G\end{math} induced by \begin{math}V-\{v_1,v_2,...,v_{i-1}\}\end{math} is at most \begin{math}k\end{math}.
\end{proposition}
\begin{proof}
Let \begin{math}G=(V,E)\end{math} be a \begin{math}k\end{math} degenerate graph. From the definition, there is a vertex \begin{math}v\in V\end{math} with degree at most \begin{math}k\end{math}. Denote this vertex by \begin{math}v_1\end{math}.
Define \begin{math}v_i\end{math} inductively: from the definition, the subgraph induced by \begin{math}V-\{v_1,...,v_{i-1}\}\end{math} has a vertex with degree at most \begin{math}k\end{math}.
Define \begin{math}v_i\end{math} to be this vertex. Clearly, the enumeration \begin{math}\{v_1,...,v_n\}=V\end{math} has the desired property.\\
Conversely, let \begin{math}\{v_1,...,v_n\}\end{math} an enumeration as in the theorem, and let \begin{math}H\end{math} be a subgraph of \begin{math}G\end{math} induced by \begin{math}U\subseteq V\end{math}.
Denote by \begin{math}u=v_i\end{math} the vertex in \begin{math}H\end{math} whose index in the enumeration is the smallest. Note that the degree of \begin{math}u\end{math} in \begin{math}G[U \cup \{v_j|j \geq i\}]\end{math}
is at most \begin{math}k\end{math}, and \begin{math}H\end{math} is a subgraph of \begin{math}G[U \cup \{v_j|j \geq i\}]\end{math}, hence the degree of \begin{math}u\end{math} in \begin{math}H\end{math} is at most \begin{math}k\end{math}.
So \begin{math}G\end{math} is \begin{math}k\end{math} degenerate, as required.
\end{proof}
\begin{lemma}
\label{lemma 11}
A graph with treewidth at most \begin{math}k\end{math} has a vertex with degree at most \begin{math}k\end{math}.
\end{lemma}
\begin{proof}
Let \begin{math}G\end{math} be a graph, and \begin{math}(T,X)\end{math} a tree decomposition of \begin{math}G\end{math} with width \begin{math}k\end{math}.
Note that \begin{math}T\end{math} has a leaf, and there is a vertex \begin{math}v\end{math} in the bag corresponding to this
leaf that is not in the bag corresponding to its neighbour.
Thus every neighbour of \begin{math}v\end{math} in \begin{math}G\end{math} is in the same bag. But the bag is of size at most \begin{math}k+1\end{math}, so \begin{math}v\end{math} is of degree at most \begin{math}k\end{math}.
\end{proof}
\begin{proposition}
\label{theorem 12}
A graph \begin{math}G\end{math} with treewidth at most \begin{math}k\end{math} is \begin{math}k\end{math} degenerate.
\end{proposition}
\begin{proof}
Let \begin{math}H\end{math} be an induced subgraph of \begin{math}G\end{math}. If \begin{math}H=G\end{math}, then \begin{math}H\end{math} has a vertex of degree at most \begin{math}k\end{math} by the previous lemma.
If \begin{math}H\end{math} is a proper subgraph, note that \begin{math}H\end{math} has treewidth at most \begin{math}k\end{math}, so \begin{math}H\end{math} has a vertex of degree at most \begin{math}k\end{math}. So \begin{math}G\end{math} is \begin{math}k\end{math} degenerate.
\end{proof}
The following proposition shows that a \begin{math}k\end{math} degenerate graph has a large independent set:
\begin{proposition}
Every \begin{math}k\end{math} degenerate graph \begin{math}G\end{math} has an independent set of size \begin{math}{\left\lceil \frac{|V|}{k+1} \right\rceil}\end{math}.
\end{proposition}
\begin{proof}
Let \begin{math}G=(V,E)\end{math} be a \begin{math}k\end{math} degenerate graph, and \begin{math}\{v_1,...,v_n\}\end{math} be an enumeration as in proposition \ref{theorem 10}. Let $I_0=\emptyset$, and $H_0=G$. We construct an independent set inductively as follows. There exists $1\leq l \leq n$ and an increasing sequence $i_1,i_2,...,i_l$ in $\{1,2,...,n\}$ with $i_1=1$ such that for all $1\leq j \leq l$
\begin{align*}
I_j=I_{j-1}\cup \{v_{i_j}\}\\
H_j=G[V(H_{j-1})-N_G(v_{i_j})]
\end{align*}
with $I_l$ an independent set in $G$ and $l\geq {\left\lceil \frac{n}{k+1} \right\rceil}$.
Indeed, $I_1=\{v_1\}$ and $H_1=G[V(G)-N_G(v_1)]$. Clearly, $I_1$ is an independent set, and note that $|V(H_1)|\geq n-(k+1)$ and no vertex in $H_1$ is a neighbour of the vertex in $I_1$. Now, given an independent set $I_j$ and an induced subgraph $H_j$ of $G$ such that $|V(H_j)|\geq n-j(k+1)$ and no vertex of $H_j$ is a neighbour of a vertex in $I_j$, select $v_{i_{j+1}}\in V(H_j)$ with minimal index. Now $I_{j+1}$ is an independent set, no vertex of $H_{j+1}$ is a neighbour of a vertex in $I_{j+1}$, and since $\deg_{H_j}(v_{i_{j+1}})\leq k$,\\ $|V(H_{j+1})|\geq |V(H_j)|-(k+1)\geq n-(j+1)(k+1)$.\\
The induction stops when no more vertices can be selected, i.e. when $V(H_l)=\emptyset$. From the induction, we have that $0=|V(H_l)|\geq n-l(k+1)$ and hence $l\geq{\left\lceil \frac{n}{k+1} \right\rceil}$ as required.
\end{proof}
Combining this proposition with theorem \ref{theorem classes}, we can show that many non trivial graph polynomials are weakly distinguishing:
\begin{corollary}
Fix \begin{math}k\in \mathbb{N}\end{math}, and let \begin{math}Q\end{math} be a class of \begin{math}k\end{math} degenerate graphs. Then \begin{math}P_Q\end{math} is weakly distinguishing.
\end{corollary}
\begin{corollary}
Fix \begin{math}k\in \mathbb{N}\end{math}, and let \begin{math}Q\end{math} be a class of graphs with treewidth at most \begin{math}k\end{math}. Then \begin{math}P_Q\end{math} is weakly distinguishing.
\end{corollary}
\section{The Harmonious and k-Harmonious Polynomials}
\begin{definition}
For a graph \begin{math}G\end{math}, a harmonious colouring in \begin{math}k\end{math} colours is a function \begin{math}f:V(G) \rightarrow [k]\end{math}
such that \begin{math}f\end{math} is a proper colouring, and for all \begin{math}i,j\in [k]\end{math}, \begin{math}G[f^{-1}(i)\cup f^{-1}(j)]\end{math} has at most one edge.
Denote by \begin{math}\chi_{harm}(G,\lambda)\end{math} the number of \begin{math}\lambda\end{math} harmonious colourings of \begin{math}G\end{math}.
Then \begin{math}\chi_{harm}\end{math} is a polynomial in \begin{math}\lambda\end{math}, as shown in \cite{makowsky2006polynomial}
and \cite{godlin2008evaluations}. \begin{math}\chi_{harm}\end{math} is called the harmonious polynomial.
\end{definition}
For more on the harmonious polynomial, see \cite{drgas2017harmonious}. Theorem \ref{theorem harmonious} was observed without proof in \cite{drgas2017harmonious}.\\
\textbf{Theorem \ref{theorem harmonious}:} Almost all graphs \begin{math}G\end{math} have a \begin{math}\chi_{harm}\end{math}-mate.\\
\begin{proof}
Let \begin{math}\mathcal{C}\end{math} be the class of graphs \begin{math}G\end{math} that have the property that for every two vertices \begin{math}v,u\in V(G)\end{math},
there is a vertex \begin{math}w\in V(G)\end{math} such that \begin{math}w\end{math} is a neighbour of both \begin{math}v\end{math} and \begin{math}u\end{math}.
This property is one of Gaifman's extension axioms, and hence from Fagin's proof of the 0/1-law for first order logic,
almost all graphs are in \begin{math}\mathcal{C}\end{math}(see \cite{fagin1976probabilities} for details).\\
Note that any harmonious colouring of a graph \begin{math}G\in \mathcal{C}\end{math} of order \begin{math}n\end{math}
has to assign a different colour to each vertex of \begin{math}G\end{math}, and so for \begin{math}\lambda \in \mathbb{N}\end{math} the evaluation
of the harmonious polynomial of \begin{math}G\end{math} at \begin{math}\lambda\end{math} is \begin{math}\chi_{harm}(G,\lambda)=\lambda(\lambda-1)(\lambda-2)...(\lambda-n+1)\end{math}.
Since this is true for every \begin{math}\lambda \in \mathbb{N}\end{math}, by interpolation it is true for every \begin{math}\lambda\in \mathbb{R}\end{math},
and so every two graphs in \begin{math}\mathcal{C}\end{math} of the same order have the same harmonious polynomial.
Hence, there is an \begin{math}n_0\end{math} such that all graphs in \begin{math}\mathcal{C}\end{math} of order greater than \begin{math}n_0\end{math} have a \begin{math}\chi_{harm}\end{math}-mate.
Thus the harmonious polynomial is weakly distinguishing.
\end{proof}
This result can be easily generalised.
\begin{definition}
For a fixed \begin{math}k \in \mathbb{N}\end{math} and a graph \begin{math}G\end{math},
we say that a proper colouring of \begin{math}G\end{math} with \begin{math}\lambda\end{math} colours \begin{math}f:V(G)\rightarrow [\lambda]\end{math} is \begin{math}k\end{math}-harmonious
if for every \begin{math}S\subseteq [\lambda]\end{math} such that \begin{math}|S|=k\end{math}, \begin{math}S\end{math} appears as the colour set of a clique in the graph at most once,
i.e. if \begin{math}\{v_1,v_2,...,v_k\},\{u_1,...,u_k\}\subseteq V(G)\end{math} induce complete graphs of size \begin{math}k\end{math} and
\begin{math}f( \{v_1,v_2,...,v_k\})=f(\{u_1,...,u_k\})\end{math}, then \begin{math}\{v_1,v_2,...,v_k\}=\{u_1,...,u_k\}\end{math}.
\end{definition}
For \begin{math}\lambda \in \mathbb{N}\end{math} define \begin{math}h_k(G,\lambda)=|\{f:V(G)\rightarrow [\lambda]:f\end{math} is proper and \begin{math}k\end{math}-harmonious\begin{math}\}|\end{math}.
\begin{math}h_k(G,\lambda)\end{math} is a polynomial in \begin{math}\lambda\end{math} (again, by \cite{makowsky2006polynomial}).
We will prove that \begin{math}h_k\end{math} is weakly distinguishing.\\
We start with a lemma:
\begin{lemma}
Let \begin{math}\mathcal{C}\end{math} be the class of graphs \begin{math}G\end{math} with the property that for every two vertices \begin{math}v,u \in V(G)\end{math}
there are vertices \begin{math}w_1,...,w_k\in V(G)\end{math} such that \begin{math}\{u,w_1,...,w_k\}\end{math} and \begin{math}\{v,w_1,...,w_k\}\end{math} induce a complete graph.
Then almost all graphs are in \begin{math}\mathcal{C}\end{math}.
\end{lemma}
For convenience, we restate and prove the lemma in probabilistic language:
\begin{lemma}
Fix \begin{math}p\in (0,1)\end{math} and let \begin{math}G\in \mathcal{G}(n,p)\end{math} (i.e. \begin{math}G\end{math} is a graph with \begin{math}n\end{math}
vertices and every edge is in the graph with probability \begin{math}p\end{math}, independently of the others).
Then \begin{math}\lim_{n\rightarrow \infty}\mathbb{P}(G\in \mathcal{C})=1\end{math}
\end{lemma}
\begin{proof}
For a graph $G$, denote by $Y_k(G)$ the number of $k$ cliques in $G$.
For fixed \begin{math}u,v\in V(G)\end{math}, denote by \begin{math}G_{u,v}\end{math} the subgraph of \begin{math}G\end{math} induced by the common neighbours of \begin{math}v\end{math} and \begin{math}u\end{math}.
Note that \begin{math}\mathbb{E}(|V(G_{u,v})|)=(n-2)p^2\end{math}, so from the multiplicative Chernoff bound,
\begin{align*}
\mathbb{P}[|V(G_{u,v})|\leq \frac{1}{9}(n-2)p^2]\leq \left(\frac{e^{-9/10}}{(1/9)^{1/9}}\right)^{(n-2)p^2}<e^{-\frac{1}{200}(n-2)p^2}
\end{align*}
Hence, from the union bound
\begin{displaymath}\mathbb{P}[\exists u,v\in V(G) \text{ s.t. } |V(G_{u,v})|\leq \frac{1}{9}(n-2)p^2]\leq \binom{n}{2}e^{-\frac{1}{200}(n-2)p^2}\end{displaymath}
Denote this number \begin{math}r_p(n)\end{math}, and note that \begin{math}\lim_{n\rightarrow \infty} r_p(n)=0\end{math}.\\
Next, assume that \begin{math}\forall u,v\in V(G)\end{math}, \begin{math}|V(G_{u,v})|>\frac{1}{9}(n-2)p^2\end{math}. Fix \begin{math}u,v\in V(G)\end{math}. Then \begin{math}G_{u,v}\end{math} is a random graph with more than \begin{math}\frac{1}{9}(n-2)p^2\end{math} vertices, and hence \begin{math}\mathbb{P}[Y_k(G_{u,v})< 1]\leq \mathbb{P}[Y_k(G')< 1]\end{math} where \begin{math}G'\in\mathcal{G}(\frac{1}{9}(n-2)p^2,p)\end{math>.
From theorem 2 in \cite{bollobas1988chromatic}, we have:
\begin{displaymath}\mathbb{P}[Y_k(G')<1]\leq \mathbb{P} \left[ Y_k(G')\leq\frac{9}{10}\left(\frac{1}{9}(n-2)p^2\right)^{3/2}\right]
\leq \exp\left[ -(1/100 + \alpha \sqrt{\frac{1}{9}(n-2)p^2})\right]\end{displaymath}
for some constant \begin{math}\alpha>0\end{math}. Hence, if we denote \begin{math}\mathcal{A}=\{G:\exists u,v\in V(G) \text{ s.t. } |V(G_{u,v})|\leq\frac{1}{9}(n-2)p^2\}\end{math}, from the union bound we have:
\begin{displaymath}
\mathbb{P} \left[ \exists u,v\in V(G)\text{ s.t. } Y_k(G_{u,v})<1 \mid G\not \in \mathcal{A}\right] \leq \binom{n}{2}\exp\left[ -(1/100 + \alpha \sqrt{\frac{1}{9}(n-2)p^2})\right]
\end{displaymath}
Denote the right side of this inequality by \begin{math}r'(n)\end{math}, and note that \begin{math}r'(n)\rightarrow 0\end{math} as \begin{math}n \rightarrow \infty\end{math}.
To conclude, we have that
\begin{align*}
\mathbb{P}[G \not \in \mathcal{C}]=\mathbb{P}[G\in \mathcal{A}\cap \mathcal{C}^c]+\mathbb{P}[G\in\mathcal{A}^c \cap\mathcal{C}^c]\leq\\ \leq\mathbb{P}[G\in \mathcal{A}]+\mathbb{P}[G\in \mathcal{A}^c]\mathbb{P}[G\in \mathcal{C}^c|G\in \mathcal{A}^c]=r_p(n)+(1-r_p(n))r'_p(n)
\end{align*}
and both these terms tend to 0 as \begin{math}n\end{math} tends to infinity.
\end{proof}
We can now prove:
\begin{theorem}
For a fixed \begin{math}k\in\mathbb{N}\end{math}, \begin{math}h_k\end{math} is weakly distinguishing.
\end{theorem}
\begin{proof}
Let \begin{math}\mathcal{C}\end{math} be the same as in the lemma. Similarly to the previous theorem, note that if \begin{math}G\in\mathcal{C}\end{math},
then for \begin{math}\lambda\in \mathbb{N}\end{math} any \begin{math}k\end{math}-harmonious colouring \begin{math}f:V(G) \rightarrow [\lambda]\end{math} of \begin{math}G\end{math} must assign
a different colour to every vertex (otherwise, if \begin{math}v,u \in V(G)\end{math} are such that
\begin{math}f(v)=f(u)\end{math}, and \begin{math}w_1,...,w_k\in V(G)\end{math} are such that \begin{math}\{u,w_1,...,w_k\}\end{math} and \begin{math}\{v,w_1,...,w_k\}\end{math}
induce a complete graph in \begin{math}G\end{math}, then \begin{math}f(\{u,w_1,...,w_k\})=f(\{v,w_1,...,w_k\})\end{math}),
and thus \begin{math}h_k(G,\lambda)=\lambda(\lambda-1)(\lambda-2)...(\lambda-n+1)\end{math}.
By the same reasoning as in the proof of theorem \ref{theorem harmonious}, \begin{math}h_k\end{math} is weakly distinguishing.
\end{proof}
\section{Conclusion}
We have shown for many graph properties \begin{math}Q\end{math} that the polynomials \begin{math}P_Q\end{math} are weakly distinguishing, including the well studied clique and independence polynomials. We have also shown that the harmonious and k-harmonious polynomials are weakly distinguishing.\\
Our results relied on the fact that the number of polynomials that can be the \begin{math}P_Q\end{math} polynomial of a graph is small, and on the fact that almost all graphs have properties that imply their harmonious and k-harmonious polynomials are trivial. This does not seem to be the case for the Tutte and the chromatic polynomials, so the original question of whether they, as well as the characteristic and matching
polynomials, are weakly distinguishing remains open.
\begin{problem}
Find a graph property $Q$ such that the fraction of $P_Q$ unique graphs is strictly positive.
\end{problem}
\begin{problem}
Let $P$ be a graph polynomial, $G,H$ two graphs, and write $G\sim_P H$ if $P(G)=P(H)$. What can be said about the sizes of the equivalence classes of $\sim_P$?
\end{problem}
\end{document} |
\begin{document}
\title{Entropy of automorphisms of ${\rm II}_1$-factors
arising from the dynamical systems theory}
\author{V.Ya. Golodets, S.V. Neshveyev}
\date{\it B. Verkin Institute for Low Temperature Physics and
Engineering, National Academy of Sciences of Ukraine,
47, Lenin Ave., 310164, Kharkov, Ukraine}
\maketitle
\begin{abstract}
Let a countable amenable group $G$ act freely and ergodically on
a Lebesgue space $(X, \mu)$, preserving the measure
$\mu$. If $T\in\mathop{\mbox{\rm Aut\,}}(X, \mu)$ is an automorphism of the equivalence
relation defined by $G$ then $T$ can be extended to an
automorphism $\alpha_T$ of the II$_1$-factor
$M=L^\infty(X,\mu)\rtimes G$. We prove that if $T$ commutes with
the action of $G$ then $H(\alpha_T)=h(T)$, where $H(\alpha_T)$ is
the Connes-St{\o}rmer entropy of $\alpha_T$, and
$h(T)$ is the Kolmogorov--Sinai entropy of $T$. We prove also that
for given $s$ and $t$, $0\le s\le t\le\infty$, there exists a
$T$ such that $h(T)=s$ and $H(\alpha_T)=t$.
\end{abstract}
\section*{Introduction}
Entropy is an important notion in classical statistical mechanics
and information theory. Initially the conception of entropy for
automorphism in the ergodic theory was introduced by Kolmogorov
and Sinai in 1958. This invariant proved to be extremely useful in
the classical dynamical systems theory and topological dynamics.
The extension of this notion onto quantum dynamical systems was
done by Connes, Narnhofer, St{\o}rmer and Thirring~\cite{CS,CNT}.
At the present time there are several other promising approaches
to entropy of $C^*$-dynamical systems~\cite{S, AF, V}.
An important trend in dynamical entropy is its computation for
various models. A lot of interesting results were obtained in this
field in recent years. We note several of them. St{\o}rmer,
Voiculescu~\cite{SV}, and the second author~\cite{N} computed the
entropy of Bogoliubov automorphisms of CAR and CCR algebras (see
also~\cite{BG,GN2}). Pimsner, Popa~\cite{PP}, Choda~\cite{Ch1}
computed the entropy of shifts of Temperley-Lieb algebras,
Choda~\cite{Ch2}, Hiai~\cite{H} and St{\o}rmer~\cite{St} computed
the entropy of canonical shifts. The first author,
St{\o}rmer~\cite{GS1,GS2}, Price~\cite{Pr} computed entropy for a
wide class of binary shifts.
In this paper we consider automorphisms of II$_1$ factors arising
from the dynamical systems theory. Let a countable group
$G$ act freely and ergodically on a Lebesgue space $(X, \mu)$ and
preserve $\mu$. Then one can construct the crossed product
$M=L^\infty(X, \mu)\rtimes G$, which, as well known, is a II$_1$-factor. If
$T\in\mathop{\mbox{\rm Aut\,}}(X, \mu)$ defines an automorphism of the ergodic
equivalence relation induced by $G$ then $T$ can be extended to an
automorphism $\alpha_T$ of $M$~\cite{FM}. It is a natural problem
to compute the dynamical entropy $H(\alpha_T)$ in the sense
of~\cite{CS} and to compare it with the Kolmogorov-Sinai entropy
$h(T)$ of $T$. It should be noted that this last problem is a part of a more
general problem. Namely, let $M$ be a II$_1$-factor,
$\alpha\in\mathop{\mbox{\rm Aut\,}} M$,
$A$ its $\alpha$-invariant Cartan subalgebra, $\alpha(A)=A$, then
it is natural to investigate when $H(\alpha)$ is equal to
$H(\alpha|_A)$.
These problems are studied in our paper. In Section~\ref{2} we
prove that if $T$ commutes with the action of $G$ then
$H(\alpha_T)=h(T)$. More generally, we prove that this result is
valid for crossed products of arbitrary algebras for entropies of
Voiculescu~\cite{V} and of Connes-Narnhofer-Thirring~\cite{CNT}.
In Section~\ref{3} we consider two examples to illustrate this
result. These examples give non-isomorphic ergodic automorphisms
of the hyperfinite ergodic equivalence relation with the same
entropy. In Section~\ref{4} we construct several examples showing
that the entropies $h(T)$ and $H(\alpha_T)$ can be distinct. These
systems are non-commutative analogs of dynamical systems of
algebraic origin (see \cite{A,Y,LSW,S}). In particular, some of our
examples are automorphisms of non-commutative tori.
In Section~\ref{5} we construct flows $T_t$
such that $H(\alpha_{T_1})>h(T_1)$. In particular, we show that
the values $h(T)$ and $H(\alpha_T)$ can be arbitrary.
\section{Computation of entropy of automorphisms of crossed
products}\label{2}
Let $(X, \mu)$ be a Lebesgue space, $G$ a countable amenable
group of automorphisms $S_g$, $g\in G$, of $(X,\mu)$ preserving
$\mu$, and $T$ an automorphism of $(X, \mu)$, $\mu\circ T=\mu$, such
that
$$
TS_g=S_gT,\quad g\in G.
$$
\begin{theorem} \label{2.1}
Let $(X, \mu)$, $G$ and $T$ be as above. Suppose $G$ acts freely
and ergodically on $(X, \mu)$. Then $M=L^\infty(X, \mu)\rtimes_SG$
is the hyperfinite II$_1$-factor with the trace-state $\tau$
induced by $\mu$. The automorphism $T$ can be canonically extended
to an automorphism $\alpha_T$ of $M$, and $$ H(\alpha_T)=h(T)\,,
$$ where $H(\alpha_T)$ is the Connes-St{\o}rmer entropy of
$\alpha_T$, and $h(T)$ is the Kolmogorov-Sinai entropy of $T$.
\end{theorem}
We will prove the following more general result.
\begin{theorem} \label{2.2}
Let $M$ be an approximately finite-dimensional W$^*$-algebra,
$\sigma$ its normal state, $T$ a $\sigma$-preserving automorphism.
Suppose a discrete amenable group $G$ acts on $M$ by automorphisms
$S_g$ that commute with $T$ and preserve $\sigma$. The
automorphism $T$ defines an automorphism $\alpha_T$ of
$M\rtimes_SG$, and the state $\sigma$ is extended to the dual
state which we continue to denote by $\sigma$. Then
(i) $hcpa_\sigma(\alpha_T)=hcpa_\sigma(T)$,
where $hcpa_\sigma$ is the completely positive approximation entropy of
Voiculescu \cite{V};
(ii) $h_\sigma(\alpha_T)=h_\sigma(T)$, where $h_\sigma$ is the
dynamical entropy of Connes-Narnhofer-Thirring~\cite{CNT}.
\end{theorem}
Since CNT-entropy coincides with KS-entropy in the classical case, and with
CS-entropy for tracial $\sigma$ and approximately finite-dimensional $M$,
Theorem~\ref{2.1} follows from Theorem~\ref{2.2}.
To prove Theorem~\ref{2.2} we will generalize a construction
of Voiculescu~\cite{V}.
\begin{lemma} \label{2.3}
Let $B$ be a C$^*$-algebra, $x_1,\ldots,x_n\in B$. Then the mapping
$\Psi\colon{\rm Mat}_n({\mathbb C})\otimes B\to B$,
$$
\Psi(e_{ij}\otimes b)=x_ibx^*_j,
$$
is completely positive.
\end{lemma}
\begin{verif}{}
Consider the element $V\in{\rm Mat}_n(B)={\rm Mat}_n({\mathbb C})\otimes B$,
$$
V=\pmatrix{x_1 & \ldots & x_n\cr
0 & \ldots & 0 \cr
& \ldots & \cr
0 & \ldots & 0 \cr}.
$$
Consider also the projection $p=e_{11}\otimes 1\in{\rm Mat}_n({\mathbb C})\otimes B$.
Then $\Psi$ is the mapping
${\rm Mat}_n(B)\to p{\rm Mat}_n(B)p=B$, $x\mapsto VxV^*$.
\end{verif}
Let $\lambda$ be the canonical representation of $G$ in $M\rtimes
G$, so that $({\rm Ad}\,\lambda(g))(a)=S_g(a)$ for $a\in M$.
\begin{lemma} \label{2.4}
For any finite subset $F$ of $G$,
there exist normal unital completely positive mappings
$I_F\colon B(l^2(F))\otimes M\to M\rtimes G$ and
$J_F\colon M\rtimes G\to B(l^2(F))\otimes M$ such that
\begin{eqnarray*}
I_F(e_{g,h}\otimes a)
&=&{1\over|F|}\lambda(g)a\lambda(h)^*
={1\over|F|}\lambda(gh^{-1})S_h(a),\\
J_F(\lambda(g)a)
&=&\sum_{h\in F\cap g^{-1}F}e_{gh,h}\otimes S_{h^{-1}}(a),\\
(I_F\circ J_F)(\lambda(g)a)
&=&{|F\cap g^{-1}F|\over|F|}\lambda(g)a,\\
\sigma\circ I_F
&=&{\rm tr}_F\otimes\sigma,\ \ \alpha_T\circ I_F=I_F\circ({\rm id}\otimes T),\\
({\rm tr}_F\otimes\sigma)\circ J_F
&=&\sigma,\ \ ({\rm id}\otimes T)\circ J_F=J_F\circ\alpha_T,
\end{eqnarray*}
where ${\rm tr}_F$ is the unique tracial state on $B(l^2(F))$.
\end{lemma}
\begin{verif}{}
The complete positivity of $I_F$ follows from Lemma~\ref{2.3}.
Consider $J_F$. Suppose that $M\subset B(H)$, and consider
the regular representation of $M\rtimes G$ on $l^2(G)\otimes H$:
$$
\lambda(g)(\delta_h\otimes\xi)=\delta_{gh}\otimes\xi, \ \
a(\delta_h\otimes\xi)=\delta_h\otimes S_{h^{-1}}(a)\xi\ \ (a\in M).
$$
Let $P_F$ be the projection onto $l^2(F)\otimes H$. Then
a direct computation shows that the mapping $J_F(x)=P_FxP_F$,
$x\in M\rtimes G$, has the form written above. All other assertions follow
immediately.
\end{verif}
\begin{verif}{ of Theorem~\ref{2.2}}
\noindent (i) Since there exists a $\tau$-preserving conditional expectation
$M\rtimes G\to M$, we have $hcpa_\sigma(\alpha_T)\ge hcpa_\sigma(T)$.
To prove the opposite inequality we have to show that
$hcpa_\sigma(\alpha_T,\omega)\le hcpa_\sigma(T)$ for any finite subset
$\omega$ of $M\rtimes G$. Fix $\varepsilon>0$. We can find a finite subset $F$ of
$G$ such that $||(I_F\circ J_F)(x)-x||_\sigma<\varepsilon$ for any $x\in\omega$.
Let $(\psi,\phi,B)\in{\rm CPA}(B(l^2(F))\otimes M,{\rm tr}_F\otimes\sigma)$.
Then $(I_F\circ\psi,\phi\circ J_F,B)\in{\rm CPA}(M\rtimes G,\sigma)$.
Suppose
$$
||(\psi\circ\phi)(J_F(x))-J_F(x)||_{{\rm tr}_F\otimes\sigma}<\delta
$$
for some $x\in\alpha^k_T(\omega)$ and $k\in{\mathbb N}$. Then
$$
||(I_F\circ\psi\circ\phi\circ J_F)(x)-x||_\sigma
\le||(\psi\circ\phi)(J_F(x))-J_F(x)||_{\sigma\circ I_F}
+||(I_F\circ J_F)(x)-x||_\sigma<\delta+\varepsilon,
$$ where we have used the facts that $\sigma\circ
I_F={\rm tr}_F\otimes\sigma$ and that $\alpha_T$ commutes with
$I_F\circ J_F$. Since $J_F\circ\alpha_T=({\rm id}\otimes T)\circ J_F$,
we infer that $$
{\rm rcp}_\sigma(\omega\cup\alpha_T(\omega)\cup\ldots\cup\alpha^{n-1}_T(\omega);
\delta+\varepsilon)\le{\rm rcp}_{{\rm tr}_F\otimes\sigma}(J_F(\omega)\cup
\ldots\cup({\rm id}\otimes T)^{n-1}(J_F(\omega));\delta),
$$
so that (for $\delta<\varepsilon$)
\begin{eqnarray*}
hcpa_\sigma(\alpha_T,\omega;2\varepsilon)
&\le& hcpa_\sigma(\alpha_T,\omega;\varepsilon+\delta)
\le hcpa_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T,J_F(\omega);\delta)\\
&\le& hcpa_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T)=hcpa_\sigma(T),
\end{eqnarray*}
where the last equality follows from the subadditivity of the
entropy~\cite{V}. Since $\varepsilon>0$ was arbitrary, the proof of the
inequality $hcpa_\sigma(\alpha_T,\omega)\le hcpa_\sigma(T)$ is
complete.
\noindent (ii) We always have $h_\sigma(\alpha_T)\ge h_\sigma(T)$.
To prove the opposite inequality consider a channel $\gamma\colon
B\to M\rtimes G$, i.~e., a unital completely positive mapping of a
finite-dimensional C$^*$-algebra~$B$. We have to prove that
$h_\sigma(\alpha_T;\gamma)\le h_\sigma(T)$. Fix $\varepsilon>0$. We can
choose $F$ such that
$$
||(I_F\circ J_F\circ\gamma-\gamma)(x)||_\sigma\le\varepsilon||x||
\ \ \hbox{for any}\ x\in B.
$$
By~\cite[Theorem IV.3]{CNT},
\begin{equation} \label{e2.1}
{1\over n}H_\sigma(\gamma,\alpha_T\circ\gamma,\ldots,\alpha^{n-1}_T\circ\gamma)
\le\delta+{1\over n}
H_\sigma(I_F\circ J_F\circ\gamma,\alpha_T\circ I_F\circ J_F\circ\gamma,
\ldots,\alpha^{n-1}_T\circ I_F\circ J_F\circ\gamma),
\end{equation}
where $\delta=\delta(\varepsilon,{\rm rank}\, B)\to0$ as $\varepsilon\to0$. Since
$\sigma\circ I_F={\rm tr}_F\otimes\sigma$, it is easy to see from
the definition of mutual entropy $H_\sigma$ \cite{CNT} that
\begin{equation} \label{e2.2}
H_\sigma(I_F\circ J_F\circ\gamma,I_F\circ J_F\circ\alpha_T\circ\gamma,
\ldots,I_F\circ J_F\circ\alpha^{n-1}_T\circ\gamma)
\le H_{{\rm tr}_F\otimes\sigma}(J_F\circ\gamma,J_F\circ\alpha_T\circ\gamma,
\ldots,J_F\circ\alpha^{n-1}_T\circ\gamma)
\end{equation}
Since $I_F\circ J_F$ commutes with $\alpha_T$, and
$J_F\circ\alpha_T=({\rm id}\otimes T)\circ J_F$, we infer from (\ref{e2.1}) and
(\ref{e2.2}) that
$$
h_\sigma(\alpha_T;\gamma)\le\delta
+h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T;J_F\circ\gamma)
\le\delta+h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T).
$$
Since we could choose $F$ such that $\delta$ was
arbitrary small, we see that it suffices to prove that
$h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T)=h_\sigma(T)$. For abelian $M$
this is proved by standard arguments, using
\cite[Corollary VIII.8]{CNT}. To handle the general case we need the following
lemma.
\begin{lemma} \label{2.5}
For any finite-dimensional C$^*$-algebra $B$, any state $\phi$ of $B$, and
any positive linear functional $\psi$ on ${\rm Mat}_n({\mathbb C})\otimes B$, we have
$$
S({\rm tr}_n\otimes\phi,\psi)\le S(\phi,\psi|_B)+2\psi(1)\log n.
$$
\end{lemma}
\begin{verif}{}
By \cite[Theorem 1.13]{OP},
$$
S({\rm tr}_n\otimes\phi,\psi)=S(\phi,\psi|_B)+S(\psi\circ E,\psi),
$$
where $E={\rm tr}_n\otimes{\rm id}\colon{\rm Mat}_n({\mathbb C})\otimes B\to B$ is the
(${\rm tr}_n\otimes\phi$)-preserving conditional expectation (note that we adopt
the notations of~\cite{CNT}, so we denote by $S(\omega_1,\omega_2)$ the
quantity which is denoted by $S(\omega_2,\omega_1)$ in~\cite{OP}). By the
Pimsner-Popa inequality~\cite[Theorem 2.2]{PP}, we have
$$
E(x)\ge{1\over n^2}x\ \ \hbox{for any}\ \ x\in{\rm Mat}_n({\mathbb C})\otimes B,\ x\ge0.
$$
In particular, $\psi\circ E\ge{1\over n^2}\psi$, whence
$S(\psi\circ E,\psi)\le2\psi(1)\log n$.
\end{verif}
Since $M$ is an AFD-algebra, to compute the entropy of ${\rm id}\otimes T$ it
suffices to consider subalgebras of the form $B(l^2(F))\otimes B$, where
$B\subset M$. From Lemma~\ref{2.5} and the definitions~\cite{CNT} we
immediately get
$$
h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T;B(l^2(F))\otimes B)\le h_\sigma(T;B)
+2\log|F|.
$$
Hence
$h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T)\le h_\sigma(T)+2\log|F|$.
Applying this inequality to $T^m$, we obtain
$$
h_{{\rm tr}_F\otimes\sigma}(({\rm id}\otimes T)^m)\le h_\sigma(T^m)+2\log|F|
\ \ \forall m\in{\mathbb N}.
$$
But since $M$ is an AFD-algebra, we have
$h_{{\rm tr}_F\otimes\sigma}(({\rm id}\otimes T)^m)
=m\cdot h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T)$ and
$h_\sigma(T^m)=m\cdot h_\sigma(T)$. So dividing the above inequality by $m$,
and letting $m\to\infty$, we obtain
$h_{{\rm tr}_F\otimes\sigma}({\rm id}\otimes T)\le h_\sigma(T)$, and the proof of
Theorem is complete.
\end{verif}
\noindent{\bf Remarks.}
\noindent(i) For any AFD-algebra $N$ and any normal state $\omega$ of $N$,
we have $h_{\omega\otimes\sigma}({\rm id}\otimes T)=h_\sigma(T)$. Indeed, we may
suppose that $N$ is finite-dimensional and $\omega$ is faithful (because
if $p$ is the support of $\omega$, then $h_{\omega\otimes\sigma}({\rm id}\otimes T)
=h_{\omega\otimes\sigma}(({\rm id}\otimes T)|_{pNp\otimes M})$). Now the only
thing we need is a generalization of the Pimsner-Popa inequality. Let
$p_1,\ldots,p_m$ be the atoms of a maximal abelian subalgebra of the
centralizer of the state $\omega$. Then
$$
(\omega\otimes{\rm id})(x)\ge\left(\sum^m_{i=1}{1\over\omega(p_i)}\right)^{-1}x
\ \ \hbox{for any}\ \ x\in N\otimes M,\ x\ge0,
$$
by \cite[Theorem 4.1 and Proposition 5.4]{L}.
\noindent(ii) By Corollary 3.8 in \cite{V}, $hcpa_\mu(T)=h(T)$ for ergodic
$T$. For non-ergodic $T$, the entropies can be distinct. Indeed, let $X_1$
be a $T$-invariant measurable subset of $X$, $\lambda=\mu(X_1)$, $0<\lambda<1$.
Set $\mu_1=\lambda^{-1}\mu|_{X_1}$, $T_1=T|_{X_1}$, $X_2=X\backslash X_1$,
$\mu_2=(1-\lambda)^{-1}\mu|_{X_2}$, $T_2=T|_{X_2}$. It is easy to see that
$h(T)=\lambda h(T_1)+(1-\lambda)h(T_2)$. On the other hand, it can be proved
that
$$
hcpa_\mu(T)=\max\{hcpa_{\mu_1}(T_1),hcpa_{\mu_2}(T_2)\}.
$$
So if $h(T_1),h(T_2)<\infty$, $h(T_1)\ne h(T_2)$, then $h(T)<hcpa_\mu(T)$.
To obtain an invariant which coincides with KS-entropy in the classical case,
one can modify Voiculescu's definition replacing ${\rm rank}\, B$ with
$\exp S(\sigma\circ\psi)$ in~\cite[Definition 3.1]{V}. Theorem~\ref{2.2}
remains true for this modified entropy.
\section{Examples}\label{3}
We present two examples to illustrate Theorem~\ref{2.1}. These examples
give non-isomorphic ergodic automorphisms of amenable equivalence
relations with the same KS-entropy.
Let us first describe a general construction.
\begin{proposition} \label{3.1}
Let $S_0$, $S_1$, $S_2$ be ergodic automorphisms of $(X,\mu)$ such
that $S_0$ commutes with $S_1$ and $S_2$, and $S_1$ is conjugate
with neither $S_2$, nor $S^{-1}_2$ by an automorphism commuting
with $S_0$. Set $M_i=L^\infty(X,\mu)\rtimes_{S_i}{\mathbb Z}$, $i=1,2$, and
let $\alpha_i$ be the automorphism of $M_i$ induced by $S_0$. Then
there is no isomorphism $\phi$ of $M_1$ onto $M_2$ such that
$\phi\circ\alpha_1=\alpha_2\circ\phi$ and
$\phi(L^\infty(X,\mu))=L^\infty(X,\mu)$.
\end{proposition}
\begin{verif}{}
Suppose such a $\phi$ exists. Let $U_i\in M_i$ be the unitary corresponding
to $\alpha_i$, $i=1,2$, $A=L^\infty(X,\mu)\subset M_1$.
Set $U=\phi^{-1}(U_2)$.
Since $U$ is a unitary operator from $M_1$ such that $({\rm Ad}\, U)(A)=A$,
it is easy to check that $U$ has the form
$$
U=\sum_{i\in{\mathbb Z}}a_iU_1^i E_i,\quad a_i\in{\mathbb T},
$$
where $\{E_i\} $ is a family of projections from $A$, $E_iE_j=0$, for
$i\not=j$, $\sum\limits_iE_i=\sum\limits_iU_1^iE_iU_1^{-i}=I$.
Since $\alpha_1(U)=U$, we have $\alpha_1(E_i)=E_i$, $i\in{\mathbb Z}$.
But $S_0$ is ergodic, therefore $E_i=I$ or $E_i=0$. Hence
$U=a_iU_1^i$ for some $i\in{\mathbb Z}$ and $a_i\in{\mathbb T}$. Since $\phi$ is an isomorphism,
we have either $i=-1$, or $i=1$. We see that $\phi|_{L^\infty(X,\mu)}$ is an
automorphism that commutes with $S_0$ and conjugates $S_2$ with either
$S^{-1}_1$, or $S_1$.
\end{verif}
\noindent{\bf Remark.} It follows from Proposition~\ref{3.1} that $S_0$
defines non-isomorphic automorphisms of the ergodic equivalence
relations induced by $S_1$ and $S_2$ on $X$ correspondingly, despite
of $H(\alpha_1)=H(\alpha_2)=h(S_0)$.
\begin{example} \label{3.2}
\rm
Let $X=[0, 1]$ be the unit interval,
$\mu$ the Lebesgue measure on $X$, $t_0$, $t_1$ and $t_2$ irrational numbers
from $[0, 1]$ such that $t_2\ne t_1,\,1-t_1$.
Consider the shifts $S_ix=x+t_i\pmod1$, $x\in[0, 1]$. Any automorphism
of $X$ commuting with $S_0$ commutes with $S_1$ and $S_2$. Since
$S_1\ne S^{\pm1}_2$, Proposition~\ref{3.1} is applicable. Note that
$h(S_0)=0$.
\end{example}
\begin{example} \label{3.3}
\rm Let $(X, \mu)$ be a Lebesgue space, $T_t$ a Bernoulli flow on
$(X, \mu)$ with $h(T_1)=\log2$ \cite{O}. Choose $t_i\in{\mathbb R}$,
$t_i\ne0$ ($i=0,1,2$), $t_1\ne\pm t_2$, and set $S_i=T_{t_i}$.
Then $h(S_1)\ne h(S_2)$, and we can apply Proposition~\ref{3.1}.
\end{example}
\section{Entropy of automorphisms and their restrictions to a
Cartan subalgebra} \label{4}
Let $M$ be a II$_1$-factor, $A$ its Cartan subalgebra,
$\alpha\in\mathop{\mbox{\rm Aut\,}} M$ such that $\alpha(A)=A$. We consider cases when
$H(\alpha)>H(\alpha|_A)$.
Suppose a discrete abelian group $G$ acts freely and ergodically by
automorphisms $S_g$ on $(X,\mu)$, $\beta$ an automorphism of $G$, and
$T$ an automorphism of $(X,\mu)$ such that $TS_g=S_{\beta(g)}T$. Then
$T$ induces an automorphism $\alpha_T$ of $M=L^\infty(X,\mu)\rtimes_SG$.
Explicitly,
$$
\alpha_T(f)(x)=f(T^{-1}x) \ \hbox{for}\ f\in L^\infty(X,\mu),
\ \alpha_T(\lambda(g))=\lambda(\beta(g)).
$$
The algebra $A=L^\infty(X,\mu)$ is a Cartan subalgebra of $M$. On the other
hand, the operators $\lambda(g)$ generate a maximal abelian subalgebra
$B\cong L^\infty(\hat G)$ of $M$, and $\alpha_T|_B=\hat\beta$, the dual
automorphism of $\hat G$. We have
$$
H(\alpha_T)\ge\max\{h(T),h(\hat\beta)\},
$$
so if $h(\hat\beta)>h(T)$, then $H(\alpha_T)>H(\alpha_T|_A)$.
To construct such examples we consider systems of algebraic origin.
Let $G_1$ and $G_2$ be discrete abelian groups, and $T_1$ an
automorphism of $G_1$. Suppose there exists an embedding $l\colon
G_2\hookrightarrow\hat G_1$ such that $l(G_2)$ is a dense $\hat
T_1$-invariant subgroup. Set $T_2=\hat T_1|_{G_2}$. The group
$G_2$ acts by translations on $\hat G_1$
($g_2\cdot\chi_1=\chi_1+l(g_2)$), and we fall into the situation
described above (with $X=\hat G_1$, $G=G_2$, $T=\hat T_1$ and
$\beta=T_2$).
The roles of $G_1$ and $G_2$ above are almost symmetric. Indeed, to be
given an embedding $G_2\hookrightarrow\hat G_1$ with dense range is just the
same as to be given a non-degenerate pairing
$\langle\cdot\,,\,\cdot\rangle\colon G_1\times G_2\to{\mathbb T}$, then the equality
$T_2=\hat T_1|_{G_2}$ means that this pairing is $T_1\times T_2$-invariant.
The pairing gives rise to an embedding $r\colon G_1\hookrightarrow\hat G_2$.
Then $G_1$ acts on $\hat G_2$ by translations $g_1\cdot\chi_2=\chi_2-r(g_1)$,
and
$L^\infty(\hat G_1)\rtimes G_2\cong G_1\ltimes L^\infty(\hat G_2)$. In fact,
both algebras are canonically isomorphic to the twisted group W$^*$-algebra
$W^*(G_1\times G_2,\omega)$, where $\omega$ is the bicharacter defined by
$$
\omega((g'_1,g'_2),(g''_1,g''_2))=\langle g''_1,g'_2\rangle.
$$
Then $\alpha_T$ is nothing else than the automorphism induced by the
$\omega$-preserving automorphism $T_1\times T_2$.
Let $R={\mathbb Z}[t,t^{-1}]$ be the ring of Laurent polynomials over ${\mathbb Z}$,
$f\in{\mathbb Z}[t]$, $f\ne 1$, a polynomial whose irreducible factors are
not cyclotomic, equivalently, $f$ has no roots of modulus 1. Fix
$n\in\{2,3,\ldots,\infty\}$. Set $G_1=R/(f^\sim)$ and
$G_2=\mathop{\oplus}\limits^n_{k=1}R/(f)$, where
$f^\sim(t)=f(t^{-1})$. Let $T_i$ be the automorphism of $G_i$ of
multiplication by $t$. Let $\chi$ be a character of $G_2$. Then
the mapping $R\ni f_1\mapsto f_1(\hat T_2)\chi\in\hat G_2$ defines
an equivariant homomorphism $G_1\to\hat G_2$. In other words, if
$\chi=(\chi_1,\ldots,\chi_n)\in\hat G_2\subset\hat R^n$, then the
pairing is given by $$ \langle
f_1,(g_1,\dots,g_n)\rangle=\prod^n_{k=1}\chi_k(f^\sim_1\cdot g_k),
$$ where $(f^\sim_1\cdot g_k)(t)=f_1(t^{-1})g_k(t)$. This pairing
is non-degenerate iff the orbit of $\chi$ under the action of
$\hat T_2$ generates a dense subgroup of $\hat G_2$. Since $T_2$
is aperiodic, the dual automorphism is ergodic. Hence the orbit is
dense for almost every choice of $\chi$.
Now let us estimate entropy. First, by Yuzvinskii's formula \cite{Y,LW},
$h(\hat T_1)=m(f)$, $h(\hat T_2)=n\cdot m(f)$, where $m(f)$ is the
logarithmic Mahler measure of $f$,
$$
m(f)=\int^1_0\log|f(e^{2\pi is})|ds=\log|a_m|
+\sum_{j\colon|\lambda_j|>1}\log|\lambda_j|,
$$
where $a_m$ is the leading coefficient of $f$, and $\{\lambda_j\}_j$ are
the roots of $f$. Now suppose that the coefficients of the leading
and the lowest terms of $f$ are equal to 1. Then $G_1\times G_2$ is a
free abelian group of rank $(n+1)\deg f$, and by a result of
Voiculescu~\cite{V} we have
$H(\alpha_T)\le h(\hat T_1\times\hat T_2)=(n+1)m(f)$.
Note also that since the automorphism $T_1\times T_2$ is aperiodic,
the automorphism $\alpha_T$ is mixing.
Let us summarize what we have proved.
\begin{theorem} \label{4.1}
For given $n\in\{2,3,\ldots,\infty\}$ and a polynomial $f\in{\mathbb Z}[t]$, $f\ne1$,
whose
coefficients of the leading and the lowest terms are equal to 1 and which
has no roots of modulus 1, there exists a mixing automorphism $\alpha$
of the hyperfinite II$_1$-factor and an $\alpha$-invariant Cartan subalgebra
$A$ such that
$$
H(\alpha|_A)=m(f),\ \ n\cdot m(f)\le H(\alpha)\le(n+1)m(f).
$$
\end{theorem}
The possibility of constructing on this way systems with arbitrary
values $H(\alpha|_A)<H(\alpha)$ is closely related to the
question, whether 0 is a cluster point of the set
$\{m(f)\,|\,f\in{\mathbb Z}[t]\}$ (note that it suffices to consider
irreducible polynomials whose leading coefficients and constant
terms are equal to 1). This question is known as Lehmer's problem,
and there is an evidence that the answer is {\it negative} (see
\cite{LSW} for a discussion).
In estimating the entropy above we used the result of Voiculescu
stating that the entropy of an automorphism of a non-commutative
torus is not greater than the entropy of its abelian counterpart.
It is clear that this result should be true for a wider class of
systems. Consider the most simple case where the polynomial $f$ is
a constant.
\begin{example} \label{4.2}
\rm
Let $f=2$ and $n=2$. Then
$G_1=R/(2)\cong\mathop\oplus\limits_{k\in{\mathbb Z}}{\mathbb Z}/2{\mathbb Z}$, $G_2=G_1\oplus G_1$,
$T_1$ is the shift to the right, $T_2=T_1\oplus T_1$. Let
$G_1(0)={\mathbb Z}/2{\mathbb Z}\subset G_1$ and $G_2(0)={\mathbb Z}/2{\mathbb Z}\oplus{\mathbb Z}/2{\mathbb Z}\subset G_2$ be the
subgroups sitting at the 0th place. Set
$$
G^{(n)}_i=G_i(0)\oplus T_iG_i(0)\oplus\ldots\oplus T^n_iG_i(0).
$$
Then $\displaystyle
H(\alpha_T)\le hcpa_\tau(\alpha_T)\le\lim_{n\to\infty}{1\over n}\log{\rm rank}\,
C^*(G^{(n)}_1\times G^{(n)}_2,\omega)\le 3\log2$, so (for
$A=L^\infty(\hat G_1)$)
$$
H(\alpha_T|_A)=\log2\ \ \hbox{and}\ \ 2\log2\le H(\alpha_T)\le 3\log2.
$$
The actual value of $H(\alpha_T)$ probably depends on the choice of the
character $\chi\in\hat G_2$. We want to show that $H(\alpha_T)=2\log2$ for
some special choice of $\chi$. For this it suffices to require the
pairing
$\langle\cdot\,,\,\cdot\rangle|_{G^{(n)}_1\times G^{(n)}_2}$
be non-degenerate
in the first variable for any $n\ge0$ (so that $C^*(G^{(n)}_2)$ is a maximal
abelian subalgebra of $C^*(G^{(n)}_1\times G^{(n)}_2,\omega)$, and the rank of
the latter algebra is equal to $4(n+1)$).
The embedding $G_1\hookrightarrow\hat G_2$ is given by
$$
g_1\mapsto\prod_{n\in{\mathbb Z}:g_1(n)\ne0}\hat T^n_2\chi, \ \
g_1=(g_1(n))_n\in\mathop{\oplus}_{n\in{\mathbb Z}}{\mathbb Z}/2{\mathbb Z}.
$$
So we must choose $\chi$ in a way such that the character
$\prod^m_{k=1}\hat T^{n_k}_2\chi$ is non-trivial on $G^{(n)}_2$ for any
$0\le n_1<\ldots< n_m\le n$. Identify $\hat G_2$ with
$\prod_{n\in{\mathbb Z}}({\mathbb Z}/2{\mathbb Z}\oplus{\mathbb Z}/2{\mathbb Z})$. Then $\hat T_2$ is the shift to the
right, and we may take any $\chi=(\chi_n)_n$ such that
(i) $\chi_n=0$ for $n<0$, $\chi_0\ne0$;
(ii) the group generated by $\hat T^n_2\chi$ is dense in $\hat G_2$.
\end{example}
Finally, we will show that it is possible to construct systems with positive
entropy, which have zero entropy on a Cartan subalgebra.
\begin{example} \label{4.3}
\rm Let $p$ be a prime number, $p\ne2$, $\hat G_1={\mathbb Z}_p$ (the group
of $p$-adic integers), $G_2=\cup_{n\in{\mathbb N}}2^{-n}{\mathbb Z}\subset\hat G_1$,
$\hat T_1$ and $T_2$ the automorphisms of multiplication by $2$.
The group $G_1$ is the inductive limit of the groups ${\mathbb Z}/p^n{\mathbb Z}$,
and $T_1$ acts on them as the automorphism of division by~$2$.
Hence $$
H(\alpha_T|_A)=\lim_{n\to\infty}H(\alpha_T|_{C^*({\mathbb Z}/p^n{\mathbb Z})})=0. $$
Since $G_2=R/(t-2)$, we have $h(\hat T_2)=\log2$, so
$H(\alpha_T)\ge\log2$. We state that $$
H(\alpha_T)=hcpa_\tau(\alpha_T)=\log2. $$ The automorphism
$T^{p^{n-1}(p-1)}_1$ is identical on ${\mathbb Z}/p^n{\mathbb Z}$. Since $$
W^*({\mathbb Z}/p^n{\mathbb Z}\times G_2,\omega)={\mathbb Z}/p^n{\mathbb Z}\ltimes L^\infty(\hat G_2),
$$ by Theorem \ref{2.2} we infer $$
hcpa_\tau(\alpha^{p^{n-1}(p-1)}_T|_{W^*({\mathbb Z}/p^n{\mathbb Z}\times
G_2,\omega)})
=h(\hat T^{p^{n-1}(p-1)}_2),
$$
whence $hcpa_\tau(\alpha_T|_{W^*({\mathbb Z}/p^n{\mathbb Z}\times G_2,\omega)})=\log2$, and
$$
hcpa_\tau(\alpha_T)=\lim_{n\to\infty}
hcpa_\tau(\alpha_T|_{W^*({\mathbb Z}/p^n{\mathbb Z}\times G_2,\omega)})=\log2.
$$
\end{example}
\section{Flows on II$_1$-factors with invariant Cartan subalgebras} \label{5}
Using examples of previous sections and the construction of
associated flow we will construct systems with arbitrary values of
$H(\alpha|_A)$ and $H(\alpha)$ ($0\le H(\alpha|_A)\le H(\alpha)\le\infty$).
Suppose a discrete amenable group $G$ acts freely and ergodically
by measure-preserving transformations $S_g$ on $(X,\mu)$, $T$ an
automorphism of $(X,\mu)$ and $\beta$ an automorphism of $G$ such
that $TS_g=S_{\beta(g)}T$. Consider the flow $F_t$ associated with
$T$. So $Y={\mathbb R}/{\mathbb Z}\times X$, $d\nu=dt\times d\mu$, $$
F_t(\dot{r},x)=(\dot{r}+\dot{t},T^{[r+t]}x)\ \ \hbox{for}\ \
r\in[0,1),\
x\in X,
$$
where $t\mapsto\dot{t}$ is the factorization mapping ${\mathbb R}\to{\mathbb R}/{\mathbb Z}$. The
semidirect product group
$G_0=G\times_\beta{\mathbb Z}$ acts on $(X,\mu)$. This action is ergodic. It is also
free, if
\begin{equation} \label{e5.1}
\hbox{there exist no }g\in G\hbox{ and no }n\in{\mathbb N}\hbox{ such that }S_g=T^n
\hbox{ on a set of positive measure.}
\end{equation}
Let $\Gamma$ be a countable dense subgroup of ${\mathbb R}/{\mathbb Z}$, it acts by
translations on ${\mathbb R}/{\mathbb Z}$. Set ${\cal G}=\Gamma\times G_0$. The group ${\cal G}$
is amenable. It acts freely and ergodically on $(Y,\nu)$. The
corresponding equivalence relation is invariant under the flow, so
we obtain a flow $\alpha_t$ on $L^\infty(Y,\nu)\rtimes{\cal G}$. Compute
its entropy.
Let $\alpha_T$ be the automorphism of $L^\infty(X,\mu)\rtimes G$
defined by $T$. We state that
\begin{equation} \label{e5.3}
H(\alpha_t)=|t|H(\alpha_T),\ \ hcpa_\tau(\alpha_t)=|t|hcpa_\tau(\alpha_T),
\ \ \hbox{and}\ \ H(\alpha_t|_{L^\infty(Y,\nu)})=|t|h(T).
\end{equation}
Since $h(F_t)=|t|h(F_1)=|t|h({\rm id}\times T)$, the last equality in~(\ref{e5.3})
is evident. To prove the first two note that
$$
H(\alpha_t)=|t|H(\alpha_1)\ \ \hbox{and}\ \
hcpa_\tau(\alpha_t)=|t|hcpa_\tau(\alpha_1)
$$
(see \cite[Proposition 10.16]{OP} for the first equality, the
second is proved analogously). We have
$$
L^\infty(Y,\nu)\rtimes{\cal G}=(L^\infty({\mathbb R}/{\mathbb Z})\rtimes\Gamma)\otimes
(L^\infty(X,\mu)\rtimes G_0),\ \ \alpha_1={\rm id}\otimes\tilde\alpha_T,
$$ where $\tilde\alpha_T$ is the automorphism of
$L^\infty(X,\mu)\rtimes G_0$ defined by $T$. Since completely
positive approximation entropy is subadditive and monotone
\cite{V}, we have
$hcpa_\tau({\rm id}\otimes\tilde\alpha_T)=hcpa_\tau(\tilde\alpha_T)$.
We have also $H({\rm id}\otimes\tilde\alpha_T)=H(\tilde\alpha_T)$ by
Remark following the proof of Theorem~\ref{2.2}. Since $$
L^\infty(X,\mu)\rtimes
G_0=(L^\infty(X,\mu)\rtimes_SG)\rtimes_{\alpha_T}{\mathbb Z}, $$ we obtain
$hcpa_\tau(\tilde\alpha_T)=hcpa_\tau(\alpha_T)$ and
$H(\tilde\alpha_T)=H(\alpha_T)$ by virtue of Theorem~\ref{2.2}. So
$hcpa_\tau(\alpha_1)=hcpa_\tau(\alpha_T)$ and
$H(\alpha_1)=H(\alpha_T)$,
and the proof of the equalities~(\ref{e5.3}) is complete.
\begin{theorem} \label{5.1}
For any $s$ and $t$, $0\le s< t\le\infty$, there exist an automorphism
$\alpha$ of the hyperfinite II$_1$-factor and an $\alpha$-invariant Cartan
subalgebra~$A$ such that
$$
H(\alpha|_A)=s\ \ \hbox{and} \ \ H(\alpha)=t.
$$
\end{theorem}
\begin{verif}{}{}
Consider a system from Example~\ref{4.3}. Then the condition
(\ref{e5.1}) is satisfied, so the construction above leads to a
flow $\alpha_t$ and an $\alpha_t$-invariant Cartan subalgebra
$A_1$ such that $$ H(\alpha_t|_{A_1})=0 \ \ \hbox{and}\ \
H(\alpha_t)=hcpa_\tau(\alpha_t)
=|t|\log2.
$$
As in Example~\ref{3.3}, consider a Bernoulli flow $S_t$ on $(X,\mu)$ with
$h(S_1)=\log2$. Then for the corresponding flow $\beta_t$ on
$L^\infty(X,\mu)\rtimes_{S_1}{\mathbb Z}$ we have (with $A_2=L^\infty(X,\mu)$)
$$
H(\beta_t|_{A_2})=H(\beta_t)=hcpa_\tau(\beta_t)=|t|\log2.
$$
Since Connes-St{\o}rmer' entropy is superadditive~\cite{SV} and Voiculescu's
entropies are subadditive, we conclude that
$$
H((\alpha_t\otimes\beta_s)|_{A_1\otimes A_2})=|s|\log2,\ \
H(\alpha_t\otimes\beta_s)=H(\alpha_t)+H(\beta_s)=(|t|+|s|)\log2.
$$
Finally, consider an infinite tensor product of systems from
Example~\ref{4.3}. Thus we obtain an automorphism $\gamma$ and a
$\gamma$-invariant Cartan subalgebra~$A_3$ such that
$$
H(\gamma|_{A_3})=0\ \ \hbox{and}\ \ H(\gamma)=\infty.
$$
Then $H((\beta_s\otimes\gamma)|_{A_2\otimes A_3})=|s|\log2$,
$H(\beta_s\otimes\gamma)=\infty$.
\end{verif}
\section{Final remarks} \label{6}
\subsection{}
Let $p_1$ and $p_2$ be prime numbers, $p_i\ge3$,
$i=1,2$. Construct automorphisms $\alpha_1$ and $\alpha_2$ as in
Example~\ref{4.3}.
\begin{proposition} If $p_1\ne p_2$, then $\alpha_1$ and $\alpha_2$ are not
conjugate as automorphisms of the hyperfinite II$_1$-factor,
though $H(\alpha_1)=H(\alpha_2)=\log 2$.
\end{proposition}
\begin{verif}{}
Indeed, the automorphisms define
unitary operators $U_i$ on $L^2(M, \tau)$.
As we can see, the point part $S_i$ of the
spectrum of $U_i$ is non-trivial. If $p_1\ne p_2$, then
$S_1\ne S_2$, so $\alpha_1$ and $\alpha_2$ are not
conjugate.
\end{verif}
\subsection{}
The automorphisms of Theorem~\ref{4.1} and Example~\ref{4.2} are
ergodic. On the other hand, the automorphisms of Example~\ref{4.3}
are not ergodic, even on the Cartan subalgebra. Moreover, any
ergodic automorphism of compact abelian group has positive entropy
(it is even Bernoullian), so with the methods of Section~\ref{4}
we can not construct ergodic automorphisms with positive entropy
and zero entropy restriction to a Cartan subalgebra (however, for
actions of ${\mathbb Z}^d$, $d\ge2$, we are able to construct such
examples).
The construction of Section~\ref{5} leads to non-ergodic automorphisms also,
even if we start with an ergodic automorphism (such as in Example~\ref{4.2}).
{\bf Acknowledgement.} The first author (V.G.) is grateful to Erling
St{\o}rmer for interesting and helpful discussions of the first version
of this paper.
\end{document} |
\begin{document}
\title{A new construction for planar Tur\'an number of cycle}
\begin{abstract}
The planar Tur\'an number $\ex_{\mathcal{P}}(n,C_k)$ is the largest number of edges in an $n$-vertex planar graph with no cycle of length $k$. Let $k\ge 11$ and $C,D$ be constants. Cranston, Lidick\'y, Liu and Shantanam \cite{2021Planar}, and independently Lan and Song \cite{LanSong} showed that $\ex_{\mathcal{P}}(n,C_k)\ge 3n-6-\frac{Cn}{k}$ for large $n$. Moreover, Cranston et al. conjectured that $\ex_{\mathcal{P}}(n,C_k)\le 3n-6-\frac{Dn}{k^{\lg_23}}$ when $n$ is large.
In this note, we prove that $\ex_{\mathcal{P}}(n,C_k)\ge 3n-6-\frac{12n}{k^{\lg_23}}$ for every $k$. It implies Cranston et al.'s conjecture is essentially best possible.
\end{abstract}
\section{Introduction}
The Tur\'an number $\ex(n, H)$ for a graph $H$ is the maximum number of edges in an $n$-vertex graph with no copy of $H$ as a subgraph. The first result on this topic is obtained by Tur\'an \cite{Turan}, who proved that the balanced complete $r$-partite graph is the unique extremal graph of $\ex(n,K_{r+1})$. The Erd\H{o}s-Stone-Simonovits \cite{Erdos1966ALT,erdos1946structure} Theorem then generalized this result and asymptotically determines $\ex(n,H)$ for all nonbipartite graphs $H$: $\ex(n,H)=(1-\frac{1}{\chi(H)-1})\binom{n}{2}+o(n^2)$.
In 2016, Dowden \cite{Dowden} initiated the study of planar Tur\'an problems $\ex_{\mathcal{P}}(n,H)$, i.e., how many edges can a planar $n$-vertex graph have, without containing a copy of $H$?
He proved that $\ex_{\mathcal{P}}(n,C_4)\le\frac{15(n-2)}{7}$ for all $n\ge 4$ and $\ex_{\mathcal{P}}(n,C_5)\le \frac{12n-33}{5}$ for all $n\ge 11$ \cite{Dowden}. For $k\in \{4,5\}$, let $\Theta_k$ denote the graph obtained from $C_k$ by adding a chord. Lan, Shi and Song \cite{2017Extremal} showed that $\ex_{\mathcal{P}}(n,\Theta_4)\le\frac{15(n-2)}{5}$ for all $n\ge 4$, $\ex_{\mathcal{P}}(n,\Theta_5)\le\frac{5(n-2)}{2}$ for all $n\ge 5$ and $\ex_{\mathcal{P}}(n,C_6)\le\frac{18(n-2)}{7}$. The bounds for $\ex_{\mathcal{P}}(n,\Theta_4)$ and $\ex_{\mathcal{P}}(n,\Theta_5)$ are tight infinitely often. However, the upper bound for $\ex_{\mathcal{P}}(n,C_6)$ was improved by Ghosh, Gy\H{o}ri, Martin, Paulos and Xiao \cite{6cycle}. They proved $\ex_{\mathcal{P}}(n,C_6)\le \frac{5n-14}{2}$ for all $n\ge 18$. In the same paper, Ghosh et al. conjectured an upper bound for $\ex_{\mathcal{P}}(n,C_k)$ for each $k\ge 7$ and large $n$.
\begin{conjecture}(\cite{6cycle})\label{Conj1}
For each $k\ge 7$ and sufficiently large $n$,
\[\ex_{\mathcal{P}}(n,C_k)\le 3n-6-\frac{3n+6}{k}.\]
\end{conjecture}
Recently, Conjecture 1 was disproved by Cranston, Lidick\'y, Liu and Shantanam \cite{2021Planar}, independently by Lan and Song \cite{LanSong} for $k\ge 11$ and sufficiently large $n$. Both of their constructions imply the following theorem.
\begin{theorem}(\cite{2021Planar}, \cite{LanSong})
Let $k\ge 11$ and $n$ be a sufficiently large integer. There is a constant $\epsilon=\epsilon(k)$ such that
\[\ex_{\mathcal{P}}(n,C_k)\ge 3n-6-\frac{3-\epsilon}{\frac{3}{2}k}n.\]
\end{theorem}
Furthermore, Cranston, Lidick\'y, Liu and Shantanam proposed a revised conjecture.
\begin{conjecture}(\cite{2021Planar})\label{Conj2}
There exists a constant $D$ such that for all $k$ and all sufficiently large $n$, we have
\[\ex_{\mathcal{P}}(n,C_k)\le 3n-6-\frac{Dn}{k^{\lg_23}}.\]
\end{conjecture}
In this note, we give a new construction for the lower bound of $\ex_{\mathcal{P}}(n,C_k)$ and obtain the following theorem.
\begin{theorem}\label{thm2}
For all $k$ and all sufficiently large $n$, we have
\[\ex_{\mathcal{P}}(n,C_k)\ge 3n-6-\frac{12n}{k^{\lg_23}}.\]
\end{theorem}
This implies Conjecture \ref{Conj2} is essentially best possible.
\section{Our construction}
In this section, we show our construction and prove Theorem \ref{thm2}. We first define a sequence of planar graphs $\{T_i\}$ and use them as a based block in our construction. This sequence was first introduced by Moon and Moser \cite{1963Simple}.
Let $T_1$ be a copy of $K_4$ and $xyz$ be the outer cycle. Suppose $T_{i-1}$ is defined for some $i\ge 2$. Let $T_i$ be the graph obtained from $T_{i-1}$ as follows: in each inner face of $T_{i-1}$, add a new vertex and join the new vertex to the three vertices incident with that face.
By the above construction, each $T_i$ is a triangulation, i.e., a planar graph such that each face is a triangle. The outer cycle of $T_i$ is $xyz$ and the order of $T_i$ is
\[|V(T_i)|=4+3+3^2+\cdots+3^{i-1}=\frac{3^i+5}{2}.\]
Furthermore, Chen and Yu \cite{ChenGuantao2002Long} proved the following properties of $T_i$.
\begin{lemma}(\cite{ChenGuantao2002Long})\label{Chen-Yu}
\begin{enumerate}
\item The length of the longest path from $x$ to $y$ in $T_i$ is $3\cdot 2^{i-1}$.
\item The length of the longest cycle in $T_i$ is $7\cdot 2^{i-2}$.
\end{enumerate}
\end{lemma}
\vskip 2mm
After defining the sequence $\{T_i\}$, we show our construction. Let $i$ be the maximum integer such that $3\cdot 2^{i-1}\le \frac{k}{2}$, i.e., $i=\lfloor{\lg_2\frac{k}{3}}\rfloor$.
Then
\begin{equation}\label{eq1}
|V(T_i)|=\frac{3^{i}+5}{2}\ge \frac{k^{\lg_23}}{12}.
\end{equation}
Now let $s=\lceil\frac{n-2}{(3^i+5)/2-2} \rceil$. By inequality~(\ref{eq1}), we have
\begin{equation}
s\le \frac{12n}{k^{\lg_23}}.
\end{equation}
Let $H_1,\ldots,H_{s-1}$ be $s-1$ copies of $T_i$ and $H_{s}$ be a copy of triangulation of order $n-(s-1)(\frac{3^i+5}{2}-2)$ which is a subgraph of $T_i$. Such a graph $H_s$ exists because of the process of the construction of $T_i$. For each $1\le j\le s$, we may assume the outer cycle of $H_j$ is $x_jy_jz_j$ and $x_jy_jw_j$ is the other triangle face containing $x_jy_j$. Let $H_j^-$ be the subgraph obtained from $H_j$ by deleting the edge $x_jy_j$. Then each face of $H_j^-$ is a triangle except the outer face whose boundary is the cycle $x_jw_jy_jz_j$. We identify all $x_j$ of $H_j^-$ into a single vertex $x$ and all $y_j$ of $H_j^-$ into $y$ and add a new edge $xy$. Say the resulting graph $H$, see Figure 1.
\begin{picture}(140,180)(130,-30)
\centering
{\includegraphics[width=8in]{H.pdf}}
\put(-265,190){\makebox(3,3){$x$}}
\put(-265,55){\makebox(3,3){$y$}}
\put(-385,125){\makebox(3,3){$H_1^-$}}
\put(-430,125){\makebox(3,3){$z_1$}}
\put(-345,125){\makebox(3,3){$w_1$}}
\put(-270,125){\makebox(3,3){$H_j^-$}}
\put(-315,125){\makebox(3,3){$z_j$}}
\put(-235,125){\makebox(3,3){$w_j$}}
\put(-205,125){\makebox(3,3){$z_s$}}
\put(-125,125){\makebox(3,3){$w_s$}}
\put(-165,125){\makebox(3,3){$H_s^-$}}
\put(-290,20){\makebox(3,3){Figure 1: H}}
\end{picture}
We call each $H_j^-+xy$ a block of $H$. We now show that $H$ contains no $C_k$. Let $C$ be a longest cycle in $H$. Note that each block is a copy of $T_i$ except the last block $H_{s}^-+xy$, which is a subgraph of $T_i$. Since $\{x,y\}$ is a vertex-cut of $H$, $C$ passes through at most two blocks. If $C$ passes only one block, then $|V(C)|\le 7\cdot2^{\lg_2\frac{k}{3}-2}=\frac{7k}{12}$ by $2$ of Lemma \ref{Chen-Yu}. If $C$ passes through two blocks, then $x,y\in V(C)$ and $|V(C)|\le k-2$ by $1$ of Lemma \ref{Chen-Yu} and the choice of $i$. Hence, $H$ contains no $C_k$.
On the other hand, the order of $H$ is
\[(s-1)(\frac{3^i+5}{2}-2)+2+(n-2)-(s-1)(\frac{3^i+5}{2}-2)=n.\]
If we add the edges $w_1z_2,~w_2z_3,\ldots, w_{s-1}z_{s}$, then $H$ becomes a triangulation. Hence,
$e(H)=3n-6-(s-1)\ge 3n-6-\frac{12n}{k^{\lg_23}}$. We are done.$
\blacksquare$
\end{document} |
\begin{document}
\date{\empty}
\title{Corners and fundamental corners for the groups $\mr{Spin}(n,1)$}
\author{Domagoj Kova\v cevi\' c and Hrvoje Kraljevi\' c, University of Zagreb
\thanks{The authors were supported by the QuantiXLie Centre of Excellence, a project cofinanced by the Croatian Government and European Union through the European Regional Development Fund - the Competitiveness and Cohesion Operational Programme (Grant KK.01.1.1.01.0004).}
\thanks{2010 Mathematics Subject Classification: Primary 20G05, Secondary 16S30}}
\title{Corners and fundamental corners for the groups $\Spin(n,1)$}
\noindent\underline{Abstract.} We study corners and fundamental corners of the irreducible representations of the groups $G=\mr{Spin}(n,1)$ that are not elementary, i.e. that are equivalent to subquotients of reducible nonunitary principal series representations. For even $n$ we obtain results in a way analogous to the results in [10] for the groups $\mr{SU}(n,1).$ Especially, we again get a bijection between the nonelementary part ${\hat G}^0$ of the unitary dual $\hat G$ and the unitary dual $\hat K.$ In the case of odd $n$ we get a bijection between ${\hat G}^0$ and a true subset of $\hat K.$
\section{Introduction}
\noindent{\bf 1. Elementary representations.} Let $G$ be a connected semisimple Lie group with finite center, $\mathfrak{g}_0$ its Lie algebra, $K$ its maximal compact subgroup, and $\mathfrak{g}_0=\mathfrak{k}_0\oplus\mathfrak{p}_0$ the corresponding Cartan decomposition of $\mathfrak{g}_0.$ Let $P=MAN$ be a minimal parabolic subgroup of $G;$ here the Lie algebra $\mathfrak{a}_0$ of the subgroup $A$ is a Cartan subspace of $\mathfrak{p}_0,$ i.e. a Lie subalgebra of $\mathfrak{g}_0$ which is maximal among those contained in $\mathfrak{p}_0,$ $M=K\cap P$ is the centralizer of $\mathfrak{a}_0$ in $K,$ its Lie algebra $\mathfrak{m}_0$ is the centralizer of $\mathfrak{a}_0$ in $\mathfrak{k}_0,$ $N=\exp(\mathfrak{n}_0),$ where $\mathfrak{n}_0$ is the sum of root subspaces $\mathfrak{g}_0^{\alpha}$ with respect to some choice $\Delta^+(\mathfrak{g}_0,\mathfrak{a}_0)$ of positive restricted roots of the pair $(\mathfrak{g}_0,\mathfrak{a}_0).$ Denote by $\Delta_P$ the modular function of the group $P.$ Then $\Delta_P(m)=1$ for every $m\in M,$ $\Delta_P(n)=1$ for every $n\in N$ and for $H\in\mathfrak{a}_0$ we have
$$
\Delta_P(\exp\,H)=\mathrm{e}^{\mr{Tr}\,(\mathrm{ad}\,H)|\mathfrak{n}_0}=\mathrm{e}^{2\delta(H)},\quad\delta=\frac{1}{2}\sum_{\alpha\in\Delta^+(\mathfrak{g}_0,\mathfrak{a}_0)}(\dim\,\mathfrak{g}_0^{\alpha})\alpha.
$$
Thus,
$$
\Delta_P(man)=\mathrm{e}^{2\delta(\log\,a)},\quad m\in M,\,a\in A,\,n\in N,
$$
where $\log:A\longrightarrow\mathfrak{a}_0$ is the inverse map of the bijection $\exp|\mathfrak{a}_0:\mathfrak{a}_0\longrightarrow A.$ Let $\sigma$ be an irreducible unitary representation of the compact group $M$ on a finitedimensional unitary space ${\cal H}_{\sigma}.$ Let $\mathfrak{a}$ be the complexification of $\mathfrak{a}_0.$ For $\nu\in\mathfrak{a}^*$ let $a\mapsto a^{\nu}$ be the onedimensional representation of the Abelian group $A$ defined by
$$
a^{\nu}=\mathrm{e}^{\nu(\log\,a)},\quad a\in A.
$$
Define the representation $\sigma\otimes\nu$ of the group $P=MAN$ on the space ${\cal H}_{\sigma}$ by
$$
(\sigma\otimes\nu)(man)=a^{\nu}\sigma(m),\quad m\in M,\,a\in A,\,n\in N.
$$
Let $\pi^{\sigma,\nu}$ be the representation of $G$ induced by the representation $\sigma\otimes\nu.$ The space of the representation $\pi^{\sigma,\nu}$ is the Hilbert space ${\cal H}^{\sigma,\nu}$ of all (classes of) Haar$-$measurable functions $f:G\longrightarrow{\cal H}_{\sigma}$ such that
$$
f(px)=\sqrt{\Delta_P(p)}(\sigma\otimes\nu)(p)f(x)\quad\forall p\in P,\,\,\forall x\in G,
$$
and such that
$$
\int_K\|f(k)\|_{{\cal H}_{\sigma}}^2\mathrm{d}\mu(k)<+\infty,
$$
where $\mu$ is the normed Haar measure on $K$ and $\|\,\cdot\,\|_{{\cal H}_{\sigma}}$ is the norm on the unitary space ${\cal H}_{\sigma}.$ The representation $\pi^{\sigma,\nu}$ is given by the right action of $G:$
$$
\left[\pi^{\sigma,\nu}(x)f\right](y)=f(yx),\quad f\in{\cal H}^{\sigma,\nu},\,x,y\in G.
$$
The representations $\pi^{\sigma,\nu},$ $\sigma\in\hat{M},$ $\nu\in\mathfrak{a}^*,$ are called {\bf elementary representations} of $G.$\\
\noindent Since $\Delta_P(man)=a^{2\delta},$ the condition $f(px)=\sqrt{\Delta_P(p)}(\sigma\otimes\nu)(p)f(x)$ can be written as
$$
f(manx)=a^{\nu+\delta}\sigma(m)f(x),\quad m\in M,\,a\in A,\,n\in N,\,x\in G.
$$
\noindent From classical results of Harish$-$Chandra we know that all elementary representations are admissible and of finite length and that every completely irreducible admissible representation of $G$ on a Banach space is infinitesimally equivalent to an irreducible subquotient of an elementary representation. Infinitesimal equivalence of completely irreducible admissible representations means algebraic equivalence of the corresponding $(\mathfrak{g},K)-$modules. We will denote by $\wideparen{G}$ the set of all infinitesimal equivalence classes of completely irreducible admissible representations of $G$ on Banach spaces. $\wideparen{G}^e$ will denote the set of infinitesimal equivalence classes of irreducible elementary representations and $\wideparen{G}^0=\wideparen{G}\setminus\wideparen{G}^e$ the set of infinitesimal equivalence classes of irreducible subquotients of reducible elementary representations. It is also due to Harish$-$Chandra that every irreducible unitary representation is admissible and that infinitesimal equivalence between such representations is equivalent to their unitary equivalence. Thus the unitary dual $\hat{G}$ of $G$ can be regarded as a subset of $\wideparen{G}.$ We denote $\hat{G}^e=\hat{G}\cap\wideparen{G}^e$ and $\hat{G}^0=\hat{G}\cap\wideparen{G}^0=\hat{G}\setminus\hat{G}^e.$\\
\noindent{\bf 2. Infinitesimal characters.} For a finitedimensional complex Lie algebra $\mathfrak{g}$ we denote by ${\cal U}(\mathfrak{g})$ the universal enveloping algebra of $\mathfrak{g}$ and by ${\cal Z}(\mathfrak{g})$ the center of ${\cal U}(\mathfrak{g}).$ Any unital homomorphism $\chi:{\cal Z}(\mathfrak{g})\longrightarrow\mathbb{C}$ is called {\bf infinitesimal character} of $\mathfrak{g}.$ We denote by $\hat{{\cal Z}}(\mathfrak{g})$ the set of all infinitesimal characters of $\mathfrak{g}.$ If $\pi$ is a representation of $\mathfrak{g}$ on a vector space $V$ we say that $\chi\in\hat{{\cal Z}}(\mathfrak{g})$ is the infinitesimal character of the representation $\pi$ (or of the corresponding ${\cal U}(\mathfrak{g})-$module $V)$ if
$$
\pi(z)v=\chi(z)v\quad\forall z\in{\cal Z}(\mathfrak{g}),\,\,\forall v\in V.
$$
Let now $\mathfrak{g}$ be semisimple and let $\mathfrak{h}$ be its Cartan subalgebra. Denote by $\Delta=\Delta(\mathfrak{g},\mathfrak{h})\subset\mathfrak{h}^*$ the root system of the pair $(\mathfrak{g},\mathfrak{h}),$ by $W=W(\mathfrak{g},\mathfrak{h})$ its Weyl group, by $\Delta^+$ a choice of positive roots in $\Delta,$ by $\mathfrak{g}^{\alpha}$ the root subspace of $\mathfrak{g}$ for a root $\alpha\in\Delta,$ and
$$
\mathfrak{n}=\sum_{\alpha\in\Delta^+}\dotplus\,\mathfrak{g}^{\alpha}\quad\mathrm{and}\quad\overline{\mathfrak{n}}=\sum_{\alpha\in\Delta^+}\dotplus\,\mathfrak{g}^{-\alpha}.
$$
Then we have direct sum decomposition
$$
{\cal U}(\mathfrak{g})={\cal U}(\mathfrak{h})\,\dotplus\,(\mathfrak{n}\,{\cal U}(\mathfrak{g})+{\cal U}(\mathfrak{g})\,\overline{\mathfrak{n}}).
$$
Denote by $\eta:{\cal U}(\mathfrak{g})\longrightarrow{\cal U}(\mathfrak{h})$ the corresponding projection. By a result of Harish-Chandra the restriction $\eta|{\cal Z}(\mathfrak{g})$ is an injective homomorphism of ${\cal Z}(\mathfrak{g})$ into the algebra ${\cal U}(\mathfrak{h}).$ Since the Lie algebra $\mathfrak{h}$ is Abelian, the algebra ${\cal U}(\mathfrak{h})$ identifies with the symmetric algebra ${\cal S}(\mathfrak{h})$ over $\mathfrak{h},$ thus with the polynomial algebra ${\cal P}(\mathfrak{h}^*)$ over the dual space $\mathfrak{h}^*$ of $\mathfrak{h}.$ Therefore $\eta|{\cal Z}(\mathfrak{g})$ is a monomorphism of ${\cal Z}(\mathfrak{g})$ into ${\cal P}(\mathfrak{h}^*).$ This monomorphism depends on the choice of $\Delta^+.$ This dependence is repaired by the automorphism $\gamma=\gamma_{\Delta^+}$ of the algebra ${\cal U}(\mathfrak{h})={\cal P}(\mathfrak{h}^*)$ defined by
$$
(\gamma(u))(\lambda)=u(\lambda-\rho),\,\,\lambda\in\mathfrak{h}^*,\,\,u\in{\cal U}(\mathfrak{h})={\cal P}(\mathfrak{h}^*),\,\,\mathrm{where}\,\,\rho=\rho_{\Delta^+}=\frac{1}{2}\sum_{\alpha\in\Delta^+}\alpha.
$$
Now the restriction $\omega=(\gamma\circ\eta)|{\cal Z}(\mathfrak{g})$ is independent of the choice of $\Delta^+$ and is a unital isomorphism of the algebra ${\cal Z}(\mathfrak{g})$ onto the algebra ${\cal P}(\mathfrak{h}^*)^W$ of polynomial functions on $\mathfrak{h}^*$ invariant under the Weyl group $W=W(\mathfrak{g},\mathfrak{h})$ of the root system $\Delta=\Delta(\mathfrak{g},\mathfrak{h}).$ $\omega$ is called the {\bf Harish-Chandra isomorphism.} By evaluation at the points of $\mathfrak{h}^*$ one obtains all infinitesimal characters: for $\lambda\in\mathfrak{h}^*$ we define the infinitesimal character $\chi_{\lambda}\in\hat{{\cal Z}}(\mathfrak{g})$ by
$$
\chi_{\lambda}(z)=(\omega(z))(\lambda)=(\eta(z))(\lambda-\rho),\quad z\in{\cal Z}(\mathfrak{g}).
$$
Then $\lambda\mapsto\chi_{\lambda}$ is a surjection of $\mathfrak{h}^*$ onto $\hat{{\cal Z}}(\mathfrak{g})$ and for $\lambda,\mu\in\mathfrak{h}^*$ we have $\chi_{\lambda}=\chi_{\mu}$ if and only if $\mu=w\lambda$ for some $w\in W.$\\
\noindent A choice of an ordered basis $(H_1,\ldots,H_{\ell})$ of $\mathfrak{h}$ identifies the dual space $\mathfrak{h}^*$ with $\mathbb{C}^{\ell}:$ $\lambda\in\mathfrak{h}^*$ identifies with the $\ell$-tuple $(\lambda(H_1),\ldots,\lambda(H_{\ell}))\in\mathbb{C}^{\ell}.$ Now, if $\mathfrak{h}^{\prime}$ is another Cartan subalgebra of $\mathfrak{g},$ then there exists an inner automorphism $\varphi$ of $\mathfrak{g}$ such that $\mathfrak{h}^{\prime}=\varphi(\mathfrak{h}).$ $\varphi$ carries $(H_1,\ldots,H_{\ell})$ to a basis $(H_1^{\prime},\ldots,H_{\ell}^{\prime})$ of $\mathfrak{h}^{\prime}$ which we use for the identification of ${\mathfrak{h}^{\prime}}^*$ with $\mathbb{C}^{\ell}.$ If an $\ell$-tuple $(c_1,\ldots,c_{\ell})\in\mathbb{C}^{\ell}$ corresponds to $\lambda\in\mathfrak{h}^*$ and to $\lambda^{\prime}\in{\mathfrak{h}^{\prime}}^*$ then the corresponding infinitesimal characters are the same: $\chi_{\lambda}=\chi_{\lambda^{\prime}}.$\\
\noindent We return now to the notations of {\bf 1.} If $\mathfrak{l}_0$ is any real Lie algebra (or its subspace) we will denote by $\mathfrak{l}$ its complexification. It is well known that an elementary representation has an infinitesimal character. We are going to write down the formula for the infinitesimal character of the elementary representation $\pi^{\sigma,\nu},$ $\sigma\in\hat{M},$ $\nu\in\mathfrak{a}^*.$ Let $\mathfrak{d}_0$ be a Cartan subalgebra of the reductive Lie subalgebra $\mathfrak{m}_0.$ Denote by $\Delta_{\mathfrak{m}}=\Delta(\mathfrak{m},\mathfrak{d})\subset\mathfrak{d}^*$ the root system of the pair $(\mathfrak{m},\mathfrak{d}).$ Choose a subset $\Delta_{\mathfrak{m}}^+$ of positive roots in $\Delta_{\mathfrak{m}}$ and set
$$
\delta_{\mathfrak{m}}=\rho_{\Delta_{\mathfrak{m}}^+}=\frac{1}{2}\sum_{\alpha\in\Delta_{\mathfrak{m}}^+}\alpha.
$$
Denote by $\lambda_{\sigma}\in\mathfrak{d}^*$ the highest weight of the representation $\sigma$ with respect to $\Delta_{\mathfrak{m}}^+.$ Now, $\mathfrak{h}_0=\mathfrak{d}_0\dotplus\mathfrak{a}_0$ is a Cartan subalgebra of $\mathfrak{g}_0$ and its complexification $\mathfrak{h}=\mathfrak{d}\dotplus\mathfrak{a}$ is a Cartan subalgebra of $\mathfrak{g}.$ Then the infinitesimal character of the elementary representation $\pi^{\sigma,\nu}$ is $\chi_{\Lambda(\sigma,\nu)},$ where $\Lambda(\sigma,\nu)\in\mathfrak{h}^*$ is given by
$$
\Lambda(\sigma,\nu)|\mathfrak{d}=\lambda_{\sigma}+\delta_{\mathfrak{m}}\quad\mathrm{and}\quad\Lambda(\sigma,\nu)|\mathfrak{a}=\nu.
$$
\noindent{\bf 3. Corners and fundamental corners} Suppose now that the rank of $\mathfrak{g}$ is equal to the rank of $\mathfrak{k}.$ Choose a Cartan subalgebra $\mathfrak{t}_0$ of $\mathfrak{k}_0.$ It is then also a Cartan subalgebra of $\mathfrak{g}_0$ and the complexification $\mathfrak{t}$ is a Cartan subalgebra of the complexifications $\mathfrak{k}$ and $\mathfrak{g}.$ Let $\Delta_K=\Delta(\mathfrak{k},\mathfrak{t})\subset\Delta=\Delta(\mathfrak{g},\mathfrak{t})$ be the root systems of the pairs $(\mathfrak{k},\mathfrak{t})$ and $(\mathfrak{g},\mathfrak{t})$ and $W_K=W(\mathfrak{k},\mathfrak{t})\subset W=W(\mathfrak{g},\mathfrak{t})$ the corresponding Weyl groups. Choose positive roots $\Delta_K^+$ in $\Delta_K$ and let $C$ be the corresponding $W_K$-Weyl chamber in $\mathfrak{t}_{\mathbb{R}}^*=i\mathfrak{t}_0^*.$ Denote by ${\cal D}$ the set of all $W$-Weyl chambers in $i\mathfrak{t}_0^*$ contained in $C.$ For $D\in{\cal D}$ we denote by $\Delta^D$ the corresponding positive roots in $\Delta$ and let $\Delta_P^D$ be the noncompact roots in $\Delta^D,$ i.e. $\Delta_P^D=\Delta^D\setminus\Delta_K^+.$ Set
$$
\rho_K=\frac{1}{2}\sum_{\alpha\in\Delta_K^+}\alpha\quad\mathrm{and}\quad\rho_P^D=\frac{1}{2}\sum_{\alpha\in\Delta_P^D}\alpha.
$$
\noindent Recall some definitions from [10]. For a representation $\pi$ of $G$ and for $q\in\hat{K}$ we denote by $(\pi:q)$ the multiplicity of $q$ in $\pi|K.$ The $K$-{\bf spectrum} $\Gamma(\pi)$ of a representation $\pi$ of $G$ is defined by
$$
\Gamma(\pi)=\{q\in\hat{K};\,\,(\pi:q)>0\}.
$$
We identify $q\in\hat{K}$ with its maximal weight in $i\mathfrak{t}_0^*$ with respect to $\Delta_K^+.$ For $q\in\Gamma(\pi)$ and for $D\in{\cal D}$ we say:
\begin{enumerate}
\item[$(i)$] $q$ is a $D$-{\bf corner} for $\pi$ if $q-\alpha\not\in\Gamma(\pi)$ $\forall\alpha\in\Delta_P^D;$
\item[$(ii)$] $q$ is a $D$-{\bf fundamental corner} for $\pi$ if it is a $D$-corner for $\pi$ and $\chi_{q+\rho_K-\rho_P^D}$ is the infinitesimal character of $\pi;$
\item[$(iii)$] $q$ is a {\bf fundamental corner} for $\pi$ if it is a $D$-fundamental corner for $\pi$ for some $D\in{\cal D}.$
\end{enumerate}
\noindent In [10] for the case of the groups $G=SU(n,1)$ and $K=U(n)$ the following results were proved:
\begin{enumerate}
\item[{\bf 1.}] An elementary representation $\pi^{\sigma,\nu}$ is reducible if and only if there exist $q\in\Gamma(\pi^{\sigma,\nu})$ and $D\in{\cal D}$ such that $\chi_{q+\rho_K-\rho_P^D}$ is the infinitesimal character of $\pi^{\sigma,\nu},$ i.e. if and only if $\Lambda(\sigma,\nu)=w(q+\rho_K-\rho_P^D)$ for some $w\in W.$
\item[{\bf 2.}] Every $\pi\in\wideparen{G}^0$ has either one or two fundamental corners.
\item[{\bf 3.}] $\hat{G}^0=\{\pi\in\wideparen{G}^0;\,\,\pi\,\,\mathrm{has}\,\,\mathrm{exactly}\,\,\mathrm{one}\,\,\mathrm{fundamental}\,\,\mathrm{corner}\}.$
\item[{\bf 4.}] For $\pi\in\hat{G}^0$ denote by $q(\pi)$ the unique fundamental corner of $\pi.$ Then $\pi\mapsto q(\pi)$ is a bijection of $\hat{G}^0$ onto $\hat{K}.$
\end{enumerate}
\noindent In this paper we investigate the analogous notions and results for the groups $\mathrm{Spin}(n,1).$
\section{The groups $\mathrm{Spin}(n,1)$}
\noindent In the rest of the paper $G=\mathrm{Spin}(n,1),$ $n\geq3,$ is the connected and simply connected real Lie group with simple real Lie algebra
$$
\mathfrak{g}_0=\mathfrak{s}\mathfrak{o}(n,1)=\{A\in\mathfrak{g}\mathfrak{l}(n+1,\mathbb{R});\,\,A^t=-\Gamma A\Gamma\},\quad\Gamma=\left[\begin{array}{cc}I_n&0\\0&-1\end{array}\right],
$$
i.e.
$$
\mathfrak{g}_0=\left\{\left[\begin{array}{cc}B&a\\a^t&0\end{array}\right];\,\,B\in\mathfrak{s}\mathfrak{o}(n),\,\,a\in M_{n,1}(\mathbb{R})\right\}.
$$
Here and in the rest of the paper we use the usual notation:
\begin{enumerate}
\item[$\bullet$] For $n,m\in\mathbb{N}$ $M_{m,n}(K)$ is the vector space of $m\times n$ matrices over a field $K.$
\item[$\bullet$] $\mathfrak{g}\mathfrak{l}(n,K)$ is $M_{n,n}(K),$ considered as a Lie algebra with commutator $[A,B]=AB-BA.$
\item[$\bullet$] $\mathrm{GL}(n,K)$ is the group of invertible matrices in $M_{n,n}(K).$
\item[$\bullet$] $A^t$ is the transpose of a matrix $A.$
\item[$\bullet$] $\mathfrak{s}\mathfrak{o}(n,K)=\{B\in\mathfrak{g}\mathfrak{l}(n,K);\,\,B^t=-B\}.$
\item[$\bullet$] $\mathfrak{s}\mathfrak{o}(n)=\mathfrak{s}\mathfrak{o}(n,\mathbb{R}).$
\item[$\bullet$] $\mathrm{SO}(n)=\{A\in\mathrm{GL}(n,\mathbb{R});\,\,A^{-1}=A^t,\,\,\det\,A=1\}.$
\end{enumerate}
\noindent For the group $G=\mathrm{Spin}(n,1)$ we choose the Cartan decomposition $\mathfrak{g}_0=\mathfrak{k}_0\oplus\mathfrak{p}_0$ as follows
$$
\mathfrak{k}_0=\left\{\left[\begin{array}{cc}B&0\\0&0\end{array}\right];\,\,B\in\mathfrak{s}\mathfrak{o}(n)\right\},\quad\mathfrak{p}_0=\left\{\left[\begin{array}{cc}0&a\\a^t&0\end{array}\right];\,\,a\in M_{n,1}(\mathbb{R})\right\}.
$$
The complexifications are:
$$
\mathfrak{g}=\mathfrak{s}\mathfrak{o}(n,1,\mathbb{C})=\{A\in\mathfrak{g}\mathfrak{l}(n+1,\mathbb{C});\,\,A^t=-\Gamma A\Gamma\},
$$
i.e.
$$
\mathfrak{g}=\left\{\left[\begin{array}{cc}B&a\\a^t&0\end{array}\right];\,\,B\in\mathfrak{s}\mathfrak{o}(n,\mathbb{C}),\,\,a\in M_{n,1}(\mathbb{C})\right\},
$$
$$
\mathfrak{k}=\left\{\left[\begin{array}{cc}B&0\\0&0\end{array}\right];\,\,B\in\mathfrak{s}\mathfrak{o}(n,\mathbb{C})\right\},\quad\mathfrak{p}=\left\{\left[\begin{array}{cc}0&a\\a^t&0\end{array}\right];\,\,a\in M_{n,1}(\mathbb{C})\right\}.
$$
$\mathrm{Spin}(n,1)$ is the double cover of the identity component $\mathrm{SO}_0(n,1)$ of the Lie group
$$
\mathrm{SO}(n,1)=\{A\in\mathrm{GL}(n+1,\mathbb{R});\,\,A^{-1}=\Gamma A^t\Gamma,\,\,\det\,A=1\}.
$$
The analytic subgroup $K\subset G$ whose Lie algebra is $\mathfrak{k}_0$ is a maximal compact subgroup of $G$ isomorphic with the double cover $\mathrm{Spin}(n)$ of the group $\mathrm{SO}(n).$\\
\noindent Now we choose Cartan subalgebras. $E_{p,q}$ will denote the $(n+1)\times(n+1)$ matrix with $(p,q)$-entry equal $1$ and all the other entries $0.$ Set
$$
I_{p,q}=E_{p,q}-E_{q,p},\quad1\leq p,q\leq n,\quad p\not=q,
$$
and
$$
B_p=E_{p,n+1}+E_{n+1,p},\quad1\leq p\leq n.
$$
Then $\{I_{p,q};\,\,1\leq q<p\leq n\}$ is a basis of the real Lie algebra $\mathfrak{k}_0$ and of its complexification $\mathfrak{k}$ and $\{B_p;\,\,1\leq p\leq n\}$ is a basis of the real subspace $\mathfrak{p}_0$ and of its complexification $\mathfrak{p}.$ Now $\mathfrak{t}_0=\mathrm{span}_{\mathbb{R}}\left\{I_{2p,2p-1};\,\,1\leq p\leq\frac{n}{2}\right\}$ is a Cartan subalgebra of $\mathfrak{k}_0$ and its complexification $\mathfrak{t}=\mathrm{span}_{\mathbb{C}}\left\{I_{2p,2p-1};\,\,1\leq p\leq\frac{n}{2}\right\}$ is a Cartan subalgebra of $\mathfrak{k}.$\\
\noindent We consider now separately two cases: $n$ even and $n$ odd.
\begin{center}
{\bf $n$ even, $n=2k$}
\end{center}
\noindent In this case $\mathfrak{t}_0$ is also a Cartan subalgebra of $\mathfrak{g}_0$ and $\mathfrak{t}$ is a Cartan subalgebra of $\mathfrak{g}.$ Set
$$
H_p=-iI_{2p,2p-1},\quad1\leq p\leq k.
$$
The dual space $\mathfrak{t}^*$ identifies with $\mathbb{C}^k$ as follows:
$$
\mathfrak{t}^*\ni\lambda=(\lambda(H_1),\ldots,\lambda(H_k))\in\mathbb{C}^k.
$$
Let $\{\alpha_1,\ldots,\alpha_k\}$ be the canonical basis of $\mathbb{C}^k=\mathfrak{t}^*.$ The root system of the pair $(\mathfrak{g},\mathfrak{t})$ is
$$
\Delta=\Delta(\mathfrak{g},\mathfrak{t})=\{\pm\alpha_p\pm\alpha_q;\,\,1\leq p,q\leq k,\,\,p\not=q\}\cup\{\pm\alpha_p;\,\,1\leq p\leq k\}.
$$
The Weyl group $W$ of $\Delta$ consists of all permutations of the coordinates combined with multiplying some coordinates by $-1:$
$$
W=\mathbb{Z}_2^k\rtimes S_k=\{(\varepsilon,\sigma);\,\,\varepsilon\in\mathbb{Z}_2^k,\,\,\sigma\in S_k\},
$$
where $\mathbb{Z}_2$ is the multiplicative group $\{1,-1\}$ and $S_k$ is the group of permutations of $\{1,\ldots,k\}.$ $(\varepsilon,\sigma)\in W$ acts on $\mathfrak{t}^*=\mathbb{C}^k$ as follows:
$$
(\varepsilon,\sigma)(\lambda_1,\lambda_2,\ldots,\lambda_k)=(\varepsilon_1\lambda_{\sigma(1)},\varepsilon_2\lambda_{\sigma(2)},\ldots,\varepsilon_k\lambda_{\sigma(k)}).
$$
\noindent The root system $\Delta_K$ of the pair $(\mathfrak{k},\mathfrak{t})$ is $\{\pm\alpha_p\pm\alpha_q;\,\,p\not=q\}.$ We choose positive roots in $\Delta_K:$
$$
\Delta_K^+=\{\alpha_p\pm\alpha_q;\,\,1\leq p<q\leq k\}.
$$
The corresponding Weyl chamber in $\mathbb{R}^k=i\mathfrak{t}_0^*$ is
$$
C=\{\lambda\in\mathbb{R}^k;\,\,(\lambda|\gamma_j)>0,\,1\leq j\leq k\}=\{\lambda\in\mathbb{R}^k;\,\lambda_1>\cdots>\lambda_{k-1}>|\lambda_k|>0\},
$$
and its closure is
$$
\overline{C}=\{\lambda\in\mathbb{R}^k;\,(\lambda|\gamma_j)\geq0,\,1\leq j\leq k\}=\{\lambda\in\mathbb{R}^k;\,\lambda_1\geq\cdots\geq\lambda_{k-1}\geq|\lambda_k|\}.
$$
The Weyl group $W_K$ of the root system $\Delta_K$ is the subgroup of $W$ consisting of all $(\varepsilon,\sigma)$ with an even number of $\varepsilon_j=-1:$
$$
W_K=\{(\varepsilon,\sigma)\in W;\,\,\varepsilon_1\varepsilon_2\cdots\varepsilon_k=1\}\simeq\mathbb{Z}_2^{k-1}\rtimes S_k.
$$
We now parametrize the equivalence classes of irreducible finite-dimensional representations of the Lie algebra $\mathfrak{k}$ (i.e. the unitary dual $\hat{K}$ of the group $K=\mathrm{Spin}(2k)\,)$ by identifying them with the corresponding highest weights. Thus
$$
\begin{array}{c}
\hat{K}=\left\{(m_1,\ldots,m_k)\in\mathbb{Z}^k\cup\left(\frac{1}{2}+\mathbb{Z}\right)^k;\,\,m_1\geq m_2\geq\cdots\geq m_{k-1}\geq|m_k|\right\}.
\end{array}
$$
\begin{center}
{\bf $n$ odd, $n=2k+1$}
\end{center}
\noindent Now $\mathfrak{t}_0$ is not a Cartan subalgebra of $\mathfrak{g}_0.$ Set
$$
H=B_n=B_{2k+1}=E_{2k+1,2k+2}+E_{2k+2,2k+1},\quad\mathfrak{a}_0=\mathbb{R} H,\quad\mathfrak{h}_0=\mathfrak{t}_0\dotplus\mathfrak{a}_0.
$$
Then $\mathfrak{h}_0$ is a Cartan subalgebra of $\mathfrak{g}_0$ and all the other Cartan subalgebras of $\mathfrak{g}_0$ are $\mathrm{Int}(\mathfrak{g}_0)$-conjugated with $\mathfrak{h}_0.$ The ordered basis $(H_1,\ldots,H_k,H)$ of the complexification $\mathfrak{h}$ of $\mathfrak{h}_0$ is used for the identification of $\mathfrak{h}^*$ with $\mathbb{C}^{k+1}:$
$$
\mathfrak{h}^*\ni\lambda=(\lambda(H_1),\ldots,\lambda(H_k),\lambda(H))\in\mathbb{C}^{k+1}.
$$
$\mathfrak{t}^*$ identifies with $\mathbb{C}^k$ through the ordered basis $(H_1,\ldots,H_k)$ of $\mathfrak{t}$ and $\mathfrak{a}^*$ identifies with $\mathbb{C}$ through $H:$
$$
\mathfrak{t}^*\ni\mu=(\mu(H_1),\ldots,\mu(H_k))\in\mathbb{C}^k,\quad\mathfrak{a}^*\ni\nu=\nu(H)\in\mathbb{C}.
$$
Furthermore, $\mathfrak{t}^*$ and $\mathfrak{a}^*$ are identified with subspaces of $\mathfrak{h}^*$ as follows:
$$
\mathfrak{t}^*=\{\lambda\in\mathfrak{h}^*;\,\,\lambda|\mathfrak{a}=0\}=\{\lambda\in\mathbb{C}^{k+1};\,\,\lambda_{k+1}=0\},
$$
$$
\mathfrak{a}^*=\{\lambda\in\mathfrak{h}^*;\,\,\lambda|\mathfrak{t}=0\}=\{(0,\ldots,0,\nu);\,\,\nu\in\mathbb{C}\}.
$$
\noindent Let $\{\alpha_1,\ldots,\alpha_{k+1}\}$ be the canonical basis of $\mathbb{C}^{k+1}.$ The root system $\Delta=\Delta(\mathfrak{g},\mathfrak{h})$ of the pair $(\mathfrak{g},\mathfrak{h})$ is
$$
\Delta=\{\pm\alpha_p\pm\alpha_q;\,\,1\leq p,q\leq k+1,\,\,p\not=q\}.
$$
The Weyl group $W=W(\mathfrak{g},\mathfrak{h})$ consists of all permutations of coordinates combined with multiplying an even number of coordinates by $-1:$
$$
W=\mathbb{Z}_2^k\rtimes S_{k+1}=\{(\varepsilon,\sigma);\,\,\varepsilon\in\mathbb{Z}_2^{k+1},\,\,\varepsilon_1\cdots\varepsilon_{k+1}=1,\,\,\sigma\in S_{k+1}\}.
$$
The root system $\Delta_K=\Delta(\mathfrak{k},\mathfrak{t})$ of the pair $(\mathfrak{k},\mathfrak{t})$ is
$$
\Delta_K=\{\pm\alpha_p\pm\alpha_q;\,\,1\leq p,q\leq k,\,\,p\not=q\}\cup\{\pm\alpha_p;\,\,1\leq p\leq k\}.
$$
Choose positive roots in $\Delta_K$ as follows:
$$
\Delta_K^+=\{\alpha_p\pm\alpha_q;\,\,1\leq p<q\leq k\}\cup\{\alpha_p;\,\,1\leq p\leq k\}.
$$
The corresponding Weyl chamber in $\mathbb{R}^k=i\mathfrak{t}_0^*$ is
$$
C=\{\lambda\in\mathbb{R}^k;\,(\lambda|\gamma_j)>0,\,1\leq j\leq k\}=\{\lambda\in\mathbb{R}^k;\,\lambda_1>\cdots>\lambda_k>0\}
$$
and its closure is
$$
\overline{C}=\{\lambda\in\mathbb{R}^k;\,(\lambda|\gamma_j)\geq0,\,1\leq j\leq k\}=\{\lambda\in\mathbb{R}^k;\,\lambda_1\geq\cdots\geq\lambda_k\geq0\}.
$$
The dual $\hat{K}$ is again identified with the highest weights of irreducible representations. Thus:
$$
\begin{array}{c}
\hat{K}=\left\{q=(m_1,\ldots,m_k)\in\mathbb{Z}_+^k\cup\left(\frac{1}{2}+\mathbb{Z}_+\right)^k;\,\,m_1\geq m_2\geq\cdots\geq m_k\right\}.
\end{array}
$$
\begin{center}
{\bf Elementary representations of the groups $\mathrm{Spin}(n,1)$}
\end{center}
\noindent Regardless of the parity of $n$ we put
$$
H=B_n=E_{n,n+1}+E_{n+1,n},\quad\mathfrak{a}_0=\mathbb{R} H.
$$
Then $\mathfrak{a}_0$ is maximal among Abelian subalgebras of $\mathfrak{g}_0$ contained in $\mathfrak{p}_0.$ As we already said, if $n$ is odd, $n=2k+1,$ then $\mathfrak{h}_0=\mathfrak{t}_0\dotplus\mathfrak{a}_0$ is a Cartan subalgebra of $\mathfrak{g}_0$ and all the other Cartan subalgebras are $\mathrm{Int}(\mathfrak{g}_0)$-conjugated to $\mathfrak{h}_0.$ If $n$ is even, $n=2k,$ set
$$
\mathfrak{h}_0=\mathrm{span}_{\mathbb{R}}\{iH_1,\ldots,iH_{k-1},H\}.
$$
It is a Cartan subalgebra of $\mathfrak{g}_0.$ In this case $\mathfrak{g}_0$ has two $\mathrm{Int}(\mathfrak{g}_0)$-conjugacy classes of Cartan subalgebras; $\mathfrak{h}_0$ and $\mathfrak{t}_0$ are their representatives. Their complexifications $\mathfrak{h}$ and $\mathfrak{t}$ are $\mathrm{Int}(\mathfrak{g})$-conjugated. Explicitly, the matrix
$$
A=\left[\begin{array}{ccc}\frac{1}{\sqrt{2}}P_k&\frac{1}{\sqrt{2}}P_k&-ie_k\\-\frac{1}{\sqrt{2}}Q_k&\frac{1}{\sqrt{2}}I_k&0_k\\-\frac{i}{\sqrt{2}}e_k^t&\frac{i}{\sqrt{2}}e_k^t&0\end{array}\right]\in\mathrm{SO}(2k,1,\mathbb{C}),
$$
where $P_k=I_k-E_{k,k}=\mathrm{diag}(1,\ldots,1,0),$ $Q_k=I_k-2E_{k,k}=\mathrm{diag}(1,\ldots,1,-1),$ $e_k\in M_{k,1}(\mathbb{C})$ is given by $e_k^t=[0\cdots0\,1]$ and $0_k$ is the zero matrix in $M_{k,1}(\mathbb{C}),$ has the properties
$$
AH_jA^{-1}=H_j,\quad1\leq j\leq k-1,\quad\mathrm{and}\quad AH_kA^{-1}=H;
$$
thus, $A\mathfrak{t} A^{-1}=\mathfrak{h}.$ As we mentioned before, this means that the parameters from $\mathbb{C}^k=\mathfrak{h}^*=\mathfrak{t}^*$ of the infinitesimal characters obtained through the two Harish-Chandra isomorphisms ${\cal Z}(\mathfrak{g})\longrightarrow{\cal P}(\mathfrak{h}^*)^W$ and ${\cal Z}(\mathfrak{g})\longrightarrow{\cal P}(\mathfrak{t}^*)^W$ coincide if the identifications of $\mathfrak{h}^*$ and $\mathfrak{t}^*$ with $\mathbb{C}^k$ are done through the two ordered bases $(H_1,\ldots,H_{k-1},H)$ of $\mathfrak{h}$ and $(H_1,\ldots,H_{k-1},H_k)$ of $\mathfrak{t}.$\\
\noindent For both cases, $n$ even and $n$ odd, $\mathfrak{m}_0$ (the centralizer of $\mathfrak{a}_0$ in $\mathfrak{k}_0)$ is the subalgebra of all matrices in $\mathfrak{g}_0$ with the last two rows and columns $0.$ The subgroup $M$ is isomorphic to $\mathrm{Spin}(n-1).$ A Cartan subalgebra of $\mathfrak{m}_0$ is
$$
\mathfrak{d}_0=\mathfrak{t}_0\cap\mathfrak{m}_0=\mathrm{span}_{\mathbb{R}}\{iH_1,\ldots,iH_{k-1}\},\quad k=\left[\frac{n}{2}\right].
$$
The elements of $\hat{M}$ are identified with their highest weights. For $n$ even, $n=2k,$ we have
$$
\begin{array}{c}
\hat{M}=\left\{(n_1,\ldots,n_{k-1})\in\mathbb{Z}_+^{k-1}\cup\left(\frac{1}{2}+\mathbb{Z}_+\right)^{k-1};\,\,n_1\geq n_2\geq\cdots\geq n_{k-1}\geq0\right\}
\end{array}
$$
and for $n$ odd, $n=2k+1,$ we have
$$
\begin{array}{c}
\hat{M}=\left\{(n_1,\ldots,n_k)\in\mathbb{Z}^k\cup\left(\frac{1}{2}+\mathbb{Z}\right)^k;\,\,n_1\geq n_2\geq\cdots\geq n_{k-1}\geq|n_k|\right\}.
\end{array}
$$
The branching rules for the restriction of representations of $K$ to the subgroup $M$ are the following:\\
\noindent If $n$ is even, $n=2k,$ we have
$$
(m_1,\ldots,m_k)|M=\bigoplus_{(n_1,\ldots,n_{k-1})\prec(m_1,\ldots,m_k)}(n_1,\ldots,n_{k-1});
$$
here the symbol $(n_1,\ldots,n_{k-1})\prec(m_1,\ldots,m_k)$ means that either all $m_i$ and $n_j$ are in $\mathbb{Z}$ or all of them are in $\frac{1}{2}+\mathbb{Z}$ and
$$
m_1\geq n_1\geq m_2\geq n_2\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq|m_k|.
$$
\noindent If $n$ is odd, $n=2k+1,$ we have
$$
(m_1,\ldots,m_k)|M=\bigoplus_{(n_1,\ldots,n_k)\prec(m_1,\ldots,m_k)}(n_1,\ldots,n_k);
$$
now the symbol $(n_1,\ldots,n_k)\prec(m_1,\ldots,m_k)$ means again that either all $m_i$ and $n_j$ are in $\mathbb{Z}$ or all of them are in $\frac{1}{2}+\mathbb{Z}$ and now that
$$
m_1\geq n_1\geq m_2\geq n_2\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq m_k\geq|n_k|.
$$
\noindent The restriction $\pi^{\sigma,\nu}|K$ is the representation of $K$ induced by the representation $\sigma$ of the subgroup $M,$ thus it does not depend on $\nu.$ By the Frobenius Reciprocity Theorem the multiplicity of $q\in\hat{K}$ in $\pi^{\sigma,\nu}|K$ is equal to the multiplicity of $\sigma$ in $q|M.$ Thus
$$
\pi^{\sigma,\nu}|K=\bigoplus_{\sigma\prec(m_1,\ldots,m_k)}(m_1,\ldots,m_k).
$$
Hence, the multiplicity of every $q=(m_1,\ldots,m_k)\in\hat{K}$ in the elementary representation $\pi^{\sigma,\nu}$ is either $1$ or $0$ and the $K$-spectrum $\Gamma(\pi^{\sigma,\nu})$ consists of all $q=(m_1,\ldots,m_k)\in\hat{K}\cap(n_1+\mathbb{Z})^k$ such that
$$
\begin{array}{ll}m_1\geq n_1\geq m_2\geq n_2\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq|m_k|&\,\,\mathrm{if}\,\,n=2k\\
&\\
m_1\geq n_1\geq m_2\geq n_2\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq m_k\geq|n_k|&\,\,\mathrm{if}\,\,n=2k+1.
\end{array}
$$
\section{Representations of $\mathrm{Spin}(2k,1)$}
\noindent In this section we first write down in our notation the known results on elementary representations and their irreducible subquotients for the groups $\mathrm{Spin}(2k,1)$ (see [1], [2], [3], [7], [8], [9], [11], [12]). For $\sigma=(n_1,\ldots,n_{k-1})$ in $\hat{M}\subset\mathbb{R}^{k-1}=i\mathfrak{d}_0^*$ and for $\nu\in\mathbb{C}=\mathfrak{a}^*$ the elementary representation $\pi^{\sigma,\nu}$ is irreducible if and only if either $\nu\not\in\frac{1}{2}+n_1+\mathbb{Z}$ or
$$
\begin{array}{c}
\nu\in\left\{\pm\left(n_{k-1}+\frac{1}{2}\right),\pm\left(n_{k-2}+\frac{3}{2}\right),\ldots,\pm\left(n_2+k-\frac{5}{2}\right),\pm\left(n_1+k-\frac{3}{2}\right)\right\}.
\end{array}
$$
If $\pi^{\sigma,\nu}$ is reducible it has either two or three irreducible subquotients. If it has two, we will denote them by $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu};$ an exception is the case of nonintegral $n_j$ and $\nu=0,$ when we denote them by $\omega^{\sigma,0,\pm}.$ If $\pi^{\sigma,\nu}$ has three irreducible subquotients, we will denote them by $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu,\pm}.$ Their $K$-spectra are as follows:
\begin{enumerate}
\item[$(a1)$] If $n_j\in\mathbb{Z}_+$ and $\nu\in\left\{\pm\frac{1}{2},\pm\frac{3}{2},\ldots,\pm\left(n_{k-1}-\frac{1}{2}\right)\right\}$ (this is possible only if $n_{k-1}\geq1)$ the representation $\pi^{\sigma,\nu}$ has three irreducible subquotients $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu,\pm}.$ Their $K$-spectra consist of all $q=(m_1,\ldots,m_k)$ in $\hat{K}\cap\mathbb{Z}^k$ such that:
$$
\begin{array}{ll}
\Gamma(\tau^{\sigma,\nu}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1},\,\,|m_k|\leq|\nu|-\frac{1}{2};\\
\Gamma(\omega^{\sigma,\nu,+}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq m_k\geq|\nu|+\frac{1}{2};\\
\Gamma(\omega^{\sigma,\nu,-}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1},\,\,\,-|\nu|-\frac{1}{2}\geq m_k\geq -n_{k-1}.
\end{array}
$$
\item[$(a2)$] If $n_j\in\left(\frac{1}{2}+\mathbb{Z}_+\right)$ and $\nu\in\left\{\pm1,\ldots,\pm\left(n_{k-1}-\frac{1}{2}\right)\right\}$ (this is possible only if $n_{k-1}\geq\frac{3}{2})$ the representation $\pi^{\sigma,\nu}$ has three irreducible subquotients $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu,\pm}.$ Their $K$-spectra consist of all $q=(m_1,\ldots,m_k)$ in $\hat{K}\cap\left(\frac{1}{2}+\mathbb{Z}\right)^k$ such that:
$$
\begin{array}{ll}
\Gamma(\tau^{\sigma,\nu}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1},\,\,|m_k|\leq|\nu|-\frac{1}{2};\\
\Gamma(\omega^{\sigma,\nu,+}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq m_k\geq|\nu|+\frac{1}{2};\\
\Gamma(\omega^{\sigma,\nu,-}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1},\,\,\,-|\nu|-\frac{1}{2}\geq m_k\geq -n_{k-1}.
\end{array}
$$
\item[$(a3)$] If $n_j\in\left(\frac{1}{2}+\mathbb{Z}_+\right)$ and if $\nu=0$ the representation has two irreducible subquotients $\omega^{\sigma,0,\pm};$ they are both subrepresentations since $\pi^{\sigma,0}$ is unitary. Their $K$-spectra consist of all $q=(m_1,\ldots,m_k)$ in $\hat{K}\cap\left(\frac{1}{2}+\mathbb{Z}\right)^k$ such that:
$$
\begin{array}{ll}
\Gamma(\omega^{\sigma,0,+}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq m_k\geq\frac{1}{2};\\
\Gamma(\omega^{\sigma,0,-}):&\,\,m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1},\,\,\,-\frac{1}{2}\geq m_k\geq -n_{k-1}.
\end{array}
$$
\item[$(bj)$] If $n_{j-1}>n_j$ for some $j\in\{2,\ldots,k-1\}$ and if
$$
\begin{array}{c}
\nu\in\left\{\pm\left(n_j+k-j+\frac{1}{2}\right),\pm\left(n_j+k-j+\frac{3}{2}\right),\ldots,\pm\left(n_{j-1}+k-j-\frac{1}{2}\right)\right\},
\end{array}
$$
then $\pi^{\sigma,\nu}$ has two irreducible subquotients $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu}.$ Their $K$-spectra consist of all $q=(m_1,\ldots,m_k)\in\hat{K}\cap(n_1+\mathbb{Z})^k$ such that:
$$
\begin{array}{ll}
\Gamma(\tau^{\sigma,\nu}):&\quad m_1\geq n_1\geq\cdots\geq m_{j-1}\geq n_{j-1},\\
&\quad|\nu|-k+j-\frac{1}{2}\geq m_j\geq n_j\geq\cdots\geq n_{k-1}\geq|m_k|;\\
\Gamma(\omega^{\sigma,\nu}):&\quad m_1\geq n_1\geq\cdots\geq n_{j-1}\geq m_j\geq|\nu|-k+j+\frac{1}{2},\\
&\quad n_j\geq m_{j+1}\geq\cdots\geq n_{k-1}\geq|m_k|.
\end{array}
$$
\item[$(c)$] If
$$
\begin{array}{c}
\nu\in\left\{\pm\left(n_1+k-\frac{1}{2}\right),\pm\left(n_1+k+\frac{1}{2}\right),\pm\left(n_1+k+\frac{3}{2}\right),\ldots\right\},
\end{array}
$$
then $\pi^{\sigma,\nu}$ has two irreducible subquotients: the finite-dimensional representation $\tau^{\sigma,\nu}$ and the infinite-dimensional $\omega^{\sigma,\nu}.$ Their $K$-spectra consist of all $q=(m_1,\ldots,m_k)\in\hat{K}\cap\left(n_1+\mathbb{Z}\right)^k$ such that:
$$
\begin{array}{ll}
\Gamma(\tau^{\sigma,\nu}):&\,\,|\nu|-k+\frac{1}{2}\geq m_1\geq n_1\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq|m_k|;\\
\Gamma(\omega^{\sigma,\nu}):&\,\,m_1\geq|\nu|-k+\frac{3}{2},\,\,n_1\geq m_2\geq\cdots\geq m_{k-1}\geq n_{k-1}\geq|m_k|.
\end{array}
$$
\end{enumerate}
\noindent An irreducible elementary representation $\pi^{\sigma,\nu}$ is unitary if and only if either $\nu\in i\mathbb{R}$ (the so called {\bf unitary principal series}) or $\nu\in\langle-\nu(\sigma),\nu(\sigma)\rangle,$ where
$$
\nu(\sigma)=\min\,\{\nu\geq0;\,\,\pi^{\sigma,\nu}\,\,\mathrm{is}\,\,\mathrm{reducible}\}
$$
(the so called {\bf complementary series}). Notice that for nonintegral $n_j$'s $\pi^{\sigma,0}$ is reducible, thus $\nu(\sigma)=0$ and the complementary series is empty. In the case of integral $n_j$'s we have the following possibilities:
\begin{enumerate}
\item[$(a)$] If $n_{k-1}\geq1,$ then $\nu(\sigma)=\frac{1}{2}.$ The reducible elementary representation $\pi^{\sigma,\frac{1}{2}}$ is of the type $(a1).$
\item[$(b)$] If $n_{k-1}=0$ and $n_1\geq1,$ let $j\in\{2,\ldots,k-1\}$ be such that $n_{k-1}=\cdots=n_j=0<n_{j-1}.$ Then $\nu(\sigma)=k-j+\frac{1}{2}.$ The reducible elementary representation $\pi^{\sigma,k-j+\frac{1}{2}}$ is of the type $(bj).$
\item[$(c)$] If $\sigma$ is trivial, i.e. $n_1=\cdots=n_{k-1}=0,$ then $\nu(\sigma)=k-\frac{1}{2}.$ The reducible elementary representation $\pi^{\sigma,k-\frac{1}{2}}$ is of the type $(c).$
\end{enumerate}
\noindent Among the irreducible subquotients of reducible elementary representations the unitary ones are $\omega^{\sigma,\nu,\pm},$ $\tau^{\sigma,\nu(\sigma)}$ and $\omega^{\sigma,\nu(\sigma)}.$\\
\noindent Now we write down the infinitesimal characters. The dual space $\mathfrak{h}^*$ of the Cartan subalgebra $\mathfrak{h}=\mathfrak{d}\dotplus\mathfrak{a}$ is identified with $\mathbb{C}^k$ through the basis $(H_1,\ldots,H_{k-1},H).$ The infinitesimal character of the elementary representation $\pi^{\sigma,\nu}$ is $\chi_{\Lambda(\sigma,\nu)},$ where $\Lambda(\sigma,\nu)\in\mathfrak{h}^*$ is given by
$$
\begin{array}{c}
\Lambda(\sigma,\nu)|\mathfrak{d}=\lambda_{\sigma}+\delta_{\mathfrak{m}}\quad\mathrm{and}\quad\Lambda(\sigma,\nu)|\mathfrak{a}=\nu,
\end{array}
$$
where $\lambda_{\sigma}\in\mathfrak{d}^*$ is the highest weight of the representation $\sigma$ and $\delta_{\mathfrak{m}}$ is the half-sum of positive roots of the pair $(\mathfrak{m},\mathfrak{d}).$ Using the earlier described identifications of $\mathfrak{d}^*=\mathbb{C}^{k-1}$ and $\mathfrak{a}^*=\mathbb{C}$ with subspaces of $\mathfrak{h}^*=\mathbb{C}^k$ we have $\lambda_{\sigma}=(n_1,\ldots,n_{k-1},0),$ $\delta_{\mathfrak{m}}=\left(k-\frac{3}{2},k-\frac{5}{2},\ldots,\frac{1}{2},0\right),$ $\nu=(0,\ldots,0,\nu),$ hence
$$
\begin{array}{c}
\Lambda(\sigma,\nu)=\left(n_1+k-\frac{3}{2},n_2+k-\frac{5}{2},\ldots,n_{k-1}+\frac{1}{2},\nu\right).
\end{array}
$$
As we pointed out, if $\mathfrak{t}^*$ is identified with $\mathbb{C}^k$ through the basis $(H_1,\ldots,H_k)$ of $\mathfrak{t},$ the same parameters determine this infinitesimal character with respect to the Harish--Chandra isomorphism $Z(\mathfrak{g})\longrightarrow\mathcal{P}(\mathfrak{t}^*)^W.$\\
\noindent The $W_K$-chamber in $\mathbb{R}^k=i\mathfrak{t}_0^*$ corresponding to the chosen positive roots $\Delta_K^+$ is
$$
C=\{\lambda\in\mathbb{R}^k;\,\,\lambda_1>\lambda_2>\cdots>\lambda_{k-1}>|\lambda_k|>0\}.
$$
The set $\mathcal{D}$ of $W$-chambers contained in $C$ consists of two elements:
$$
D_1=\{\lambda\in\mathbb{R}^k;\,\,\lambda_1>\lambda_2>\cdots>\lambda_{k-1}>\lambda_k>0\}
$$
and
$$
D_2=\{\lambda\in\mathbb{R}^k;\,\,\lambda_1>\lambda_2>\cdots>\lambda_{k-1}>-\lambda_k>0\}.
$$
The closure $\overline{D}_1$ is a fundamental domain for the action of $W$ on $\mathbb{R}^k,$ i.e. each $W$-orbit in $\mathbb{R}^k$ intersects $\overline{D}_1$ in exactly one point. We saw that the reducibility criteria imply that $\Lambda(\sigma,\nu)\in\mathbb{R}^k$ whenever $\pi^{\sigma,\nu}$ is reducible. We denote by $\lambda(\sigma,\nu)$ the unique point in the intersection of $W\Lambda(\sigma,\nu)$ with $\overline{D}_1.$ In the following theorem without loss of generality we may suppose that $\nu\geq0,$ since $\pi^{\sigma,\nu}$ and $\pi^{\sigma,-\nu}$ have the same irreducible subquotients.
\begin{tm}\begin{enumerate}\item[$(i)$] $\pi^{\sigma,\nu}$ is reducible if and only if its infinitesimal character is $\chi_{\lambda}$ for some $\lambda\in\Lambda,$ where
$$
\begin{array}{c}
\Lambda=\left\{\lambda\in\mathbb{Z}_+^k\cup\left(\frac{1}{2}+\mathbb{Z}_+\right)^k;\,\,\lambda_1>\lambda_2>\cdots>\lambda_{k-1}>\lambda_k\geq0\right\}.
\end{array}
$$
We write $\Lambda$ as the disjoint union $\Lambda^*\cup\Lambda^0,$ where
$$
\begin{array}{c}
\Lambda^*=\left\{\lambda\in\mathbb{Z}_+^k\cup\left(\frac{1}{2}+\mathbb{Z}_+\right)^k;\,\,\lambda_1>\lambda_2>\cdots>\lambda_{k-1}>\lambda_k>0\right\},
\end{array}
$$
$$
\begin{array}{c}
\Lambda^0=\left\{\lambda\in\mathbb{Z}_+^k\cup\left(\frac{1}{2}+\mathbb{Z}_+\right)^k;\,\,\lambda_1>\lambda_2>\cdots>\lambda_{k-1}>0,\,\,\lambda_k=0\right\}.
\end{array}
$$
\item[$(ii)$] For $\lambda\in\Lambda^*$ there exist $k$ ordered pairs $(\sigma,\nu),$ $\sigma=(n_1,\ldots,n_{k-1})\in\hat{M},$ $\nu\geq0,$ such that $\chi_{\lambda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ These ordered pairs are:
\begin{enumerate}
\item[$(a)$] $\nu=\lambda_k,$ $n_1=\lambda_1-k+\frac{3}{2},$ $n_2=\lambda_2-k+\frac{5}{2},$ $\ldots$ $n_{k-1}=\lambda_{k-1}-\frac{1}{2}.$
\item[$(bj)$] $\nu=\lambda_j,$ $n_1=\lambda_1-k+\frac{3}{2},$ $\ldots$ $n_{j-1}=\lambda_{j-1}-k+j-\frac{1}{2},$ $n_j=\lambda_{j+1}-k+j+\frac{1}{2},$ $\ldots$ $n_{k-1}=\lambda_k-\frac{1}{2},$ $2\leq j\leq k-1.$
\item[$(c)$] $\nu=\lambda_1,$ $n_1=\lambda_2-k+\frac{3}{2},$ $\ldots$ $n_s=\lambda_{s+1}-k+s+\frac{1}{2},$ $\ldots$ $n_{k-1}=\lambda_k-\frac{1}{2}.$
\end{enumerate}
\item[$(iii)$] For $\lambda\in\Lambda^0,$ the ordered pair $(\sigma,\nu),$ $\sigma=(n_1,\ldots,n_{k-1})\in\hat{M},$ $\nu\in\mathbb{R},$ such that $\chi_{\lambda}$ is the infinitesimal character of $\pi^{\sigma,\nu},$ is unique:
$$
\begin{array}{c}
n_1=\lambda_1-k+\frac{3}{2},\,\,n_2=\lambda_2-k+\frac{5}{2},\,\ldots\,,\,\,n_{k-1}=\lambda_{k-1}-\frac{1}{2},\quad\nu=\lambda_k=0.
\end{array}
$$
\end{enumerate}
\end{tm}
\textbf{Proof:} $(i)$ We already know that for a reducible elementary representation $\pi^{\sigma,\nu}$ one has $\Lambda(\sigma,\nu)\in\mathbb{Z}^k\cup\left(\frac{1}{2}+\mathbb{Z}\right)^k.$ As the Weyl group $W=W(\mathfrak{g},\mathfrak{t})$ consists of all permutations of coordinates combined with multiplying some of the coordinates by $-1,$ we conclude that the infinitesimal character of $\pi^{\sigma,\nu}$ is $\chi_{\lambda}$ for some $\lambda\in\Lambda.$ The sufficiency will follow from the proofs of $(ii)$ and $(iii).$\\
\noindent $(ii)$ Let $\lambda\in\Lambda^*$ and suppose that $\chi_{\lambda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ This means that $\Lambda(\sigma,\nu)$ and $\lambda$ are $W$-conjugated. Now, since $\lambda_j>0,$ $\forall j\in\{1,\ldots,k\},$ $n_s+k-s-\frac{1}{2}>0,$ $\forall s\in\{1,\ldots,k-1\}$ and $\nu\geq0,$ we conclude that necessarily $\nu=\lambda_j$ for some $j\in\{1,\ldots,k\}.$ We inspect now each of these $k$ possibilities. % NOTE(review): the garbled source read ``$n_s-k+s+\frac{1}{2}>0$''; the coordinates of $\Lambda(\sigma,\nu)$ are $n_s+k-s-\frac{1}{2}$, so the signs were corrected -- confirm against the original.
\\
\noindent $(a)$ $\nu=\lambda_k.$ Then necessarily
$$
\begin{array}{c}
n_1=\lambda_1-k+\frac{3}{2},\,\,\ldots\,\,,\,\,n_{k-1}=\lambda_{k-1}-\frac{1}{2}.
\end{array}
$$
We check now that the so defined $(k-1)$-tuple $(n_1,\ldots,n_{k-1})$ is indeed in $\hat{M}.$ For $1\leq j\leq k-2$ we have
$$
\begin{array}{c}
n_j-n_{j+1}=\left(\lambda_j-k+\frac{2j+1}{2}\right)-\left(\lambda_{j+1}-k+\frac{2j+3}{2}\right)=\lambda_j-\lambda_{j+1}-1\in\mathbb{Z}_+.
\end{array}
$$
Further, if $\lambda\in\mathbb{N}^k,$ then $\lambda_{k-1}\geq2,$ thus $n_{k-1}\geq\frac{3}{2},$ and if $\lambda\in\left(\frac{1}{2}+\mathbb{Z}_+\right)^k,$ then $\lambda_{k-1}\geq\frac{3}{2},$ thus $n_{k-1}\geq1.$ Especially, $\sigma=(n_1,\ldots,n_{k-1})\in\hat{M}.$ Finally, we see that $\nu=\lambda_k\leq\lambda_{k-1}-1=n_{k-1}-\frac{1}{2},$ so we conclude that the elementary representation $\pi^{\sigma,\nu}$ is reducible.\\
\noindent $(bj)$ $\nu=\lambda_j$ for some $j\in\{2,\ldots,k-1\}.$ Then necessarily $n_1=\lambda_1-k+\frac{3}{2},$ $\ldots,$ $n_{j-1}=\lambda_{j-1}-k+\frac{2j-1}{2},\,n_j=\lambda_{j+1}-k+\frac{2j+1}{2},$ $\ldots,$ $n_{k-1}=\lambda_k-\frac{1}{2}.$ We check now that the so defined $(k-1)$-tuple $(n_1,\ldots,n_{k-1})$ is indeed in $\hat{M}.$ For $1\leq s\leq j-2$ we have
$$
\begin{array}{c}
n_s-n_{s+1}=\left(\lambda_s-k+\frac{2s+1}{2}\right)-\left(\lambda_{s+1}-k+\frac{2s+3}{2}\right)=\lambda_s-\lambda_{s+1}-1\in\mathbb{Z}_+.
\end{array}
$$
Further,
$$
\begin{array}{c}
n_{j-1}-n_j=\left(\lambda_{j-1}-k+\frac{2j-1}{2}\right)-\left(\lambda_{j+1}-k+\frac{2j+1}{2}\right)=\lambda_{j-1}-\lambda_{j+1}-1\in\mathbb{N}.
\end{array}
$$
For $j\leq s\leq k-2$ we have
$$
\begin{array}{c}
n_s-n_{s+1}=\left(\lambda_{s+1}-k+\frac{2s+1}{2}\right)-\left(\lambda_{s+2}-k+\frac{2s+3}{2}\right)=\lambda_{s+1}-\lambda_{s+2}-1\in\mathbb{Z}_+.
\end{array}
$$
Finally, $n_{k-1}=\lambda_k-\frac{1}{2}\in\frac{1}{2}\mathbb{Z}_+.$ Thus, $\sigma=(n_1,\ldots,n_{k-1})\in\hat{M}.$\\
\noindent We check now that the elementary representation $\pi^{\sigma,\nu}$ is reducible. We have
$$
\begin{array}{c}
1\leq\lambda_{j-1}-\lambda_j=n_{j-1}+k-j+\frac{1}{2}-\nu\quad\Leftrightarrow\quad\nu\leq n_{j-1}+k-j-\frac{1}{2}
\end{array}
$$
and
$$
\begin{array}{c}
1\leq\lambda_j-\lambda_{j+1}=\nu-n_j-k+j+\frac{1}{2}\quad\Leftrightarrow\quad\nu\geq n_j+k-j+\frac{1}{2}.
\end{array}
$$
Thus,
$$
\begin{array}{c}
\nu\in\left\{n_j+k-j+\frac{1}{2},\ldots,n_{j-1}+k-j-\frac{1}{2}\right\}
\end{array}
$$
and we conclude that $\pi^{\sigma,\nu}$ is reducible.\\
\noindent $(c)$ $\nu=\lambda_1.$ Then necessarily
$$
\begin{array}{c}
n_1=\lambda_2-k+\frac{3}{2},\ldots,n_s=\lambda_{s+1}-k+\frac{2s+1}{2},\ldots,n_{k-1}=\lambda_k-\frac{1}{2}.
\end{array}
$$
As before we see that for $1\leq s\leq k-2$ we have
$$
n_s-n_{s+1}=\lambda_{s+1}-\lambda_{s+2}-1\in\mathbb{Z}_+.
$$
Further, $n_{k-1}=\lambda_k-\frac{1}{2}\in\frac{1}{2}\mathbb{Z}_+.$ Thus, $\sigma=(n_1,\ldots,n_{k-1})\in\hat{M}.$ Finally,
$$
\begin{array}{c}
1\leq\lambda_1-\lambda_2=\nu-\left(n_1+k-\frac{3}{2}\right)\quad\Leftrightarrow\quad\nu\geq n_1+k-\frac{1}{2},
\end{array}
$$
i.e.
$$
\begin{array}{c}
\nu\in\left\{n_1+k-\frac{1}{2},n_1+k+\frac{1}{2},n_1+k+\frac{3}{2},\ldots\right\}.
\end{array}
$$
Thus, the elementary representation $\pi^{\sigma,\nu}$ is reducible.\\
\noindent $(iii)$ Let $\lambda\in\Lambda^0$ and suppose that the elementary representation $\pi^{\sigma,\nu},$ $\sigma=(n_1,\ldots,n_{k-1})\in\hat{M},$ $\nu\in\mathbb{R},$ has infinitesimal character $\chi_{\lambda}.$ As in the proof of $(ii)$ we conclude that necessarily $|\nu|=\lambda_j$ for some $j\in\{1,\ldots,k\}.$ The assumption $j<k$ would imply $n_{k-1}+\frac{1}{2}=\lambda_k=0$ and this is impossible since $n_{k-1}\geq0.$ Thus, we conclude that $j=k,$ i.e. $\nu=\lambda_k=0.$ It follows that
$$
\begin{array}{c}
n_1=\lambda_1-k+\frac{3}{2},\,\,n_2=\lambda_2-k+\frac{5}{2},\,\,\ldots\,\,,\,\,n_{k-1}=\lambda_{k-1}-\frac{1}{2}.
\end{array}
$$
As before we conclude that the so defined $\sigma=(n_1,\ldots,n_{k-1})$ is in $\hat{M}.$ Finally, the $n_j$ are nonintegral and $\nu=0,$ therefore $\pi^{\sigma,0}$ is reducible.\\
\noindent Fix now $\lambda\in\Lambda^*.$ By $(ii)$ in Theorem 1. there exist $k$ pairs $(\sigma,\nu)\in\hat{M}\times\frac{1}{2}\mathbb{N}$ such that $\chi_{\lambda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ Denote them by $(\sigma_j,\nu_j),$ $1\leq j\leq k,$ where $\nu_j=\lambda_j$ and
$$
\begin{array}{ll}
(c)&\sigma_1=\left(\lambda_2-k+\frac{3}{2},\ldots,\lambda_{s+1}-k+\frac{2s+1}{2},\ldots,\lambda_k-\frac{1}{2}\right),\\
&\\
(bj)&\sigma_j=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_{j-1}-k+\frac{2j-1}{2},\lambda_{j+1}-k+\frac{2j+1}{2},\ldots,\lambda_k-\frac{1}{2}\right),\\
&\quad2\leq j\leq k-1,\\
&\\
(a)&\sigma_k=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_s-k+\frac{2s+1}{2},\ldots,\lambda_{k-1}-\frac{1}{2}\right).
\end{array}
$$
\noindent There are altogether $k+2$ mutually infinitesimally inequivalent irreducible subquotients of the reducible elementary representations $\pi^{\sigma_1,\nu_1},\ldots,\pi^{\sigma_k,\nu_k}$ which we denote by $\tau_1^{\lambda},\ldots,\tau_k^{\lambda},\omega_+^{\lambda},\omega_-^{\lambda}:$
$$
\begin{array}{l}
\tau_1^{\lambda}=\tau^{\sigma_1,\nu_1},\\
\\
\tau_2^{\lambda}=\omega^{\sigma_1,\nu_1}\cong\tau^{\sigma_2,\nu_2},\\
\\
\vdots\\
\\
\tau_j^{\lambda}=\omega^{\sigma_{j-1},\nu_{j-1}}\cong\tau^{\sigma_j,\nu_j},\\
\\
\vdots\\
\\
\tau_k^{\lambda}=\omega^{\sigma_{k-1},\nu_{k-1}}\cong\tau^{\sigma_k,\nu_k},\\
\\
\omega_+^{\lambda}=\omega^{\sigma_k,\nu_k,+},\\
\\
\omega_-^{\lambda}=\omega^{\sigma_k,\nu_k,-}.
\end{array}
$$
\noindent The $K$-spectra of these irreducible representations consist of all $q=(m_1,\ldots,m_k)\in\hat{K}\cap\left(\lambda_1+\frac{1}{2}+\mathbb{Z}\right)^k$ that satisfy:
$$
\begin{array}{ll}
\Gamma(\tau_1^{\lambda}):&\lambda_1-k+\frac{1}{2}\geq m_1\geq\lambda_2-k+\frac{3}{2}\geq\cdots\geq m_{k-1}\geq\lambda_k-\frac{1}{2}\geq|m_k|,\\
&\,\,\vdots\\
\Gamma(\tau_j^{\lambda}):&m_1\geq\lambda_1-k+\frac{3}{2}\geq m_2\geq\cdots\geq m_{j-1}\geq\lambda_{j-1}-k+j-\frac{1}{2},\\
&\lambda_j-k+j-\frac{1}{2}\geq m_j\geq\cdots\geq m_{k-1}\geq\lambda_k-\frac{1}{2}\geq|m_k|,\\
&\,\,\vdots\\
\Gamma(\tau_k^{\lambda}):&m_1\geq\lambda_1-k+\frac{3}{2}\geq m_2\geq\lambda_2-k+\frac{5}{2}\geq\cdots\geq m_{k-1}\geq\lambda_{k-1}-\frac{1}{2},\\
&\lambda_k-\frac{1}{2}\geq|m_k|,\\
&\\
\Gamma(\omega_+^{\lambda}):&m_1\geq\lambda_1-k+\frac{3}{2}\geq\cdots\geq m_{k-1}\geq\lambda_{k-1}-\frac{1}{2}\geq m_k\geq\lambda_k+\frac{1}{2},\\
&\\
\Gamma(\omega_-^{\lambda}):&m_1\geq\lambda_1-k+\frac{3}{2}\geq\cdots\geq m_{k-1}\geq\lambda_{k-1}-\frac{1}{2}\geq-m_k\geq\lambda_k+\frac{1}{2}.
\end{array}
$$
\noindent It is obvious that each of these representations $\pi$ has one $D_1$-corner, which we denote by $q_1(\pi),$ and one $D_2$-corner, which we denote by $q_2(\pi).$ The list is:
$$
\begin{array}{l}
q_1(\tau_1^{\lambda})=\left(\lambda_2-k+\frac{3}{2},\ldots,\lambda_{k-1}-\frac{3}{2},\lambda_k-\frac{1}{2},-\lambda_k+\frac{1}{2}\right),\\
q_2(\tau_1^{\lambda})=\left(\lambda_2-k+\frac{3}{2},\ldots,\lambda_{k-1}-\frac{3}{2},\lambda_k-\frac{1}{2},\lambda_k-\frac{1}{2}\right),\\
q_1(\tau_j^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_{j-1}-k+j-\frac{1}{2},\lambda_{j+1}-k+j+\frac{1}{2},\ldots,\lambda_k-\frac{1}{2},-\lambda_k+\frac{1}{2}\right),\\
q_2(\tau_j^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_{j-1}-k+j-\frac{1}{2},\lambda_{j+1}-k+j+\frac{1}{2},\ldots,\lambda_k-\frac{1}{2},\lambda_k-\frac{1}{2}\right),\\
q_1(\tau_k^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\lambda_k+\frac{1}{2}\right),\\
q_2(\tau_k^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\lambda_k-\frac{1}{2}\right),\\
q_1(\omega_+^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\lambda_k+\frac{1}{2}\right),\\
q_2(\omega_+^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\lambda_{k-1}-\frac{1}{2}\right),\\
q_1(\omega_-^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\lambda_{k-1}+\frac{1}{2}\right),\\
q_2(\omega_-^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\lambda_k-\frac{1}{2}\right).
\end{array}
$$
We inspect now which of these corners are fundamental. Since
$$
\begin{array}{l}
\rho_K-\rho_P^{D_1}=\left(k-\frac{3}{2},k-\frac{5}{2},\ldots,\frac{1}{2},-\frac{1}{2}\right),\\
\\
\rho_K-\rho_P^{D_2}=\left(k-\frac{3}{2},k-\frac{5}{2},\ldots,\frac{1}{2},\frac{1}{2}\right),
\end{array}
$$
we have
$$
\begin{array}{ll}
q_1(\tau_1^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_2,\ldots,\lambda_k,-\lambda_k),&\mathrm{not\,\,fundamental},\\
q_2(\tau_1^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_2,\ldots,\lambda_k,\lambda_k),&\mathrm{not\,\,fundamental},\\
q_1(\tau_j^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_1,\ldots,\lambda_{j-1},\lambda_{j+1},\ldots,\lambda_k,-\lambda_k),&\mathrm{not\,\,fundamental},\\
q_2(\tau_j^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_1,\ldots,\lambda_{j-1},\lambda_{j+1},\ldots,\lambda_k,\lambda_k),&\mathrm{not\,\,fundamental},\\
q_1(\tau_k^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_1,\ldots,\lambda_{k-1},-\lambda_k),&\mathrm{fundamental},\\
q_2(\tau_k^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_1,\ldots,\lambda_{k-1},\lambda_k),&\mathrm{fundamental},\\
q_1(\omega_+^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_1,\ldots,\lambda_{k-1},\lambda_k),&\mathrm{fundamental},\\
q_2(\omega_+^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_1,\ldots,\lambda_{k-1},\lambda_{k-1}),&\mathrm{not\,\,fundamental},\\
q_1(\omega_-^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_1,\ldots,\lambda_{k-1},-\lambda_{k-1}),&\mathrm{not\,\,fundamental},\\
q_2(\omega_-^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_1,\ldots,\lambda_{k-1},-\lambda_k),&\mathrm{fundamental}.
\end{array}
$$
\noindent Notice that the finite dimensional $\tau_1^{\lambda}$ is not unitary and $q_1(\tau_1^{\lambda})\not=q_2(\tau_1^{\lambda})$ unless it is the trivial $1$-dimensional representation $(\lambda=\left(k-\frac{1}{2},k-\frac{3}{2},\ldots,\frac{1}{2}\right))$ when $q_1(\tau_1^{\lambda})=q_2(\tau_1^{\lambda})=(0,\ldots,0).$ Next, $\tau_j^{\lambda}$ for $2\leq j\leq k$ is not unitary and $q_1(\tau_j^{\lambda})\not=q_2(\tau_j^{\lambda}).$ Finally, $\omega_+^{\lambda}$ and $\omega_-^{\lambda}$ are unitary (these are the discrete series representations) and each of them has one fundamental corner; the other corner is not fundamental.\\
\noindent We consider now the case $\lambda\in\Lambda^0,$ so $\lambda_k=0.$ Then the unique pair $(\sigma,\nu)\in\hat{M}\times\mathbb{R},$ such that $\chi_{\lambda}$ is the infinitesimal character of the elementary representation $\pi^{\sigma,\nu},$ is
$$
\begin{array}{ll}
\sigma=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2}\right),&\nu=0.
\end{array}
$$
The elementary representation $\pi^{\sigma,0}$ is unitary and it is the direct sum of two unitary irreducible representations $\omega_+^{\lambda}$ and $\omega_-^{\lambda}.$ Their $K$-spectra consist of all $q=(m_1,\ldots,m_k)\in\hat{K}\cap\left(\frac{1}{2}+\mathbb{Z}\right)^k$ that satisfy
$$
\begin{array}{ll}
\Gamma(\omega_+^{\lambda}):&m_1\geq\lambda_1-k+\frac{3}{2}\geq\cdots\geq m_{k-1}\geq\lambda_{k-1}-\frac{1}{2}\geq m_k\geq\frac{1}{2},\\
&\\
\Gamma(\omega_-^{\lambda}):&m_1\geq\lambda_1-k+\frac{3}{2}\geq\cdots\geq m_{k-1}\geq\lambda_{k-1}-\frac{1}{2}\geq-m_k\geq\frac{1}{2}.
\end{array}
$$
Again each of these representations has one $D_1$-corner and one $D_2$-corner:
$$
\begin{array}{l}
q_1(\omega_+^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\frac{1}{2}\right),\\
q_2(\omega_+^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\lambda_{k-1}-\frac{1}{2}\right),\\
q_1(\omega_-^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\lambda_{k-1}+\frac{1}{2}\right),\\
q_2(\omega_-^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\frac{1}{2}\right).
\end{array}
$$
Two of them are fundamental:
$$
\begin{array}{ll}
q_1(\omega_+^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_1,\ldots,\lambda_{k-1},0),&\mathrm{fundamental},\\
q_2(\omega_+^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_1,\ldots,\lambda_{k-1},\lambda_{k-1}),&\mathrm{not\,\,fundamental},\\
q_1(\omega_-^{\lambda})+\rho_K-\rho_P^{D_1}=(\lambda_1,\ldots,\lambda_{k-1},-\lambda_{k-1}),&\mathrm{not\,\,fundamental},\\
q_2(\omega_-^{\lambda})+\rho_K-\rho_P^{D_2}=(\lambda_1,\ldots,\lambda_{k-1},0),&\mathrm{fundamental}.
\end{array}
$$
Thus, we see that again each of these unitary representations has one fundamental corner and the other corner is not fundamental.\\
\noindent To summarize, we see that $\pi\in\wideparen{G}^0$ with exactly one fundamental corner is unitary; its fundamental corner we denote by $q(\pi).$ For all the others $\pi\in\hat{G}^0$ one has $q_1(\pi)=q_2(\pi)$ and we denote by $q(\pi)$ this unique corner of $\pi.$
\begin{tm} $\pi\mapsto q(\pi)$ is a bijection of $\hat{G}^0$ onto $\hat{K}.$
\end{tm}
\textbf{Proof:} We have
$$
\hat{G}^0=\bigcup_{j=1}^{k}\{\tau_j^{\lambda};\,\,\lambda\in\Lambda_j^*\}\cup\{\omega_+^{\lambda};\,\,\lambda\in\Lambda\}\cup\{\omega_-^{\lambda};\,\,\lambda\in\Lambda\},
$$
where $\Lambda_1^*=\left\{\left(k-\frac{1}{2},k-\frac{3}{2},\ldots,\frac{1}{2}\right)\right\}$ and for $2\leq j\leq k$
$$
\begin{array}{c}
\Lambda_j^*=\left\{\lambda\in\Lambda^*;\,\,\lambda_{j-1}>k-j+\frac{3}{2}\,\,\mathrm{and}\,\,\lambda_s=k-s+\frac{1}{2}\,\,\mathrm{for}\,\,j\leq s\leq k\right\}.
\end{array}
$$
\noindent Let $q=(m_1,\ldots,m_k)\in\hat{K}.$ We have three possibilities:\\
\noindent$(1)$ $m_k=0.$ Then $q\in\mathbb{Z}_+^k$ and $m_1\geq m_2\geq\cdots\geq m_{k-1}\geq0.$ Let
$$
j=\min\,\{s;\,\,1\leq s\leq k,\,\,m_s=0\}.
$$
Set
$$
\begin{array}{ll}
\lambda_s=m_s+k-s+\frac{1}{2},&\quad1\leq s\leq j-1,\\
&\\
\lambda_s=k-s+\frac{1}{2},&\quad j\leq s\leq k.
\end{array}
$$
Then for $1\leq s\leq j-2$ we have $\lambda_s-\lambda_{s+1}=m_s-m_{s+1}+1\geq1,$ next $\lambda_{j-1}-\lambda_j=m_{j-1}+1\geq2,$ further, for $j\leq s\leq k-1$ we have $\lambda_s-\lambda_{s+1}=1,$ and finally $\lambda_k=\frac{1}{2}.$ Thus, we see that $\lambda\in\Lambda^*.$ If $j=1$ we see that $\lambda$ is the unique element of $\Lambda_1^*.$ If $j\geq2$ we have $m_{j-1}>0$ and so
$$
\begin{array}{c}
\lambda_{j-1}=m_{j-1}+k-j+1+\frac{1}{2}>k-j+\frac{3}{2},
\end{array}
$$
i.e. $\lambda\in\Lambda_j^*.$ From the definition of $\lambda$ we see that $q=q(\tau_j^{\lambda}).$\\
\noindent$(2)$ $m_k>0.$ Set now
$$
\begin{array}{c}
\lambda_j=m_j+k-j-\frac{1}{2}.
\end{array}
$$
Then $\lambda_j-\lambda_{j+1}=m_j-m_{j+1}+1\geq1$ for $1\leq j\leq k-1$ and $\lambda_k=m_k-\frac{1}{2}\geq0.$ Thus, $\lambda\in\Lambda$ and one sees that $q=q(\omega_+^{\lambda}).$\\
\noindent$(3)$ $m_k<0.$ Set now
$$
\begin{array}{c}
\lambda_j=m_j+k-j-\frac{1}{2},\,\,1\leq j\leq k-1,\quad\lambda_k=-m_k-\frac{1}{2}.
\end{array}
$$
Then $\lambda_j-\lambda_{j+1}=m_j-m_{j+1}+1\geq1$ for $1\leq j\leq k-2,$ further $\lambda_{k-1}-\lambda_k=m_{k-1}+m_k+1=m_{k-1}-|m_k|+1\geq1,$ and finally $\lambda_k=|m_k|-\frac{1}{2}\geq0.$ Thus, $\lambda\in\Lambda$ and one sees that $q=q(\omega_-^{\lambda}).$\\
\noindent We have proved that $\pi\mapsto q(\pi)$ is a surjection of $\hat{G}^0$ onto $\hat{K}.$ From the proof we see that this map is injective too.\\
\noindent Consider now minimal $K$-types in the sense of Vogan: we say that $q\in\hat{K}$ is a \textbf{minimal} $K$-\textbf{type} of the representation $\pi$ if $q\in\Gamma(\pi)$ and
$$
\|q+2\rho_K\|=\min\,\{\|q^{\prime}+2\rho_K\|;\,\,q^{\prime}\in\Gamma(\pi)\}.
$$
For $q\in\hat{K}$ we have
$$
\|q+2\rho_K\|^2=(m_1+2k-2)^2+(m_2+2k-4)^2+\cdots+(m_{k-1}+2)^2+m_k^2
$$
and so we find:\\
\noindent If $\lambda\in\Lambda\cap\left(\frac{1}{2}+\mathbb{Z}\right)^k,$ i.e. $\lambda\in\Lambda^*$ and $\Gamma(\tau_j^{\lambda})\subset\mathbb{Z}^k,$ the representation $\tau_j^{\lambda}$ has one minimal $K$-type which we denote by $q^V(\tau_j^{\lambda}):$
$$
\begin{array}{l}
q^V(\tau_1^{\lambda})=\left(\lambda_2-k+\frac{3}{2},\lambda_3-k+\frac{5}{2},\ldots,\lambda_k-\frac{1}{2},0\right),\\
\\
q^V(\tau_j^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_{j-1}-k+j-\frac{1}{2},\lambda_{j+1}-k+j+\frac{1}{2},\ldots,\lambda_k-\frac{1}{2},0\right),\\
\quad\quad2\leq j\leq k-1,\\
\\
q^V(\tau_k^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},0\right).
\end{array}
$$
\noindent If $\lambda\in\Lambda\cap\mathbb{Z}^k,$ i.e. $\Gamma(\tau_j^{\lambda})\subset\left(\frac{1}{2}+\mathbb{Z}\right)^k,$ the representation $\tau_j^{\lambda}$ has two minimal $K$-types $q_1^V(\tau_j^{\lambda})$ and $q_2^V(\tau_j^{\lambda}):$
$$
\begin{array}{l}
q_1^V(\tau_1^{\lambda})=\left(\lambda_2-k+\frac{3}{2},\lambda_3-k+\frac{5}{2},\ldots,\lambda_k-\frac{1}{2},\frac{1}{2}\right),\\
\\
q_2^V(\tau_1^{\lambda})=\left(\lambda_2-k+\frac{3}{2},\lambda_3-k+\frac{5}{2},\ldots,\lambda_k-\frac{1}{2},-\frac{1}{2}\right),\\
\\
q_1^V(\tau_j^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_{j-1}-k+j-\frac{1}{2},\lambda_{j+1}-k+j+\frac{1}{2},\ldots,\lambda_k-\frac{1}{2},\frac{1}{2}\right),\\
\quad\quad2\leq j\leq k-1,\\
\\
q_2^V(\tau_j^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\ldots,\lambda_{j-1}-k+j-\frac{1}{2},\lambda_{j+1}-k+j+\frac{1}{2},\ldots,\lambda_k-\frac{1}{2},-\frac{1}{2}\right),\\
\quad\quad2\leq j\leq k-1,\\
\\
q_1^V(\tau_k^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\frac{1}{2}\right),\\
\\
q_2^V(\tau_k^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\frac{1}{2}\right).
\end{array}
$$
\noindent Finally, for every $\lambda\in\Lambda$ the representation $\omega_{\pm}^{\lambda}$ has one minimal $K$-type $q^V(\omega_{\pm}^{\lambda}):$
$$
\begin{array}{l}
q^V(\omega_+^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},\lambda_k+\frac{1}{2}\right),\\
\\
q^V(\omega_-^{\lambda})=\left(\lambda_1-k+\frac{3}{2},\lambda_2-k+\frac{5}{2},\ldots,\lambda_{k-1}-\frac{1}{2},-\lambda_k-\frac{1}{2}\right).
\end{array}
$$
\noindent So we see that if $\pi\in\wideparen{G}^0$ has two minimal $K$-types it is not unitary. Further, every $\pi\in\hat{G}^0$ has one minimal $K$-type $q^V(\pi)$ and it coincides with $q(\pi).$ But there exist nonunitary representations in $\wideparen{G}^0$ that have one minimal $K$-type: this property is shared by all $\tau_j^{\lambda}$ for $\lambda\in\Lambda\cap\left(\frac{1}{2}+\mathbb{Z}_+\right)^k$ that are not subquotients of the ends of complementary series. In other words, unitarity of a representation $\pi\in\wideparen{G}^0$ is not characterized by having a unique minimal $K$-type.
\sigmaection{Representations of $\mr{Spin}(2k+1,1)$}
\inftynd Now $M=\mr{Spin}(2k).$ Cartan subalgebra $\mathfrak{t}_0$ of $\mathfrak{k}_0$ (resp. $\mathfrak{t}$ of $\mathfrak{k})$ is also Cartan subalgebra of $\mathfrak{m}_0$ (resp. $\mathfrak{m}).$ The root systems are:
$$
\Delta_K=\Delta(\mathfrak{k},\mathfrak{t})=\{\pm\alpha_p\pm\alpha_q;\,\,1\leftftarrowmbdaeq p,q\leftftarrowmbdaeq k,\,\,p\not=q\}\cup\{\pm\alpha_p;\,\,1\leftftarrowmbdaeq p\leftftarrowmbdaeq k\}
$$
and
$$
\Delta_M=\Delta(\mathfrak{m},\mathfrak{t})=\{\pm\alpha_p\pm\alpha_q;\,\,1\leftftarrowmbdaeq p,q\leftftarrowmbdaeq k,\,\,p\not=q\}.
$$
We choose positive roots:
$$
\Delta_K^+=\{\alpha_p\pm\alpha_q;\,\,1\leftftarrowmbdaeq p<q\leftftarrowmbdaeq k\}\cup\{\alpha_p;\,\,1\leftftarrowmbdaeq p\leftftarrowmbdaeq k\},
$$
$$
\Delta_M^+=\{\alpha_p\pm\alpha_q;\,\,1\leftftarrowmbdaeq p<q\leftftarrowmbdaeq k\}.
$$
The corresponding Weyl chambers in $\mathbb{R}^k=i\mathfrak{t}_0^*$ are
$$
C_K=\{\leftftarrowmbda\inftyn\mathbb{R}^k;\,\,\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_{k-1}>\leftftarrowmbda_k>0\}
$$
with the closure
$$
\omegav{C}_K=\{\leftftarrowmbda\inftyn\mathbb{R}^k;\,\,\leftftarrowmbda_1\gammaeq\leftftarrowmbda_2\gammaeq\cdots\gammaeq\leftftarrowmbda_{k-1}\gammaeq\leftftarrowmbda_k\gammaeq0\}
$$
and
$$
C_M=\{\leftftarrowmbda\inftyn\mathbb{R}^k;\,\,\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_{k-1}>|\leftftarrowmbda_k|>0\}
$$
with the closure
$$
\omegav{C}_M=\{\leftftarrowmbda\inftyn\mathbb{R}^k;\,\,\leftftarrowmbda_1\gammaeq\leftftarrowmbda_2\gammaeq\cdots\gammaeq\leftftarrowmbda_{k-1}\gammaeq|\leftftarrowmbda_k|\}.
$$
The halfsums of positive roots are
$$
\betaeg{array}{ccc}
\rho_K=\leftftarrowmbdae(k-\varphir{1}{2},k-\varphir{3}{2},\leftftarrowmbdadots,\varphir{3}{2},\varphir{1}{2}\rhoi)&\mathrm{and}&\delta_{\mathfrak{m}}=(k-1,k-2,\leftftarrowmbdadots,1,0).
\varepsilonnd{array}
$$
Now
$$
\betaeg{array}{c}
\hat{K}=\leftftarrowmbdae\{(m_1,\leftftarrowmbdadots,m_k)\inftyn\mathbb{Z}_+^k\cup\leftftarrowmbdae(\varphir{1}{2}+\mathbb{Z}_+\rhoi)^k;\,\,m_1\gammaeq m_2\gammaeq\cdots\gammaeq m_{k-1}\gammaeq m_k\gammaeq0\rhoi\}\\
\\
\hat{M}=\leftftarrowmbdae\{(n_1,\leftftarrowmbdadots,n_k)\inftyn\mathbb{Z}^k\cup\leftftarrowmbdae(\varphir{1}{2}+\mathbb{Z}\rhoi)^k;\,\,n_1\gammaeq n_2\gammaeq\cdots\gammaeq n_{k-1}\gammaeq|n_k|\rhoi\}.
\varepsilonnd{array}
$$
The branching rule is
$$
(m_1,\leftftarrowmbdadots,m_k)|M=\betaigoplus_{(n_1,\leftftarrowmbdadots,n_k)\primeec(m_1,\leftftarrowmbdadots,m_k)}(n_1,\leftftarrowmbdadots,n_k)
$$
where $(n_1,\leftftarrowmbdadots,n_k)\primeec(m_1,\leftftarrowmbdadots,m_k)$ means that $(m_1,\leftftarrowmbdadots,m_k)\inftyn(n_1+\mathbb{Z})^k$ and
$$
m_1\gammaeq n_1\gammaeq m_2\gammaeq n_2\gammaeq\cdots\gammaeq m_{k-1}\gammaeq n_{k-1}\gammaeq m_k\gammaeq|n_k|.
$$
So by the Frobenius Reciprocity Theorem for $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M}$ and $\nu\inftyn\mathbb{C}=\mathfrak{a}^*$ we have
$$
\pi^{\sigma,\nu}|K=\betaigoplus_{(n_1,\leftftarrowmbdadots,n_k)\primeec(m_1,\leftftarrowmbdadots,m_k)}(m_1,\leftftarrowmbdadots,m_k).
$$
We identify the dual $\mathfrak{h}^*$ with $\mathbb{C}^{k+1}$ so that $\leftftarrowmbda\inftyn\mathfrak{h}^*$ is identified with the\leftftarrowmbdab $(k+1)-$tuple $(\leftftarrowmbda(H_1),\leftftarrowmbdadots,\leftftarrowmbda(H_k),\leftftarrowmbda(H))$ and $\mathfrak{t}^*=\mathbb{C}^k$ is identified with the subspace of $\mathfrak{h}^*=\mathbb{C}^{k+1}$ of all $(k+1)-$tuples with $0$ at the end. The infinitesimal character of the elementary representation $\pi^{\sigma,\nu}$ is equal $\chi_{\Lambda(\sigma,\nu)},$ where $\Lambda(\sigma,\nu)\inftyn\mathfrak{h}^*$ is defined by
$$
\Lambda(\sigma,\nu)|\mathfrak{t}=\leftftarrowmbda_{\sigma}+\delta_{\mathfrak{m}}\quad\mathrm{and}\quad\Lambda(\sigma,\nu)|\mathfrak{a}=\nu.
$$
Here $\leftftarrowmbda_{\sigma}$ is the highest weight of $\sigma$ with respect to $\Delta_M^+.$ Thus
$$
\Lambda(\sigma,\nu)=(n_1+k-1,n_2+k-2,\leftftarrowmbdadots,n_{k-1}+1,n_k,\nu).
$$
For $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M}\cap\mathbb{Z}^k$ and $\nu\inftyn\mathbb{C}$ the elementary representation $\pi^{\sigma,\nu}$ is irreducible if and only if either $\nu\not\inftyn\mathbb{Z}$ or
$$
\nu\inftyn\{0,\pm1,\leftftarrowmbdadots,\pm|n_k|,\pm(n_{k-1}+1),\pm(n_{k-2}+2),\leftftarrowmbdadots,\pm(n_1+k-1)\}.
$$
For $\sigma\inftyn\hat{M}\cap\leftftarrowmbdae(\varphir{1}{2}+\mathbb{Z}\rhoi)^k$ and $\nu\inftyn\mathbb{C}$ the representation $\pi^{\sigma,\nu}$ is irreducible if and only if either $\nu\not\inftyn\leftftarrowmbdae(\varphir{1}{2}+\mathbb{Z}\rhoi)$ or
$$
\betaeg{array}{c}
\nu\inftyn\leftftarrowmbdae\{\pm\varphir{1}{2},\leftftarrowmbdadots,\pm|n_k|,\pm(n_{k-1}+1),\pm(n_{k-2}+2),\leftftarrowmbdadots,\pm(n_1+k-1)\rhoi\}.
\varepsilonnd{array}
$$
If the elementary representation $\pi^{\sigma,\nu}$ is reducible, it always has two irreducible subquotients which will be denoted by $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu}.$ The $K-$spectra of these representations consist of all $q=(m_1,\leftftarrowmbdadots,m_k)\inftyn\hat{K}\cap(n_1+\mathbb{Z})^k$ that satisfy:
$$
\betaeg{array}{l}
\betaullet\,\mathrm{If}\,\,n_{k-1}>|n_k|\,\,\mathrm{and}\,\,\nu\inftyn\{\pm(|n_k|+1),\pm(|n_k|+2),\leftftarrowmbdadots,\pm n_{k-1}\}:\\
\Gamma(\tau^{\sigma,\nu}):\,\,m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_{k-1}\gammaeq n_{k-1}\,\,\mathrm{and}\,\,|\nu|-1\gammaeq m_k\gammaeq|n_k|,\\
\Gamma(\omega^{\sigma,\nu}):\,\,m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_{k-1}\gammaeq n_{k-1}\gammaeq m_k\gammaeq|\nu|.\\
\betaullet\,\mathrm{If}\,\,n_{j-1}>n_j\,\,\mathrm{for\,\,some}\,\,j\inftyn\{2,\leftftarrowmbdadots,k-1\}\,\,\mathrm{and}\\
\nu\inftyn\{\pm(n_j+k-j+1),\pm(n_j+k-j+2),\leftftarrowmbdadots,\pm(n_{j-1}+k-j)\}:\\
\Gamma(\tau^{\sigma,\nu}):\,\,m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_{j-1}\gammaeq n_{j-1}\,\,\mathrm{and}\\
\mathfrak{q}u\mathfrak{q}u|\nu|-k+j-1\gammaeq m_j\gammaeq n_j\gammaeq\cdots\gammaeq m_k\gammaeq|n_k|,\\
\Gamma(\omega^{\sigma,\nu}):\,\,m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_{j-1}\gammaeq n_{j-1}\gammaeq m_j\gammaeq|\nu|-k+j\,\,\mathrm{and}\\
\mathfrak{q}u\mathfrak{q}u n_j\gammaeq m_{j+1}\gammaeq\cdots\gammaeq m_k\gammaeq|n_k|.\\
\betaullet\,\mathrm{If}\,\,\nu\inftyn\{\pm(n_1+k),\pm(n_1+k+1),\leftftarrowmbdadots\}:\\
\Gamma(\tau^{\sigma,\nu}):\,\,|\nu|-k\gammaeq m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_k\gammaeq|n_k|,\\
\Gamma(\omega^{\sigma,\nu}):\,\,m_1\gammaeq|\nu|-k+1\,\,\mathrm{and}\,\,n_1\gammaeq m_2\gammaeq n_2\gammaeq\cdots\gammaeq m_k\gammaeq|n_k|.
\varepsilonnd{array}
$$
\inftynd Similarly to the preceding case of even $n=2k$ we now write down the infinitesimal characters of reducible elementary representations $\pi^{\sigma,\nu}$ (and so of their irreducible subquotients $\tau^{\sigma,\nu}$ and $\omega^{\sigma,\nu}$ too). We know that the infinitesimal character of $\pi^{\sigma,\nu}$ is $\chi_{\Lambda(\sigma,\nu)},$ where
$$
\Lambda(\sigma,\nu)=(n_1+k-1,n_2+k-2,\leftftarrowmbdadots,n_{k-1}+1,n_k,\nu).
$$
Since $\nu\inftyn\varphir{1}{2}\mathbb{Z}\sigmaubset\mathbb{R}=\mathfrak{a}_0^*$ we have $\Lambda(\sigma,\nu)\inftyn i\mathfrak{t}_0^*\omegaplus\mathfrak{a}_0^*=\mathbb{R}^{k+1}.$\\
\inftynd The root system of the pair $(\mathfrak{g},\mathfrak{h})$ is
$$
\Delta=\{\pm\alpha_p\pm\alpha_q;\,\,1\leftftarrowmbdaeq p,q\leftftarrowmbdaeq k+1,\,\,p\not=q\}.
$$
We choose positive roots:
$$
\Delta^+=\{\alpha_p\pm\alpha_q;\,\,1\leftftarrowmbdaeq p<q\leftftarrowmbdaeq k+1\}.
$$
The corresponding Weyl chamber in $\mathbb{R}^{k+1}$ is
$$
D=\{\leftftarrowmbda\inftyn\mathbb{R}^{k+1};\,\,\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_k>|\leftftarrowmbda_{k+1}|>0\}
$$
with the closure
$$
\omegav{D}=\{\leftftarrowmbda\inftyn\mathbb{R}^{k+1};\,\,\leftftarrowmbda_1\gammaeq\leftftarrowmbda_2\gammaeq\cdots\gammaeq\leftftarrowmbda_k\gammaeq|\leftftarrowmbda_{k+1}|\}.
$$
The Weyl group $W$ of $\Delta$ consists of all permutations of coordinates in $\mathbb{C}^{k+1}=\mathfrak{h}^*$ combined with multiplying an even number of coordinates by $-1.$ By Harish$-$Chandra's theorem $\chi_{\leftftarrowmbda}=\chi_{\leftftarrowmbda^{\prime}}$ if and only if $\leftftarrowmbda,\leftftarrowmbda^{\prime}\inftyn\mathfrak{h}^*$ are in the same $W-$orbit. As $\omegav{D}$ is a fundamental domain for the action of $W$ on $\mathbb{R}^{k+1}=i\mathfrak{t}_0^*\omegaplus\mathfrak{a}_0^*,$ there exists a unique $\leftftarrowmbda(\sigma,\nu)\inftyn\omegav{D}$ such that $\chi_{\Lambda(\sigma,\nu)}=\chi_{\leftftarrowmbda(\sigma,\nu)}.$ We now write down $\leftftarrowmbda(\sigma,\nu)$ for all reducible elementary representations $\pi^{\sigma,\nu}.$ In the following for $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M}$ we write $-\sigma$ for its contragredient class in $\hat{M}:$ $-\sigma=(n_1,\leftftarrowmbdadots,n_{k-1},-n_k).$ Without loss of ge\-ne\-ra\-li\-ty we can suppose that $\nu\gammaeq0$ because $\pi^{\sigma,\nu}$ and $\pi^{-\sigma,-\nu}$ have equivalent irreducible subquotients and because $\Lambda(\sigma,\nu)$ is $W-$conjugated with $\Lambda(-\sigma,-\nu):$ multiplying the last two coordinates by $-1.$\\
\inftynd $\betaullet$ If $n_{k-1}>|n_k|$ and $\nu\inftyn\{|n_k|+1,|n_k|+2,\leftftarrowmbdadots,n_{k-1}\}$ we have $n_{k-1}>\nu>|n_k|$ and so
$$
\leftftarrowmbda(\sigma,\nu)=(n_1+k-1,n_2+k-2,\leftftarrowmbdadots,n_{k-1}+1,\nu,n_k).
$$
\inftynd $\betaullet$ If $2\leftftarrowmbdaeq j\leftftarrowmbdaeq k-1,$ $n_{j-1}>n_j$ and $\nu\inftyn\{n_j+k-j+1,\leftftarrowmbdadots,n_{j-1}+k-j\}$ we have $n_{j-1}+k-j+1>\nu>n_j+k-j$ and so
$$
\leftftarrowmbda(\sigma,\nu)=(n_1+k-1,\leftftarrowmbdadots,n_{j-1}+k-j+1,\,\nu\,,n_j+k-j,\leftftarrowmbdadots,n_{k-1}+1,n_k).
$$
\inftynd $\betaullet$ If $\nu\inftyn\{n_1+k,n_1+k+1,\leftftarrowmbdadots\}$ we have $\nu>n_1+k-1$ and so
$$
\leftftarrowmbda(\sigma,\nu)=(\nu,n_1+k-1,\leftftarrowmbdadots,n_{k-1}+1,n_k).
$$
\inftynd Similarly to the preceding case of even $n=2k,$ we see that now every reducible elementary representation has infinitesimal character $\chi_{\leftftarrowmbda}$ with $\leftftarrowmbda\inftyn\Lambda,$\leftftarrowmbdab where
$$
\betaeg{array}{c}
\Lambda=\leftftarrowmbdae\{\leftftarrowmbda\inftyn\mathbb{Z}^{k+1}\cup\leftftarrowmbdae(\varphir{1}{2}+\mathbb{Z}\rhoi)^{k+1};\,\,\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_k>|\leftftarrowmbda_{k+1}|\rhoi\}.
\varepsilonnd{array}
$$
We again write $\Lambda$ as the disjoint union $\Lambda=\Lambda^*\cup\Lambda^0,$ where
$$
\betaeg{array}{c}
\Lambda^*=\leftftarrowmbdae\{\leftftarrowmbda\inftyn\mathbb{Z}^{k+1}\cup\leftftarrowmbdae(\varphir{1}{2}+\mathbb{Z}\rhoi)^{k+1};\,\,\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_k>|\leftftarrowmbda_{k+1}|>0\rhoi\},\\
\\
\Lambda^0=\{\leftftarrowmbda\inftyn\mathbb{Z}_+^{k+1};\,\,\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_k>0,\,\,\leftftarrowmbda_{k+1}=0\}.
\varepsilonnd{array}
$$
\betaeg{tm} $(i)$ For every $\leftftarrowmbda\inftyn\Lambda^*$ there exist $k+1$ ordered pairs $(\sigma,\nu),$ $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M},$ $\nu\gammaeq0,$ such that $\chi_{\leftftarrowmbda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ These are $(\sigma_j,\nu_j),$ where $\nu_j=\leftftarrowmbda_j$ for $1\leftftarrowmbdaeq j\leftftarrowmbdaeq k,$ $\nu_{k+1}=|\leftftarrowmbda_{k+1}|$ and
$$
\betaeg{array}{l}
\sigma_1=(\leftftarrowmbda_2-k+1,\leftftarrowmbda_3-k+2,\leftftarrowmbdadots,\leftftarrowmbda_k-1,\leftftarrowmbda_{k+1}),\\
\sigma_j=(\leftftarrowmbda_1-k+1,\leftftarrowmbdadots,\leftftarrowmbda_{j-1}-k+j-1,\leftftarrowmbda_{j+1}-k+j,\leftftarrowmbdadots,\leftftarrowmbda_k-1,\leftftarrowmbda_{k+1}),\\
\mathfrak{q}u2\leftftarrowmbdaeq j\leftftarrowmbdaeq k-1,\\
\sigma_k=(\leftftarrowmbda_1-k+1,\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,\leftftarrowmbda_{k+1}),\\
\sigma_{k+1}=\leftftarrowmbdae\{\betaeg{array}{ll}
\!\!(\leftftarrowmbda_1-k+1,\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,\leftftarrowmbda_k)&\,\,\mathrm{if}\,\,\leftftarrowmbda_{k+1}>0\\
\!\!(\leftftarrowmbda_1-k+1,\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,-\leftftarrowmbda_k)&\,\,\mathrm{if}\,\,\leftftarrowmbda_{k+1}<0
\varepsilonnd{array}
\rhoi.
\varepsilonnd{array}
$$
$\pi^{\sigma_j,\nu_j},$ $1\leftftarrowmbdaeq j\leftftarrowmbdaeq k,$ are reducible, while $\pi^{\sigma_{k+1},\nu_{k+1}}$ is irreducible.\\
\inftynd $(ii)$ For $\leftftarrowmbda\inftyn\Lambda^0$ there exist $k+2$ ordered pairs $(\sigma,\nu),$ $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M},$ $\nu\gammaeq0,$ such that $\chi_{\leftftarrowmbda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ These are the $(\sigma_j,\nu_j),$ where $\nu_j=\leftftarrowmbda_j$ for $1\leftftarrowmbdaeq j\leftftarrowmbdaeq k,$ $\nu_{k+1}=\nu_{k+2}=0$ and
$$
\betaeg{array}{l}
\sigma_1=(\leftftarrowmbda_2-k+1,\leftftarrowmbda_3-k+2,\leftftarrowmbdadots,\leftftarrowmbda_k-1,0),\\
\sigma_j=(\leftftarrowmbda_1-k+1,\leftftarrowmbdadots,\leftftarrowmbda_{j-1}-k+j-1,\leftftarrowmbda_{j+1}-k+j,\leftftarrowmbdadots,\leftftarrowmbda_k-1,0),\\
\mathfrak{q}u2\leftftarrowmbdaeq j\leftftarrowmbdaeq k-1,\\
\sigma_k=(\leftftarrowmbda_1-k+1,\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,0),\\
\sigma_{k+1}=(\leftftarrowmbda_1-k+1,\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,\leftftarrowmbda_k),\\
\sigma_{k+2}=(\leftftarrowmbda_1-k+1,\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,-\leftftarrowmbda_k).
\varepsilonnd{array}
$$
$\pi^{\sigma_j,\nu_j},$ $1\leftftarrowmbdaeq j\leftftarrowmbdaeq k,$ are reducible, while $\pi^{\sigma_{k+1},\nu_{k+1}}$ and $\pi^{\sigma_{k+2},\nu_{k+2}}$ are irreducible.
\varepsilonnd{tm}
{\betaf Proof:} $(i)$ Fix $\leftftarrowmbda\inftyn\Lambda^*$ and let $(\sigma,\nu),$ $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M},$ $\nu\gammaeq0,$ be such that $\chi_{\leftftarrowmbda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ Then $\Lambda(\sigma,\nu)$ and $\leftftarrowmbda$ are in the same $W-$orbit. Since $\nu\gammaeq0$ we have necessarily $\nu=\leftftarrowmbda_j$ for some $j\leftftarrowmbdaeq k$ or $\nu=|\leftftarrowmbda_{k+1}|.$\\
\inftynd Suppose $\nu=\leftftarrowmbda_j$ for some $j\leftftarrowmbdaeq k.$ Since $W$ acts as permutations of coordinates combined with multiplying an even number of coordinates by $-1,$ the inequalities
$$
n_1+k-1>n_2+k-2>\cdots>n_{k-1}+1>|n_k|
$$
and
$$
\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_{j-1}>\leftftarrowmbda_{j+1}>\cdots>\leftftarrowmbda_k>|\leftftarrowmbda_{k+1}|>0
$$
imply
$$
n_1+k-1=\leftftarrowmbda_1,\leftftarrowmbdadots,n_{j-1}+k-j+1=\leftftarrowmbda_{j-1},
$$
$$
n_j+k-j=\leftftarrowmbda_{j+1},\leftftarrowmbdadots,n_{k-1}+1=\leftftarrowmbda_k,\,\,n_k=\leftftarrowmbda_{k+1}.
$$
The following possibilities follow:
$$
\betaeg{array}{ll}
\nu_1=\leftftarrowmbda_1,&\!\!\sigma_1=(\leftftarrowmbda_2-k+1,\leftftarrowmbda_3-k+2,\leftftarrowmbdadots,\leftftarrowmbda_k-1,\leftftarrowmbda_{k+1}),\\
&\\
\nu_j=\leftftarrowmbda_j,&\!\!\sigma_j=(\leftftarrowmbda_1-k+1,\leftftarrowmbdadots,\leftftarrowmbda_{j-1}-k+j-1,\leftftarrowmbda_{j+1}-k+j,\leftftarrowmbdadots,\leftftarrowmbda_k-1,\leftftarrowmbda_{k+1}),\\
&\mathfrak{q}u2\leftftarrowmbdaeq j\leftftarrowmbdaeq k-1,\\
&\\
\nu_k=\leftftarrowmbda_k,&\!\!\sigma_k=(\leftftarrowmbda_1-k+1,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,\leftftarrowmbda_{k+1}).
\varepsilonnd{array}
$$
One easily checks that so defined $\sigma_1,\leftftarrowmbdadots,\sigma_k$ are really in $\hat{M}$ and that $\pi^{\sigma_j,\nu_j}$ are reducible.\\
\inftynd Suppose now that $\leftftarrowmbda_{k+1}>0$ and $\nu=\leftftarrowmbda_{k+1}.$ Then it follows that necessarily
$$
n_1+k-1=\leftftarrowmbda_1,n_2+k-2=\leftftarrowmbda_2,\leftftarrowmbdadots,n_{k-1}+1=\leftftarrowmbda_{k-1},n_k=\leftftarrowmbda_k,
$$
i.e.
$$
n_1=\leftftarrowmbda_1-k+1,n_2=\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,n_{k-1}=\leftftarrowmbda_{k-1}-1,n_k=\leftftarrowmbda_k.
$$
On the other hand, if $\leftftarrowmbda_{k+1}<0,$ hence $\nu=-\leftftarrowmbda_{k+1},$ we see that in $W-$action which $\Lambda(\sigma,\nu)$ transforms into $\leftftarrowmbda$ there should be one more change of sign and so necessarily $n_k=-\leftftarrowmbda_k.$ Thus we have
$$
n_1=\leftftarrowmbda_1-k+1,n_2=\leftftarrowmbda_2-k+2,\leftftarrowmbdadots,n_{k-1}=\leftftarrowmbda_{k-1}-1,n_k=-\leftftarrowmbda_k.
$$
One checks that so defined
$$
\sigma_{k+1}=(n_1,\leftftarrowmbdadots,n_k)=(\leftftarrowmbda_1-k+1,\leftftarrowmbdadots,\leftftarrowmbda_{k-1}-1,\pm\leftftarrowmbda_k)
$$
is really in $\hat{M}.$ Further, we have $|n_k|-\nu_{k+1}=\leftftarrowmbda_k-|\leftftarrowmbda_{k+1}|\inftyn\mathbb{N}.$ Thus, either $\nu_{k+1}\inftyn\{0,1,\leftftarrowmbdadots,|n_k|-1\}$ or $\nu_{k+1}\inftyn\{\varphir{1}{2},\varphir{3}{2},\leftftarrowmbdadots,|n_k|-1\}.$ Therefore, the elementary representation $\pi^{\sigma_{k+1},\nu_{k+1}}$ is irreducible.\\
\inftynd $(ii)$ Let $\leftftarrowmbda\inftyn\Lambda^0$ and let $(\sigma,\nu),$ $\sigma=(n_1,\leftftarrowmbdadots,n_k)\inftyn\hat{M},$ $\nu\gammaeq0,$ be such that $\chi_{\leftftarrowmbda}$ is the infinitesimal character of $\pi^{\sigma,\nu}.$ As in the proof of $(i)$ we find that necessarily $\nu=\leftftarrowmbda_j$ for some $j.$ The rest of the proof for $j\leftftarrowmbdaeq k$ is completely the same as in $(i).$ So we are left with the case $\nu=\leftftarrowmbda_{k+1}=0.$ As in $(i)$ because of the inequalities $n_1+k-1>n_2+k-2>\cdots>n_{k-1}+1>|n_k|$ and $\leftftarrowmbda_1>\leftftarrowmbda_2>\cdots>\leftftarrowmbda_k>0$ we get two possibilities for $\sigma,$ $\sigma=\sigma_{k+1}$ and $\sigma_{k+2}$ from the statement $(ii).$ Finally, as in the proof of $(i)$ we check that $\sigma_{k+1},\sigma_{k+2}\inftyn\hat{M}$ and that the representations $\pi^{\sigma_{k+1},0}$ and $\pi^{\sigma_{k+2},0}$ are irreducible.\\
\inftynd We note that in fact the representations $\pi^{\sigma_{k+1},0}$ and $\pi^{\sigma_{k+2},0}$ are equivalent, but this is unimportant for studying and parametrizing $\wideparen{G}^0$ and $\hat{G}^0.$\\
\inftynd Fix $\leftftarrowmbda\inftyn\Lambda.$ By Theorem 3. there exist $k$ ordered pairs $(\sigma,\nu),$ $\sigma\inftyn\hat{M},$ $\nu\gammaeq0,$ with reducible $\pi^{\sigma,\nu}$ having $\chi_{\leftftarrowmbda}$ as the infinitesimal character. There are $k+1$ mutually inequivalent irreducible subquotients of these elementary representations; we denote them $\tau_1^{\leftftarrowmbda},\leftftarrowmbdadots,\tau_k^{\leftftarrowmbda},\omega^{\leftftarrowmbda}:$
$$
\betaeg{array}{l}
\tau_1^{\leftftarrowmbda}=\tau^{\sigma_1,\nu_1},\\
\\
\tau_2^{\leftftarrowmbda}=\omega^{\sigma_1,\nu_1}\cong\tau^{\sigma_2,\nu_2},\\
\\
\vdots\\
\\
\tau_j^{\leftftarrowmbda}=\omega^{\sigma_{j-1},\nu_{j-1}}\cong\tau^{\sigma_j,\nu_j},\\
\\
\vdots\\
\\
\tau_k^{\leftftarrowmbda}=\omega^{\sigma_{k-1},\nu_{k-1}}\cong\tau^{\sigma_k,\nu_k},\\
\\
\omega^{\leftftarrowmbda}=\omega^{\sigma_k,\nu_k}.
\varepsilonnd{array}
$$
Their $K-$spectra consist of all $q=(m_1,\leftftarrowmbdadots,m_k)\inftyn\hat{K}\cap(n_1+\mathbb{Z})^k$ satisfying:
$$
\betaeg{array}{ll}
\Gamma(\tau_1^{\leftftarrowmbda}):&\leftftarrowmbda_1-k\gammaeq m_1\gammaeq\leftftarrowmbda_2-k+1\gammaeq m_2\gammaeq\cdots\gammaeq\leftftarrowmbda_k-1\gammaeq m_k\gammaeq|\leftftarrowmbda_{k+1}|.\\
&\\
\Gamma(\tau_j^{\leftftarrowmbda}):&m_1\gammaeq\leftftarrowmbda_1-k+1\gammaeq\cdots\gammaeq m_{j-1}\gammaeq\leftftarrowmbda_{j-1}-k+j-1\,\,\mathrm{and}\\
&\leftftarrowmbda_j-k+j-1\gammaeq m_j\gammaeq\cdots\gammaeq\leftftarrowmbda_k-1\gammaeq m_k\gammaeq|\leftftarrowmbda_{k+1}|\,\,\mathrm{for}\,\,2\leftftarrowmbdaeq j\leftftarrowmbdaeq k.\\
&\\
\Gamma(\omega^{\leftftarrowmbda}):&m_1\gammaeq\leftftarrowmbda_1-k+1\gammaeq\cdots\gammaeq m_{k-1}\gammaeq\leftftarrowmbda_{k-1}-1\gammaeq m_k\gammaeq\leftftarrowmbda_k.
\varepsilonnd{array}
$$
\inftynd The definitions of corners and fundamental corners do not make sense when $\rhoank\,\mathfrak{k}<\rhoank\,\mathfrak{g}.$ Consider Vogan's minimal $K-$types. Note that
$$
\|q+2\rho_K\|^2=(m_1+2k-1)^2+(m_2+2k-3)^2+\cdots+(m_k+1)^2,
$$
so every $\pi\inftyn\wideparen{G}^0$ has unique minimal $K-$type that will be denoted by $q^V(\pi):$ this is the element $(m_1,\leftftarrowmbdadots,m_k)\inftyn\Gamma(\pi)$ whose every coordinate $m_j$ is the smallest possible.
\betaeg{tm} The map $\pi\mapsto q^V(\pi)$ is a surjection of $\wideparen{G}^0$ onto $\hat{K}.$ More precisely, for $q=(m_1,\leftftarrowmbdadots,m_k)\inftyn\hat{K}:$\\
\inftynd $(a)$ There exist infinitely many $\leftftarrowmbda$'s in $\Lambda$ such that $q^V(\tau_1^{\leftftarrowmbda})=q.$\\
\inftynd $(b)$ Let $j\inftyn\{2,\leftftarrowmbdadots,k\}.$ The number of mutually different $\leftftarrowmbda$'s in $\Lambda$ such that $q^V(\tau_j^{\leftftarrowmbda})=q$ is equal to:
$$
\betaeg{array}{cl}
0&\quad\mathrm{if}\,\,\,m_{j-1}=m_j,\\
m_{j-1}-m_j&\quad\mathrm{if}\,\,\,m_{j-1}>m_j\,\,\,\mathrm{and}\,\,\,m_k=0,\\
2(m_{j-1}-m_j)&\quad\mathrm{if}\,\,\,m_{j-1}>m_j\,\,\,\mathrm{and}\,\,\,m_k>0.
\varepsilonnd{array}
$$
\inftynd $(c)$ The number of $\leftftarrowmbda$'s in $\Lambda$ such that $q^V(\omega^{\leftftarrowmbda})=q$ is equal to:
$$
\betaeg{array}{cl}
0&\quad\mathrm{if}\,\,\,m_k=0\,\,\,\mathrm{or}\,\,\,m_k=\varphir{1}{2},\\
1&\quad\mathrm{if}\,\,\,m_k=1,\\
2\leftftarrowmbdae[m_k-\varphir{1}{2}\rhoi]&\quad\mathrm{if}\,\,\,m_k>1.
\varepsilonnd{array}
$$
Here we use the usual notation for $p\inftyn\mathbb{R}:$ $[p]=\max\,\{j\inftyn\mathbb{Z};\,\,j\leftftarrowmbdaeq p\}.$
\varepsilonnd{tm}
{\betaf Proof:} $(a)$ These are all $\leftftarrowmbda\inftyn\Lambda$ such that
$$
\leftftarrowmbda_1\inftyn(m_1+k+\mathbb{Z}_+),\,\,\,\leftftarrowmbda_j=m_{j-1}+k-j+1\,\,\,2\leftftarrowmbdaeq j\leftftarrowmbdaeq k,\,\,\,\leftftarrowmbda_{k+1}=\pm m_k.
$$
\inftynd $(b)$ These are all $\leftftarrowmbda\inftyn\Lambda$ such that
$$
\betaeg{array}{ll}
\leftftarrowmbda_s=m_s+k-s,&\,\,1\leftftarrowmbdaeq s\leftftarrowmbdaeq j-1,\\
\leftftarrowmbda_{j-1}>\leftftarrowmbda_j>\leftftarrowmbda_{j+1},&\\
\leftftarrowmbda_s=m_{s-1}+k-s+1,&\,\,j+1\leftftarrowmbdaeq s\leftftarrowmbdaeq k,\\
\leftftarrowmbda_{k+1}=\pm m_k.&
\varepsilonnd{array}
$$
\inftynd$(c)$ These are all $\leftftarrowmbda\inftyn\Lambda$ such that
$$
\betaeg{array}{l}
\leftftarrowmbda_s=m_s+k-s,\,\,\,1\leftftarrowmbdaeq s\leftftarrowmbdaeq k,\,\,\,|\leftftarrowmbda_{k+1}|<m_k.
\varepsilonnd{array}
$$
The number of such $\leftftarrowmbda$'s is $0$ if $m_k=0$ or $m_k=\varphir{1}{2},$ exactly $1$ if $m_k=1$ $(\leftftarrowmbda_{k+1}=0),$ and twice the number of natural numbers $<m_k$ if $m_k\gammaeq\varphir{3}{2}.$\\
\inftynd We now parametrize $\hat{G}^0.$ A class in $\wideparen{G}^0$ is unitary if and only if it is an irreducible subquotient of an end of complementary series. For $\sigma\inftyn\hat{M}$ the complementary series is nonempty if and only if $\sigma$ is selfcontragredient, i.e. equivalent to its contragredient. Contragredient representation of\leftftarrowmbdab $\sigma=(n_1,\leftftarrowmbdadots,n_{k-1},n_k)$ is $-\sigma=(n_1,\leftftarrowmbdadots,n_{k-1},-n_k).$ Thus, $\sigma$ is selfcontragredient if and only if $n_k=0.$ In this case we set
$$
\nu(\sigma)=\min\,\{\nu\gammaeq0;\,\,\pi^{\sigma,\nu}\,\,\mathrm{is}\,\,\mathrm{reducible}\}.
$$
From the necessary and sufficient conditions for reducibility of elementary representations we find that for $\sigma=(n_1,\leftftarrowmbdadots,n_{k-1},0)\inftyn\hat{M}:$\\
\inftynd $\betaullet$ If $n_1=\cdots=n_{k-1}=0,$ i.e. if $\sigma=\sigma_0=(0,\leftftarrowmbdadots,0)$ is the trivial onedimensional representation of $M,$ then
$$
\nu(\sigma_0)=k.
$$
In this case
$$
\Gamma(\tau^{\sigma_0,k})=\{(0,\leftftarrowmbdadots,0)\}\quad\mathrm{and}\quad\Gamma(\omega^{\sigma_0,k})=\{(s,0,\leftftarrowmbdadots,0);\,\,s\inftyn\mathbb{N}\}
$$
and so $q^V(\tau^{\sigma_0,k})=(0,\leftftarrowmbdadots,0)$ and $q^V(\omega^{\sigma_0,k})=(1,0,\leftftarrowmbdadots,0).$\\
\inftynd $\betaullet$ If $n_1>0,$ let $j\inftyn\{2,\leftftarrowmbdadots,k\}$ be the smallest index such that $n_{j-1}>0.$ Then
$$
\nu(\sigma)=k-j+1.
$$
The $K-$spectra of irreducible subquotients of $\pi^{\sigma,k-j+1}$ consist of all $(m_1,\leftftarrowmbdadots,m_k)$ in $\hat{K}\cap\mathbb{Z}_+^k$ such that
$$
\betaeg{array}{ll}
\Gamma(\tau^{\sigma,k-j+1}):&\,\,m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_{j-1}\gammaeq n_{j-1}\quad\mathrm{and}\quad m_s=0\,\,\varphia s\gammaeq j,\\
&\\
\Gamma(\omega^{\sigma,k-j+1}):&\,\,m_1\gammaeq n_1\gammaeq\cdots\gammaeq m_{j-1}\gammaeq n_{j-1}\gammaeq m_j\gammaeq1\quad\mathrm{and}\quad m_s=0\,\,\varphia s>j.
\varepsilonnd{array}
$$
So we have
$$
q^V(\tau^{\sigma,k-j+1})=(n_1,\leftftarrowmbdadots,n_{j-1},0,\leftftarrowmbdadots,0),\,\,\,q^V(\omega^{\sigma,k-j+1})=(n_1,\leftftarrowmbdadots,n_{j-1},1,0,\leftftarrowmbdadots,0).
$$
Thus
\betaeg{tm} The map $\pi\mapsto q^V(\pi)$ is a bijection of $\hat{G}^0$ onto
$$
\hat{K}_0=\{q=(m_1,\leftftarrowmbdadots,m_k)\inftyn\hat{K};\,\,m_k=0\}.
$$
\varepsilonnd{tm}
\betaeg{thebibliography}{99}
\betaibitem{pa} A. M. Gavrilik and A. U. Klimyk, \varepsilonmph{Irreducible and indecomposable representations of the algebras $\mathfrak{s}\mathfrak{o}(n,1)$ and $\mathfrak{i}\mathfrak{s}\mathfrak{o}(n)$}, (in Russian) Preprint ITP$-$73$-$153R, Kiev, 1975.
\betaibitem{pa} A. M. Gavrilik and A. U. Klimyk, \varepsilonmph{Analysis of the representations of the Lorentz and Euclidean groups of $n-$th order}, Preprint ITP$-$75$-$18E, Kiev, 1975.
\betaibitem{pa} A. Guichardet, \varepsilonmph{Repr\' esentations des groupes $\mr{SO}_0(n,1)$}, unpublished manuscript 1976.
\betaibitem{pa} Harish$-$Chandra, \varepsilonmph{On some applications of the universal enveloping algebra of a semi$-$simple Lie algebra}, Transactions of the American Mathematical Society, vol. 70(1951), pp. $28-96.$
\betaibitem{pa} Harish$-$Chandra, \varepsilonmph{Representations of a semi$-$simple Lie group on Banach spaces I}, Transactions of the American Mathematical Society, vol. 75(1953), pp. $185-243.$
\betaibitem{pa} Harish$-$Chandra, \varepsilonmph{Representations of a semi$-$simple Lie group on Banach spaces II}, Transactions of the American Mathematical Society, vol. 76(1954), pp. $26-65.$
\betaibitem{pa} T. Hirai, \varepsilonmph{On infinitesimal operators of irreducible representations of the Lorentz group of $n-$th order}, Proceedings of the Japan Academy, vol. 38(1962), pp. $83-87.$
\betaibitem{pa} T. Hirai, \varepsilonmph{On irreducible representations of the Lorentz group of $n-$th order}, Proceedings of the Japan Academy, vol. 38(1962), pp. $258-262.$
\betaibitem{pa} A. U. Klimyk and A. M. Gavrilik, \varepsilonmph{Representations matrix elements and Clebsch$-$Gordan coefficients of the semisimple Lie groups}, Journal of Mathematical Physics, vol. 20(1979), pp. $1624-1642.$
\betaibitem{pa} H. Kraljevi\' c, \varepsilonmph{On representations of the group $\mr{SU}(n,1)$}, Transactions of the American Mathematical Society, vol. 221(1976), pp. $433-448.$
\betaibitem{pa} U. Ottoson, \varepsilonmph{A classification of the unitary irreducible representations of $\mr{SO}_0(N,1)$}, Communications in Mathematical Physics, vol. 8(1968), pp. $228-244.$
\betaibitem{pa} D. P. Zhelobenko, \varepsilonmph{Description of quasisimple irreducible representations of the groups $\mathrm{U}(n,1),$ $\mr{Spin}(n,1)$} (in Russian), Izvestia Akademii nauk SSSR, Seria matematiceskaia, vol. 41(1977), pp. $34-53.$
\varepsilonnd{thebibliography}
\varepsilonnd{document} |
\begin{document}
\title{Quenched invariance principles for random walks with random conductances. }
\begin{abstract}
We prove an almost sure invariance principle for a random walker
among i.i.d. conductances in ${\mathbb Z}^d$, $d\gammaeq 2$. We assume
conductances are bounded from above but we do not require that they are
bounded from below.
\epsilonnd{abstract}
\sigmaection{Introduction}
We consider continuous-time, nearest-neighbor random walks among random
(i.i.d.) conductances in ${\mathbb Z}^d$, $d\gammaeq 2$ and prove that they satisfy
an almost sure invariance principle.
\sigmaubsection{Random walks and environments}
For $x, y \in {\mathbb Z}^d$, we write: $x \sigmaim y$ if $x$ and $y$ are
neighbors in the grid ${\mathbb Z}^d$ and let ${\mathbb E}_d$ be the set of
non-oriented nearest-neighbor pairs $(x,y)$.\\
An {\it environment} is a function $\omega} \deltaef\oo{{\tilde{\omega}}mega:{\mathbb E}_d\rightarrow
[0,+\infty[$. Since edges in ${\mathbb E}_d$ are not oriented, i.e. we
identified the edge $(x,y)$ with the reversed edge $(y,x)$, it is
implicit in the definition that environments are symmetric i.e.
$\omega} \deltaef\oo{{\tilde{\omega}}(x,y)=\omega} \deltaef\oo{{\tilde{\omega}}(y,x)$ for any pair of neighbors $x$ and $y$. \\
We let
$(\tau_z\,,\, z\in{\mathbb Z}^d)$ be the group of transformations of
environments
defined by $\tau_z\omega} \deltaef\oo{{\tilde{\omega}}(x,y)=\omega} \deltaef\oo{{\tilde{\omega}}(z+x,z+y)$.
We shall always assume that our environments are uniformly bounded from above.
Without loss of generality, we may assume that $\omega} \deltaef\oo{{\tilde{\omega}}(x,y)\lambdaeq 1$ for any edge.
Thus, for the rest of this paper, an environment will rather be a function
$\omega} \deltaef\oo{{\tilde{\omega}}mega:{\mathbb E}_d\rightarrow [0,1]$.
We use the notation $\Omega=[0,1]^{E_d}$ for the set of environments (endowed with
the product topology and the corresponding Borel structure).
The value of an environment $\omega} \deltaef\oo{{\tilde{\omega}}$ at a given edge is called the {\it conductance}.
Let $\omega} \deltaef\oo{{\tilde{\omega}}\in\Omega$. We are interested in the behavior of the random
walk in the environment $\omega} \deltaef\oo{{\tilde{\omega}}$. We denote with $D({\mathbb R}_+,{\mathbb Z}^d)$ the
space of c\`ad-l\`ag ${\mathbb Z}^d$-valued functions on ${\mathbb R}_+$ and let
$X(t)$, $t\in{\mathbb R}_+$, be the coordinate maps from $D({\mathbb R}_+,{\mathbb Z}^d)$ to
${\mathbb Z}^d$. The space $D({\mathbb R}_+,{\mathbb Z}^d)$ is endowed with the Skorokhod
topology, see \cite{kn:Bill} or \cite{kn:JS}. For a given $\omega} \deltaef\oo{{\tilde{\omega}}mega\in [0,1]^{{\mathbb E}_d}$ and for $x\in{\mathbb Z}^d$,
let $P^\omega} \deltaef\oo{{\tilde{\omega}}_x$ be the probability measure on $D({\mathbb R}_+,{\mathbb Z}^d)$ under
which the coordinate process is the Markov chain starting at
$X(0)=x$ and with generator \begin{eqnarray}\lambdaabel{int:gen} \LL^\omega} \deltaef\oo{{\tilde{\omega}} f(x)=\frac
1{n^\omega} \deltaef\oo{{\tilde{\omega}}(x)}\sigmaum_{y\sigmaim x} \omega} \deltaef\oo{{\tilde{\omega}}(x,y) (f(y)-f(x))\,, \epsilonnd{eqnarray} where
$n^\omega} \deltaef\oo{{\tilde{\omega}}(x)=\sigmaum_{y\sigmaim x} \omega} \deltaef\oo{{\tilde{\omega}}(x,y)$. If $n^\omega} \deltaef\oo{{\tilde{\omega}}(x)=0$, let $\LL^\omega} \deltaef\oo{{\tilde{\omega}}
f(x)=0$ for any function $f$.
The behavior of $X(t)$ under $P^\omega_x$ can be described as follows:
starting from point $x$, the random walker waits for an exponential
time of parameter $1$ and then chooses at random one of its
neighbors to jump to according to the probability law
$\omega(x,.)/n^\omega(x)$. This procedure is then iterated with independent
hopping times.
We have allowed environments to take the value $0$ and it is clear
from the definition of the random walk that $X$ will only travel
along edges with positive conductances. This remark motivates the
following definitions: call a {\it cluster} of the environment $\omega$
a connected component of the graph $({\mathbb Z}^d,\{e\in E_d\,;\,
\omega(e)>0\})$. By construction, our random walker never leaves the
cluster of $\omega$ it started from. Since edges are not oriented, the
measures with weights $n^\omega(x)$ on the possibly different clusters
of $\omega$ are reversible.
\subsection{Random environments} Let $Q$ be a product probability measure on $\Omega$.
In other words, we will now pick environments at random, in such a
way that the conductances of the different edges form a family of
independent identically distributed random variables. $Q$ is of
course invariant under the action of $\tau_z$ for any $z\in{\mathbb Z}^d$.
The random variables $({\mathbf 1}_{\omega(e)>0}\,;\, e\in E_d)$ are independent Bernoulli
variables with common parameter $q=Q(\omega(e)>0)$. Depending on the value of $q$,
a typical environment chosen w.r.t. $Q$ may or may not have infinite clusters.
More precisely, it is known from percolation theory that there is a critical
value $p_c$, that depends on the dimension $d$, such that for $q<p_c$,
$Q$-a.s. all clusters of $\omega$ are finite and for $q>p_c$, $Q$-a.s. there is a
unique infinite cluster. In the first case the random walk is almost surely confined
to
a finite set and therefore does not satisfy the invariance principle (or satisfies
a degenerate version of it with vanishing asymptotic variance). We shall therefore
assume that the law $Q$ is {\it super-critical} i.e. that $$q=Q(\omega(e)>0)>p_c\,.$$
Then the event `the origin belongs to the infinite cluster' has a non-vanishing
$Q$ probability and we may define the conditional law:
\begin{eqnarray*}
Q_0(.)=Q(.\,\vert\, \hbox{$0$ belongs to the infinite cluster})\,.
\end{eqnarray*}
\subsection{Annealed results} Part of the analysis of the behavior of random
walks in random environments can be done using the
{\it point of view of the particle}: we consider the random walk $X$ started
at the origin and look at the random process describing the environment shifted
by the position of the random walker i.e. we let
$\omega(t)=\tau_{X(t)}\omega$. Thus $(\omega(t)\,,\, t\in{\mathbb R}_+)$ is a random process taking
its values in $\Omega$.
Let us also introduce the measure \begin{eqnarray*}
{\tilde Q}_0(A)=\frac{\int_A n^\omega(0)dQ_0(\omega)}{\int n^\omega(0)dQ_0(\omega)}\,.
\end{eqnarray*}
Observe that ${\tilde Q}_0$ is obviously absolutely
continuous with respect to $Q_0$.
We list some of the properties of the process $\omega(.)$ as proved in \cite{kn:DFGW}:
\begin{prop} \label{propDeMasi} (Lemmata 4.3 and 4.9 in \cite{kn:DFGW})\\
The random process $\omega(t)$ is Markovian under $P^\omega_0$.
The measure ${\tilde Q}_0$ is reversible, invariant and ergodic with respect to
$\omega(t)$.
\end{prop}
Based on this proposition, the authors of \cite{kn:DFGW} could
deduce that the random walk $X(t)$ satisfies the invariance
principle {\it in the mean}. Let us define the so-called {\it
annealed} semi-direct product measure
\begin{eqnarray*} Q_0.P_x^\omega[\,F(\omega,X(.))\,]=\int P_x^\omega[\,F(\omega,X(.))\,]\,dQ_0(\omega)\,.\end{eqnarray*}
\begin{theo} \label{theoDeMasi} (Annealed invariance principle, \cite{kn:DFGW})\\
Consider a random walk with i.i.d. super-critical conductances.
Under $Q_0.P_0^\omega$, the process $(X^\varepsilon(t)=\varepsilon X(\frac
t{\varepsilon^2}),t\in{\mathbb R}_+)$ converges in law to a non-degenerate Brownian
motion with covariance matrix $\sigma^2Id$ where $\sigma^2$ is
positive.
\end{theo}
It should be pointed out that the result of \cite{kn:DFGW} is in fact
much more general. On one hand, \cite{kn:DFGW} deals with random
walks with unbounded jumps, under a mild second moment condition.
Besides, a similar annealed invariance principle is in fact proved
for any stationary law $Q$ rather than just product measures.
The positivity of $\sigma^2$ is not ensured by the general results
of \cite{kn:DFGW} but it can be proved using comparison with the
Bernoulli case, see Remark \ref{rem:positivity}.
\subsection{The almost sure invariance principle}
The annealed invariance principle is not enough to give a completely
satisfactory description of the long time behavior of the random
walk. It is for instance clear that the annealed measure
$Q_0.P_0^\omega$ retains all the symmetries of the grid. In particular
it is invariant under reflections through hyperplanes passing
through the origin. This is not true anymore for the law of the
random walk in a given environment. Still, one would expect
symmetries to be restored in the large scale, for a given
realization of $\omega$.
Our main result is the following almost sure version of Theorem \ref{theoDeMasi}:
\begin{theo} \label{theorem1} (Quenched invariance principle)\\
Consider a random walk with i.i.d. super-critical conductances.
$Q_0$ almost surely, under $P^\omega_0$, the
process $(X^\varepsilon(t)=\varepsilon X(\frac t{\varepsilon^2}),t\in{\mathbb R}_+)$ converges in law
as $\varepsilon$ tends to $0$ to a non-degenerate
Brownian motion with covariance matrix $\sigma^2Id$ where $\sigma^2$
is positive and
does not depend on $\omega$.
\end{theo}
\subsection{The Bernoulli case and other cases} The main difficulty in proving Theorem \ref{theorem1}
is the lack of assumption on a lower bound for the values of the conductances.
Indeed, if one assumes that almost any environment is bounded from below by a
fixed constant i.e. there exists a $\delta>0$ such that $Q(\omega(e)<\delta)=0$
then the conclusion of Theorem \ref{theorem1} was already proved in \cite{kn:SS}
using the classical `corrector approach' adapted from \cite{kn:Ko}.
Another special case recently solved is the Bernoulli case: let us
assume that only the values $0$ and $1$ are allowed for the
conductances i.e. $Q$ is a product of Bernoulli measures of
parameter $q$. Remember that we assume that we are in the
supercritical regime $q>p_c$. An environment can then be also
thought of as a (unweighted) random sub-graph of the grid and our
random walk is the simple symmetric random walk on the clusters of
the environment, i.e. jumps are performed according to the uniform
law on the neighbors of the current position in the graph $\omega$.
In the Bernoulli case, quenched invariance principles have been
obtained by various authors in \cite{kn:BB}, \cite{kn:MP} and
\cite{kn:SS}. These three works develop different approaches to
handle the lack of a positive lower bound for the conductances. They
have in common the use of quantitative bounds on the transition
probabilities of the random walk. It is indeed known from
\cite{kn:Ba} that the kernel of the simple random walk on an
infinite percolation cluster satisfies Gaussian bounds. A careful
analysis of the proofs shows that a necessary condition to obtain
the invariance principle using any of the three approaches in
\cite{kn:BB}, \cite{kn:MP} or \cite{kn:SS} is a Poincar\'e
inequality of the correct scaling (and in fact \cite{kn:MP} shows
that the Poincar\'e inequality is `almost' sufficient.) To be more
precise, let $A_n$ be the Poincar\'e constant on a box of size $n$
centered at the origin. In other words, $A_n$ is the inverse
spectral gap of the operator $\LL^\omega$ restricted to the connected
component at the origin of the graph $\omega\cap[-n,n]^d$ and with
reflection boundary conditions. Then one needs to know that $Q_0$
almost surely, \begin{eqnarray} \label{int:poinc} \limsup n^{-2}A_n<\infty\,.
\end{eqnarray} Such a statement was originally proved in \cite{kn:MR} for the
Bernoulli case.
It turns out that (\ref{int:poinc}) is false in the general case of
i.i.d. conductances, even if one assumes that conductances are
always positive. We can choose for instance a product law with a
polynomial tail at the origin i.e. we assume that there exists a
positive parameter $\gamma$ such that $Q(\omega(e)\leq a)\sim a^\gamma$
as $a$ tends to $0$. Then it is not difficult to prove that, for
small values of $\gamma$, \begin{eqnarray*} \liminf\frac{\log A_n}{\log n}>
2\,.\end{eqnarray*} In \cite{kn:FM}, we considered a slightly different model
of symmetric random walks with random conductances with a polynomial
tail but non i.i.d. (although with finite range dependency only) and
we proved that \begin{eqnarray*} \frac{\log A_n}{\log n}\rightarrow 2\vee\frac
d\gamma\,,\end{eqnarray*} showing that, at least in the case $\gamma<d/2$,
the Poincar\'e constant is too big to be directly used to prove the
diffusive behavior of the random walk and one needs some new
ingredient to prove Theorem \ref{theorem1}.
\begin{rmk} In \cite{kn:FM}, we derived annealed estimates
on the decay of the return probability of the random walk.
More interestingly, in the very recent work \cite{kn:BBHK}, the authors
could also obtain
quenched bounds on the decay of the return probability for quite general
random walks with random conductances.
Their results in particular show that anomalous decays do occur in high dimension.
In such situations, although the almost sure invariance principle holds, see
Theorem \ref{theorem1}, the local CLT fails.
\end{rmk}
Our proof of Theorem \ref{theorem1} uses a time change argument that
we describe in the next part of the paper.
\vskip.5cm
{\it Acknowledgments:} the author would like to thank the referees of the first version
of the paper for their careful reading and comments that led to an improvement of the paper.
{\it Note: after this paper was posted on the Arxiv, M. Biskup and
T. Prescott wrote a preprint with a different proof of Theorem \ref{theorem1},
see \cite{kn:BiPres}.
Their approach is based on ideas from \cite{kn:BB} when we prefer to invoke \cite{kn:MP}.
They also need a time change argument, as here, and percolation results
like Lemma \ref{lem:site''}.
}
\vskip 1cm
\section{A time changed process}
\setcounter{equation}{0}
\label{sec:timechange}
In this section, we introduce a time changed process, $X^\xi$, and state an invariance
principle for it: Theorem \ref{theorem'}.
Choose a threshold parameter $\xi>0$ such that $Q(\omega(e)\geq\xi)>p_c$.
For $Q$ almost any environment $\omega$, the percolation graph
$({\mathbb Z}^d,\{ e\in E_d\,;\, \omega(e)\geq\xi\})$ has
a unique infinite cluster that we denote
with ${\mathcal C}^\xi(\omega)$.
By construction ${\mathcal C}^\xi(\omega)$ is a subset of ${\mathcal C}(\omega)$.
We will refer to the connected components of the complement of ${\mathcal C}^\xi(\omega)$ in
${\mathcal C}(\omega)$ as {\it holes}.
By definition, holes are connected sub-graphs of the grid. Let $\HH^\xi(\omega)$ be the
collection
of all holes. Note that holes may contain edges such that $\omega(e)\geq\xi$.
We also define the conditioned measure
\begin{eqnarray*} Q_0^\xi(.)=Q(.\vert 0\in{\mathcal C}^\xi(\omega))\,.\end{eqnarray*}
Consider the following additive functional of the random walk:
\begin{eqnarray*}
A^\xi(t)=\int_0^t {\mathbf 1}_{X(s)\in{\mathcal C}^\xi(\omega)}\,ds\,,
\end{eqnarray*}
its inverse $(A^\xi)^{-1}(t)=\inf \{s\,;\, A^\xi(s)>t\}$ and
define the corresponding time changed process
\begin{eqnarray*}
\tX(t)=X((A^\xi)^{-1}(t))\,.
\end{eqnarray*}
Thus the process $\tX$ is obtained by suppressing in the trajectory of $X$ all the
visits to the holes.
Note that, unlike $X$, the process $\tX$ may perform long jumps when straddling holes.
As $X$ performs the random walk in the environment $\omega$, the
behavior of the random process $\tX$ is described in the next proposition.
\begin{prop}
Assume that the origin belongs to ${\mathcal C}^\xi(\omega)$. Then,
under $P^\omega_0$, the random process $\tX$ is a symmetric Markov
process on ${\mathcal C}^\xi(\omega)$.
\end{prop}
The Markov property, which is not difficult to prove, follows from a very general argument
about time changed Markov processes. The reversibility of $\tX$ is a consequence of the
reversibility of $X$ itself as will be discussed after equation (\ref{2:rates}).
The generator of the process $\tX$ has the form
\begin{eqnarray}\label{2:gen'}
{\tLLo} f(x)=\frac 1{n^\omega(x)}\sum_{y} \omega^\xi(x,y) (f(y)-f(x))\,,
\end{eqnarray}
where
\begin{eqnarray} \nonumber
\frac{\omega^\xi(x,y)}{n^\omega(x)} &=&\lim_{t\rightarrow 0} \frac 1t P_x^\omega(\tX(t)=y)\\
&=&P_x^\omega(\hbox{ $y$ is the next point in ${\mathcal C}^\xi(\omega)$ visited by the random walk
$X$})\,,
\label{2:rates} \end{eqnarray}
if both $x$ and $y$ belong to ${\mathcal C}^\xi(\omega)$ and $\omega^\xi(x,y)=0$ otherwise.
The function $\omega^\xi$ is symmetric: $\omega^\xi(x,y)=\omega^\xi(y,x)$ as follows
from the reversibility of $X$ and formula (\ref{2:rates}), but it is
no longer of nearest-neighbor type i.e. it might happen that
$\omega^\xi(x,y)\not=0$ although $x$ and $y$ are not neighbors. More
precisely, one has the following picture: $\omega^\xi(x,y)=0$ unless
either $x$ and $y$ are neighbors and $\omega(x,y)\geq \xi$, or there exists a hole, $h$, such
that both $x$ and $y$ have neighbors in $h$. (Both conditions may
be fulfilled by the same pair $(x,y)$.)
Consider a pair of neighboring points $x$ and $y$, both of them
belonging to the infinite cluster ${\mathcal C}^\xi(\omega)$ and such that
$\omega(x,y)\geq\xi$, then \begin{eqnarray} \label{2:lowbo}
\omega^\xi(x,y)\geq\xi\,.\end{eqnarray} This simple remark will play an important
role. It implies, in a sense to be made precise later, that the
parts of the trajectory of $\tX$ that consist in nearest-neighbors
jumps are similar to what the simple symmetric random walk on
${\mathcal C}^\xi(\omega)$ does.
Finally observe that the environment $\omega^\xi$ is stationary i.e. the law of $\omega^\xi$
under $Q$
is invariant with respect to $\tau_z$ for all $z\in{\mathbb Z}^d$ as can be immediately seen
from
formula (\ref{2:rates}).
\begin{theo} \label{theorem'} (Quenched invariance principle for $\tX$)\\
There exists a value $\xi_0>0$ such that for any $0<\xi\leq\xi_0$ the following holds.
For $Q_0$ almost any environment, under $P^\omega_0$, the
process $(X^{\xi,\,\varepsilon}(t)=\varepsilon \tX(\frac t{\varepsilon^2}),t\in{\mathbb R}_+)$ converges in law
as $\varepsilon$ tends to $0$ to a non-degenerate
Brownian motion with covariance matrix $\sigma^2(\xi) Id$ where $\sigma^2(\xi)$
is positive and
does not depend on $\omega$.
\end{theo}
The proof of Theorem \ref{theorem'} will be given in part
\ref{sec:proof}. It very closely mimics the arguments of
\cite{kn:MP}. Indeed, one uses the lower bound (\ref{2:lowbo}) to
bound the Dirichlet form of the process $\tX$ in terms of the
Dirichlet form of the simple symmetric random walk on ${\mathcal C}^\xi(\omega)$
and thus get the Poincar\'e inequality of the correct order. It is
then not difficult to adapt the approach of \cite{kn:MR} and
\cite{kn:Ba} to derive the tightness of the family $X^{\xi,\,\varepsilon}$
and the invariance principle follows as in
\cite{kn:MP}.
\begin{rmk} \label{rem:positivity}
The positivity of $\sigma^2$ in Theorem \ref{theorem1} and the
positivity of $\sigma^2(\xi)$ in Theorem \ref{theorem'} can be
checked using comparison arguments from \cite{kn:DFGW}. Indeed
it follows from the expression of the effective diffusivity,
see Theorem 4.5 part (iii) of \cite{kn:DFGW}, and from the
discussion on monotonicity in part 3 of \cite{kn:DFGW} that $\sigma^2$
is an increasing function of the probability law $Q$ (up to some multiplicative factor).
Therefore, if $Q$
stochastically dominates $Q'$ and the effective diffusivity under $Q'$
is positive, then the effective diffusivity under
$Q$ is also positive.
Here $Q$ stochastically dominates the law of the environment with
conductances $\omega'(e)=\xi{\mathbf 1}_{\omega(e)\geq\xi}$. The random walk in the
environment $\omega'$ is the simple random walk on a percolation cluster
which is known to have a positive asymptotic diffusivity, see
\cite{kn:Ba} or the references in \cite{kn:MP}. The same argument
shows that $\sigma^2(\xi)>0$ for any $\xi$ such that
$Q(\omega(e)\geq\xi)>p_c$.
\end{rmk}
To derive Theorem \ref{theorem1} from Theorem \ref{theorem'}, we
will compare the processes $X$ and $X^\xi$, for small values of
$\xi$. The large time asymptotic of the time change $A^\xi$ is easily
deduced from the ergodic theorem, as shown in Lemma \ref{lem:ergoA}
below and it implies that the asymptotic variance $\sigma^2(\xi)$ is
continuous at $\xi=0$, see Lemma \ref{lem:sigma}.
Let
\begin{eqnarray*}
c(\xi)={\tilde Q}_0 (0\in{\mathcal C}^\xi(\omega)) \,.
\end{eqnarray*}
\begin{lm} \label{lem:ergoA}
\begin{eqnarray*}
\frac {A^\xi(t)} t \rightarrow c(\xi)\, \hbox{ $Q_0$ a.s.}
\end{eqnarray*}
as $t$ tends to $\infty$ and
\begin{eqnarray}\label{2:cxi}
c(\xi)\rightarrow 1\,,
\end{eqnarray}
as $\xi$ tends to $0$.
\end{lm}
{\it Proof}: remember the notation $\omega(t)=\tau_{X(t-)}\omega$.
The additive functional $A^\xi(t)$ can also be written in the form
$A^\xi(t)=\int_0^t {\mathbf 1}_{0\in{\mathcal C}^\xi(\omega(s))}\,ds$.
From Proposition \ref{propDeMasi}, we know that
${\tilde Q}_0$ is an invariant and ergodic measure for the process
$\omega(t)=\tau_{X(t-)}\omega$ and that it is absolutely continuous with respect to $Q_0$.
Thus the existence of the limit $\lim_{t\rightarrow +\infty} \frac
{A^\xi(t)} t$ follows from the ergodic theorem and the limit is
$c(\xi)={\tilde Q}_0(0\in{\mathcal C}^\xi(\omega))$. To check (\ref{2:cxi}), note
that ${\mathbf 1}_{0\in{\mathcal C}^\xi(\omega)}$ almost surely converges to
${\mathbf 1}_{0\in{\mathcal C}(\omega)}$ as $\xi$ tends to $0$. Since ${\tilde Q}_0(0\in{\mathcal C}(\omega))=1$, we get that
$c(\xi)$ converges to $1$.
\rule{.2cm}{.2cm}
\begin{lm} \label{lem:sigma}
The asymptotic variances $\sigma^2$ in Theorem \ref{theoDeMasi} and $\sigma^2(\xi)$
from
Theorem \ref{theorem'}, and the constant $c(\xi)$ from Lemma \ref{lem:ergoA} satisfy
the
equality
\begin{eqnarray}\label{form:variance} c(\xi)\sigma^2(\xi)=\sigma^2\,.\end{eqnarray}
As a consequence, $\sigma^2(\xi)$ converges to $\sigma^2$ as $\xi$ tends to $0$.
\end{lm}
{\it Proof}:
formula (\ref{form:variance}) is deduced from Lemma \ref{lem:ergoA}. One can,
for instance, compute the law of the exit times from a large slab for both processes
$X$ and $X^\xi$.
Let $\tau(r)$ (resp. $\tau^\xi(r)$) be the exit time of $X$ (resp. $X^\xi$) from the
set
$[-r,r]\times{\mathbb R}^{d-1}$. Under the annealed measure, the Laplace transform of
$\tau(r)/r^2$
converges to $E(\exp(-\lambda T/\sigma^2))$ where $T$ is the exit time of $[-1,1]$
by a Brownian motion. This is a consequence of the invariance principle of Theorem
\ref{theoDeMasi}.
Theorem \ref{theorem'} implies that the Laplace transform of $\tau^\xi(r)/r^2$
converges
to $E(\exp(-\lambda T/\sigma^2(\xi)))$. (The convergence holds for $Q_0$ almost any
environment
and, by dominated convergence, under the annealed measure.) \\
On the other hand, we have $\tau^\xi(r)=A^\xi(\tau(r))$ and therefore Lemma
\ref{lem:ergoA}
implies that the Laplace transform of $\tau^\xi(r)/r^2$ has the same limit as the
Laplace
transform of $c(\xi)\tau(r)/r^2$ and therefore converges to $E(\exp(-\lambda
c(\xi)T/\sigma^2))$.
We deduce from these computations that
$$E(\exp(-\lambda c(\xi)T/\sigma^2))=E(\exp(-\lambda T/\sigma^2(\xi)))\,,$$
and, since this is true for any $\lambda\geq 0$, we must have
$c(\xi)\sigma^2(\xi)=\sigma^2$. \\
The continuity of
$\sigma^2(\xi)$ for $\xi=0$ is ensured by the continuity of $c(\xi)$.
\rule{.2cm}{.2cm}
\vskip 1cm
\section{How to deduce Theorem \ref{theorem1} from Theorem \ref{theorem'}}
\setcounter{equation}{0}
\label{sec:deduce}
We start stating a percolation lemma that will be useful to control
the contribution of holes to the behavior of the random walk.
\begin{lm} \label{lem:holes}
There exists a value $\xi_0>0$ such that for any $0<\xi\leq\xi_0$
the following holds. There exists a constant $a$ such that, $Q$
almost surely, for large enough $n$, the volume of any hole
$h\in\HH^\xi(\omega)$ intersecting the box $[-n,n]^d$ is bounded from
above by $(\log n)^a$. ($a=7$ would do.)
\end{lm}
The proof of Lemma \ref{lem:holes} is postponed to part \ref{sec:perco}.
\subsection{Tightness}
In this section, we derive the tightness of the sequence of processes $X^\varepsilon$ from
Theorem \ref{theorem'}.
\begin{lm} \label{lem:tight}
Under the assumptions of Theorem \ref{theorem1}, $Q_0$ almost
surely, under $P^\omega_0$, the family of processes $(X^\varepsilon(t)=\varepsilon
X(\frac t{\varepsilon^2}),t\in{\mathbb R}_+)$ is tight in the Skorokhod topology.
\end{lm}
{\it Proof}: we read from \cite{kn:JS}, paragraph 3.26, page 315 that a sequence of
processes
$x^\varepsilon$ is tight if and only if the following two estimates hold:\\
(i) for any $T$, any $\delta>0$, there exist $\varepsilon_0$ and $K$ such that for any
$\varepsilon\leq\varepsilon_0$
\begin{eqnarray} \label{ti1}
P(\sup_{t\leq T} \vert x^\varepsilon(t)\vert\geq K)\leq\delta\,,
\end{eqnarray}
and\\
(ii) for any $T$, any $\delta>0$, any $\eta>0$, there exist $\varepsilon_0$
and $\theta_0$ such that for any $\varepsilon\leq\varepsilon_0$ \begin{eqnarray} \label{ti2}
P(\sup_{v\leq u\leq T\,;\, u-v\leq\theta_0} \vert
x^\varepsilon(u)-x^\varepsilon(v)\vert>\eta)\leq\delta\,. \end{eqnarray}
Choose $\xi$ as in Theorem \ref{theorem'}. The sequence $X^{\xi,\,\varepsilon}$ converges;
therefore it is tight and satisfies (\ref{ti1}) and (\ref{ti2}). By definition,
$$X^{\xi,\,\varepsilon}(t)=X^\varepsilon(\varepsilon^2 (A^\xi)^{-1}(\frac t{\varepsilon^2}))\,.$$
{\it Proof of condition (i)}: let us first check that $X^\varepsilon$
satisfies (\ref{ti1}). \\ Assume that $\sup_{t\leq T} \vert
X^{\xi,\,\varepsilon}(t)\vert\leq K$. Given $t_0\leq T$, let
$x_0=X^\varepsilon(t_0)$ i.e. $X(\frac {t_0}{\varepsilon^2})=\frac 1 \varepsilon {x_0}$
and define $s_0=\varepsilon^2 A^\xi(\frac {t_0}{\varepsilon^2})$. Since
$A^\xi(t)\leq t$, we have
$s_0\leq t_0$.\\
If $\frac 1 \varepsilon {x_0}$ belongs to ${\mathcal C}^\xi(\omega)$, then $t_0=\varepsilon^2
(A^\xi)^{-1}(\frac {s_0}{\varepsilon^2})$
and $ X^{\xi,\,\varepsilon}(s_0)=X^\varepsilon(t_0)=x_0$ and therefore $\vert x_0\vert\leq K$. \\
Now suppose that $\frac 1 \varepsilon {x_0}$ does not belong to ${\mathcal C}^\xi(\omega)$ and let
$t_1=\varepsilon^2 (A^\xi)^{-1}(\frac {s_0}{\varepsilon^2})$ and $x_1=X^\varepsilon(t_1)$.
Then $t_1\leq t_0$ and $\frac 1 \varepsilon {x_1}$ belongs to ${\mathcal C}^\xi(\omega)$. The same
argument as
before shows that $\vert x_1\vert\leq K$. On the other hand, by definition of the
time changed process $X^\xi$, $\frac 1\varepsilon {x_1}$ is the last point in ${\mathcal C}^\xi(\omega)$
visited by $X$ before time $t_0$. Thus $\frac 1 \varepsilon {x_0}$ belongs to a hole
on the boundary of which sits $\frac 1 \varepsilon{x_1}$. It then follows from Lemma
\ref{lem:holes}
that $$\vert \frac 1 \varepsilon{x_1}-\frac 1 \varepsilon{x_0}\vert \leq (\log\frac K{\varepsilon})^a\,.$$
Thus we have proved that
$$\vert x_0\vert\leq K+\varepsilon (\log\frac K{\varepsilon})^a\,.$$
We can choose $\varepsilon_0$ small enough so that $\varepsilon (\log\frac K{\varepsilon})^a\leq K$
and therefore we have
$$\sup_{t\leq T} \vert X^{\xi,\,\varepsilon}(t)\vert\leq K
\implies \sup_{t\leq T} \vert X^{\varepsilon}(t)\vert\leq 2K\,.$$ Since the
sequence $X^{\xi,\,\varepsilon}$ satisfies (\ref{ti1}), the event
`$\sup_{t\leq T} \vert X^{\xi,\,\varepsilon}(t)\vert\leq K$' has a large
probability; therefore $\sup_{t\leq T} \vert X^{\varepsilon}(t)\vert\leq
2K$ has a large probability and the sequence $X^{\varepsilon}$ satisfies
(\ref{ti1}).
{\it Proof of condition (ii)}: as before, we will deduce that the sequence
$X^{\varepsilon}$ satisfies (\ref{ti2}) from the fact that the sequence $X^{\xi,\,\varepsilon}$
satisfies
(\ref{ti1}) and (\ref{ti2}).
Assume that
$$\sup_{v\leq u\leq T\,;\, u-v\leq\theta_0} \vert X^{\xi,\,\varepsilon}(u)-
X^{\xi,\,\varepsilon}(v)\vert\leq \eta\,.$$
We further assume that $\sup_{t\leq T} \vert X^{\xi,\,\varepsilon}(t)\vert\leq K$.\\
Given $v_0\leq u_0\leq T$ such that $u_0-v_0\leq\theta_0$, let
$x_0=X^\varepsilon(u_0)$, $y_0=X^\varepsilon(v_0)$ and define $s_0=\varepsilon^2
A^\xi(\frac {u_0}{\varepsilon^2})$, $t_0=\varepsilon^2 A^\xi(\frac
{v_0}{\varepsilon^2})$, $u_1=\varepsilon^2 (A^\xi)^{-1}(\frac {s_0}{\varepsilon^2})$ and
$v_1=\varepsilon^2 (A^\xi)^{-1}(\frac {t_0}{\varepsilon^2})$.
Also let $x_1=X^\varepsilon(u_1)$, $y_1=X^\varepsilon(v_1)$.\\
Since $A^\xi(t)-A^\xi(s)\leq t-s$ whenever $s\leq t$, we have
$t_0\leq s_0\leq T$ and $s_0-t_0\leq\theta_0$. Besides, by
definition of $A^\xi$, we have $x_1= X^{\xi,\,\varepsilon}(s_0)$ and $y_1=
X^{\xi,\,\varepsilon}(t_0)$. We conclude that
$$ \vert x_1-y_1\vert\leq \eta\,.$$
On the other hand, the same argument as in the proof of condition (i) based on Lemma
\ref{lem:holes}
shows that
$$ \vert x_1-x_0\vert+ \vert y_1-y_0\vert\leq 2\varepsilon (\log\frac K{\varepsilon})^a\,.$$
We have proved that
$$\sup_{v\leq u\leq T\,;\, u-v\leq\theta_0} \vert X^\varepsilon(u)- X^\varepsilon(v)\vert
\leq \eta+2\varepsilon (\log\frac K{\varepsilon})^a\,.$$ Since both events
`$\sup_{v\leq u\leq T\,;\, u-v\leq\theta_0} \vert X^{\xi,\,\varepsilon}(u)-
X^{\xi,\,\varepsilon}(v)\vert\leq \eta$' and `$\sup_{t\leq T} \vert
X^{\xi,\,\varepsilon}(t)\vert\leq K$' have large probabilities, we deduce
that the processes $X^\varepsilon$ satisfy condition (ii).
\rule{.2cm}{.2cm}
\subsection{Convergence}
To conclude the derivation of Theorem \ref{theorem1} from Theorem \ref{theorem'}, it
only
remains to argue that, for any given time $t$, the two random variables
$X^\varepsilon(t)$ and $X^{\xi,\,\varepsilon}(t)$ are close to each other in probability.
\begin{lm} \label{lem:conv}
Under the assumptions of Theorem \ref{theorem1}, $Q_0$ almost surely,
for any $t$, any $\delta>0$, any $\eta>0$, then, for small enough $\xi$,
\begin{eqnarray*}
\limsup_{\varepsilon\rightarrow 0} P^\omega_0 ( \vert
X^\varepsilon(t)-X^{\xi,\,\varepsilon}(t)\vert>\eta)\leq\delta\,.
\end{eqnarray*}
\end{lm}
{\it Proof}: we shall rely on Lemma \ref{lem:ergoA}.
If $\vert X^\varepsilon(t)-X^{\xi,\,\varepsilon}(t)\vert>\eta$, then one of the following two
events must hold:
$$(I)=
\{\sup_{\theta c(\xi)t\leq s\leq t} \vert
X^{\xi,\,\varepsilon}(s)-X^{\xi,\,\varepsilon}(t)\vert>\frac \eta 2\}\,,$$
$$(II)
=\{ \inf_{\theta c(\xi)t\leq s\leq t} \vert X^{\xi,\,\varepsilon}(s)-X^\varepsilon(t)\vert>\frac
\eta 2\}\,.$$
Here $\theta$ is a parameter in $]0,1[$.\\
The invariance principle for $X^{\xi,\,\varepsilon}$, see Theorem
\ref{theorem'}, implies that the probability of $(I)$ converges as
$\varepsilon$ tends to $0$ to the probability $P(\sup_{\theta c(\xi)t\leq
s\leq t} \sigma(\xi)\vert B(s)-B(t)\vert>\frac \eta 2)$, where $B$
is a Brownian motion. Since $\sigma(\xi)$ is bounded away from $0$,
see Lemma \ref{lem:sigma}, and since $c(\xi)\rightarrow 1$ as
$\xi\rightarrow 0$, we deduce that there exists a value for $\theta$
such that \begin{eqnarray}\label{eq:12} \limsup_{\xi \rightarrow 0}\limsup_{\varepsilon\rightarrow 0}
P^\omega_0 (I)\leq \delta\,. \end{eqnarray}
We now assume that $\theta$ has been chosen so that (\ref{eq:12})
holds. We shall end the proof of the Lemma by showing that \begin{eqnarray}
\label{eq:11} \limsup_{\varepsilon\rightarrow 0} P^\omega_0 (II)=0\,. \end{eqnarray}
Since, from the tightness of the processes $X^\varepsilon$, see Lemma
\ref{lem:tight}, we have
$$\limsup_{\varepsilon\rightarrow 0}P^\omega_0 (\sup_{s\leq t}\vert X^\varepsilon(s)\vert\geq
\varepsilon^{-1})=0\,,$$
we will estimate the probability that both events $(II)$ and
`$\sup_{s\leq t}\vert X^\varepsilon(s)\vert\leq \varepsilon^{-1}$' hold. \\
Let $u=\varepsilon^2 A^\xi(\frac t{\varepsilon^2})$ and note that $u\leq t$. From Lemma
\ref{lem:ergoA},
we know that $u\geq \theta c(\xi) t$ for small enough $\varepsilon$ depending on $\omega$. \\
If $X^\varepsilon(t)$ belongs to ${\mathcal C}^\xi(\omega)$, then $X^\varepsilon(t)=X^{\xi,\,\varepsilon}(u)$
and therefore $(II)$ does not hold. \\
Otherwise $X^\varepsilon(t)$ belongs to a hole on the boundary of which sits
$X^{\xi,\,\varepsilon}(u)$.
Using the condition $\sup_{s\leq t}\vert X^\varepsilon(s)\vert\leq \varepsilon^{-1}$ and
Lemma \ref{lem:holes}, we get that
$$\vert X^\varepsilon(t)-X^{\xi,\,\varepsilon}(u)\vert\leq \varepsilon(\log \frac 1\varepsilon)^a\,.$$
For sufficiently small $\varepsilon$ we have $\varepsilon(\log 1/\varepsilon)^a<\frac \eta 2$ and
therefore $(II)$ fails. The proof of (\ref{eq:11}) is complete.
\rule{.2cm}{.2cm}
{\it End of the proof of Theorem \ref{theorem1}}: choose times
$0<t_1<...<t_k$. Use Lemma \ref{lem:conv}, to deduce that for small
enough $\xi$, as $\varepsilon$ tends to $0$, the law of
$(X^\varepsilon(t_1),...,X^\varepsilon(t_k))$ comes close to the law of
$(X^{\xi,\,\varepsilon}(t_1),...,X^{\xi,\,\varepsilon}(t_k))$, which in turn,
according to Theorem \ref{theorem'}, converges to the law of
$(\sigma(\xi)B(t_1),...,\sigma(\xi)B(t_k))$, where $B$ is a Brownian
motion. We now let $\xi$ tend to $0$: since $\sigma(\xi)$ converges
to $\sigma$, see Lemma \ref{lem:sigma}, the limiting law of
$(X^\varepsilon(t_1),...,X^\varepsilon(t_k))$ is the law of $(\sigma
B(t_1),...,\sigma B(t_k))$ i.e. we have proved that $X^\varepsilon$
converges in law to a Brownian motion with variance $\sigma^2$ in
the sense of finite dimensional marginals. The tightness Lemma
\ref{lem:tight} implies that the convergence in fact holds in the
Skorokhod topology.
\rule{.2cm}{.2cm}
\vskip 1cm
\section{Proof of Theorem \ref{theorem'}}
\setcounter{equation}{0}
\label{sec:proof}
We will outline here a proof of Theorem \ref{theorem'}.
Our strategy is quite similar to the one recently used in
\cite{kn:MR}, \cite{kn:Ba} and \cite{kn:MP} to study the simple symmetric
random walk on a percolation cluster. No new idea is required.
\vskip .5cm {\bf Step 0: notation}
As before, we use the notation $\omega$ to denote a typical environment
under the measure $Q$.
For a given edge $e\in{\mathbb E}_d$ (and a given choice of $\omega$), we define
\begin{eqnarray*}
\alpha(e)={\mathbf 1}_{\omega(e)>0}\,;\, \alpha'(e)={\mathbf 1}_{\omega(e)\geq\xi}\,.
\end{eqnarray*}
As in part \ref{sec:timechange}, let $\mathcal{C}^\xi(\omega)$ be the infinite cluster of the
percolation
graph $\alpha'$. For $x,y\in\mathcal{C}^\xi(\omega)$,
we define the {\it chemical distance} $d^\xi_\omega(x,y)$ as the minimal number of jumps
required
for the process $\tX$ to go from $x$ to $y$, see part \ref{sec:deviation}.
We recall the definition of the generator ${\tLLo}$ from formula (\ref{2:gen'}).
Since the function $\omega^\xi$ is symmetric, the operator ${\tLLo}$ is reversible with
respect
to the measure $\mu_\omega=\sum_{z\in\mathcal{C}^\xi(\omega)} n^\omega(z)\delta_z$.
Let $\mathcal{C}^{n}(\omega)$ be the connected
component of $\mathcal{C}^\xi(\omega)\cap [-n,n]^d$ that contains the
origin. Let $(\tXn(t),\ t \geq 0)$ be the random walk $\tX$ restricted to the set
$\mathcal{C}^n(\omega)$.
The definition of $\tXn$ is the same as for $\tX$ except that jumps outside $\mathcal{C}^n$
are
now forbidden. Its Dirichlet form is
\begin{eqnarray*}
\tEEon(f,f)=\frac 12 \sum_{x\sim y\in \mathcal{C}^n(\omega)}
\omega^\xi(x,y) (f(x)-f(y))^2
\end{eqnarray*}
We use the notation $\tau^n$ for the exit time of the process $\tX$ from the box
$[-2n+1,2n-1]^d$ i.e. $\tau^n=\inf\{t\,;\, \tX(t)\notin[-2n+1,2n-1]^d\}$.
\vskip .5cm {\bf Step 1: Carne-Varopoulos bound}
The measure $\mu_\omega$ being reversible for the process $\tX$,
the transition probabilities satisfy a Carne-Varopoulos bound:
\begin{eqnarray*}
P^\omega_x(\tX(t)=y)\leq Ce^{-d^\xi_\omega(x,y)^2/(4t)}+e^{-ct}\,,
\end{eqnarray*}
where $c=\log 4-1$ and $C$ is some constant that depends on $\xi$ and $\omega$. (See \cite{kn:MR}, appendix C.)
By Lemma \ref{lem:deviation}, we can replace the chemical distance
$d^\xi_\omega(x,y)$ by the Euclidean distance $\vert x-y\vert$, provided
that $x\in[-n,n]^d$ and $n$ is large enough. We get that, $Q_0^\xi$
almost surely, for large enough $n$, for any $x\in[-n,n]^d$ and any
$y\in{\mathbb Z}^d$ such that $\vert x-y\vert\geq (\log n)^2$, then
\begin{eqnarray}\label{eq:carnevaro} P^\omega_x(\tX(t)=y)\leq
Ce^{-\frac{\vert
x-y \vert^2}{Ct}}+e^{-ct}\,.\end{eqnarray}
The same reasoning as in \cite{kn:MR}, appendix C (using Lemma
\ref{lem:deviation} again) then leads to upper bounds
for the exit time $\tau^n$: $Q_0^\xi$
almost surely, for large enough $n$, for any $x\in[-n,n]^d$ and any
$t$, we have
\begin{eqnarray} \label{eq:taun}
P_x^\omega[\tau^n\leq t] \leq Ct
n^d e^{-\frac{n^2}{Ct}}+e^{-ct}\,. \end{eqnarray}
Indeed, let $N(t)$ be the number of jumps the random walk performs until time $t$
and let $\sigma^n$ be the number of jumps of the walk until it exits the box $[-2n+1,2n-1]^d$,
so that $\sigma^n=N(\tau^n)$.
Note that the process $(N(t)\,,\, t\in{\mathbb R}_+)$ is a Poisson process of rate $1$.
With probability larger than $1-e^{-ct}$, we have $N(t)\leq 2t$.
If $N(t)\leq 2t$ and $\tau^n\leq t$, then $\sigma^n\leq 2t$ and there are
at most $2t$ choices for the value of $\sigma^n$.
Let $y$ be the position of the walk at the exit time and let $z$ be the last point
visited before exiting. Note that $d^\xi_\omega(z,y)=1$.
Due to Lemma
\ref{lem:deviation}, we have
\begin{eqnarray*}
\vert x-y\vert\leq \frac 1{c^-} d^\xi_\omega(x,y)
\leq \frac 1{c^-} (d^\xi_\omega(x,z)+1) \leq \frac{c^+}{c^-} (1+\vert x-z\vert)\leq
\frac{c^+}{c^-} (1+2n)\,.
\end{eqnarray*}
Note that our use of Lemma
\ref{lem:deviation} here is legitimate. Indeed $\vert x-y\vert$ is of order $n$
and, since $d^\xi_\omega(z,y)=1$, Lemma \ref{lem:holes} implies that $\vert y-z\vert$ is at most
of order $(\log n)^7$. Therefore $\vert x-z\vert$ is of order $n$ and thus certainly larger than $(\log n)^2$.
Thus we see that there are at most of order $n^d$ possible choices for $y$.
Finally, due to (\ref{eq:carnevaro}),
\begin{eqnarray*} P^\omega_x(\tX(s)=y)\leq C e^{-\frac{n^2}{Ct}}\,,\end{eqnarray*}
for any $s\leq 2t$, $x\in[-n,n]^d$ and $y\notin [-2n+1,2n-1]^d$.
Putting everything together, we get (\ref{eq:taun}).
\vskip .5cm {\bf Step 2: Nash inequalities and on-diagonal decay}
\begin{lm}\label{lem:1}
For any $\theta>0$, there exists a constant $c_u(\theta)$ such
that, $Q_0^\xi$ a.s. for large enough $t$, we have \begin{eqnarray}
\label{eq:ondiag}
P_x^{\omega}[\tX(t)=y]
\leq
\frac{c_u(\theta)}{t^{d/2}}\,,
\end{eqnarray} for any $x\in \mathcal{C}^\xi(\omega)$ and $y\in{\mathbb Z}^d$ such that $\vert x\vert\leq
t^\theta$.
\end{lm}
{\it Proof}:
We use the notation $\alpha'(e)={\mathbf 1}_{\omega(e)\geq\xi}$. Note that the random variables
$(\alpha'(e)\,;\, e\in E_d)$ are independent Bernoulli
variables with common parameter $Q(\alpha'(e)>0)=Q(\omega(e)\geq\xi)$. Since we have assumed that
$Q(\omega(e)\geq\xi)>p_c$, the environment $\alpha'$ is a typical realization of super-critical bond
percolation.
The following Nash inequality is proved in \cite{kn:MR}, equation (5):
there exists a constant $\beta$ such that
$Q_0^\xi$ a.s. for large enough $n$, for any function
$f:\mathcal{C}^n(\omega)\rightarrow{\mathbb R}$ one has
\begin{eqnarray*}
\mathrm{Var}(f)^{1+\frac{2}{\varepsilon(n)}}
\leq
\beta \,
n^{2(1-\frac{d}{\varepsilon(n)})}\,\mathcal{E}^{\alpha',n}(f,\,f)\,\|f\|_1^{4/\varepsilon(n)}\,,
\end{eqnarray*} where \begin{eqnarray*} \mathcal{E}^{\alpha',n}(f,f)=\frac 12 \sum_{x\sim y\in
\mathcal{C}^n(\omega)}
\alpha'(x,y) (f(x)-f(y))^2
\,.
\end{eqnarray*}
The variance and the $L_1$ norms are computed with respect to the
counting measure on $\mathcal{C}^n(\omega)$ and $\varepsilon(n)=d+2d\frac{\log\log
n}{\log n}$.
(Note that there is a typo in \cite{kn:MR} where it is claimed that
(5) holds for the uniform probability on $\mathcal{C}^n(\omega)$ instead of the counting measure.)
Inequality (\ref{2:lowbo}) implies that $\alpha'(x,y)\leq \xi^{-1}
\omega^\xi(x,y)$. Therefore $\mathcal{E}^{\alpha',n}$ and $\tEEon$ satisfy the
inequality \begin{eqnarray}\label{eq:compDF} \mathcal{E}^{\alpha',n}(f,f)\leq \frac
1\xi\, \tEEon(f,f)\,. \end{eqnarray}
Using inequality (\ref{eq:compDF}) in the previous Nash inequality, we
deduce that there exists a constant $\beta$ (that depends on $\xi$) such that
$Q_0^\xi$ a.s. for large enough $n$, for any function
$f:\mathcal{C}^n(\omega)\rightarrow{\mathbb R}$ one has
\begin{eqnarray} \label{eq:nash}
\mathrm{Var}(f)^{1+\frac{2}{\varepsilon(n)}}
\leq
\beta\,
n^{2(1-\frac{d}{\varepsilon(n)})}\,\tEEon(f,\,f)\,\|f\|_1^{4/\varepsilon(n)}\,.
\end{eqnarray}
As shown in \cite{kn:MR} part 4, the Carne-Varopoulos inequality
(\ref{eq:carnevaro}), inequality (\ref{eq:taun}) and the Nash
inequality (\ref{eq:nash}) can be combined to prove upper bounds on
the transition probabilities. We thus obtain that: there exists a
constant $c_u$ such that, $Q_0^\xi$ a.s. for large enough $t$, we
have \begin{eqnarray} \label{eq:ondiag0}
P_0^{\omega}[\tX(t)=y]
\leq
\frac{c_u}{t^{d/2}}\,,
\end{eqnarray} for any $y\in{\mathbb Z}^d$.
Using the translation invariance of $Q$, it is clear that estimate
(\ref{eq:ondiag0}) in fact holds if we choose another point
$x\in{\mathbb Z}^d$ to play the role of the origin. Thus, for any $x\in{\mathbb Z}^d$,
$Q$ a.s. on the set $x\in \mathcal{C}^\xi(\omega)$, for $t$ larger than some
random value $t_0(x)$, we have \begin{eqnarray} \label{eq:ondiag'}
P_x^{\omega}[\tX(t)=y]
\leq
\frac{c_u}{t^{d/2}}\,,
\end{eqnarray} for any $y\in{\mathbb Z}^d$.
In order to deduce the Lemma from the upper bound
(\ref{eq:ondiag'}), one needs to control the tail of the law of
$t_0(0)$. \\
Looking at the proofs in \cite{kn:MR}, one sees that all the error
probabilities decay faster than any polynomial. More precisely, the
$Q_0^\xi$ probability that inequality (\ref{eq:nash}) fails for some
$n\geq n_0$ decays faster than any polynomial in $n_0$. From the
proof of Lemma \ref{lem:deviation}, we also know that the $Q_0^\xi$
probability that inequality (\ref{eq:carnevaro}) fails for some
$n\geq n_0$ decays faster than any polynomial in $n_0$. As a
consequence, a similar bound holds for inequality (\ref{eq:taun}).\\
To deduce error bounds for (\ref{eq:ondiag0}), one then needs to go
to part 4 of \cite{kn:MR}. Since the proof of the upper bound
(\ref{eq:ondiag0}) is deduced from (\ref{eq:carnevaro}),
(\ref{eq:taun}) and (\ref{eq:nash}) by choosing $t\log t =b n^2$ for
an appropriate constant $b$, we get that $Q_0^\xi(\hbox {inequality
(\ref{eq:ondiag0}) fails for some $t\geq t_0$})$ decays faster than
any polynomial in $t_0$. By translation invariance, the same holds
for (\ref{eq:ondiag'}) i.e. for any $A>0$, there exists $T$ such
that
\begin{eqnarray*} Q(x\in \mathcal{C}^\xi(\omega)\,\hbox{and}\,t_0(x)\geq t_0 )\leq
t_0^{-A}\,,\end{eqnarray*}
for any $t_0>T$. Therefore,
\begin{eqnarray*} Q(\exists x\in \mathcal{C}^\xi(\omega)\,;\,
\vert x\vert\leq t_0^\theta \,\hbox{and}\, t_0(x)\geq t_0)\leq t_0^{d\theta-A}\,.\end{eqnarray*}
One then chooses $A$
larger than $d\theta+1$ and the Borel-Cantelli lemma gives the end of
the proof of (\ref{eq:ondiag}).
\vskip .5cm {\bf Step 3: exit times estimates and tightness}
We denote with $\tau(x,r)$ the exit time of the random walk from the
ball of center $x$ and Euclidean radius $r$.
\begin{lm}\label{lem:2}
For any $\theta>0$, there exists a constant $c_e$ such that,
$Q_0^\xi$ a.s. for large enough $t$, we have \begin{eqnarray} \label{eq:exit}
P_x^{\omega}[\tau(x,r)< t]
\leq
c_e \frac{\sqrt{t}}r\,, \end{eqnarray} for any $x\in {\mathbb Z}^d$ and $r$ such that
$\vert x\vert\leq t^\theta$ and $r\leq t^\theta$.
\end{lm}
{\it Proof}: the argument is the same as in \cite{kn:Ba}, part 3. We
define
$$ M_x(t)=E^\omega_x[d^\xi_\omega(x,X^\xi(t))]$$
and
$$ Q_x(t)=-E^\omega_x[\log q^\omega_t(x,X^\xi(t))]\,,$$
where $q^\omega_t(x,y)=P^\omega_x(X^\xi(t)=y)/\mu_\omega(x)$. Then, for large
enough $t$ and for $\vert x\vert\leq t^\theta$, one has:
\begin{eqnarray*} &&Q_x(t)\geq -\log c_u+\frac d 2 \log t\,,\\
&&M_x(t)\geq c_2 \exp(Q_x(t)/d)\,,\\
&&Q_x'(t)\geq \frac 12 (M_x'(t))^2\,. \end{eqnarray*} The first inequality is
obtained as an immediate consequence of Lemma \ref{lem:1}. The
second one is proved as in \cite{kn:Ba}, Lemma 3.3 and the third one
as in \cite{kn:Ba}, equation (3.10), using ideas from \cite{kn:Bass}
and \cite{kn:Nash}.
Note that, in the proof of the
second inequality, we used Lemma \ref{lem:deviation} to control the
volume growth in the chemical distance $d^\xi_\omega$. One now
integrates these inequalities to deduce that \begin{eqnarray}\label{eq:mean}
c_1\sqrt t \leq M_x(t)\leq c_2 \sqrt t\,. \end{eqnarray} Once again the proof
is the same as in \cite{kn:Ba}, Proposition 3.4. Note that, in the
notation of \cite{kn:Ba}, $T_B=\vert x\vert^{1/\theta}$ so that
equation (\ref{eq:mean}) holds for $t\geq \frac 1\theta \vert
x\vert^{1/\theta}\log\vert x\vert$. The end of the proof is
identical to the proof of Equation (3.13) in \cite{kn:Ba}.
\begin{lm}\label{lem:2'}
$Q_0^\xi$ a.s. for large enough $t$, we have \begin{eqnarray} \label{eq:exit'}
P_x^{\omega}[\tau(x,r)< t]
\leq
27
(c_e)^3 (\frac{\sqrt{t}}r)^3\,, \end{eqnarray} for any $x\in {\mathbb Z}^d$ and $r$ such that
$\vert x\vert\leq t^\theta$ and $r\leq t^\theta$.
\end{lm}
{\it Proof}: let $x'=X^\xi(\tau(x,r/3))$, $x''=X^\xi(\tau'(x',r/3))$ where $\tau'(x',r/3)$ is the exit
time from the ball of center $x'$ and radius $r/3$ after time $\tau(x,r/3)$ and let $\tau''(x'',r/3)$
be the exit
time from the ball of center $x''$ and radius $r/3$ after time $\tau'(x',r/3)$.
In order that $\tau(x,r)< t$ under $P_x^{\omega}$ we must have
$\tau(x,r/3)< t$ and
$\tau'(x',r/3)< t$ and $\tau''(x'',r/3)<t$. We can then use Lemma \ref{lem:2} to estimate the probabilities
of these $3$ events and conclude that (\ref{eq:exit'}) holds.
\rule{.2cm}{.2cm}
\begin{lm}\label{lem:3}
For small enough $\xi$, $Q_0$ almost surely, under $P^\omega_0$, the
family of processes $(X^{\xi,\varepsilon}(t)=\varepsilon X^\xi(\frac
t{\varepsilon^2}),t\in{\mathbb R}_+)$ is tight in the Skorokhod topology (as $\varepsilon$ goes to $0$).
\end{lm}
{\it Proof}: we shall prove that, for any $T>0$, for any $\eta>0$ and for small enough $\theta_0$ then
\begin{eqnarray} \label{eq:ti10} \limsup_\varepsilon
\sup_{v\leq T} P^\omega_0(\sup_{u\leq T\,;\, v\leq u\leq v+\theta_0} \vert
X^{\xi,\varepsilon}(u)-X^{\xi,\varepsilon}(v)\vert >\eta) \leq
27 (c_e)^3 (\frac{\sqrt
{\theta_0}}\eta)^3\,. \end{eqnarray}
Indeed inequality (\ref{eq:ti10}) implies that
\begin{eqnarray} \label{eq:ti11}
\limsup_{\theta_0} \frac 1 {\theta_0} \limsup_\varepsilon
\sup_{v\leq T}P^\omega_0(\sup_{u\leq T\,;\, v\leq u\leq v+\theta_0} \vert
X^{\xi,\varepsilon}(u)-X^{\xi,\varepsilon}(v)\vert >\eta)=0\,.\end{eqnarray}
According to Theorem 8.3 in Billingsley's book \cite{kn:Bill}, this
last inequality is sufficient to ensure the tightness.
We use Lemma \ref{lem:2} with $\theta=1$ to check
that
\begin{eqnarray*} P^\omega_0(\sup_{t\leq T}\vert
X^{\xi,\varepsilon}(t)\vert\geq K) =P^\omega_0(\tau(0,\frac K\varepsilon)\leq\frac
T{\varepsilon^2})\leq c_e\frac{\sqrt T}K\,. \end{eqnarray*}
(We could use Lemma
\ref{lem:2} since $\frac K\varepsilon\leq \frac T{\varepsilon^2}$ for small
$\varepsilon$.)
Next choose $\eta>0$ and use Lemma \ref{lem:2'} with $\theta=3$ and the Markov property to get
that \begin{eqnarray*} P^\omega_0(\sup_{v\leq u\leq T\,;\, u-v\leq\theta_0} \vert
X^{\xi,\varepsilon}(u)-X^{\xi,\varepsilon}(v)\vert >\eta) \leq
P^\omega_0(&&\sup_{t\leq T}\vert X^{\xi,\varepsilon}(t)\vert\geq K) \\&&+
\sup_{y\,;\, \vert y\vert\leq K/\varepsilon}
P^\omega_y(\tau(y,\frac\eta\varepsilon)\leq \frac{\theta_0}{\varepsilon^2})\,.\end{eqnarray*}
If we choose $K$ of order $1/\varepsilon$ and pass to the limit as $\varepsilon$ tends to $0$,
then, due to the previous inequality, the contribution of the first term vanishes.
As for the second term, by Lemma \ref{lem:2'}, it is bounded by
$27 (c_e)^3 (\frac{\sqrt
{\theta_0}}\eta)^3$. Note that we could use
Lemma
\ref{lem:2'} since $\frac K\varepsilon\leq (\frac {\theta_0}{\varepsilon^2})^3$
and $\frac \eta\varepsilon\leq (\frac {\theta_0}{\varepsilon^2})^3$ for small
$\varepsilon$. Thus the proof of (\ref{eq:ti10}) is complete.
\rule{.2cm}{.2cm}
\vskip .5cm {\bf Step 4: Poincar\'e inequalities and end of the
proof of Theorem \ref{theorem'}}
Applied to a centered function $f$,
Nash inequality (\ref{eq:nash})
reads:
\begin{eqnarray*}
\Vert f\Vert_2^{2+\frac{4}{\varepsilon(n)}}
\leq
\beta\,
n^{2(1-\frac{d}{\varepsilon(n)})}\,\tEEon(f,\,f)\,\|f\|_1^{4/\varepsilon(n)}\,.
\end{eqnarray*}
Holder's inequality implies that
$$\Vert f\Vert_1\leq \Vert f\Vert_2 (2n+1)^{d/2}$$ since $\# C^n(\omega)\leq (2n+1)^d$.
We deduce that any centered function on $\mathcal{C}^n(\omega)$ satisfies
\begin{eqnarray*} \Vert f\Vert_2^2 \leq \beta n^2\,\tEEon(f,\,f)\,, \end{eqnarray*}
for some constant $\beta$. Equivalently, any (not necessarily centered)
function on $\mathcal{C}^n(\omega)$ satisfies
\begin{eqnarray*} \mathrm{Var} (f)\leq \beta n^2\,\tEEon(f,\,f)\,. \end{eqnarray*}
Thus we have proved the following Poincar\'e inequality on $C^n(\omega)$: there is a constant $\beta$
such that, $Q_0^\xi$ a.s. for large enough $n$, for any function
$f:\mathcal{C}^n(\omega)\rightarrow{\mathbb R}$ then \begin{eqnarray}\label{eq:poinc1} \sum_{x\in
\mathcal{C}^n(\omega)}f(x)^2 \leq \beta n^2\, \sum_{x\sim y\in
\mathcal{C}^n(\omega)}\omega^\xi(x,y) (f(x)-f(y))^2 \end{eqnarray}
Our second Poincar\'e inequality is derived from \cite{kn:Ba}, see
Definition 1.7, Theorem 2.18, Lemma 2.13 part a) and Proposition 2.17 part b): there
exist constants $M<1$ and $\beta$ such that $Q_0^\xi$ a.s. for any
$\delta>0$, for large enough $n$, for any $z \in {\mathbb Z}^d$ s.t. $\vert
z\vert\le n$ and for any function $f:{\mathbb Z}^d\rightarrow{\mathbb R}$ then
\begin{eqnarray}\label{eq:poinc2} \sum_{x\in \mathcal{C}^\xi(\omega)\cap (z+[-M\delta
n,M\delta n]^d)}
f(x)^2
\leq \beta \delta^2 n^2\, \sum_{x\sim y\in \mathcal{C}^\xi(\omega)\cap
(z+[-\delta n,\delta n]^d)}
\omega^\xi(x,y) (f(x)-f(y))^2
\end{eqnarray} In \cite{kn:Ba}, inequality (\ref{eq:poinc2}) is in fact
proved for the Dirichlet form $\mathcal{E}^{\alpha',n}$ but the comparison
inequality (\ref{eq:compDF}) implies that it also holds for the
Dirichlet form $\tEEon$.
One can now conclude the proof of the Theorem following the argument
in \cite{kn:MP} line by line starting from paragraph 2.2.
\rule{.2cm}{.2cm}
\vskip 1cm
\section{Percolation results}
\setcounter{equation}{0}
\label{sec:perco}
\subsection{Prerequisites on site percolation}
We shall use some properties of site percolation that we state below.
By site percolation of parameter $r$ on ${\mathbb Z}^d$, we mean the product
Bernoulli measure of parameter $r$ on the set of applications
$\zeta:{\mathbb Z}^d\rightarrow\{0,1\}$. We identify any such application
with the sub-graph of the grid whose vertices are the points
$x\in{\mathbb Z}^d$ such that $\zeta(x)=1$ and equipped with the edges of the
grid linking two points $x,y$ such that $\zeta(x)=\zeta(y)=1$.
Let $l>1$. Call a sub-set of ${\mathbb Z}^d$ {\it $l$-connected} if it is
connected for the graph structure defined by: two points are
neighbors when the Euclidean distance between them is less than $l$.
We recall
our notation $\vert x-y\vert$ for the Euclidean distance between $x$ and $y$.
A {\it path} is a sequence of vertices of ${\mathbb Z}^d$ such that
two successive vertices are neighbors.
We mostly consider injective paths.
With some abuse of vocabulary, a sequence of vertices of ${\mathbb Z}^d$
in which two successive vertices are at distance not more
than $l$ will be called an {\it $l$-nearest-neighbor path}.
Let $\pi=(x_0,...,x_k)$ be a sequence of vertices. We define its length
$$\vert \pi\vert=\sum_{j=1}^k \vert x_{j-1} -x_j\vert\,,$$
and its cardinality $\#\pi=\#\{x_0,...,x_k\}$. ($\#\pi=k+1$ for an injective path.)
When convenient, we identify an injective path with a set (its range).
\begin{lm}\label{lem:site}
Let $l>1$.
There exists $p_1>0$ such that for $r<p_1$,
almost any realization of site percolation of parameter $r$ has only
finite $l$-connected components and,
for large enough $n$, any $l$-connected component that
intersects the box $[-n,n]^d$ has volume smaller than $(\log n)^{6/5}$.
\end{lm}
{\it Proof}: the number of $l$-connected sets that contain a fixed
vertex and of volume $m$ is smaller than $e^{a(l)m}$ for some
constant $a(l)$, see \cite{kn:G}. Thus the number of $l$-connected
sets of volume $m$ that intersect the box $[-n,n]^d$ is smaller than
$(2n+1)^d e^{a(l)m}$. But the probability that a given set of volume
$m$ contains only opened sites is $r^m\leq p_1^m$. We now choose
$p_1$ small enough so that $\sum_n\sum_{m\geq (\log n)^{6/5}}
(2n+1)^d e^{a(l)m} p_1^m<\infty$ and the Borel-Cantelli lemma yields
the conclusion of Lemma \ref{lem:site}.
\rule{.2cm}{.2cm}
As in the case of bond percolation discussed in the introduction, it
is well known that for $r$ larger than some critical value then
almost any realization of site percolation of parameter $r$ has a
unique infinite connected component --- the {\it infinite cluster} ---
that we will denote with $\mathcal{C}$.
\begin{lm}\label{lem:site'}
There exists $p_2<1$ such that for $r>p_2$, for
almost any realization of site percolation of parameter $r$ and
for large enough $n$, any connected component of the complement of the
infinite cluster $\mathcal{C}$ that
intersects the box $[-n,n]^d$ has volume smaller than $(\log n)^{5/2}$.
\end{lm}
{\it Proof}: let $\zeta$ be a typical realization of site percolation of parameter
$r$.
We assume that $r$ is above the critical value so that there is a unique infinite
cluster, $\mathcal{C}$.
We also assume that $1-r<p_1$ where $p_1$ is the value provided by Lemma
\ref{lem:site} for $l=d$.
Let $A$ be a connected component of the complement of $\mathcal{C}$. Define
the {\it interior boundary of $A$}: $\partial_{int}A=\{x\in A\,;\,
\exists y\, s.t.\, (x,y)\in {\mathbb E}_d\,\hbox{and}\, y\notin A\}$.
It is known
that $\partial_{int}A$ is $d$-connected,
see \cite{kn:DP}, Lemma 2.1.
By construction any $x\in
\partial_{int}A$ satisfies $\zeta(x)=0$. Since the application
$x\rightarrow 1-\zeta(x)$ is a typical realization of site
percolation of parameter $1-r$ and $1-r<p_1$, as an application of
Lemma \ref{lem:site} we get that $\partial_{int}A$ is finite.
Because we already know that the complement of $A$ is infinite
(since it contains $\mathcal{C}$), it implies that $A$ itself is finite.
We now assume that $A$ intersects the box $[-n,n]^d$. Choose $n$
large enough so that $\mathcal{C}\cap [-n,n]^d\not=\emptyset$ so that
$[-n,n]^d$ is not a sub-set of $A$. Then it must be that
$\partial_{int}A$ intersects $[-n,n]^d$. Applying Lemma
\ref{lem:site} again, we get that, for large $n$, the volume of
$\partial_{int}A$ is smaller than $(\log n)^{6/5}$. The classical
isoperimetric inequality in ${\mathbb Z}^d$ implies that, for any finite
connected set $B$, one has $(\#\partial_{int} B)^{d/(d-1)}\geq
\mathcal{I} \# B$ for some constant $\mathcal{I}$. Therefore $\# A\leq
\mathcal{I}^{-1} ( \log n)^{6d/5(d-1)}$. Since $6d/5(d-1)<5/2$, the
proof is complete.
\rule{.2cm}{.2cm}
\begin{lm}\label{lem:site''}
There exist $p_3<1$ and a constant $c_3$ such that for $r>p_3$, for
almost any realization of site percolation of parameter $r$ and
for large enough $n$, for any two points $x,y$ in the box $[-n,n]^d$
such that $\vert x-y\vert\geq (\log n)^{3/2}$ we have\\
(i) for any injective $d$-nearest-neighbor path $\pi$ from $x$ to $y$ then
\begin{eqnarray*}
\#\{z\in\pi\,;\,\zeta(z)=1\}\geq c_3\vert x-y\vert\,.
\end{eqnarray*}
(ii) for any injective ($1$-nearest-neighbor) path $\pi$ from $x$ to $y$ then
\begin{eqnarray*}
\#(\mathcal{C}\cap\pi)\geq c_3\vert x-y\vert\,.
\end{eqnarray*}
\end{lm}
{\it Proof}: we assume that $r$ is close enough to $1$ so that there is a unique
infinite cluster $\mathcal{C}$. We also assume that $1-r<p_1$,
where $p_1$ is the constant appearing in Lemma \ref{lem:site} for $l=1$.
Then the complement
of $\mathcal{C}$ only has finite connected components.
Part (i) of the Lemma is proved by a classical Borel-Cantelli argument
based on the following simple observations: the number of
injective $d$-nearest-neighbor paths $\pi$ from $x$ of length $L$ is bounded by $(c_d)^L$ for
some constant $c_d$ that depends on the dimension $d$ only; the probability that
a given set of cardinality $L$ contains less than $dc_3L$ sites where $\zeta=1$ is bounded
by $\exp(\lambda dc_3 L)(re^{-\lambda}+1-r)^L$ for all $\lambda>0$.
We choose $c_3<\frac 1d$
and $\lambda$ such that $c_d e^{-(1-dc_3)\lambda}<1$ and $p_3$ such that
$\gamma=c_d e^{\lambda dc_3} (p_3e^{-\lambda}+1-p_3)<1$.
Let now $x$ and $y$ be as in the Lemma. Note that any
injective $d$-nearest-neighbor path $\pi$ from $x$ to $y$
satisfies $\#\pi\geq \frac 1d \vert x-y\vert\geq \frac 1d (\log n)^{3/2}$.
Therefore the probability that there is an injective $d$-nearest-neighbor
path $\pi$ from $x$ to $y$ such that $\#\{z\in\pi\,;\,\zeta(z)=1\}<c_3\vert x-y\vert$
is smaller than
$\sum_{L\geq \frac 1d (\log n)^{3/2}} \gamma^L$ and the probability
that (i) fails for some $x$ and $y$ is smaller than
$(2n+1)^{2d}\sum_{L\geq \frac 1d (\log n)^{3/2}} \gamma^L$.
Since $\sum_n (2n+1)^{2d} \sum_{L\geq \frac 1d (\log n)^{3/2}} \gamma^L<\infty$,
the Borel-Cantelli lemma then yields that, for large enough $n$, part (i) of Lemma
\ref{lem:site''} holds.
We prove part (ii) by reducing it to an application of part (i).
Assume that, for some points $x$ and $y$ as in the Lemma, there exists
an injective
nearest-neighbor path $\pi$ from $x$ to $y$ such that
$\#(\mathcal{C}\cap\pi)< c_3\vert x-y\vert$.
We first modify the path $\pi$ into a $d$-nearest-neighbor path from $x$ to $y$, say
$\pi'$, in the following way: the parts of $\pi$ that lie in $\mathcal{C}$ remain unchanged but
the parts of $\pi$ that visit the complement of $\mathcal{C}$ are modified so that they only
visit points where $\zeta=0$. Such a modified path $\pi'$ exists because
the interior boundary of a connected component of the complement of $\mathcal{C}$
is $d$-connected (as we already mentioned in the proof of Lemma \ref{lem:site'})
and only contains points where $\zeta=0$.
Observe that $\mathcal{C}\cap\pi'=\mathcal{C}\cap\pi$ and that $\mathcal{C}\cap\pi'=\{z\in\pi'\,;\,\zeta(z)=1\}$
so that
\begin{eqnarray*} \#\{z\in\pi'\,;\,\zeta(z)=1\}< c_3\vert x-y\vert\,.
\end{eqnarray*}
Next turn $\pi'$ into an injective $d$-nearest-neighbor path, say $\pi''$, by suppressing loops
in $\pi'$. Clearly $\{z\in\pi''\,;\,\zeta(z)=1\}\subset\{z\in\pi'\,;\,\zeta(z)=1\}$
and therefore
\begin{eqnarray*} \#\{z\in\pi''\,;\,\zeta(z)=1\}< c_3\vert x-y\vert\,,
\end{eqnarray*}
a contradiction with part (i) of the Lemma.
\rule{.2cm}{.2cm}
\sigmaubsection{Proof of Lemma \ref{lem:holes}}
Lemma \ref{lem:holes} only deals with the geometry of percolation
clusters, with no reference to random walks. We will restate it as a
percolation lemma at the cost of changing a little our notation. In
order to make a distinction with a typical realization of an
environment for which we used the notation $\omega} \deltaef\oo{{\tilde{\omega}}mega$, we will use the
letters $\alphalpha$ or $\alphalpha'$ to denote typical realizations of a
percolation graphs. Thus one switches from the notation of the
following proof back to the notation of part \ref{sec:deduce} using
the following dictionary: \begin{eqnarray}n
\alphalpha(e)={\mathbf 1}_{\omega} \deltaef\oo{{\tilde{\omega}}(e)>0}\,&;&\, \alphalpha'(e)={\mathbf 1}_{\omega} \deltaef\oo{{\tilde{\omega}}(e)\gammaeq\xi}\\
q=Q(\omega} \deltaef\oo{{\tilde{\omega}}(e)>0)\,&;&\, p=Q(\omega} \deltaef\oo{{\tilde{\omega}}(e)\gammaeq\xi\,\vert\, \omega} \deltaef\oo{{\tilde{\omega}}(e)>0)\,.
\epsilonnd{eqnarray}n
This way taking $\xi$ close to $0$ is equivalent to taking $p$ close to $1$.
We very much rely on renormalization technics, see Proposition 2.1.
in \cite{kn:AP}.
As in the introduction, we identify a sub-graph of ${\mathbb Z}^d$ with an
application $\alpha:{\mathbb E}_d\rightarrow\{0,1\}$, writing $\alpha(x,y)=1$
if the edge $(x,y)$ is present in $\alpha$ and $\alpha(x,y)=0$
otherwise. Thus $\AAA=\{0,1\}^{{\mathbb E}_d}$ is identified with the set of
sub-graphs of ${\mathbb Z}^d$. Edges pertaining to $\alpha$ are then called {\it
open}. Connected components of such a sub-graph will be called {\it
clusters}.
Define now $Q$ to be the probability measure on $\{0,1\}^{{\mathbb E}_d}$ under which
the random
variables $(\alpha(e),\,e \in {\mathbb E}_d)$ are Bernoulli$(q)$ independent variables
with
\begin{eqnarray*} q>p_c.
\end{eqnarray*}
Then, $Q$ almost surely, the graph $\alpha$ has a unique infinite cluster denoted with
${\mathbb C}C(\alpha)$.
For a typical realization of the percolation graph under $Q$, say $\alpha$, let
$Q^\alpha$ be the law of bond percolation on ${\mathbb C}C(\alpha)$ with parameter $p$.
We shall denote $\alpha'$ a typical realization under $Q^\alpha$ i.e. $\alpha'$
is a random subgraph of ${\mathbb C}C(\alpha)$ obtained by keeping (resp. deleting) edges
with probability $p$ independently of each other.
We always assume that $p$ is close enough to $1$ so that $Q^\alpha$ almost surely
there is a unique infinite cluster in $\alpha'$ that we denote ${\mathbb C}C^\alpha(\alpha')$.
By construction ${\mathbb C}C^\alpha(\alpha')\subset {\mathbb C}C(\alpha)$.
Connected components of the complement of ${\mathbb C}C^\alpha(\alpha')$ in ${\mathbb C}C(\alpha)$ are
called {\it holes}.
We now restate Lemma \ref{lem:holes}:\\
{\it there exists $p_0<1$ such that for $p>p_0$, for $Q$ almost any $\alpha$, for
$Q^\alpha$ almost any $\alpha'$, for large enough $n$, then any hole
intersecting the box $[-n,n]^d$ has volume smaller than $(\log n)^a$.}
{\it Renormalization}: let $\alpha$ be a typical realization of percolation under $Q$.
Let $N$ be an integer. We chop ${\mathbb Z}^d$ in a disjoint union of boxes
of side length $2N+1$. Say ${\mathbb Z}^d=\cup_{{\mathbf i}\in{\mathbb Z}^d}B_{\mathbf
i}$, where $B_{\mathbf i}$ is the box of center $(2N+1){\mathbf i}$.
Following \cite{kn:AP}, let $B'_{\mathbf i}$ be the box of center
$(2N+1){\mathbf i}$ and side length $\frac 5 2 N +1$. From now on,
the word {\it box} will mean one of the boxes $B_{\mathbf i},
{\mathbf i}\in{\mathbb Z}^d$.
We say that a box $B_{\mathbf i}$ is {\it white} if
$B_{\mathbf i}$ contains at least one edge from $\alpha$
and the event $R_{\mathbf i}^{(N)}$ in equation (2.9) of
\cite{kn:AP} is satisfied. Otherwise, $B_{\mathbf i}$ is a {\it black} box.
We recall that the event $R_{\mathbf i}^{(N)}$ is defined by:
there is a unique cluster of $\alpha$ in $B'_{\mathbf i}$, say $K_{\mathbf i}$;
all open paths
contained in $B'_{\mathbf i}$ and of radius larger than $\frac 1 {10} N$
intersect $K_{\mathbf i}$ within $B'_{\mathbf i}$; $K_{\mathbf i}$ is crossing for
each subbox
$B\subset B'_{\mathbf i}$ of side larger than $\frac 1 {10} N$.
See \cite{kn:AP} for details.
We call $K_{\mathbf i}$ the {\it crossing cluster }
of $\alpha$ in the box $B_{\mathbf i}$.
Note the following consequences of this definition.
(Fact i) If $x$ and $y$ belong to the same white box $B_{\mathbf i}$ and
both $x$ and $y$ belong to the infinite cluster of $\alpha$, then
there is a path in ${\mathbb C}C(\alpha)$ connecting $x$ and $y$ within
$B'_{\mathbf i}$.
(Fact ii) Choose two neighboring indices $\mathbf i$ and $\mathbf j$
with $\vert {\mathbf i}-{\mathbf j}\vert=1$ and such that both boxes
$B_{\mathbf i}$ and $B_{\mathbf j}$ are white. As before, let
$K_{\mathbf i}$ and $K_{\mathbf j}$ be the crossing clusters in
$B_{\mathbf i}$ and $B_{\mathbf j}$ respectively. Let $x\in
K_{\mathbf i}$ and $y\in K_{\mathbf j}$. Then there exists a path in
$\alpha$ connecting $x$ and $y$ within $B'_{\mathbf i}\cup
B'_{\mathbf j}$.
We call {\it renormalized} process the random subsets of ${\mathbb Z}^d$ obtained
by taking the image of the initial percolation model by the application $\phi_N$,
see equation (2.11) in \cite{kn:AP}. A site $\mathbf i \in{\mathbb Z}^d$ is thus declared
{\it white} if the box $B_{\mathbf i}$ is white.
Let $\mathbf Q$ be the law of the renormalized process. The comparison result of
Proposition 2.1 in \cite{kn:AP} states that $\mathbf Q$ stochastically dominates
the law of site percolation with parameter $p(N)$ with
$p(N)\rightarrow 1$ as $N$ tends to $\infty$.
We now introduce the extra percolation $Q^\alpha$. Let us call {\it grey} a white
box $B_{\mathbf i}$ that contains
an edge $e\in {\mathbb C}C(\alpha)$ such that $\alpha'(e)=0$. We call
{\it pure white} white boxes that are not grey.
Let $\mathbf Q'$ be the law on subsets of the renormalized grid
obtained by keeping pure white boxes, and deleting both black and
grey boxes. We claim that $\mathbf Q'$ dominates the law of site
percolation with parameter $p'(N)=p(N) p^{\,e_N(d)}$ where $e_N(d)$
is the number of edges in a box of side length $2N+1$. (Remember
that $p$ is the parameter of $Q^\alpha$.) This claim is a
consequence of the three following facts. We already indicated that
$\mathbf Q$ stochastically dominates the law of site percolation
with parameter $p(N)$. The conditional probability that a box
$B_{\mathbf i}$ is pure white given it is white is larger or equal
than $p^{\,e_N(d)}$. Besides, still under the condition that
$B_{\mathbf i}$ is white, the event `$B_{\mathbf i}$ is pure white'
is independent of the colors of the other boxes.
We further call {\it immaculate} a pure white box $B_{\mathbf i}$
such that any box $B_{\mathbf j}$ intersecting $B'_{\mathbf i}$ is
also pure white. Call $\mathbf Q''$ the law on subsets of the
renormalized grid obtained by keeping only immaculate boxes. Since
the event `$B_{\mathbf i}$ is immaculate' is an increasing function
with respect to the percolation process of pure white boxes, we get
that $\mathbf Q''$ stochastically dominates the law of site
percolation with parameter $p''(N)=p'(N)^{3^d}$.
{\it End of the proof of Lemma \ref{lem:holes}}: choose $p_0$ and $N$
such that $p''(N)$ is close enough to $1$
so that, $\mathbf Q''$ almost surely,
there is an infinite cluster of immaculate boxes that we call ${\mathbb C}$.
For $\mathbf i\in{\mathbb C}$, let $K_{\mathbf i}$ be the crossing cluster in
the box $B_{\mathbf i}$ and let $K=\cup_{\mathbf i\in{\mathbb C}}K_{\mathbf i}$.
Then $K$ is connected (this follows from the definition of white
boxes, see (Fact i) and (Fact ii) above) and infinite (because ${\mathbb C}$
is infinite). Thus we have $K\subset {\mathbb C}C^\alpha(\alpha')$.
Let $A$ be a hole and let $\mathbf A$ be the set of indices $\mathbf i$ such that
$B_{\mathbf i}$
intersects $A$. Observe that $\mathbf A$ is connected.
We claim that $$\mathbf A\cap{\mathbb C}=\emptyset\,.$$
Indeed, assume there exists $x\in B_{\mathbf i}$ such that $\mathbf
i\in {\mathbb C}$ and $x\in A$. By definition $A$ is a subset of
${\mathbb C}C(\alpha)$ and therefore $x\in{\mathbb C}C(\alpha)$. Let $y\in K_{\mathbf
i}$, $y\not=x$. As we already noted $y\in{\mathbb C}C^\alpha(\alpha')$. Since
$x\in {\mathbb C}C(\alpha)$ and $y\in{\mathbb C}C(\alpha)$ there is a path, $\pi$,
connecting $x$ and $y$ within $B'_{\mathbf i}$, see (Fact i) above.
But $B_{\mathbf i}$ is immaculate and therefore $B'_{\mathbf i}$
only contains edges $e$ with $\alpha'(e)=1$. Therefore all edges
along the path $\pi$ belong to $\alpha'$ which implies that
$x\in{\mathbb C}C^\alpha(\alpha')$. This is in contradiction
with the assumption that $x\in A$.
We have proved that $\mathbf A\cap{\mathbb C}=\emptyset$.
To conclude the proof of Lemma \ref{lem:holes}, it only remains to choose $p_0$ and
$N$
such that $p''(N)\geq p_2$ and apply Lemma \ref{lem:site'}. We deduce that the
volume of
$\mathbf A$ is bounded by $(\log n)^{5/2}$ and therefore the volume of $A$ is smaller
than $(2N+1)^d (\log n)^{5/2}$.
\rule{.2cm}{.2cm}
\subsection{Deviation of the chemical distance} \label{sec:deviation}
We use the same notation as in the preceding section. For given
realizations of the percolations $\alpha$ and $\alpha'$, we define
the corresponding {\it chemical distance} $d^\alpha_{\alpha'}$ on
${\mathbb C}C^\alpha(\alpha')$: two points $x\not=y$ in ${\mathbb C}C^\alpha(\alpha')$
satisfy $d^\alpha_{\alpha'}(x,y)=1$ if and only if one (at least) of the following
two conditions is satisfied: either $x$ and $y$ are neighbors in ${\mathbb Z}^d$ and
$\alpha'(x,y)=1$ or both $x$ and $y$ are at the boundary of a hole $h$
i.e. there is a hole $h$ and $x',y'\in h$ such that $x'$ is a neighbor
of $x$ and $y'$ is a neighbor of $y$.
In general, $d^\alpha_{\alpha'}(x,y)$ is defined as the smallest integer $k$
such that there exists a sequence of points $x_0,...,x_k$ in ${\mathbb C}C^\alpha(\alpha')$
with $x_0=x$, $x_k=y$ and
such that $d^\alpha_{\alpha'}(x_j,x_{j+1})=1$ for all $j$.
\begin{lm}\label{lem:deviation}
There exists $p_4<1$ such that for $p>p_4$, there exist constants
$c^+$ and $c^-$ such that for $Q$ almost any $\alpha$, for
$Q^\alpha$ almost any $\alpha'$, for large enough $n$, then
\begin{eqnarray}\label{eq:deviation} c^- \vert x-y\vert\leq
d^\alpha_{\alpha'}(x,y) \leq c^+ \vert x-y\vert\,,\end{eqnarray} for any
$x,y\in{\mathbb C}C^\alpha(\alpha')$ such that $x\in[-n,n]^d$ and $\vert
x-y\vert\geq (\log n)^2$.
\end{lm}
{\it Proof}: let $d^\alpha(x,y)$ be the chemical distance between
$x$ and $y$ within ${\mathbb C}C(\alpha)$ i.e. $d^\alpha(x,y)$ is the minimal
length of a path from $x$ to $y$, say $\pi$, such that any edge
$e\in\pi$ satisfies $\alpha(e)=1$. \\
Applying Theorem 1.1 in \cite{kn:AP} together with the
Borel-Cantelli Lemma, we deduce that there exists a constant $c^+$
such that $d^\alpha(x,y) \leq c^+ \vert x-y\vert$ for any
$x,y\in{\mathbb C}C(\alpha)$ such that $x\in[-n,n]^d$ and $\vert x-y\vert\geq
(\log n)^2$. Since $d^\alpha_{\alpha'}(x,y) \leq d^\alpha(x,y)$, it gives
the upper bound in (\ref{eq:deviation}).
We now give a proof of the lower bound.
As for Lemma \ref{lem:holes}, we use a renormalization argument.
The notation used below is borrowed from the proof of Lemma \ref{lem:holes}
except that the role of $p_0$ is now played by $p_4$.
We wish to be able to apply Lemma \ref{lem:site''} (ii) to the renormalized site percolation model
with law $\mathbf Q''$ (i.e. the percolation model of immaculate boxes):
therefore we choose $p_4$ and $N$ such that $p''(N)\geq p_3$ and observe that the event considered in
Lemma \ref{lem:site''} (ii) is increasing.
Consider two points $x$ and $y$ as in Lemma \ref{lem:deviation}
and let $\pi$ be an injective path from $x$ to $y$ within ${\mathbb C}C(\alpha)$. We shall prove that
\begin{eqnarray}\label{eq:5.3.1}
\#{\mathbb E}E_\pi\geq c_5\vert x-y\vert\,,\end{eqnarray}
where ${\mathbb E}E_\pi=\{z,z'\in\pi\cap{\mathbb C}C^\alpha(\alpha')\,;\, \alpha'(z,z')=1\}$.
By construction of the chemical distance $d^\alpha_{\alpha'}$,
(\ref{eq:5.3.1}) implies the lower bound in (\ref{eq:deviation})
with $c^-=c_5$.
Let ${\mathbb P}i'$ be the sequence of the indices of the boxes $B_{\mathbf i}$ that $\pi$
intersects. At the level of the renormalized grid, ${\mathbb P}i'$ is a nearest-neighbor
path from $\mathbf i_0$ to $\mathbf i_k$ with $x\in B_{\mathbf i_0}$
and $y\in B_{\mathbf i_k}$.
Let ${\mathbb P}i=({\mathbf i}_0,...,{\mathbf i}_k)$ be the injective path
obtained by suppressing loops in ${\mathbb P}i'$.
We may, and will, assume that $n$ is large enough so that
$\mathbf i_0\not= \mathbf i_k$ so that $\vert \mathbf i_0-\mathbf i_k \vert$ and $\vert x-y\vert$ are comparable.
Applying Lemma \ref{lem:site''} (ii) to $\mathbf Q''$, we get that
\begin{eqnarray}\label{eq:5.3.2}\# ({\mathbb C}\cap{\mathbb P}i)\geq c_3\vert \mathbf i_0-\mathbf i_k \vert
\geq c'_3\vert x-y\vert\,,\end{eqnarray}
for some constant $c'_3$.
Let ${\mathbf i}\in {\mathbb C}\cap{\mathbb P}i$ and choose $z\in B_{\mathbf i}\cap\pi$.
Since the path $\pi$ is not entirely contained in one box,
it must be that $\pi$ connects $z$ to some point
$z'\notin B_{\mathbf i}$. Since $z'\in\pi$, we also have $z'\in {\mathbb C}C(\alpha)$.
By definition of a white box, it
implies that $z\in K_{\mathbf i}$.
Since ${\mathbf i}\in {\mathbb C}$, it implies that actually $z\in K$ and therefore
$z\in {\mathbb C}C^\alpha(\alpha')$. As a matter of fact, since the box $B_{\mathbf i}$ is pure
white, we must have $\alpha'=1$ on all the edges of $\pi$ from $z$ to $z'$.
In particular $z$ has a neighbor in ${\mathbb C}C(\alpha)$, say $z''$,
such that $\alpha'(z,z'')=1$. Therefore $(z,z'')\in{\mathbb E}E_\pi$.
We conclude that any index in ${\mathbb C}\cap{\mathbb P}i$
gives a contribution of at least $1$ to $\#{\mathbb E}E_\pi$.
Therefore (\ref{eq:5.3.2}) implies that $$ \#{\mathbb E}E_\pi\geq c'_3\vert x-y\vert\,.$$
\rule{.2cm}{.2cm}
\begin{thebibliography}{xxxxxx 89}
\bibitem{kn:AP} Antal P., Pisztora A.~(1996)\\
On the chemical distance for supercritical Bernoulli percolation\\
{\em Ann.~Probab.}~{\bf 24}, 1036-1048.
\bibitem{kn:Ba} Barlow, M.T.~(2004)\\
Random walks on supercritical percolation clusters\\
{\em Ann.~Probab.}~{\bf 32}, 3024-3084.
\bibitem{kn:Bass} Bass, R.F.~(2002)\\
On Aronson's upper bounds for heat kernels.\\
{\em Bull.~London~Math.~Soc.}~{\bf 34}, 415-419.
\bibitem{kn:BB} Berger, N., Biskup, M.~(2007)\\
Quenched invariance principle for simple random walk
on percolation clusters. \\
{\em Prob.~Th.~Rel.~Fields}~{\bf 137}, 83-120.
\bibitem{kn:BBHK} Berger, N., Biskup, M., Hoffman, C., Kozma, G.~(2006)\\
Anomalous heat kernel decay for random walk among bounded random conductances.
To appear in {\em Ann.~Inst.~Henri~Poincar\'e}.
\bibitem{kn:Bill} Billingsley, P.~(1968) \\
{\em The convergence of probability measures.}\\
John Wiley, New York.
\bibitem{kn:BiPres} Biskup, M., Prescott, T.M.~(2007)\\
Functional CLT for random walk among bounded random conductances.\\
Preprint 2007.
\bibitem{kn:DFGW} De Masi, A., Ferrari, P., Goldstein, S., Wick, W.D.~(1989)\\
An invariance principle for reversible Markov
processes. Applications to random motions in random environments\\
{\em Journ.~Stat.~Phys.}~{\bf 55}~(3/4), 787-855.
\bibitem{kn:DP} Deuschel, J-D., Pisztora A.~(1996)\\
Surface order deviations for high density percolation.\\
{\em Prob.~Th.~Rel.~Fields}~{\bf 104}, 467-482.
\bibitem{kn:EK} Ethier, S.N., Kurtz, T.G.~(1986)\\
{\em Markov processes}\\
John Wiley, New York.
\bibitem{kn:FM} Fontes, L.R.G., Mathieu, P.~(2006)\\
On symmetric random walks with random conductances on ${\mathbb Z}^d$\\
{\em Prob.~Th.~Rel.~Fields}~{\bf 134}, 565-602.
\bibitem{kn:G} Grimmett, G.~(1999)\\
{\em Percolation}\\
Springer-Verlag, Berlin (Second edition).
\bibitem{kn:JS} Jacod, J., Shiryaev, A.N.~(1987)\\
{\em Limit theorems for stochastic processes}\\
Springer-Verlag, Berlin.
\bibitem{kn:Ko} Kozlov, S.M.~(1985)\\
The method of averaging and walks in inhomogeneous environments\\
{\em Russian~Math.~Surveys}~{\bf 40}~(2), 73-145.
\bibitem{kn:MP} Mathieu, P., Piatnitski, A.L.~(2007)\\
Quenched invariance principles for random walks on percolation clusters.\\
{\em Proc.~R.~Soc.~A}~{\bf 463}, 2287-2307.
\bibitem{kn:MR} Mathieu, P., Remy, E.~(2004)\\
Isoperimetry and heat kernel decay on percolations clusters\\
{\em Ann.~Probab.}~{\bf 32}, 100-128.
\bibitem{kn:Nash} Nash, J.~(1958)\\
Continuity of solutions of parabolic and elliptic equations\\
{\em Amer.~J.~Math.}~{\bf 80}, 931-954.
\bibitem{kn:SS} Sidoravicius, V., Sznitman, A-S.~(2004)\\
Quenched invariance principles for walks on clusters of percolation
or among random conductances\\
{\em Prob.~Th.~Rel.~Fields}~{\bf 129}, 219-244.
\end{thebibliography}
\end{document}
\begin{document}
\begin{frontmatter}
\title{\emph{OptQC}: An optimised parallel quantum compiler}
\author{T. Loke, J. B. Wang \footnote{corresponding author: jingbo.wang@uwa.edu.au} and Y.H. Chen}
\address{School of Physics, The University of Western Australia, 6009 Perth, Australia}
\begin{abstract}
The software package \emph{Qcompiler} \cite{chen_qcompiler:_2013} provides a general quantum compilation framework, which maps any given unitary operation into a quantum circuit consisting of a sequential set of elementary quantum gates. In this paper, we present an extended software \emph{OptQC}, which finds permutation matrices $P$ and $Q$ for a given unitary matrix $U$ such that the number of gates in the quantum circuit of $U = Q^TP^TU'PQ$ is significantly reduced, where $U'$ is equivalent to $U$ up to a permutation and the quantum circuit implementation of each matrix component is considered separately. We extend further this software package to make use of high-performance computers with a multiprocessor architecture using MPI. We demonstrate its effectiveness in reducing the total number of quantum gates required for various unitary operators.
\noindent\textbf{Program summary}
\noindent \emph{Program title}: \emph{OptQC} \\
\noindent \emph{Catalogue identifier}: \\
\noindent \emph{Program summary}: URL: http://cpc.cs.qub.ac.uk/summaries/ \\
\noindent \emph{Program obtainable from}: CPC Program Library, Queens University, Belfast, N. Ireland \\
\noindent \emph{Licensing provisions}: Standard CPC licence, http://cpc.cs.qub.ac.uk/licence/licence.html \\
\noindent \emph{Distribution format}: tar.gz \\
\noindent \emph{Programming language}: Fortran, MPI \\
\noindent \emph{Computer}: Any computer with Fortran compiler and MPI library. \\
\noindent \emph{Operating system}: Linux \\
\noindent \emph{Classification}: \\
\noindent \emph{Nature of problem}: It aims to minimise the number of quantum gates required to implement a given unitary operation. \\
\noindent \emph{~Solution method}: It utilises a threshold-based acceptance strategy for simulated annealing to select permutation matrices $P$ and $Q$ for a given unitary matrix $U$ such that the number of gates in the quantum circuit of $U = Q^TP^TU'PQ$ is minimised, where $U'$ is equivalent to $U$ up to a permutation. The decomposition of a unitary operator is performed by recursively applying the cosine-sine decomposition. \\
\noindent \emph{~Running time}: Running time increases with the size of the unitary matrix, as well as the prescribed maximum number of iterations for qubit permutation selection and the subsequent simulated annealing algorithm. Running time estimates are provided for each example in Section \ref{sec:res}. All simulation results presented in this paper are obtained from running the program on the Fornax supercomputer managed by iVEC@UWA with Intel Xeon X5650 CPUs.\\
\end{abstract}
\end{frontmatter}
\section{Introduction}
Quantum computation aims to solve problems that are classically intractable by harnessing intricate quantum correlations between densely encoded states in quantum systems \cite{nielsen_quantum_2011}. A well-known example is Shor's algorithm for the factorisation of numbers \cite{shor_polynomial-time_1997, vandersypen_experimental_2001}. Quantum algorithms are designed to be implemented on quantum computers by means of a quantum circuit, which consists of qubits and quantum gates. It is therefore of vital importance to be able to obtain a quantum circuit representation for any given quantum algorithm (which is always described by a unitary matrix) in terms of an elementary set of quantum gates - this role is that of a quantum compiler.
Barenco \emph{et al} and Deutsch \emph{et al} \cite{barenco_elementary_1995, deutsch_universality_1995} proved that any arbitrarily complex unitary operation can be implemented by a quantum circuit involving only one- or two-qubit elementary quantum logic gates. Earlier studies applied the standard triangularisation or QR-factorisation scheme with Givens rotations and Gray codes to map a quantum algorithm to a series of elementary gate operations \cite{barenco_elementary_1995, deutsch_universality_1995, cybenko_reducing_2001, nielsen_quantum_2011}. Several research groups subsequently proposed and utilised a more efficient and versatile scheme based on the cosine-sine decomposition \cite{tucci_rudimentary_1999, mottonen_quantum_2004, bergholm_quantum_2005, khan_synthesis_2006, manouchehri_quantum_2009}. De Vos \emph{et al} \cite{de_vos_multiple-valued_2009, alexis_de_vos_reversible_2012} looked into another decomposition scheme, namely the Birkhoff decomposition, which was found to provide simpler quantum circuits for certain types of unitary matrices than the cosine-sine decomposition. However, the Birkhoff decomposition does not work for general unitary matrices.
More recently, Chen and Wang \cite{chen_qcompiler:_2013} developed a general quantum compiler package written in Fortran, entitled the \emph{Qcompiler}, which is based on the cosine-sine decomposition scheme and works for arbitrary unitary matrices. The number of gates required to implement a general $2^n$-by-$2^n$ unitary matrix using the CSD method scales as O($4^n$) \cite{mottonen_quantum_2004, manouchehri_quantum_2009}. In other words, the number of gates scales exponentially with the number of qubits. Thus, in any practical application of the CSD method to decomposing matrices, it is of considerable interest to reduce the number of gates required as much as possible.
In this work, we adopt the CSD method due to the reasons outlined above, and we split the unitary matrix $U$ into an equivalent sequence of unitaries with the aim of reducing the number of gates required to implement the entire sequence of unitaries. In general, this means writing $U$ as a sequence of $s$ unitaries, i.e. $U = U_s U_{s-1} \ldots U_1$. At first glance, this seems counterintuitive, since if we were to apply the CSD to each unitary, this would increase the scaling of the number of gates required to O($4^ns$), which is undesirable. However, we note that (1) certain $U_i$ can be decomposed more efficiently than CSD such as qubit permutation matrices; and (2) some matrices require only a few gates when separately decomposed using the CSD method.
This paper is organised as follows. Section \ref{sec:oa} describes in detail our approach for reducing the number of gates required to implement any given unitary matrix $U$. Section \ref{sec:po} details our developed program, called \emph{OptQC}, that uses the methods described in section \ref{sec:oa} to reduce the number of gates required to implement any given unitary matrix. Some sample results using the program are given in section \ref{sec:res}, and then we discuss our conclusions and possible future work in section \ref{sec:cafw}.
\section{Our approach}
\label{sec:oa}
Suppose we are given an $m$-by-$m$ (where $m=2^n$) unitary matrix $U$. As mentioned above, we are interested in splitting $U$ into a sequence of unitaries with the aim of reducing the total number of gates required to implement the entire sequence. One means of splitting $U$ into an equivalent sequence of unitaries is by using permutation matrices. A permutation matrix is a square binary matrix that contains, in each row and column, precisely a single 1 with 0s everywhere else. For any permutation matrix $P$, its corresponding inverse is $P^{-1} = P^T$. For convenience, we also define an equivalent representation of permutations using lists - a permutation list $p$ (lowercase) is equivalent to the permutation matrix $P$ (uppercase) by the relation:
\begin{equation}
\left( P \right)_{i,j} = \delta_{p\llbracket i \rrbracket, j} ,
\label{eqn:pltom}
\end{equation}
where $\delta$ is the Kronecker delta function, and $p\llbracket i \rrbracket$ denotes the $i$th list element of $p$. For example, the permutation list $p = \{2,1,4,3\}$ corresponds to the 4-by-4 permutation matrix:
\begin{equation*}
P =
\mat{cccc}
{
0 & 1 & 0 & 0 \\
1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0
} .
\end{equation*}
Now define $ \mbox{CSD}(M) $ to be the number of gates required to implement the unitary matrix $M$ according to the CSD method. If we were to write $U$ as $U = P^T U' P$ (where $U'$ is equivalent to $U$ up to a permutation), then we find that $ \mbox{CSD}(U) \neq \mbox{CSD}(U') + \mbox{CSD}(P) + \mbox{CSD}(P^T) $ in general (note also that $\mbox{CSD}(P) \neq \mbox{CSD}(P^T)$). The general aim is thus to find a $P$ that minimises the total cost function $ \mbox{CSD}(U') + \mbox{CSD}(P) + \mbox{CSD}(P^T) $, with the obvious restriction that it has to be less than $ \mbox{CSD}(U)$.
In our approach, we write $U$ as $U = Q^T P^T U' P Q$, where $P$ and $Q$ are both permutation matrices, and $U'$ is equivalent to $U$ up to a permutation. In general, $P$ is allowed to be any permutation matrix ($m!=(2^n)!$ permutations possible), but $Q$ is restricted to a class of permutation matrices that correspond to qubit permutations (only $n!$ permutations possible). The advantage of this approach is that qubit permutations can be easily implemented using a sequence of swap gates - so for a system with $n$ qubits, it requires at most $n-1$ swap gates to implement any qubit permutation. This also enables the program (in the parallel version) to start different threads at different points in the search space of $m!$ permutations by using different qubit permutations.
Let $ s_{num}(Q) $ be the number of swap gates required to implement a qubit permutation matrix $Q$. Note that $s_{num}(Q) = s_{num}(Q^T)$, since the reverse qubit permutation would just be the same swap gates applied in reverse order. The total cost function $c_{num}$ of implementing a given unitary $U = Q^T P^T U' P Q$ is then:
\begin{equation}
c_{num}(U) = \mbox{CSD}(U') + \mbox{CSD}(P) + \mbox{CSD}(P^T) + 2 s_{num}(Q) .
\end{equation}
To make the dependencies in this function clear, we write this as:
\begin{equation}
c_{num}(U,P,Q) = \mbox{CSD}(P Q U Q^T P^T) + \mbox{CSD}(P) + \mbox{CSD}(P^T) + 2 s_{num}(Q) ,
\label{eqn:costf}
\end{equation}
which is the function that we aim to minimise with respect to $P$ and $Q$.
\section{Program outline}
\label{sec:po}
We have developed a Fortran program, called \emph{OptQC}, which reads in a unitary matrix $U$, minimises the total cost function $ c_{num}(U,P,Q) $, and outputs a quantum circuit that implements $U$. A significant portion of this program is based on the CSD code provided by the LAPACK library \cite{anderson_lapack_1999} and the recursive procedure implemented in \emph{Qcompiler}, developed by Chen and Wang \cite{chen_qcompiler:_2013}. As with \emph{Qcompiler}, the new \emph{OptQC} program has two different branches, one treating strictly real unitary (i.e. orthogonal) matrices, and another treating arbitrary complex unitary matrices, with the former generally providing a circuit that is half the size of the latter \cite{chen_qcompiler:_2013}.
Note that the CSD procedure requires the round up of the matrix dimension to the closest power of two, i.e. the dimension used is
\begin{equation}
m' = 2^{\lceil {log}_2 m \rceil}.
\label{eqn:nd}
\end{equation}
\noindent The expanded unitary operator $\bar{U} $ is an $m'$-by-$m'$ matrix, where
\begin{equation}
\left( \bar{U} \right)_{i,j} = \left\{
\begin{array}{ll}
\left( U \right)_{i,j} & : i \leq m, j \leq m \\
\delta_{i,j} & : \mbox{otherwise}
\end{array}
\right.
\label{eqn:Uexpand}
\end{equation}
\noindent which we will subsequently treat as the unitary $ U $ to be optimised via permutations.
\begin{figure}
\caption{Flowchart overview of the serial version of \emph{OptQC}.}
\label{fig:fchart}
\end{figure}
In the following subsections we describe the key procedures in \emph{OptQC}, depicted in Figure \ref{fig:fchart}, which serve to progressively reduce the total cost function $ c_{num}(U,P,Q) $. We first detail the serial version of the program, followed by an extension to a parallel architecture using MPI.
\subsection{Selection of qubit permutation}
\label{sec:initialq}
Qubit permutations are a class of permutations that are expressible in terms of a reordering of qubits, which can be efficiently implemented using swap gates that serve to interchange qubits. Recalling that $U$ is of dimensions $m$-by-$m$ (where $m=2^n$), this implies that there are only $n!$ qubit permutations possible for a given $U$. A qubit permutation can be expressed as a list $q$ (lowercase) of length $n$, or as a permutation matrix $Q$ (uppercase) of dimensions $m$-by-$m$. A qubit permutation of length $n$ requires at most $n-1$ swap gates.
The selection of the qubit permutation matrix $Q$ is done by varying $Q$ and computing the corresponding change in the cost function $c_{num}$, while holding $P$ constant as the identity matrix $I$. An example implementation of the $n=3$ qubit permutation $q = \{ 3, 1, 2 \}$ is shown in Figure \ref{fig:swapeg}. By considering how the basis states are mapped to each other by $q$, a regular permutation list $\bar{q}$ of length $m$ can be readily constructed from $q$, and then we use the relation between permutation lists and permutation matrices (see Eq.~(\ref{eqn:pltom})) to obtain $Q$ from $\bar{q}$.
\begin{figure}
\caption{Example implementation of qubit permutation $q = \{ 3, 1, 2 \}$.}
\label{fig:swapeg}
\end{figure}
We start the program with an identity qubit permutation $ q $, i.e. $ q\llbracket i \rrbracket = i $ (corresponding to $ Q = I $), and compute the corresponding cost of implementation $c_{num}(U,I,Q)$. Then, for some prescribed number of iterations $j_{max}$, we generate a random qubit permutation $ q' $ each time and compute the new cost as $c_{num}(U,I,Q')$. If the new cost is lower than the initial cost (recorded by $c_{num}(U,I,Q)$), the current qubit permutation $q$ is replaced by $q'$. Figure \ref{fig:qsfchart} shows a flowchart overview of the qubit selection procedure. After this procedure, we have an optimised qubit permutation matrix $Q$, which will remain unchanged while we find the unrestricted permutation matrix $P$ in the next section through a simulated annealing process.
\begin{figure}
\caption{Flowchart overview of the qubit selection procedure.}
\label{fig:qsfchart}
\end{figure}
\subsection{Simulated annealing}
\label{sec:simann}
Here, we aim to find an optimal permutation $p'$ such that $ c_{num}(U,P',Q) < c_{num}(U,P,Q) $ in the discrete search space of all $m!$ permutations. Given the massive size of the search space, use of a heuristic optimisation method is practically necessary. Simulated annealing is one such method for finding a minimum in a discrete search space. In the OptQC program, we adopt a threshold acceptance based simulated annealing algorithm. There are three key components to the algorithm:
\begin{enumerate}
\item Cost function: the function to be minimised, i.e. $ c_{num}(U,P,Q) $.\\
\item Neighbourhood operator: the procedure that alters the current solution slightly by altering the current permutation $p$ to a slightly different permutation $p'$. Our neighbourhood operator acts to interchange any two \emph{random} positions in $p$ to form $p'$. \\
\item Threshold value: any `bad' trades (increase in cost function) that are below some threshold value $\beta$ are accepted, otherwise they are rejected. We define the threshold value as $\beta(P,Q) = \mbox{min}\left(\lceil \alpha c_{num}(U,P,Q) \rceil,\lceil \alpha c_{num}(U,I,Q) \rceil\right)$, where $ 0 \leq \alpha < 1 $. As such, the threshold value is taken to be the proportion $\alpha$ of the current number of gates (with a fixed maximum value of the proportion $\alpha$ of the initial number of gates to ensure that $\beta(P,Q)$ cannot grow arbitrarily large).
\end{enumerate}
We start with $p$ as the identity permutation. By iterating the neighbourhood operator and evaluating the subsequent change in the number of gates, we accept the change in the permutation if it reduces the number of gates, or if the increase in the number of gates is below the threshold $\beta$. After some prescribed number of iterations $i_{max}$, we terminate the simulated annealing procedure, returning the permutation $p_{min}$ that provides the minimum number of gates. Figure \ref{fig:safchart} shows a flowchart overview of the simulated annealing procedure. Note that $p_{min}$ is not necessarily the permutation $p$ at the end of $i_{max}$ iterations - rather, we keep track of $p_{min}$ separately during the procedure.
\begin{figure}
\caption{Flowchart overview of the simulated annealing procedure.}
\label{fig:safchart}
\end{figure}
\subsection{Gate reduction procedure}
Here, we focus on reducing the number of gates in some prescribed quantum circuit by combining `similar' gates. In a quantum circuit, we can combine CUGs (controlled unitary gates) that apply the same unitary operation $U_{op}$ to the same qubit, with all but one of the conditionals of the CUGs being the same. This reduction process is carried out after every application of the CSD method to a matrix - in particular, it is applied three times (to $PQUQ^TP^T$, $P$ and $P^T$ respectively) when computing $c_{num}(U,P,Q)$ (see Eq.~(\ref{eqn:costf})). While it does impose a significant computational overhead, it gives a better reflection of the true cost function, since the reduced circuit is the circuit that one would use for implementation. Figure \ref{fig:red} shows an example result of applying the reduction procedure to a quantum circuit.
\begin{figure}
\caption{Example of applying the reduction procedure to a quantum circuit.}
\label{fig:bred}
\label{fig:rred}
\label{fig:red}
\end{figure}
\subsection{MPI parallelisation}
\label{sec:mpiparallel}
The program described above can be readily extended to a parallel architecture using MPI. Since the neighbourhood operator in the simulated annealing procedure acts to interchange any two random positions, it follows that if the random number generator is seeded differently, then a different set of positions would be interchanged, i.e. a different search through the space of permutations would be conducted. Similarly, the qubit permutation that is generated would also change when seeded differently, which enables the program to start threads at multiple locations in the search space of $m!$ permutations, so that the search procedure explores as much of the permutation space as possible. We do, however, restrict the root thread (thread index 0) of the program to use the identity qubit permutation for comparison purposes. Hence, using MPI, we can spawn a team of threads that simultaneously searches through the space of permutations independently and differently (by seeding the random number generator of each thread differently), and then collate the results to pick out the thread with the most optimal permutation, that is, it has the lowest $c_{num}(U,P,Q)$ value.
\section{Results}
\label{sec:res}
We now apply the software program \emph{OptQC} to various unitary operations to obtain corresponding optimised quantum circuits. All the results shown here are obtained using parameters $i_{max} = 40000$, $j_{max} = 1000$ and $\alpha = 0.01$ (we choose this $\alpha$ value because it provides, on average, the best results for the unitary operators being considered in this paper). Using these parameters, we run \emph{OptQC} on the supercomputer Fornax with Intel Xeon X5650 CPUs, managed by iVEC@UWA, using 8 nodes with 12 cores on each (i.e. 96 threads).
\subsection{Real unitary matrix}
A random real unitary (i.e. orthogonal) matrix is given below:
\begin{equation*}
U =
\left(
\begin{array}{cccccccc}
0.0438 & 0 & 0 & 0 & 0.9990 & 0 & 0 & 0 \\
0.1297 & 0.8689 & -0.2956 & 0 & -0.0057 & 0.1538 & -0.3423 & 0 \\
-0.2923 & 0 & 0.6661 & 0 & 0.0128 & 0 & -0.6861 & 0 \\
-0.0061 & -0.0412 & 0.0140 & 0.7058 & 0.0003 & 0.3008 & 0.0162 & -0.6397 \\
0.9147 & 0 & 0.4021 & 0 & -0.0401 & 0 & 0 & 0 \\
0.0185 & 0.1242 & -0.0422 & 0.3961 & -0.0008 & -0.9073 & -0.0489 & 0 \\
0.2424 & -0.4762 & -0.5524 & 0 & -0.0106 & 0 & -0.6397 & 0 \\
0.0051 & 0.0343 & -0.0117 & -0.5874 & -0.0002 & -0.2503 & -0.0135 & -0.7686 \\
\end{array}
\right)
\end{equation*}
Note that this matrix is not completely filled, otherwise no reduction via permutations would generally be possible. By using \emph{OptQC}, the reduction process gives the following results for the thread which achieves the optimal solution:
\begin{itemize}
\itemsep0em
\item No optimisation: $c_{num}(U,I,I) = 29$ gates
\item After selection of an optimised qubit permutation $q$: $c_{num}(U,I,Q) = 1+ 26 + 1 = 28$ gates
\item After simulated annealing process for the permutation $p$: $c_{num}(U,P,Q) = 1 + 2+ 16 + 2 + 1 = 22$ gates
\end{itemize}
Hence, we achieve a reduction of $\sim 25\%$ from the original number of gates. Figure \ref{fig:randrealcir} shows a comparison between the original and optimised circuit for $U$. Runtime for this calculation is $\sim 14.5$ seconds.
\begin{figure}
\caption{Result of quantum circuit optimisation as performed by \emph{OptQC}.}
\label{fig:randrealcir1}
\label{fig:randrealcir2}
\label{fig:randrealcir}
\end{figure}
\subsection{Quantum walk operators}
One important class of unitary operators are quantum walk operators - in particular, discrete-time quantum walk (DTQW) step operators \cite{kempe_quantum_2003, berry_two-particle_2011, loke_efficient_2012}. For a given undirected graph $ G(V,E) $, defined by a vertex set $V$ and edge set $E$, we can define the DTQW step operator $ U = SC $, where $S$ and $C$ are the shifting and coin operators respectively. The shifting operator acts to swap coin states that are connected by an edge, and the coin operator acts to mix the coin states at each individual vertex.
\subsubsection{8-star graph}
\begin{figure}
\caption{The 8-star graph}
\label{fig:S8graph}
\end{figure}
The 8-star graph (shown in Figure \ref{fig:S8graph}) is a graph with 1 centre vertex connected to 8 leaf vertices by undirected edges. Using the Grover coin operator, the resulting quantum walk operator on this graph corresponds to a 16-by-16 real unitary matrix, as given below:
\begin{equation*}
U =
\left(
\begin{array}{cccccccccccccccc}
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 & 0.25 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & 0.25 & -0.75 \\
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
\end{array}
\right)
\end{equation*}
By using \emph{OptQC}, the reduction process gives the following results for the thread which achieves the optimal solution:
\begin{itemize}
\itemsep0em
\item No optimisation: $c_{num}(U,I,I) = 34$ gates
\item After selection of an optimised qubit permutation: $c_{num}(U,I,Q) = 27$ gates
\item After simulated annealing process to select a permutation $p$: $c_{num}(U,P,Q) = 0 + 2 + 19 + 2 + 0 = 23$ gates
\end{itemize}
Hence, we achieve a reduction of $\sim 32\%$ from the original number of gates. Figure \ref{fig:S8cir} shows the optimised circuit obtained for $U$. Runtime for this calculation is $\sim 47$ seconds.
\begin{figure}
\caption{Optimised circuit (with 23 gates) for the quantum walk operator of the 8-star graph.}
\label{fig:S8cir}
\end{figure}
\subsubsection{3\textsuperscript{rd} generation 3-Cayley tree}
The 3\textsuperscript{rd} generation 3-Cayley tree (abbreviated as the 3CT3 graph) is a tree of 3 levels in which all interior nodes have degree 3, as shown in Figure \ref{fig:3ct3graph}. The corresponding quantum walk operator using the Grover coin operator is shown in Figure \ref{fig:3ct3mat} - the quantum walk operator $U$ is a 42-by-42 real unitary matrix (which is fairly sparse), which, for the purposes of the decomposition, is expanded to a 64-by-64 unitary matrix as per Eq.~(\ref{eqn:Uexpand}).
\begin{figure}
\caption{The 3CT3 graph and its corresponding quantum walk operator using the Grover coin operator. The colours/shades in (b) denote the matrix entries for $-1/3$ (light grey), $2/3$ (dark grey) and $1$ (black) - all other matrix entries are $0$ (white).}
\label{fig:3ct3graph}
\label{fig:3ct3mat}
\label{fig:3ct3graphmat}
\end{figure}
By using \emph{OptQC}, the reduction process gives the following results for the thread which achieves the optimal solution:
\begin{itemize}
\itemsep0em
\item No optimisation: $c_{num}(U,I,I) = 996$ gates
\item After selection of an optimised qubit permutation: $c_{num}(U,I,Q) = 3+ 345 + 3 = 351$ gates
\item After simulated annealing process to select a permutation $p$: $c_{num}(U,P,Q) = 3 + 33 + 231 + 30 + 3 = 300$ gates
\end{itemize}
Hence, we achieve a reduction of $\sim 70\%$ from the original number of gates. Runtime for this calculation is $\sim 12$ minutes. Figure \ref{fig:3ct3history} shows the time-series for $c_{num}(U,P,Q)$ during both the qubit permutation selection phase and the simulated annealing process (separated by a dotted line) to achieve the above result.
\begin{figure}
\caption{Time-series of $c_{num}(U,P,Q)$ during the qubit permutation selection phase and the simulated annealing process for the quantum walk operator of the 3CT3 graph.}
\label{fig:3ct3history}
\end{figure}
\subsection{Quantum Fourier transform}
\label{sec:qftalg}
Quantum Fourier transform is the quantum counterpart of the discrete Fourier transform in classical computing. It is an essential ingredient in several well-known quantum algorithms, such as Shor's factorization algorithm \cite{shor_polynomial-time_1997} and the quantum phase estimation algorithm \cite{cleve_quantum_1998}. The matrix representation of the quantum Fourier transform on $n$ dimensions is given by:
\begin{equation}
(\mbox{QFT})_{jk} = \frac{1}{\sqrt{n}} \omega^{jk}, \quad \mbox{where } \omega = \mbox{exp}(2 \pi i / n).
\label{eqn:qftdef}
\end{equation}
An efficient quantum circuit implementation of the quantum Fourier transform is given in \cite{nielsen_quantum_2011}, which scales logarithmically as $O(\mbox{log}(n)^2)$. Such a circuit implementation for $n=2^6=64$ is shown in Figure \ref{fig:qft64org}.
\begin{figure}
\caption{Circuit implementation of quantum Fourier transform for $n=64$.}
\label{fig:qft64org}
\end{figure}
Now, let us apply \emph{OptQC} to the corresponding 64-by-64 complex unitary operator, given by Eq.~(\ref{eqn:qftdef}). With $\alpha = 0.002$, the reduction process gives the following results for the thread achieving an optimal solution:
\begin{itemize}
\itemsep0em
\item No optimisation: $c_{num}(U,I,I) = 4095$ gates
\item After selection of an optimised qubit permutation: $c_{num}(U,I,Q) = 5 + 3577 + 5 = 3587$ gates
\item After simulated annealing process to select a permutation $p$: $c_{num}(U,P,Q) = 5 + 69 + 3359 + 70 + 5 = 3508$ gates
\end{itemize}
Hence, we achieve a reduction of $\sim 14\%$ from the original number of gates. Runtime of this calculation is $\sim 20$ minutes. Figure \ref{fig:qft64history} shows the time-series for $c_{num}(U,P,Q)$ during both the qubit permutation selection phase and the simulated annealing process (separated by a dotted line) to achieve the above result. Clearly, this result is by far inferior to the quantum circuit of only 24 gates shown in Figure \ref{fig:qft64org}. Similarly, the \emph{OptQC} package would not be able to provide quantum circuits as efficient as those presented in \cite{douglas_efficient_2009, loke_efficient_2012} for the implementation of quantum walks on highly symmetric graphs. This is to be expected, since the CS decomposition is a general technique that decomposes a given unitary into a fixed circuit structure using many conditional gates, with an upper bound of O($4^n$). This algorithm is performed without foreknowledge or explicitly exploiting the structure of the unitary, which would clearly be crucial in achieving the lowest possible number of gates for a given unitary, as exemplified by the above examples. Instead, the \emph{OptQC} package is designed to work for any arbitrary unitary operator for which we do not already have an efficient quantum circuit implementation of, for example, quantum walk operators on arbitrarily complex graphs. In such cases, we have demonstrated that the \emph{OptQC} package provides optimised quantum circuits that are far more efficient than the original \emph{Qcompiler}.
\begin{figure}
\caption{Time-series of $c_{num}(U,P,Q)$ during the qubit permutation selection phase and the simulated annealing process for the quantum Fourier transform operator.}
\label{fig:qft64history}
\end{figure}
\section{Conclusion and future work}
\label{sec:cafw}
We have developed an optimised quantum compiler, named as \emph{OptQC}, that runs on a parallel architecture to minimise the number of gates in the resulting quantum circuit of a unitary matrix $U$. This is achieved by finding permutation matrices $Q$ and $P$ such that $U = Q^TP^TU'PQ$ requires less total number of gates to be implemented, where the implementation for each matrix is considered separately. Decompositions of unitary matrices is done using the CSD subroutines provided in the LAPACK library \cite{anderson_lapack_1999} and adapted from \emph{Qcompiler} \cite{chen_qcompiler:_2013}. \emph{OptQC} utilises an optimal selection of qubit permutations $Q$, a simulated annealing procedure to find $P$, and a combination of similar gates in order to reduce the total number of gates required as much as possible. We find that for many different types of unitary operators, \emph{OptQC} is able to reduce the number of gates required by a significant amount, but its efficacy does vary depending on the unitary matrix given. In particular, this optimisation procedure works well for sparse unitary matrices.
For future work, we hope to look at characterising the optimal solutions reached to see if the matrix $U'$ (and the associated permutation $P$) have some common preferential structure that leads to a reduced cost of implementation using the CSD method. Such information could be used to implement a guided search for the optimal solution, rather than using random adjustments of the permutation matrix. We also want to characterise `bad' permutations (that is, permutations with a large cost) and avoid them in the search procedure, perhaps by eliminating the conjugacy class of `bad' permutations from the search space.
\end{document} |
\begin{document}
\markboth{G.Marmo, A.Simoni and F.Ventriglia} {Quantum Systems and
Alternative Unitary Descriptions}
\catchline{}{}{}{}{}
\title{QUANTUM SYSTEMS AND \\ ALTERNATIVE UNITARY DESCRIPTIONS }
\author{\footnotesize G. MARMO}
\address{Dip. Scienze Fisiche and INFN Sez. di Napoli, Universit\`a Federico II ,
Compl. Univ. Monte S.Angelo\\
Napoli, 80126, Italy \footnote{marmo@na.infn.it} }
\author{A. SIMONI}
\address{Dip. Scienze Fisiche and INFN Sez. di Napoli, Universit\`a Federico II ,
Compl. Univ. Monte S.Angelo\\
Napoli, 80126, Italy \footnote{simoni@na.infn.it}}
\author{F. VENTRIGLIA}
\address{Dip. Scienze Fisiche and INFM Unit\`a di Napoli, Universit\`a
Federico II , Compl. Univ. Monte S.Angelo\\
Napoli, 80126, Italy \footnote{ventriglia@na.infn.it} }
\maketitle
\pub{Received (Day Month Year)}{Revised (Day Month Year)}
\begin{abstract}
Motivated by the existence of bi-Hamiltonian classical systems and
the correspondence principle, in this paper we analyze the problem
of finding
Hermitian scalar products which turn a given flow on
a Hilbert space into a unitary one. We show how different
invariant Hermitian scalar products give rise to different
descriptions of a quantum system in the Ehrenfest and
Heisenberg picture.
\keywords{Quantum systems; completely integrable systems;
symplectic dynamics.}
\end{abstract}
\section{\protect
Introduction}
Classical bi-Hamiltonian systems have played a relevant role in
the past decades for the study of completely integrable systems,
both for finite and infinite number of degrees of freedom (see
Ref. 1 and Ref. 2 for quantum systems).
Quantum systems admitting alternative commutation relations have
been considered many times since the pioneering paper of E.P.
Wigner\cite{Wigner}, see also Ref. 4, 5, 6.
Quantum systems described by non Hermitian operators and
possessing a real spectrum have been analyzed by several authors
(e.g. Ref. 7, 8).
In a recent paper\cite {Morandi} we have shown that these
situations may be better tackled within the framework of the
''inverse problem for quantum systems''. From a mathematical point
of view a similar problem was first discussed by Nagy\cite{Nagy}
long ago.
To clearly formulate this problem let us briefly recall first the
symplectic inverse problem for classical linear systems. Starting
with a vector field
\begin{equation}
\Gamma =\Gamma ^{i}\frac{\partial }{\partial \xi ^{i}}\ \ \ ,
\end{equation}
one searches for all possible Hamiltonian descriptions in terms of
symplectic structures
\begin{equation}
\omega =\omega _{jk}d\xi ^{j}\wedge d\xi ^{k}\ \ \ \ \ \ ,
\end{equation}
by solving for the equation $L_{\Gamma }\omega =0$ . Every
symplectic structure admits an ``inverse'': it is a bivector
field, usually called a Poisson tensor, defined by $\Lambda
^{ik}\omega _{kj}=\delta _{j}^{i}.$
Thus, the inverse problem amounts to search for all decompositions of $
\Gamma $ as the product
\begin{equation}
\Gamma ^{i}=\Lambda ^{ik}\frac{\partial H}{\partial \xi ^{k}}\ \ \ \ .
\end{equation}
where $\Lambda ^{ik}$ is a skew-symmetric, real, point-dependent matrix. If
we deal with linear vector fields and quadratic Hamiltonians, say
\begin{equation}
\Gamma ^{i}=A_{k}^{i}\xi ^{k}\ \ \ ,\ \ \ \ H=\frac{1}{2}\xi ^{k}H_{kj}\xi
^{j}\ \ ,
\end{equation}
the inverse problem becomes a problem of linear algebra, i.e.
searching for all decompositions of the dynamical matrix $A$ into
the product of a non degenerate skew-symmetric matrix $\Lambda $
and a symmetric matrix $H$, in compact form $A=\Lambda \cdot H.$ \
\ When $\Lambda $ is not required to be non-degenerate, we are
dealing with the ``inverse problem for Poisson dynamics''.\cite{??}
We should remark \ that this problem is more interesting when we
are considering non-linear situations and non constant rank of
$\Lambda $, otherwise, by quotienting with respect to the kernel
of $\Lambda ,$ we may go back to the symplectic situation.
Thus all possible decompositions of $A$, in the stated form, provide us with
alternative Hamiltonian descriptions. In many physical situations, when
dealing with completely integrable systems, we are interested in the
existence of alternative decompositions once one has been already given,
i.e. we search for alternative Hamiltonian descriptions for a given
Hamiltonian system $\Gamma $. The alternative descriptions would
characterize $\Gamma $ as a multi-Hamiltonian vector field. It is almost
obvious that many alternative Hamiltonian descriptions will be generated by
symmetries for $\Gamma $ which are not canonical transformations for $
\Lambda $. However this way of generating new Hamiltonian
descriptions will not exhaust the class of alternative ones. For
instance\cite{Ventriglia} the two dimensional isotropic Harmonic
Oscillator has different decompositions with either $H$ definite
positive or with signature (+,+,-,-), which arise from the
following Hamiltonian descriptions:
\begin{equation}
\begin{array}{l}
H_{0}=\frac{1}{2}(p_{1}^{2}+q_{1}^{2}+p_{2}^{2}+q_{2}^{2}) \\
\omega _{0}=dp_{1}\wedge dq_{1}+dp_{2}\wedge dq_{2}
\end{array}
\end{equation}
and
\begin{equation}
\begin{array}{l}
H=\frac{1}{2}(p_{1}^{2}+q_{1}^{2}-p_{2}^{2}-q_{2}^{2}) \\
\omega =dp_{1}\wedge dq_{1}-dp_{2}\wedge dq_{2}\ .
\end{array}
\label{4}
\end{equation}
These factorizations cannot be related by any similarity
transformation. In this setting, it has been shown\cite{Giordano}
that when the critical point of the linear vector field $\Gamma $
is stable in the Liapunov sense, among the alternative Hamiltonian
descriptions there is one which is positive definite.
The formulation of the inverse problem for quantum systems is now quite
natural: given a vector field $\Gamma $ on some vector space $V$, we search
for all Hermitian structures $h$ which are solution of the equation $
L_{\Gamma }$ $h=0$ .
In our previous papers Ref. 7, 17, to avoid technicalities, we
have carried on our study of the inverse problem within the
framework of finite dimensional Hilbert spaces; in this paper we
would like to consider the problem in the more relevant case of
infinite dimensional Hilbert space.
This paper is organized as follows: in Sections 2, 3, 4, 5 the
problem is specified, mathematical results are reviewed, some
applications are given and the dependence of results on the
fiducial metric $\ h_{0}$ \ is studied. In Section 6 we give some
results on the existence of alternative invariant Hermitian scalar
products; in Section 7 a way for finding invariant scalar products
for Abelian groups is considered; in Section 8 we deal with the
more general situation of related operators instead of similar
ones; in Section 9 we apply our considerations to the Heisenberg
group. Finally in Section 10 we describe quantum systems as
Hamiltonian systems and discuss the consequences of the existence
of alternative invariant scalar products, in Section 11 we draw
some conclusions. Results which are available in the literature
are referred here as Theorems, while our considerations are recast
in Propositions and Corollaries.
\section{The Inverse Problem for Quantum Systems}
We consider a linear dynamical system $\Gamma $ on a complex vector space of
states $\mathbb{H}$ carrying a Hilbert space structure $h_{0}$ \ (we define
all scalar products to be linear in their second factor). Then $\Gamma $
\begin{equation}
\Gamma :\mathbb{H}\rightarrow T\mathbb{H\ =}\ \mathbb{H\times H\ \ },\mathbb{
\ \ }\Gamma (\Psi )=\mathbb{(}\Psi ,-\frac{i}{\hbar }H\Psi )
\end{equation}
determines the differential equation
\begin{equation}
\frac{d}{dt}\psi =-\frac{i}{\hbar }H\psi \ \ , \label{Scroe}
\end{equation}
where $H$ is a linear operator on $\mathbb{H}$ ,\ but we do not require any
Hermiticity properties with respect to $h_{0}$. Hereafter we put $\hbar =1.$
The inverse problem consists of finding which conditions have to be
satisfied by $\Gamma $ for the existence of Hermitian scalar products
invariant under the time evolution associated with $\Gamma $ .
In finite dimensions, i.e. for a complex linear vector field on $\mathbb{C}
^{n}$ , we have\cite{Ventriglia} the following Proposition:
\begin{proposition} A complex linear vector field $\Gamma (\Psi )=\mathbb{(}\Psi ,-
iH\Psi )$ generates a flow $\phi _{t}:$ $\mathbb{C}
^{n}\rightarrow \mathbb{C}^{n}$ preserving some Hermitian scalar product $h$
, i.e. $\phi _{t}^{\ast }h=h$, iff any one of the following
equivalent conditions is satisfied:
1) $H=H^{\dagger },$ where the adjoint is taken with respect to
the scalar product defined by $h$ , i.e. $L_{\Gamma }h=0$;
2) $H$ is diagonalizable and has a real spectrum;
3) all the orbits $e^{-iHt}\Psi $ are bounded sets, for any
initial condition $\Psi $.
\end{proposition}
\textbf{Remark. } Sometimes, properties stated in 2) are derived
by requiring that the Hamiltonian is $\mathcal{PT}-$symmetric (see
for instance Ref. 12, 13).
The proof of this Proposition relies on the existence of a
decomposition of $H$ into a nilpotent part and a semisimple part,
commuting among themselves. Once we exponentiate this
decomposition, the boundedness condition rules out the nilpotent
part and requires that the semisimple part of $-iH$ has only
imaginary eigenvalues. When going to infinite dimensions one may
try to use a similar procedure, however the corresponding
separation of $-iH$ holds true only for a special class of
operators.\cite{Dunford} Therefore we have to use a different
approach.
When dealing with infinite dimensions, it is clear that, according
to Weyl,\cite{Weyl} condition 3) will play a more convenient role
because it is stated in terms of (what is going to become) the
bounded operators $e^{-iHt}$ instead of the infinitesimal
generator $-iH$ which, in the most physical situations, turns out
to be unbounded and therefore would raise domain problems which
make statements more cumbersome.
Therefore, within Weyl's ideology, it is better to deal with
finite transformations (i.e. automorphisms of the state space)
rather than infinitesimal transformations (i.e. endomorphisms,
which, in general, create domain problems). This step, when
starting with Eq. (\ref{Scroe}), may be achieved by using the
Cayley map, i.e. by replacing $H$ with
\begin{equation}
T=(H-i\mathbb{I)}(H+i\mathbb{I)}^{-1}\ . \label{Cayley}
\end{equation}
For a recent, authoritative analysis of this map see
Kostant-Michor.\cite {Michor} In this way we search for scalar
products turning $T$ into a unitary operator and accordingly $H$
into a self-adjoint operator with a unitary flow $e^{-iHt}$.
Of course we could also decide to formulate the inverse problem in quantum
mechanics directly in terms of one-parameter groups of automorphisms for the
state space and to seek for all Hermitian products which turn the
one-parameter group of transformations into a unitary one-parameter group.
We shall therefore start with automorphisms of the state space instead of
endomorphisms. To set the stage, we consider the vector space of states to
be a Hilbert space, i.e. topology, completeness and bases are defined with
respect to a chosen fiducial scalar product $h_{0}$. However this Hermitian
structure need not be invariant under the transformations we are dealing
with.
In the next Section we review a few relevant results scattered in
the existing literature, they were motivated by the search for
stability criteria for infinite dimensional systems\cite{Krein}
(compare the paragraph after formula Eq. (\ref{4}) of the
Introduction).
\section{Uniformly Bounded Operators}
Inspired by condition 3) of the above Proposition 1 we may
consider an automorphism $T$ \ of a Hilbert space $\mathbb{H}$
with a Hermitian scalar product $h_{0}$ and construct the orbits
\begin{equation}
\left\{ T^{k}\Psi \right\} \ \ \ \ ;\ \ \ k\in \{0,\pm 1,\pm 2,\dots \}
\end{equation}
and require that all of them, with respect to the norm induced by
$h_{0}$ , are bounded sets for any value of $\Psi $. The use of
the principle of uniform boundedness\cite{Simon} shows that this
is equivalent to require that $T$ is uniformly bounded.
We recall that the automorphism $T$ on $\mathbb{H}$ is said to be uniformly
bounded if there exists an upper bound $c<\infty $ such that
\begin{equation}
||T^{k}||<c\ \ ;\ \ \ k\in \{0,\pm 1,\pm 2,\dots \}\ \ . \label{unifbound}
\end{equation}
Condition (\ref{unifbound}) is called Nagy criterion. For such an operator $
T $ the following Theorem\cite{Nagy} holds:
\begin{theorem} For a uniformly bounded operator $T$ \ there
exists a bounded positive selfadjoint transformation $Q$ such that
\begin{equation}
\frac{1}{c}\mathbb{I}\leq Q\leq c\mathbb{I}
\end{equation}
and $QTQ^{-1}=U$ \ is unitary with respect to the fiducial $h_{0}$. This
implies that $T=$ $Q^{-1}UQ$ is unitary with respect to
\begin{equation}
h_{T}(X,Y):=h_{0}(Q^{2}X,Y)\ .
\end{equation}
\end{theorem}
\begin{proof}(Sketch) The essential idea of the proof is to
define the invariant scalar product $h_{T}(X,Y)$ as the limit, for
$n$ going to infinity, of $\ h_{0}(T^{n}X,T^{n}Y)=:h_{n}(X,Y).$
This is the limit of a bounded sequence of complex numbers which
does not exist in general, at least in the usual sense. Therefore
a generalized concept of limit for bounded sequence, introduced by
Banach and Mazur\cite{Banach}, has to be used. This generalized
limit (denoted as $Lim$) amounts to define the invariant scalar
product $h_{T}$ as the transformed scalar product $h_{n}$
``at infinity'', where $T$ is interpreted as the generator of a $\mathbb{Z}
- $action on $\mathbb{H}$.
\end{proof}
It is possible\cite{Nagy} to use the same approach to deal with an $\mathbb{
R}-$action instead of the $\mathbb{Z}-$action so that:
\begin{theorem} When the one-parameter group of automorphisms
$T(s)$ of linear transformations is uniformly bounded, that is
$||T(s)||<c ,\ \ s\in (-\infty ,\infty )\ ,$ there exists a
bounded selfadjoint transformation $Q$ such that
$QT(s)Q^{-1}=U(s)$ is a one-parameter group of \ unitary
transformations. Clearly continuity properties with respect to $s$
are the same for both $T(s)$ and $U(s)$.
\end{theorem}
\textbf{Remark.} The iterated application of the uniformly bounded operator $
T $ may be viewed as a discrete time evolution, which, according
to Theorem 2, can be made unitary. Here we note that such a
discrete time evolution may be fitted within a continuous
differentiable time evolution. We have the following
\begin{proposition}If $T$ is a uniformly bounded operator then there
exists a
bounded operator $A$, selfadjoint with respect to the invariant product $
h_{T},$ such that $T^{n}=e^{iAn}$ for any $n\in \mathbb{Z}$.
\end{proposition}
\begin{proof} From Theorem 2 we know that $T$ is $h_{T}-$unitary
so that it can be written as
\begin{equation}
T=\int\limits_{0}^{2\pi }e^{i\lambda }dE_{\lambda }^{T}\ \ \ ,
\end{equation}
where $E_{\lambda }^{T}$ is the uniquely defined spectral family
for $T$ with the property $E_{0}^{T}=0$ and $E_{2\pi
}^{T}=\mathbb{I}$.
Now define $A=\int\limits_{0}^{2\pi }\lambda dE_{\lambda }^{T}$ \ , so that $
T^{n}=e^{iAn}$ . $\ $The operator $A$ is of course
$h_{T}-$selfadjoint, bounded and defined on the entire Hilbert
space.
\end{proof}
Of course, the one-dimensional unitary group $e^{iAt}$ is a
continuous one-parameter group containing the discrete subgroup
$\left\{ T^{n}\right\} $ and moreover all orbits $e^{iAt}\Psi $ ,
for any $\Psi \in \mathbb{H}$ , are differentiable in $t$ and
solve\ the Schroedinger equation for $A.$ It is possible to go
from one-parameter groups of transformations to
finitely-many-parameters groups of transformations if they define
an Abelian group. The following Theorem (see for instance Ref. 20)
holds:
\begin{theorem} A uniformly bounded action on $\mathbb{H}$ of
an Abelian group $\mathcal{G}$ , i.e.
\begin{equation}
||G||<c\ \ \ \ ,\ \ \ \ \forall \ G\in \mathcal{G}
\end{equation}
can be turned into a unitary action with the help of a bounded selfadjoint
transformation $Q,$ i.e. $QGQ^{-1}$ are unitary transformations for any $\
G\in \mathcal{G}$.
\end{theorem}
The proof of this Theorem cannot use the idea of the generalized
Banach limit, like in the previous more restricted cases, because
no assumptions are made on the structure and the topology of
$\mathcal{G}$ . In fact, the proof relies on a very general result
on the existence of fixed points for any continuous transformation
of convex sets (Ref. 21, 22, see also Ref. 23).
For practical purposes, when the Nagy condition is not easy to check,
some equivalent conditions may be used (see Ref. 24, 25). It has
been shown that the Nagy condition Eq. (\ref{unifbound}) is equivalent
to
\begin{equation}
\begin{array}{l}
\sup\limits_{|\lambda |>1}(|\lambda |^{2}-1)\int\limits_{0}^{2\pi
}||(T-\lambda )^{-1}u||^{2}d\theta \leq C||u||^{2} \\
\sup\limits_{|\lambda |>1}(1-|\lambda |^{-2})\int\limits_{0}^{2\pi
}||(T^{\dagger }-\lambda ^{-1})^{-1}u||^{2}d\theta \leq C||u||^{2}
\end{array}
\end{equation}
where $\theta = \arg \lambda$. By using the Cayley transform a
condition for the similarity of an operator $L$ to a selfadjoint
one is recovered as:
\begin{equation}
\begin{array}{l}
\sup\limits_{\varepsilon >0}\varepsilon \int\limits_{R}^{{}}||(L-\lambda
)^{-1}u||^{2}dk\leq C||u||^{2} \\
\sup\limits_{\varepsilon >0}\varepsilon \int\limits_{R}^{{}}||(L^{\dagger
}-\lambda )^{-1}u||^{2}dk\leq C||u||^{2}
\end{array}
\end{equation}
where $\lambda =k+i\varepsilon $.
In Theorems 2, 3, 5 the commutativity hypothesis is
crucial.\cite{Pisner} In the non commutative case one needs
necessarily some other assumptions, for instance that
$\mathcal{G}$ is a (representation of a) compact group. Then the
existence of the invariant Haar measure guarantees that
$\mathcal{G}$ is similar to a unitary representation.
In Section 9, we discuss an application of Theorems 2 and 3 to the
Heisenberg group, a very special case of non-commutativity.
In the following we will be concerned mainly with Theorems 2 and
3.
\section{Applications}
All the examples that follow are applications of Theorem 2 and
refer to operators which are not \emph{normal}. This is so because
of the following corollary to Nagy's Theorem 2:
\begin{corollary} A normal operator $T$ either is already unitary
or it is not similar to any unitary operator. Equivalently: A
normal operator $T$ is unitary if and only if it satisfies the
Nagy condition.
\end{corollary}
\begin{proof} \ When $T$ is normal the operators appearing in its
polar decomposition, $T=V|T|,$ commute, so that $|T|$ satisfies
Nagy's condition Eq. (\ref{unifbound}) together with $T$. Then
$|T|$ is both similar to a unitary operator and positive
selfadjoint. This implies that the spectrum of $|T|$ reduces to 1
, so that $|T|=\mathbb{I}$ and $T=V$.
\end{proof}
\textbf{Example 1)} As a simple example consider the group of
translation on the line realized on $L_{2}(\mathbb{R})$ with a
measure which is not translationally invariant, i.e.
\begin{equation}
(T_{t}\Psi )(x):=\Psi (x+t) \ ,\ \Psi\in L_{2}(\mathbb{R},\rho
(x)dx), \end{equation}
where $\rho(x)$ is any function $0<\alpha <\rho (x)<\beta <\infty $ and denote by $
h_{\rho }$ the corresponding scalar product. If the limit $
\lim\limits_{x\rightarrow -\infty }\rho (x)$ exists , say $
\lim\limits_{x\rightarrow -\infty }\rho (x)=a,$ then it is trivial to
compute the Banach limit because it agrees with a limit in the usual sense.
In fact by Lebesgue Theorem we have:
\begin{equation}
\begin{array}{l}
\lim\limits_{t\rightarrow \infty }\int\limits_{\mathbb{R}}\Psi ^{\ast
}(x+t)\Phi (x+t)\rho (x)dx=\lim\limits_{t\rightarrow \infty }\int\limits_{
\mathbb{R}}\Psi ^{\ast }(x)\Phi (x)\rho (x-t)dx= \\
\int\limits_{\mathbb{R}}\lim\limits_{t\rightarrow \infty }\Psi
^{\ast }(x)\Phi (x)\rho (x-t)dx=a\int\limits_{\mathbb{R}}\Psi
^{\ast }(x)\Phi (x)dx=ah_{0}(\Psi ,\Phi ).
\end{array}
\end{equation}
This shows that the Banach limit gives $h_{T}(\Psi ,\Phi )=ah_{0}(\Psi ,\Phi
),$ i.e. \ it is a multiple of the standard translation invariant scalar
product. Therefore
\begin{equation}
h_{T}(\Psi ,\Phi )=h_{\rho }(Q^{2}\Psi ,\Phi )=h_{\rho }((\sqrt{\frac{a}{
\rho }})^{2}\Psi ,\Phi )\ \ \ ,
\end{equation}
that is $Q=\sqrt{\frac{a}{\rho }}$ and
\begin{equation}
(U_{t}\Phi )(x)=(QT_{t}Q^{-1}\Phi )(x)=\sqrt{\frac{\rho (x+t)}{\rho (x)}}
\Phi (x+t)
\end{equation}
is unitary in $L_{2}(\mathbb{R},\rho (x)dx).$
\textbf{Example 2)} The following example deals with a non-diagonalizable uniformly bounded
operator $T$ defined on $L_{2}(\mathbb{R
},dx)$ as
\begin{equation}
(T\Psi )(x):=f(x)\Psi (-x)
\end{equation}
where $f(x)$ is a bounded function:
\begin{equation}
0<\alpha \leq |f(x)|\leq \beta <\infty \;\;.
\end{equation}
In other words $T$ is the product of the parity operator $P$ times
the multiplicative bounded operator $f$. Note that $T$ is
non-normal as
\begin{equation}
fPPf^{\ast }-Pf^{\ast }fP=|f(x)|^{2}-|f(-x)|^{2}
\end{equation}
which is not zero for a generic $f$. Moreover this function $f$
has to be chosen in such a way that $T$ satisfies Nagy's
condition. For this, taking into account that:
for $n>0$
\begin{equation}
(T^{n}\Psi )(x)=\left( \prod\limits_{k=1}^{n}f((-1)^{k+1}x)\right) \Psi
((-1)^{n}x)\ \ \ \ \ \ n>0
\end{equation}
while for $n<0$
\begin{equation}
(T^{n}\Psi )(x)=\left( \prod\limits_{k=1}^{-n}f^{-1}((-1)^{k}x)\right) \Psi
((-1)^{n}x)\ \ \ \ \ \ n<0
\end{equation}
the condition $||T^{n}||<K$ implies the functional relation $|f(x)f(-x)|=1$
, which admits the general solution :
\begin{equation}
f(x)=\frac{\mu (x)}{\mu (-x)}e^{i\varphi (x)}
\end{equation}
where $\mu (x)$ is a real function such that: $0<\mu _{1}\leq \mu (x)\leq
\mu _{2}<\infty .$
The Banach limit is readily evaluated and results
\begin{equation}
\begin{array}{l}
h_{T}(\Phi ,\Psi )=Lim_{n\rightarrow \infty }h_{0}(T^{n}\Phi ,T^{n}\Psi )=
\\
=\frac{1}{2}\int \Phi ^{\ast }(x)\Psi (x)(1+\frac{\mu ^{2}(-x)}{\mu ^{2}(x)}
)dx=h_{0}(Q^2\Phi ,\Psi )
\end{array}
\end{equation}
where $Q^2=\frac{1}{2}(1+\frac{\mu ^{2}(-x)}{\mu ^{2}(x)})$ is a
bounded positive operator, as expected. The operator $U_{T}$,
similar to $T$, which is unitary with respect to the standard
scalar product $h_{0}$ is
\begin{equation}
U_{T}=QTQ^{-1}=e^{i\varphi (x)}P\;\;.
\end{equation}
$U_{T}$ has only continuous spectrum given by
\begin{equation}
\lambda _{\pm }=\pm e^{\frac{i}{2}(\varphi (x_{0})+\varphi
(-x_{0}))}\;\;;\;\;x_{0}\in \mathbb{R}
\end{equation}
with corresponding generalized eigenfunctions $\Psi _{\pm }(x)$ :
\begin{equation}
\Psi _{\pm }(x)=e^{\frac{i}{2}\varphi (x_{0})}\delta (x-x_{0})\pm e^{\frac{i
}{2}\varphi (-x_{0})}\delta (x+x_{0})\;\;.
\end{equation}
\textbf{Example 3)} The following example is similar to the
previous one but parity is now replaced by a translation of a
fixed amount $a$:
\begin{equation}
(T_{a}\Psi )(x):=f(x)\Psi (x+a)
\end{equation}
where $f(x)$ is a bounded function
\begin{equation}
0<\alpha \leq |f(x)|\leq \beta <\infty \ \ \ \ .
\end{equation}
Imposing $||T^{n}||<K$ one gets that $f(x)=g(x)e^{i\phi (x)}$ with
$g(x)$ real and positive such that $g(x+a)=g(x)^{-1}$ and $\phi
(x)$ arbitrary and
real. Then as before one gets $Q^2 =\frac{1}{2}(1+g^{2}(x))$ . The spectrum of $
T_{a}$ is continuous, indeed the equation:
\begin{equation}
(T_{a}\Psi )(x)=g(x)e^{i\phi (x)}\Psi (x+a)=\mu \Psi (x)
\end{equation}
can be solved in the form \ $\Psi (x)=\sqrt{g(x)}e^{i(\lambda x+\chi (x))}$
; then
\begin{equation}
(T_{a}\Psi )(x)=T_{a}\sqrt{g(x)}e^{i(\lambda x+\chi (x))}=\sqrt{g(x)}
e^{i(\lambda x+\chi (x+a)+\phi (x))}e^{i\lambda a}\;\;.
\end{equation}
Therefore $\chi (x)$ must fulfill the functional relation $\chi (x+a)=\chi
(x)+\phi (x)$ ; this relation determines $\chi $ on the entire line once it
is arbitrarily chosen on $[0,a]$ . The continuous spectrum is then the
entire circle $\mu =e^{i\lambda a}$ .
\section{Dependence of the Invariant Metric on the Choice of the Initial One}
In this Section we analyze to what extent the invariant metric
$h_{T}$ changes by a change of the starting fiducial metric
$h_{0}$ to a
topologically equivalent one $h_{0}^{\prime }$. Any change of $h_{0}$ to $
h_{0}^{\prime }$ is parameterized by a positive definite Hermitian operator $
C$ by the relation
\begin{equation}
h_{0}(x,y)=h_{0}^{\prime }(Cx,y)
\end{equation}
and we get by Banach limits two invariant scalar products $h_{T}(x,y)$ and $
h_{T}^{\prime }(x,y)$, which are related by a similar relation
\begin{equation}
h_{T}(x,y)=h_{T}^{\prime }(Rx,y)\ .
\end{equation}
\begin{proposition}Consider the above $C$ and $R$ \ and define $A$ \
in the following way:
\begin{equation}
Lim_{n\rightarrow \infty }\ h_{T}^{\prime
}(A_{n}x,T^{n}y)=:F(x,y)=h_{T}^{\prime }(Ax,y)
\end{equation}
where
\begin{equation}
A_{n}=[C,T^{n}]\ \ .
\end{equation}
Then $R=C+A$ and $[A,T]=-[C,T]$.
\end{proposition}
\begin{proof} From the definition it is trivial to show that
$\{A_{n}\}$ is a set of uniformly bounded operators; therefore it
makes sense to compute
the bilinear functional $F(x,y)$ and the operator $A$ \ such that $
F(x,y)=h_{T}^{\prime }(Ax,y)$ is well defined via Riesz Theorem.
Then it requires only algebraic manipulations to show both of the
following results: $R=C+A$ \ and $[A,T]=-[C,T]$.
\end{proof}
This shows also that $[R,T]=0$, as it should, because any operator
connecting two $T-$invariant Hermitian scalar product necessarily
commutes with $T$. We will see this in the next Section.
\section{Alternative Invariant Hermitian Structures}
Starting with the automorphism $T$, we may investigate for the
existence of alternative Hermitian structures invariant under the
$\mathbb{Z}-$group action generated by $T.$ Such a $T$ could then
be said to be a bi-unitary map.
Assume therefore that $T$ is uniformly bounded. For the time being
we assume in addition that $T$ is diagonalizable and multiplicity
free (i.e. the commutant of $T$ is Abelian\cite{Simon}). Choose
then a bounded transformation $S$ with bounded inverse such that
\begin{equation}
T=S^{-1}US\ \ .
\end{equation}
where $U$ is unitary. Theorem 2 guarantees that at least one such
$S$ exists. Then
\begin{equation}
h^{S}(x,y):=h_{0}(S^{\dagger }Sx,y)
\end{equation}
turns $T$ into a unitary operator. Because of the uniform boundedness of $
T $ , we may also use the Banach limit
\begin{equation}
Lim_{n\rightarrow \infty }h_{0}\left( T^{n}x,T^{n}y\right) =:h_{T}(x,y)
\end{equation}
to define a new invariant Hermitian structure.
By using an $h_{0}-$orthonormal basis $\left\{ \varphi
_{k}\right\} $ of eigenvectors of $U$
\begin{equation}
U\varphi _{k}=e^{i\lambda _{k}}\varphi _{k}\ \ \ ;\ \ h_{0}(\varphi
_{k},\varphi _{j})=\delta _{k,j}
\end{equation}
we find by explicit computation of the Banach limit:
\begin{equation}
h_{T}(x,y)=\sum\limits_{k}h_{0}(Sx,\varphi _{k})h_{0}(\varphi
_{k},Sy)||S^{-1}\varphi _{k}||^{2} , \label{blim}
\end{equation}
while:
\begin{equation}
h^{S}(x,y)=\sum\limits_{k}h_{0}(Sx,\varphi _{k})h_{0}(\varphi
_{k},Sy).
\end{equation}
We see that the invariant Banach scalar product $h_{T}$ is obtained from $
h^{S}$ scaling each $\varphi _{k}$ by a factor $||S^{-1}\varphi
_{k}||.$ Notice that any bounded sequence of positive numbers can
be used in the same way to scale $\varphi _{k}$ to obtain
alternative invariant scalar products.
Thus the sequence $\{1,1,1\dots \}$ corresponds to $h^{S}(x,y)$ while $
\{||S^{-1}\varphi _{1}||,||S^{-1}\varphi _{2}||,\dots \}$ to $h_{T}(x,y)$ .
If the eigenvalues of $U$ \ are not multiplicity free we need another index $
l$ to label eigenvectors; then Eq. (\ref{blim}) becomes
\begin{equation}
h_{T}(x,y)=\sum\limits_{k,l,n}h_{0}(Sx,\varphi _{kl})h_{0}(\varphi
_{kn},Sy)h_{0}(S^{-1}\varphi _{kl},S^{-1}\varphi _{kn}).
\end{equation}
As in the multiplicity free case one obtains invariant scalar products
replacing $h_{0}(S^{-1}\varphi _{kl},S^{-1}\varphi _{kn})$ with any bounded
sequence of positive matrices.
What we learn from this example is that we can look for
alternative Hermitian structures on each eigenspace of $U$ and
then combine them with arbitrary positive coupling coefficients
provided that bounded vectors remain bounded with respect to the
newly defined products (boundedness of the sequence). In this
respect see our previous paper\cite{Morandi} where similar
considerations came out within the finite dimensional situation.
In particular this procedure shows that we may start with $h_{0}$ already
invariant and construct $S$ out of constants of the motion for $T,$ in this
way we would obtain alternative descriptions whenever the sequence $||$ $
S^{-1}\varphi _{k}||$ is appropriate. For instance in a central force
problem $2+\sin (J^{2}),$ in a basis for the angular momentum, would give $
2+\sin j(j+1)$ and $1/[$ $2+\sin j(j+1)]$ \ would be an appropriate sequence.
\textbf{Remark.} From what we said, it is clear that instead of a
once-for-all chosen sequence of bounded positive numbers (for
instance like $\{1,1,1\dots \}$ and $\{||S^{-1}\varphi
_{1}||,||S^{-1}\varphi _{2}||,\dots \}$ ) we could use a
``point-dependent'' sequence so that the Hermitian metric we
define will be dependent on the point and therefore the ``energy
function'' we associate with linear transformations will not be
quadratic anymore.\cite{Dubrovin} In this way, vector fields are
still linear and therefore compatible with the dynamical linear
superposition rule for state vectors, however the associated
Hamiltonian functions (infinitesimal generators) are no more
homogeneous of degree two. One is getting a kind of
``non-linearity''; one should compare this situation with the one
proposed by Weinberg.\cite{Weinberg}
Having learned from the diagonalizable case, we can obtain a class
of invariant scalar products dropping this assumption. Suppose
again $T$ uniformly bounded. Then
\begin{equation}
T=Q^{-1}UQ
\end{equation}
and
\begin{equation}
h_{T}(x,y)=h_{0}(Q^{2}x,y)\ \ .
\end{equation}
Consider the spectral decomposition of $U$ :
\begin{equation}
U=\int\limits_{0}^{2\pi }e^{i\lambda }dE_{\lambda }^{U}\ \ \ .
\end{equation}
Choose now any positive bounded function $\varphi $ on $[0,2\pi ]$ and
define, in analogy with the diagonalizable case, the scalar product:
\begin{equation}
h_{\varphi }(x,y)=\int\limits_{0}^{2\pi }\varphi (\lambda
)h_{0}(Qx,dE_{\lambda }^{U}Qy)
\end{equation}
One checks easily that $h_{\varphi }(x,y)$ is also invariant and that $
\varphi =1$ corresponds to the Nagy product $h_{T}(x,y)$ .
Via Riesz Theorem, we may write $h_{\varphi
}(x,y)=h_{T}(C_{\varphi }x,y)$ \ and solve for $C_{\varphi }$. To
this aim we define $B$ as
\begin{equation}
B=\int\limits_{0}^{2\pi }\varphi (\lambda )dE_{\lambda }^{U}
\end{equation}
to get
\begin{equation}
\begin{array}{l}
h_{\varphi }(x,y)=\int\limits_{0}^{2\pi }\varphi (\lambda
)h_{0}(Qx,dE_{\lambda }^{U}Qy)=h_{0}(Qx,BQy)= \\
=h_{0}(Qx,QQ^{-1}BQy)=h_{T}(x,Q^{-1}BQy).
\end{array}
\end{equation}
This formula furnishes $C_{\varphi }=Q^{-1}BQ$ .
It is not hard to show that $T$ commutes with $C_{\varphi }$ , as
\begin{equation}
h_{\varphi }(Tx,Ty)=h_{\varphi }(x,y)=h_{T}(C_{\varphi
}x,y)=h_{T}(C_{\varphi }Tx,Ty)=h_{T}(TC_{\varphi }x,Ty)
\end{equation}
so that $[T,C_{\varphi }]=0$ follows.
We conclude this Section by noting that the class of invariant
scalar product compatible with the starting one is parameterized
by the definite positive elements in the commutant of $T$, and in
particular it is empty only when $T$ is not uniformly bounded$.$
\section{Invariant Hermitian Structures for Commuting Uniformly Bounded
Operators}
The method of finding an invariant scalar product for an automorphism $T$
via a limiting procedure ``at infinity'' applies also in the case of many
uniformly bounded commuting automorphisms.
We analyze first the case of two uniformly bounded commuting
operators $T_{1} $ , $T_{2}$ . They generate an action of the
Abelian group $\mathbb{Z}\times \mathbb{Z}$ by uniformly bounded
operators; Theorem 5 guarantees the existence of an invariant
Hermitian scalar product. We show how to compute one of them.
One first computes $h_{T_{1}}$ as a Banach limit; this is by
construction invariant under $T_{1}$ \ but in general not under
$T_{2}$ . As second step one defines:
\begin{equation}
h_{12}(x,y)=Lim_{n\rightarrow \infty }\ h_{T_{1}}(T_{2}^{n}x,T_{2}^{n}y)\ .
\label{h12}
\end{equation}
\begin{proposition} Consider two uniformly bounded commuting
operators $T_{1}$ , $T_{2}$. Then
\begin{description}
\item[(i)] $h_{1,2}$, defined in Eq. (\ref{h12}), is
invariant under the action of the entire $\ \mathbb{Z}\times \mathbb{Z}$ .
\item[(ii)] If $T_{1}$ is multiplicity free, $h_{T_{1}}$ is already
invariant under the action of the entire $\ \mathbb{Z}\times \mathbb{Z}$.
\end{description}
\end{proposition}
\begin{proof} The proof of (i) is trivial. To prove (ii) consider
$C_{1}$ defined through
\begin{equation}
h_{T_{1}}(x,y)=:h_{12}(C_{1}x,y)\ . \label{Cformula}
\end{equation}
As shown before $C_{1}$ commutes with $T_{1}$ , but $T_{1}$ is multiplicity
free and therefore its commutant is Abelian. So $T_{2}$ must commute with $
C_{1}$ and the result follows at once from Eq.
(\ref{Cformula}).
\end{proof}
\textbf{Remark: }These results may obviously be extended to any uniformly
bounded action of the product of a finite number of groups $\mathbb{Z}$
and/or $\mathbb{R}$ .
\begin{corollary} Suppose that, in a uniformly bounded action of
an Abelian group, a particular operator $\widetilde{T}$ is
multiplicity free. Then the corresponding Nagy product
$h_{\widetilde{T}}$ \ is invariant for the entire action.
\end{corollary}
\begin{proof}The result follows readily extending the argument
of the proof of the above point (ii) to the general case of
Theorem 5.
\end{proof}
\section{Relating Two Uniformly Bounded Operators via Banach Limits}
The uniform boundedness condition is necessary and sufficient for
an operator
$T$ to be similar to a unitary one $U$ as stated in Theorem 2, then $T$ and $
U$ have the same spectrum. The $A-$relatedness property is a
condition weaker than similarity that has been utilized to discuss
in more general terms operators and relations between their
spectra, see for instance Ref. 17. We recall that two operators
$T_{1}$ and $T_{2}$ are said to be $
A- $related if
\begin{equation}
T_{1}A=AT_{2};
\end{equation}
then the operator $A$ is called an intertwining operator.
In this Section we discuss the case of two not necessarily
commuting operators $T_{1}$ and $T_{2}$ both uniformly bounded and
ask for conditions under which there exists a nontrivial operator
$A$ intertwining them. We require also that $A$ should be bounded.
For this discussion consider the bilinear functional $\mathcal{F}(x,y)$
defined as follows:
\begin{equation}
\mathcal{F}(x,y):=Lim_{n\rightarrow \infty }h_{0}(T_{2}^{n}x,T_{1}^{n}y)
\end{equation}
this limit is well defined for all $x,y\in \mathbb{H}$ \ and a
simple computation shows that $|\mathcal{F}(x,y)|\leq K||x||\
||y||$ .
Using Riesz Theorem we define three bounded operators $A_{0}$, $A_{1}$, $
A_{2}$ via:
\begin{equation}
\mathcal{F}(x,y)=h_{0}(A_{0}x,y)=h_{T_{1}}(A_{1}x,y)=h_{T_{2}}(A_{2}x,y).
\end{equation}
The following proposition holds:
\begin{proposition} With $\mathcal{F}$, $A_{0},$ $A_{1},$ $A_{2}$
defined as above the relations 1-4 hold:
\begin{enumerate}
\item $A_{0}=Q_{1}^{2}A_{1}=Q_{2}^{2}A_{2}$ ;
\item $A_{0}T_{2}=(T_{1}^{\dagger })^{-1}A_{0}$ \ , the adjoint is with
respect to the $h_{0}$ scalar product;
\item $A_{1}T_{2}=(T_{1}^{\dagger })^{-1}A_{1}$ \ , the adjoint is with
respect to the $h_{T_{1}}$ scalar product;
\item $A_{2}T_{2}=(T_{1}^{\dagger })^{-1}A_{2}$ \ , the adjoint is with
respect to the $h_{T_{2}}$ scalar product.
\end{enumerate}
\end{proposition}
\begin{proof} These relations follow at once from the definitions
of the $A$'s operators and the invariance property of $\mathcal{F}$ : $\mathcal{F}
(T_{2}x,T_{1}y)=\mathcal{F}(x,y)$.
\end{proof}
\textbf{Remark.} Relation (3) shows that $T_{1}$ and $T_{2}$ are
$A_{1}-$related when $\mathcal{F}\neq 0$, because in this case it follows that $
(T_{1}^{\dagger })^{-1}=T_{1}$.
We therefore are led to examine some conditions which guarantee that $
\mathcal{F}\neq 0.$ Consider the simple case of $T_{1}$ and $T_{2}$ both
diagonalizable and multiplicity free. Recalling that in our hypothesis on $
T_{1}$ and $T_{2}$ we have:
\begin{equation}
T_{1}=Q_{1}^{-1}U_{1}Q_{1}\ \ \ ,\ \ T_{2}=Q_{2}^{-1}U_{2}Q_{2}\ \ ,
\end{equation}
it is easy, by using two suitable orthonormal bases, to obtain the following
expression for $\mathcal{F}$ :
\begin{equation}
\mathcal{F}(x,y)=\sum\limits_{k,q}\delta _{\lambda _{k},\mu
_{q}}h_{0}(x,Q_{2}\psi _{q})\ h_{0}(Q_{2}^{-1}\psi _{q},Q_{1}^{-1}\varphi
_{k})\ h_{0}(Q_{1}\varphi _{k},y) \label{Flimit}
\end{equation}
where $U_{1}\varphi _{k}=\lambda _{k}\varphi _{k}$\ , $U_{2}\psi _{q}=\mu
_{q}\psi _{q}$ \ and $h_{0}(\varphi _{k},\varphi _{j})=\delta _{k,j}$ , $
h_{0}(\psi _{k},\psi _{j})=\delta _{k,j}$ .
We have shown therefore that
\begin{proposition} If $T_{1}$ and $T_{2}$ are uniformly bounded,
diagonalizable and multiplicity free operators then
$\mathcal{F}\neq 0$ if and only if they have at least one common
eigenvalue and $h_{0}(Q_{2}^{-1}\psi _{q},Q_{1}^{-1}\varphi
_{k})\neq 0$.
\end{proposition}
We note also that from Eq. (\ref{Flimit}) it is easy to obtain an
explicit expression for the intertwining operators $A$'s. For
instance:
\begin{equation}
A_{0}=\sum\limits_{k,q}\delta _{\lambda _{k},\mu _{q}}\
h_{0}(Q_{1}^{-1}\varphi _{k},Q_{2}^{-1}\psi _{q})h_{0}(Q_{2}\psi _{q},\cdot
)Q_{1}\varphi _{k}\ \ \ . \label{interwining}
\end{equation}
An argument similar to the one used after Eq. (\ref{blim}) shows
that one can replace in Eq. (\ref{interwining})
$h_{0}(Q_{1}^{-1}\varphi _{k},Q_{2}^{-1}\psi _{q})$ with any
bounded sequence of numbers, obtaining in this way other
intertwining operators.
One can use Eq. (\ref{interwining}) even in cases involving
continuous spectra. For instance in $L_{2}(\mathbb{R},dx)$
consider $(T_{1}\Psi )(x)=e^{ix}\Psi (x)$ and \ $(T_{2}\Psi
)(x)=\Psi (x+a)$ \ , $a\in \mathbb{R}$; then, when the sequence
$h(Q_{1}^{-1}\varphi _{k},Q_{2}^{-1}\psi _{q})$ is replaced by a
constant sequence, Eq. (\ref{interwining}) leads to the Fourier
transform operator.
\section{Invariant Hermitian Structures for Realizations of the Heisenberg
Group}
Elsewhere we have shown\cite{Romp2001} how alternative symplectic
structures, on a finite dimensional real symplectic\ vector space
$V$ , give rise to alternative Weyl systems, i.e. alternative
projective unitary representations of the Abelian vector group
$V.$
In this Section we show that it is possible to find invariant
Hermitian structures for any realization of the Heisenberg group
in terms of uniformly bounded operators.
We consider operators $T_{1}$ , $T_{2}$ , $T_{3}$ uniformly
bounded and obeying the following commutation relations
\begin{equation}
T_{1}T_{3}T_{1}^{-1}T_{3}^{-1}=\mathbb{I}=T_{2}T_{3}T_{2}^{-1}T_{3}^{-1}
\end{equation}
and
\begin{equation}
T_{1}T_{2}T_{1}^{-1}T_{2}^{-1}=T_{3}\ \ .
\end{equation}
\begin{proposition} For $T_{1}$ , $T_{2}$ , $T_{3}$ satisfying the
stated conditions it is possible to find a Hermitian structure
that converts them into unitary operators.
\end{proposition}
\begin{proof} There is a scalar product, say $\ h_{13}$, that makes $T_{1}$ and
$T_{3}$ unitary since they commute and are uniformly bounded; in
this scalar product $T_{2}$ will not be unitary in general.
Consider therefore
\begin{equation}
h(x,y):=Lim_{n\rightarrow \infty }h_{13}(T_{2}^{n}x,T_{2}^{n}y).
\end{equation}
This scalar product $h$ makes $T_{2}$ unitary and one checks
easily that it leaves unitary $T_{1}$ and $T_{3}$ as well. In fact
\begin{eqnarray}
h(T_{1}x,T_{1}y) &=&Lim_{n\rightarrow \infty
}h_{13}(T_{2}^{n}T_{1}x,T_{2}^{n}T_{1}y)= \nonumber \\
&=&Lim_{n\rightarrow \infty
}h_{13}(T_{1}T_{3}^{n}T_{2}^{n}x,T_{1}T_{3}^{n}T_{2}^{n}y)=h(x,y)
\end{eqnarray}
and the same for $T_{3}$ .
\end{proof}
In the future we will show the use of this proposition, to compare
alternative Weyl systems when they are considered on the same
Hilbert space.
\section{Quantum Systems as Hamiltonian Systems}
Let us recall that for a complex Hilbert space $\mathbb{H}$ with
Hermitian structure $h$ it is possible to define a symplectic
structure by setting
\begin{equation}
\omega _{h}(x,y):=\frak{Im}\ \ h(x,y)\ \ \ .
\end{equation}
Given a symplectic vector space $(V,\omega )$ we may define a
Poisson Bracket on $V^{\ast }$ by defining it first on linear
functions and then by using the Leibnitz rule on all
differentiable functions $\mathcal{F}(V^{\ast })$ . On
$Lin(V^{\ast },\mathbb{C})\subset \mathcal{F}(V^{\ast })$ we set
\begin{equation}
\left\{ v_{1},v_{2}\right\} =\omega (v_{1},v_{2})
\end{equation}
where on the left hand side $v_{1},v_{2}\in \mathcal{F}(V^{\ast })$ and on
the right $v_{1},v_{2}\in V$ .
More directly, on any totally reflexive space\cite{Nelson}, it is
possible to define a non-degenerate Poisson Bracket if the space
is strongly symplectic.\cite{Abraham} Indeed introducing
differentials of functions
\begin{equation}
df:V^{\ast }\rightarrow V^{\ast }\times V^{\ast \ast }\equiv
V^{\ast }\times V
\end{equation}
in intrinsic form at each point $\alpha \in V^{\ast },$ we define
\begin{equation}
\left\{ f,g\right\} (\alpha ):=\omega (df(\alpha ),dg(\alpha ))\ \ .
\end{equation}
In our setting $V$ is a Hilbert space and therefore there is a non intrinsic
isomorphism between $V$ and $V^{\ast }$ so that we have a Poisson Bracket
defined also on $\mathcal{F}(V).$ It follows easily that a complex unitary
linear transformation $U$ on $\mathbb{H}$ is symplectic. A densely defined
complex linear operator $\Gamma $ is a Hamiltonian vector field iff $\ H$ is
Hermitian. The Hamiltonian function associated with $\Gamma $ is given by
the formula
\begin{equation}
f_{H}(\psi )=\frac{1}{2}h(H\psi ,\psi )\ \ \ \ \psi \in D(\Gamma )\ \ .
\end{equation}
Thus, we associate a Hamiltonian function (infinitesimal
generating function) with any vector field which preserves the
Hermitian structure $h$.
Disregarding domain problems, for the moment, it is easy to show that if $
A,B,.....$ are Hermitian operators, the associated Hamiltonian functions,
say $f_{A},f_{B},....,$ satisfy the Poisson Bracket relations
\begin{equation}
\left\{ f_{A},f_{B}\right\} (\psi )=f_{i[A,B]}(\psi )\ \ \ .
\end{equation}
It is now possible to investigate the consequence of the existence
of alternative invariant Hermitian structures. With the notation
of Theorem 2, we have a new Poisson Bracket defined by
\begin{equation}
\left\{ v_{1},v_{2}\right\} _{T}=i\omega _{h}(Q^{2}v_{1},v_{2})\ \ \ .
\end{equation}
These new metrics will associate alternative quadratic functions
with every operator $A.$ All of the induced Poisson Bracket on
quadratic functions will be pairwise compatible in the sense of
bi-Hamiltonian systems. In the Ehrenfest description of quantum
dynamics, we have
\begin{equation}
i\hbar \frac{d}{dt}f_{A}=\left\{ f_{H},f_{A}\right\}
\end{equation}
and therefore the same vector field will be given different Hamiltonian
descriptions, with $f_{H}$ \ and the Poisson Bracket depending on the chosen
invariant metric.
The relation $h(v_{1},v_{2})=h^{^{\prime }}(Rv_{1},v_{2})$,
between two invariant Hermitian forms, suggests the correspondence
between operators
\begin{equation}
N:A\mapsto RA
\end{equation}
According to Ref. 32, we may define a new associative product
on the space of operators
\begin{equation}
A\circ _{N}B=N(A)B+AN(B)-N(AB)
\end{equation}
which gives $A\circ _{N}B=ARB.$ Any time that $R$ is a constant of the
motion for $H,$ as in this case, we obtain a new alternative associative
product on the space of operators which makes $H$ into an inner derivation.
In conclusion, we have shown how alternative Hamiltonian descriptions for
the Schroedinger equation give rise to alternative descriptions in the
Ehrenfest picture and the Heisenberg picture.
\section{Conclusions}
In this paper we have addressed the problem of alternative quantum
Hamiltonian descriptions of the same vector field on the space of
quantum states. In this way we have avoided dealing with the
ambiguity of quantization procedures for classical bi-Hamiltonian
systems.
We have briefly addressed the question of the alternative
descriptions at the level of the Ehrenfest picture and Heisenberg
picture. By using these results, in the future we shall consider
the quantum-classical transition to show how these alternative
description at the quantum level will reproduce known alternative
Hamiltonian descriptions for the corresponding classical systems.
\end{document}
\begin{document}
\title{Acoustic transmission problems:\
wavenumber-explicit bounds and resonance-free regions}
\begin{abstract}
We consider the Helmholtz transmission problem with one penetrable star-shaped Lipschitz obstacle. Under a natural assumption about the ratio of the wavenumbers, we prove bounds on the solution in terms of the data, with these bounds explicit in all parameters.
In particular, the (weighted) $H^1$ norm of the solution is bounded by the $L^2$ norm of the source term, independently of the wavenumber.
These bounds then imply the existence of a resonance-free strip beneath the real axis.
The main novelty is that the only comparable results currently in the literature are for smooth, convex obstacles with strictly positive curvature,
while here we assume only Lipschitz regularity and star-shapedness with respect to a point.
Furthermore, our bounds are obtained using identities first introduced by Morawetz (essentially integration by parts), whereas the existing bounds use the much-more sophisticated technology of microlocal analysis and propagation of singularities.
We also adapt existing results to show that if the assumption on the wavenumbers is lifted, then no bound with polynomial dependence on the wavenumber is possible.
\noindent
\textbf{AMS subject classification}: 35B34, 35J05, 35J25, 78A45
\noindent
\textbf{Keywords}: transmission problem, resonance, Helmholtz equation, acoustic, frequency explicit, wavenumber explicit, Lipschitz domain, Morawetz identity, semiclassical
\end{abstract}
\section{Introduction}\label{sec:intro}
The acoustic transmission problem, modelled by the Helmholtz equation, is a classic problem in scattering theory.
Despite having been studied from many different perspectives over the years, it remains a topic of active research. For example, recent research on this problem includes the following.
\bit
\item
Designing novel integral-equation formulations of the transmission problem; see, e.g.,
\cite{BoDoLeTu:15} and the reviews \cite{ClHiJe:13, ClHiJePi:15}.
\item
Designing hybrid numerical-asymptotic methods to approximate the solution of the transmission problem with a number of degrees of freedom that grows slowly (or, ideally, is constant) as the wavenumber increases \cite{GrHeLa:15,GHL17}.
\item
Quantifying how uncertainty in the shape of the obstacle affects the solution of the transmission problem \cite{HiScScSc:18}.
\item Obtaining sharp bounds on the location of resonances of the transmission problem \cite{Ga:15}.
\item Obtaining sharp estimates on the scattered field away from the obstacle, for example in the case when the obstacle is a ball \cite{HPV07, Cap12, CLP12, NV12}, motivated by applications in imaging, inverse problems, and cloaking.
\item Designing fast solvers for the Helmholtz equation in media where the wavenumber is piecewise smooth (i.e.~transmission problems); see, e.g., the recent review \cite{GaZh:16} and the references therein.
\item Proving wavenumber-explicit bounds on solutions of boundary value problems that approximate transmission problems, motivated by applications in numerical analysis \cite{BaChGo:16, Ch:16, SaTo:17, GrSa:18}.
\eit
In this paper we focus on the case of transmission through one obstacle; i.e.~the problem has two real wavenumbers: one inside the obstacle, and one outside the obstacle. We give a precise definition of this problem in Equation \eqref{eq:BVP} below. Our results can also be extended to more general situations (see Remark \ref{rem:extensions} below).
A natural question to ask about the transmission problem is:
\bit
\item[Q1.] Can one find a bound on the solution in terms of the data, with the bound explicit in the two wavenumbers?
\eit
Ideally, we would also like the bound to be either independent of the shape of the scatterer, or explicit in any of its natural geometric parameters; for example, if the domain is star-shaped, then we would ideally like the bound to be explicit in the star-shapedness parameter.
Another fundamental question is
\bit
\item[Q2.] Does the solution operator (thought of as a function of the wavenumber) have a resonance-free region underneath the real axis?
\eit
The relationship between Q1, Q2, and the question of local-energy decay for solutions of the corresponding wave equation is well-understood in scattering theory, and goes back to the work of Lax, Morawetz, and Phillips. In this particular situation of the Helmholtz transmission problem, Vodev proved in \cite[Theorem 1.1 and Lemma 2.3]{Vo:99} that an appropriate bound on the solution for real wavenumbers implies the existence of a resonance-free strip beneath the real axis.
\paragraph{Existing work on Q1 and Q2 for the transmission problem.}
To the authors' knowledge, there are five main sets of results regarding Q1 and Q2 for the Helmholtz transmission problem in the literature; we highlight that several of these results cover more general transmission problems than the single-penetrable-obstacle one considered in this paper.
\ben
\item[(a)] When the wavenumber outside the obstacle is \emph{larger} than the wavenumber inside the obstacle, and the obstacle is $C^\infty$ and convex with strictly positive curvature, Cardoso, Popov, and Vodev proved that the solution can be bounded independent of the wavenumber, and thus that there exists a resonance-free strip beneath the real axis \cite{CaPoVo:99} (these results were an improvement of the earlier work by Popov and Vodev \cite{PoVo:99a}).
\item[(b)] When the wavenumber outside the obstacle is \emph{smaller} than the wavenumber inside the obstacle, and the obstacle is $C^\infty$ and convex with strictly positive curvature, Popov and Vodev proved that there exists a sequence of complex wavenumbers (lying super-algebraically close to the real axis) through which the norm of the solution grows faster than any algebraic power of the wavenumber \cite{PoVo:99}.
\item[(c)] For either configuration of wavenumbers, and for any $C^\infty$ obstacle, Bellassoued proved that the norm of the solution cannot grow faster than exponentially with the wavenumber \cite{Be:03}.
\item[(d)] Further information about the location and the asymptotics of the resonances when the obstacle is $C^\infty$ and convex with strictly positive curvature, for both wavenumber configurations above,
was obtained by Cardoso, Popov, and Vodev in \cite{CaPoVo:01}.
Sharp bounds on the location of the resonances (again for both configurations of the wavenumbers) were given recently by Galkowski in \cite{Ga:15}.
\item[(e)] The case when the obstacle is a ball, in both configurations of the wavenumbers, has been studied by Capdeboscq and coauthors in \cite{CLP12,Cap12}
(and summarised in \cite[Chapter~5]{AC16})
using separation of variables and bounds on Bessel and Hankel functions.
\een
\paragraph{The main results of this paper and their novelty.} In this paper we prove analogues of the bound in (a) above when the obstacle is Lipschitz and star-shaped (Theorems \ref{thm:FirstBound} and \ref{thm:nonzero}) and hence also the existence of a resonance-free strip beneath the real axis (Theorem \ref{thm:res}).
Our condition on the ratio of the wavenumbers is slightly more restrictive than that in \cite{CaPoVo:99}, since the parameters in the transmission conditions are also involved (see Equation \eqref{eq:cond} and Remark \ref{rem:unnatural} below). Nevertheless
we believe this is the first time such results have been proved for the transmission problem when the obstacle is non-convex or non-smooth.
Furthermore, the constant in our bound is given completely explicitly, and the bound is valid for all wavenumbers greater than zero (satisfying the restriction on the ratio).
On the other hand, the bound for smooth convex obstacles in \cite{CaPoVo:99} assumes that both wavenumbers are large, with the ratio of the two fixed, but the bound is not explicit in this ratio (although we expect the results of \cite{Ga:15} could be used to get a bound for smooth convex obstacles that is explicit in the ratio of wavenumbers, when the wavenumbers are large enough \cite{Ga:16}).
An additional feature of the constant in our bound is that, whilst the bound is valid for all star-shaped Lipschitz obstacles, the constant only depends on the diameter of the obstacle. This ``shape-robustness'' makes the bound particularly suitable for applications in quantifying how uncertainty in the shape of the obstacle affects the solution (done for the transmission problem at low frequency in \cite{HiScScSc:18}), and these applications are currently under investigation.
We highlight that the bound in \cite{CaPoVo:99} relies on microlocal analysis and the deep results of Melrose and Sj\"ostrand on propagation of singularities \cite{MeSj:78, MeSj:82}.
In contrast, our bound is obtained using identities for solutions of the Helmholtz equation first introduced by Morawetz in \cite{MoLu:68, Mo:75}, which boil down to multiplying the PDEs by carefully-chosen test functions and integrating by parts.
Whereas Morawetz's identities have been used to prove bounds on many Helmholtz BVPs, famously the exterior Dirichlet and Neumann problems in \cite{MoLu:68, Mo:75}, it appears that (surprisingly) this paper is their first application to the Helmholtz transmission problem involving one penetrable obstacle (Remark \ref{rem:rough} below discusses their applications to transmission problems not involving a bounded obstacle).
The novelty of this paper is therefore not in the techniques that are used, but the fact that these well-known techniques can be applied to a classic problem to obtain new results.
\paragraph{Outline of the paper.}
In \S\ref{sec:form} we define the Helmholtz transmission problem for Lipschitz obstacles and recap results on existence, uniqueness, and regularity.
In \S\ref{sec:result} we give the main results (Theorems~\ref{thm:FirstBound}, \ref{thm:nonzero} and \ref{thm:res}).
In \S\ref{sec:Morawetz} we derive the Morawetz identities used in the proofs of Theorems~\ref{thm:FirstBound} and \ref{thm:nonzero}, and in \S\ref{sec:proofs} we prove the main results.
In \S\ref{sec:blowup} we adapt the existing results of \cite{PoVo:99} about super-algebraic growth of the norm of the solution through a sequence of complex wavenumbers to prove analogous growth through a sequence of real wavenumbers; we illustrate this growth through real wavenumbers with plots when the obstacle is a 2-d ball.
\paragraph{Motivation for \S\ref{sec:blowup}.}
Our motivation for adapting the results of \cite{PoVo:99} (and also highlighting the results of \cite{Cap12, CLP12}) in \S\ref{sec:blowup} is the recent investigations
\cite{Ch:16, BaChGo:16, SaTo:17, GrSa:18} of the interior impedance problem for the Helmholtz equation with piecewise-constant wavenumber (and the related investigation \cite{OhVe:16} for piecewise-Lipschitz wavenumber).
These investigations, coming from the numerical-analysis community, concern Helmholtz transmission problems, but consider the interior impedance problem, because the impedance boundary condition is a simple approximation of the Sommerfeld radiation condition that is commonly used when implementing finite-element methods (see Remark~\ref{rem:trunc} below).
The results of \cite{PoVo:99}, adapted as in \S\ref{sec:blowup} (using, in particular, recent wavenumber-explicit bounds on layer-potential operators from \cite{Sp2013a, HaTa:15}),
\bit
\item[(a)] show that the ``technical'' assumptions on the wavenumber in \cite[\S1]{BaChGo:16} are in fact necessary for the results of \cite[\S1]{BaChGo:16} to hold, and
\item[(b)] partially answer the conjecture in \cite[\S2.3]{SaTo:17} about the maximal growth of the norm of the solution operator.
\eit
\section{Formulation of the problem}\label{sec:form}
\subsection{Geometric notation.}\label{sec:Notation}
Let ${\Omega_i}\subset\IR^d$, $d \geq 2$, be a bounded Lipschitz open set.
Denote ${\Omega_o}:=\IR^d\setminus\overline{\Omega_i}$ and $\Gamma:={\partial\Omega}_i={\partial\Omega}_o$.
Let $\bn$ be the unit normal vector field on $\Gamma$ pointing from ${\Omega_i}$ into ${\Omega_o}$.
We denote by ${\partial_\bn}$ the corresponding Neumann trace from one of the two domains ${\Omega_i}$ and ${\Omega_o}$ and we do not use any symbol for the Dirichlet trace on $\Gamma$.
For any $\varphi\in L^2_{\mathrm{loc}}(\IR^d)$, we write $\varphi_i:=\varphi|_{\Omega_i}$ and $\varphi_o:=\varphi|_{\Omega_o}$.
For $a>0$ and $\bx_0\in\IR^d$ we denote by $B_a(\bx_0)=\{\bx\in\IR^d:|\bx-\bx_0|<a\}$ the ball with centre $\bx_0$ and radius $a$; if $\bx_0=\bzero$ we write $B_a=B_a(\bzero)$.
Given $R>0$ such that $\conj{\Omega_i}\subset B_R$,
let $D_R:=B_R\cap{\Omega_o}$ and $\Gamma_R:=\partial B_R=\{|\bx|=R\}$.
On $\Gamma_R$ the unit normal $\bn$ points outwards.
With $D$ denoting an open set or a $(d-1)$-dimensional manifold, $\N{\cdot}_{D}$ denotes the $L^2(D)$ norm for scalar or vector fields.
On $\Gamma$ and $\Gamma_R$, $\nabla_T$ denotes the tangential gradient.
To state the main results, we need to define the notions of \emph{star-shaped} and \emph{star-shaped with respect to a ball}.
\begin{defin}
(i) ${\Omega_i}$ is \emph{star-shaped with respect to the point $\bx_0$} if, whenever $\bx \in {\Omega_i}$, the segment $[\bx_0,\bx]\subset {\Omega_i}$.
\noindent (ii) ${\Omega_i}$ is \emph{star-shaped with respect to the ball $B_{a}(\bx_0)$} if it is star-shaped with respect to every point in $B_{a}(\bx_0)$.
\end{defin}
These definitions make sense even for non-Lipschitz ${\Omega_i}$, but when ${\Omega_i}$ is Lipschitz one can characterise star-shapedness with respect to a point or ball in terms of $(\bx-\bx_0)\cdot \bn(\bx)$ for $\bx\in\Gamma$.
\begin{lemma}\label{lem:star}
(i) If ${\Omega_i}$ is Lipschitz, then it is star-shaped with respect to $\bx_0$ if and only if $(\bx-\bx_0)\cdot\bn(\bx)\geq 0$ for all $\bx \in\Gamma$ for which $\bn(\bx)$ is defined.
\noindent (ii) ${\Omega_i}$ is star-shaped with respect to $B_{a}(\bx_0)$ if and only if
it is Lipschitz and
$(\bx-\bx_0) \cdot \bn(\bx) \geq {a}$ for all $\bx \in \Gamma$ for which $\bn(\bx)$ is defined.
\end{lemma}
\bpf
See \cite[Lemma 5.4.1]{AndreaPhD} or \cite[Lemma 3.1]{MaxwellPDE}.
\epf
In the rest of the paper, whenever ${\Omega_i}$ is star-shaped with respect to a point or ball, we assume (without loss of generality) that $\bx_0=\bzero$.
\subsection{The Helmholtz transmission problem.}
From the point of view of obtaining wavenumber-explicit bounds, we are interested in the case when the wavenumber is real. Nevertheless, in order to talk about resonance-free regions, we must also consider complex wavenumbers.
\begin{defin}\mythmname{Sommerfeld radiation condition}
Given $\varphi\in C^1(\IR^d\setminus B_R)$, for some ball $B_R=\{|\bx|<R\}$, and $\kappa\in \Com\setminus\{0\}$ with $\Im \kappa\geq 0$, we say that $\varphi$ satisfies the Sommerfeld radiation condition if
\beq
\label{eq:src}
\lim_{r\to\infty}r^{\frac{d-1}2}\left(\der{\varphi(\bx)}r-\ri\kappa \varphi(\bx)\right)=0
\eeq
uniformly in all directions, where $r=|\bx|$; we then write $\varphi\in\mathrm{SRC}(\kappa)$.
\end{defin}
Recall that when $\Im \kappa>0$, if $\varphi$ satisfies the Sommerfeld radiation condition, then $\varphi$ decays exponentially at infinity (see, e.g., \cite[Theorem 3.6]{CoKr:83}).
\begin{defin}\mythmname{The Helmholtz transmission problem}\label{def:HTP}
Let $k\in \Com\setminus\{0\}$ with $\Im k\geq 0$, and let ${n_i},{n_o},{a_i},{a_o},{A_D},{A_N}$ be positive real numbers.
Let $f_i\in L^2({\Omega_i})$, $f_o\in L^2({\Omega_o})$, $g_D\in H^1(\Gamma)$, $g_N\in L^2(\Gamma)$, and assume $f_o$ has compact support.
The Helmholtz transmission problem is: find $u\in H^1_{\mathrm{loc}}(\IR^d\setminus\Gamma)$ such that,
\begin{align}
\begin{aligned}
{a_i}\Delta u_i+k^2 {n_i} u_i &=f_i &&\;\text{in}\;{\Omega_i},\\
{a_o}\Delta u_o+k^2 {n_o} u_o &=f_o &&\;\text{in}\;{\Omega_o},\\
u_o&={A_D} u_i+g_D &&\;\text{on}\; \Gamma,\\
{a_o}{\partial_\bn} u_o&={A_N}{a_i}{\partial_\bn} u_i+g_N &&\;\text{on}\; \Gamma,\\
&&&u_o\in\mathrm{SRC}(k\sqrt{n_o/a_o}).
\end{aligned}
\label{eq:BVP}
\end{align}
\end{defin}
Four of the parameters ${n_i},{n_o},{a_i},{a_o},{A_D},{A_N}$ are redundant; in particular we can set either ${A_D}={a_i}={a_o}={n_o}=1$ or ${A_D}={A_N}={a_o}={n_o}=1$ and still cover all problems by rescaling the remaining coefficients, $u_i$, and the source terms.
Nevertheless, we keep all six parameters in \eqref{eq:BVP} since given a specific problem it is then easy to write it in the form \eqref{eq:BVP}, setting some parameters to one.
Some extensions to transmission problems more general than those in Definition~\ref{def:HTP} are discussed in Remark~\ref{rem:extensions}.
\bre\mythmname{Relation to acoustics and electromagnetics}\label{rem:physics}
Time-harmonic acoustic transmission problems are often written in the form
\begin{align}
\mathop{\rm div}\nolimits\Big(\frac1\rho\nabla u\Big)+\frac{\kappa^2}\rho u=F,
\qquad
u\in H^1(\IR^d),\qquad \frac1\rho\nabla u\in H(\mathop{\rm div}\nolimits;\IR^d),\quad
\label{eq:BVPrho}
\end{align}
and $u$ satisfies the Sommerfeld radiation condition, where $\rho(\bx)$ and $\kappa(\bx)$ are positive functions; see e.g.\ \cite[eq.~(1)]{HMK02}.
(Recall that $\bv\in H(\mathop{\rm div}\nolimits;\IR^d)$ if and only if $\bv|_{\Omega_i}\in H(\mathop{\rm div}\nolimits;\Oi)$, $\bv|_{\Omega_o}\in H(\mathop{\rm div}\nolimits;{\Omega_o})$ and $\bv|_{\Omega_i}\cdot\bn=\bv|_{\Omega_o}\cdot\bn$ in $H^{-1/2}(\Gamma)$.)
In the particular case where $\rho$ and $\kappa$ take two different values on ${\Omega_i}$ and ${\Omega_o}$, problem \eqref{eq:BVPrho} can be written in the form \eqref{eq:BVP} choosing, for example,
\[
{A_D}={A_N}=1,\quad
a=\frac1\rho,
\quad k = \kappa_o, \quad {n_o}=\frac{1}{\rho_o},\quad
{n_i}=\Big(\frac{\kappa_i}{\kappa_o}\Big)^2\frac{1}{\rho_i},\quad f=F,\quad g_D=g_N=0,
\]
or
\[
{A_D}={a_i}={a_o}={n_o}=1,\quad
{A_N}=\frac{\rho_o}{\rho_i},
\quad k = \kappa_o, \quad
{n_i}=\Big(\frac{\kappa_i}{\kappa_o}\Big)^2,\quad f=\rho F,\quad g_D=g_N=0.
\]
(More generally, one can choose any constant $k>0$ and then let $n=a\kappa^2/ k^2$.)
The time-harmonic Maxwell equations are
\begin{align}\label{eq:Maxwell}
\curl \bH+\ri k\varepsilon\bE=(\ri k)^{-1}\bJ,\qquad
\curl\bE-\ri k\mu\bH=\bzero
\qquad \;\text{in}\; \IR^3.
\end{align}
When all fields and parameters involved depend only on two Cartesian space variables, say $x$ and $y$, Equations \eqref{eq:Maxwell}
reduce to the (heterogeneous) Helmholtz equation in $\IR^2$.
In the transverse-magnetic (TM) mode, $\bJ$ and $\bE$ are given by $\bJ=(0,0,J_z(x,y))$ and $\bE=(0,0,$ $E_z(x,y))$, so \eqref{eq:Maxwell} reduce to a scalar equation for the third component of the electric field:
\begin{align*}
\mathop{\rm div}\nolimits\left(\frac1\mu\nabla E_z\right)+k^2\varepsilon E_z=-J_z.
\end{align*}
If the permittivity $\varepsilon$ and the permeability $\mu$ are constant in $\widetilde{\Omega_i}={\Omega_i}\times\IR\subset\IR^3$ and $\widetilde{\Omega_o}={\Omega_o}\times\IR\subset\IR^3$
for ${\Omega_i},{\Omega_o}\subset\IR^2$ as in \S\ref{sec:Notation}, then \eqref{eq:Maxwell} (supplemented with suitable radiation conditions) can be written as Problem \eqref{eq:BVP}
for $u=E_z$ in $\IR^2$ with $a=1/\mu$, $n=\varepsilon$, ${A_D}={A_N}=1$.
Vice versa, in the transverse-electric (TE) mode, $\bJ=(J_x(x,y),J_y(x,y),0)$, $\bH=(0,0,H_z(x,y))$ and
\begin{align*}
\mathop{\rm div}\nolimits\left(\frac1\varepsilon\nabla H_z\right)+k^2\mu H_z=(\ri k\varepsilon)^{-1}\Big(\der{J_x}y-\der{J_y}x\Big),
\end{align*}
so \eqref{eq:Maxwell} can be written as \eqref{eq:BVP} for $u=H_z$ with $a=1/\varepsilon$, $n=\mu$, ${A_D}={A_N}=1$.
Observe that for TM and TE modes, the parameters ${n_i}, {n_o}, {a_i}, {a_o}$ depend on properties of the medium through which the waves propagate, whereas $k$ depends on the wave itself.
\ere
\begin{defin}\mythmname{Scattering problem}
Let $k\in \Com\setminus\{0\}$ with $\Im k\geq 0$, and let ${n_i},{n_o},{a_i}$, ${a_o}$, ${A_D}$, ${A_N}$ be positive real numbers.
Let $u^I$ be a solution of ${a_o}\Delta u^I+k^2{n_o} u^I=0$ that is $C^\infty$ in a neighbourhood of $\conj{\Omega_i}$ (for example a plane wave, a circular or spherical wave, or a fundamental solution centred in ${\Omega_o}$).
Define the total field $u^T$ to be solution of
\begin{align}
\begin{aligned}
{a_i}\Delta u_i^T+k^2 {n_i} u_i^T &=0 &&\;\text{in}\;{\Omega_i},\\
{a_o}\Delta u_o^T+k^2 {n_o} u_o^T &=0 &&\;\text{in}\;{\Omega_o},\\
u_o^T&={A_D} u_i^T &&\;\text{on}\; \Gamma,\\
{a_o}{\partial_\bn} u_o^T&={A_N}{a_i}{\partial_\bn} u_i^T &&\;\text{on}\; \Gamma,\\
&&& (u_o^T-u^I)\in\mathrm{SRC}(k\sqrt{{n_o}/{a_o}}).
\end{aligned}
\label{eq:BVPuT}
\end{align}
The scattered field defined by $u:=u^T-u^I$ satisfies
\begin{align}
\begin{aligned}
{a_i}\Delta u_i+k^2 {n_i} u_i &=k^2\Big(\frac{a_i}{a_o}{n_o}-{n_i}\Big)u^I=:f_i &&\;\text{in}\;{\Omega_i},\\
{a_o}\Delta u_o+k^2 {n_o} u_o &=0 &&\;\text{in}\;{\Omega_o},\\
u_o&={A_D} u_i +({A_D}-1)u^I&&\;\text{on}\; \Gamma,\\
{a_o}{\partial_\bn} u_o&={A_N}{a_i}{\partial_\bn} u_i+({A_N}-1){a_i}{\partial_\bn} u^I &&\;\text{on}\; \Gamma,\\
&&& u_o\in\mathrm{SRC}(k\sqrt{{n_o}/{a_o}}).
\end{aligned}
\label{eq:BVPuS}
\end{align}
The scattering problem \eqref{eq:BVPuS} can therefore be written in the form \eqref{eq:BVP} for $f_i=k^2(\frac{a_i}{a_o}{n_o}-{n_i})u^I$, $f_o=0$, $g_D=({A_D}-1)u^I$ and $g_N=({A_N}-1){a_i}{\partial_\bn} u^I $.
\end{defin}
The next lemma, proved in Appendix \ref{sec:appA}, addresses the questions of existence, uniqueness, and regularity of the solution of \eqref{eq:BVP}.
\begin{lemma}\mythmname{Existence, uniqueness, and regularity}\label{lem:exist}
The Helmholtz transmission problem of Definition \ref{def:HTP} admits a unique solution $u\in H^1_{\mathrm{loc}}(\IR^d\setminus\Gamma)$.
Moreover $u_i,u_o\in H^1(\Gamma)$ and ${\partial_\bn} u_i,{\partial_\bn} u_o\in L^2(\Gamma)$.
\end{lemma}
The key point about Lemma \ref{lem:exist} is that, whilst the existence and uniqueness results are well known, the
regularity results $u_i,u_o\in H^1(\Gamma)$ and ${\partial_\bn} u_i,{\partial_\bn} u_o\in L^2(\Gamma)$ are currently only available in the literature in the case $d=3$, $f_i = f_o=0$, and $k\in \Rea$.
These results are consequences of the harmonic-analysis results about layer potentials in \cite{CoMcMe:82, Ve:84, EsFaVe:92}
and the regularity results of Ne\v{c}as for strongly elliptic systems in \cite[\S5.1.2 and \S5.2.1]{Ne:67}, \cite[Theorem 4.24]{MCL00}, and are needed to apply the Morawetz identities of \S\ref{sec:Morawetz} to the solution of the transmission problem when $\Oi$ is Lipschitz.
Finally, recall that $k$ is a resonance of the boundary value problem \eqref{eq:BVP} if there exists a non-zero $u_o$ satisfying \eqref{eq:BVP} with $f_i=f_o=g_D=g_N=0$ and the Sommerfeld radiation condition replaced by
\beq\label{eq:BU1}
u_o (\bx) = \frac{{\ee}^{\ri k\sqrt{n_o/a_o}r}}{r^{(d-1)/2}}\left( u_\infty(\widehat{\bx}) + {\mathcal O}\left(\frac{1}{r}\right)\right) \quad\text{ as } r:= |\bx|\rightarrow \infty,
\eeq
for some function $u_\infty$ of $\widehat{\bx}=\bx/|\bx|$ (the far-field pattern); see, e.g., \cite[\S3.6 and Theorem 4.9]{DyZw:16} or \cite[\S2]{PaK09}.
The uniqueness result of Lemma \ref{lem:exist} implies that any resonance must have $\Im k<0$, and thus \eqref{eq:BU1} implies that $u_o$ grows exponentially at infinity.
\section{Main results}\label{sec:result}
\subsection{Bounds on the solution (answering Q1)}
\label{sec:resultQ1}
In this section we assume that $k>0$, but analogous results hold for $k<0$ as well.
We recall that the main assumptions we stipulate, namely \eqref{eq:cond} and \eqref{eq:gDgNcond} below, mean that the wavenumber is larger in the exterior region ${\Omega_o}$ than in the interior region ${\Omega_i}$, or equivalently that the wavelength $\lambda=\frac{2\pi\sqrt{a}}{k\sqrt n}$ is longer in ${\Omega_i}$ than in ${\Omega_o}$.
We recall from \S\ref{sec:Notation} that the notation $\N{\cdot}_D$ stands for the $L^2(D)$ norm.
\begin{theorem}\label{thm:FirstBound}
Assume that $\Oi$ is star-shaped,
\beq\label{eq:cond}
\frac{{n_i}}{{n_o}}\leq \frac{A_D}{A_N}\leq \frac{a_i}{a_o},
\eeq
$g_N=g_D=0$ and $k>0$.
Given $R>0$ such that $\operatorname{supp} f_o \subset B_R$, recall that $D_R:= {\Omega_o} \cap B_R$. The solution of BVP \eqref{eq:BVP} then satisfies
\begin{align}
\begin{aligned}\label{eq:bound1}
&a_i\N{\nabla u_i}_{{\Omega_i}}^2+k^2n_i\N{u_i}^2_{{\Omega_i}}
+\frac1{A_D A_N}\left(a_o\N{\nabla u_o}_{D_R}^2+k^2n_o\N{u_o}^2_{D_R}\right)\\
&\hspace{2cm}\le
\bigg[\frac{4\diam(\Omega_i)^2}{a_i}+\frac{1}{n_i}\left(2\sqrt{\frac{n_o}{a_o}} R +\frac{d-1}{k}\right)^2\bigg]
\N{f_i}_{\Omega_i}^2\\
&\hspace{30mm}+\frac1{A_D A_N}
\bigg[\frac{4R^2}{a_o}+\frac{1}{{n_o}}\left(2\sqrt{\frac{n_o}{a_o}} R +\frac{d-1}{k}\right)^2\bigg]\N{f_o}_{D_R}^2.
\end{aligned}
\end{align}
\end{theorem}
The bound \eqref{eq:bound1} is valid for all star-shaped Lipschitz $\Omega_i$, but the constants on the right-hand side only depend on $\Oi$ via $\diam(\Omega_i)$.
As highlighted in \S\ref{sec:intro}, we expect that this uniformity of the bound with respect to the geometry makes it particularly suitable for applications in quantifying how uncertainty in the shape of $\Oi$ affects the solution (as done for small $k$ in \cite{HiScScSc:18}).
In Theorem \ref{thm:FirstBound} we assumed that the boundary source terms $g_D$ and $g_N$ vanish.
In the next theorem we consider general $g_D\in H^1(\Gamma)$ and $g_N\in L^2(\Gamma)$.
In order to do this, we need to assume that the inequalities \eqref{eq:cond} on the parameters are strict and that ${\Omega_i}$ is star-shaped with respect to a ball.
\begin{theorem}\label{thm:nonzero}
Assume that ${\Omega_i}$ is star-shaped with respect to $B_{\gamma\diam({\Omega_i})}$ for some $0<\gamma\le1/2$,
\begin{equation}\label{eq:gDgNcond}
\frac{n_i}{n_o}<\frac{A_D}{A_N}<\frac{a_i}{a_o},
\end{equation}
$k>0$ and $R>0$ is such that $\operatorname{supp} f_o\subset B_R$.
Then the solution of \eqref{eq:BVP} satisfies
\begin{align}\label{eq:gDgNbound}
\begin{aligned}
&a_i\N{\nabla u_i}_{{\Omega_i}}^2+k^2n_i\N{u_i}^2_{{\Omega_i}}
+\frac1{A_D A_N}\left(a_o\N{\nabla u_o}_{D_R}^2+k^2n_o\N{u_o}^2_{D_R}\right)\\
&\hspace{0mm}\le
\bigg[\frac{4\diam(\Omega_i)^2}{a_i}+\frac{1}{n_i}\left(2\sqrt{\frac{n_o}{a_o}} R +\frac{d-1}{k}\right)^2\bigg]
\N{f_i}_{\Omega_i}^2\\
&\hspace{7mm}+\frac1{A_D A_N}
\bigg[\frac{4R^2}{a_o}+\frac{1}{{n_o}}\left(2\sqrt{\frac{n_o}{a_o}} R +\frac{d-1}{k}\right)^2\bigg]\N{f_o}_{D_R}^2\\
&\hspace{7mm}+2\bigg[\frac{\diam({\Omega_i}){a_o}\big((3+2\gamma) {a_i} {A_N} + 2 {a_o}{A_D}\big)}{{A_D}{A_N}\gamma({a_i}{A_N}-{a_o}{A_D})}\bigg]\N{\nabla_T{g_D}}_\Gamma^2\\
&\hspace{7mm}
+2\bigg[\frac{2\diam({\Omega_i}) n_o^2}{\gamma{A_N}({n_o}{A_D}-{n_i}{A_N})}
+\frac{(3+\gamma){a_i}\Big({n_o} R^2+\frac{{a_o}(d-1)^2}{4k^2}\Big)}{\gamma{A_D}\diam({\Omega_i})({a_i}{A_N}-{a_o}{A_D})}\bigg]
k^2\N{{g_D}}_\Gamma^2
\\
&\hspace{7mm}+\frac{2}{\gamma{a_o}{A_N}{A_D}}\bigg[
\frac{\diam({\Omega_i})(4{a_i}{A_N}+2{a_o}{A_D})}{{a_i}{A_N}-{a_o}{A_D}}
+\frac{2{A_D}\Big({n_o} R^2+\frac{{a_o}(d-1)^2}{4k^2}\Big)}{\diam({\Omega_i})({n_o}{A_D}-{n_i}{A_N})}
\bigg]\N{g_N}_\Gamma^2.
\end{aligned}
\end{align}
\end{theorem}
Note that each of the coefficients in front of the norms on the right-hand side of the bound \eqref{eq:gDgNbound} is a
non-increasing function of $k$, apart from the coefficient $k^2$ multiplying $\N{{g_D}}_\Gamma^2$.
\begin{cor}\mythmname{Theorems~\ref{thm:FirstBound} and \ref{thm:nonzero} applied to the scattering problem \eqref{eq:BVPuS}}
\label{cor:ScatteringBound}
The solution $u$ of the scattering problem \eqref{eq:BVPuS} with ${n_o}={a_o}={A_D}={A_N}=1$, ${n_i}\le1\le{a_i}$, and $\Oi$ star-shaped satisfies
\begin{align*}
{a_i}\N{\nabla u_i}_{{\Omega_i}}^2+k^2n_i\N{u_i}^2_{{\Omega_i}}
&+\N{\nabla u_o}_{D_R}^2+k^2\N{u_o}^2_{D_R}\\
&\le\bigg[\frac{4\diam(\Omega_i)^2}{a_i}+\frac{1}{n_i}\left(2 R +\frac{d-1}{k}\right)^2\bigg]
k^4({a_i}-{n_i})^2\N{u^I}_{\Omega_i}^2.
\end{align*}
The solution $u$ of the scattering problem \eqref{eq:BVPuS} with ${n_o}={a_o}={a_i}={A_D}=1$, ${n_i}<1/{A_N}<1$ and $\Oi$ star-shaped with respect to $B_{\gamma\diam({\Omega_i})}$ (for some $0<\gamma\le1/2$) satisfies
\begin{align*}
\N{\nabla u_i}_{{\Omega_i}}^2+&k^2n_i\N{u_i}^2_{{\Omega_i}}
+\frac1{A_N}\Big(\N{\nabla u_o}_{D_R}^2+k^2\N{u_o}^2_{D_R}\Big)\\
&\le\bigg[4\diam(\Omega_i)^2+\frac{1}{n_i}\left(2 R +\frac{d-1}{k}\right)^2\bigg]
k^4(1-{n_i})^2\N{u^I}_{\Omega_i}^2\\
&\quad+\frac4{\gamma{A_N}}\bigg[
\frac{\diam({\Omega_i})(2{A_N}+1)}{{A_N}-1}
+\frac{R^2+\frac{(d-1)^2}{4k^2}}{\diam({\Omega_i})(1-{n_i}{A_N})}
\bigg]({A_N}-1)^2\N{{\partial_\bn} u^I}_\Gamma^2.
\end{align*}
\end{cor}
\bre\mythmname{Extensions of Theorems \ref{thm:FirstBound} and \ref{thm:nonzero}}\label{rem:extensions}
We have only considered the case of a single penetrable obstacle with $a_i, a_o, n_i$, and $n_o$ all real and constant, but analogues of Theorems~\ref{thm:FirstBound} and \ref{thm:nonzero} hold in the following cases (and also when the cases are combined).
\ben
\item When there are multiple ``layers'', each with constant $a$ and $n$, and with the boundaries of the layers star-shaped with respect to the origin (for the analogue of Theorem \ref{thm:FirstBound}) or star-shaped with respect to balls centred at the origin (for the analogue of Theorem \ref{thm:nonzero}); in this case the conditions \eqref{eq:cond}/\eqref{eq:gDgNcond} must hold at each interface.
\item When $a_i, a_o, n_i$, and $n_o$ are functions of position and satisfy conditions that ensure nontrapping of rays.
\item When ${\Omega_i}$ contains an impenetrable star-shaped Dirichlet scatterer.
\item When ${\Omega_o}$ is truncated by a star-shaped boundary and the radiation condition is approximated by an impedance boundary condition.
\item When $n_i$ is complex with $0<\Im n_i\leq \delta/k$, for $\delta$ a sufficiently small constant; this models a particular case of a lossy scatterer in a lossless background.
\item When Condition \eqref{eq:cond} is partly violated, namely when ${n_i}/{n_o}$ is slightly larger (in a $k$-dependent way) than ${A_D}/{A_N}$ and ${a_i}/{a_o}$.
\een
The extension to Case 1 is clear from the proofs in \S\ref{sec:proofs}, the extension to Case 2 and 3 are covered in \cite{GrPeSp:18}, the extension to the Case 4 is discussed in Remark~\ref{rem:impbc}, the extension to Case 5 is discussed in Remarks \ref{rem:complex} and \ref{rem:complex2}, and the extension to Case 6 is described in Proposition~\ref{thm:BoundViaTrace2}.
\ere
\subsection{Resonance-free strip (answering Q2)}
We let $R(k)$ denote the solution operator of the Helmholtz transmission problem of Definition \ref{def:HTP} when $g_D$ and $g_N$ are both zero, i.e.
\beqs
R(k): \left(
\begin{array}{c}
f_i\\
f_o
\end{array}
\right)
\mapsto
\left(
\begin{array}{c}
u_i\\
u_o
\end{array}
\right).
\eeqs
Although $R(k)$ depends also on the parameters $n_o, n_i, a_o, a_i, A_D,$ and $A_N$, in what follows we consider these fixed and consider $k$ as variable.
Let $\chi_1, \chi_2 \in C_0^\infty(\Rea^d)$ such that $\chi_j\equiv 1$ in a neighbourhood of $\Oi$, and let
\beq\label{eq:Rimp}
{R_{\chi}}(k):= \chi_1 R(k) \chi_2;
\eeq
i.e.~${R_{\chi}}(k)$ is the cut-off resolvent. Then
\beqs
{R_{\chi}}(k): L^2({\Omega_i})\oplus L^2({\Omega_o}) \rightarrow H^1({\Omega_i})\oplus H^1({\Omega_o})
\eeqs
for $k\in \Rea\setminus\{0\}$.
\bth\mythmname{Pole-free strip beneath the real axis}\label{thm:res}
The operator family ${R_{\chi}}(k)$ defined above is holomorphic on $\Im k>0$.
Assume that $\Oi$ is star-shaped and the condition \eqref{eq:cond} is satisfied.
Then, there exist $C_j>0$, $j=1,2,3$, such that ${R_{\chi}}(k)$
extends from the upper-half plane to a holomorphic operator family on
$|\Re k|\geq C_1 , \Im k\geq -C_2$
satisfying the estimate
\beq\label{eq:55}
\N{{R_{\chi}}(k)}_{L^2({\Omega_i})\oplus L^2({\Omega_o}) \rightarrow L^2({\Omega_i})\oplus L^2({\Omega_o})} \leq\frac{C_3}{|k|}
\eeq
in this region.
\end{theorem}
This follows from the bound of Theorem \ref{thm:FirstBound} using the result \cite[Lemma 2.3]{Vo:99}.
Recall that this result of Vodev takes a resolvent estimate on the real axis and converts it into a resolvent estimate in a strip beneath the real axis.
In principle, one could go into the details of this result and make the width of the strip explicit in the constant from the bound on the real axis. Since Theorem \ref{thm:FirstBound} gives an explicit expression for that constant, we would then have an explicit lower bound for the width of the strip.
\subsection{Discussion of the main results in the context of previous results}\label{sec:previous}
We now discuss Theorems \ref{thm:FirstBound}--\ref{thm:nonzero} and \ref{thm:res} in the context of the results summarised in (a)--(e) of \S\ref{sec:intro}
(i.e.~\cite{PoVo:99,PoVo:99a,CaPoVo:99,CaPoVo:01,Be:03,Ga:15,CLP12,Cap12}) and other related work.
We focus on results about the Helmholtz transmission problem of Definition \ref{def:HTP}, i.e.~one penetrable obstacle and piecewise-constant wavenumber. Many of these results apply to the more-general case when the wavenumber is piecewise-smooth, but we focus on the piecewise-constant case.
There is also a substantial literature on the Helmholtz equation with continuous wavenumber, including \cite{BlKa:77} and \cite{PeVe:99}; for a survey of these result we refer the reader to \cite[\S2.4]{GrPeSp:18}.
At the end of this section we briefly discuss (i) truncated Helmholtz transmission problems (in Remark~\ref{rem:trunc}), (ii) Helmholtz transmission problems with piecewise-constant wavenumber but \emph{not} involving a bounded obstacle (in Remark \ref{rem:rough}), and (iii) Helmholtz transmission problems when ${n_i}\in \Com$ with $\Im {n_i} >0$ (in Remark \ref{rem:complex}).
The results summarised in (a)--(d) of \S\ref{sec:intro} all consider the case when
$a_i=a_o=n_o=A_D=1$ and $g_D=g_N=0$; that is, the BVP
\begin{align}
\begin{aligned}
\Delta u_i+k^2 n_i u_i &=f_i &&\;\text{in}\;{\Omega_i},\\
\Delta u_o+k^2 u_o &=f_o &&\;\text{in}\;{\Omega_o},\\
u_o&=u_i &&\;\text{on}\; \Gamma,\\
{\partial_\bn} u_o&={A_N}{\partial_\bn} u_i &&\;\text{on}\; \Gamma,\\
&&&u_o\in\mathrm{SRC}(k).
\end{aligned}\label{eq:BVP2}
\end{align}
The results summarised in (e) of \S\ref{sec:intro} consider \eqref{eq:BVP2} with $A_N=1$; these results are in a slightly different direction to those of (a)--(d) (involving bounds in different norms) and so we discuss them separately in Remark \ref{rem:Capdeboscq} below.
In this discussion we use the notation for the cut-off resolvent ${R_{\chi}}(k)$, and observe that the bound \eqref{eq:bound1} in the case of the BVP \eqref{eq:BVP2} is essentially equivalent to the following bound: given $k_0>0$ there exist $C_m, m=0,1,$ such that
\beq\label{eq:res1}
\N{{R_{\chi}}(k)}_{L^2 \rightarrow H^m} \leq C_m k^{m-1} \quad\text{for all } k\geq k_0,
\eeq
where $L^2$ and $H^0$ denote $L^2({\Omega_i})\oplus L^2({\Omega_o})$, $H^1$ denotes $H^1({\Omega_i})\oplus H^1({\Omega_o})$, and the constants $C_m$ are given explicitly in terms of $n_i, A_N$, $d$, $\diam({\Omega_i})$, and $R$ (with $R$ such that the support of the cut-off function $\chi_1$ appearing in the definition of ${R_{\chi}}(k)$ \eqref{eq:Rimp} is contained in $B_R$).
Once the bound
\beq\label{eq:res2}
\N{{R_{\chi}}(k)}_{L^2 \rightarrow L^2} \leq C_0 k^{-1} \quad\text{for all } k\geq k_0,
\eeq
is proven, Green's identity can be used to prove the $L^2 \rightarrow H^1$ bound, with the constant $C_1$ in \eqref{eq:res1} then given explicitly in terms of
$n_i, A_N$, $d$, $\diam({\Omega_i})$, $R$, and $C_0$ (the analogous argument for scattering by impenetrable Dirichlet or Neumann obstacles is given in, e.g., \cite[Lemma~2.2]{Sp2013a}).
Cardoso, Popov, and Vodev \cite{CaPoVo:99} proved the bound \eqref{eq:res2}
when $\Oi$ is a smooth, convex obstacle with strictly positive curvature; the existence of a resonance-free strip then followed from Vodev's result in \cite{Vo:99}.
The bound in \cite{CaPoVo:99} is proved under the assumptions $n_i<1$ and $A_N>0$ and
the dependence of the constant $C_0$ on $n_i$ and $A_N$ is not given.
The conditions $n_i<1$ and $A_N>0$ are less restrictive than our condition \eqref{eq:cond}, which in this situation is $n_i \leq 1/A_N\le 1$
(see Remark \ref{rem:unnatural} below for how this condition appears in our proof).
The particular case when $\Oi$ is a ball shows that a strip is the largest region one can prove is free of resonances in the case $n_i<1$ and $A_N>0$.
This was known in \cite{PoVo:99a}, but the recent results of Galkowski \cite{Ga:15} in the case when $\Oi$ is $C^\infty$ with strictly positive curvature include bounds on the width of the resonance-free strip in terms of appropriate averages of the reflectivity and chord lengths of the billiard ball trajectories in $\Oi$ \cite[Theorem 1]{Ga:15}, and these bounds are sharp when $\Oi$ is a ball \cite[\S12]{Ga:15}.
Furthermore, these results (which build on earlier work by Cardoso, Popov, and Vodev~\cite{CaPoVo:01}) show that if ${n_i} <1$ and $\sqrt{{n_i}}<1/A_N$, then the resonances themselves lie in a strip (i.e.~there exist $C_1, C_2>0$ such that the resonances $k$ satisfy $-C_2 \leq \Im k \leq -C_1$).
Popov and Vodev \cite{PoVo:99} showed that when $n_i>1$ and $A_N>0$ there exists a sequence of resonances tending to the real axis and one has super-algebraic growth of $\|{R_{\chi}}(k)\|_{L^2\rightarrow L^2}$ through a sequence of complex wavenumbers with super-algebraically small imaginary parts; we recap this result in more detail in \S\ref{sec:PoVo}. Exponential growth in $k$ is the fastest growth possible by the results of Bellassoued \cite{Be:03}. Indeed, he proved that for any $n_i>0$ and any $A_N>0$ there exist $\widetilde{C}_j$, $j=1,\ldots,5$, such that
\beqs
\N{{R_{\chi}}(k)}_{L^2\rightarrow L^2} \leq \widetilde{C}_1 \exp(\widetilde{C}_2 |\Re k|)
\eeqs
in the region
\beqs
\Re k\geq \widetilde{C}_3, \quad \Im k \geq - \widetilde{C}_4 \exp(-\widetilde{C}_5 |\Re k|),
\eeqs
implying there is always an exponentially-small region free of resonances.
\bre\mythmname{Transmission problems when the obstacle is a 2- or 3-d ball}\label{rem:Capdeboscq}
When ${\Omega_i}$ is a ball, the solution of BVP \eqref{eq:BVP} can be written explicitly using separation of variables and expansions in Fourier--Bessel functions.
Capdeboscq and co-authors considered this problem for BVP \eqref{eq:BVP2} with ${A_N}=1$ for $d=2$ in \cite{Cap12} and for $d=3$ in \cite{CLP12}, with the main results summarised in \cite[Chapter~5]{AC16}.
These results differ from the resolvent estimates discussed above, since they involve Sobolev norms of arbitrary order on spherical surfaces in $D_R$ (hence the radial derivative term in the $H^1(D_R)$ norm is not directly controlled, nor the $H^1({\Omega_i})$ norm).
Some of these results describe in detail the behaviour of the solution when ${n_i}>1$, including the super-algebraic growth of the solution operator through a sequence of real wavenumbers, and we recap them in \S\ref{sec:blowup} below.
\ere
\bre\mythmname{Truncated transmission problems}
\label{rem:trunc}
When solving scattering problems on unbounded domains numerically, it is common to truncate the domain and impose a boundary condition to approximate the Sommerfeld radiation condition; the simplest such boundary condition is the impedance condition ${\partial_\bn} u - \ri k u=0$ (see, e.g., the discussion in \cite[\S5.1]{BaSpWu:16} and the references therein).
The truncated transmission problem is therefore equivalent to the interior impedance problem with piecewise-constant wavenumber.
The paper \cite{CaVo:10} contains the analogue of the results in \cite{CaPoVo:99} for the truncated transmission problem (and in particular the bound \eqref{eq:res2} above).
Wavenumber-explicit bounds on this BVP have recently been obtained in \cite{Ch:16, BaChGo:16, SaTo:17, GrSa:18, GrPeSp:18} for real $n_i$ and \cite{OhVe:16} for complex $n_i$.
Apart from \cite{SaTo:17}, these recent investigations all use Morawetz identities (either explicitly or implicitly), with the impedance boundary condition dealt with as described in Remark \ref{rem:impbc} below (again, either explicitly or implicitly).
The investigation \cite{SaTo:17} concerns the interior impedance problem in 1-d with piecewise-constant wavenumber, and uses
the fact that, in this case, the Green's function can be expressed in terms of the solution of a linear system.
\ere
\bre\mythmname{Transmission problems not involving a bounded obstacle}\label{rem:rough}
Identities related to those of Morawetz have also been used to prove results about (i) scattering by rough surfaces when the wave-number is piecewise constant and (ii) the transmission problem through an infinite penetrable layer (where in both cases the wavenumbers satisfy appropriate analogues of \eqref{eq:cond}).
For (i) see \cite{ChZh:98, ZhCh:98, ZhCh:98a,HuLiQuZh:15}, and \cite[Chapter 2]{Th:06}
(these works consider more general classes of wave-number that include piecewise-constant cases), and for (ii) see \cite{ChZh:99, Fo:05,LeRi:10}, and \cite[Chapter 4]{Th:06}.
The identities used are essentially \eqref{eq:morid1} below with $\beta=0$ and the vector field $\bx$ replaced by a vector field perpendicular to the surface/layer.
\ere
\bre\mythmname{Transmission problems when $\Im {n_i}>0$}
\label{rem:complex}
Remark \ref{rem:complex2} below shows how analogues of Theorems \ref{thm:FirstBound}--\ref{thm:nonzero} hold when $n_i \in \Com$ with $0<\Im n_i\leq \delta/k$ and $\delta$ is a sufficiently small constant (the occurrences of $n_i$ in the conditions \eqref{eq:cond}, \eqref{eq:condTracea}, and \eqref{eq:gDgNcond} are then replaced by $\Re n_i$).
This condition on the imaginary part is similar to that in \cite{NV12}, with this paper considering
the Helmholtz transmission problem when $\Oi$ is the union of two concentric balls, modelling an inhomogeneity (with $n_i$ real) surrounded by an absorbing layer (with $n_i$ complex and $\Im n_i$ proportional to $1/k$).
Like the works \cite{AC16, Cap12, CLP12} discussed above, \cite{NV12} is interested in bounding the solution away from $\Oi$, but instead of using separation of variables, \cite{NV12} uses Morawetz identities to prove its bounds.
The paper \cite{HPV07} proves bounds analogous to those in \cite{AC16, Cap12, CLP12} in the case when $\Oi$ is a ball and $\Im n_i>0$, again using separation of variables and bounds on Bessel and Hankel functions.
\ere
\section{Morawetz identities}\label{sec:Morawetz}
In this section we prove the identities that are the basis of the proofs of Theorems \ref{thm:FirstBound} and \ref{thm:nonzero}. The history of these identities is briefly discussed in Remark \ref{rem:biblio} below.
\ble\mythmname{Morawetz-type identity}\label{le:morid1}
Let $D\subset \Rea^d$, $d\geq 2$. Let $v\in C^2(D)$ and let $a,n,\alpha,$ $\beta\in\Rea$.
Let
\beqs
{\mathcal L}_{a,n} v:= a\Delta v + k^2 n\, v,
\eeqs
and let
\beq\label{eq:cM}
{\mathcal M} v:= \bx\cdot \nabla v - \ri k \beta v + \alpha v.
\eeq
Then
\begin{align}\nonumber
2 \Re \big\{\overline{{\mathcal M} v } \,{\mathcal L}_{a,n} v \big\} = &\, \nabla \cdot \bigg[ 2 \Re\big\{\overline{{\mathcal M} v}\, a \nabla v\big\} + \bx\big(k^2n |v|^2 -a|\nabla v|^2\big)\bigg] \\
&\hspace{10ex}
- (2\alpha -d +2) a|\nabla v|^2 -(d-2\alpha)nk^2 |v|^2.
\label{eq:morid1}
\end{align}
\ele
\bpf
This follows from expanding the divergence on the right-hand side of \eqref{eq:morid1}. Note that the identity \eqref{eq:morid1} is a special case of both \cite[Lemma 2.1]{SpKaSm:15} (where the multiplier ${\mathcal M} u$ is generalised) and
\cite{GrPeSp:18} (where the operator ${\mathcal L}_{a,n}$ is generalised).
\epf
The proofs of the main results are based on integrating the identity \eqref{eq:morid1} over ${\Omega_i}$ and $D_R(:= {\Omega_o}\cap B_R)$ and using the divergence theorem. Our next result, therefore, is an integrated version of \eqref{eq:morid1}. To state this result it is convenient to define the space
\beq\label{eq:V}
V(D):= \bigg\{
v\in H^1(D) :\; \Delta v\in L^2(D), \;{\partial_\bn} v \in L^2(\partial D),\; v \in H^1(\partial D)
\bigg\},
\eeq
where $D$ is a bounded Lipschitz open set with outward-pointing unit normal vector $\bnu$.
\begin{lemma}
\mythmname{Integrated form of the Morawetz identity \eqref{eq:morid1}}
\label{lem:morid1int}
Let $D$ be a bounded Lipschitz open set, with boundary $\partial D$ and outward-pointing unit normal vector $\bnu$.
If $v \in V(D)$, $a,n,\alpha,\beta\in\Rea$, then
\begin{align}\nonumber
&\int_D 2 \Re \big\{\overline{{\mathcal M} v } \,{\mathcal L}_{a,n} v \big\}
+ (2\alpha -d +2) a|\nabla v|^2 +(d-2\alpha) n k^2 |v|^2
\\
&=\int_{\partial D}(\bx\cdot\bnu)\left(a \left|{\partial_\bn} v\right|^2 -a|\nabla_T v|^2 + k^2 n |v|^2\right)
+ 2\Re\Big\{\big( \bx\cdot\overline{\nabla_T v}+ \ri k \beta \overline{v} + \alpha \overline{v}\big)a {\partial_\bn} v\Big\}.
\label{eq:morid1int}
\end{align}
\ele
\begin{proof}
If $v\in C^\infty(\conj D)$, then \eqref{eq:morid1int} follows from divergence theorem
$\int_D \nabla \cdot \bF = \int_{\partial D} \bF \cdot \bnu$.
By \cite[Lemmas 2 and 3]{CoD98}, $C^\infty(\conj D)$ is dense in $V(D)$ and the result then follows
since \eqref{eq:morid1int} is continuous in $v$ with respect to the topology of $V(D)$.
\epf
The proofs of the main results use different multipliers in different domains. More precisely, we use
\begin{align*}
A_D A_N \bigg(& \bx\cdot \nabla u -\ri kR \sqrt{\frac{n_o}{a_o}} u + \frac{d-1}{2}u\bigg) \quad\text{ in } {\Omega_i},\\
&\bx\cdot \nabla u -\ri kR \sqrt{\frac{n_o}{a_o}} u + \frac{d-1}{2}u\,\,\,\,\quad\text{ in } D_R,\text{ and }\\
&\bx\cdot \nabla u -\ri kr \sqrt{\frac{n_o}{a_o}} u + \frac{d-1}{2}u\qquad\text{ in } \Rea^d\setminus D_R,
\end{align*}
where $R$ is the radius of the ball in which we bound the solution, and $r:=|\bx|$.
The first two of these three multipliers are multiples of ${\mathcal M} u$, as defined in \eqref{eq:cM}, with $\alpha=\frac{d-1}2$ and $\beta=R\sqrt{{n_o}/{a_o}}$; the third one is slightly different in that the coefficient $\beta=r\sqrt{{n_o}/{a_o}}$ depends on the position vector. Therefore, the identity arising from this last multiplier is not covered by Lemma \ref{le:morid1} but is given in the following lemma.
\begin{lemma}
\mythmname{Morawetz--Ludwig identity, \cite[Equation 1.2]{MoLu:68}}
\label{le:ML}
Let $v \in C^2(D)$ for some $D\subset \Rea^d$, $d\geq 2$.
Let $\kappa\in\IR$, ${\mathcal L} v:=(\Delta +\kappa^2)v$ and let
\beq\label{Malpha}
{\mathcal M}_\alpha v := r\left(v_r -\ri \kappa v + \frac{\alpha}{r}v\right),
\eeq
where $\alpha \in \Rea$ and $v_r=\bx\cdot \nabla v/r$. Then
\bal\nonumber
2\Re\{ \overline{{\mathcal M}_\alpha v} {\mathcal L} v\} =
&\,\nabla \cdot \bigg[2\Re \left\{\overline{{\mathcal M}_{\alpha} v} \nabla v\right\}
+ \left(\kappa^2|v|^2 - |\nabla v|^2 \right)\bx\bigg] \\&+ \big(2\alpha -(d-1)\big)\big(\kappa^2 |v|^2 - |\nabla v|^2\big) - \big(|\nabla v|^2 -|v_r|^2\big)- \big| v_r -\ri \kappa v\big|^2.
\label{eq:ml2d}
\end{align}
\end{lemma}
The Morawetz--Ludwig identity \eqref{eq:ml2d} is a variant of the identity \eqref{eq:morid1} with $a=1$, $n=1$, $k=\kappa$, and $\beta=r$ (instead of being a constant); for a proof, see \cite{MoLu:68}, \cite[Proof of Lemma~2.2]{SpChGrSm:11}, or \cite[Proof of Lemma 2.3]{SpKaSm:15}.
As stated above, we use the Morawetz--Ludwig identity in $\Rea^d\setminus B_R$ (it turns out that this identity ``takes care'' of the contribution from infinity). It is convenient to encode the application of this identity in $\Rea^d\setminus B_R$ in the following lemma (slightly more general versions of which appear in \cite[Lemma 2.1]{CWM08} and
\cite{GrPeSp:18}).
\ble\mythmname{Inequality on $\Gamma_R$ used to deal with the contribution from infinity} \label{lem:2.1}
Let $u$ be a solution of the homogeneous Helmholtz equation ${\mathcal L} u=0$ in $\Rea^d\setminus \overline{B_{R_0}}$ (with $d\geq 2$), for some $R_0>0$, satisfying the Sommerfeld radiation condition.
Then, for $R>R_0$,
\beq\label{eq:2.1}
\int_{\Gamma_{R}} R\left( \left|\pdiff{u}{r}\right|^2 - |\nabla_{T} u|^2 + \kappa^2 |u|^2\right) - 2 \kappa R\, \Im \int_{\Gamma_R} \bar{u} \pdiff{u}{r} + (d-1)\Re \int_{\Gamma_R}\bar{u}\pdiff{u}{r} \leq 0,
\eeq
where $\nabla_{T}$ is the tangential gradient on $r=R$ (recall that this is such that $\nabla v = \nabla_T v + \widehat{\bx}v_r$ on $r=R$).
\ele
\bpf
We now integrate \eqref{eq:ml2d} with $v=u$ and $2\alpha =d-1$ over $B_{R_1}\setminus B_R$, use the divergence theorem, and then let $R_1\rightarrow \infty$
(note that using the divergence theorem is allowed since $u$ is $C^\infty$ by elliptic regularity).
Writing the identity \eqref{eq:ml2d} as $\nabla\cdot\bQ(v)=P(v)$, we have that
if $u$ is a solution of ${\mathcal L} u=0$ in $\Rea^d\setminus \overline{B_{R_1}}$ satisfying the Sommerfeld radiation condition \eqref{eq:src}, then
\beqs
\int_{\Gamma_{R_1}} \bQ(u) \cdot\widehat{\bx}
\rightarrow 0 \quad \text{ as } R_1\rightarrow \infty
\eeqs
(independent of the value of $\alpha$ in the multiplier ${\mathcal M}_\alpha u$); see \cite[Proof of Lemma 5]{MoLu:68}, \cite[Lemma~2.4]{SpChGrSm:11}.
Note that, although the set-up of \cite{SpChGrSm:11} is for $d=2, 3$, the proof of \cite[Lemma~2.4]{SpChGrSm:11} holds for $d\geq 2$.
Then, using the decomposition $\nabla v = \nabla_T v + \widehat{\bx}v_r$ on the integral over $\Gamma_R$ (or equivalently the right-hand side of \eqref{eq:morid1int}),
we obtain that
\begin{align*}
\int_{\Gamma_R} \bQ(u) \cdot\widehat{\bx} &=
\int_{\Gamma_R} R\left( \left|\pdiff{u}{r}\right|^2 - |\nabla_T u|^2+ \kappa^2 |u|^2\right) - 2 \kappa R\, \Im \int_{\Gamma_R} \bar{u} \pdiff{u}{r} + (d-1)\Re \int_{\Gamma_R}\bar{u}\pdiff{u}{r} \nonumber \\
& = -\int_{\Rea^d\setminus B_R}\left(\big(|\nabla u|^2 -|u_r|^2\big)+ \left|
u_r - \ri \kappa u \right|^2\right)\leq 0.
\end{align*}
\epf
\bre\mythmname{Far-field impedance boundary condition}\label{rem:impbc}
If the infinite domain ${\Omega_o}$ is truncated, and the radiation condition approximated by an impedance boundary condition, then
an analogous inequality to that in Lemma \ref{lem:2.1} holds; see \cite[Lemma~4.6]{GrPeSp:18}.
This analogous inequality allows one to extend the results of Theorems \ref{thm:FirstBound} and \ref{thm:nonzero} to this truncated BVP (as mentioned in Remark \ref{rem:extensions} above); see \cite{GrPeSp:18} for more details.
\ere
\bre\mythmname{Bibliographic remarks}\label{rem:biblio}
The multiplier $\bx \cdot \nabla v$ was introduced by Rellich in \cite{Re:40}, and has been well-used since then in the study of the Laplace, Helmholtz, and other elliptic equations, see, e.g., the references in \cite[\S5.3]{CGLS12}, \cite[\S1.4]{MOS12}.
The idea of using a multiplier that is a linear combination of derivatives of $v$ and $v$ itself, such as ${\mathcal M} v$,
is attributed by Morawetz in \cite{Mo:61} to Friedrichs. The multiplier ${\mathcal M}_\alpha v$ \eqref{Malpha} for the Helmholtz equation was introduced by Morawetz and Ludwig in \cite{MoLu:68} and the multiplier ${\mathcal M} v$ \eqref{eq:cM} (with $\bx$ replaced by a general vector field and $\alpha$ and $\beta$ replaced by general scalar fields) is implicit in Morawetz's paper \cite{Mo:75} (for more discussion on this, see \cite[Remark 2.7]{SpKaSm:15}).
\ere
\section{Proofs of the main results}\label{sec:proofs}
\bpf[Proof of Theorem \ref{thm:FirstBound}]
We use the integrated Morawetz identity \eqref{eq:morid1int} with first $D=\Oi$ and then $D=D_R$. In both cases we take $2\alpha=d-1$ and use the same (as yet unspecified) $\beta$; in the first case we take
$v=u_i$, $a=a_i, n=n_i$, and in the second case we take $v=u_o$, $a=a_o, n=n_o$.
Using \eqref{eq:morid1int} is justified since the regularity result in Lemma \ref{lem:exist} shows that $u_i \in V(\Oi)$ and $u_o\in V(D_R)$.
We get
\begin{align}\nonumber
&\int_{\Oi} a_i|\nabla u_i|^2 + n_i k^2 |u_i|^2 \\
&\qquad=- 2 \Re \int_{\Oi} \overline{{\mathcal M} u_i }\, f_i
+\int_{\Gamma}(\bx\cdot\bn)\left(a_i \left|{\partial_\bn} u_i\right|^2 -a_i|\nabla_T u_i|^2 + k^2 n_i |u_i|^2\right)
\nonumber\\&\hspace{43mm}
+ 2\Re\left\{\left( \bx\cdot\overline{\nabla_T u_i}+ \ri k \beta\overline{u_i} + \frac{d-1}{2} \overline{u_i}\right)a_i {\partial_\bn} u_i\right\}\label{eq:1}
\end{align}
and
\begin{align}\nonumber
&\int_{D_R} a_o|\nabla u_o|^2 + n_o k^2 |u_o|^2 \\
&=- 2 \Re \int_{D_R} \overline{{\mathcal M} u_o}\, f_o
-\int_{\Gamma}(\bx\cdot\bn)\left(a_o \left|{\partial_\bn} u_o\right|^2 -a_o|\nabla_T u_o|^2 + k^2 n_o|u_o|^2\right)
\nonumber\\&\hspace{43mm}
+ 2\Re\left\{\left( \bx\cdot\overline{\nabla_T u_o}+ \ri k \beta\overline{u_o} + \frac{d-1}{2} \overline{u_o}\right)a_o{\partial_\bn} u_o\right\}
\nonumber\\
&\hspace{3mm}+\int_{\Gamma_R} R \left( a_o \left|\pdiff{u_o}{r}\right|^2 - a_o |\nabla_T u_o|^2 + k^2 n_o |u_o|^2 \right)
\nonumber\\&\hspace{3mm}
- 2a_o k \beta \Im \int_{\Gamma_R} \overline{u_o}\pdiff{u_o}{r} + a_o(d-1) \Re \int_{\Gamma_R} \overline{u_o}\pdiff{u_o}{r}.\label{eq:2}
\end{align}
Multiplying the inequality \eqref{eq:2.1} by $a_o$ and letting $\kappa= k \sqrt{n_o/a_o}$, we see that if we choose $\beta= R\sqrt{n_o/a_o}$ then the terms on $\Gamma_R$ on the right-hand side of \eqref{eq:2} are non-positive.
We then multiply \eqref{eq:2} by an arbitrary $\eta>0$ and add to \eqref{eq:1} to get
\begin{align}\nonumber
&\int_{\Omega_i}(a_i|\nabla u_i|^2+k^2n_i|u_i|^2)
+\eta\int_{D_R}(a_o|\nabla u_o|^2+k^2n_o|u_o|^2)\\ \nonumber
&\le- 2 \Re \int_{\Oi} \overline{{\mathcal M} u_i }\, f_i- 2\eta \Re \int_{D_R} \overline{{\mathcal M} u_o}\, f_o\\
& + \int_\Gamma (\bx\cdot\bn)\left(a_i \left|{\partial_\bn} u_i\right|^2-\eta a_o \left|{\partial_\bn} u_o\right|^2
-a_i|\nabla_T u_i|^2 +\eta a_o|\nabla_T u_o|^2
+ k^2 n_i|u_i|^2-\eta k^2 n_o|u_o|^2\right)\nonumber\\
&\hspace{10mm}+ 2\Re\left\{\left( \bx\cdot\overline{\nabla_T u_i}+ \ri k \sqrt{\frac{n_o}{a_o}}R\overline{u_i} + \frac{d-1}{2} \overline{u_i}\right)a_i{\partial_\bn} u_i\right\}\nonumber\\
&\hspace{10mm}-2 \eta \Re\left\{\left( \bx\cdot\overline{\nabla_T u_o}+ \ri k \sqrt{\frac{n_o}{a_o}}R\overline{u_o} + \frac{d-1}{2} \overline{u_o}\right)a_o{\partial_\bn} u_o\right\}.
\label{eq:3}
\end{align}
The volume terms on the right-hand side are bounded above by
\begin{align}\nonumber
&\N{f_i}_{\Omega_i}\left( 2 \diam(\Omega_i) \N{\nabla u_i}_{\Omega_i} + \left(2 k \sqrt{\frac{n_o}{a_o}}R + d-1\right)\N{u_i}_{\Omega_i}\right)\\
&\qquad+\eta\N{f_o}_{D_R}\left(2R\N{\nabla u_o}_{D_R} + \left(2 k \sqrt{\frac{n_o}{a_o}}R + d-1\right)\N{u_o}_{D_R}\right)\label{eq:4}
\end{align}
(recall that $\|\cdot\|_{\Omega_{i/o}}$ denotes the $L^2$ norm on $\Omega_{i/o}$).
We now focus on the terms on $\Gamma$, and recall that we are assuming that $g_D=g_N=0$.
Our goal is to choose $\eta$ so that the terms without a sign (i.e.~those on the last two lines of \eqref{eq:3}) cancel. Using the transmission conditions in \eqref{eq:BVP}, we see that this cancellation occurs if $\eta = 1/(A_D A_N)$. (It is at this point that we need $A_D$ and $A_N$ to be real; indeed, if the product $A_D A_N$ has non-zero real and imaginary parts, we cannot choose even a complex $\eta$ to cancel these terms).
Making this choice of $\eta$ and using the transmission conditions, we see that
the remaining terms on $\Gamma$ become
\beq
\int_\Gamma (\bx\cdot\bn) \left(
k^2 n_i |u_i|^2 \left (1 - \frac{A_D}{A_N}\frac{n_o}{n_i}\right)+
a_i |{\partial_\bn} u_i|^2 \left (1 - \frac{A_N}{A_D}\frac{a_i}{a_o}\right)
+a_i |\nabla_T u_i |^2
\left (-1 +\frac{A_D}{A_N}\frac{a_o}{a_i}\right)
\right).
\label{eq:GammaTerm}
\eeq
These terms are non-positive and thus can be neglected if
\beqs
A_N n_i \leq A_D n_o\quad\text{ and } \quad A_N a_i \geq A_D a_o,
\eeqs
or equivalently if \eqref{eq:cond} holds.
In summary, under the conditions \eqref{eq:cond}, we have that
\beqs
a_i\N{\nabla u_i}_{{\Omega_i}}^2+k^2n_i\N{u_i}^2_{{\Omega_i}}
+\frac1{A_D A_N}\left(a_o\N{\nabla u_o}_{D_R}^2+k^2n_o\N{u_o}^2_{D_R}\right)
\eeqs
is bounded by \eqref{eq:4}.
Using the Cauchy--Schwarz and Young inequalities we obtain the assertion of Theorem~\ref{thm:FirstBound}.
\epf
\bre\label{rem:unnatural}\mythmname{The origin of the condition \eqref{eq:cond}}
Condition \eqref{eq:cond} comes from requiring that each of the terms in \eqref{eq:GammaTerm} are non-positive. These terms are not independent, however, since they all depend on $u_i$. Despite this connection, we have not been able to lessen the requirements of \eqref{eq:cond} using only these elementary arguments, other than in Proposition \ref{thm:BoundViaTrace2} below where the term on $\Gamma$ involving $|u_i|^2$ is controlled by the norms of $u_i$ in ${\Omega_i}$ via a trace inequality.
\ere
\bpf[Proof of Theorem \ref{thm:nonzero}]
In the proof of Theorem~\ref{thm:FirstBound}, the assumption $g_D=g_N=0$ was used to derive
\eqref{eq:GammaTerm} from \eqref{eq:3}.
Now that $g_D$ and $g_N$ are not necessarily zero, we expand the terms on $\Gamma$ appearing in \eqref{eq:3} using the transmission conditions with $g_D,g_N\ne0$ and the fact that $\eta=1/({A_D}{A_N})$.
We control these terms on $\Gamma$ using $|\bx|\le\diam({\Omega_i})$ and $-\bx\cdot\bn\le-\gamma\diam({\Omega_i})$ for a.e.\ $\bx\in\Gamma$, and $({a_i}{A_N}-{a_o}{A_D})>0$ and $({n_o}{A_D}-{n_i}{A_N})>0$ from assumption~\eqref{eq:gDgNcond}.
We apply the weighted Young's inequality to nine terms, denoted $T_1,\ldots,T_9$, using positive coefficients $\xi_1,\ldots,\xi_9$:
\begin{align*}
&\int_\Gamma(\bx\cdot\bn)\bigg(a_i \left|{\partial_\bn} u_i\right|^2 -a_i|\nabla_T u_i|^2
+ k^2 n_i|u_i|^2
\\
&-\frac1{{a_o}{A_D}{A_N}} \left|{A_N} {a_i}{\partial_\bn} u_i+g_N\right|^2
+ \frac{a_o}{{A_D}{A_N}}|{A_D}\nabla_T u_i+\nabla_T{g_D}|^2
- k^2 \frac{n_o}{{A_D}{A_N}}|{A_D} u_i+{g_D}|^2\bigg)\\
&+ 2\Re\left\{\left( \bx\cdot\overline{\nabla_T u_i}+ \ri k \sqrt{\frac{n_o}{a_o}}R\overline{u_i} + \frac{d-1}{2} \overline{u_i}\right)a_i{\partial_\bn} u_i\right\}
\\
&-2 \Re\left\{\left( \bx\cdot\Big(\overline{\nabla_T u_i+\frac1{A_D}\nabla_T{g_D}}\Big)
+\Big( \ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big)\overline{\Big(u_i+\frac1{A_D}{g_D}\Big)} \right)
\Big(a_i{\partial_\bn} u_i+\frac1{A_N} g_N\Big)\right\}
\hspace{-5mm}
\\
=&\int_\Gamma
(\bx\cdot\bn)\bigg({a_i}\Big(1-\frac{{a_i}{A_N}}{{a_o}{A_D}}\Big) |{\partial_\bn} u_i|^2
+{a_i}\Big(\frac{{a_o}{A_D}}{{a_i}{A_N}}-1\Big)|\nabla_T u_i|^2
+k^2{n_i}\Big(1-\frac{n_o{A_D}}{{n_i}{A_N}}\Big)|u_i|^2
\\&
-\frac1{{a_o}{A_D}{A_N}} |g_N|^2
+\frac{a_o}{{A_D}{A_N}}|\nabla_T{g_D}|^2
-k^2 \frac{n_o}{{A_D}{A_N}}|{g_D}|^2
\\&
-2 \frac{a_i}{{a_o}{A_D}}\Re\underbrace{\{{\partial_\bn} u_i \conj{g_N}\}}_{T_1}
+2\frac{a_o}{A_N}\Re\underbrace{\{\nabla_T u_i\cdot\nabla_T\conj{g_D}\}}_{T_2}
-2k^2\frac{n_o}{{A_N}}\Re\underbrace{\{u_i\conj{g_D}\}}_{T_3}\bigg)
\\&
-2\frac1{{A_N}}\Re\underbrace{\{\bx\cdot\nabla_T \conj{u_i}g_N\}}_{T_4}
-2\frac1{{A_N}}\Re\underbrace{\Big\{\Big( \ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big)\conj u_ig_N\Big\}}_{T_5}
\\&
-2\frac{{a_i}}{A_D}\Re\underbrace{\left\{\bx\cdot\nabla_T\conj{{g_D}} {\partial_\bn} u_i\right\}}_{T_6}
-2\frac{{a_i}}{A_D}\Re\underbrace{\left\{\Big( \ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big)\overline{{g_D}}
{\partial_\bn} u_i\right\}}_{T_7}
\\&
-2\frac1{{A_N}{A_D}}\Re\underbrace{\left\{\bx\cdot\nabla_T\conj{{g_D}} g_N\right\}}_{T_8}
-2\frac1{{A_N}{A_D}}\Re\underbrace{\left\{\Big( \ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big)\overline{{g_D}}
g_N\right\}}_{T_9}
\\
\le&
-\gamma\diam({\Omega_i})\bigg({a_i}\frac{{a_i}{A_N}-{a_o}{A_D}}{{a_o}{A_D}}\N{{\partial_\bn} u_i}_\Gamma^2
\\&\hspace{20mm}
+\frac{{a_i}{A_N}-{a_o}{A_D}}{{A_N}}\N{\nabla_T u_i}_\Gamma^2
+k^2\frac{{n_o}{A_D}-{n_i}{A_N}}{{A_N}}\N{u_i}_\Gamma^2\!\bigg)
\\
&-\gamma\diam({\Omega_i})\frac1{{a_o}{A_D}{A_N}} \N{g_N}_\Gamma^2
+\diam({\Omega_i})\frac{a_o}{{A_D}{A_N}}\N{\nabla_T{g_D}}_\Gamma^2
-k^2 \gamma\diam({\Omega_i})\frac{n_o}{{A_D}{A_N}}\N{{g_D}}_\Gamma^2
\\
&+\xi_1{a_i}\gamma\diam({\Omega_i})\frac{{a_i}{A_N}-{a_o}{A_D}}{{a_o}{A_D}}\N{{\partial_\bn} u_i}_\Gamma^2
+\frac{{a_i}\diam({\Omega_i})}{\xi_1\gamma{a_o}{A_D}({a_i}{A_N}-{a_o}{A_D})}\N{g_N}_\Gamma^2
\\
&+\xi_2\gamma\diam({\Omega_i})\frac{{a_i}{A_N}-{a_o}{A_D}}{{A_N}}\N{\nabla_T u_i}_\Gamma^2
+\frac{\diam({\Omega_i}){a_o}^2}{\xi_2\gamma{A_N}({a_i}{A_N}-{a_o}{A_D})}\N{\nabla_T{g_D}}_\Gamma^2
\\
&+\xi_3k^2\gamma\diam({\Omega_i})\frac{{n_o}{A_D}-{n_i}{A_N}}{{A_N}}\N{u_i}_\Gamma^2
+\frac{k^2\diam({\Omega_i})n_o^2}{\xi_3\gamma{A_N}({n_o}{A_D}-{n_i}{A_N})}\N{{g_D}}_\Gamma^2
\\
&+\xi_4\gamma\diam({\Omega_i})\frac{{a_i}{A_N}-{a_o}{A_D}}{{A_N}}\N{\nabla_T u_i}_\Gamma^2
+\frac{\diam({\Omega_i})}{\xi_4\gamma{A_N}({a_i}{A_N}-{a_o}{A_D})}\N{g_N}_\Gamma^2
\\
&+\xi_5k^2\gamma\diam({\Omega_i})\frac{{n_o}{A_D}-{n_i}{A_N}}{{A_N}}\N{u_i}_\Gamma^2
+\frac{\Big|\ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big|^2}{\xi_5\gamma\diam({\Omega_i}) k^2{A_N}({n_o}{A_D}-{n_i}{A_N})}
\N{g_N}_\Gamma^2
\\
&+\xi_6{a_i}\gamma\diam({\Omega_i})\frac{{a_i}{A_N}-{a_o}{A_D}}{{a_o}{A_D}}\N{{\partial_\bn} u_i}_\Gamma^2
+\frac{{a_i}{a_o}\diam({\Omega_i})}{\xi_6\gamma{A_D}({a_i}{A_N}-{a_o}{A_D})}\N{\nabla_T{g_D}}_\Gamma^2
\\
&+\xi_7{a_i}\gamma\diam({\Omega_i})\frac{{a_i}{A_N}-{a_o}{A_D}}{{a_o}{A_D}}\N{{\partial_\bn} u_i}_\Gamma^2
+\frac{{a_i}{a_o}\Big|\ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big|^2}{\xi_7\gamma{A_D}\diam({\Omega_i})({a_i}{A_N}-{a_o}{A_D})}\N{{g_D}}_\Gamma^2
\\
&+\xi_8\frac{\diam({\Omega_i})}{{a_o}{A_N}{A_D}}\N{g_N}_\Gamma^2
+\frac{\diam({\Omega_i}){a_o}}{\xi_8{A_N}{A_D}}\N{\nabla_T{g_D}}_\Gamma^2
\\&
+\xi_9\frac{\diam({\Omega_i})}{{a_o}{A_N}{A_D}} \N{g_N}_\Gamma^2
+\frac{{a_o}\Big|\ri k \sqrt{\frac{n_o}{a_o}}R+\frac{d-1}{2}\Big|^2}
{\xi_9\diam({\Omega_i}){A_D}{A_N}} \N{{g_D}}_\Gamma^2.
\end{align*}
We choose the weights as
$$\xi_1=\xi_6=\xi_7=\frac13,\quad \xi_2=\xi_3=\xi_4=\xi_5=\frac12,\quad \xi_8=\xi_9=1,$$
so that all terms containing $u_i$ cancel each other, and we are left with
\begin{align*}
&\frac{\diam({\Omega_i})}{{A_N}}\bigg[
-\frac{\gamma{n_o}}{A_D}+\frac{2n_o^2}{\gamma({n_o}{A_D}-{n_i}{A_N})}
\\&\hspace{20mm}
+\frac{1}{{A_D}\diam({\Omega_i})^2}\Big(1+\frac{3{a_i}{A_N}}{\gamma({a_i}{A_N}-{a_o}{A_D})}\Big)
\Big({n_o} R^2+\frac{{a_o}(d-1)^2}{4k^2}\Big)
\bigg]k^2\N{{g_D}}_\Gamma^2
\\
&+\frac{\diam({\Omega_i}){a_o}}{{A_D}{A_N}}\bigg[
2+\frac{2{a_o}{A_D}+3{a_i}{A_N}}{\gamma({a_i}{A_N}-{a_o}{A_D})}\bigg]\N{\nabla_T{g_D}}_\Gamma^2
\\
&+\frac{\diam({\Omega_i})}{{a_o}{A_N}{A_D}}\bigg[
2-\gamma+\frac{3{a_i}{A_N}+2{a_o}{A_D}}{\gamma({a_i}{A_N}-{a_o}{A_D})}
+\frac{2{A_D}\Big({n_o} R^2+\frac{{a_o}(d-1)^2}{4k^2}\Big)}{\gamma\diam({\Omega_i})^2({n_o}{A_D}-{n_i}{A_N})}
\bigg]\N{g_N}_\Gamma^2
\\
\le&
\bigg[
\frac{2\diam({\Omega_i})n_o^2}{\gamma{A_N}({n_o}{A_D}-{n_i}{A_N})}
+\frac{(3+\gamma){a_i}\Big({n_o} R^2+\frac{{a_o}(d-1)^2}{4k^2}\Big)}{{A_D}\gamma\diam({\Omega_i})({a_i}{A_N}-{a_o}{A_D})}
\bigg]k^2\N{{g_D}}_\Gamma^2
\\
&+\frac{\diam({\Omega_i}){a_o}\big((3+2\gamma) {a_i} {A_N} + 2 {a_o}{A_D}\big)}{{A_D}{A_N}\gamma({a_i}{A_N}-{a_o}{A_D})}\N{\nabla_T{g_D}}_\Gamma^2
\\
&+\frac{1}{\gamma{a_o}{A_N}{A_D}}\bigg[
\frac{\diam({\Omega_i})(4{a_i}{A_N}+2{a_o}{A_D})}{{a_i}{A_N}-{a_o}{A_D}}
+\frac{2{A_D}\Big({n_o} R^2+\frac{{a_o}(d-1)^2}{4k^2}\Big)}{\diam({\Omega_i})({n_o}{A_D}-{n_i}{A_N})}
\bigg]\N{g_N}_\Gamma^2,
\end{align*}
where we used also $0<\gamma\le1/2$ and dropped some negative terms.
Then the bound in the assertion, \eqref{eq:gDgNbound}, follows as in the proof of Theorem \ref{thm:FirstBound}, recalling that the use of Young's inequality for the volume norms gives a further factor of 2 in front of norms on $\Gamma$ in the final bound.
\epf
\bre\label{rem:complex2}\mythmname{Extensions of Theorems \ref{thm:FirstBound}--\ref{thm:nonzero} to the case when $\Im n_i>0$}
We now explain how analogues of Theorems~\ref{thm:FirstBound}--\ref{thm:nonzero} hold when $n_i\in \Com$ with $0<\Im n_i\leq \delta/k$ and $\delta$ is sufficiently small (the occurrences of $n_i$ in the conditions \eqref{eq:cond}, \eqref{eq:condTracea}, and \eqref{eq:gDgNcond} are then replaced by $\Re n_i$).
We first consider Theorem \ref{thm:FirstBound}.
Under the assumption that the existence, uniqueness, and regularity results of Lemma \ref{lem:exist} hold for the boundary value problem with such $n_i$,
Equation \eqref{eq:3} now holds with $n_i$ replaced by $\Re n_i$ and $f_i$ replaced by $f_i -\ri k^2 (\Im n_i) u_i$.
We therefore have the extra term
\beqs
-2\Im\int_{{\Omega_i}} \overline{{\mathcal M} u_i} \,k^2 (\Im n_i) u_i
\eeqs
on the right-hand side of \eqref{eq:3}. If $0<\Im n_i\leq \delta/k$ and $\delta$ is sufficiently small, then this term can be absorbed into the weighted $H^1$-norm of $u_i$ on the left-hand side of \eqref{eq:3} (using the Cauchy--Schwarz and weighted Young inequalities). The result is a bound with the same $k$-dependence as \eqref{eq:bound1},
but slightly different constants on the right-hand side. This proof of an a priori bound, under the assumption of existence, implies uniqueness. One can then check that the proof of existence and regularity in Appendix \ref{sec:appA} goes through with $n_i$ of this particular form.
Finally, the extensions to the proof of Theorem \ref{thm:FirstBound} needed to prove Theorem \ref{thm:nonzero} go through as before (since these only involve the terms on $\Gamma$).
\ere
\bpf[Proof of Theorem \ref{thm:res}]
The result \cite[Lemma 2.3]{Vo:99} implies that the assertion of the theorem will hold if
(i) ${R_{\chi}}(k)$ is holomorphic for $\Im k> 0$ and (ii) there exist $C_4>0$ and $k_0>0$ such that
\beq\label{eq:pert1}
\N{{R_{\chi}}(k)}_{{L^2({\Omega_i})\oplus L^2({\Omega_o}) \rightarrow L^2({\Omega_i})\oplus L^2({\Omega_o})}}\leq \frac{C_4}{k} \quad \text{for all } k\geq k_0.
\eeq
Note that, in applying Vodev's result, we take Vodev's obstacle $\Omega$ to be the empty set, $N_0=1$, $\Omega_1$ equal to our $\Omega_i$, and $g_{ij}^{(1)}=\delta_{ij}$.
We also note that the set-up in \cite{Vo:99} assumes that $\Oi$ is smooth. Nevertheless, the result \cite[Lemma 2.3]{Vo:99} boils down to a perturbation argument (via Neumann series) and a result about the free resolvent (i.e.~the inverse of the Helmholtz operator in the absence of any obstacle) \cite[Lemma~2.2]{Vo:99}; both of these results are independent of $\Oi$, and so \cite[Lem\-ma~2.3]{Vo:99} is valid when $\Oi$ is Lipschitz.
Since $\Oi$ is star-shaped and the condition \eqref{eq:cond} is satisfied, Theorem \ref{thm:FirstBound} implies that the estimate on the real axis \eqref{eq:pert1} holds, and thus we need only show that
${R_{\chi}}(k)$ is holomorphic on $\Im k> 0$.
Observe that ${R_{\chi}}(k)$ is well-defined for $\Im k\geq 0$ by the existence and uniqueness results of Lemma \ref{lem:exist}.
Analyticity follows by applying the Cauchy--Riemann
operator $\partial/\partial \overline{k}$ to the BVP \eqref{eq:BVP}. Indeed,
by using Green's integral representation in $\Omega_{i/o}$ we find that $\partial (\Delta u)/\partial \overline{k} = \Delta( \partial u/\partial \overline{k})$, and similarly for $\nabla u$. These in turn imply that ${\partial_\bn} (\partial u/\partial \overline{k})= \partial ({\partial_\bn} u)/\partial \overline{k}$. Therefore, applying $\partial/\partial \overline{k}$ to \eqref{eq:BVP}, we find that $\partial u/\partial \overline{k}$ satisfies the Helmholtz transmission problem with zero volume and boundary data, and thus must vanish by the uniqueness result.
\epf
The condition \eqref{eq:cond} in Theorem \ref{thm:FirstBound} implies that ${n_i}/{a_i}\le{n_o}/{a_o}$, namely that the wave\-length $\lambda=(2\pi\sqrt a)/(\sqrt nk)$ of the solution $u$ is larger in the inner domain ${\Omega_i}$ than in ${\Omega_o}$.
In the next proposition we extend the result of Theorem \ref{thm:FirstBound} to a case where this condition is slightly violated.
\begin{proposition}\label{thm:BoundViaTrace2}
Assume that $\Oi$ is star-shaped,
\beq\label{eq:condTracea}
\frac{{n_o}}{{n_i}}
\left(\frac{{n_i}}{{n_o}}- \frac{{A_D}}{{A_N}}\right)
\left(d+ \sqrt{d^2 + \frac{4n_i}{a_i}\big(k\diam({\Omega_i})\big)^2}\right)
<1,
\qquad
\frac{A_D}{A_N}\leq \frac{a_i}{a_o},
\eeq
$g_N=g_D=0$ and $k>0$.
Then the solution of BVP \eqref{eq:BVP} satisfies
\begin{align}
&G\left(a_i\N{\nabla u_i}_{{\Omega_i}}^2+k^2n_i\N{u_i}^2_{{\Omega_i}}\right)
+\frac1{A_D A_N}\left(a_o\N{\nabla u_o}_{D_R}^2+k^2n_o\N{u_o}^2_{D_R}\right)
\nonumber\\
&\hspace{2cm}\le
\bigg[\frac{4\diam(\Omega_i)^2}{a_i}+\frac{1}{n_i}\left(2\sqrt{\frac{n_o}{a_o}} R +\frac{d-1}{k}\right)^2\bigg]
\N{f_i}_{\Omega_i}^2
\nonumber\\
&\hspace{3cm}+\frac1{A_D A_N}\bigg[\frac{4R^2}{a_o}+\frac{1}{n_o}\left(2\sqrt{\frac{n_o}{a_o}} R +\frac{d-1}{k}\right)^2\bigg]\N{f_o}_{D_R}^2,
\label{eq:bound2a}
\end{align}
where $G$ is defined by
\beq\label{eq:G2}
G:=\half \left(1-
\frac{{n_o}}{{n_i}}
\left(\frac{{n_i}}{{n_o}}- \frac{{A_D}}{{A_N}}\right)
\left(d+ \sqrt{d^2 + \frac{4n_i}{a_i}\big(k\diam({\Omega_i})\big)^2}\right)
\right)
\eeq
and is positive by the first inequality in \eqref{eq:condTracea}.
\end{proposition}
To better understand the condition \eqref{eq:condTracea}, consider the simple case when ${n_i}={a_i}={a_o}={A_D}={A_N}=1$. Then the condition \eqref{eq:condTracea} is satisfied if
\beq\label{eq:condTrace2a}
{n_o}>1-\frac1{d + \sqrt{d^2+4(k\diam(\Oi))^2}}.
\eeq
For a fixed ${n_o}>1$ and sufficiently small, the condition \eqref{eq:condTrace2a} is an upper bound on $k$ under which the estimate \eqref{eq:bound2a} holds---this is consistent with the results on super-algebraic growth in $k$ of the resolvent for ${n_o}>1$ recapped in \S\ref{sec:blowup} below.
If we allow ${n_o}$ to be a function of $k$, then the condition \eqref{eq:condTrace2a} implies that the estimate \eqref{eq:bound2a} holds if the distance between ${n_o}$ and ${n_i}(=1)$ decreases like $1/k$ as $k\rightarrow \infty$.
\bpf[Proof of Proposition \ref{thm:BoundViaTrace2}]
The proof proceeds exactly the same way as the proof of Theorem \ref{thm:FirstBound} up to \eqref{eq:GammaTerm}. Now, the assumption in \eqref{eq:condTracea} that $A_D a_o \leq A_N a_i$ implies that the terms in \eqref{eq:GammaTerm} are bounded by
\beq\label{eq:GammaTerm2}
\int_\Gamma (\bx\cdot\bn) \left(
k^2 n_i |u_i|^2 \left (1 - \frac{A_D}{A_N}\frac{n_o}{n_i}\right)\right).
\eeq
For all $w\in H^1({\Omega_i})$ and $\epsilon>0$, we have the following weighted trace inequality:
\begin{align}\nonumber
\int_\Gamma (\bx\cdot\bn)|w|^2
&=\int_{\Omega_i} \mathop{\rm div}\nolimits(\bx|w|^2)\\&
=\int_{\Omega_i} (d|w|^2+2\bx\cdot\Re\{w\nabla \conj w\})
\le (d+\epsilon)\N{w}^2_{{\Omega_i}}+\frac1\epsilon\diam({\Omega_i})^2\N{\nabla w}^2_{{\Omega_i}}.
\label{eq:trace}
\end{align}
We choose $\epsilon$ so that
\beq\label{eq:euan2}
\frac{d+\epsilon}{n_i k^2} =\frac{1}{\epsilon} \frac{\diam(\Oi)^2}{a_i},
\eeq
so that the right-hand side of \eqref{eq:trace} becomes
\beqs
\frac{d+\epsilon}{n_i k^2}\left( n_i k^2 \N{w}^2_{{\Omega_i}} + a_i \N{\nabla w}^2_{{\Omega_i}}\right).
\eeqs
The requirement \eqref{eq:euan2}, and the fact that $\epsilon>0$, imply that
\beqs
\epsilon = \half \left( - d + \sqrt{d^2 + \frac{4n_i}{a_i} \big(k \diam(\Oi)\big)^2}\right).
\eeqs
We then get that \eqref{eq:GammaTerm2} is bounded by
\beqs
\frac{n_o}{n_i}\left(\frac{{n_i}}{{n_o}}- \frac{{A_D}}{{A_N}}\right)
\half\left(d+ \sqrt{d^2 + \frac{4n_i}{a_i}\big(k\diam({\Omega_i})\big)^2}\right)\big( n_i k^2\N{u_i}^2_{{\Omega_i}} + a_i \N{\nabla u_i}^2_{{\Omega_i}}\big).
\eeqs
The requirement \eqref{eq:condTracea} implies that this term is strictly less than
$( n_i k^2\N{u_i}^2_{{\Omega_i}} + a_i \N{\nabla u_i}^2_{{\Omega_i}})/2$,
and thus the argument proceeds as before (with the other half of the norm being used to deal with the terms in \eqref{eq:4} via the Cauchy--Schwarz and Young inequalities).
\epf
\section{Super-algebraic growth in \texorpdfstring{$k$}{k} when the condition \texorpdfstring{\eqref{eq:cond}}{} is violated}\label{sec:blowup}
In \S\ref{sec:PoVo} we adapt the results of \cite{PoVo:99}, for $C^\infty$ convex $\Oi$, on super-algebraic growth of the solution operator through a sequence of complex wavenumbers when the condition \eqref{eq:cond} does not hold to prove an analogous result for real wavenumbers. In \S\ref{sec:ball} we briefly highlight the analogous results of \cite{Cap12, CLP12, AC16}, valid for real wavenumbers, when $\Oi$ is a ball.
As explained at the end of \S\ref{sec:intro}, our main motivation for doing this is the recent interest in \cite{Ch:16, BaChGo:16, OhVe:16, SaTo:17, GrSa:18} on
how the solution of the interior impedance problem with piecewise-constant wavenumber (which is an approximation of the transmission problem) depends on the wavenumber, and the fact that the results in this section partially answer questions/conjectures from \cite{BaChGo:16, SaTo:17}.
\subsection{Adapting the results of Popov and Vodev \texorpdfstring{\cite{PoVo:99}}{}}\label{sec:PoVo}
The paper \cite{PoVo:99} considers the Helmholtz transmission problem \eqref{eq:BVP} with
${\Omega_i}$ a $C^\infty$ convex domain with strictly positive curvature, ${a_i}={a_o}={n_o}={A_D}=1$ and $g_D=g_N=0$; i.e., the BVP \eqref{eq:BVP2}.
From our point of view, the importance of \cite{PoVo:99}
is that their results can be adapted to show that if ${\Omega_i}$ is a $C^\infty$ convex domain with strictly positive curvature, $n_i>1$ and $A_N>0$, then there exists an increasing sequence of real wavenumbers through which the solution operator grows super-algebraically; this is stated as Corollary \ref{cor:PoVo2} below. To state the results of \cite{PoVo:99} we need to define a \emph{quasimode}.
\begin{defin}\mythmname{Quasimode for the BVP \eqref{eq:BVP2}}\label{def:quasi}
A quasimode is a sequence
\beqs
\big\{ k_j , \big(u_i^{(j)}, u_o^{(j)}\big) \big\}_{j=1}^\infty
\eeqs
where $k_j \in \Com$, $|k_j|\rightarrow \infty$, $\Re k_j \geq 1$, $u_{i/o}^{(j)} \in C^\infty(\overline{\Omega_{i/o}})$, the support of $u^{(j)}_{i/o}$ is contained in a fixed compact neighbourhood of $\Gamma$, $\|u_i^{(j)}\|_{L^2(\Gamma)}=1$,
\begin{subequations}
\begin{align}
\N{ (\Delta +n_i k_j^2)u_i^{(j)}}_{L^2({\Omega_i})} &= {\mathcal O}(|k_j|^{-\infty}),\\
\N{ (\Delta +k_j^2)u_o^{(j)}}_{L^2({\Omega_o})} &= {\mathcal O}(|k_j|^{-\infty}),\\
\N{ u_i^{(j)}-u_o^{(j)} }_{H^2(\Gamma)} &= {\mathcal O}(|k_j|^{-\infty}),\quad \text{ and } \label{eq:QM3}\\
\N{ {\partial_\bn} u_i^{(j)}-A_N {\partial_\bn} u_o^{(j)} }_{H^2(\Gamma)} &= {\mathcal O}(|k_j|^{-\infty}),\label{eq:QM4}
\end{align}
\end{subequations}
where, given an infinite sequence of complex numbers $\{z_j\}_{j=1}^\infty$, $z_j ={\mathcal O}(|k_j|^{-\infty})$ if for every $N>0$ there exists a $C_N>0$ such that $|z_j|\leq C_N |k_j|^{-N}$.
\end{defin}
The significance of the assumption $\Re(k_j)\geq 1$ in Definition~\ref{def:quasi} is that it {\it(a)} bounds the wavenumbers away from zero, and {\it(b)} specifies that we are considering wavenumbers in the right-half complex plane. Therefore $\Re(k_j)\geq 1$ could be replaced by $\Re(k_j)\geq k_0$ for any $k_0>0$.
The concentration of the quasimodes near the boundary $\Gamma$ means that they are understood in the asymptotic literature as ``whispering gallery'' modes; see, e.g., \cite{BaBu:91}.
\begin{theorem}\mythmname{Existence of quasimodes \cite{PoVo:99}}\label{thm:PoVo}
If ${\Omega_i}$ is a $C^\infty$ convex domain with strictly positive curvature, $n_i>1$ and $A_N>0$, then there exists a quasimode for the transmission problem \eqref{eq:BVP2}. Furthermore, $0>\Im k_j = {\mathcal O}(|k_j|^{-\infty})$ (i.e.~$k_j$ is super-algebraically close to the real axis).
\end{theorem}
The main result in \cite{PoVo:99} about resonances then follows from showing that there exists an infinite sequence of resonances that are super-algebraically close to the quasimodes \cite[Proposition~2.1]{PoVo:99}.
Theorem \ref{thm:PoVo} implies that there exists an increasing sequence of \emph{complex} wavenumbers through which the solution operator grows super-algebraically.
We show in Corollary \ref{cor:PoVo2} below that this result implies that there exists an increasing sequence of \emph{real} wavenumbers through which one obtains this growth.
To prove this corollary we need two preparatory results. The first (Corollary \ref{cor:PoVoa} below) is that one can change the normalisation $\|u_i^{(j)}\|_{L^2(\Gamma)}=1$ in the definition of the quasimode to $\|\nabla_T u_i^{(j)}\|_{L^2(\Gamma)}+ |k_j|\|u_i^{(j)}\|_{L^2(\Gamma)}=|k_j|$; it turns out that it will be more convenient for us to work with this normalisation (note that we put the factor $|k_j|$ on the right-hand side because, since we expect $\|\nabla_T u_i^{(j)}\|_{L^2(\Gamma)}$ to be proportional to $|k_j|\|u_i^{(j)}\|_{L^2(\Gamma)}$, this normalisation therefore keeps $\|u_i^{(j)}\|_{L^2(\Gamma)}$ being $\mathcal{O}(1)$).
The second result (Lemma \ref{lem:L2}) is that, under the new normalisation, the $L^2$ norms of $u^{(j)}_{i/o}$ in $\Omega_{i/o}$ are bounded by $C|k_j|^m$ for some $C$ and $m$ independent of $j$ and $k_j$.
\begin{cor}\mythmname{Quasimode under different normalisation}\label{cor:PoVoa}
If the normalisation $\|u_i^{(j)}\|_{L^2(\Gamma)}$ $=1$ in Definition \ref{def:quasi} is changed to
\beq\label{eq:norm}
\|\nabla_T u_i^{(j)}\|_{L^2(\Gamma)}+ |k_j|\|u_i^{(j)}\|_{L^2(\Gamma)}=|k_j|,
\eeq
then the result of Theorem \ref{thm:PoVo} still holds.
\end{cor}
This follows from the construction of the quasimode in \cite[\S5.4]{PoVo:99}; instead of dividing the $f_j$s by $\|f_j\|_{L^2(\Gamma)}$ one divides by $(\|\nabla_T f_j\|_{L^2(\Gamma)}+ |k_j|\|f_j\|_{L^2(\Gamma)})/|k_j|$. Since $\|f_j\|_{L^2(\Gamma)}$ is not super-algebraically small by \cite[last equation on page 437]{PoVo:99}, neither is $(\|\nabla_Tf_j\|_{L^2(\Gamma)}+ |k_j|\|f_j\|_{L^2(\Gamma)})/|k_j|$.
\ble\mythmname{Bound on the $L^2$ norms of $u^{(j)}_{i/o}$ in $\Omega_{i/o}$}\label{lem:L2}
The quasimode of Theorem \ref{thm:PoVo} (under the normalisation \eqref{eq:norm} in Corollary \ref{cor:PoVoa}) satisfies
\beq\label{eq:L2}
\big\|u^{(j)}_i\big\|_{L^2(\Oi)}+\big\|u^{(j)}_o\big\|_{L^2({\Omega_o})}\leq C |k_j|^{1/2}+ {\mathcal O}(|k_j|^{-\infty}),
\eeq
where $C$ is independent of $j$ and $k_j$.
\ele
\bpf
The plan is to obtain $k_j$-explicit bounds on the Cauchy data of $u^{(j)}_i$ and $u^{(j)}_o$ and then use Green's integral representation and $k_j$-explicit bounds on layer potentials.
In the proof we use the notation that $a\lesssim b$ if $a\leq C b$ for some $C$ independent of $j$ and $k_j$ (but not necessarily independent of $n_i$ and $A_N$).
By the transmission condition \eqref{eq:QM3} and the normalisation \eqref{eq:norm}, we have
\begin{align*}\nonumber
\big\|\nabla_T u_o^{(j)}\big\|_{L^2(\Gamma)}+ |k_j|\big\|u_o^{(j)}\big\|_{L^2(\Gamma)}
&\leq \big\|\nabla_T u_i^{(j)}\big\|_{L^2(\Gamma)}+ |k_j|\big\|u_i^{(j)}\big\|_{L^2(\Gamma)}+{\mathcal O}(|k_j|^{-\infty})\\
&= |k_j|+{\mathcal O}(|k_j|^{-\infty}).
\end{align*}
The bound in \cite[Lemma 5]{MoLu:68} on the Dirichlet-to-Neumann map for $\Oi$ that are star-shaped with respect to a ball (and thus, in particular, smooth convex $\Oi$) implies that
\begin{align}\nonumber
\big\|{\partial_\bn} u_o^{(j)}\big\|_{L^2(\Gamma)} &\lesssim \big\|\nabla_T u_o^{(j)}\big\|_{L^2(\Gamma)} + |k_j|\big\|u_o^{(j)}\big\|_{L^2(\Gamma)} + {\mathcal O}(|k_j|^{-\infty})\\
&\lesssim |k_j| \big(1+ {\mathcal O}(|k_j|^{-\infty})\big)
\label{eq:L22}
\end{align}
where the ${\mathcal O}(|k_j|^{-\infty})$ in the first line is the contribution from $(\Delta +k_j^2)u_o^{(j)}$.
Note that \cite[Lemma 5]{MoLu:68} is valid for real wavenumbers, but an analogous bound holds for complex wavenumbers with sufficiently small ${\mathcal O}(1)$ imaginary parts -- see \cite[Theorem I.2D]{Mo:75}.
Via the transmission condition \eqref{eq:QM4}, the bound \eqref{eq:L22} holds with
${\partial_\bn} u_o^{(j)}$ replaced by ${\partial_\bn} u_i^{(j)}$.
The result \eqref{eq:L2} then follows from (i) Green's integral representation (applied in both ${\Omega_o}$ and ${\Omega_i}$),
(ii) the classical bound on the free resolvent
\beq\label{eq:NP}
\N{\chi R(k) \chi}_{L^2(\Rea^d)\rightarrow L^2(\Rea^d)} \lesssim \frac{1}{|k|}\exp(a (\Im k)_-),
\eeq
for some $a>0$ (depending on $\chi$), where $x_-=0$ for $x\geq 0$ and $x_-=-x$ for $x<0$, and $\chi$ is any cutoff function, and (iii)
the bounds on the single- and double-layer
potentials
\beq\label{eq:SLDL}
\N{\chi {\mathcal S}_k }_{L^2(\Gamma)\rightarrow L^2(\Rea^d)} \lesssim |k|^{-1/2} \exp(a (\Im k)_-)\text{ and }
\N{\chi {\mathcal D}_k }_{L^2(\Gamma)\rightarrow L^2(\Rea^d)} \lesssim |k|^{1/2}\exp(a (\Im k)_-).
\eeq
For a proof of \eqref{eq:NP} see, e.g., \cite[Theorems 3 and 4]{Va:75} or \cite[Theorem 3.1]{DyZw:16}. The bounds \eqref{eq:SLDL} with $\Im k=0$ are obtained from \eqref{eq:NP} in \cite[Lemma 4.3]{Sp2013a}; the same proof goes through with $\Im k\neq 0$.
\epf
\bre\mythmname{Sharp bounds on the single- and double-layer operators}
The bounds \eqref{eq:SLDL} are not sharp in their $k$-dependence. Sharp bounds for real $k$ are
given in \cite[Theorems 1.1, 1.3, and 1.4]{HaTa:15}, and we expect analogous bounds to be valid in the case when $k$ is complex with sufficiently-small imaginary part \cite{Ta:16}. In this case, the exponent $1/2$ in \eqref{eq:L2} would be lowered to $1/6$, but
this would not affect the following result (Corollary \ref{cor:PoVo}), which uses \eqref{eq:L2}.
\ere
\begin{cor}\mythmname{Existence of quasimodes with real wavenumbers}\label{cor:PoVo}
Given the quasi\-mode in Theorem \ref{thm:PoVo} (under the normalisation \eqref{eq:norm}),
\beqs
\big\{\Re k_j , \big(u_i^{(j)}, u_o^{(j)}\big) \big\}_{j=1}^\infty
\eeqs
is also a quasimode (again under the normalisation in Corollary \ref{cor:PoVoa}).
\end{cor}
\bpf
From Definition \ref{def:quasi}, we only need to show that
$\| (\Delta + n_i(\Re k_j)^2)u_i^{(j)}\|_{L^2({\Omega_i})}\!=\!{\mathcal O}(|k_j|^{-\infty})$
and
$\| (\Delta + (\Re k_j)^2)u_o^{(j)}\|_{L^2({\Omega_o})}={\mathcal O}(|k_j|^{-\infty})$.
We have
\beqs
\N{ (\Delta + n_i (\Re k_j)^2)u_i^{(j)}}_{L^2(\Oi)}\leq \N{n_i\big(-2 \ri (\Re k_j) (\Im k_j) + (\Im k_j)^2\big)u_i^{(j)}}_{L^2(\Oi)} + {\mathcal O}(|k_j|^{-\infty}),
\eeqs
which is ${\mathcal O}(|k_j|^{-\infty})$ since $\Im k_j= {\mathcal O}(|k_j|^{-\infty})$ and the $L^2$ norm of $u_i^{(j)}$ is bounded by $C|k_j|^{1/2}$ by the bound \eqref{eq:L2}. Similarly, we have $\| (\Delta + (\Re k_j)^2)u_o^{(j)}\|_{L^2({\Omega_o})}={\mathcal O}(|k_j|^{-\infty})$.
\epf
The super-algebraic growth of the solution operator through real values of $k$ can therefore be summarised by the following.
\begin{cor}\mythmname{Super-algebraic growth through real wavenumbers}\label{cor:PoVo2}
If ${\Omega_i}$ is a $C^\infty$ convex domain with strictly positive curvature, $n_i>1$ and $A_N>0$, then there
exists a sequence
\beqs
\big\{ \widetilde{k}_j , \big(u_i^{(j)}, u_o^{(j)}\big) \big\}_{j=1}^\infty,
\eeqs
where $\widetilde{k}_j \in \Rea$, $\widetilde{k}_j\rightarrow \infty$, $u_{i/o}^{(j)} \in C^\infty(\overline{\Omega_{i/o}})$, the support of $u^{(j)}_{i/o}$ is contained in a fixed compact neighbourhood of $\Gamma$, and for all $N\in\IN$ there exists $C_N>0$ independent of $\widetilde k_j$ such that
\begin{align*}
&\min\Big\{\big\|u_i^{(j)}\big\|_{H^{3/2}({\Omega_i})},\big\|u_o^{(j)}\big\|_{H^{3/2}({\Omega_o})}\Big\}\\
&\ge C_N \widetilde k_j^N \Big(\big\|(\Delta + n_i \widetilde{k}_j^2)u_i^{(j)}\big\|_{L^2({\Omega_i})}
+\big\|(\Delta +\widetilde{k}_j^2)u_o^{(j)}\big\|_{L^2({\Omega_o})}
\\&\hspace{20mm}
+\N{ u_i^{(j)}-u_o^{(j)} }_{H^2(\Gamma)} +\N{ {\partial_\bn} u_i^{(j)}-A_N {\partial_\bn} u_o^{(j)} }_{H^2(\Gamma)}
\Big).
\end{align*}
\end{cor}
\bpf
This follows from Corollary \ref{cor:PoVo} by the trace theorem (see, e.g., \cite[Theorem 3.37]{MCL00}).
\epf
\subsection{The results of \texorpdfstring{\cite{Cap12,CLP12}}{Capdeboscq} when \texorpdfstring{$\Oi$}{Omega-i} is a ball and numerical examples}
\label{sec:ball}
The results of \cite{Cap12, CLP12} (summarised in \cite[Chapter 5]{AC16}) consider the case when $\Oi$ is a 2- or 3-d ball. It is convenient to discuss these alongside numerical examples in 2-d. (We therefore only discuss the 2-d results of \cite{Cap12}, but the 3-d analogues of these are in \cite{CLP12}.)
When $\Oi$ is the 2-d unit ball (${\Omega_i}=B_1$), $u_i$ and $u_o$ can be expressed in terms of Fourier series $\ee^{\ri \nu \theta}$, where $\nu\in \mathbb{Z}$ and $\theta$ is the angular polar coordinate, with coefficients given in terms of Bessel and Hankel functions; see, e.g., \cite[\S12]{Ga:15} or \cite[\S5.3]{AC16}.
The resonances of the BVP \eqref{eq:BVP2} (with $n_i>1$) are then the (complex) zeros of
\beqs
F_{\nu}(k):={A_N}\sqrt{n_i} \,J'_\nu(\sqrt{n_i}\, k)H^{(1)}_\nu (k) - H^{(1)\prime}_\nu (k) J_\nu(\sqrt{n_i}\,k),
\quad \nu\in\IZ,
\eeqs
see, e.g., \cite[Equation (81)]{Ga:15}.
The asymptotics of the zeros of $F_{\nu}(k)$ in terms of $\nu$ are given by, e.g., \cite{LaLeYo:92, Sc:93}.
Indeed, let $k_{\nu,m}$ denote the $m$th zero of $F_{\nu}(k)$ with positive real part (where the zeros are ordered in terms of magnitude), and let $\alpha_m$ denote the $m$th zero of the Airy function ${\rm Ai}(-z)$.
Then by \cite[Equation~(1.1)]{LaLeYo:92}, for fixed $m$,
\beq\label{eq:resonance_asym}\sqrt{n_i}\, k_{\nu,m} = \nu + 2^{-1/3}\alpha_m \nu^{1/3} + {\mathcal O}(1)\quad\text{ as }\quad \nu\rightarrow \infty;
\eeq
recall that the result \cite[Proposition~2.1]{PoVo:99} (discussed in \S\ref{sec:PoVo}) implies that the resonances are real to all algebraic orders.
\paragraph{Exponential growth in $k$.}
For our first numerical examples, we take ${n_i}>1$, ${n_o}=1$, ${A_N}=1$ and for $\nu\in\IN_0$ we let $k$ be the real part of the first resonance corresponding to the angular dependence $\ee^{\ri \nu\theta}$ (i.e.~$k=\Re k_{\nu,1}$ in the previous paragraph).
We then let $f_o=0$ and $f_i= c_\nu J_{\nu}(kr)\ee^{\ri \nu \theta}$, where $c_\nu=(\pi(J_\nu^2(k)-J_{\nu+1}(k)J_{\nu-1}(k)))^{-1/2}$
is such that $\|f_i\|_{L^2({\Omega_i})}=1$ for all $\nu\in \IN_0$.
The field $u$ is then
\begin{align*}
u_i&=c_\nu\frac{\ee^{\ri\nu\theta}}{k^2(n_i\!-\!1)}\bigg(
J_\nu(kr)-J_\nu(k\sqrt{{n_i}}r)
\frac{{A_N} J_\nu'(k)H^{(1)}_\nu(k)-H^{(1)'}_\nu(k) J_\nu(k)}
{F_\nu(k)}
\bigg),
\\
u_o&=c_\nu\frac{\ee^{\ri\nu\theta}H^{(1)}_\nu(kr)}{k^2(n_i\!-\!1) H^{(1)}_\nu(k)}
\bigg(J_\nu(k)-J_\nu(k\sqrt{{n_i}})
\frac{{A_N} J_\nu'(k)H^{(1)}_\nu(k)-H^{(1)'}_\nu(k) J_\nu(k)}
{F_\nu(k)}
\bigg).
\end{align*}
Figure \ref{fig:1} plots the $k\sqrt n$-weighted $L^2$ norm and the $H^1$ seminorm of the particular $u_{i/o}$ above in $\Oi$ and $D_R$ respectively (with $R$ taken to be 2) as $\nu$ runs from $0$ to $\nu_{\max}$.
The left and right panels in Figure~\ref{fig:1} show the norms of $u_{i/o}$ for ${n_i}=3$ and $\nu_{\max}=64$, and for ${n_i}=10$ and $\nu_{\max}=23$, respectively.
We see that the norms of the solution appear to grow exponentially; the existence of a sequence growing super-algebraically is expected by Corollary \ref{cor:PoVo2}, although we are considering lower-order norms than in this result.
We note that the estimate \eqref{eq:resonance_asym} is not enough to deduce exponential growth of the scattered field for $k=\Re k_{\nu,1}$ and increasing $\nu$, but a more refined analysis is needed (taking into account the fact that the imaginary parts of the resonances are superalgebraically small).
The result \cite[Theorem 5.4]{AC16} proves that, at least in the case of plane-wave incidence, Sobolev norms of $u_{o}$ on spherical surfaces in $D_R$ sufficiently close to $\Gamma$ grow exponentially through a sequence of real wavenumbers, where the wavenumbers are defined in terms of the resonances by \cite[Lemma 5.16]{AC16}; this result can then be used to prove exponential growth in the $L^2$ norm of $u_o$.
\begin{figure}
\caption{Left plot: the norms of the solution of BVP \eqref{eq:BVP2}
\label{fig:1}
\end{figure}
\begin{figure}
\caption{The absolute value of the field scattered by plane waves $\ee^{\ri k(x\cos\phi,y\sin\phi)}
\label{fig:Fields}
\end{figure}
\paragraph{Localisation of $u_o$ in ${\Omega_o}$ at resonant frequencies.}
The plots in Figure \ref{fig:Fields} show the absolute value of the fields scattered by a plane wave impinging on the unit disc with ${n_i}=100$.
In the left plots, a wavenumber equal to the real part of a resonance $k_{\nu,m}$ excites a quasimode.
In both these examples, $u_{o}$ is localised close to ${\Omega_i}$; this is expected both from Theorem~\ref{thm:PoVo} above, and from
\cite[Theorem 5.2]{AC16}. Indeed, this latter result gives bounds on $u_{o}$ for all values of $k$ and ${n_i}$, but if ${n_i}>1$ they hold only in the ``far field'', i.e.\ at distance at least $({n_i}-1)\diam{\Omega_i}/2$ from ${\Omega_i}$, showing that the quasimodes generate large fields in a small neighbourhood of ${\Omega_i}$ only.
\paragraph{Sensitivity to the wavenumber.}
The right plots in Figure \ref{fig:Fields} show how a small perturbation of the wavenumber $k$ (e.g.\ by a relative factor of about $2.4\cdot10^{-12}$ from that in the upper left plot to that in the upper right one, and of $7.6\cdot10^{-5}$ in the second row) avoids the quasimode and gives a solution $u$ with much smaller norm; see the scale displayed by the colour bar.
This phenomenon suggests that for certain data the exponential blow-up of the solution operator
when ${n_i}> 1$ can be avoided.
Indeed, given $k$, if $f_{i/o}(r,\theta)=\sum_\ell \alpha_{\nu,i/o}(r)\ee^{\ri \nu \theta}$ and $\alpha_{\nu,i/o}\equiv 0$ for any $\nu$ such that there exists an $m$ such that $k_{\nu,m}\approx k$, then
the resonant modes should be excluded from the solution.
In the context of scattering by an incident field, \cite[Theorem~6.5]{Cap12} describes the size of the neighbourhood $I$ of $\{\Re k_{\nu,m}\}_{\nu\in\IZ,m\in\IN}$, such that the scattered field is uniformly bounded for $k\in(0,\infty)\setminus I$ (see also \cite[Remark 5.5]{AC16}).
The strong sensitivity of the quasimodes with respect to the wavenumber explains why their presence can go unnoticed even by extensive numerical calculation; see, e.g., \cite{BaChGo:16} for a related class of problems.
\appendix
\section{Proof of Lemma \texorpdfstring{\ref{lem:exist}}{2.7} (existence, uniqueness, and regularity of the BVP solution)}\label{sec:appA}
\paragraph{Uniqueness:}
In this setting of Lipschitz $\Oi$, uniqueness follows from Green's identity and a classical result of Rellich (given in, e.g., \cite[Lemma 3.11 and Theorem 3.12]{CoKr:83}); the proof for our assumptions on the parameters $k, n_i,n_o,a_o,a_i, A_D,$ and $A_N$ is short, and so we give it here.
With $f_i=0$, $f_o=0$, $g_D=0$, $g_N=0$, we apply Green's identity to $u_i$ in $\Oi$ (this is allowed since $\Oi$ is Lipschitz and $u_i \in H^1(\Oi,\Delta)$ by, e.g., \cite[Theorem 4.4]{MCL00}) to obtain
\beqs
\int_\Gamma a_i u_i \overline{{\partial_\bn} u_i} - a_i \int_{{\Omega_i}}|\nabla u|^2 + n_i \overline{k^2} \int_{\Omega_i} |u|^2=0.
\eeqs
Using the transmission conditions in \eqref{eq:BVP} we get
\beq\label{eq:A1}
\frac{a_o}{A_D A_N}\int_\Gamma u_o\overline{{\partial_\bn} u_o} - a_i \int_{{\Omega_i}}|\nabla u|^2 + n_i \overline{k^2} \int_{\Omega_i} |u|^2=0.
\eeq
By \cite[Theorem 3.12]{CoKr:83}, if
\beqs
\Im\left( k \sqrt{\frac{n_o}{a_o}} \int_\Gamma u_o \overline{{\partial_\bn} u_o}\right)\geq 0
\eeqs
then $u_o\equiv 0$ in ${\Omega_o}$; the Cauchy data of $u_i$ is then zero by the transmission conditions, and thus $u_i\equiv 0$ in ${\Omega_i}$.
Note that \cite[Theorem 3.12]{CoKr:83} is stated and proved for $d=3$, but the proof goes through in an identical way for $d=2$ and $d\geq 4$ using the radiation condition \eqref{eq:src} and the asymptotics of the fundamental solution in these dimensions (see, e.g., \cite[Theorem~3.5]{DyZw:16}).
Multiplying \eqref{eq:A1} by $k\sqrt{n_o/a_o} (A_N A_D/a_o)$ and recalling that all the parameters apart from $k$ are real, we see that a sufficient condition for uniqueness is
\beqs
\Im\left( k a_i\int_{{\Omega_i}}|\nabla u|^2 - k n_i \overline{k^2} \int_{\Omega_i} |u|^2\right)\geq 0;
\eeqs
this inequality holds since $n_i$ and $a_i$ are real, and $\Im k\geq 0$.
\paragraph{Existence and regularity:}
At least in the case $g_D=g_N=0$, existence immediately follows from uniqueness via Fredholm theory. Here, however, we use the integral-equation argument of \cite{ToWe:93}, since this also establishes the regularity results on $\Gamma$. This integral-equation argument is the Lipschitz analogue of the argument for
sufficiently smooth $\Gamma$ in \cite[Theorem 4.6]{KrRo:78} (see also \cite[Corollary 4.6]{CoSt:85}, which covers 2-d polygons).
If $f_i=0$ and $f_o=0$ then the existence and the regularity of $u$ follow from the integral-equation argument in \cite[Theorem 7.2]{ToWe:93};
to match their notation we choose $u_e=\frac{{a_o}}{{A_N}{a_i}}u_o$ and
\begin{align*}
k_1=k\sqrt{\frac{n_i}{a_i}},\quad
k_2=k\sqrt{\frac{n_o}{a_o}},\quad
\mu_1={A_D}{a_o},\quad
\mu_2={A_N}{a_i},\quad
f={a_o} g_D,\quad
g=\frac1{{A_N}{a_i}}g_N.
\end{align*}
The result \cite[Theorem 7.2]{ToWe:93} is stated only for $d= 3$ and $k\in \Rea\setminus\{0\}$, but we now outline why it also holds for $d\geq 2$ and $\Im k>0$; we first discuss the dimension.
The two reasons Torres and Welland only consider $d=3$ is that
\ben
\item Their argument treats the Helmholtz integral operators as perturbations of the corresponding Laplace ones, and there
is a technical difficulty that the fundamental solution of Laplace's equation does not tend to zero at infinity when $d=2$, whereas it does for $d\geq 3$.
\item The case $d \geq 4$ is very similar to $d=3$, except that the bounds on the kernels of the integral operators are slightly more involved.
\een
Regarding the second reason: the only places in \cite{ToWe:93} where these bounds are used and that contribute to the existence result \cite[Theorem 7.2]{ToWe:93} are Part (vi) of Lemma 6.2, and Points (i)--(vi) of Page 1466. The analogue of the Hankel-function bounds in \cite[Equation 2.25]{CGLS12} for $d\geq 4$ can be used to show that these results in \cite{ToWe:93} hold for $d\geq 4$.
Regarding the first reason: the harmonic-analysis results about Laplace integral operators, upon which Torres and Welland's proof rests, also hold when $d=2$. More specifically, the results in Sections 4 and 6 of \cite{ToWe:93} hold when $d=2$ by \cite{Ve:84} and \cite{CoMcMe:82} (a convenient summary of these harmonic-analysis results is given in \cite[Chapter 2]{CGLS12}). The results (i) and (iii)--(vi) on Page 1463 of \cite{ToWe:93} hold when $d=2$ by results in \cite[\S4]{Ve:84}, and the result (ii) holds if one defines the Laplace fundamental solution as $(1/2\pi)\log(a/|\bx-\by|)$ where the constant $a$ is not equal to the so-called ``capacity'' of $\Gamma$; see \cite[Page 115]{CGLS12}, \cite[Theorem 8.16]{MCL00}.
Finally, Lemma 3.1 of \cite{ToWe:93} holds when $d=2$ by \cite[Theorem~2]{EsFaVe:92}.
All this means that Lemma 3.2 of \cite{ToWe:93} holds when $d=2$, which in turn means that \cite[Theorem 7.2]{ToWe:93} holds when $d=2$.
We therefore have that \cite[Theorem 7.2]{ToWe:93} holds for $d\geq 2$ and for $k\in \Rea\setminus\{0\}$. Inspecting the proof of \cite[Theorem 7.2]{ToWe:93} we see that the properties of the Helmholtz boundary-integral operators used are unchanged if $\Im k>0$. Indeed, the compactness of the differences of the Helmholtz and Laplace boundary-integral operators holds for $\Im k>0$ by (v) and (vi) on Page 1466 of \cite{ToWe:93} and by (vi) in Lemma 6.2 of \cite{ToWe:93}, and the uniqueness of the BVP (used on Page 1483) holds by the argument above. Therefore \cite[Theorem 7.2]{ToWe:93} holds for $d\geq 2$ and for $k\in\Com\setminus\{0\}$ with $\Im k\geq 0$.
If the volume source terms $f_i$ and $f_o$ are different from zero, we define $w_i$ and $w_o$ to be the solutions to the following problems:
\begin{align*}
\begin{cases}
w_i\in H^1({\Omega_i})\\
{a_i}\Delta w_i+k^2{n_i} w_i=f_i &\;\text{in}\; {\Omega_i},\\
{\partial_\bn} w_i-\ri k w_i=0 &\;\text{on}\;\Gamma.
\end{cases}
\qquad
\begin{cases}
w_o\in H^1_{\mathrm{loc}}({\Omega_o})\\
{a_o}\Delta w_o+k^2{n_o} w_o=f_o &\;\text{in}\; {\Omega_o},\\
w_o=0 &\;\text{on}\;\Gamma,\\
w_o \in \mathrm{SRC}(k\sqrt{n_o/a_o}).
\end{cases}
\end{align*}
Then by \cite[Proposition~3.2]{MOS12} $w_i\in H^1({\Omega_i})$, ${\partial_\bn} w_i\in L^2(\Gamma)$ and $w_i\in H^1(\Gamma)$, and by \cite[Part (i) of Lemma 3.5]{Sp2013a}
$w_o\in H^1_{\mathrm{loc}}({\Omega_o})$, ${\partial_\bn} w_o\in L^2(\Gamma)$ and $w_o\in H^1(\Gamma)$
(note that both of the results \cite[Proposition~3.2]{MOS12} and \cite[Part (i) of Lemma 3.5]{Sp2013a} are specialisations of the regularity results of Ne\v{c}as \cite[\S5.1.2 and \S5.2.1]{Ne:67} to Helmholtz BVPs).
Then $\widetilde u_i:=u_i-w_i$ and $\widetilde u_o:=\frac{{a_o}}{{A_N}{a_i}}(u_o-w_o)$ satisfy problem (P) of \cite{ToWe:93} with $k_1,k_2,\mu_1,\mu_2$ as above and
$$f={a_o} g_D+{A_D}{a_o} w_i\in H^1(\Gamma), \qquad g=\frac{g_N-{a_o}{\partial_\bn} w_o}{{A_N}{a_i}}+{\partial_\bn} w_i\in L^2(\Gamma).$$
The existence and regularity of $u_i,u_o$ follow by applying again Theorem 7.2 of \cite{ToWe:93} to $\widetilde u_i,\widetilde u_o$.
\end{document} |
\begin{document}
\title{Spectral gap and cutoff phenomenon for the Gibbs sampler of $\nabla\varphi$ interfaces with convex potential}
\author{Pietro Caputo}
\address{Department of Mathematics and Physics, Roma Tre University, Largo San Murialdo 1, 00146 Roma, Italy.}
\email{caputo@mat.uniroma3.it}
\author{Cyril Labb\'e}
\address{Universit\'e Paris-Dauphine, PSL University, Ceremade, CNRS, 75775 Paris Cedex 16, France.}
\email{labbe@ceremade.dauphine.fr}
\author{Hubert Lacoin}
\address{IMPA, Estrada Dona Castorina 110, Rio de Janeiro, Brasil.}
\email{lacoin@impa.br}
\pagestyle{fancy}
\fancyhead[LO]{}
\fancyhead[CO]{\sc{P.~Caputo, C.~Labb\'e and H.~Lacoin}}
\fancyhead[RO]{}
\fancyhead[LE]{}
\fancyhead[CE]{\sc{Cutoff phenomenon for $\nabla\varphi$ interfaces}}
\fancyhead[RE]{}
\date{\small\today}
\begin{abstract}
We consider the Gibbs sampler, or heat bath dynamics associated to log-concave measures on $\mathbb{R}^N$ describing $\nabla\varphi$ interfaces with convex potentials. Under minimal assumptions on the potential, we find that the spectral gap of the process is always given by $\alphap_N=1-\cos(\pi/N)$, and that for all $\epsilon\in(0,1)$, its $\epsilon$-mixing time satisfies $T_N(\epsilon)\sim \frac{\log N}{2\alphap_N}$ as $N\to\infty$, thus establishing the cutoff phenomenon. The results reveal a universal behavior in that they do not depend on the choice of the potential.
\noindent
{\bf MSC 2010 subject classifications}: Primary 60J25; Secondary 37A25, 82C22.\\
\noindent
{\bf Keywords}: {\it Spectral gap; Mixing time; Cutoff.}
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}
\subsection{Model and result}
We consider the $1+1$ dimensional interface model defined as follows. The state space of the interface is defined by
$$\Omega_{N}:=\left\{ (x_0,\ldots,x_N)\in \mathbb{R}^{N+1} \ : \ x_0=0, x_N= 0 \right\}.$$
We fix a potential $V\in\mathscr{C}$, where $\mathscr{C}$ denotes the set of all functions $V:\mathbb{R}\to\mathbb{R}$ satisfying the following assumptions:
\begin{enumerate}[(i)]
\item\label{ass:conv} $V$ is convex,
\item\label{ass:poly} $V$ grows at most polynomially: there exist $C>0$ and $K\ge 1$ such that for all $x\in \mathbb{R}$,
\begin{equation}\label{ass:1}
|V(x)| \le C (1+|x|)^{K}\;.
\end{equation}
\item\label{ass:nonaff} $V$ is non-affine: namely we have $V'_+ > V'_-$ where
\begin{equation}\label{VminusVplus}
V'_+:=\lim_{x\to \infty} V(x)/ x \text{ \ and \ } V'_-:=\lim_{x\to -\infty} V(x)/x\;
\end{equation}
\end{enumerate}
The $\nabla\varphi$ interface with potential $V$ is the random element of $\Omega_N$ with distribution $\pi_N$, whose density with respect to Lebesgue measure is given by
\begin{equation}\label{defpin}
\frac{\,\text{\rm d}\pi_{N}}{ \,\text{\rm d} x_1\ldots \,\text{\rm d} x_{N-1}}= \frac{e^{-H}}{Z_N}\;,
\end{equation}
where $Z_N$ is the normalization constant and $H=H_{N,V}$ is the Hamiltonian
$$ H(x):=\sum_{k=1}^N V(x_k-x_{k-1}).$$
The Gibbs sampler for the measure $\pi_N$ that we wish to consider is the heat-bath dynamics defined as follows.
Let $\mathcal{Q}_k$ be the operator that equilibrates the $k$-th coordinate $x_k$ conditionally given the remaining coordinates.
More precisely, letting $x^{(k,u)}$ denote the vector $(x_0,\ldots,x_{k-1},u,x_{k+1},\ldots,x_N)$, set
\begin{equation}\begin{split}\label{projectors}
\mathcal{Q}_kf(x)&:= \frac{\int f(x^{(k,u)}) e^{-H(x^{(k,u)})}\,\text{\rm d} u}{\ \int e^{-H(x^{(k,u)})}\,\text{\rm d} u }\\
&= \int f(x^{(k,u)}) \rho_{x_{k-1},x_{k+1}}(u) \,\text{\rm d} u\;,
\end{split}
\end{equation}
where
$$\rho_{b,c}(u) := \frac{e^{-V(u-b)-V(c-u)}}{\int e^{-V(s-b)-V(c-s)} \,\text{\rm d} s}\;.$$
Define the Markov generator $\mathcal{L}=\mathcal{L}_{N,V}$ by
\begin{equation}
\mathcal{L} f:=\sum_{k=1}^{N-1}(\mathcal{Q}_k f-f).
\end{equation}
Let $\mathbf{X}^{x}=(\mathbf{X}^{x}(t))_{t\ge 0}$ be the continuous time Markov chain on $\Omega_{N}$ with generator $\mathcal{L}$ and initial condition $x$. Given $x\in \Omega_N$ and $\nu$ a probability measure on $\Omega_N$, let
$P^x_t$ and $P^{\nu}_t$ denote the distribution at time $t$ of the Markov chain with initial condition $x$ and $\nu$ respectively.
One can describe the evolution of the process as follows: each coordinate of $\mathbf{X}^{x}(t)$ is updated with rate $1$ independently. When an update is performed at time $t$ for coordinate $k$ the value of $X^x_k$ is resampled according to the conditional equilibrium measure, whose density is $\rho_{b,c}$ with $b=X^x_{k-1}(t)$, $c=X^x_{k+1}(t)$.
Since $\mathcal{L}$ is a finite sum of orthogonal projectors, it is a bounded self-adjoint operator on $L^2=L^2(\Omega_N,\pi_N)$, and therefore, the corresponding process is reversible with respect to $\pi_N$.
The spectral gap of the Gibbs sampler is defined by
\begin{equation}\label{defgap}
\alphap_N = \inf_{f\in L^2:\, \pi_N(f)=0} \frac{\pi_N(f(-\mathcal{L} f))}{\pi_N(f^2)},
\end{equation}
where we use the notation $\pi_N(f) = \int f \,\text{\rm d}\pi_N$.
We do not know whether the operator $\mathcal{L}$ has pure point spectrum in general, and therefore the spectral gap does not a priori coincide with (the opposite of) some eigenvalue of $\mathcal{L}$. Our first result computes the value of $\alphap_N$ and shows that it is indeed an eigenvalue.
\begin{theorem}\label{Th:gap}
For any potential $V\in\mathscr{C}$, for all $N\ge 2$, the spectral gap of $\mathcal{L}$ is given by
$$ \alphap_N = 1 - \cos\left(\frac{\pi}{N}\right)\;,$$
and the function
\begin{equation}\label{deffn}
f_N(x) = \sum_{k=1}^{N-1} \sin\left(\frac{k\pi}{N}\right) x_k\;,
\end{equation}
is an eigenfunction of $\mathcal{L}$ with eigenvalue $-\alphap_N$.
\end{theorem}
We remark that the spectral gap of the dynamics is independent of the choice of the potential $V$, as long as $V\in\mathscr{C}$, and it coincides with the first Dirichlet eigenvalue of the discrete Laplace operator on the segment $\{1,\ldots,N-1\}$.
Our next results concern the mixing time of the Gibbs sampler.
Without restriction on the set of possible initial conditions, this mixing time is infinite. Consequently, we restrict ourselves to initial conditions with absolute height at most $N$, and consider the distance to equilibrium at time $t$ from a worst case initial condition:
\begin{align}\label{defdn}
d_N(t) &:= \sup_{x\in \Omega_N\,:\; |x|_\infty \le N} \| P^x_t - \pi_N\|_{TV},
\end{align}
where the total variation distance between two probability measures $\mu,\nu$ on $\Omega_N$ is defined as
$$
\|\mu-\nu\|_{TV}= \sup_{B\in \mathcal{B}(\Omega_N)} \left(\mu(B)-\nu(B)\right),
$$
the supremum ranging over all Borel subsets of $\Omega_N$.
Note that we do \emph{not} condition the dynamics to keep the height of the interface within $[-N,N]$.
For any $\epsilon\in(0,1)$, the $\epsilon$-mixing time is then defined as
$$ T_N(\epsilon) := \inf\{t\ge 0: d_N(t) < \epsilon\}\;.$$
\begin{theorem}\label{th:main1}
For any $V\in\mathscr{C}$, for all $\epsilon \in (0,1)$:
\begin{align}\label{eq:cutoff}
T_N(\epsilon) \sim \frac{\log N}{2 \alphap_N}\;.
\end{align}
\end{theorem}
We use the symbol ``$\sim$'' for asymptotic equivalence as $N\to\infty$, so that in view of Theorem \ref{Th:gap}, \eqref{eq:cutoff} is equivalent to
\begin{align}\label{eq:cutoff2}
\lim_{N\to\infty}
\frac{T_N(\epsilon)}{N^2\log N}=\frac1{\pi^2}\;.
\end{align}
Theorem \ref{th:main1} shows that the $\epsilon$-mixing time is, to leading order, insensitive to the threshold parameter $\epsilon$, that is, the Gibbs sampler satisfies the cutoff phenomenon. Note again the universal behavior, that is the fact that nothing depends on $V$, as long as $V\in\mathscr{C}$.
\begin{remark}
If the restriction on the absolute height $|x|_\infty \le N$ is replaced by $|x|_\infty \le a_N$ with $a_N \gg \sqrt N$ then our proof carries over and yields
$$ T_N(\epsilon) \sim \frac{\log(a_N N^{-1/2})}{\alphap_N}\;.$$
For an interpretation of this result, observe that if the initial condition is $x_i = a_N$ for every $i\in \{1,\ldots,N-1\}$, then $t= \frac{\log(a_N N^{-1/2})}{\alphap_N}$ is exactly the time it takes for
$$ \mathbb{E}[f_N(\mathbf{X}^x(t))] = f_N(x) e^{-\alphap_N t}\;,$$
to drop from $f_N(x) = \Theta(N a_N)$ to $\Theta(N^{3/2})$, the latter being the order of fluctuations of $f_N$ at equilibrium.
\end{remark}
\begin{remark}\label{rem:tilt}
The Markov chain can be viewed as taking values in the larger space
\begin{equation}\label{def:tildeom}
\widetilde \Omega_{N}:=\left\{ (x_0,\ldots,x_N)\in \mathbb{R}^{N+1} \ : \ x_0=0 \right\}.
\end{equation}
In that case, the value at the endpoint $X^x_N(t)$ remains equal to its initial value $x_N$ for all $t$. Moreover, we could have fixed the endpoint $x_N = hN$ with $h \ne 0$ and thus have considered the mixing property of the process within the set
$$\Omega_{N,h}:=\left\{ (x_0,\ldots,x_N)\in \mathbb{R}^{N+1} \ : \ x_0=0, x_N= hN \right\}.$$
The results of Theorems \ref{Th:gap} and \ref{th:main1} still hold in this more general setting. Indeed, using the transformation $(x_k)\mapsto (x_{k}- kh)$ which maps $\Omega_{N,h}$ to $\Omega_{N}$, and considering the modified potential $V_h(\cdot):=V(h+\cdot)$, again an element of $\mathscr{C}$, we are back to the original setting. In particular, it follows that the spectral gap is {\em independent} of $h$. Concerning the mixing time, one can actually prove that the $N\to\infty$ limit in Theorem \ref{th:main1} holds uniformly over $h$ in compact sets.
\end{remark}
\begin{remark}
Let us comment on our assumptions on the potential $V\in\mathscr{C}$. The convexity hypothesis on $V$ is the most important, and it is required at various points in the proof. In the language of interacting particle systems it makes the system \textit{attractive}, in the sense that it entails the existence of a coupling that preserves monotonicity (see Lemma \ref{lem:mongc} below).
The assumption \eqref{ass:1} about polynomial growth is merely technical. It helps us obtain certain estimates, and in practice it does not appear to be very restrictive.
Finally the assumption \eqref{VminusVplus} is the easiest to justify:
if $V$ is an affine function then the measure $\pi_N$ is not defined since $e^{-H}$ would not be integrable in this case.
Note that the definition \eqref{defpin} remains unchanged if $V(u)$ is replaced by $V'(u)=V(u)+au+b$, since in that case $H_{N,V'}(x)=H_{N,V}(x)+bN$ is only modified by a constant.
\end{remark}
\subsection{Related works}
The relaxation to equilibrium of $\nabla\varphi$ interfaces has been the object of many remarkable works in recent years, especially in conjunction with hydrodynamic limits, see e.g.\ \cite{Giacomin,Funaki} and the references therein. In particular, the validity of functional inequalities for the equilibrium measure $\pi_N$ has been explored under various assumptions on the potential $V$. The dynamics considered in these works is usually the conservative diffusion process, namely the Langevin dynamics associated to the Hamiltonian $H$ in the state space $\Omega_N$. For instance, when the potential is a bounded perturbation of a uniformly strictly convex function, then upper and lower bounds of order $N^{-2}$ on the spectral gap of the Langevin diffusion have been obtained in \cite{Cap}. Moreover, the stronger logarithmic Sobolev inequality has been established by Menz and Otto \cite{OttoMenz}. These results were shown to hold uniformly in the tilt parameter $h$ when the interface endpoint is fixed at $x_N=hN$. The uniformity in $h$ is a consequence of the assumption of uniform strict convexity and it cannot hold for the diffusion process if the potential is only assumed to be convex. In the non-uniformly convex case, spectral gap bounds with the correct dependence on the tilt $h$ were obtained in \cite{BartheWolff,BartheMilman} for certain potentials such as the solid-on-solid (SOS) potential $V(u)=|u|$.
While several of the techniques employed in these works carry over to the jump process we consider in this paper, as far as we know none of the previous works allows one to actually compute the spectral gap as we do here. As discussed in Remark \ref{rem:tilt}, our results hold uniformly for $h$ in a compact set. Comparison with the SOS case studied in \cite{BartheMilman} shows in particular that the spectral gap of the Gibbs sampler is much less sensitive than the spectral gap of the diffusion process regarding the choice of the tilt parameter $h$. Moreover, as already mentioned our results are largely insensitive to the choice of the potential $V$. On the other hand, we cannot handle perturbations of a convex potential, since we strongly rely on the FKG inequality and other monotonicity properties which in general do not hold if the convexity assumption is dropped.
Interface models of the form \eqref{defpin} are also commonly studied in the discrete setting, namely when the heights $x_i$ are restricted to take only integer values, in which case they form natural models for the interface separating two distinct phases in low temperature spin systems; see, e.g.\ \cite{BodineauIoffeVelenik}. For the discrete SOS model, estimates that are tight up to multiplicative constants for the spectral gap and the mixing time of the Gibbs sampler were obtained in \cite{MartinelliSinclair}. We believe that our main results Theorem \ref{Th:gap} and Theorem \ref{th:main1} can be extended to include this case as well, with small modifications in the proof. Certainly more challenging would be the determination of the spectral gap and mixing time of the
local dynamics for the discrete SOS interfaces considered e.g.\ in \cite{Posta,CapMarTon}, where only $\pm1$ increments of the height are allowed at each update.
The problem of determining whether a given Markov chain exhibits the cutoff phenomenon or not keeps attracting a lot of attention. While a general theory is still out of reach, more and more instances of the phenomenon are being understood. Most of the known results concern Markov chains with finite state space, see e.g.\ the monograph \cite{LevPerWil}. Especially closely related to our analysis here are the results concerning the exclusion process \cite{Lac16,labbe2019cutoff}. As in our recent work \cite{CLL}, one of the motivations in the present paper is to investigate the phenomenon for Markov chains with continuous state space. Our previous paper \cite{CLL} establishes the cutoff phenomenon for a heat bath dynamics over the simplex, when the target distribution is uniform or some log-concave generalization thereof. While our assumptions on the potential $V$ here are general enough to handle target distributions $\pi_N$ from a very large family of log-concave measures, we note that they do not include the measures on the simplex considered in \cite{CLL} since the positivity constraint characterizing the simplex would require dropping the polynomial growth condition.
\subsection{Overview}
Section \ref{Sec:Tools} introduces several tools and presents some estimates to be used in the sequel. In Section \ref{Sec:Lower}, we establish the lower bound on the mixing time of Theorem \ref{th:main1} by identifying an initial condition for which the process remains far from equilibrium until the putative mixing time: this initial condition is built in such a way that rather explicit computations can be performed on the law of the image through $f_N$ (from \eqref{deffn}) of the process. A first upper bound on the mixing time is obtained in Section \ref{Sec:Upper}: it catches the correct order but not the precise constant. This bound allows us to determine the spectral gap of the generator. In Sections \ref{Sec:UpperTight} and \ref{Sec:Wedge} we refine the upper bound of the previous section by estimating, under some appropriate coupling, the merging time of two processes starting from a (random) `maximal' initial condition and any arbitrary initial condition, and by proving that the process starting from this maximal initial condition reaches equilibrium by the putative mixing time.
\section{Main tools}\label{Sec:Tools}
\subsection{The gradient dynamics}
The process $(\boldsymbol\eta(t))_{t\ge 0}$ defined on $\mathbb{R}^N$ by
the increments $$\eta_k(t)=(X_{k}-X_{k-1})(t)$$ is also Markovian. We sometimes use the notation $\boldsymbol\eta(t)=\nabla{\bf X}(t)$. To describe its evolution, we introduce some notation.
Given $a \in \mathbb{R}$, we define the resampling potential $W_a$ as
$$ W_a(u) := V\left(a+u\right) + V\left(a-u\right) - 2 V\left(a\right)\;,$$
and set
\begin{equation}\label{deftheta}
\theta_a(u):=\frac{ e^{-W_a(u)}}{Z(a)} \quad \text{ with } \quad Z(a) := \int_{\mathbb{R}} e^{-W_a(u)} \,\text{\rm d} u\;.
\end{equation}
The function $W_a(u)$ is symmetric (with respect to $u$, but not with respect to $a$ in general), convex and non-negative. It is minimized at $0$ where it admits the value $0$. Our assumption $V\in\mathscr{C}$ ensures that $Z(a)$ is finite.
Note also that
\begin{equation}\label{transinv}
\rho_{-a,a}(u)=\theta_a (u)\quad \text{ and } \quad \rho_{b,c}(u)=\theta_{\frac{c-b}{2}}\left(u -\frac{c+b}{2} \right).
\end{equation}
The dynamics of the gradients is then described as follows. For each $k\in \llbracket 1, N-1\rrbracket$ at rate one
$(\eta_k,\eta_{k+1})$ jumps to $(\bar \eta_k - U, \bar \eta_k+U)$ where $\bar \eta_k=\frac{\eta_{k+1}+\eta_{k}}{2}$ and $U$ is a r.v. with density $\theta_{\bar \eta_k}$ . The associated Markov generator is given by
$$ \widetilde \mathcal{L} f(\eta) = \sum_{k=1}^{N-1} \int \big(f(\eta^{(k,u)}) - f(\eta)\big) \theta_{\bar \eta_k}(u) \,\text{\rm d} u\;,$$
where $f:\mathbb{R}^N\to\mathbb{R}$ and $\eta^{(k,u)}$, $u\in\mathbb{R}$, denotes the vector $\eta$ with the pair $(\eta_k,\eta_{k+1})$ replaced by $(\bar \eta_k - u, \bar \eta_k+u)$.
Note that the invariant measure $\pi_N$, in terms of the gradient variables $\eta$, is nothing but the product probability measure with density proportional to $\otimes_{k=1}^N e^{-V}$, conditioned on $\sum_{k=1}^N \eta_k = 0$.
\subsection{The action on linear functions}\label{sec:linear}
The generators $\mathcal{L}$ and $ \widetilde \mathcal{L}$ take a particularly simple form when applied to linear functions.
If $g_k$ denotes the coordinate map $g_k:x\mapsto x_k$ then
\begin{equation}\label{Eq:laplace}
\mathcal{L} g_k(x) = \frac{x_{k-1} + x_{k+1}}{2}-x_k = \frac12 \Delta x_k\;,
\end{equation}
where $\Delta$ denotes the discrete Laplacian.
Summation by parts and \eqref{Eq:laplace}
then show that for every $j\in \llbracket 1,N-1\rrbracket$ the map $f_N^{(j)}:\Omega_N\mapsto \mathbb{R}$ given by
\begin{equation}\label{defj}
f_N^{(j)} (x) := \sum_{k=1}^{N-1} \sin\left( \frac{j \pi k}{N}\right) x_k\;
\end{equation}
is an eigenfunction of $\mathcal{L}$ with the eigenvalue $-\lambda_N^{(j)}$ where
\begin{equation}\label{defjj}
\lambda_N^{(j)} := 1 - \cos\left( \frac{j\pi}{N}\right)\;.
\end{equation}
Thus, linear functions form an invariant subspace, and the spectrum of $-\mathcal{L}$ restricted to that subspace consists of the $N$ eigenvalues
$$0=:\lambda_N^{(0)}<\lambda_N^{(1)}<\lambda_N^{(2)}<\cdots<\lambda_N^{(N-1)}.$$
In the case $j=1$, we simply write $f_N$ for $f_N^{(1)}$ and $\lambda_N$ for $ \lambda_N^{(1)}$.
In particular, it follows that $\alphap_N\le \lambda_N$. Theorem \ref{Th:gap} will establish that $\lambda_N$ is actually equal to the spectral gap of $\mathcal{L}$.
\subsection{General spectral gap considerations}
Next, we give a rather general characterization of the spectral gap.
Consider a reversible Markov process $(X_t)_{t\ge 0}$ on a measurable space $\Omega$ with generator $\mathcal{L}$ and stationary distribution $\pi$. Assume that $\mathcal{L}$ is self-adjoint in $L^2=L^2(\Omega,\pi)$, and define its spectral gap as
\begin{equation}\label{defgapgen}
\alphap = \inf_{f\in L^2:\, \pi(f)=0} \frac{\langle f,-\mathcal{L} f\rangle}{\langle f,f\rangle},
\end{equation}
where we write $\langle\cdot,\cdot\rangle$ for the scalar product in $L^2$.
Given a probability measure $\nu$ on $\Omega$, we let $P^{\nu}_t$ denote the distribution of $X_t$ starting with initial condition $\nu$.
Finally for a probability measure $\nu\ll \pi$ we let $\|\nu\|_{\infty}$ denote the $L^{\infty}$ norm of density $\,\text{\rm d} \nu /\,\text{\rm d}\pi$.
\begin{proposition}\label{genresult}
The spectral gap satisfies
\begin{equation}\label{eq:gapchar}
\alphap= -\sup_{ \|\nu\|_{\infty}<\infty } \limsup_{t\to \infty} \frac{1}{t} \log \| P^{\nu}_t- \pi \|_{TV}.
\end{equation}
If furthermore $\Omega$ is a topological space exhausted by compact sets (and equipped with its Borel $\sigma$-algebra) we can restrict the supremum to $\nu$ with compact support.
\end{proposition}
\begin{proof}
Suppose $\nu$ is a probability measure on $\Omega$ with $\|\nu\|_{\infty}<\infty$. Let $\rho$ and $\rho_t$ denote respectively the density of
$\nu$ and $P^{\nu}_t$ with respect to $\pi$. Then $\rho_t=e^{t\mathcal{L}}\rho$, and the spectral theorem implies
\begin{align*}
\| P^{\nu}_t- \pi \|_{TV}&= \frac{1}{2}\| \rho_t- 1 \|_{L^1(\pi)}\\&\le
\frac{1}{2}\| \rho_t- 1 \|_{L^2(\pi)}\le \frac1 2 \|\rho-1\|_{L^2(\pi)} e^{-\alphap t}.
\end{align*}
This proves that the spectral gap is at most the right hand side in \eqref{eq:gapchar}.
The other inequality requires a bit more work.
Let us first treat the simpler case where $-\alphap$ is an eigenvalue of $\mathcal{L}$. Let $f$ be a normalized eigenfunction such that $\mathcal{L} f=-\alphap f$. Assume without loss of generality that the
positive part $f_+$ satisfies $\| f_+ \|^2_{L^2(\pi)}\ge 1/2$ (if not take the negative part).
Given $M>0$, consider the bounded density
$$\rho_{M}= \frac{f_+\wedge M}{\| f_+\wedge M\|_{L^1(\pi)}}.$$
By monotone convergence
and using $\| f_+ \|_{L^1(\pi)}\le \| f_+ \|_{L^2(\pi)}\le 1$,
$$\lim_{M\to \infty} \langle \rho_{M}, f\rangle= \frac{\| f_+ \|^2_{L^2(\pi)}}{\| f_+ \|_{L^1(\pi)}}\ge \frac12.$$
Let us thus fix $M$ sufficiently large so that
$$\langle \rho_{M}, f\rangle= \langle \rho_{M}-1, f\rangle\ge \frac{1}{3}.$$
Recall that $\| \mu_1 - \mu_2 \|_{TV} = \sup\{ \int h \,\text{\rm d} (\mu_1-\mu_2): \|h\|_\infty \le 1\}$. If $\nu$ has density $\rho_{M}$,
then
\begin{align}
\| P^{\nu}_t- \pi \|_{TV} &\ge \int \frac{\rho_M}{\|\rho_{M}\|_\infty} \,\text{\rm d} (P^{\nu}_t - \pi) = \int \frac{(\rho_{M}-1)}{\|\rho_{M}\|_\infty} \,\text{\rm d} (P^{\nu}_t - \pi)
\nonumber\\ &= \frac{\langle e^{t\mathcal{L} } (\rho_{M}-1), (\rho_{M}-1) \rangle}{\|\rho_{M}\|_\infty} .
\label{eq:sp1}
\end{align}
If $f$ is an eigenfunction, then
$ \rho_{M}-1= \langle \rho_{M}-1, f\rangle f
+g$,
where $g$ is orthogonal to $f$ and $e^{t\mathcal{L} } g$ is orthogonal to $f$. Therefore,
$$
\| P^{\nu}_t- \pi \|_{TV}
\ge \frac{\langle \rho_{M}-1, f\rangle^2}{\|\rho_{M}\|_\infty}\,\langle e^{t\mathcal{L} } f, f \rangle \ge \frac{1}{9{\|\rho_{M}\|_\infty}} e^{-t\alphap }.
$$
This implies the desired bound in the case where $-\alphap$ is an eigenvalue of $\mathcal{L}$.
If $-\alphap$ is not an eigenvalue we argue as follows. Let ${\mathbf E}_{\delta}$ denote the spectral projector of $-\mathcal{L}$ associated to the interval $[\alphap,\alphap+\delta]$, and let $H_\delta$ denote the corresponding closed subspace of $L^2(\pi)$. Suppose that $f$ is normalized and $f\in H_\delta$.
Let $\rho_{M}$ be defined as above and notice that \eqref{eq:sp1} continues to hold. Since
$ \rho_{M}= {\mathbf E}_{\delta} \rho_{M} + g$,
where $g\in H_\delta^\perp$ and $e^{t\mathcal{L} } g\in H_\delta^\perp$, one has
\begin{align}
\langle e^{t\mathcal{L} } (\rho_{M}-1), (\rho_{M}-1) \rangle \ge
e^{-(\alphap+\delta) t} \| {\mathbf E}_{\delta}\rho_M\|^2_{L^2(\pi)}.
\end{align}
Since $f$ is normalized and $f\in H_\delta$, one has
$\| {\mathbf E}_{\delta}\rho_M\|^2_{L^2(\pi)}\ge \langle \rho_M,f \rangle^2$. In conclusion, we have shown that if $\nu$ has density $\rho_M$ then
\begin{align*}
\| P^{\nu}_t- \pi \|_{TV} &\ge \frac{1}{9{\|\rho_{M}\|_\infty}} e^{-t(\alphap + \delta) }.
\end{align*}
By the arbitrariness of $\delta$ this implies the desired inequality.
If $\Omega$ is exhausted by compact sets then we can modify the definition of $\rho_{M}$ to make it compactly supported.
\end{proof}
\subsection{Monotone grand coupling}\label{Sec:GC}
We will consider two partial orders on interface configurations:
\begin{equation}\label{Eq:order}
\begin{split}
x \le y \quad &\Leftrightarrow \quad \forall k \in \llbracket 0, N \rrbracket,\quad x_k\le y_k.\\
x \preccurlyeq y \quad &\Leftrightarrow \quad \forall k \in \llbracket 1, N \rrbracket,\quad (x_k-x_{k-1})\le (y_k-y_{k-1}).
\end{split}
\end{equation}
Note that $\le$ is a natural partial order in both spaces $\Omega_N$ and $\widetilde \Omega_N$ while $\preccurlyeq$ is only relevant for the enlarged space $\widetilde \Omega_N$ (recall the definition in \eqref{def:tildeom}).
We present a global coupling of the trajectories $\mathbf{X}^x$ (and therefore $\boldsymbol\eta$) starting from all possible initial conditions $x$ which preserves both types of monotonicity.
\begin{lemma}\label{lem:mongc}
There exists a coupling of $\{\mathbf{X}^x, x\in\widetilde \Omega_N\}$ such that
\begin{itemize}
\item If $x \le y$, then $\mathbf{X}^x(t)\le \mathbf{X}^y(t)$ for all $t\ge 0$;
\item If $x \preccurlyeq y$, then $\mathbf{X}^x(t)\preccurlyeq \mathbf{X}^y(t)$ for all $t\ge 0$.
\end{itemize}
\end{lemma}
\begin{proof}
The coupling is a version of the usual graphical construction (see e.g.~\cite{Liggettbook}). To each $k\in\llbracket 1, N-1\rrbracket$ we associate a Poisson clock process $(\mathcal{T}^{(k)}_i)_{i\ge 1}$ whose increments are i.i.d.~rate one exponentials, and a sequence $(U^{(k)}_i)_{i\ge 1}$ of i.i.d.\ uniform r.v.
Then, for every $x\in\widetilde\Omega_N$, $(\mathbf{X}^x(t))_{t\ge 0}$ is a c\`ad-l\`ag process that only evolves at the update times $(\mathcal{T}^{(k)}_i)_{k\in \llbracket 1, N-1\rrbracket, i\ge 1}$. More precisely at time $t=\mathcal{T}^{(k)}_i$, if $U^{(k)}_i=u$ then the $k$-th coordinate is updated as follows
$$ X_k^x(t)= F^{-1}_{X^x_{k-1}(t_-),X^x_{k+1}(t_-)}(u) \text{ and } X_j^x(t):= X^x_j(t_{-}) \text{ for } j\ne k\;,$$
where for $b,c$ we define $F_{b,c}: \mathbb{R} \to [0,1]$ as
$$F_{b,c}(t)=\int_{-\infty}^t \rho_{b,c}(u) \,\text{\rm d} u\;.$$
By construction, the law of $\mathbf{X}^x$ under this coupling is the desired one.
To check that this coupling preserves the partial order ``$\le$'' it is sufficient to check that for every $t\in\mathbb{R}$
\begin{equation}\label{firstorder}
b\le b' \text{ and } c\le c' \quad \Rightarrow \quad
F_{b,c}(t)\ge F_{b',c'}(t)\;.
\end{equation}
For the partial order ``$\preccurlyeq$'' it suffices to show that if $c-b\le c'-b'$ then
\begin{equation}\label{secondorder}
F_{b,c}(t+b)\ge F_{b',c'}(t+b') \quad \text{ and } \quad F_{b,c}(c-t)\le F_{b',c'}(c'-t).
\end{equation}
We start with \eqref{firstorder}. As $\rho_{b,c}$ and $\rho_{b',c'}$ are positive, continuous and integrate to the same value, there must exist $u_0$ such that
$\rho_{b,c}(u_0)=\rho_{b',c'}(u_0)$. Let us show that $u_0$ must satisfy
\begin{equation}\label{comparr}
\begin{cases}
\forall u\le u_0, \quad \rho_{b',c'}(u)\le\rho_{b,c}(u),\\
\forall u\ge u_0, \quad \rho_{b',c'}(u)\ge\rho_{b,c}(u).
\end{cases}
\end{equation}
We note that the desired inequality \eqref{firstorder} is a simple consequence of \eqref{comparr}.
To prove \eqref{comparr}, we set $W_{b,c}=\log \rho_{b,c}$, and show that
$W_{b',c'}-W_{b,c}$ is nondecreasing. Indeed, everywhere except on a countable set, $W_{b',c'}-W_{b,c}$ is differentiable and we have by convexity
$$(W_{b',c'}-W_{b,c})'(u)= V'(u-b)-V'(u-b')-V'(c-u)+V'(c'-u)\ge 0.$$
This proves \eqref{comparr}.
Now \eqref{secondorder} only needs to be proved for $b=b'=0$ by translation invariance. With this in mind, the first inequality in \eqref{secondorder} is a consequence of \eqref{firstorder}. Regarding the second inequality in \eqref{secondorder}, we observe that it is equivalent to
$$ \widetilde{F}_{c',0}(-t+c') \ge \widetilde{F}_{c,0}(-t+c)\;,$$
where $\widetilde{F}$ is the distribution function associated to the potential $\widetilde{V}(\cdot) = V(-\cdot)$. The latter inequality is then exactly of the same form as the first inequality in \eqref{secondorder}: since $\widetilde{V}$ satisfies the same assumptions as $V$ we are done. \end{proof}
\subsection{The sticky coupling}\label{Subsec:sticky}
In this section we construct a coupling of two trajectories $(\mathbf{X}^{x}(t))_{t\ge 0}$ and $(\mathbf{X}^{y}(t))_{t\ge 0}$ which is aimed at minimizing the merging time.
This coupling is also monotone, that is if $x\le y$ then $\mathbf{X}^{x}(t)\le\mathbf{X}^{y}(t)$ at all times.
In contrast with that of the previous section, this construction cannot naturally be extended to a grand-coupling.
It
can (and will) also be used for two processes $\mathbf{X}^{(1)}$ and $\mathbf{X}^{(2)}$ with initial conditions $\mathbf{X}^{(1)}(0)$ and $\mathbf{X}^{(2)}(0)$ sampled according to some prescribed distributions on $\Omega_N$.
As in the previous construction, to each $k\in\llbracket 1, N-1\rrbracket$ we associate a Poisson clock process $(\mathcal{T}^{(k)}_i)_{i\ge 1}$ whose increments are i.i.d.\ rate one exponentials.
Let us now describe how the updates are performed. If
$\mathcal{T}^{(k)}_i=t$ for some $i$ we resample the values of $X^x_k$ and $X^y_k$.
We use the short hand notation
\begin{equation}
\rho_x:= \rho_{X^x_{k-1}(t_-),X^x_{k+1}(t_-)}\,,\quad \rho_y:= \rho_{X^y_{k-1}(t_-),X^y_{k+1}(t_-)}\;,
\end{equation}
and set
\begin{equation}
p(t,k):=\int_{\mathbb{R}}\rho_x(u)\wedge \rho_y(u) \,\text{\rm d} u\;.
\end{equation}
Finally we define three probability measures $\nu_1$, $\nu_2$ and $\nu_3$ with densities proportional to $(\rho_x-\rho_y)_+$, $(\rho_x\wedge \rho_y)$ and $(\rho_y-\rho_x)_+$ (in the case where $\rho_x=\rho_y$ we can set $\nu_1$ and $\nu_3$ to be the Dirac mass at $0$, or any other arbitrary distribution).
The update then goes as follows
\begin{itemize}
\item With probability $p=p(t,k)$, we set $X^x_k(t)=X^y_k(t)$, and we draw their common value according to $\nu_2$.
\item With probability $q=1-p$, we draw $X^x_k(t)$ and $X^y_k(t)$ independently with respective distributions $\nu_1$ and $\nu_3$.
\end{itemize}
To see that this coupling preserves ``$\le$'' notice that if the configurations are ordered before the update (or more specifically if $X^x_{k\pm 1}(t_{-} )\le X^y_{k\pm 1}(t_{-})$) then there exists $u_0$ such that
$\nu_1$ is supported on $(-\infty,u_0]$ and $\nu_3$ on $[u_0,\infty)$, the latter fact being a direct consequence of \eqref{comparr}.
\begin{remark}
More formally, we can define the coupling using, on top of the clock process, $4$ sequences of independent uniform random variables on $[0,1]$ for each coordinate, from which the updates are defined in a deterministic fashion: we couple if and only if the first uniform is smaller than $p$ and we use the three other uniforms to sample independent random variables with distribution $\nu_1$, $\nu_2$ and $\nu_3$ respectively.
\end{remark}
\subsection{FKG inequalities}
Recall the partial order $\le$ introduced in \eqref{Eq:order}. We say that $f : \Omega_N \to \mathbb{R}$ is increasing if
$$ x \le y \;\; \Rightarrow \;\; f(x) \le f(y)\;.$$
For two probability measures $\mu,\nu$ on $\Omega_N$, we write $\mu\le \nu$ and say that $\mu$ is stochastically dominated by $\nu$ if for all increasing $f: \Omega_N \to \mathbb{R}$ one has $\mu(f)\le\nu(f)$.
We also say that a set $A\subset \Omega_N$ is increasing if the map $\mathbf{1}_A$ is increasing. Finally, for any two configurations $x,y \in \Omega_N$ we introduce the configurations $\min(x,y), \max(x,y)$ defined as
$$ \min(x,y)_i := \min(x_i,y_i)\;,\quad \max(x,y)_i := \max(x_i,y_i)\;.$$
\begin{proposition}[FKG inequalities]
\label{prop:fkg}
If $f,g$ are increasing then
$$ \pi_N(fg) \ge \pi_N(f) \pi_N(g)\;.$$
Furthermore if $A,B \subset \Omega_N$ are increasing and satisfy
$$\left\{ x\in A \ \text{ and } \ y\in B\right\}\;\; \Rightarrow \;\;\min(x,y)\in B,$$
then
\begin{equation}\label{piapib}
\pi_N(\cdot \,|\, A) \ge \pi_N(\cdot \,|\, B)\,.
\end{equation}
\end{proposition}
\begin{proof}
By~\cite[Thm 3]{Preston}, the first part of the statement is granted if we have for all $x,y\in \Omega_N$
\begin{equation}\label{Hmaxmix} H(\max(x,y)) + H(\min(x,y)) \le H(x) + H(y)\;.\end{equation}
The convexity of $V$ is sufficient to ensure this inequality, see for instance~\cite[Appendix B1]{Giacomin}.
We turn to the second part of the statement. Set $\mu_A := \pi_N(\cdot \,|\, A)$ and define $\mu_B$ similarly. The densities of these measures are proportional to $e^{-H_A}, e^{-H_B}$ where
$$ H_A(x) := \begin{cases} H(x)&\mbox{ if } x\in A\;,\\
+\infty&\mbox{ if } x\notin A\;.\end{cases}$$
By~\cite[Prop 1]{Preston}, it suffices to check that for all $x,y \in \Omega_N$
$$ H_A(\max(x,y)) + H_B(\min(x,y)) \le H_A(x) + H_B(y)\;.$$
This is granted by \eqref{Hmaxmix} and the assumption on $A,B$.
\end{proof}
A useful example to keep in mind is as follows. Let $\mathscr{K}\subset \{1,\ldots,N-1\}$ be a set of labels and define, for some $a\in\mathbb{R}$, the sets
\begin{align}\label{ex:abex}
A_i = \left\{x:\,x_i\ge a\right\}\,,\qquad A = \bigcap_{i\in\mathscr{K}}A_i\,,\qquad B = \bigcup_{i\in\mathscr{K}}A_i\,.
\end{align}
Then $A,B\subset \Omega_N$ satisfy the requirement in Proposition \ref{prop:fkg} and the inequality \eqref{piapib} is crucially used in the proof of Proposition \ref{prop:muW} below.
\subsection{Absolute continuity}\label{sec:ac}
It will be useful to compare the conditional probability measure $\pi_N$ to an unconditional measure under which the increments $\eta_i$ are independent and have the same mean. We need a preliminary lemma.
\begin{lemma}\label{lem:mean}
Let $V\in\mathscr{C}$ and set $I:= ( V'_-, V'_+)$, where $V'_\pm$ are defined in \eqref{VminusVplus}.
The function $\psi: I\mapsto \mathbb{R}$ defined by
\begin{equation}
\psi(\lambda) = \frac{\int u e^{-V(u)+\lambda u} \,\text{\rm d} u }{\int e^{-V(u)+\lambda u} \,\text{\rm d} u}
\end{equation}
is
bijective from $I$ to $\mathbb{R}$.
\end{lemma}
\begin{proof}
The function $\psi$ is increasing since for any $\lambda\in I$:
\begin{align}\label{eq:leg2}
\psi'(\lambda)
= \frac{\int (u-\psi(\lambda))^2 e^{-V(u)+\lambda u} \,\text{\rm d} u }{\int e^{-V(u)+\lambda u} \,\text{\rm d} u}>0.
\end{align}
To prove that $\psi$ is surjective we show that
$\psi(\lambda)\uparrow\infty$ when $\lambda\uparrow V'_+$ (a similar argument proves that $\psi(\lambda)\downarrow-\infty$ when $\lambda\downarrow V'_-$). When $V'_+<\infty$ this follows from the fact that $\psi(\lambda)$ is the derivative of $\log \int e^{-V(u)+\lambda u} \,\text{\rm d} u$ which itself tends to infinity when $\lambda\to V'_+$ (by convexity we have that
$V(u)\le V'_+ u + V(0)$). When $V'_+=\infty$ it is a standard task to check that $\log \int e^{-V(u)+\lambda u} \,\text{\rm d} u$ grows superlinearly at infinity.
\end{proof}
As a consequence
there exists $\lambda \in I$ such that for $\widetilde{V}(x) := V(x) - \lambda x$ we have $\int x e^{-\widetilde{V}(x)}\,\text{\rm d} x = 0$. Note that $\widetilde{V}\in\mathscr{C}$.
Let $\nu_N$ be the probability measure under which the r.v.~$\eta_k$, $k\in\llbracket 1,N\rrbracket$ are i.i.d.~with density proportional to $e^{-\widetilde{V}}$.
Under $\nu_N$, the expectation of the r.v.~$x_N$ vanishes. The next lemma shows that the law of a fixed proportion (bounded away from $1$) of the $\eta_k$'s under $\pi_N$ is absolutely continuous with respect to the law of the same r.v.~under $\nu_N$, uniformly in $N$. The point here is that $\nu_N$ remains a product law and is therefore more tractable.
\begin{lemma}\label{compare}
Fix $a \in (0,1)$ and write $N_a := \lfloor a N \rfloor$ for all $N\ge 1$. There exists a constant $C_a > 0$ such that for all $N\ge 1$ and all positive bounded measurable functions $f:\mathbb{R}^{N_a} \to \mathbb{R}_+$ we have
$$ \pi_N[f(\eta_1,\ldots,\eta_{N_a})] \le C_a \,\nu_N[f(\eta_1,\ldots,\eta_{N_a})]\;.$$
\end{lemma}
\begin{remark}
Note that from exchangeability the above statement is also valid for the functional of an arbitrary subset of the increments of cardinality smaller than $ aN$.
\end{remark}
\begin{corollary}\label{cor:bazics}
There exist two constants $c,C>0$ such that for all $u>0$
\begin{equation}\label{deviates}
\pi_N\left( \|x\|_{\infty}\ge u \sqrt{N} \right) \le C N e^{- c \left[ u^2\wedge(\sqrt{N}u)\right]},
\end{equation}
and
\begin{equation}\label{largegrad}
\pi_N\left( \max_{i\in \llbracket 1, N \rrbracket} |\eta_i| \ge u \right) \le N e^{- cu}.
\end{equation}
\end{corollary}
\begin{proof}[Proof of Corollary \ref{cor:bazics}]
For \mbox{\tiny eq}ref{largegrad} we apply the lemma to $\mathbf{1}_{|\eta_i|\ge u}$ and use the union bound. For \mbox{\tiny eq}ref{deviates} we only need to prove that
\begin{equation}\label{toprove}
\pi_N\left( x_i \ge u \sqrt{N} \right) \le (C/2) e^{- c \left[ u^2\wedge(\sqrt{N}u)\right]}.
\end{equation}
for $i\le N/2$ (the corresponding lower bound and the case $i\ge N/2$ can be dealt with by symmetry) and use a union bound. Lemma \ref{compare} applied to $a=1/2$ allows us to prove the bound for $\nu_N$ under which $x_i$ is a sum of i.i.d.\ exponentially integrable random variables. Reproducing the classic upper bound computation in the proof of Cram\'er's Theorem (see e.g. \cite[Chapter 1]{DZ09}) we have
$$\nu_N\left( x_i \ge u \sqrt{i} \right)\le e^{- i\varphi( u i^{-1/2})}$$
where $\varphi(x):= \max_{t\ge 0} \left( tx-\log \frac{\int e^{-\widetilde V(u)+tu} \,\text{\rm d} u}{\int e^{-\widetilde V(u)} \,\text{\rm d} u}\right).$
Our assumptions on $\widetilde V$ imply that $\varphi$ has quadratic behavior at zero. Since in addition $\varphi$ is convex, we necessarily have $\varphi(x)\ge c (x^2\wedge x)$ for all $x>0$ (for some positive $c>0$), yielding \eqref{toprove}.
\end{proof}
\begin{proof}[Proof of Lemma \ref{compare}]
Let $\sigma^2$ be the variance of $\eta$ under the measure with density proportional to $e^{-\widetilde{V}}$. Let $q_k$ be the density of the random variable $\eta_1+ \ldots +\eta_k$ under $\nu_N$. The Local Limit Theorem~\cite[Th. VII.2.7]{Petrov} gives
$$ \lim_{k\to\infty} \sup_{y\in \mathbb{R}} \big|\varepsilon(k,y)\big| = 0\;,$$
where we define
$$
\varepsilon(k,y)= \sigma\sqrt{k} \,q_k(y \sigma\sqrt{k}) - g(y),
$$
and $g$ is the density of the standard Gaussian distribution.
Since $g$ is maximized at $0$, for $k$ sufficiently large we may estimate
$$
\sup_{z\in\mathbb{R}}\sqrt{k}\,q_{k}(z)\le 2g(0)\;.
$$
One can check that, for any $f_0$ which is a bounded measurable function of $x_1,\ldots,x_{N-1}$, we have
$$ \pi_N[f_0] = \lim_{\delta\downarrow 0} \frac{\nu_N[f_0\, \mathbf{1}_{x_N \in [-\delta,\delta]}]}{\nu_N(x_N \in [-\delta,\delta])}\;.$$
Taking $f$ as in the statement of the lemma, we thus get for all $N$ sufficiently large,
\begin{align*}
\pi_N[f(\eta_1,\ldots,\eta_{N_a})] &= \frac{\nu_N\big[f(\eta_1,\ldots,\eta_{N_a}) q_{N-N_a}(- x_{N_a})\big]}{q_N(0)}
\\
&\le \frac{2g(0)}{\sqrt{N-N_a}q_N(0)} \nu_N\big[f(\eta_1,\ldots,\eta_{N_a})\big]
\\
&\le \frac{4}{\sqrt{1-a}}\,\nu_N\big[f(\eta_1,\ldots,\eta_{N_a})\big]\;.
\end{align*}
The result of the lemma follows by adjusting the value of $C_a$ in order to cover also the small values of $N$.
\end{proof}
\subsection{Technical estimates for the resampling probability}\label{sec:tecnos}
The goal of this subsection is to collect some useful estimates on the resampling distribution of our dynamics. All the constants are allowed to depend on the potential $V\in\mathscr{C}$ and on nothing else.
Let us mention before starting that, as a consequence of Assumptions
(\ref{ass:conv}) and (\ref{ass:poly}) on $V$, we have
\begin{equation}\label{eq:ct22}
|V'(u)| \le C(1+|u|)^{K-1}\;,
\end{equation}
for all $u$ where $V$ is differentiable, and by continuity, also for the derivatives on the left and on the right when they differ. All issues concerning differentiability appearing in the proofs below can be resolved in this fashion, so we will not mention them.
\noindent Our first estimate guarantees that our distribution is sufficiently spread out. Recall \eqref{deftheta}.
\begin{lemma}\label{lem:lerho}
There exists a constant $C>0$ such that
\begin{equation} \label{lowZ}
Z(a)\ge \frac{1}{C(1 \vee |a|^K)} \;.
\end{equation}
As a consequence, we have
\begin{equation}\label{eq:ct3}
\|\rho_{b,c}\|_{\infty} \le C(1 \vee |c-b|^K)\;.
\end{equation}
\end{lemma}
\noindent
Our second lemma ensures that the distribution $\rho_{b,c}$ displays an exponential decay outside of the interval $[b,c]$.
\begin{lemma}\label{lem:letail}
There exist positive constants $\alpha$ and $C$ such that for all $s\ge 0$ and all $b, c\in \mathbb{R}$
we have
\begin{equation}\label{eq:tail1}
\int_{ (b \vee c) + s}
^\infty\rho_{b,c}(u)\,\text{\rm d} u\le Ce^{-\alpha s} \,.
\end{equation}
Symmetrically we have
\begin{equation}\label{eq:tail2}
\int_{-\infty}
^{ (b \wedge c) - s}\rho_{b,c}(u)\,\text{\rm d} u\le C e^{-\alpha s} \,.
\end{equation}
In particular,
the variance of the random variable with density $\rho_{b,c}$ satisfies, for some possibly different choice of $C$, for every $b,c\in\mathbb{R}$:
\begin{equation}\label{lavar}
{\rm Var}(\rho_{b,c}):= \int_{\mathbb{R}} \left(u-\frac{b+c}{2}\right)^2 \rho_{b,c}(u)\,\text{\rm d} u \le C (|b-c|+1)^2\;.
\end{equation}
\end{lemma}
Finally the third lemma allows us to control the total variation distance between the distributions associated with $\rho_{b,c}$ and $\rho_{b',c'}$.
\begin{lemma}\label{lem:lequ}
There exists a constant $C$ such that for any $b,b',c,c'$
\begin{equation}\label{eq:q1}
q=\frac{1}{2}\int_\mathbb{R}|\rho_{b,c}(u)-\rho_{b',c'}(u)|\,\text{\rm d} u\le C\Delta (1\vee |c-b|^K),
\end{equation}
where $\Delta:= (|b'-b|+ |c'-c|)/2$.
\end{lemma}
\begin{proof}[Proof of Lemma \ref{lem:lerho}]
From \eqref{transinv}, it suffices to prove \eqref{eq:ct3} with $a$ and $\theta_a$ instead of $(c-b)$ and $\rho_{b,c}$. Since $W_a(u)\ge W_a(0)=0$, we have $\|\theta_{a}\|_{\infty}=Z(a)^{-1}$ and therefore we only need to prove \eqref{lowZ}. Let $z_a$ be defined as the unique positive solution of
$e^{-W_a(z_a)}= \frac12$. Existence and uniqueness of $z_a$ follow from convexity of $W_a$ and the fact that $W_a$ is minimized at $W_a(0)=0$.
We have
\begin{equation}\label{eq:ct1}
Z(a)\ge \int_{|u|\le z_a} e^{-W_a(z_a)}\,\text{\rm d} u\ge z_a.
\end{equation}
If $z_a > 1$, then \eqref{lowZ} immediately follows. We now assume that $z_a\le 1$. Writing
$$
\log 2= W_a(z_a)=\int_0^{z_a}\left(V'(a+u)-V'(a-u)\right)\,\text{\rm d} u \;,
$$
we deduce from \eqref{eq:ct22} that
\begin{equation}\label{eq:ct2}
\log 2 \le 2 z_a\max_{|u-a|\le 1} |V'(u)| \le 2 C Z(a) (|a|+2)^K \;,
\end{equation}
thus concluding the proof.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:letail}]
Using translation invariance \eqref{transinv} we
only need to prove an upper bound for the tail distribution associated with $\theta_a$, that is, for
$\int_{ |a| + s}^{\infty} \theta_a(u)\,\text{\rm d} u.$
Also, at the cost of changing the value of $C$, we can assume that $s\ge s_0$ for some sufficiently large $s_0 \ge 1$ independent of $a$.
Recalling that $\theta_a$ integrates to $1$ and is decreasing on $\mathbb{R}_+$, we have
\begin{equation}
\int_{ |a| + s}^{\infty} \theta_a(u)\,\text{\rm d} u\le \frac{ \int^{\infty}_{ |a| + s} \theta_a(u)\,\text{\rm d} u}{ \int_{0}^{ |a| + s_0} \theta_a(u)\,\text{\rm d} u}\le \frac{1}{|a|+s_0}\int^{\infty}_{ s} \frac{\theta_a(|a|+u)}{\theta_a(|a|+s_0)}\,\text{\rm d} u \;.
\end{equation}
We can then conclude if we show that for all $u\ge s_0$
\begin{equation}
\frac{\theta_a(|a|+u)}{\theta_a(|a|+s_0)}\le C e^{-\alpha u}.
\end{equation}
From our assumptions (i) and (iii) on the potential $V$, we have
$$\lim_{u\to+\infty} V'(u)-V'(-u) \in (0,\infty]\;.$$
Therefore, there exist $\alpha > 0$ and $s_0 > 1$ such that for all $u \ge s_0$, we have $V'(u)-V'(-u) \ge \alpha$. We then compute for all $u\ge s_0$
\begin{align*}
\partial_u \left[\log \theta_a(|a|+u)\right] &= V'(a-|a|-u)-V'(|a|+a +u) \\ &\le V'(-u)-V'(u)\le -\alpha\;,
\end{align*}
which readily yields
\begin{equation}
\frac{\theta_a(|a|+u)}{\theta_a(|a|+s_0)}\le e^{-\alpha(u-s_0)}\;.
\end{equation}
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:lequ}]
Note that we may assume $|b'-b|+ |c'-c|\le 1$, otherwise the result is trivial.
In particular, $|c'-b'|\le |c-b|+1$.
Using the triangle inequality
$$|\rho_{b,c}(u)-\rho_{b',c'}(u)|\le | \rho_{b,c}(u)-\rho_{b,c'}(u)|
+| \rho_{b,c'}(u)-\rho_{b',c'}(u)|$$
it is sufficient to treat the case where either $b=b'$ or $c=c'$. By translation invariance we reduce to the case $b=b'=0$ (the case $c=c'$ can be treated symmetrically).
Interchanging the variables if necessary, we may further assume that $Z(c/2)\ge Z(c'/2)$. Setting
\begin{equation*}
\Gamma_{c,c'}(u)= V(c'-u)-V(c-u)\;,
\end{equation*}
we observe that
\begin{align}
\label{eq:q2}
q&= \int_\mathbb{R} \rho_{0,c}(u)\left(1-\frac{Z(c)}{Z(c')}e^{-\Gamma_{c,c'}(u)}\right)_+\,\text{\rm d} u \\ &\le
\int_\mathbb{R}\rho_{0,c}(u)\left(1-e^{-\Gamma_{c,c'}(u)}\right)_+\,\text{\rm d} u
\le \int_\mathbb{R}\rho_{0,c}(u)(\Gamma_{c,c'}(u))_+\,\text{\rm d} u.
\end{align}
Using \eqref{eq:ct22} and $2\Delta=|c-c'|$ we have
\begin{equation}\label{eq:q21}
|\Gamma_{c,c'}(u)|\le C\Delta(|u| + |c| + 1)^{K-1}.
\end{equation}
We can conclude using
\begin{equation}
\int_\mathbb{R}\rho_{0,c}(u)|u|^{K-1} \,\text{\rm d} u \le C (1\vee |c|)^K,
\end{equation}
which follows from Lemma \ref{lem:letail}.
\end{proof}
\section{Lower bound on the mixing time}\label{Sec:Lower}
\begin{proposition}\label{prop:lalb}
There exists a constant $c>0$ such that, for every $N$ and $t\ge 0$,
\begin{equation}\label{Eq:lowbo}
d_N(t) \ge 1- \frac{1}{1+ c N e^{-2\lambda_N t}},
\end{equation}
where $\lambda_N=1-\cos(\pi/N)$.
As a consequence, there exists another constant $C$ such that, for all $\varepsilon\in (0,1)$,
\begin{equation}\label{Eq:lowbo1}
T_N(\varepsilon)\ge \frac{1}{2\lambda_N} \left(\log N + \log (1-\varepsilon)- C\right).
\end{equation}
\end{proposition}
To prove \eqref{Eq:lowbo} we select a test function $f$ and use the fact that if at time $t$ the value $f(\mathbf{X}(t))$ is far from the equilibrium value $\pi_{N}(f)$ with large probability then $d_N(t)$ must be large. This is implemented by choosing a suitable initial condition and by estimating the first two moments of $f(\mathbf{X}(t))$. This is a variant of Wilson's method \cite{Wil04}.
As for the exclusion process \cite{Wil04} and for the Beta-sampler on the simplex \cite{CLL}, we take $f=f_N$, the eigenfunction appearing in Theorem \ref{Th:gap}.
For the remainder of this section we assume for notational simplicity that $N$ is even and we write $\mathbf{X}$ for the process started from the random initial condition $\mathbf{X}(0)$ drawn according to the measure
\begin{equation}
\varrho_N:=\pi_N\left( \cdot \ | \ x_{N/2} = N/2,\ \|x\|_\infty \le N\right).
\end{equation}
\begin{proposition}\label{prop:meanandvariance}
There exists a constant $C$ such that for every $t\ge 0$
$$ \mathbb{E}[f_N(\mathbf{X}(t))] \ge C^{-1} N^2 e^{-\lambda_N t}\;,\quad {\rm Var}[f_N(\mathbf{X}(t))] \le C N^3.$$
\end{proposition}
\begin{proof}[Proof of Proposition \ref{prop:lalb} using Proposition \ref{prop:meanandvariance}]
By definition, $$d_N(t)\ge \| P^{\varrho_N}_t- \pi_N \|_{TV}.$$
From \cite[Proposition 7.12]{LevPerWil} one has
\begin{equation}\label{eq:lpw}
\| P^{\varrho_N}_t- \pi_N \|_{TV} \ge 1- \left(1+ \frac{|\mathbb{E}[f_N(\mathbf{X}(t))]-\pi_N(f_N)|^2}{2{\rm Var}(f_N(\mathbf{X}(t)))+2{\rm Var}_{\pi_N}(f_N) }\right)^{-1},
\end{equation}
where ${\rm Var}_{\pi_N}(f_N)$ denotes the variance of $f_N$ with respect to $\pi_N$. Using $\pi_N(f_N)=0$ and Fatou's lemma for weak convergence to control ${\rm Var}_{\pi_N}(f_N)$ through the variance ${\rm Var}(f_N(\mathbf{X}(t)))$ at $t=\infty$, Proposition \ref{prop:meanandvariance} implies the estimate
\begin{align}
\| P^{\varrho_N}_t- \pi_N \|_{TV}\ge 1- \left(1+ \frac{N e^{-2\lambda_N t}}{4C^3}\right)^{-1},
\end{align}
which proves \eqref{Eq:lowbo} with $c=1/(4C^3)$ if $C$ is the constant in Proposition \ref{prop:meanandvariance}.
The lower bound \eqref{Eq:lowbo1} is a simple consequence of \eqref{Eq:lowbo}.
\end{proof}
\begin{proof}[Proof of Proposition \ref{prop:meanandvariance}]
As $f_N$ is an eigenfunction associated with the eigenvalue $-\lambda_N$, see Section \ref{sec:linear}, the process
$$M_t:=e^{\lambda_N t}f_N(\mathbf{X}(t))$$ is a martingale. In particular,
\begin{equation}\label{eq:lowbo11}
\mathbb{E}[f_N(\mathbf{X}(t))] =e^{-\lambda_N t}\mathbb{E}[f_N(\mathbf{X}(0))]=e^{-\lambda_N t}\varrho_N(f_N).
\end{equation}
Under $\hat \pi_N=\pi_{N}( \cdot \ | x_{N/2}=N/2)$, the increments $\eta_i$, $i\in\llbracket 1,N/2\rrbracket$ are exchangeable and have all mean $1$.
The same can be said for $i\in\llbracket N/2+1,N\rrbracket$ with mean $-1$. The distribution $\hat \pi_{N}$ restricted to the variables in the first half of the segment is the distribution of $\{\eta_i+1\}$ where the $\eta_i$ are distributed according to the measure $\pi_{N/2}$ for the shifted potential $ V^+(u)=V(u+1)$, see Remark \ref{rem:tilt}. Similarly, for the second half of the segment with
$V^-(u)=V(u-1)$. Then, an application of Corollary \ref{cor:bazics}
shows that for any $a_0 > 0$ there exists $c>0$ such that
for every $a\ge a_0$ and for all $N$ sufficiently large,
\begin{equation}\label{eq:concentration}
\hat \pi_N( \max |x_i- \hat\pi_N(x_i)|\ge a N \ ) \le e^{-c a N}\;,
\end{equation}
where $\hat\pi_N(x_i)=i$ if $i\le N/2$ and $\hat\pi_N(x_i)=N-i$ if $i\ge N/2$.
Moreover, using the Cauchy-Schwarz inequality and \eqref{deviates} one has $$\hat\pi_N(f_N\,\mathbf{1}_{\|x\|_\infty > N})= o(N^2).$$
It follows that $\varrho_N(f_N)=\hat\pi_N(f_N\ |\ \|x\|_\infty \le N)$ satisfies
\begin{equation}
\varrho_{N}( f_N )
= \frac{2 N^2}{\pi^2}(1+o(1)).
\end{equation}
Combined with \eqref{eq:lowbo11} this proves the desired lower bound on $\mathbb{E}[f_N(\mathbf{X}(t))]$.
To control the variance, we write
\begin{equation}
{\rm Var}[f_N(\mathbf{X}(t))]= e^{-2\lambda_N t} {\rm Var}[M_t]= e^{-2\lambda_N t} \left({\rm Var}[M_0]+\mathbb{E} \left[\langle M \rangle_t\right]\right),
\end{equation}
where $\langle M \rangle_t$ is the increasing predictable process, or angle bracket, associated to the martingale $M_t$ defined above.
The control of ${\rm Var}[M_0]={\rm Var}_{\varrho_N}(f_N)$ can be obtained by reducing to the measure $\hat\pi_N$ and using Lemma \ref{compare}, considering the cases $i\in \llbracket 1, N/2\rrbracket$ and $i\in \llbracket N/2+1, N\rrbracket$ separately as above. More precisely,
for some constant $C$, for every $i\in\llbracket 1, N\rrbracket$:
\begin{equation}
\hat \pi_N\left( (x_i- \hat\pi_N(x_i))^2 \right) \le C N.
\end{equation}
Using Cauchy-Schwarz,
\begin{equation}
\hat \pi_N \left(\Big( f_N(x)-\sum_{i=1}^{N} \hat\pi_N(x_i)\sin(i \pi/N)\Big)^2 \right)\le C N^3.
\end{equation}
Recalling \eqref{eq:concentration}, $\varrho_N$ is obtained by conditioning $\hat\pi_N$ to an event of probability larger than $1/2$, and therefore, using the variational representation for the variance of a random variable $X$, ${\rm Var}(X)=\inf_{m\in\mathbb{R}}\mathbb{E}[(X-m)^2]$, one finds
\begin{equation}
{\rm Var}_{\varrho_N}(f_N)\le \varrho_N \left(\Big( f_N(x)-\sum_{i=1}^{N} \hat\pi_N(x_i)\sin(i \pi/N)\Big)^2\right)\le 2 CN^3.
\end{equation}
The martingale bracket can be given an explicit expression.
The contribution to the bracket of the potential update at site $k$ at time $s$ is bounded by
\begin{equation}\label{eq:contr}
e^{2\lambda_N s}\sin^2(k \pi/N)\,\,\mathbf{E}\left[\left(X_k(s)-X_k(s^-)\right)^2\right],
\end{equation}
where $\mathbf{E}[\cdot]$ is the expectation with respect to the resampling random variable $X_k(s)$ with distribution $\rho_{X_{k-1}(s^-),X_{k+1}(s^-)}$.
Notice that
\begin{equation}
X_k(s)-X_k(s^-)=
\frac12(\eta_{k+1}(s^-)-\eta_k(s^-)) -U,
\end{equation}
where $U$ has distribution $\theta_{\bar \eta_k(s^-)}$, see \eqref{transinv}. Using
Lemma \ref{lem:letail} to estimate the variance of $U$, we see that \eqref{eq:contr} is bounded above by
\begin{align}\label{eq:contro}
Ce^{2\lambda_N s}\left[
1+ \eta_{k}(s^-)^2+\eta_{k+1}(s^-)^2\right],
\end{align}
for some constant $C>0$.
Hence,
\begin{equation}\label{lecrochet}
\langle M \rangle_t
\le C \int^t_0 e^{2\lambda_N s}\sum_{k=1}^{N-1} \left( 1+\eta_{k}(s)^2+\eta_{k+1}(s)^2\right)\,\text{\rm d} s.
\end{equation}
To conclude we prove that there exists $C>0$ such that
\begin{equation}\label{touborne}
\forall N\ge 1, \forall k\in\llbracket 1,N\rrbracket, \ \forall s\ge 0, \quad \mathbb{E}[\eta_k(s)^2] \le C.
\end{equation}
Indeed, \eqref{touborne} combined with \eqref{lecrochet} yields
\begin{equation}
e^{-2\lambda_N t} \mathbb{E} \left[\langle M \rangle_t\right]\le C N \lambda^{-1}_N \le C' N^3.
\end{equation}
By symmetry, it is sufficient to show \eqref{touborne} for $k\le N/2$. Moreover, using \eqref{eq:concentration} as above, we may consider the dynamics with initial distribution $\hat\pi_N$ instead of $\varrho_N$.
With slight abuse of notation
we still use the notation $\mathbf{X}$ for this process.
We are going to prove a bound for $\mathbb{E}[\max(\eta_k(s),0)^2]$, the analogous bound for the negative part being proved by a symmetric argument.
Using Lemma \ref{lem:mean}, we fix $\lambda$ such that
\begin{equation}
(\smallint u e^{-V(u)+\lambda u} \,\text{\rm d} u) / ( \smallint e^{-V(u)+\lambda u} \,\text{\rm d} u)=2.
\end{equation}
We consider the measure $\widetilde \pi_N$ under which the $\eta_i$ are IID with a distribution whose density with respect to Lebesgue is proportional to $e^{-V(u)+\lambda u}$, and note that $\widetilde \pi_N$ is an invariant measure for the generator $\mathcal{L}$ in the enlarged state space $\widetilde\Omega_N$.
In the enlarged state space, we couple $\mathbf{X}$ with the process $\mathbf{X}'$ with initial condition distributed according to $\widetilde \pi_N( \cdot | \ x_{N/2}\ge N/2)$. Observe that the law of the increments $(\eta_k)_{k\le N/2}$ under $\hat\pi_N$ coincides with the law of $(\eta_k)_{k\le N/2}$ under $\widetilde\pi_N(\cdot \ |\ x_{N/2}= N/2)$. Therefore, by Lemma \ref{lem:mongc}, $\mathbf{X}$ and $\mathbf{X}'$ can be coupled in such a way that $\eta_k(s)\le \eta'_k(s)$ for all $s\ge 0$ and $k\le N/2$.
Hence
\begin{equation}
\mathbb{E} \left[\max(\eta_k(s),0)^2\right]\le \mathbb{E} \left[(\eta'_k(s))^2\right].
\end{equation}
Simple estimates for i.i.d.\ random variables show that $\widetilde \pi_N(x_{N/2}\ge N/2) \ge 1/2$, and therefore, using the invariance of $\widetilde \pi_N$:
\begin{equation}
\mathbb{E} \left[\max(\eta_k(s),0)^2\right]\le 2\,\widetilde \pi_N(\eta_k^2) =\frac{2\int u^2 e^{-V(u)+\lambda u} \,\text{\rm d} u}{\int e^{-V(u)+\lambda u} \,\text{\rm d} u}.
\end{equation}
\end{proof}
\section{A first upper bound and the spectral gap}\label{Sec:Upper}
In this section, we establish an upper bound on the total-variation distance to equilibrium that holds for \emph{all} $N\ge 2$. From this bound we will derive the value of the spectral gap of the generator. This upper bound is sharp enough to catch the order of the mixing time when $N\to\infty$ but not the right prefactor: this will be sharpened in the next section.
The main result of this section is formulated as follows.
For a probability distribution $\nu$ on $\Omega_N$ we let $B(\nu)$ denote the following quantity
\begin{equation}\label{eq:bnu}
B(\nu):=\mintwo{x\sim \nu}{x'\sim \pi_N} \sqrt{ \sum_{k=1}^{N-1} \mathbf{E}\left[ |x_k- x'_k|\right]^2},
\end{equation}
where $\mathbf{E}$ denotes the expectation with respect to a coupling of $(x,x')$ with marginals $\nu$ and $\pi_N$, and the minimum is taken over all such couplings.
\begin{proposition}\label{Prop:RW}
There exists a constant $C>0$ such that for any distribution $\nu$ on $\Omega_N$, all $t\ge C \log N$ and all $N\ge 2$
\begin{equation}
\| P_t^{\nu} - \pi_N \|_{TV} \le C \left( N^{1/2} B(\nu) t^{C} e^{-\lambda_N t}+ Ne^{-t}\right)\;,
\end{equation}
where $\lambda_N=1-\cos(\pi/N)$.
\end{proposition}
Before giving the proof of Proposition \ref{Prop:RW} we describe some of its consequences for the spectral gap and the mixing time.
\subsection{Proof of Theorem \ref{Th:gap}}
The upper bound in Proposition \ref{Prop:RW}
is valid for all $N\ge 2$ and for all initial distributions $\nu$, without restrictions on the maximal height. In particular, it
allows us to identify the spectral gap of the generator and prove Theorem \ref{Th:gap}.
We already saw that $f_N$ is an eigenfunction of $-\mathcal{L}$ associated with $\lambda_N$.
It remains to check that the latter is indeed the spectral gap of $\mathcal{L}$. Using Proposition \ref{genresult},
it is sufficient to check that for any compactly supported distribution $\nu$
$$\limsup_{t\to \infty} \frac{1}{t}\log \| P^{\nu}_t- \pi_N \|_{TV}
\le -\lambda_N.$$
This follows from Proposition \ref{Prop:RW} since $B(\nu)<\infty$ if $\nu$ has compact support.
\subsection{A first upper bound on the mixing time}
From the considerations in Section \ref{sec:linear} we obtain the following useful contraction bounds.
\begin{lemma}\label{lem:contraction}
For any $x,y\in\Omega_N$, for all $t\ge 0$:
\begin{equation}\label{Eq:BdHeat}
\left( \sum_{k=1}^{N-1} \mathbf{E}[X^x_k(t) - X^y_k(t)] \right)^2 \le N
e^{-2\lambda_N t}\sum_{k=1}^{N-1} (x_k - y_k)^2\;,
\end{equation}
where $\mathbf{E}$ denotes the expectation with respect to an arbitrary coupling of $\mathbf{X}^x(t)$ and $\mathbf{X}^y(t)$.
Moreover, for any distribution $\nu$ on $\Omega_N$ and $t\ge 0$, the quantity defined in \eqref{eq:bnu} satisfies
\begin{equation}\label{Eq:BdHeat21}
B(P_t^\nu)\le B(\nu) e^{-\lambda_N t}.
\end{equation}
\end{lemma}
\begin{proof}
From \eqref{Eq:laplace} we have
\begin{equation}\label{Eq:Heat} \partial_t a(t,k) = \frac12 \Delta a(t,k)\;,\end{equation}
where $a(t,k) := \mathbf{E}[X^x_k(t) - X^y_k(t)]$. An orthonormal basis for $\Delta$ on the segment $\{1,\ldots,N-1\}$
with Dirichlet boundary condition at $0$ and $N$ is given by the eigenfunctions $\varphi^{(j)}$, $j=1,\ldots,N-1$:
\begin{equation}\label{Eq:eigenfcts}
\varphi_k^{(j)} = \sqrt{\frac2N}\sin\left(\frac{ jk \pi}{N}\right)\,, \quad (\Delta \varphi^{(j)})_k= -2\lambda_N^{(j)} \varphi_{k}^{(j)},
\end{equation}
where $\lambda_N^{(j)}$ is given in \eqref{defjj}.
Expanding $a(t,\cdot)$ along this basis one obtains
$$ \sum_{k=0}^N a(t,k)^2 \le e^{-2\lambda_N t} \sum_{k=0}^N a(0,k)^2\;,$$
and the bound \eqref{Eq:BdHeat} follows from the Cauchy-Schwarz inequality. To prove \eqref{Eq:BdHeat21} we argue as follows. By definition of $B(\nu)$ we may choose a coupling $\mathbf{P}_0$ of $(\nu,\pi)$ such that
\begin{equation}\label{initcoupling}
\sum_{k=1}^{N-1}\mathbf{E}_0 \big[ | X^{\nu}_k(0)- X^{\pi}_k(0)|\big]^2 =B(\nu)^2.
\end{equation}
Under this coupling we let $Y$ and $W$ denote the upper and lower envelopes of $\{ \mathbf{X}^{\nu}(0),\mathbf{X}^{\pi}(0)\}$, setting
$Y_k=X_k^{\nu}(0)\vee X_k^\pi(0)$ and $W_k=X_k^{\nu}(0)\wedge X_k^\pi(0)$. We have by definition
$$ \sum_{k=1}^{N-1} \mathbf{E}_0[Y_k - W_k]^2 = B(\nu)^2\;.$$
Now we couple four Markov chains $[\mathbf{X}^{\nu}(t), \mathbf{X}^\pi(t), \mathbf{X}^{Y}(t),\mathbf{X}^W(t)]_{t\ge 0}$ using the coupling $\mathbf{P}_0$ to set the initial condition ($\mathbf{X}^{Y}(0)=Y$ and $\mathbf{X}^W(0)=W$ respectively) and using the monotone grand coupling from Section \ref{Sec:GC} for the dynamics. We let $\mathbf{P}$ denote the joint law.
As the initial conditions are ordered we obtain from Lemma \ref{lem:mongc} that under $\mathbf{P}$ for any $t\ge 0$ we have
$$\mathbf{X}^W(t)\le\mathbf{X}^\nu(t)\le \mathbf{X}^Y(t) \quad \text{ and } \quad \mathbf{X}^W(t)\le\mathbf{X}^\pi(t)\le \mathbf{X}^Y(t).$$ Therefore the argument used to prove \eqref{Eq:BdHeat} implies that
\begin{align}\label{Eq:BdHeatao}
\sum_{k=1}^{N-1} \mathbf{E}[|X^\nu_k(t) - X^\pi_k(t)|]^2 &\le
\sum_{k=1}^{N-1} \mathbf{E}[X^Y_k(t) - X^W_k(t)]^2\nonumber \\ &
\le e^{-2\lambda_N t}\sum_{k=1}^{N-1} \mathbf{E}[Y_k - W_k]^2\nonumber\\& =e^{-2\lambda_N t} B(\nu)^2.
\end{align}
By stationarity of $\pi$, under $\mathbf{P}$ the distributions of $\mathbf{X}^\nu(t)$ and $\mathbf{X}^\pi(t)$ are respectively $P_t^\nu$ and $\pi$, and \eqref{Eq:BdHeat21} follows.
\end{proof}
Next, we show that Proposition \ref{Prop:RW} provides an upper bound on the mixing time which is of order $N^2\log N$.
This bound is off by a factor $4$ with respect to Theorem \ref{th:main1}. In the next section we will refine the proof in order to catch the right prefactor.
\begin{corollary}\label{th:corol}
For any $\delta>0$, for all $\varepsilon \in (0,1)$ and all $N\ge N_0(\varepsilon,\delta)$ sufficiently large
$$ T_N(\varepsilon) \le \frac{2+\delta}{\lambda_N} \log N\;.$$
\end{corollary}
\begin{remark}\label{rem:uniformity}
An important observation here which is used in Section \ref{sec:censor} is that not only is the above estimate also valid when the boundary condition $x_N=0$ is replaced by $x_N=hN$ (cf. Remark \ref{rem:tilt}), but it is uniform when $h$ takes values in a compact interval (say $[-C,C]$ for some constant $C>0$). Checking this uniformity is a tedious but rather straightforward procedure. We have chosen to omit it in the proof, but the reader can check that it boils down to making sure that all technical estimates in Section \ref{sec:tecnos} are indeed uniform in this sense. A second observation (which can, this time, immediately be checked from the proof) is that if the bound on $\|x\|_{\infty}$ is chosen to be $N^{\alpha}$, with $\alpha>1/2$, then the corresponding $\varepsilon$-mixing time is smaller than $\frac{1+\alpha+\delta}{\lambda_N} \log N$.
Let us also remark that Corollary \ref{th:corol} is sufficient to establish the so-called pre-cutoff phenomenon, namely the fact that
$$
\limsup_{N\to\infty}\frac{T_N(\varepsilon)}{T_N(1-\varepsilon)}
$$
is uniformly bounded for $\varepsilon\in(0,1/2)$.
\end{remark}
\begin{proof}
Consider an initial condition $x\in \Omega_N$ such that $\|x\|_\infty \le N$. We have $B(\delta_x)\le C N^{3/2}$ so that a direct application of Proposition \ref{Prop:RW} would yield $T_N(\varepsilon) \le \frac{C'}{\lambda_N} \log N$ for some constant $C'$ depending on $C$ and $N$ large enough. However, one can sharpen this upper bound as follows.
By \eqref{Eq:BdHeat21} we have for $s\ge t$, $B(P^x_{s-t})\le CN^{3/2} e^{-\lambda_N(s-t)}$.
Now using Proposition \ref{Prop:RW} for $\nu=P^x_{s-t}$ we obtain for some new constant $C$:
\begin{equation}
\| P_s^{x} - \pi_N \|_{TV}= \| P_t^{P^x_{s-t}} - \pi_N \|_{TV} \le C \left( N^{2} t^{C} e^{-\lambda_N s}+ Ne^{-t}\right).
\end{equation}
Then choosing $s= \frac{2+\delta}{\lambda_N} \log N$ and $t=(\log N)^2$ we can conclude.
\end{proof}
\subsection{Proof of Proposition \ref{Prop:RW}}
The rest of this subsection is devoted to the proof of Proposition \ref{Prop:RW}.
We are going to perform the proof for $N\ge 3$ (we require $\lambda_N<1$ in \eqref{Eq:EstimateTell}). For $N=2$, since the system equilibrates after one update, we have
\begin{equation}
\| P_t^{\nu} - \pi_2 \|_{TV}\le e^{-t}.
\end{equation}
Moreover, since the total variation distance $\| P_t^{\nu} - \pi_N \|_{TV} $ is monotone as a function of $t$, we may assume without loss of generality that $t$ is an integer.
Fix $t \in \mathbb{N}$ and a distribution $\nu$ on $\Omega_N$. For notational simplicity we often write $\pi$ instead of $\pi_N$. We are going to construct a (non-Markovian) coupling $(\mathbf{X}^{\nu}(s), \mathbf{X}^\pi(s))_{s\in [0,t]}$, for the two processes starting with respective distributions $\nu$ and $\pi$. We let $\mathbb{P}_t$ denote the law of this coupling.
First we couple the initial conditions $\mathbf{X}^{\nu}(0), \mathbf{X}^\pi(0)$ in such a way that \eqref{initcoupling} holds.
The second ingredient for our coupling is a set of independent, rate $1$, Poisson clocks $(\tau_{k})^{N-1}_{k=1}$ (which are independent of the initial conditions) indexed by coordinates from $1$ to $N-1$ (each $\tau_k$ is considered as a subset of $\mathbb{R}_+$ ). These clocks determine the update times for the coordinates of our processes. We then define the random time $\mathcal{T}$ as the largest integer $\ell$ before $t$ such that all the Poisson clocks $\tau_k$ have rung at least once on $(\ell,t)$. More formally, we set (here $\sup\emptyset=0$)
\begin{equation}
\mathcal{T}:= \sup \left\{ \ell\in \llbracket 0, t\rrbracket \ : \forall k, \
\tau_{k}\cap (\ell,t) \ne \emptyset \right\}.
\end{equation}
Note that we have
\begin{equation}
\mathbb{P}_t(t-\mathcal{T}=\ell)=\begin{cases}
(1-e^{-1})^N &\text{ if }\ell=1\;,\\
(1-e^{-\ell})^N - (1-e^{-\ell+1})^N &\text{ if }\ell\in \llbracket 2, t-1\rrbracket\;,\\
1 - (1-e^{-t+1})^N &\text{ if } \ell=t\;.
\end{cases}
\end{equation}
Observe that there exists $C>0$ such that for all $N\ge 3$ and all $\ell \ge 0$
\begin{equation}\label{Eq:SimpleTell}
\mathbb{P}_t(t-\mathcal{T}\ge \ell) \le C N e^{-\ell}\;.
\end{equation}
Thus, using the fact that $\mathbb{E}[f(Z)]=f(0)+\sum_{k=1}^\infty[f(k)-f(k-1)]\mathbb{P}(Z\ge k)$ for non-negative integer valued random variables $Z$ and any function $f$, provided that the sum in the r.h.s.\ converges, one has
\begin{equation}\label{Eq:EstimateTell}
\mathbb{E} \left[e^{\lambda_N(t-\mathcal{T})}\right]\le 1+ 2CN\sum_{\ell=1}^t \lambda_N e^{\ell (\lambda_N-1)}\le C',
\end{equation}
for some constant $C'>0$.
Now we perform our coupling as follows
\begin{itemize}
\item For $s\le \mathcal{T}$, we use the monotone coupling of Subsection \ref{Sec:GC}\ : At each update time we draw a uniform variable $U$ and the updated values of $X^{\nu}_k$, $X^{\pi}_k$ are constructed composing $U$ with the inverse of the conditional distribution function.
\item For $s> \mathcal{T}$, we use the sticky coupling of Subsection \ref{Subsec:sticky}\ : At each update time we couple $X^{\nu}_k$ \text{ and } $X^{\pi}_k$ with maximal probability.
\end{itemize}
To prove Proposition \ref{Prop:RW}, we introduce the r.v.
\begin{equation}
A_s:= \sum_{k=1}^{N-1} |X^{x}_k(s)-X^{\pi}_k(s)|\;,\quad s\in [0,t]\;.
\end{equation}
\begin{lemma}\label{Lemma:EstimateBruteGap}
There exist $c',C'>0$ such that for all $N\ge 2$, all $t\ge \log N$ and all $\ell \in \llbracket 1,t-1\rrbracket$ we have
\begin{equation}
\mathbb{P}_t\left( \mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t) \ | \mathcal{T}=\ell \right) \le C'\Big( e^{-c' t^2} + t^{2K+1} \mathbb{E}_t[A_\ell \,|\, \mathcal{T} = \ell] \Big)\;.
\end{equation}
\end{lemma}
\begin{proof}
For every $k\in \llbracket 1,N-1\rrbracket$, let us denote by $(t_k^{(i)})_{i=1}^{n_k}$ the ordered set of update times occurring at site $k$ on the time-interval $(\mathcal{T},t)$. Let $\widetilde\mathcal{F}$ be the sigma-field generated by all the $(t_k^{(i)})_{i=1}^{n_k}$, $k\in \llbracket 1,N-1\rrbracket$, and by the processes $\mathbf{X}^{\nu},\mathbf{X}^\pi$ up to time $\mathcal{T}$. Denote by $\widetilde{\mathbb{P}}_t$ the associated conditional probability. We are going to show that, for some constant $C>0$, on the event $\{\mathcal{T} = \ell\}$ we have
\begin{equation}\label{eq:timecondit}
\widetilde\mathbb{P}_t(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t)) \le C\left(\max_{k\in \llbracket 1,N-1\rrbracket} n_k\right) \left(e^{-c't^2}+t^{2K} A_{\ell}\right)\;,
\end{equation}
and that
\begin{equation}\label{Eq:Maxcondit}
\mathbb{E}_t \Big[\max_{k\in \llbracket 1,N-1\rrbracket} n_k \ |\ \mathcal{T}=\ell\Big] \le Ct\;.
\end{equation}
Let us first show how we conclude from \eqref{eq:timecondit} and \eqref{Eq:Maxcondit}. Since $\{\mathcal{T}=\ell\}$ is $\widetilde\mathcal{F}$-measurable we have
\begin{equation}
\mathbb{P}_t\left(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t) \ | \ \mathcal{T}=\ell \right) \le C \,\mathbb{E}_t \left[ \max_{k\in \llbracket 1,N-1\rrbracket} n_k\left(e^{-c't^2}+ t^{2K}A_{\ell}\right) \ | \ \mathcal{T}=\ell \right]
\end{equation}
Observe that $A_{\ell}$ and $\max_{k\in \llbracket 1,N-1\rrbracket} n_k$ are independent under $\mathbb{P}_t( \cdot \ | \ \mathcal{T}=\ell )$. Therefore we get
$$ \mathbb{P}_t\left(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t) \ | \ \mathcal{T}=\ell \right) \le C'\big(e^{-c'' t^2} + t^{2K+1} \mathbb{E}_t[A_\ell \,|\, \mathcal{T} = \ell]\big)\;,$$
as required.
Now let us prove \eqref{eq:timecondit}. We introduce the event
\begin{equation}\begin{split}
\mathcal{C}_\ell&:= \{ \forall s\in [\ell,t]: \|\nabla \mathbf{X}^{\pi}_{s} \|_{\infty}\le t^2 \}.
\end{split}
\end{equation}
We note that $\mathcal{C}_\ell = \cap_{i,k} \mathcal{C}^{i,k}_\ell$ where
\begin{equation}\begin{split}
\mathcal{C}^{i,k}_\ell&:= \{ \forall s\in [\ell,t_k^{(i)}): \|\nabla \mathbf{X}^{\pi}_{s} \|_{\infty}\le t^2 \}.
\end{split}
\end{equation}
We say that the update at time $t_k^{(i)}$ is successful if $X^{\nu}_k(t_k^{(i)})=X^{\pi}_k(t_k^{(i)})$.
We let $\tau$ be the time of the first unsuccessful update among the update times $(t_k^{(i)})_{i=1}^{n_k}$, $k\in\llbracket 1,N-1\rrbracket$. If all the updates are successful, we set $\tau := t$. We have
$$ \{\mathbf{X}^{\nu}(t) \ne \mathbf{X}^\pi(t)\}\cap \{\mathcal{T}=\ell\} \subset \{\tau < t\}\cap \{\mathcal{T}=\ell\}\;.$$
Indeed, on the event $\{\tau = t\}\cap\{\mathcal{T}=\ell\}$, there is at least one update per coordinate on $(\ell,t)$ and all the updates are successful so that the two processes merge by time $t$.
Then we write
\begin{align*}
\widetilde\mathbb{P}_t(\tau < t) &= \widetilde\mathbb{P}_t(\cup_{i,k} \{\tau = t_k^{(i)}\})\\
&\le \widetilde\mathbb{P}_t(\cup_{i,k} (\mathcal{C}^{i,k}_\ell)^\complement) + \widetilde\mathbb{P}_t(\cup_{i,k} \{\tau = t_k^{(i)}\}\cap \mathcal{C}^{i,k}_\ell)\\
&\le \widetilde\mathbb{P}_t(\mathcal{C}^\complement_\ell) + \sum_{i,k}\widetilde\mathbb{P}_t(\{\tau = t_k^{(i)}\}\cap \mathcal{C}^{i,k}_\ell)\;.
\end{align*}
Using Lemma \ref{lem:lequ}, we have
\begin{align*}
&\widetilde\mathbb{P}_t(\{\tau = t_k^{(i)}\}\cap \mathcal{C}_\ell^{i,k})\\
&= \widetilde\mathbb{E}_t\big[\widetilde\mathbb{P}_t\big(X_k^{\nu}(t_k^{(i)}) \ne X_k^\pi(t_k^{(i)}) \,|\, \mathcal{F}_{t_k^{(i)}-}\big)\mathbf{1}_{\{\tau \ge t_k^{(i)}\}\cap \mathcal{C}^{i,k}_\ell}\big]\\
&\le \widetilde\mathbb{E}_t\big[C \max(1, \|\nabla \mathbf{X}^{\pi}(t_k^{(i)}-) \|_{\infty})^K \Delta_k(t_k^{(i)}-) \mathbf{1}_{\{\tau \ge t_k^{(i)}\}\cap \mathcal{C}^{i,k}_\ell}\big]\;,
\end{align*}
where
$$ 2\Delta_k(s) := |X^{\pi}_{k-1}(s)-X^{\nu}_{k-1}(s)|+|X^{\pi}_{k+1}(s)-X^{\nu}_{k+1}(s)| \;.$$
On the event $\{\tau \ge t_k^{(i)}\}$, all the updates are successful up to time $t_k^{(i)}$ so that
$$ \Delta_k(t_k^{(i)}-) \le \Delta_k(\ell)\;.$$
Consequently, we have
\begin{align*}
\widetilde\mathbb{P}_t(\{\tau = t_k^{(i)}\}\cap \mathcal{C}^{i,k}_\ell) &\le C t^{2K} \Delta_k(\ell) \;.
\end{align*}
Putting everything together, we find that on the event $\{\mathcal{T}=\ell\}$ (which is $\widetilde\mathcal{F}$-measurable):
\begin{align*}
\widetilde\mathbb{P}_t\big(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t)\big) &\le \widetilde\mathbb{P}_t(\{\tau < t\})\\
&\le \widetilde\mathbb{P}_t(\mathcal{C}^\complement_\ell) + (\max_{k\in \llbracket 1,N-1\rrbracket} n_k) C' t^{2K} A_{\ell}\;.
\end{align*}
To bound the first term, we use stationarity and Corollary \ref{cor:bazics} to obtain
\begin{align*}
\widetilde\mathbb{P}_t(\mathcal{C}^\complement_\ell) &\le \left(\max_{k\in \llbracket 1,N-1\rrbracket} n_k\right)\, \pi_N(\max_{i\in \llbracket 1,N\rrbracket} |\eta_i| > t^2/2)\\
&\le \left(\max_{k\in \llbracket 1,N-1\rrbracket} n_k\right) N e^{-c't^2}\;.
\end{align*}
Since $t\ge \log N$, this yields \eqref{eq:timecondit}.

Let us now estimate the conditional expectation of $\max_{k\in \llbracket 1,N-1\rrbracket} n_k$. Let us first describe the conditional law of the $n_k$'s. Let $G$ be the random number of Poisson clocks that have not rung on $(\ell+1,t)$. On the event $\{\mathcal{T}=\ell\}$ this number is positive. Given $\{\mathcal{T}=\ell\}$ the $n_k$'s can be obtained as $G$ i.i.d.~Poisson r.v.~of parameter $1$ conditioned to be positive and $N-1-G$ i.i.d.~r.v.~which are the independent sum of a Poisson r.v.~of parameter $1$ and a Poisson r.v.~of parameter $t-\ell-1$ conditioned to be positive.\\
It is simple to check that the law of a Poisson r.v.~of parameter $q$ conditioned to be positive is stochastically increasing with $q$. As a consequence of these observations, we deduce that $\max_{k\in \llbracket 1,N-1\rrbracket} n_k$, conditionally given $\{\mathcal{T}=\ell\}$, is stochastically smaller than $\max_{k\in \llbracket 1,N-1\rrbracket} Z_k$ where $Z_k$ are i.i.d.~r.v.~obtained as the independent sum of a Poisson r.v.~of parameter $1$ and a Poisson r.v.~of parameter $t-1$ conditioned to be positive. Recalling that a Poisson random variable $W$ with parameter $\lambda$ satisfies $\mathbb{P}(W\ge k)\le e^{-k(\log(k/\lambda)-1)}$, and that $t\ge \log N$, it is not difficult to check that
\begin{equation}\label{eq:poissonmax}
\mathbb{E}[\max_{k\in \llbracket 1,N-1\rrbracket} Z_k] \le Ct\;,
\end{equation}
for some new constant $C>0$. This implies \eqref{Eq:Maxcondit}.
\end{proof}
We now proceed to the proof of our proposition.
\begin{proof}[Proof of Proposition \ref{Prop:RW}]
We start with an upper bound on the expectation of $A_\ell$ given $\{\mathcal{T}=\ell\}$ for any $\ell \in \llbracket 0,t-1\rrbracket$. Since up to time $\mathcal{T}$ we use the monotone grand coupling, arguing as in the proof of Lemma \ref{lem:contraction}, setting $Y_k=(X^\nu_k\vee X^\pi_k)(0)$ and $W_k=(X^\nu_k\wedge X^\pi_k)(0)$
one obtains
\begin{align*}
\mathbb{E}_t \left[ A_\ell \ |\ \mathcal{T}=\ell \right]^2 &\le \Big(\sum_{k=1}^{N-1} \mathbb{E}_t[X^Y_k(\ell) -X^W_k(\ell)\ |\ \mathcal{T}=\ell]\Big)^2\\
&\le N \sum_{k=1}^{N-1} \mathbb{E}_t[X^Y_k(\ell) -X^W_k(\ell)\ |\ \mathcal{T}=\ell]^2\\
&\le N e^{-2\lambda_N \ell} \sum_{k=1}^{N-1} \mathbb{E}_t[Y_k -W_k]^2.
\end{align*}
Therefore, by \eqref{initcoupling}
\begin{equation}\label{Eq:Bell}
\mathbb{E}_t \left[ A_\ell \ |\ \mathcal{T}=\ell \right] \le \sqrt{N} B(\nu) e^{-\lambda_N \ell}\;.
\end{equation}
By definition of the total-variation distance we have
\begin{align*} \| P_t^{\nu} - \pi_N \|_{TV} &\le
\mathbb{P}_t(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t))
\\&= \sum_{\ell=0}^{t-1} \mathbb{P}_t(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t) \ |\ \mathcal{T}=\ell) \mathbb{P}_t(\mathcal{T}=\ell)\;.
\end{align*}
We treat separately the case $\ell = 0$ (recall that $\mathcal{T}=0$ on the event where not all Poisson clocks have rung on $(0,t)$). Using \eqref{Eq:SimpleTell} we have
$$ \mathbb{P}_t(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t) \ |\ \mathcal{T}=0) \mathbb{P}_t(\mathcal{T}=0) \le \mathbb{P}_t(\mathcal{T}=0) \le CNe^{-t}\;.$$
On the other hand, combining \eqref{Eq:Bell} and Lemma \ref{Lemma:EstimateBruteGap} we find
\begin{multline*}
\sum_{\ell=1}^{t-1} \mathbb{P}_t(\mathbf{X}^{\nu}(t)\ne \mathbf{X}^\pi(t) \ |\ \mathcal{T}=\ell) \mathbb{P}_t(\mathcal{T}=\ell) \\ \le Ce^{-c't^2} + C'' N^{1/2} B(\nu) t^{2K+1} e^{-\lambda_N t} \mathbb{E} \left[ e^{\lambda_N (t- \mathcal{T})} \right]\;.
\end{multline*}
We can then conclude using \eqref{Eq:EstimateTell}.
\end{proof}
\section{Upper bound on the mixing time}\label{Sec:UpperTight}
\subsection{Proof strategy}
The overall strategy is similar to that in \cite{CLL}. First, we show that the `maximal' evolution gets close to equilibrium by time $\log N / (2\lambda_N)$. More precisely, let $\nu^\wedge$ denote the equilibrium measure $\pi$ conditioned to having $x_i\ge N$ for all $i=1,\ldots,N-1$. Let $\mathbf{X}^\wedge$ denote the evolution with initial condition $\nu^\wedge$ and call $P_{t}^\wedge$ its law at time $t$. We have the following result.
\begin{theorem}\label{th:wedge}
For any $\delta>0$,
$$\lim_{N\to \infty}
\| P_{t_{\delta}}^\wedge - \pi \|_{TV} = 0\;,$$
where $ t_{\delta}:= (1+\delta) \frac{\log N}{2 \lambda_N}.$
\end{theorem}
\noindent Next, using Theorem \ref{th:wedge} as an input, we compare the evolution $\mathbf{X}^x$ for an arbitrary initial state $x$ with $\|x\|_\infty\le N$ to $\mathbf{X}^\wedge$ and show that they come close in total variation by time $t_{\delta}$.
\begin{theorem}\label{th:xwedge}
For any $\delta>0$,
\begin{equation}\label{eq:thth}
\lim_{N\to \infty} \sup_{x\in \Omega_N : \,\|x\|_{\infty}\le N} \| P_{t_{\delta}}^x - P_{t_{\delta}}^\wedge \|_{TV}=0\;.
\end{equation}
\end{theorem}
The upper bound stated in Theorem \ref{th:main1} follows from the two results above and the triangle inequality. Although Theorem \ref{th:main1} can be deduced from Theorem \ref{th:xwedge} alone, the intermediate result provided by Theorem \ref{th:wedge} is a crucial ingredient in our proof of Theorem \ref{th:xwedge}.
Let us briefly explain the importance of Theorem \ref{th:wedge} as an intermediary step. Our proof of Theorem \ref{th:xwedge} is based on a coupling argument that uses monotonicity. For this reason it is important to start with initial conditions that are ordered (for the order on $\Omega_N$). This is the case here since the random initial configuration $\nu^\wedge$ is by definition always above $x$ if $\|x\|_{\infty}\le N$ (while using directly $\pi_N$ as an initial condition instead of $\nu^\wedge$ would not work).
On the other hand, our proof of Theorem \ref{th:xwedge} also requires to apply the equilibrium estimates of Lemma \ref{compare} to $\mathbf{X}^\wedge$. It is the double requirement of having a trajectory which is already close to equilibrium and above $\mathbf{X}^{x}(t)$
which makes Theorem \ref{th:wedge} a necessity.
Observe that for all $t$ the density
$\,\text{\rm d} P_{t}^\wedge/ \,\text{\rm d} \pi_N$ is an increasing function. This allows for the use of various tools in order to control $\| P_{t_{\delta}}^\wedge - \pi \|_{TV}$, such as the FKG inequality as well as the censoring inequality. Our proof of Theorem \ref{th:wedge} (which is postponed to Section \ref{Sec:Wedge}) is entirely based on these tools and cannot be adapted to an arbitrary initial condition.
\subsubsection*{Proof strategy for Theorem \ref{th:xwedge}}
The remainder of this section is devoted to the proof of Theorem \ref{th:xwedge}.
From now on, the processes $\mathbf{X}^{\wedge}(t)$ and
$\mathbf{X}^{x}(t)$ are coupled through the sticky coupling of Subsection \ref{Subsec:sticky} (we denote by $\mathbb{P}$ the associated distribution).
To prove Theorem \ref{th:xwedge} we want to estimate the time at which the trajectories $\mathbf{X}^{\wedge}(t)$ and
$\mathbf{X}^{x}(t)$ merge using the auxiliary function
\begin{equation}\label{defat}
A_t=\sum_{k=1}^{N-1} (X^{\wedge}_k(t)-X^{x}_k(t))\;,
\end{equation}
which corresponds to the area between the two configurations at time $t$. By monotonicity $A_t\ge 0$ and the merging time of the two trajectories is the hitting time of $0$ by the random process $A_t$.
The control of the evolution of $A_t$ proceeds in several steps.
First we use the heat equation for a time $t_{\delta/2}$ to bring the area $A_t$ between the ordered configurations $X^\wedge_t$ and $X^x_t$ below a first threshold equal to $N^{3/2-\eta}$ where $\eta>0$ is a parameter that will be taken to be small depending on $\delta$. This step relies on Lemma \ref{lem:contraction}.
In a second step, we show that within an additional time $T=O(N^2)$, with large probability, $A_t$ falls below a second threshold $N^{-\eta}$.
This is a delicate step, which requires the application of diffusive estimates for super-martingales during a finite sequence of intermediate stages each running for a time $O(N^2)$. It relies tremendously on the specificity of the sticky coupling, and also on the fact that one of the trajectories we are trying to couple is already at equilibrium (cf. Theorem \ref{th:wedge}).
The final step brings the area from $N^{-\eta}$ to zero, by using
Proposition \ref{Prop:RW}, the proof of which indicates that after the second threshold has been attained
merging occurs with large probability as soon as every coordinate has been updated once, which by the standard coupon collector argument, takes a time of order $\log N$.
\subsection{Proof of Theorem \ref{th:xwedge}}
We introduce the successive stopping times
$$ \mathcal{T}_i:= \inf \{ t \ge t_{\delta/2}: \ A_t\le N^{3/2-i\eta} \}\;, \quad i\ge 0\;,$$
where $\eta>0$ is a parameter that we are going to choose small enough depending on $\delta$.
\textit{Step 1}: We want to show that by time $t_{\delta/2}$, $A_t$ is much smaller than $N^{3/2-4\eta}$ (here the factor $4$ is present only for technical reasons, and can be considered irrelevant since $\eta$ is arbitrary).
\begin{lemma}\label{lem:A}
Setting
$\mathcal{A}=\mathcal{A}_N:= \{ \mathcal{T}_4 = t_{\delta/2}\}\;,$ and fixing $\eta\le \delta/20$ we have
$$ \lim_{N\to\infty}\mathbb{P}(\mathcal{A}_N) = 1\;.$$
\end{lemma}
\begin{proof}
As in Lemma \ref{lem:contraction}
\begin{align*}
\mathbb{E}[A_t]
\le
\sqrt{N} \sqrt{\sum_{k=1}^{N-1}\left( \mathbb{E}\left[ X^{\wedge}_k(0)- x_k\right]\right)^2 }e^{-\lambda_N t}\le 4 N^2 e^{-\lambda_N t}.
\end{align*}
In the last inequality we used the fact that $|x_k|\le N$ (by definition) and the fact that $\mathbb{E}\left[ X^{\wedge}_k(0)\right]\le 3N$ (cf. the proof of Proposition \ref{prop:Wt} for this estimate).
Using this estimate for $t=t_{\delta/2}$ we obtain
$$\mathbb{E}[A_t]\le 4N^{\left(3-\delta/2\right)/2}.$$
Since by monotonicity of the coupling, $A_t$ is positive, we can conclude using Markov's inequality.
\end{proof}
\noindent\textit{Step 2}: The aim of the second step is to prove the following estimate
\begin{proposition}\label{propstep2}
Introduce $I:= \min\{i\ge 1: 3/2-i\eta \le -\eta\}$. We have
$$ \lim_{N\to\infty} \mathbb{P}(\mathcal{T}_I \le t_{\delta/2} + N^2/2) = 1\;.$$
\end{proposition}
To highlight better the main ideas of the proof, we postpone the proof of some of the technical lemmas (namely Lemma \ref{lem:le1}, Lemma \ref{lem:bracket} and Lemma \ref{lem:le3}) to the next subsection and focus on the main steps of the reasoning.
By \eqref{Eq:Heat}, we observe that $A_t$ is a super-martingale.
More precisely, considering the natural filtration $(\mathcal{F}_s)_{s\ge 0}$ associated with the process $(\mathbf{X}^{\wedge},\mathbf{X}^{x})$, the conditional version of \eqref{Eq:Heat} summed along the coordinates yields
\begin{equation}
\mathbb{E}[ A_t \ | \ \mathcal{F}_s]= A_s -\int^t_s \mathbb{E}[ X^{\wedge}_1(u)-X^{x}_1(u)+ X^{\wedge}_{N-1}(u)-X^{x}_{N-1}(u)]\,\text{\rm d} u \le A_s,
\end{equation}
where again we have used the fact that our coupling preserves the ordering.
To prove Proposition \ref{propstep2}, we would like to use diffusive estimates in the form of \cite[Proposition 21]{CLL} but this requires
a modification of $(A_t)$ in such a way that it becomes a super-martingale with bounded jumps.
We thus define
\begin{align*}
\mathcal{R}_i&:= \inf \{ s\ge \mathcal{T}_{i-1} \ : \ A_{s}\ge N^{3/2-(i-2)\eta}\}\;,\quad i\ge 1\;,\\
\mathcal{Q}&:= \inf \{ s\ge t_{\delta/2}:\; \|\nabla\mathbf{X}_{s}^{\wedge}\|_{\infty}\ge (\log N)^{2}\}\;.
\end{align*}
and
$\mathcal{R}:= \inf_{i \in \llbracket 1, I\rrbracket} \mathcal{R}_i\wedge \mathcal{Q}$.
We consider the super-martingale
$$ M_t := \begin{cases} A_t &\mbox{ if } t<\mathcal{R}\\
A_\mathcal{R} \wedge N^{\frac32 - (i-2)\eta} &\mbox{ if } t\ge \mathcal{R} \mbox{ and } \mathcal{R} =\mathcal{R}_i < \mathcal{R}_{i+1}\;,\\
A_\mathcal{R} &\mbox{ if }t\ge\mathcal{R}\mbox{ and } \mathcal{R}= \mathcal{Q} < \inf_{i \in \llbracket 1, I\rrbracket} \mathcal{R}_i\;.
\end{cases}$$
The construction of $M_t$ is designed so that with large probability it coincides with $A_t$. To show this we
introduce a collection of events:
\begin{align*}
& \mathcal{B}= \mathcal{B}_N := \big\{ \forall t\in [t_{\delta/2},N^3]:\quad \|\nabla\mathbf{X}_{t}^{\wedge}\|_{\infty} < (\log N)^{2} \big\},\\
& \mathcal{C}= \mathcal{C}_N := \big\{ \forall i\in \llbracket 4, I\rrbracket, \forall s\ge \mathcal{T}_{i-1}:\quad A_s\le N^{3/2-(i-3/2)\eta} \big\},\\
&\mathcal{D}=\mathcal{D}_N := \big\{ \forall t\in [t_{\delta/2},N^3]:\quad \max( \| \mathbf{X}_{t}^{\wedge}\|_{\infty},
\|\mathbf{X}_{t}^{x}\|_{\infty})\le \sqrt{N}(\log N)^2 \big\},
\end{align*}
Note that on $\mathcal{B}\cap \mathcal{C}$, we have $\mathcal{R}\ge N^3$. We will show that $\mathcal{B},\mathcal{C},\mathcal{D}$ are all very likely. This step of the proof requires Theorem \ref{th:wedge} as an input.
\begin{lemma}\label{lem:le1}
We have $\lim_{N\to\infty} \mathbb{P}(\mathcal{B}_N\cap \mathcal{C}_N\cap \mathcal{D}_N) = 1$.
\end{lemma}
\noindent Then using the method developed in \cite{CLL} we control the increments of $\langle M \rangle$, which denotes the angle bracket of the martingale part of $M_t$, between each consecutive $\mathcal{T}_i$.
\begin{lemma}\label{lem:bracket}
The probability of the event
\begin{equation}
\mathcal{E}=\mathcal{E}_N:=\left\{ \forall i\le I,\ \langle M \rangle_{\mathcal{T}_i}-\langle M \rangle_{\mathcal{T}_{i-1}}\le 4 N^{3-2(i-2)\eta}\right\}.
\end{equation}
satisfies $\lim_{N\to \infty} \mathbb{P}(\mathcal{E}_N)=1.$
\end{lemma}
Then in order to compare ${\mathcal{T}_i}-\mathcal{T}_{i-1}$ to $\langle M \rangle_{\mathcal{T}_i}-\langle M \rangle_{\mathcal{T}_{i-1}}$, we prove the following estimates on the bracket derivative
\begin{lemma}\label{lem:le3}
When $\mathcal{B}\cap \mathcal{C} \cap \mathcal{D}$ holds, for all $t\in [t_{\delta/2},N^3\wedge \mathcal{T}_I]$ we have
\begin{equation}
\partial_t \langle M \rangle_t \ge \frac1{8(\log N)^{C}} \min \left( \frac{M_t}{\sqrt{N}}, \frac{M^2_t}{N}\right),
\end{equation}
for some constant $C>0$.
\end{lemma}
Then we can conclude by simply combining the control we have on the bracket increments, and that on the bracket derivative. The following lemma, combined with the fact that $\mathcal{A}\cap\mathcal{B}\cap\mathcal{C}\cap \mathcal{D}\cap\mathcal{E}$ holds with large probability, implies Proposition \ref{propstep2}.
\begin{lemma}\label{lem:4}
On the event $\mathcal{A}\cap\mathcal{B}\cap\mathcal{C}\cap \mathcal{D}\cap\mathcal{E}$
we have $$\forall i\in \llbracket 5, I\rrbracket, \ \mathcal{T}_i - \mathcal{T}_{i-1} \le 2^{-i} N^2.$$
In particular we have $\mathcal{T}_{I}\le t_{\delta/2}+N^2/2$.
\end{lemma}
\begin{proof}
We work on the event $\mathcal{A}\cap\mathcal{B}\cap\mathcal{C}\cap \mathcal{D}\cap\mathcal{E}$. Let $j$ be the smallest $i\ge 5$ such that $\mathcal{T}_i - \mathcal{T}_{i-1} > 2^{-i} N^2$ and assume that $j \le I$. Then, $\mathcal{T}_{j-1}+2^{-j}N^2 \le N^3$ so that by Lemma \ref{lem:le3}
$$ \langle M\rangle_{\mathcal{T}_{j-1} + 2^{-j} N^2}- \langle M\rangle_{\mathcal{T}_{j-1}} \ge (\log N)^{-C'} 2^{-j} N^2 (N^{1-j\eta} \wedge N^{2(1-j\eta)})\;,$$
where we use the fact that $A_t\ge N^{3/2-j\eta}$ if $t\le \mathcal{T}_j$ and $M_t=A_t$ on $\mathcal{B}\cap\mathcal{C}$. Moreover since we work on $\mathcal{E}$ we have
$$ \langle M\rangle_{\mathcal{T}_{j-1} + 2^{-j} N^2}- \langle M\rangle_{\mathcal{T}_{j-1}} \le 4 N^{3-2(j-2)\eta}\;.$$
These two inequalities are incompatible for $N$ large enough and the lemma is proved.
\end{proof}
\noindent \textit{Step 3}: \ The last step consists in bringing the area to $0$ within a short time after $t_{\delta/2}+N^2/2$. Introduce the event
$$ \mathcal{G} := \big\{ A_{t_{\delta/2}+N^2/2} \le N^{-\eta/2}\big\}\;.$$
The following estimates can be proved as a variant of Lemma \ref{Lemma:EstimateBruteGap}.
\begin{lemma}\label{lem:5}
There exists $C>0$ such that for any $t\ge \log N$ we have
$$\mathbb{P}( \mathbf{X}^\wedge(t_{\delta/2}+N^2/2+t) \ne \mathbf{X}^x(t_{\delta/2}+N^2/2+t) \,|\, \mathcal{G}) \le C(Ne^{-t} + t^{2K+1} N^{-\eta/2})\;.$$
\end{lemma}
\begin{proof}
This is an adaptation of the argument in Lemma \ref{Lemma:EstimateBruteGap}. Denote by $(t_k^{(i)})_{i=1}^{n_k}$ the ordered set of update times occurring at site $k$ on the time-interval $(t_{\delta/2}+N^2/2,t_{\delta/2}+N^2/2+t)$. Let $\widetilde{\mathcal{F}}$ be the sigma-field generated by all the $(t_k^{(i)})$ and by $\mathbf{X}^\wedge(t_{\delta/2}+N^2/2)$, $\mathbf{X}^x(t_{\delta/2}+N^2/2)$, and let $\widetilde{\mathbb{P}}$ be the associated conditional probability. Define $\mathcal{H} := \{\forall k\in\llbracket 1,N-1\rrbracket: n_k \ge 1\}$. Then, the very same arguments as in the proof of \eqref{eq:timecondit} show that on the $\widetilde{\mathcal{F}}$-measurable event $\mathcal{G}\cap\mathcal{H}$ we have
$$ \widetilde{\mathbb{P}}( \mathbf{X}^\wedge(t_{\delta/2}+N^2/2+t) \ne \mathbf{X}^x(t_{\delta/2}+N^2/2+t) ) \le C\left(\max_{k\in \llbracket 1,N-1\rrbracket} n_k\right) (e^{-ct^2}+t^{2K} B)\;,$$
with
\begin{align*} B &= \sum_{k=1}^{N-1} \big|X^\wedge_k(t_{\delta/2}+N^2/2) - X^x_k(t_{\delta/2}+N^2/2)\big|\\
&= \sum_{k=1}^{N-1} X^\wedge_k(t_{\delta/2}+N^2/2) - X^x_k(t_{\delta/2}+N^2/2)\;.\end{align*}
Furthermore, given $\mathcal{H}$, the $n_k$'s are i.i.d.~Poisson r.v.~of parameter $t$ conditioned to be positive. Therefore, reasoning as in \eqref{eq:poissonmax}, for all $t\ge \log N$
$$ \mathbb{E}[\max_{k\in \llbracket 1,N-1\rrbracket} n_k \ |\ \mathcal{H}] \le Ct\;.$$
Finally, we have
$$ \mathbb{P}(\mathcal{H}^\complement) \le N e^{-t}\;.$$
Putting everything together we obtain the stated estimate.
\end{proof}
With the help of this final step, we can conclude the proof.
\begin{proof}[Proof of Theorem \ref{th:xwedge}]
By the Martingale Stopping Theorem, since $(A_t)_{t\ge 0}$ is a supermartingale then $(A_{\mathcal{T}_I+t})_{t\ge 0}$ is also a càd-làg non-negative super-martingale (for the adequate filtration). A maximal inequality (sometimes referred to as Ville's Maximal Inequality see \cite[Exercise 8.4.2]{Dur19} for the discrete time version and also \cite{ville1939})
\begin{equation}\label{villeo1}
\mathbb{P}\left(\sup_{t\ge 0} A_{\mathcal{T}_I + t} > N^{-\eta/2}\right) \le \mathbb{E}\left[A_{\mathcal{T}_I}\right]N^{\eta/2}\;.
\end{equation}
Therefore,
\begin{equation}\label{ville1}
\lim_{N\to\infty} \mathbb{P}\left(\sup_{t\ge 0} A_{\mathcal{T}_I + t} > N^{-\eta/2}\right) = 0\;.
\end{equation}
Combining this with Lemma \ref{lem:4}, we deduce that the probability of the event $\mathcal{G}$ goes to $1$.
Applying Lemma \ref{lem:5} we thus deduce that for $t=2\log N$ we have
$$ \lim_{N\to\infty} \mathbb{P}( \mathbf{X}^\wedge(t_{\delta/2}+N^2/2+t) \ne \mathbf{X}^x(t_{\delta/2}+N^2/2+t) \,|\, \mathcal{G}) = 0\;.$$
Since all our estimates hold uniformly over all $x\in \Omega_N$ with $\|x\|_\infty\le N$, this suffices to deduce \eqref{eq:thth}.
\end{proof}
\subsection{Proof of the technical estimates of step 2}
\begin{proof}[Proof of Lemma \ref{lem:le1}]
To prove that $\mathcal{B}$ and $\mathcal{D}$ have small probability, we are going to show that similar events have small probability for the stationary version of our Markov chain $(\mathbf{X}^{\pi}(t))_{t\ge 0}$ and then use Theorem \ref{th:wedge}.
By a simple coupling argument, for any $\mathcal{A}\subset \Omega_N$ we have
\begin{multline}\label{Eq:wut}
\mathbb{P}\left(\exists t\in [t_{\delta/2},N^3]:\, \mathbf{X}_{t}^{\wedge}\in \mathcal{A} \right)\\ \le \| P_{t_{\delta/2}}^\wedge - \pi\|_{TV} + \mathbb{P}\Big( \exists t\in [0,N^3-t_{\delta/2}]:\, \mathbf{X}_{t}^{\pi}\in \mathcal{A} \Big)\;,
\end{multline}
where, with slight abuse of notation we denote by $\mathbb{P}$ the distribution of $\mathbf{X}^{\pi}$, the Markov chain starting from the equilibrium distribution.
By symmetry arguments (using the fact that $\hat V(x):=V(-x)$ satisfies $\hat V\in\mathscr{C}$), \eqref{Eq:wut} remains true upon replacing $\mathbf{X}^\wedge$ by $\mathbf{X}^\vee$ the dynamics with initial distribution $\pi( \cdot \ | \ \forall i\in \llbracket 1,N-1\rrbracket,\ x_i\le -N)$.
The first term in the r.h.s.\ of \eqref{Eq:wut} goes to zero by Theorem \ref{th:wedge}. To bound the second term, we use a standard subdivision scheme and estimates on the invariant measure. More precisely, if one subdivides $[0,N^3]$ into intervals of length $N^{-6}$ then with a probability $1-O(N^{-1})$, there is at most one resampling event per interval. Since the process is stationary, we can bound the second term in the r.h.s.\ of \eqref{Eq:wut} by
$ N^9 \pi_N(\mathcal{A})+C N^{-1}$.
To prove that $\lim_{N\to\infty}\mathbb{P}[\mathcal{B}^{\complement}_N]=0$, use \eqref{Eq:wut} with $\mathcal{A}= \{ \|\nabla x\|_{\infty} > (\log N)^{2} \}$,
and apply Corollary \ref{cor:bazics} which entails that $ N^9 \pi_N(\mathcal{A})\le N^{-1}$.
We turn now to $\mathcal{D}$. Using \eqref{Eq:wut} and the argument above with $\mathcal{A}= \{\|x\|_{\infty} > \sqrt{N}(\log N)^2\}$ and Corollary \ref{cor:bazics} we deduce that
\begin{equation}\label{Eq:Decaywedge} \lim_{N\to\infty}\mathbb{P}\big( \exists t\in [t_{\delta/2},N^3]:\, \| \mathbf{X}_{t}^{\wedge}\|_{\infty} > \sqrt{N}(\log N)^2 \big) = 0\;.\end{equation}
and similarly for $\mathbf{X}^{\vee}$. To get a similar estimate for $\mathbf{X}^x$
it is sufficient to observe that from Lemma \ref{lem:mongc}
$\mathbf{X}^x$ is stochastically dominated by $\mathbf{X}^\wedge$
and stochastically dominates $\mathbf{X}^\vee$, so that we can deduce from \eqref{Eq:Decaywedge} the desired bound for $\max_k X_{k}^{x}(t)$ and $\min_k X_{k}^{x}(t)$ respectively, concluding the proof of $\lim_{N\to\infty}\mathbb{P}(\mathcal{D}_N)=1$.
Finally let us focus on the event $\mathcal{C}_N$.
For every $i\ge 1$, by the Martingale Stopping Theorem and Ville's Maximal Inequality (as in \eqref{villeo1}) we have
$$ \mathbb{P}\left(\sup_{t\ge 0} A_{\mathcal{T}_{i-1}+t} > N^{3/2-(i-3/2)\eta}\right) \le \mathbb{E}[A_{\mathcal{T}_{i-1}}] N^{-3/2+(i-3/2)\eta} \le N^{-\eta/2}\;.$$
Since $I$ is a fixed non-random integer, a union bound shows that $\lim\limits_{N\to\infty} \mathbb{P}(\mathcal{C}_N) = 1$.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:bracket}]
The proof follows from a diffusivity bound developed in an earlier work \cite[Proposition 21]{CLL}, applied to the super-martingales
$$M^{(i)}_s= M_{s+\mathcal{T}_{i-1}},$$
whose jump sizes are bounded above by $N^{3/2-(i-2)\eta}$.
We refer to \cite{CLL} for more intuition about this inequality.
\end{proof}
To prove Lemma \ref{lem:le3} we will require an intermediate technical result derived from the preliminary work of Section \ref{sec:tecnos} which allows us to estimate the bracket derivative.
Define $$ \delta X_k(t) := X^\wedge_k(t) - X^x_k(t).$$
\begin{lemma}\label{lem:le2}
When $\mathcal{B}\cap \mathcal{C}$ holds, for all $t\in [t_{\delta/2},N^3 \wedge \mathcal{T}_I]$ at which
$\langle M \rangle_t$ is differentiable (all $t$ except a random countable set)
\begin{equation}
\partial_t \langle M \rangle_t \ge \frac{1}{2}\sum_{k=1}^{N-1}\left[ (\delta X_k(t))^2\wedge (\log N)^{-C_K}\right],
\end{equation}
for some constant $C_K > 0$.
\end{lemma}
\begin{proof}[Proof of Lemma \ref{lem:le3} assuming Lemma \ref{lem:le2}]
Write $A_t=U_t + V_t$ where, for some $a>0$:
\begin{equation}
U_t=\sum_{k=1}^{N-1}
\delta X_k(t)\mathbf{1}_{\{\delta X_k(t)< a\}}\,,\quad V_t=\sum_{k=1}^{N-1}
\delta X_k(t)\mathbf{1}_{\{\delta X_k(t)\ge a\}}.
\end{equation}
The Cauchy-Schwarz inequality shows that $$U_t^2\le N \sum_{k=1}^{N-1}
(\delta X_k(t))^2\mathbf{1}_{\{\delta X_k(t)\le a\}}.$$
Take $a=(\log N)^{-\frac12 C_K}$. If $U_t\ge A_t/2$, then
Lemma \ref{lem:le2} implies
\begin{equation}
\partial_t \langle M \rangle_t \ge \frac{A_t^2}{8N}.
\end{equation}
If on the other hand $V_t\ge A_t/2$, then letting $n_t$ denote the number of indices $k$ such
that $\delta X_k(t)\ge a$,
Lemma \ref{lem:le2} implies
\begin{equation}
\partial_t \langle M \rangle_t \ge \frac12(\log N)^{-C_K} n_t.
\end{equation}
Since $0\le \delta X_k(t)\le 2\max( \| \mathbf{X}_{t}^{\wedge}\|_{\infty},
\|\mathbf{X}_{t}^{x}\|_{\infty})$, on the event $\mathcal{D}$ we get
\begin{equation}
n_t\ge \frac1{2\sqrt{N}(\log N)^2}\sum_{k=1}^{N-1}
\delta X_k(t)\mathbf{1}_{\{\delta X_k(t)\ge a\}} \ge \frac{A_t}{4\sqrt{N}(\log N)^2}.
\end{equation}
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:le2}]
We write $\rho^\wedge_k=\rho_{X^\wedge_{k-1},X^\wedge_{k+1}}$, $\rho^x_k=\rho_{X^x_{k-1},X^x_{k+1}}$ for the resampling densities at $k$. Define
$$q_k:=\frac12 \int_\mathbb{R}|\rho^\wedge_k(u)-\rho^x_k(u)|du \;.$$
Recall the sticky coupling of Subsection \ref{Subsec:sticky}, in particular the laws $\nu_i$ defined therein. The derivative of the angle bracket
$\partial_t \langle M \rangle_t$ admits an explicit expression which can be derived from the sticky coupling description. For any $t \in [\mathcal{T}_{i-1}, \mathcal{T}_{i} \wedge \mathcal{R})$
\begin{equation}\label{drifft}
\partial_t \langle M \rangle_t = \sum_{k=1}^{N-1}\left(
(1-q_k) (\delta X_k(t_-))^2 + q_k \mathbb{E}[Y^2\,|\,\mathcal{F}_{t_-}] \right),
\end{equation}
where
$$
Y=(Y^\wedge - Y^x - \delta X_k(t_-))\wedge (R-M_{t-})\,,\quad R:= N^{3/2-(i-2)\eta}
$$
and
$(Y^\wedge,Y^x)$ are, conditionally given $\mathcal{F}_{t-}$, independent r.v.~with densities $\nu_3$ and $\nu_1$ respectively. The expression \eqref{drifft} simply comes from the fact that for each $k$,
$M_t$ will jump by an amount $\delta X_k(t_-)$ with probability $1-q_k$ and by an amount $Y$ with probability $q_k$. Note that the truncation with $R-M_{t-}$ in the variable $Y$ comes from the definition of $M$ in terms of $A$.
We now work on the event $\mathcal{B} \cap \mathcal{C}$. From Lemma \ref{lem:lequ} we have
\begin{equation}\label{eq:qq1}
q_k\le C (\delta\bar X_k)(\log N)^{2K},
\end{equation}
for all $k$, where we use the notation
$$
\delta\bar X_k=\frac12(X^\wedge_{k+1} +X^\wedge_{k-1} -
X^x_{k+1}- X^x_{k-1}).
$$
To prove Lemma \ref{lem:le2} it is then sufficient to show that if $q_k\ge 1/2$ then
\begin{equation}\label{eq:tos}
\mathbb{E}\left[Y^2\,|\,\mathcal{F}_{t_-}\right]\ge (\log N)^{-C_K}\;,
\end{equation}
for some constant $C_K>0$. Note that under the event $\mathcal{C}$ we have $ R-M_{t-}\ge R/2$. Moreover, if $q_k \ge 1/2$, because of the event $\mathcal{B}$ by Lemma \ref{lem:lerho} the density of the random variable $\widetilde{Y}:=Y^\wedge - Y^x - \delta X_k(t_-)$ is bounded above by $L:=C'(\log N)^{2K}$.
We next observe that we may assume $R\ge 2$. Indeed, if $R\le 2$ and
$q_k\ge 1/2$, then by \eqref{eq:qq1} we also have $\delta \bar X_k \ge (2C)^{-1} (\log N)^{-2K}$ and
$$(2C)^{-1} (\log N)^{-2K} \le M_{t-} \le N^{3/2-(i-3/2)\eta} = N^{-\eta/2} R \le 2 N^{-\eta/2}\;,$$
thus raising a contradiction. Hence assuming $ R-M_{t-}\ge R/2$ and $R\ge 2$ we may estimate
\begin{align*}
\mathbb{E}\left[Y^2\,|\,\mathcal{F}_{t_-}\right]&\ge\mathbb{E}\left[\widetilde{Y}^2\wedge (R/2)^2\,|\,\mathcal{F}_{t_-}\right]\ge \int_0^{1} 2v\mathbb{P}(|\widetilde{Y}|>v\,|\,\mathcal{F}_{t_-})dv.
\end{align*}
The bounded density property implies $\mathbb{P}(|\widetilde{Y}|>v\,|\,\mathcal{F}_{t_-})\ge 1-2Lv\ge 1/2$ for all $v\in[0,(4L)^{-1}]$. It follows that
\begin{align*}
\mathbb{E}\left[Y^2\,|\,\mathcal{F}_{t_-}\right]\ge\int_0^{(4L)^{-1}} vdv =\frac1{32 L^{2}}.
\end{align*}
This proves \eqref{eq:tos}.
\end{proof}
\section{Proof of Theorem \ref{th:wedge}}\label{Sec:Wedge}
The proof is based on ideas first introduced in \cite{Lac16} for card shuffling by adjacent transpositions.
An adaptation to the continuous setting was later developed in \cite{CLL}, for the specific case of the adjacent walk on the simplex. Here we are going to follow the proof of \cite[Proposition 14]{CLL}, with some minor modifications due to the different setting.
We start by recalling the Peres-Winkler censoring inequality.
\subsection{Censoring}\label{sec:censor}
The censoring inequality of Peres and Winkler \cite{PWcensoring} compares the distance to equilibrium at time $t$ for two Markov processes, one of which is obtained as a censored version of the other by omitting some of the updates according to a given censoring scheme. The version of the result that we need here is formulated as Proposition \ref{pro:censor} below. The proof is an
adaptation to the present
setting of the original argument for monotone finite spins systems in \cite{PWcensoring}. For completeness we give a brief self-contained account below.
A {\em censoring scheme} $\mathcal{C}$ is defined as a c\`adl\`ag map
$$
\mathcal{C}:[0,\infty)\to \mathcal{P}(\{1,\dots,N-1\}),
$$
where $\mathcal{P}(A)$ denotes the set of all subsets of a set $A$. The subset $\mathcal{C}(s)$, at any time $s\ge 0$, represents the set of labels whose update is to be suppressed at that time. More precisely, given a censoring scheme $\mathcal{C}$, and an initial condition $x\in\Omega_N$, we write $P_{t,\mathcal{C}}^x$ for the law of the random variable obtained by starting at $x$ and applying the standard graphical construction (see Section \ref{Sec:GC}) with the proviso
that if label $j$ rings at time $s$, then the update is performed if and only if $j\notin\mathcal{C}(s)$. In particular, the uncensored evolution $P^x_t$ corresponds to $P^x_{t,\mathcal{C}}$ when $\mathcal{C}(s)\equiv\eset$. Given a distribution $\mu$ on $\Omega_N$, we write $$\mu P_{t,\mathcal{C}}=\int P^x_{t,\mathcal{C}}\,\mu(dx).$$
Let $\mathcal{S}_N$ denote the set of probability measures $\mu$ on $\Omega_N$ which are absolutely continuous with respect to \ $\pi_N$ and such that the density $d\mu/d\pi_N$ is an increasing function on $\Omega_N$. Recall the notation $\mu\le \nu$ for stochastic domination.
\begin{proposition}\label{pro:censor}
If $\mu\in\mathcal{S}_N$, and $\mathcal{C}$ is a censoring scheme, then for all $t\ge 0$
\begin{equation}\label{Eq:censura}
\|\mu P_t-\pi_N\|_{TV}\le \|\mu P_{t,\mathcal{C}}-\pi_N\|_{TV}.
\end{equation}
\end{proposition}
The proof is a consequence of the next two lemmas.
\begin{lemma}\label{lem:kfs}
If $\mu,\nu$ are two probability measures on $\Omega_N$ such that $\mu\in\mathcal{S}_N$ and $\mu\le \nu$, then
\begin{align}\label{Eq:familyS2}
\|\mu-\pi_N\|_{TV}\le \|\nu-\pi_N\|_{TV}.
\end{align}
\end{lemma}
\begin{proof}
Setting $\varphi=d\mu/d\pi_N$ and $A=\{\varphi\ge 1\}$,
\begin{align}
\|\mu-\pi_N\|_{TV} &= \mu(A)-\pi_N(A).
\end{align}
Since $A$ is increasing, $\mu(A)\le \nu(A)$, and therefore
\begin{align}
\|\mu-\pi_N\|_{TV} &\le \nu(A) - \pi_N(A)\le \|\nu-\pi_N\|_{TV}\,.
\end{align}
\end{proof}
Let $\mathcal{Q}_i:L^2(\Omega_N,\pi_N)\to L^2(\Omega_N,\pi_N)$, $i=1,\dots,N-1$, denote the
integral operator
\begin{align}
\label{eq:qif}
\mathcal{Q}_i f (x) = \int f(x^{(i,u)})\rho_{x_{i-1},x_{i+1}}(u)du,
\end{align}
so that $\mathcal{Q}_i f $ is the expected value of $f$ after the update of label $i$; see \eqref{projectors}.
If $\mu$ is a probability on $\Omega_N$,
we write $\mu \mathcal{Q}_i$ for the probability measure defined by
$$
\mu \mathcal{Q}_i (f) = \int \mu(dx)\mathcal{Q}_i f(x)\,.
$$
\begin{lemma}\label{lem:muqi}
If $\mu \in\mathcal{S}_N$ then $\mu \mathcal{Q}_i\in\mathcal{S}_N$ and $\mu \mathcal{Q}_i\le \mu$, for all $i=1,\dots,N-1$.
\end{lemma}
\begin{proof}
Set $\varphi=d\mu/d\pi_N$. Then $\mu \mathcal{Q}_i$ has density $\mathcal{Q}_i\varphi$ with respect to $\pi_N$.
Since $\varphi$ is increasing, for any $x,y\in\Omega_N$ with $x\le y$, from \eqref{eq:qif} and Lemma \ref{lem:mongc} (or more precisely \eqref{firstorder}) it follows that
\begin{align*}
\mathcal{Q}_i\varphi(x) &\le \mathcal{Q}_i\varphi(y)\,.
\end{align*}
Therefore $\mu\mathcal{Q}_i\in\mathcal{S}_N$.
To prove the stochastic domination $\mu \mathcal{Q}_i\le \mu$,
we show that $\mu \mathcal{Q}_i (g)\le \mu (g)$ for any bounded measurable increasing function $g$.
Notice that
$$
\mu \mathcal{Q}_i (g) = \pi_N\left[\varphi\mathcal{Q}_ig\right] = \pi_N\left[(\mathcal{Q}_i\varphi) (\mathcal{Q}_ig)\right].
$$
Since $\varphi$, $g$ are increasing, the FKG inequality on $\mathbb{R}$, which is valid for any probability measure, implies that $(\mathcal{Q}_i\varphi) (\mathcal{Q}_ig)\le \mathcal{Q}_i(\varphi g)$ pointwise. Therefore,
$$
\mu \mathcal{Q}_i (g) \le \pi_N\left[\mathcal{Q}_i(\varphi g)\right]
= \pi_N\left[\varphi g\right] = \mu (g).
$$
\end{proof}
\begin{proof}[Proof of Proposition \ref{pro:censor}]
By Lemma \ref{lem:kfs} it is sufficient to prove that $\mu P_t\in \mathcal{S}_N$ and $\mu P_t\le \mu P_{t,\mathcal{C}}$
for all $t$.
By conditioning on the realization $\mathcal{T}_t$ of the Poisson clocks $\mathcal{T}^{(j)}$, $j\in\llbracket 1,N-1\rrbracket $ up to time $t$ in the graphical construction,
the uncensored
evolution at time $t$ has a distribution of the form
\begin{align}\label{Eq:muz}
\mu^z=\mu\, \mathcal{Q}_{z_1}\cdots \mathcal{Q}_{z_n}\,,
\end{align}
where $z:=(z_1,\dots,z_n)\in\llbracket1,N-1\rrbracket^n$ is a fixed sequence, while the censored
evolution at time $t$ has distribution of the form $\mu^{z'}$, where
$z'$ denotes a sequence obtained from $z$ by removing some of its entries.
Taking the expectation over $\mathcal{T}_t$ then shows that
it is sufficient to prove that $\mu^z\in\mathcal{S}_N$ and $\mu^{z}\le \mu^{z'}$ for any pair of such sequences $z,z'$.
Lemma \ref{lem:muqi} shows that $\mu^z\in \mathcal{S}_N$ for any $\mu\in\mathcal{S}_N$ and any sequence $z$. To prove
$\mu^{z}\le \mu^{z'}$ we may restrict to the case where $z$ and $z'$ differ by the removal of a single update, say $z_j$, so that
$$
z=(z_1,\dots,z_{j-1},z_j,z_{j+1},\dots,z_n)\,,\quad z'=(z_1,\dots,z_{j-1},z_{j+1},\dots,z_n).
$$
Let $\mu_1 = \mu \mathcal{Q}_{z_1}\cdots \mathcal{Q}_{z_{j}}$, and $\mu_2 = \mu \mathcal{Q}_{z_1}\cdots \mathcal{Q}_{z_{j-1}}$. Then $\mu_1=\mu_2\mathcal{Q}_{z_{j}}$ and thus, by Lemma \ref{lem:muqi} one has $\mu_1\le \mu_2$.
Moreover,
$$
\mu^z = \mu_1\mathcal{Q}_{z_{j+1}}\cdots \mathcal{Q}_{z_{n}} \le \mu_2\mathcal{Q}_{z_{j+1}}\cdots \mathcal{Q}_{z_{n}} = \mu^{z'}\,,
$$
where the inequality follows from the fact that each update preserves the monotonicity (cf.\ \eqref{firstorder}).
\end{proof}
\subsection{Relaxation of skeletons}\label{sec:skeleton}
For any integer $K\ge 2$, consider the $K-1$ labels $u_i:=\lfloor iN/K\rfloor$, $i=1,\dots,K-1$. We consider the evolution of the heights
\begin{align}\label{Eq:topk}
Y_i(t) = X_{u_i}(t)\,,\qquad i=1,\dots,K-1,
\end{align}
which will be referred to as the $K$-{\em skeleton} of the interface ${\bf X}(t)$.
\begin{proposition}\label{prop:skeleton}
Fix an integer $K\ge 2$. Let $\mu_t= P^\wedge_t$ and let $\bar \mu_t$ denote the marginal of $\mu_t$ on the $K$-skeleton $\{Y_i(t), i=1,\dots,K-1\}$. If $\bar\pi_N$ denotes the corresponding equilibrium distribution, then for any fixed $\delta>0$, with $t_\delta = (1+\delta)\frac{\log N}{2\alphap_N}$ one has
\begin{equation}\label{Eq:top1}
\lim_{N\to\infty}\|\bar \mu_{t_\delta} - \bar\pi_N\|_{TV} = 0.
\end{equation}
\end{proposition}
Following \cite{Lac16}, the proof of Proposition \ref{prop:skeleton} is based on a subtle use of the FKG inequality together with an explicit estimate on the expected value of the variables $Y_i(t)$.
Given a probability $\mu$ on $\Omega_N$, we write $\bar \mu$ for the marginal of $\mu$ on the $K$-skeleton $y:=(y_1,\dots,y_{K-1})$, where $y_i=x_{u_i}$ for each $i=1,\dots,K-1$.
We use the following notation for the area associated to $K$-skeleton variables $y_i = x_{u_i}$:
$$
W= \sum_{i=1}^{K-1}y_i,
$$
and write $\mu(W)= \bar \mu(W)$ for the expected value of $W$ under $\mu$.
\begin{proposition}\label{prop:muW}
For any $\varepsilon>0$, $K\ge 2$, there exists $\eta=\eta(K,\varepsilon)>0$ such that for all $N\ge 2$, $\mu\in\mathcal{S}_N$ one has:
\begin{equation}\label{Eq:contr1}
\mu(W)\le\eta \sqrt N\;\;\;\Rightarrow\;\;\;\|\bar \mu -\bar \pi_N\|_{TV}\le \varepsilon.
\end{equation}
\end{proposition}
The proof of Proposition \ref{prop:muW} is omitted since it is identical to the proof of Proposition 36 in \cite{CLL}. Let us however point out that this proof uses in a crucial way the improved FKG inequality \eqref{piapib} in Proposition \ref{prop:fkg}.
Next, we control the expected value of $W$ at time $t$. Let ${\bf X}^\wedge(t)=\{X^\wedge_k(t)\}$
denote the random variables with joint law $ P^\wedge_t$.
\begin{proposition}\label{prop:Wt}
For any $k=1,\dots,N-1$, any $t\ge 0$:
$$
\mathbb{E}\left[X^\wedge_k(t)\right]\le 12Ne^{-\alphap_Nt}.
$$
In particular, if $\mu_t= P^\wedge_t$, then for all $t\ge 0$:
\begin{equation}\label{Eq:contrW1}
\mu_t(W)\le 12KN e^{-\alphap_Nt}.
\end{equation}
\end{proposition}
\begin{proof}
Set $v(t)=(v_1(t),\dots,v_{N-1}(t))$, where $v_k(t)=\mathbb{E}\left[X^\wedge_k(t)\right]$.
Expanding $v_k(t)$ in the orthonormal basis \eqref{Eq:eigenfcts},
one finds
$v_k(t) = \sum_{j=1}^{N-1} a_j(t) \varphi_{k}^{(j)}$, where
$a_j(t)
=\sum_{k=1}^{N-1}\varphi_{k}^{(j)}v_k(t)$. Since $\frac{d}{dt}v_k(t)=\frac12(\Delta v(t))_k$, it follows that
$$
a_j(t) = a_j(0)e^{-\lambda_j t}\,,\qquad a_j(0) =\sum_{k=1}^{N-1}
\varphi_{k}^{(j)}v_k(0).
$$
$$
In particular, $|a_j(0)|\le \sqrt{ 2N}|v(0)|_\infty$, where $|v(0)|_\infty=\max_k v_k(0)$. Therefore,
\begin{equation}\label{Eq:diagD1}
v_k(t)\le 2|v(0)|_\infty\sum_{j=1}^{N-1}e^{-\lambda_j t}\,.
\end{equation}
Let us show that $|v(0)|_\infty\le 3N$ for all $N$ large enough. Raising the boundary condition from $(0,0)$ to $(2N,2N)$ and using monotonicity, we see that for all $k$
the random variable $X_k$ with distribution $\nu^\wedge$ is stochastically dominated by the random variable $X_k+2N$ where $X_k$ has distribution $\pi(\cdot\,|\,\min_i x_i\ge -N)$. The claimed monotonicity with respect to the boundary conditions can be checked using the FKG inequality for $\pi(\cdot\,|\,\min_i x_i\ge n)$.
Indeed the density of the measure with raised boundary with respect to the original one is equal (up to a renormalizing constant) to
$$e^{V(2N+x_1)-V(x_1)+V(2N+x_{N-1})-V(x_{N-1})}$$
which by convexity of $V$ is increasing for the order ``$\le$'' on $\Omega_N$.
It follows that
\begin{equation}\label{Eq:monobc}
v_k(0) \le 2N + \pi(x_k|\,{\min}_ix_i\ge -N)\,.
\end{equation}
From Corollary \ref{cor:bazics} and the union bound,
\begin{equation}\label{Eq:expot1}
\pi({\min}_i x_i\ge -N)\ge 1-Ne^{-cN},
\end{equation}
for some constant $c>0$ and all $N$ large enough. Moreover, Lemma \ref{compare} also shows that, uniformly in $k$,
\begin{equation}\label{Eq:expot11}
\pi(x_k; {\min}_i x_i\ge -N)\le \pi(x_k^2)^\frac12\le C\sqrt N\,,
\end{equation}
for some constant $C>0$ and all $N$ large enough. The estimates \eqref{Eq:monobc}--\eqref{Eq:expot11} imply $|v(0)|_\infty\le 3N$ for $N$ large.
From \eqref{Eq:diagD1},
using $\lambda_j \ge j\lambda_1$ it follows that
$$
v_k(t)\le \frac{6Ne^{-\lambda_1 t}}{1-e^{-\lambda_1 t}}.
$$
If $t$ is such that $e^{-\lambda_1 t}\le 1/4$ then this implies $v_k(t)\le 8Ne^{-\lambda_1 t}$. On the other hand if
$e^{-\lambda_1 t}\ge 1/4$ then, using the monotonicity $P^\wedge_t\le \nu^\wedge$ one has $$v_k(t)\le v_k(0)\le 3N \le 12Ne^{-\lambda_1 t}.$$ Since $\lambda_1=\alphap_N$, this proves the desired upper bound.
\end{proof}
\begin{proof}[Proof of Proposition \ref{prop:skeleton}]
Proposition \ref{prop:Wt} shows that
\begin{equation}\label{Eq:top2}
\lim_{N\to\infty}\frac{\bar \mu_{t_\delta}(W)}{\sqrt N} = 0,
\end{equation}
and Proposition \ref{prop:muW} shows that \eqref{Eq:top2} is sufficient to achieve the desired convergence of $K$-skeletons.
\end{proof}
\subsection{Relaxation of the censored dynamics}\label{sec:censoreddyn}
Consider the censored process obtained by suppressing all updates of the skeleton variables. That is, we use the censoring scheme $\mathcal{C}$ such that $\mathcal{C}(s)=\{u_1,\dots,u_{K-1}\}$, $s\ge 0$.
\begin{proposition}\label{prop:special2}
Let $P_{t,\mathcal{C}}^x=\delta_x P_{t,\mathcal{C}}$ and let $\pi_N(\cdot|y)$ denote the equilibrium distribution given the skeleton heights $y_i=x_{u_i}, i=1,\dots,K-1$. For any $\delta\in(0,1)$, define $K=\lfloor \delta^{-1}\rfloor$ and $s_\delta = \delta\frac{\log N}{2\alphap_N}$, and let $B_{N,\delta}$ denote the event
\begin{equation}\label{Eq:top0bn}
B_{N,\delta}=\Big\{x\in \Omega_N:\, \|x\|_\infty \le 2N,\; \max_{i=1,\dots,K} |x_{u_i}| \le N/2K\Big\}.
\end{equation}
Then there exists $\delta_0\in(0,1)$ such that for all fixed $\delta\in(0, \delta_0)$ and for all $N$ sufficiently large:
\begin{equation}\label{Eq:top10}
\sup_{x\in B_{N,\delta}}\| P_{s_\delta,\mathcal{C}}^x- \pi_N(\cdot|y)\|_{TV} \le \delta\,.
\end{equation}
\end{proposition}
\begin{proof}
The censored process is a collection of $K$ independent processes each describing the evolution of an interface on a segment of length $n:=\lfloor N/K\rfloor$, with fixed boundary heights $(y_{i-1},y_i)$, where $y_i=x_{u_i}$. If $ x\in B_{N,\delta}$ then the left and right boundary conditions of each interface satisfy $$|y_{i-1}-y_i|\le N/K\le 2n.$$
Moreover, if $x\in B_{N,\delta}$ then the initial condition satisfies $\|x\|_\infty\le 2N\le n^2$, if $N$ is large enough.
From the mixing time bound given in Corollary \ref{th:corol} (see Remark \ref{rem:uniformity}) it follows that for any given $\varepsilon\in (0,1)$, when $N$ is sufficiently large, each individual process has $\varepsilon$-mixing time bounded above by
\begin{equation}\label{Eq:crudemix}
C\, n^2\log n\le \frac{C }{K^2}\,N^2\log (N) \le s_{\delta}\,,
\end{equation}
if $\delta > 0$ is small enough. Thus the entire censored process satisfies
$$
\| P_{s_\delta,\mathcal{C}}^x- \pi_N(\cdot|y)\|_{TV} \le K \varepsilon\,.
$$
The claimed inequality follows by taking $\varepsilon=K^{-1}\delta$.
\end{proof}
\subsection{Proof of Theorem \ref{th:wedge}}
We want to prove that for any $\delta>0$,
$$ \lim_{N\to\infty}\| P_{t_\delta}^\wedge - \pi_N \|_{TV} =0\;,$$
where $ t_\delta=(1+\delta)\frac{\log N}{2\alphap_N}$.
Set $K=\lfloor \delta^{-1}\rfloor$ and let $\mathcal{C}'$ denote the censoring scheme defined by
$\mathcal{C}'(s)=\eset$ for $s\in[0, t_{\delta/2})$ and $\mathcal{C}'(s)=\{u_1,\dots,u_{K-1}\}$ for $s\ge t_{\delta/2}$.
Let also $P^\wedge_{t,*}=P^\wedge_{t,\mathcal{C}'}$ denote the corresponding censored process. From Proposition \ref{pro:censor} we have
$$
\| P_{t_\delta}^\wedge - \pi_N \|_{TV} \le \| P_{t_\delta,*}^\wedge - \pi_N \|_{TV}.
$$
We are going to construct a coupling of $P_{t_\delta,*}^\wedge$ and $\pi_N$.
We first couple the skeleton heights at time $t_{\delta/2}$.
Set $\mu=P^\wedge_{t_{\delta/2}}$,
and let $\mathbb{P}$ denote a coupling of $\mu$ and $\pi_N$. Let $(X,Z)$ denote the corresponding height variables, so that $X$ has distribution $\mu$ and $Z$ has distribution $\pi_N$.
The coupling $\mathbb{P}$ can be chosen in such a way that the skeleton variables are optimally coupled, that is
$$
\mathbb{P}(X_{u_i}=Z_{u_i}, \;i=1,\dots,K-1) =1- \|\bar \mu_{t_{\delta/2}} - \bar\pi_N\|_{TV}.
$$
Consider the event $$E=\{x\in\Omega_N: \,|x|_\infty\le 2N\}.$$
Monotonicity implies that $\pi_N\le \mu\le \nu^\wedge$ and therefore
\begin{align}\label{Eq:xnotine}
\mu(E^{\complement})&\le N \max_i \mu(|x_i|>2N)\\&
\le N \max_i\pi_N(x_i<-2N) + N \max_i\nu^\wedge(x_i>2N).
\end{align}
Corollary \ref{cor:bazics} implies
\begin{equation}\label{Eq:expott}
\max_{i\in \llbracket 1,N-1\rrbracket}\pi_N(x_i<-N)\le Ce^{-N/C},
\end{equation}
for some constant $C>0$. Raising the boundary condition from $(0,0)$ to $(\frac32N,\frac32N)$ and using monotonicity, we see that for all $k$
the random variable $X_k$ with distribution $\nu^\wedge$ is stochastically dominated by the random variable $X_k+3N/2$ where $X_k$ has distribution $\pi(\cdot\,|\,\min_i x_i\ge -N/2)$. Thus, reasoning as in \eqref{Eq:monobc} one finds
\begin{equation}\label{Eq:monobc1}
\max_{i\in \llbracket 1,N-1\rrbracket}\nu^\wedge(x_i>2N)\le Ce^{-N/C},
\end{equation}
for some constant $C>0$.
Define the event
$$
\mathcal{A}=\{X_{u_i}=Z_{u_i}, \,i=1,\dots,K-1\}\cap\{X\in B_{N,\delta}\},$$
where $B_{N,\delta}$ is given in Proposition \ref{prop:special2}. Let $F=\{x\in\Omega_N:\, \max_i|x_{u_i}|\le N/2K\}$ so that
$\{X\in B_{N,\delta}\}=\{X\in E\cap F\}$.
Then,
\begin{equation}\label{Eq:evea}
\mathcal{A}=\{X\in E\}\cap\{Z\in F\}\cap\{X_{u_i}=Z_{u_i}, \,i=1,\dots,K-1\}.
\end{equation}
Therefore,
$$
\mathbb{P}(\mathcal{A}^c)\le \|\bar \mu_{t_{\delta/2}} - \bar\pi_N\|_{TV} + \mu(X\notin E) + \pi_N(Z\notin F).
$$
From \eqref{Eq:xnotine}--\eqref{Eq:monobc1} we have $\mu(X\notin E)\le 2CNe^{-N/C}$.
From Corollary \ref{cor:bazics} and the union bound
one has that $$\pi_N(Z\notin F)\le C_1e^{-N/C_1},$$ for some $C_1=C_1(K)>0$ independent of $N$.
If the event $\mathcal{A}$ occurs, then
we couple the interfaces at time $t_\delta =t_{\delta/2}+s_{\delta/2}$ with the optimal coupling attaining the total variation distance $\| P_{s_{\delta/2},\mathcal{C}}^x- \pi_N(\cdot|y)\|_{TV}$, where $\mathcal{C}$ is as in Proposition \ref{prop:special2}. This shows that
$$
\| P_{t_\delta,*}^\wedge - \pi_N \|_{TV} \le \mathbb{P}(\mathcal{A}^c)
+ \sup_{x\in B_{N,\delta}}
\| P_{s_{\delta/2},\mathcal{C}}^x- \pi_N(\cdot|y)\|_{TV}.
$$
From \eqref{Eq:evea}, Proposition \ref{prop:skeleton} and Proposition \ref{prop:special2},
$$
\limsup_{N\to\infty}\| P_{t_\delta}^\wedge - \pi_N \|_{TV} \le 2\delta.
$$
The distance $\| P_{t_\delta}^\wedge - \pi_N \|_{TV}$ is decreasing as a function of $\delta$, and therefore we may take $\delta\to 0$ in the right hand side above to conclude.
\appendix
\end{document} |
\begin{document}
\title{A proximal average for prox-bounded functions}
\begin{abstract}
In this work, we construct a proximal average for two prox-bounded functions, which recovers the classical
proximal average for two convex functions. The new proximal average transforms continuously in epi-topology from one proximal hull to the other. When one of the functions is differentiable, the new proximal average is differentiable. We give characterizations for Lipschitz and single-valued proximal mappings and we show that the convex combination of convexified proximal mappings is always
a proximal mapping. Subdifferentiability and behaviors of infimal values and minimizers are also studied.
\end{abstract}
\noindent {\bfseries 2000 Mathematics Subject Classification:}\\
Primary 49J53; Secondary 26A51, 47H05, 26E60, 90C30.
\noindent {\bfseries Keywords:} Almost differentiable function,
arithmetic average,
convex hull, epi-average,
epi-convergence, Moreau envelope, Lasry--Lions envelope,
prox-bounded function, proximal average, proximal hull, proximal mapping,
resolvent,
subdifferential operator.
\section{Introduction}
The proximal average provides a novel technique for averaging convex functions, see \cite{convmono,proxbas}.
The proximal average has been used widely in applications such as machine learning \cite{reidwill,Yu13a}, optimization \cite{resaverage,wolenski,boyd14,planwang2016,zaslav}, matrix analysis \cite{kimlaws, lim18} and
modern monotone operator theory
\cite{simons}. The proximal mapping of the proximal average is precisely
the average of proximal mappings of the convex
functions involved. Averages of proximal mappings are
important in convex and nonconvex optimization algorithms;
see, e.g., \cite{convmono, aveproj}.
A proximal average for
possible nonconvex functions has long been sought.
In this work, we have proposed a proximal average for prox-bounded functions, which enjoy rich
theory in variational analysis and optimization.
Our proximal average
significantly extends the works of \cite{proxbas} from convex functions to
possibly nonconvex functions. The new average function provides an epicontinuous transformation
between proximal hulls of functions, and reverts to the convex proximal average
definition in the case of convex functions.
When studying the proximal average of possibly nonconvex functions, two fundamental issues arise. The first is when
the proximal mapping is convex-valued; the second is when the function can
be recovered from its proximal mapping. It turns out that
resolving both difficulties requires the `proximal'
condition in variational analysis.
\subsection{Outline}
The plan of the paper is as follows. In the following three subsections, we give basic concepts from variational analysis,
review related work in the literature and state the blanket assumptions of the paper.
In Section \ref{s:prel}, we prove some interesting and new
properties of proximal functions, proximal mappings and envelopes. Section \ref{s:conv}
gives
an explicit relationship between the convexified proximal mapping and the Clarke
subdifferential of the Moreau envelope. Section \ref{s:char} provides
characterizations of Lipschitz and single-valued proximal mappings.
In Section \ref{s:main}, we
define the proximal average for prox-bounded functions and give a systematic study
of its properties. Relationships to arithmetic average and epi-average and
full epi-continuity of the proximal average are studied
in Section \ref{s:rela}. Section \ref{s:opti} is devoted to optimal
value and minimizers and convergence in minimization of the proximal average.
In Section \ref{s:subd}, we investigate the subdifferentiability and
differentiability of the proximal average.
As an example, the
proximal average for quadratic functions is given in Section \ref{s:quad}.
Finally, Section \ref{s:theg} illustrates the difficulty when the proximal mapping
is not convex-valued.\par
Two distinguished features
of our proximal average
deserve to be singled out: whenever one of the functions is differentiable,
the new proximal average is differentiable, and the convex combination of
convexified proximal mappings is always a proximal mapping.
While epi-convergence
\cite{attouch1984, beertopologies} plays a dominant role in our analysis of
convergence
in minimization, the class of proximal functions, which
is significantly broader than the class of convex functions, is indispensable
for studying the proximal average.
In carrying out the proofs later, we often cite results from
the standard reference Rockafellar--Wets \cite{rockwets}.
\subsection{Constructs from variational analysis}
In order to define the proximal average of possibly nonconvex functions, we utilize the Moreau envelope
and proximal hull. In what follows, $\operatorname{\mathbb{R}}^n$ is the $n$-dimensional Euclidean space
with Euclidean norm $\|x\|=\sqrt{\scal{x}{x}}$ and inner product
$\scal{x}{y}=\sum_{i=1}^{n}x_{i}y_{i}$ for $x,y\in\operatorname{\mathbb{R}}^n$.
\begin{df}
For a proper function $f:\mathbb{R}^n\rightarrow\overline{\mathbb{R}}$ and parameters $0<\mu<\lambda$, the
{Moreau envelope}
function $e_{\lambda}f$ and {proximal mapping} are defined, respectively, by
$$e_{\lambda}f(x)=\inf_{w}\left\{f(w)+\frac{1}{2\lambda}\|w-x\|^2\right\},
\quad \operatorname{Prox}_{\lambda}f(x)=\operatornamewithlimits{argmin}_{w}\left\{f(w)+\frac{1}{2\lambda}\|w-x\|^2\right\};$$
the {proximal hull} function $h_{\lambda}f$ is defined by
$$h_{\lambda}f(x)=\inf_{w}\left\{e_{\lambda}f(w)-\frac{1}{2\lambda}\|x-w\|^2\right\};$$
the {Lasry--Lions envelope}
$e_{\lambda,\mu}f$ is defined by
$$e_{\lambda,\mu}f(x)=\sup_{w}\left\{e_{\lambda}f(w)-\frac{1}{2\mu}\|x-w\|^2\right\}.$$
\end{df}
\begin{df} The function $f:\mathbb{R}^n\rightarrow \overline{\mathbb{R}}$ is prox-bounded if
there exist $\lambda>0$ and $x\in \operatorname{\mathbb{R}}^n$ such that
$e_{\lambda}f(x)>-\infty.$
The supremum of the set of all such $\lambda$ is the threshold $\lambda_{f}$
of prox-boundedness for $f$.
\end{df}
Any function $f:\mathbb{R}^{n}\rightarrow\overline{\mathbb{R}}$ that is bounded
below by an affine function has threshold of prox-boundedness $\lambda_{f}=\infty$; cf.
\cite[Example 3.28]{rockwets}. A differentiable function $f$ with a Lipschitz continuous
gradient has $\lambda_{f}>0$.
Our notation is standard. For every nonempty set $S\subset\mathbb{R}^n$, $\operatornamewithlimits{conv} S$, $\operatorname{cl} S$ and $\iota_{S}$ denote the \emph{convex hull}, \emph{closure} and
\emph{indicator function} of the set $S$, respectively.
For a proper, lower semicontinuous (lsc) function $f:\mathbb{R}^n\rightarrow\overline{\mathbb{R}}$, $\operatornamewithlimits{conv} f$ is its convex hull and $f^*$ is its \emph{Fenchel conjugate}.
We let $\inf f$ and $\operatornamewithlimits{argmin} f$ denote the infimum and
the set of minimizers of $f$ on $\mathbb{R}^n$, respectively. We call
$f$ \emph{level-coercive} if
$$\liminf_{\|x\|\rightarrow\infty}\frac{f(x)}{\|x\|}>0,$$
and \emph{coercive} if
$$\liminf_{\|x\|\rightarrow\infty}\frac{f(x)}{\|x\|}=\infty.$$
We use $\partial f$, $\hat{\partial }f, \partial_{L}f, \partial_{C}f$ for the Fenchel subdifferential, Fr\'echet subdifferential, limiting subdifferential
and Clarke subdifferential of $f$, respectively. More precisely,
at a point $x\in\operatorname{dom} f$, the \emph{Fenchel subdifferential}
of $f$ at $x$ is the set
$$\partial f(x)=\{s\in\mathbb{R}^n:\ f(y) \geq f(x)+\scal{s}{y-x} \text{ for all $y\in\mathbb{R}^n$}\};$$
the \emph{Fr\'echet subdifferential} of $f$ at $x$ is the set
$$\hat{\partial} f(x)=\{s\in\mathbb{R}^n:\ f(y) \geq f(x)+\scal{s}{y-x}+o(\|y-x\|)\};$$
the \emph{limiting subdifferential} of $f$ at $x$ is
$$\partial_{L}f(x)=\{v\in\mathbb{R}^n:\ \exists \text{ sequences } x_{k} \att x \text{ and }
s_{k}\in\hat{\partial}f(x_{k}) \text{ with } s_{k}\rightarrow v\},$$
where $x_{k} \att x$ means $x_{k}\rightarrow x$ and $f(x_{k})\rightarrow f(x)$.
We let $\operatorname{Id}:\mathbb{R}^n\rightarrow\mathbb{R}^n: x\mapsto x$ be the identity mapping and $\ensuremath{\,\mathfrak{q}}=\frac{1}{2}\|\cdot\|^2$.
The mapping $J_{\mu\partial_{L}f}=(\operatorname{Id}+\mu\partial_{L}f)^{-1}$ is
called the \emph{resolvent} of $\mu\partial_{L}f$;
cf. \cite[page 539]{rockwets}.
When $f$ is locally Lipschitz at $x$, the \emph{Clarke subdifferential}
$\partial_{C}f$ at $x$ is $\partial_{C} f(x)=
\operatornamewithlimits{conv} \partial_{L}f(x)$.
For further details on subdifferentials, see \cite{optanal,mordukhovich2006variational,rockwets}.
For $f_{1},f_{2}:\mathbb{R}^n\rightarrow\overline{\mathbb{R}}$, the \emph{infimal convolution}
(or epi-sum)
of $f_{1}, f_{2}$ is defined by
$$(\forall x\in\mathbb{R}^n)\ f_{1}\Box f_{2}(x)=\inf_{w}\{f_{1}(x-w)+f_{2}(w)\},$$
and it is exact at $x$ if $\exists~w\in\mathbb{R}^n$ such that $f_{1}\Box f_{2}(x)=f_{1}(x-w)+f_{2}(w)$;
$f_{1}\Box f_{2}$ is exact if it is exact at every point of its domain.
\subsection{Related work}
A comparison to known work in the literature is in order.
In \cite{zhang2,zhang1}, Zhang et al.\ defined a lower compensated convex
transform for $0<\mu<+\infty$ by
$$C_{\mu}^{l}(f)=\operatornamewithlimits{conv}(2\mu\ensuremath{\,\mathfrak{q}}+f)-2\mu\ensuremath{\,\mathfrak{q}}.$$
The lower compensated convex transform is the proximal
hull.
In \cite{zhang2}, Zhang, Crooks and Orlando gave a comprehensive
study on the average compensated
convex approximation,
which is an arithmetic average of the proximal hull and the upper proximal hull.
While the proximal hull is a common ingredient, our work and theirs
are completely different.
By nature, the proximal mapping of the
proximal average for convex functions
is exactly the convex combination
of proximal mappings of individual convex functions \cite{proxbas}.
In \cite{proxave}, Hare proposed a proximal average by
$$\operatorname{\mathcal{PA}}_{1/\mu}=-e_{1/(\mu+\alpha(1-\alpha))}(-\alpha e_{1/\mu}f-(1-\alpha)e_{1/\mu}g).$$
For this average,
$x\mapsto\operatorname{\mathcal{PA}}_{1/\mu}(x)$ is $\mathcal{C}^{1+}$ for every $\alpha\in]0,1[$, and enjoys other nice stabilities
with respect to $\alpha$, see, e.g., \cite[Theorem 4.6]{proxave}. However,
this average definition has two disadvantages.\\\noindent (i) Even when both $f,g$ are convex, it does not
recover the proximal average for convex functions: $$-e_{1/\mu}(-\alpha e_{1/\mu}f-
(1-\alpha)e_{1/\mu}g).$$ (ii) Neither the proximal mapping
$\operatorname{Prox}_{1/(\mu+\alpha(1-\alpha))}\operatorname{\mathcal{PA}}_{1/\mu}$ nor
$\operatorname{Prox}_{1/\mu}\operatorname{\mathcal{PA}}_{1/\mu}$
is the average of the proximal mappings
$\operatorname{Prox}_{1/\mu}f$ and $\operatorname{Prox}_{1/\mu}g$.\par
In \cite{goebel2010proximal}, Goebel introduced a proximal average for saddle functions
by using extremal convolutions:
$$\mathcal{P}_{\mu,\eta}^{\cup\cap}
=\big(\lambda_{1}{{\,\text{\ding{75}}}} (f_{1}+\mu{{\,\text{\ding{75}}}}\ensuremath{\,\mathfrak{q}}_{x}-\eta{{\,\text{\ding{75}}}} \ensuremath{\,\mathfrak{q}}_{y})\big){{\,\text{\ding{57}}\,}} \big(\lambda_{2}{{\,\text{\ding{75}}}} (f_{2}
+\mu{{\,\text{\ding{75}}}}\ensuremath{\,\mathfrak{q}}_{x}-\eta{{\,\text{\ding{75}}}}\ensuremath{\,\mathfrak{q}}_{y})\big)-
\mu{{\,\text{\ding{75}}}}\ensuremath{\,\mathfrak{q}}_{x}+\eta{{\,\text{\ding{75}}}}\ensuremath{\,\mathfrak{q}}_{y},$$
in which $f_{1}, f_{2}: \mathbb{R}^m\times \mathbb{R}^n\rightarrow\overline{\mathbb{R}} $ are
saddle functions, $\ensuremath{\,\mathfrak{q}}_{x}(x,y)=\ensuremath{\,\mathfrak{q}}(x), \ensuremath{\,\mathfrak{q}}_{y}(x,y)
=\ensuremath{\,\mathfrak{q}}(y)$, $\mu, \eta>0$, $\lambda_{1}+\lambda_{2}=1$ with $\lambda_{i}>0$, and
${{\,\text{\ding{57}}\,}}$ is the extremal convolution.
Some nice results about self-duality with respect to saddle function conjugacy and partial conjugacy are put forth and proved by Goebel \cite{goebel2010proximal}.
Goebel's average is the proximal average for convex functions when each $f_{i}$
is convex. However, the proximal mapping of $\operatorname{Prox}_{\lambda}\mathcal{P}_{\mu,\eta}^{\cup\cap}$ is not
the convex combination of $\operatorname{Prox}_{\lambda}f_1$ and $\operatorname{Prox}_{\lambda} f_2$.
\subsection{Blanket assumptions}\label{s:assump}
Throughout the paper,
the functions $f,g:\mathbb{R}^n\rightarrow(-\infty,+\infty]$ are proper, lsc
and prox-bounded with thresholds $\lambda_{f}, \lambda_{g}>0$ respectively,
$\operatorname{\bar{\lambda}}=\min\{\lambda_{f},\lambda_{g}\}$, $\lambda>0$,
$\mu>0$ and $\alpha\in [0,1]$.
\section{Preliminaries}\label{s:prel}
In this section, we
collect several facts and present some auxiliary results
on proximal mappings of proximal functions, Moreau envelopes
and proximal hulls, which will be used in the sequel.
\subsection{Relationship among three regularizations: $e_{\lambda}f$, $h_{\lambda}f$, and $e_{\lambda,\mu}f$}
Some key properties about these regularizations come as follows.
\begin{Fact}\ensuremath{\varnothing}h{(\cite[Example 11.26]{rockwets})}\label{l:dcform}
Let $0<\lambda<\lambda_{f}$.
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:mor:l}
The Moreau envelope
$$e_{\lambda}f=
-\left(f+\frac{1}{2\lambda}\|\cdot\|^2\right)^*\bigg(\frac{\cdot}{\lambda}\bigg)+
\frac{1}{2\lambda}\|\cdot\|^2$$
is locally Lipschitz.
\item The proximal hull satisfies
$$h_{\lambda}f+\frac{1}{2\lambda}\|\cdot\|^2=\bigg(f+\frac{1}{2\lambda}\|\cdot\|^2\bigg)^{**}.$$
\end{enumerate}
\end{Fact}
\begin{Fact}\emph{(\cite[Examples 1.44, 1.46, Exercise 1.29]{rockwets})}\label{f:m-p-l}
Let $0<\mu<\lambda<\lambda_{f}$. One has
\begin{enumerate}[label=\rm(\alph*)]
\item $h_{\lambda}f=-e_{\lambda}(-e_{\lambda}f)$,
\item \label{i:p:hull}
$e_{\lambda} f=e_{\lambda}(h_{\lambda}f)$,
\item $h_{\lambda}(h_{\lambda}f)=h_{\lambda}f$,
\item\label{i:d:env}
$e_{\lambda,\mu}f=-e_{\mu}(-e_{\lambda}f)=h_{\mu}(e_{\lambda-\mu}f)=e_{\lambda-\mu}(h_{\lambda}f)$,
\item $e_{\lambda_{1}}(e_{\lambda_{2}}f)=e_{\lambda_{1}+\lambda_{2}}f$ for
$\lambda_{1}, \lambda_{2}>0$.
\end{enumerate}
\end{Fact}
For more details about these regularizations, we refer the reader to
\cite{attouch1990approximation,infconv,proxhilbert,diffprop} and \cite[Chapter 1]{rockwets}.
\subsection{Proximal functions}
The concept of $\lambda$-proximal functions will play an important role. This
subsection is dedicated to properties of $\lambda$-proximal functions.
\begin{df} We say that $f$ is \emph{$\lambda$-proximal}
if $f+\frac{1}{2\lambda}\|\cdot\|^2$ is convex.
\end{df}
\begin{lem}\label{l:env:neg}
\begin{enumerate}[label=\rm(\alph*)]
\item \label{i:e:1} The negative Moreau envelope $-e_{\lambda}f$ is always $\lambda$-proximal.
\item\label{i:e:2}
If $e_{\lambda}f$ is $\mathcal{C}^{1}$, then $f+\frac{1}{2\lambda}\|\cdot\|^2$ is convex,
i.e., $f$ is $\lambda$-proximal.
\end{enumerate}
\end{lem}
\begin{proof} By Fact~\ref{l:dcform},
\begin{equation}\label{e:moreau}
(\forall x\in\operatorname{\mathbb{R}}^n)\ \frac{1}{2\lambda}\|x\|^2-e_{\lambda}f(x)=
\bigg(f+\frac{1}{2\lambda}\|\cdot\|^2\bigg)^{*}\bigg(\frac{x}{\lambda}\bigg).
\end{equation}
\ref{i:e:1}: This is clear from \eqref{e:moreau}.
\noindent\ref{i:e:2}: By \eqref{e:moreau}, the assumption ensures that
$\big(f+\frac{1}{2\lambda}\|\cdot\|^2\big)^{*}\big(\frac{x}{\lambda}\big)$ is differentiable.
It follows from Soloviov's theorem \cite{soloviov}
that $f+\frac{1}{2\lambda}\|\cdot\|^2$ is convex.
\end{proof}
While for convex functions, proximal mappings and resolvents are the same, they
differ for nonconvex functions in general.
\begin{Fact}\emph{(\cite[Example 10.2]{rockwets})}
For any proper, lsc function $f:\mathbb{R}^n\rightarrow(-\infty,+\infty]$ and
any $\mu>0$, one has
$$(\forall x\in\mathbb{R}^n)\ P_{\mu}f(x)\subseteq J_{\mu\partial_{L}f}(x).$$
When $f$ is convex, the inclusion holds as an equation.
\end{Fact}
\noindent However, proximal functions have surprising properties.
\begin{prop}\label{p:resolventf}
Let $0<\mu<\lambda_{f}$. Then
the following are equivalent:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:resolvent1}
$\operatorname{Prox}_{\mu}f=J_{\mu\partial_{L}f}$,
\item\label{i:resolvent2} $f$ is $\mu$-proximal,
\item\label{i:resolvent3} $\operatorname{Prox}_{\mu}f$ is maximally monotone,
\item\label{i:resolvent4} $\operatorname{Prox}_{\mu}f$ is convex-valued.
\end{enumerate}
\end{prop}
\begin{proof}
\ref{i:resolvent2}$\Rightarrow$\ref{i:resolvent1}: See \cite[Proposition 12.19]{rockwets}
\& \cite[Example 11.26]{rockwets}.
\noindent\ref{i:resolvent1}$\Rightarrow$\ref{i:resolvent2}: As $\operatorname{Prox}_{\mu}f$ is always monotone,
$(\operatorname{Prox}_{\mu}f)^{-1}=(\operatorname{Id}+\mu\partial_{L}f)$ is monotone and it suffices to apply
\cite[Proposition 12.19(c)$\Rightarrow$(b)]{rockwets}.
\noindent\ref{i:resolvent2}$\Leftrightarrow$\ref{i:resolvent3}:
See \cite[Proposition 12.19]{rockwets}.
\noindent\ref{i:resolvent3}$\Rightarrow$\ref{i:resolvent4}: This is clear.
\noindent\ref{i:resolvent4}$\Rightarrow$\ref{i:resolvent3}: By \cite[Example 1.25]{rockwets},
$\operatorname{Prox}_{\mu}f$ is nonempty, compact-valued and monotone with full domain. As $\operatorname{Prox}_{\mu}f$ is convex-valued, it suffices to apply \cite{lohne08}.
\end{proof}
\begin{lem}\label{l:prox:map}
Let $f$ be $\lambda$-proximal and $0<\mu<\lambda$. Then
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:p:convex} $\operatorname{Prox}_{\lambda}f$ is convex-valued,
\item\label{i:p:single} $\operatorname{Prox}_{\mu}f$ is single-valued.
\end{enumerate}
Consequently, $\operatorname{Prox}_{\mu}f$ is maximally monotone if $0<\mu\leq\lambda$.
\end{lem}
\begin{proof}
\ref{i:p:convex}:
Observe that
$$e_{\lambda}f(x)=\inf_{y}\left\{f(y)+\frac{1}{2\lambda}\|y\|^2-\langle\frac{x}{\lambda}, y
\rangle\right\}+\frac{1}{2\lambda}\|x\|^2.$$
Since
$f+\frac{1}{2\lambda}\|\cdot\|^2-\langle{\frac{x}{\lambda}},\cdot\rangle$ is convex, $\operatorname{Prox}_{\lambda}f(x)$ is convex.
\noindent\ref{i:p:single}: This follows from the fact that
$f+\frac{1}{2\mu}\|\cdot\|^2-\langle\frac{x}{\mu},\cdot\rangle$ is strictly convex
and coercive.
When $0<\mu<\lambda$, $\operatorname{Prox}_{\mu}f$ is continuous and monotone, so maximally monotone by
\cite[Example 12.7]{rockwets}. For the maximal monotonicity of $\operatorname{Prox}_{\lambda}f$, apply
\ref{i:p:convex} and
\cite{lohne08} or Lemma~\ref{l:prox:grad}.
\end{proof}
The set of proximal functions is a convex cone. In particular, one has the following.
\begin{prop}\label{p:p:cone}
Let $f_1$ be $\lambda_1$-proximal and $f_2$ be $\lambda_2$-proximal. Then for any $\alpha,\beta>0$, the function $\alpha f_1+\beta f_2$ is $\frac{\lambda_1\lambda_2}{\beta\lambda_1+\alpha\lambda_2}$-proximal.
\end{prop}
\begin{proof}
Since $f_1+\frac{1}{2\lambda_1}\|\cdot\|^2$ and $f_2+\frac{1}{2\lambda_2}\|\cdot\|^2$ are convex, so are $\alpha\left(f_1+\frac{1}{2\lambda_1}\|\cdot\|^2\right)$, $\beta\left(f_2+\frac{1}{2\lambda_2}\|\cdot\|^2\right)$ and their sum:
$$\alpha f_1+\beta f_2+\left(\frac{\alpha}{2\lambda_1}+\frac{\beta}{2\lambda_2}\right)\|\cdot\|^2=\alpha f_1+\beta f_2+\frac{\beta\lambda_1+\alpha\lambda_2}{2\lambda_1\lambda_2}\|\cdot\|^2.$$
Therefore, $\alpha f_1+\beta f_2$ is $\frac{\lambda_1\lambda_2}{\beta\lambda_1+\alpha\lambda_2}$-proximal.
\end{proof}
\subsection{The proximal mapping of the proximal hull}
\begin{lem}\label{l:e:h}Let $0<\lambda<\lambda_{f}$.
One has
\begin{equation}\label{i:hull:prox}
\operatorname{Prox}_{\lambda}(h_{\lambda}f)=\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}f.
\end{equation}
\end{lem}
\begin{proof}
Applying \cite[Example 10.32]{rockwets} to
$-e_{\lambda} f=-e_{\lambda}(h_{\lambda}f)$ yields
$$\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}(h_{\lambda}f)=\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}f.$$
Since $h_{\lambda}f$ is $\lambda$-proximal, by Lemma~\ref{l:prox:map} we have
$\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}(h_{\lambda}f)=\operatorname{Prox}_{\lambda}(h_{\lambda}f).$
Hence \eqref{i:hull:prox} follows.
\end{proof}
\begin{lem} Let $0<\lambda<\lambda_{f}$. The following are equivalent:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{p:hull} $\operatorname{Prox}_{\lambda}(h_{\lambda}f)=\operatorname{Prox}_{\lambda}f$,
\item\label{p:function} $f$ is $\lambda$-proximal.
\end{enumerate}
\end{lem}
\begin{proof}
\ref{p:hull}$\Rightarrow$\ref{p:function}: Since $\operatorname{Prox}_{\lambda}(h_{\lambda}f)=
\operatornamewithlimits{conv} \operatorname{Prox}_{\lambda}(h_{\lambda}f)$, $\operatorname{Prox}_{\lambda} f$ is upper
semicontinuous, convex and compact valued, and monotone with full domain, so
maximally monotone in view of \cite{lohne08} or Lemma~\ref{l:prox:grad}.
By \cite[Proposition 12.19]{rockwets},
$f+\frac{1}{2\lambda}\|\cdot\|^2$ is convex, equivalently,
$f$ is $\lambda$-proximal by \cite[Example 11.26]{rockwets}.
\noindent\ref{p:function}$\Rightarrow$\ref{p:hull}: As $f$ is $\lambda$-proximal,
$\operatorname{Prox}_{\lambda}f$ is convex-valued by Lemma~\ref{l:prox:map}. Then Lemma~\ref{l:e:h} gives
$\operatorname{Prox}_{\lambda}(h_{\lambda}f)=\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}f=\operatorname{Prox}_{\lambda}f.$
\end{proof}
\begin{cor} If $f\neq h_{\lambda}f$, then $\operatorname{Prox}_{\lambda}(h_{\lambda}f)
\neq\operatorname{Prox}_{\lambda}f.$
\end{cor}
\subsection{Proximal mappings and envelopes}
\begin{lem}\label{l:env:prox}
Let $0<\mu<\lambda<\operatorname{\bar{\lambda}}$.
The following are equivalent:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:env:fg}$e_{\lambda}f=e_{\lambda}g$,
\item\label{i:phull:fg}
$h_{\lambda}f=h_{\lambda}g$,
\item \label{i:conv:fg} $\operatornamewithlimits{conv}\left(f+\frac{1}{2\lambda}\|\cdot\|^2\right)=\operatornamewithlimits{conv}\left(g+\frac{1}{2\lambda}\|\cdot\|^2\right)$,
\item\label{i:double:fg}
$e_{\lambda,\mu}f=e_{\lambda,\mu}g$,
\item\label{i:prox:initial}
$\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}f=\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}g$, and for some $x_{0}\in\operatorname{\mathbb{R}}^n$ one has
$e_{\lambda}f(x_{0})=e_{\lambda}g(x_{0})$.
\end{enumerate}
Under any one of the conditions \ref{i:env:fg}--\ref{i:prox:initial}, one has
\begin{equation}\label{e:convexhull}
\overline{\operatornamewithlimits{conv}} f=\overline{\operatornamewithlimits{conv}} g.
\end{equation}
\end{lem}
\begin{proof}
\ref{i:env:fg}$\Rightarrow$\ref{i:phull:fg}:
We have
$-e_{\lambda}f=-e_{\lambda}g$ implies $-e_{\lambda}(-e_{\lambda}f)=-e_{\lambda}(-e_{\lambda}g)$,
which is \ref{i:phull:fg}.
\noindent\ref{i:phull:fg}$\Rightarrow$\ref{i:env:fg}:
This follows from $e_{\lambda}f=e_{\lambda}(h_{\lambda}f)=e_{\lambda}(h_{\lambda}g)=e_{\lambda}g.$
\noindent\ref{i:phull:fg}$\Leftrightarrow$\ref{i:conv:fg}: Since $\lambda<\operatorname{\bar{\lambda}}$, we have that
$f+\frac{1}{2\lambda}\|\cdot\|^2$ and $g+\frac{1}{2\lambda}\|\cdot\|^2$ are coercive, so
$\operatornamewithlimits{conv}\left(f+\frac{1}{2\lambda}\|\cdot\|^2\right)$ and $\operatornamewithlimits{conv}\left(g+\frac{1}{2\lambda}\|\cdot\|^2\right)$
are lsc. Fact~\ref{l:dcform} gives
$$h_{\lambda}f=\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\lambda}\|\cdot\|^2\bigg)-\frac{1}{2\lambda}\|\cdot\|^2,$$
$$h_{\lambda}g=\operatornamewithlimits{conv}\bigg(g+\frac{1}{2\lambda}\|\cdot\|^2\bigg)-\frac{1}{2\lambda}\|\cdot\|^2.$$
\noindent\ref{i:double:fg}$\Leftrightarrow$\ref{i:env:fg}: Invoking Fact~\ref{f:m-p-l}, we have
\begin{align*}
e_{\lambda,\mu}f =e_{\lambda,\mu}g &\Leftrightarrow h_{\mu}(e_{\lambda-\mu}f)=h_{\mu}(e_{\lambda-\mu}g)\\
& \Leftrightarrow e_{\mu}(h_{\mu}(e_{\lambda-\mu}f))=e_{\mu}(h_{\mu}(e_{\lambda-\mu}g))\\
& \Leftrightarrow e_{\mu}(e_{\lambda-\mu}f)=e_{\mu}(e_{\lambda-\mu}g)\\
& \Leftrightarrow e_{\lambda}f=e_{\lambda}g. \
\end{align*}
\noindent\ref{i:env:fg}$\Rightarrow$\ref{i:prox:initial}: The Moreau envelope $e_{\lambda}f(x)=e_{\lambda}g(x)$
for every $x\in\mathbb{R}^n$. Apply \cite[Example 10.32]{rockwets}
to $-e_{\lambda}f = -e_{\lambda}g$ to get
$$(\forall x\in\mathbb{R}^n)\ \frac{\operatornamewithlimits{conv} \operatorname{Prox}_{\lambda}f(x)-x}{\lambda}=\frac{\operatornamewithlimits{conv} \operatorname{Prox}_{\lambda}g(x)-x}{\lambda},
$$
which gives \ref{i:prox:initial} after simplifications.
\noindent\ref{i:prox:initial}$\Rightarrow$\ref{i:env:fg}: Since
both $e_{\lambda}f$ and $e_{\lambda}g$ are locally Lipschitz,
$\operatornamewithlimits{conv} \operatorname{Prox}_{\lambda}f=\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}g$
implies
$-e_{\lambda}f=-e_{\lambda}g+\text{constant}$ by
\cite[Example 10.32]{rockwets}. The $\text{constant}$ has to be zero by
$e_{\lambda}f(x_{0})=e_{\lambda}g(x_{0})$. Thus, \ref{i:env:fg} holds.
Equation~\eqref{e:convexhull} follows from the equivalence of \ref{i:env:fg}--\ref{i:double:fg}
and taking the Fenchel conjugate to $e_{\lambda}f=e_{\lambda}g$, followed
by cancelation of terms and taking the Fenchel conjugate again.
\end{proof}
The notion of `proximal' is instrumental.
\begin{cor}\label{c:needed1} Let $0<\mu\leq \lambda <\operatorname{\bar{\lambda}}$, and
let $f,g$ be $\lambda$-proximal.
Then $e_{\mu}f=e_{\mu}g$ if and only if $f=g$.
\end{cor}
\begin{proof} Since $\mu\leq\lambda$, both $f,g$ are also $\mu$-proximal, so
$f=h_{\mu}f, g=h_{\mu}g$. Lemma~\ref{l:env:prox}\ref{i:env:fg}$
\Leftrightarrow$\ref{i:phull:fg} applies.
\end{proof}
\begin{prop}\label{p:needed1}
Let $0<\mu <\operatorname{\bar{\lambda}}$, and let
$\operatorname{Prox}_{\mu}f=\operatorname{Prox}_{\mu}g$. If $f, g$ are $\mu$-proximal, then $f-g\equiv
\text{constant}$.
\end{prop}
\begin{proof}
As $\operatorname{Prox}_{\mu}f=\operatorname{Prox}_{\mu}g$,
by \cite[Example 10.32]{rockwets},
$\partial(-e_{\mu}f)=\partial(-e_{\mu}g)$. Since
both $-e_\mu f, -e_{\mu}g$ are locally Lipschitz and Clarke regular,
we obtain that there exists $-c\in\operatorname{\mathbb{R}}$ such that
$-e_\mu f=-e_{\mu}g-c$. Because $f, g$ are $\mu$-proximal, we have
$$f=-e_{\mu}(-e_{\mu}f)=-e_{\mu}(-e_{\mu}g-c)=-e_{\mu}(-e_{\mu}g)+c=g+c,$$
as required.
\end{proof}
\subsection{An example}
The following example shows that one cannot remove the assumption of
$f, g$ being $\mu$-proximal in Proposition~\ref{p:resolventf}, Corollary~\ref{c:needed1}
and Proposition~\ref{p:needed1}.
\begin{ex}\label{e:proximal:fk}
Consider the function
$$f_{k}(x)=\max\{0,(1+\varepsilon_{k})(1-x^2)\},$$
where $\varepsilon_{k}>0$.
It is easy to check that $f_{k}$ is $1/(2(1+\varepsilon_{k}))$-proximal, but not $1/2$-proximal.
\end{ex}
\noindent {\sl Claim 1: The functions $f_{k}$ have the same proximal mappings
and Moreau envelopes for all $k\in\operatorname{\mathbb{N}}$. However, whenever $\varepsilon_{k_{1}}\neq\varepsilon_{k_{2}}$, $f_{k_{1}}-f_{k_{2}}=(\varepsilon_{k_{1}}-\varepsilon_{k_{2}})f\neq \text{constant}$.}
Indeed, simple calculus gives that for every $\varepsilon_{k}>0$ one has
\begin{equation}\label{e:proximal2}
\operatorname{Prox}_{1/2}f_{k}(x)=\begin{cases}
x &\text{ if $x\geq 1$,}\\
1 &\text{ if $0<x<1$,}\\
\{-1,1\} &\text{ if $x=0$,}\\
-1 &\text{ if $-1<x<0$,}\\
x &\text{ if $x\leq -1$,}
\end{cases}
\end{equation}
and
$$e_{1/2}f_{k}(x)=\begin{cases}
0 &\text{ if $x\geq 1$,}\\
(x-1)^2 &\text{ if $0\leq x<1$,}\\
(x+1)^2 &\text{ if $-1<x<0$,}\\
0 &\text{ if $x\leq -1$.}
\end{cases}
$$
\noindent{\sl Claim 2: $\operatorname{Prox}_{1/2}f_{k}\neq J_{1/2\partial_{L}f_{k}},$ i.e., the
proximal mapping
differs from the resolvent.}
Since $J_{1/2\partial_{L}f_{k}}=(\operatorname{Id}+1/2\partial_{L}f_{k})^{-1}$ and
$$\partial_{L}f_{k}(x)
=\begin{cases}
0 & \text{ if $x<-1$,}\\
[0,2(1+\varepsilon_{k})] &\text{ if $x=-1$,}\\
-2(1+\varepsilon_{k})x &\text{ if $-1<x<1$,}\\
[-2(1+\varepsilon_{k}),0] &\text{ if $x=1$,}\\
0 & \text{ if $x>1$},
\end{cases}
$$
we obtain
$$J_{1/2\partial_{L}f_{k}}(x)=
\begin{cases}
x & \text{ if $x<-1$,}\\
-1 &\text{ if $-1\leq x\leq \varepsilon_{k}$,}\\
-\frac{x}{\varepsilon_{k}} &\text{ if $-\varepsilon_{k}<x<\varepsilon_{k}$,}\\
1 &\text{ if $-\varepsilon_{k}\leq x\leq 1$,}\\
x & \text{ if $x>1$},
\end{cases}
$$
equivalently,
$$J_{1/2\partial_{L}f_{k}}(x)=
\begin{cases}
x & \text{ if $x<-1$,}\\
-1 &\text{ if $-1\leq x< -\varepsilon_{k}$,}\\
\left\{-1,-\frac{x}{\varepsilon_{k}},1\right\} &\text{ if $-\varepsilon_{k}\leq x\leq \varepsilon_{k}$,}\\
1 &\text{ if $\varepsilon_{k}< x\leq 1$,}\\
x & \text{ if $x>1$},
\end{cases}
$$
which does not equal \eqref{e:proximal2}.
\section{The convexified proximal mapping and Clarke subdifferential of the Moreau envelope}\label{s:conv}
The following result gives the relationship between the Clarke subdifferential
of the Moreau envelope and the convexified proximal mapping.
\begin{lem}\label{l:prox:grad}
For $0<\mu<\lambda_{f}$, the following hold.
\begin{enumerate}[label=\rm(\alph*)]
\item \label{i:fplus:p}The convex hull
\begin{equation*}\label{e:convp}
\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}f=\partial \bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*.
\end{equation*}
In particular, $\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}f$ is maximally monotone.
\item\label{i:fplus:n}The limiting subdifferential
\begin{equation*}
-\partial_{L} \bigg(-\bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*\bigg)
\subseteq \operatorname{Prox}_{\mu}f.
\end{equation*}
\item \label{e:env:clarke} The Clarke subdifferential
\begin{equation}\label{e:clarkesub}
\partial_{C}(e_{\mu}f)=-\partial_{L}(-e_{\mu}f)=\frac{\operatorname{Id}-\operatornamewithlimits{conv} \operatorname{Prox}_{\mu}f}{\mu}.
\end{equation}
If, in addition, $f$ is $\mu$-proximal, then
\begin{equation}\label{e:clarke:prox}
\partial_{C}(e_{\mu}f)=\frac{\operatorname{Id}-\operatorname{Prox}_{\mu}f}{\mu}.
\end{equation}
\end{enumerate}
\end{lem}
\begin{proof} \ref{i:fplus:p}: By Fact~\ref{l:dcform},
\begin{equation}\label{e:moreau:d}
-e_{\mu}f(x)=-\frac{1}{2\mu}\|x\|^2+\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\left(\frac{x}{\mu}\right).
\end{equation}
Using \cite[Example 10.32]{rockwets} and the subdifferential sum rule \cite[Corollary 10.9]{rockwets}, we get
$$
\frac{\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}f(x)-x}{\mu} =\partial_{L} (-e_{\mu}f)(x)=-\frac{x}{\mu}
+\partial \bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\left(\frac{x}{\mu}\right).
$$
Simplification gives
\begin{align*}
\operatornamewithlimits{conv} \operatorname{Prox}_{\mu}f(x) &
=\partial \mu \bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\left(\frac{x}{\mu}\right)\\
& =\partial \bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*(x).
\end{align*}
Since $\mu f+\frac{1}{2}\|\cdot\|^2$ is coercive, we conclude that
$\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$ is a continuous convex function, so
$\operatornamewithlimits{conv} \operatorname{Prox}_{\mu}f$ is maximally monotone \cite[Theorem 12.17]{rockwets}.
\noindent\ref{i:fplus:n}: By \eqref{e:moreau:d},
\begin{align*}
-\bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*(x) & =-\mu\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\left(\frac{x}{\mu}\right)\\
&=\mu e_{\mu}f(x)-\frac{1}{2}\|x\|^2.
\end{align*}
From \cite[Example 10.32]{rockwets} we obtain
\begin{align*}
\partial_{L} \bigg(-\bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*\bigg)
(x) &= \partial_{L} (\mu e_{\mu}f)(x)-x\\
&\subseteq \mu \frac{x-\operatorname{Prox}_{\mu}f(x)}{\mu}-x=-\operatorname{Prox}_{\mu}f(x).
\end{align*}
Therefore, $-\partial_{L} \bigg(-\bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*\bigg)(x)\subseteq \operatorname{Prox}_{\mu}f(x)$.
\noindent\ref{e:env:clarke}: As $-e_{\mu}f$ is Clarke regular, using \cite[Example 10.32]{rockwets}
we obtain
$$\partial_{C}e_{\mu}f(x)=-\partial_{C}(-e_{\mu}f)(x)=-\partial_{L} (-e_{\mu}f)(x)=
\frac{x-\operatornamewithlimits{conv} \operatorname{Prox}_{\mu}f(x)}{\mu}.$$
If $f$ is $\mu$-proximal, then $\operatorname{Prox}_{\mu}f(x)$ is convex for every $x$, so
\eqref{e:clarke:prox} follows from \eqref{e:clarkesub}.
\end{proof}
\begin{rem} {\rm Lemma~\ref{l:prox:grad}\ref{i:fplus:p}} \& {\rm\ref{e:env:clarke}}
extend {\rm\cite[Exercise 11.27]{rockwets}} and {\rm\cite[Theorem 2.26]{rockwets}},
respectively, from convex functions to possibly nonconvex functions.
\end{rem}
It is tempting to ask whether
\begin{equation*}\label{e:boris}
\partial_L (e_{\mu}f)=\frac{\operatorname{Id}-\operatorname{Prox}_{\mu}f}{\mu}
\end{equation*}
holds. This is answered negatively
below.
\begin{prop} Let
$0<\lambda<\lambda_{f}$
and $\psi=h_{\lambda}f$. Suppose
that there exists $x_{0}\in\operatorname{\mathbb{R}}^n$ such that $\operatorname{Prox}_{\lambda}f(x_{0})$ is not convex.
Then
\begin{equation}\label{e:sub:point}
\partial_{L} e_{\lambda}\psi(x_{0})\neq \frac{x_{0}-
\operatorname{Prox}_{\lambda}\psi(x_{0})}{\lambda};
\end{equation}
consequently,
$$\partial_{L} e_{\lambda}\psi\neq \frac{\operatorname{Id}-\operatorname{Prox}_{\lambda}\psi}{\lambda}.$$
\end{prop}
\begin{proof} We prove by contrapositive. Suppose \eqref{e:sub:point} fails,
i.e.,
\begin{equation}\label{e:prox:f}
\partial_{L} e_{\lambda}\psi(x_{0})=\frac{x_{0}-\operatorname{Prox}_{\lambda}\psi(x_{0})}{\lambda}.
\end{equation}
In view of $e_{\lambda}\psi=e_{\lambda}f$ and \cite[Example 10.32]{rockwets},
we have
\begin{equation}\label{e:prox:s}
\partial_{L} e_{\lambda}\psi(x_{0})= \partial_L e_{\lambda}f(x_{0})
\subseteq \frac{x_{0}-\operatorname{Prox}_{\lambda}f(x_{0})}{\lambda}.
\end{equation}
Since $\operatorname{Prox}_{\lambda}\psi=\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}f$ by Lemma~\ref{l:e:h}, \eqref{e:prox:f} and \eqref{e:prox:s}
give
$$\frac{x_{0}-\operatornamewithlimits{conv}\operatorname{Prox}_{\lambda}f(x_{0})}{\lambda}\subseteq \frac{x_{0}-\operatorname{Prox}_{\lambda}f(x_{0})}{\lambda},$$
which implies that $\operatorname{Prox}_{\lambda}f(x_{0})$ is a convex set.
This is a contradiction.
\end{proof}
\section{Characterizations of Lipschitz and single-valued proximal mappings}\label{s:char}
Simple examples show that proximal mappings can be wild, although always monotone.
\begin{ex} The function $f=-\frac{1}{2}\|\cdot\|^2$ is prox-bounded with threshold
$\lambda_{f}=1$. We have $\operatorname{Prox}_{1}f=N_{\{0\}}$ the normal cone map at $0$,
i.e.,
$$N_{\{0\}}(x)=\begin{cases}
\operatorname{\mathbb{R}}^n & \text{ if $x=0$,}\\
\varnothing & \text{ otherwise.}
\end{cases}
$$
When $0<\mu<1$, $$\operatorname{Prox}_{\mu}f=\frac{\operatorname{Id}}{1-\mu},$$
which is Lipschitz continuous
with constant $1/(1-\mu)$.
\end{ex}
\begin{Fact}\emph{(\cite[Example 7.44]{rockwets})}
Let $f:\mathbb{R}^n\rightarrow(-\infty,+\infty]$ be proper, lsc and prox-bounded with threshold $\lambda_{f}$, and $0<\mu<\lambda_{f}$. Then
$\operatorname{Prox}_{\mu}f$ is always upper semicontinuous and locally bounded.
\end{Fact}
The following characterizations of the proximal mapping are of independent interest.
\begin{prop}[Lipschitz proximal mapping]
Let
$0<\mu<\lambda_{f}$.
Then the following are equivalent.
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:map} The proximal mapping $\operatorname{Prox}_{\mu}f$ is Lipschitz continuous with constant $\kappa>0$.
\item\label{i:function}
The function
$$f+\frac{\kappa-1}{2\mu\kappa}\|\cdot\|^2$$
is convex.
\end{enumerate}
\end{prop}
\begin{proof}
\ref{i:map}$\Rightarrow$\ref{i:function}: By Lemma~\ref{l:prox:grad}\ref{i:fplus:p},
$\bigg(\mu f+\frac{1}{2}\|\cdot\|^2\bigg)^*$ is differentiable and its
gradient is Lipschitz continuous with constant $\kappa$. By Soloviov's
theorem \cite{soloviov},
$\mu f+\frac{1}{2}\|\cdot\|^2$ is convex. Then the convex function
$\mu f+\frac{1}{2}\|\cdot\|^2$
has differentiable Fenchel conjugate $\big(\mu f+\frac{1}{2}\|\cdot\|^2\big)^*$ and
$\nabla \big(\mu f+\frac{1}{2}\|\cdot\|^2\big)^*$ is Lipschitz continuous
with constant $\kappa$. It follows from \cite[Proposition 12.60]{rockwets} that
$\mu f+\frac{1}{2}\|\cdot\|^2$ is $\frac{1}{\kappa}$-strongly convex, i.e.,
$$\mu f+\frac{1}{2}\|\cdot\|^2-\frac{1}{\kappa}\frac{1}{2}\|\cdot\|^2$$
is convex. Equivalently,
$$f+\frac{\kappa-1}{2\mu\kappa}\|\cdot\|^2$$
is convex.
\noindent\ref{i:function}$\Rightarrow$\ref{i:map}: We have
$$\mu f+\frac{1}{2}\|\cdot\|^2-\frac{1}{\kappa}\frac{1}{2}\|\cdot\|^2$$
is convex, i.e.,
$\mu f+\frac{1}{2}\|\cdot\|^2$ is strongly convex with constant $\frac{1}{\kappa}$.
Then \cite[Proposition 12.60]{rockwets} implies that
$\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$ is differentiable and its
gradient is Lipschitz continuous with constant $\kappa$.
In view of Lemma~\ref{l:prox:grad}\ref{i:fplus:p},
$\operatorname{Prox}_{\mu}f$ is Lipschitz continuous with constant $\kappa$.
\end{proof}
\begin{cor}\label{c:lip}
Let
$0<\mu<\lambda_{f}$.
Then the following are equivalent.
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:map1}The proximal mapping $\operatorname{Prox}_{\mu}f$ is Lipschitz continuous with constant $1$, i.e., nonexpansive.
\item\label{i:function1}
The function
$f$
is convex.
\end{enumerate}
\end{cor}
\begin{df}\emph{(See \cite[Section 26]{rockconv} or \cite[page 483]{rockwets})}
A proper, lsc, convex function $f:\mathbb{R}^n\rightarrow (-\infty, +\infty]$
is
\begin{enumerate}[label=\rm(\alph*)]
\item essentially strictly convex if $f$ is strictly convex on every convex subset
of $\operatorname{dom} \partial f$;
\item essentially differentiable if $\partial f(x)$ is a singleton whenever
$\partial f(x)\neq\varnothing$.
\end{enumerate}
\end{df}
\begin{prop}[single-valued proximal mapping]\label{p:single}
Let
$0<\mu<\lambda_{f}$.
Then the following are equivalent.
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:maps}The proximal mapping $\operatorname{Prox}_{\mu}f$ is single-valued, i.e., $\operatorname{Prox}_{\mu}f(x)$
is a singleton for every $x\in\operatorname{\mathbb{R}}^n$.
\item\label{i:functionc}
The function
$$f+\frac{1}{2\mu}\|\cdot\|^2$$
is essentially strictly convex and coercive.
\end{enumerate}
\end{prop}
\begin{proof}
\ref{i:maps}$\Rightarrow$\ref{i:functionc}: By Lemma~\ref{l:prox:grad}\ref{i:fplus:p},
$\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$ is differentiable. By Soloviov's
theorem \cite{soloviov},
$\mu f+\frac{1}{2}\|\cdot\|^2$ is convex. The convex function
$\mu f+\frac{1}{2}\|\cdot\|^2$
has differentiable Fenchel conjugate $\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$. It follows from \cite[Theorem 11.13]{rockwets} that
$\mu f+\frac{1}{2}\|\cdot\|^2$ is essentially strictly convex.
Since $\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$ has full domain and
$\mu f+\frac{1}{2}\|\cdot\|^2$ is convex, the function
$\mu f+\frac{1}{2}\|\cdot\|^2$ is coercive by \cite[Theorem 11.8]{rockwets}.
\noindent\ref{i:functionc}$\Rightarrow$\ref{i:maps}:
Since
$\mu f+\frac{1}{2}\|\cdot\|^2$ is essentially strictly convex,
$\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$ is essentially differentiable by \cite[Theorem 11.13]{rockwets}.
Because $\mu f+\frac{1}{2}\|\cdot\|^2$ is coercive,
$\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$
has full domain. Then
$\left(\mu f+\frac{1}{2}\|\cdot\|^2\right)^*$ is differentiable on $\mathbb{R}^n$.
In view of Lemma~\ref{l:prox:grad}\ref{i:fplus:p},
$\operatorname{Prox}_{\mu}f(x)$ is single-valued for every $x\in\mathbb{R}^n$.
Recall that for a nonempty, closed set $S\subseteq\operatorname{\mathbb{R}}^n$ and every $x\in\operatorname{\mathbb{R}}^n$,
the projection $P_{S}(x)$
consists of the points in $S$ nearest to $x$, so
$P_{S}=\operatorname{Prox}_{1}\iota_{S}$.
Combining Corollary~\ref{c:lip} and Proposition~\ref{p:single},
we can derive the following result
due to Rockafellar and Wets, \cite[Corollary 12.20]{rockwets}.
\begin{cor} Let $S$ be a nonempty, closed set in $\operatorname{\mathbb{R}}^n$. Then the following are
equivalent:
\begin{enumerate}[label=\rm(\alph*)]
\item $P_{S}$ is single-valued,
\item $P_{S}$ is nonexpansive,
\item $S$ is convex.
\end{enumerate}
\end{cor}
\section{The proximal average for prox-bounded functions}\label{s:main}
The goal of this section is to establish a proximal average function that works for any two prox-bounded functions. Our framework
will generalize the convex proximal average of
\cite{proxpoint} to include nonconvex functions, in a manner
that recovers the original definition in the convex case.
Remembering the standing assumptions in Subsection \ref{s:assump},
we define the \emph{proximal average} of $f, g$ associated with parameters $\mu, \alpha$ by
\begin{equation}\label{e:prox:def}
\ensuremath{\varphi^{\alpha}_{\mu}}=-e_{\mu}(-\alpha e_{\mu}f-(1-\alpha)e_{\mu}g),
\end{equation}
which essentially relies on the Moreau envelopes.
\begin{thm}[basic properties of the proximal average]\label{t:prox}
Let
$0<\mu<\operatorname{\bar{\lambda}}$,
and let $\ensuremath{\varphi^{\alpha}_{\mu}}$ be defined as in \eqref{e:prox:def}.
Then the following hold.
\begin{enumerate}[label=\rm(\alph*)]
\item \label{i:env:conhull}
The Moreau envelope $e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}})=\alpha e_{\mu}f+(1-\alpha)e_{\mu} g.$
\item\label{i:lowers}The proximal average $\ensuremath{\varphi^{\alpha}_{\mu}}$ is proper, lsc and prox-bounded with threshold
$\lambda_{\ensuremath{\varphi^{\alpha}_{\mu}}}\geq\operatorname{\bar{\lambda}}$.
\item\label{i:epi:sum}
The proximal average $\ensuremath{\varphi^{\alpha}_{\mu}}(x)=$
\begin{equation}\label{e:func}
\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right](x)
-\frac{1}{2\mu}\|x\|^2,
\end{equation}
where the inf-convolution $\Box$ is exact;
consequently, $\operatorname{epi}(\ensuremath{\varphi^{\alpha}_{\mu}}+1/2\mu\|\cdot\|^2)=$
\begin{equation}\label{e:epig}
\alpha \operatorname{epi}\operatornamewithlimits{conv}(f+1/2\mu\|\cdot\|^2)+(1-\alpha)\operatorname{epi}\operatornamewithlimits{conv}(g+1/2\mu\|\cdot\|^2).
\end{equation}
\item\label{i:dom:convhull}
The domain $\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha \operatornamewithlimits{conv}\operatorname{dom} f+(1-\alpha)\operatornamewithlimits{conv}\operatorname{dom} g$.
In particular, $\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}=\operatorname{\mathbb{R}}^n$ if either one of $\operatornamewithlimits{conv}\operatorname{dom} f$ and $\operatornamewithlimits{conv}\operatorname{dom} g$ is
$\operatorname{\mathbb{R}}^n$.
\item\label{i:hull:ave} The proximal average of $f$ and $g$ is the same
as the proximal average of proximal hulls $h_{\mu}f$ and $h_{\mu}g$, respectively.
\item\label{i:alpha}
When $\alpha=0$, $\varphi_{\mu}^{0}=h_{\mu}g$; when $\alpha=1$, $\varphi_{\mu}^{1}=h_{\mu}f$.
\item\label{i:phi:mu}
Each $\ensuremath{\varphi^{\alpha}_{\mu}}$ is $\mu$-proximal, or equivalently, $\mu$-hypoconvex.
\item\label{i:f=g}
When $f=g$, $\ensuremath{\varphi^{\alpha}_{\mu}}=h_{\mu}f$; consequently, $\ensuremath{\varphi^{\alpha}_{\mu}}=f$ when $f=g$ is $\mu$-proximal.
\item\label{i:g=c}
When $g\equiv c\in\operatorname{\mathbb{R}}$, $\ensuremath{\varphi^{\alpha}_{\mu}}=e_{\mu/\alpha,\mu}(\alpha f+(1-\alpha)c)$,
the Lasry-Lions envelope of $\alpha f+(1-\alpha)c$.
\end{enumerate}
\end{thm}
\begin{proof}
\ref{i:env:conhull}:
Since $-\alpha e_{\mu}f-(1-\alpha)e_{\mu}g$ is $\mu$-proximal by
Lemma~\ref{l:env:neg}\ref{i:e:1} and Proposition~\ref{p:p:cone},
we have
\begin{align*}
-e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}})& =-e_{\mu}(-e_{\mu}(-\alpha e_{\mu}f-(1-\alpha)e_{\mu}g))\\
&=h_{\mu}(-\alpha e_{\mu}f-(1-\alpha)e_{\mu}g)\\
&=-\alpha e_{\mu}f-(1-\alpha)e_{\mu}g.
\end{align*}
\noindent\ref{i:lowers}: Because
$0<\mu<\operatorname{\bar{\lambda}}$,
both $e_{\mu}f$ and $e_{\mu}g$ are continuous, see, e.g., \cite[Theorem 1.25]{rockwets}.
By \ref{i:env:conhull}, $e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}})$ is real-valued and continuous. If
$\ensuremath{\varphi^{\alpha}_{\mu}}$ is not proper, then $e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}})\equiv-\infty$ or
$e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}})\equiv\infty$, which is a contradiction. Hence,
$\ensuremath{\varphi^{\alpha}_{\mu}}$ must be proper.
Lower semicontinuity follows from
the definition of the Moreau envelope.
To show that $\lambda_{\ensuremath{\varphi^{\alpha}_{\mu}}}\geq \operatorname{\bar{\lambda}}$, take any $\delta\in ]0,\operatorname{\bar{\lambda}}-\mu[$. By \cite[Exercise 1.29(c)]{rockwets} and \ref{i:env:conhull}, we have
\begin{align*}
e_{\delta+\mu}(\ensuremath{\varphi^{\alpha}_{\mu}}) &=e_{\delta}(e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}}))\\
& =e_{\delta}(\alpha e_{\mu}f+(1-\alpha)e_{\mu}g)\\
&\geq \alpha e_{\delta}(e_{\mu}f)+(1-\alpha)e_{\delta}(e_{\mu}g)\\
&=\alpha e_{\delta+\mu}f+(1-\alpha)e_{\delta+\mu}g>-\infty.
\end{align*}
Since $\delta\in ]0,\operatorname{\bar{\lambda}}-\mu[$ was arbitrary, $\ensuremath{\varphi^{\alpha}_{\mu}}$ has prox-bound
$\lambda_{\ensuremath{\varphi^{\alpha}_{\mu}}}\geq \operatorname{\bar{\lambda}}$.
\noindent\ref{i:epi:sum}: Since $\mu<\operatorname{\bar{\lambda}}$, both
$e_{\mu}f$ and $e_{\mu}g$ are locally Lipschitz with full domain by
Fact~\ref{l:dcform}\ref{i:mor:l},
so
$$\operatorname{dom} \bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*=
\operatorname{dom} \bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*
=\operatorname{\mathbb{R}}^n.$$ It follows from \cite[Theorem 11.23(a)]{rockwets} that
\begin{align*}
& \left[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\right]^*\\
&=
\bigg(\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\bigg)^{*}\Box
\bigg((1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\bigg)^{*}
\end{align*}
where the $\Box$ is exact; see, e.g., \cite[Theorem 16.4]{rockconv}.
By Fact~\ref{l:dcform},
\begin{align*}
& -\alpha e_{\mu}f-(1-\alpha)e_{\mu}g \\
&=\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\left(\frac{x}{\mu}\right)
+(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\left(\frac{x}{\mu}\right)-\frac{1}{2\mu}\|x\|^2.
\end{align*}
Substitute this into the definition of $\ensuremath{\varphi^{\alpha}_{\mu}}$ and use Fact~\ref{l:dcform} again
to obtain $\ensuremath{\varphi^{\alpha}_{\mu}}(x)=$
\begin{align}
&\left[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\big(\frac{\cdot}{\mu}\big)+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\big(\frac{\cdot}{\mu}\big)
\right]^*\big(\frac{x}{\mu}\big)
-\frac{1}{2\mu}\|x\|^2\nonumber\\
=&\left[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\right]^*\big(\mu\frac{x}{\mu}\big)
-\frac{1}{2\mu}\|x\|^2\nonumber\\
= & \left[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^{**}\big(\frac{\cdot}{\alpha}\big)\Box
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^{**}
\big(\frac{\cdot}{1-\alpha}\big)
\right](x)
-\frac{1}{2\mu}\|x\|^2\nonumber\\
=& \left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\big(\frac{\cdot}{\alpha}\big)\Box
(1-\alpha)\operatornamewithlimits{conv}\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\big(\frac{\cdot}{1-\alpha}\big)
\right](x)
-\frac{1}{2\mu}\|x\|^2,\label{e:box:exact}
\end{align}
in which
$$\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^{**}=\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)$$
$$\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^{**}=\operatornamewithlimits{conv}\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)$$
because $f+\frac{1}{2\mu}\|\cdot\|^2$ and $g+\frac{1}{2\mu}\|\cdot\|^2$
are coercive; see, e.g., \cite[Example 11.26(c)]{rockwets}.
Also, in \eqref{e:box:exact}, the infimal convolution is exact because
$\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)^*$ and $\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)^*$
have full domain; see \cite[Theorem 16.4]{rockconv}
or \cite[Theorem 11.23(a)]{rockwets}.
\eqref{e:epig} follows from \eqref{e:func} and
\cite[Proposition 12.8(ii)]{convmono} or \cite[Exercise 1.28]{rockwets}.
\noindent\ref{i:dom:convhull}: This is immediate from \ref{i:epi:sum} and
\cite[Proposition 12.6(ii)]{convmono}.
\noindent\ref{i:hull:ave}: Use
\eqref{e:prox:def}, and the fact that
$e_{\mu}(h_{u}f)=e_{\mu}f$ and $e_{\mu}(h_{u}g)=e_{\mu}g$.
\noindent\ref{i:alpha}: When $\alpha=0$, this follows from $\varphi_{\mu}^{0}=-e_{\mu}(-e_{\mu}g)=h_{\mu}g$;
the proof for $\alpha=1$ case is similar.
\noindent\ref{i:phi:mu}: This follows from Fact~\ref{l:dcform}\ref{i:mor:l}.
\noindent\ref{i:f=g}: When $f=g$, we have $e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}=e_{\mu}f$ so that $-e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}=-e_{\mu}f$. Since
$\ensuremath{\varphi^{\alpha}_{\mu}}$ is $\mu$-proximal by \ref{i:phi:mu}, it follows that
$\ensuremath{\varphi^{\alpha}_{\mu}}=-e_{\mu}(-e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}})=-e_{\mu}(-e_{\mu}f)=h_{\mu}f$.
\noindent\ref{i:g=c}: This follows from
\begin{align*}
\ensuremath{\varphi^{\alpha}_{\mu}} &=-e_{\mu}(-\alpha e_{\mu}f-(1-\alpha)c)=-e_{\mu}(-e_{\mu/\alpha}(\alpha f)-(1-\alpha)c)\\
&=-e_{\mu}[-e_{\mu/\alpha}(\alpha f+(1-\alpha)c)],
\end{align*}
and Fact~\ref{f:m-p-l}\ref{i:d:env}.
\end{proof}
\begin{prop}
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:regular} The proximal average $\ensuremath{\varphi^{\alpha}_{\mu}}$ is always Clarke regular, prox-regular and strongly
amenable on $\operatorname{\mathbb{R}}^n$.
\item\label{i:full:d}
If one of the sets $\operatornamewithlimits{conv}\operatorname{dom} f$ or $\operatornamewithlimits{conv}\operatorname{dom} g$ is $\operatorname{\mathbb{R}}^n$,
then $\ensuremath{\varphi^{\alpha}_{\mu}}$ is locally Lipschitz on $\operatorname{\mathbb{R}}^n$.
\item\label{i:u:proximable} When $f, g$ are both $\mu$-proximal, $\ensuremath{\varphi^{\alpha}_{\mu}}$ is the proximal
average for convex functions.
\end{enumerate}
\end{prop}
\begin{proof}
One always has
$$\ensuremath{\varphi^{\alpha}_{\mu}}=\bigg(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\bigg)-\frac{1}{2\mu}\|\cdot\|^2$$
where $\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2$ is convex
by Theorem~\ref{t:prox}\ref{i:phi:mu}.
\noindent\ref{i:regular}:
Use \cite[Example 11.30]{rockwets} and \cite[Exercise 13.35]{rockwets}
to conclude that $\ensuremath{\varphi^{\alpha}_{\mu}}$ is prox-regular. \cite[Example 10.24(g)]{rockwets}
shows that $\ensuremath{\varphi^{\alpha}_{\mu}}$ is strongly amenable.
Also, being a sum of a convex function and a $\mathcal{C}^2$ function, $\ensuremath{\varphi^{\alpha}_{\mu}}$ is Clarke regular.
\noindent\ref{i:full:d}:
By Theorem~\ref{t:prox}\ref{i:dom:convhull},
$\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}=\operatorname{\mathbb{R}}^n$, so $(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2)$ is
a finite-valued convex function on $\operatorname{\mathbb{R}}^n$; it is therefore
locally Lipschitz, and hence so is $\ensuremath{\varphi^{\alpha}_{\mu}}$.
\noindent\ref{i:u:proximable}: Since both $f+\frac{1}{2\mu}\|\cdot\|^2$ and
$g+\frac{1}{2\mu}\|\cdot\|^2$
are convex, the result follows from
Theorem~\ref{t:prox}\ref{i:epi:sum}
and
\cite[Definition 4.1]{proxbas}.
\end{proof}
\begin{cor}
Let
$0<\mu<\operatorname{\bar{\lambda}}$
and let $\ensuremath{\varphi^{\alpha}_{\mu}}$ be defined as in \eqref{e:prox:def}.
Then
$$-\partial_L\left[-\bigg(\mu\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2}\|\cdot\|^2\bigg)^*\right]\subseteq
\alpha \operatorname{Prox}_{\mu}f+(1-\alpha)\operatorname{Prox}_{\mu}g.$$
\end{cor}
\begin{proof}
By Theorem~\ref{t:prox}\ref{i:env:conhull},
$e_{\mu}(\ensuremath{\varphi^{\alpha}_{\mu}})=\alpha e_{\mu}f+(1-\alpha)e_{\mu} g.$ Since both $e_{\mu}f, e_{\mu}g$ are locally
Lipschitz, the sum rule for $\partial_{L}$ \cite[Corollary 10.9]{rockwets} gives
\begin{align*}
\partial_{L} e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}(x) & \subseteq \alpha\partial_{L} e_{\mu}f(x)
+(1-\alpha)\partial_{L} e_{\mu}g(x)\\
&\subseteq \alpha \frac{x-\operatorname{Prox}_{\mu}f(x)}{\mu}+(1-\alpha)\frac{x-\operatorname{Prox}_{\mu}g(x)}{\mu}\\
&=\frac{x}{\mu}-\frac{\alpha \operatorname{Prox}_{\mu}f(x)+(1-\alpha)\operatorname{Prox}_{\mu}g(x)}{\mu},
\end{align*}
from which
$$\partial_{L}\left(e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}-\frac{1}{2\mu}\|x\|^2\right)\subseteq -\frac{\alpha \operatorname{Prox}_{\mu}f(x)+(1-\alpha)\operatorname{Prox}_{\mu}g(x)}{\mu}.$$
As
$$e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}(x)-\frac{1}{2\mu}\|x\|^2=-\left(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\right)^{*}\left(\frac{x}{\mu}\right)
=-\frac{\left(\mu\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2}\|\cdot\|^2\right)^*(x)}{\mu},$$
we have
$$-\partial_L\left(-\left(\mu\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2}\|\cdot\|^2\right)^{*}\right)(x)\subseteq\alpha \operatorname{Prox}_{\mu}f(x)+(1-\alpha)\operatorname{Prox}_{\mu}g(x).$$
\end{proof}
A natural question to ask is whether $\alpha \operatorname{Prox}_{\mu}f+(1-\alpha)\operatorname{Prox}_{\mu}g$ is still a
proximal mapping. Although this is not clear in general, we have the following.
\begin{thm}[the proximal mapping of the proximal average]\label{prop:convcomb}
Let $0<\mu<\operatorname{\bar{\lambda}}$ and let $\ensuremath{\varphi^{\alpha}_{\mu}}$ be defined as in \eqref{e:prox:def}.
Then
\begin{equation}\label{e:prox:conv}
\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}
=\alpha\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}f+(1-\alpha)\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}g.
\end{equation}
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:u:prox} When both $f$ and $g$ are $\mu$-proximal, one has
$$\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}
=\alpha\operatorname{Prox}_{\mu}f+(1-\alpha)\operatorname{Prox}_{\mu}g.$$
\item\label{i:r:prox} Suppose that on an open subset $U\subset\operatorname{\mathbb{R}}^n$
both $\operatorname{Prox}_{\mu}f, \operatorname{Prox}_{\mu}g$ are
single-valued (e.g., when
$e_{\mu}f$ and $e_{\mu}g$ are
continuously differentiable).
Then $\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}$ is single-valued, and
$$\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}
=\alpha\operatorname{Prox}_{\mu}f+(1-\alpha)\operatorname{Prox}_{\mu}g \text{ on $U$.}$$
\item\label{i:r:prox2} Suppose that on an open subset $U\subset\operatorname{\mathbb{R}}^n$
both $\operatorname{Prox}_{\mu}f, \operatorname{Prox}_{\mu}g$ are
single-valued and Lipschitz continuous (e.g., when $f$ and $g$ are prox-regular).
Then $\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}$ is single-valued and Lipschitz continuous, and
$$\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}
=\alpha\operatorname{Prox}_{\mu}f+(1-\alpha)\operatorname{Prox}_{\mu}g \text{ on $U$.}$$
\end{enumerate}
\end{thm}
\begin{proof}
By Theorem~\ref{t:prox},
$$-e_{\mu}(\varphi^\alpha_{\mu})=-\alpha e_{\mu}f-(1-\alpha)e_{\mu}g.$$
Since both $-e_{\mu}f, -e_{\mu}g$ are Clarke regular, the sum rule \cite[Corollary 10.9]{rockwets}
gives
$$\partial_L(-e_{\mu}(\varphi^\alpha_{\mu}))=\alpha\partial_L (- e_{\mu}f)
+(1-\alpha)\partial_L(-e_{\mu}g).$$
Apply \cite[Example 10.32]{rockwets} to get
$$\frac{\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}(x)-x}{\mu}=\alpha \frac{\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}f(x)-x}{\mu}+
(1-\alpha)\frac{\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}g(x)-x}{\mu}$$
from which
$$
\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}
=\alpha\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}f+(1-\alpha)\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}g.
$$
Since $\ensuremath{\varphi^{\alpha}_{\mu}}$ is $\mu$-proximal, $\operatornamewithlimits{conv}\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}=\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}$,
therefore, \eqref{e:prox:conv} follows.
\noindent\ref{i:u:prox}: Since $f,g$ are $\mu$-proximal,
$\operatorname{Prox}_{\mu}f$ and $\operatorname{Prox}_{\mu}g$ are convex-valued
by Proposition~\ref{p:resolventf}.
\noindent\ref{i:r:prox}: When $e_{\mu}f$ and $e_{\mu}g$ are continuously differentiable,
both $\operatorname{Prox}_{\mu}f, \operatorname{Prox}_{\mu}g$ are
single-valued on $U$ by \cite[Proposition 5.1]{proxhilbert}.
\noindent\ref{i:r:prox2}: When $f$ and $g$ are prox-regular on $U$,
both $\operatorname{Prox}_{\mu}f, \operatorname{Prox}_{\mu}g$ are
single-valued and Lipschitz continuous on $U$ by \cite[Proposition 5.3]{proxhilbert}
or \cite[Proposition 13.37]{rockwets}.
\end{proof}
\begin{cor}
Let $0<\mu<\operatorname{\bar{\lambda}}$ and
let $\ensuremath{\varphi^{\alpha}_{\mu}}$ be defined as in \eqref{e:prox:def}.
Then
$$
\operatorname{Prox}_{\mu}\varphi^\alpha_{\mu}
=\alpha\operatorname{Prox}_{\mu}(h_{\mu}f)+(1-\alpha)\operatorname{Prox}_{\mu}(h_{\mu}g).
$$
\end{cor}
\begin{proof}
Combine Theorem~\ref{prop:convcomb}
and Lemma~\ref{l:e:h}.
\end{proof}
\begin{cor} Let $\mu>0$. The following set of proximal mappings
$$\{\operatorname{Prox}_{\mu}f|\ \text{$f$ is $\mu$-proximal and $\mu<\lambda_{f}$}\}$$
is a convex set. Moreover, for every $\mu$-proximal function,
$\operatorname{Prox}_{\mu}f=(\operatorname{Id}+\mu\partial_{L} f)^{-1}$.
\end{cor}
\begin{proof}
Apply Theorem~\ref{prop:convcomb}\ref{i:u:prox},
Theorem~\ref{t:prox}\ref{i:lowers}\&\ref{i:phi:mu} and Proposition~\ref{p:resolventf}.
\end{proof}
\section{Relationships to the arithmetic average and epi-average}\label{s:rela}
\begin{df}[epi-convergence and epi-topology]
\emph{(See \cite[Chapter~6]{rockwets}.)}
Let $f$ and $(f_k)_{k\in\operatorname{\mathbb{N}}}$ be functions from $\operatorname{\mathbb{R}}^n$ to $\overline{\operatorname{\mathbb{R}}}$. Then
$(f_k)_{k\in\operatorname{\mathbb{N}}}$ \emph{epi-converges} to $f$, in symbols $f_k\epi f$,
if for every $x\in \operatorname{\mathbb{R}}^n$ the following hold:
\begin{enumerate}[label=\rm(\alph*)]
\item $\big(\forall\,(x_k)_{k\in\operatorname{\mathbb{N}}}\big)$ $x_k\to x \Rightarrow
f(x)\leq\liminf f_k(x_k)$;
\item $\big(\exists (y_k)_{k\in\operatorname{\mathbb{N}}}\big)$ $y_k\to x$
and $\limsup f_k(y_k) \leq f(x)$.
\end{enumerate}
We write $\operatornamewithlimits{e-lim}_{k\rightarrow\infty}f_{k}=f$ to say that $f_k$ epi-converges to $f$.
The \emph{epi-topology} is the topology induced by epi-convergence.
\end{df}
\begin{rem} The threshold $\operatorname{\bar{\lambda}}=+\infty$ whenever both $f,g$ are bounded
from below by an affine function.\end{rem}
\begin{thm}\label{t:go:infinity} Let $0<\mu<\operatorname{\bar{\lambda}}$.
One has the following.
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:mono:phi} For every fixed $x\in \operatorname{\mathbb{R}}^n$,
the function $\mu\mapsto\ensuremath{\varphi^{\alpha}_{\mu}}(x)$ is monotonically decreasing and
left-continuous on $]0,\operatorname{\bar{\lambda}}]$.
\item\label{i:inf:phi1}
The pointwise limit
$\lim_{\mu\uparrow \operatorname{\bar{\lambda}}}\ensuremath{\varphi^{\alpha}_{\mu}}=\inf_{\operatorname{\bar{\lambda}}>\mu>0}\ensuremath{\varphi^{\alpha}_{\mu}}=$
$$
\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\operatorname{\bar{\lambda}}}\|\cdot\|^2\bigg)\bigg(\frac{\cdot}{\alpha}\bigg)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\operatorname{\bar{\lambda}}}\|\cdot\|^2\bigg)\bigg(\frac{\cdot}{1-\alpha}\bigg)\right](x)
-\frac{1}{2\operatorname{\bar{\lambda}}}\|x\|^2.
$$
\item\label{i:inf:phi2} When $\operatorname{\bar{\lambda}}=\infty$,
the pointwise limit
\begin{equation}\label{e:pointwise}
\lim_{\mu\uparrow \infty}\ensuremath{\varphi^{\alpha}_{\mu}}=\inf_{\mu>0}\ensuremath{\varphi^{\alpha}_{\mu}}=
\alpha\operatornamewithlimits{conv} f\bigg(\frac{\cdot}{\alpha}\bigg)\Box (1-\alpha)\operatornamewithlimits{conv}
g\bigg(\frac{\cdot}{1-\alpha}\bigg), \text{ and }
\end{equation}
the epigraphical limit
\begin{equation}\label{i:epi:limit}
\operatornamewithlimits{e-lim}_{\mu\uparrow\infty}\ensuremath{\varphi^{\alpha}_{\mu}}=
\operatorname{cl}\left[\alpha\operatornamewithlimits{conv} f\bigg(\frac{\cdot}{\alpha}\bigg)\Box (1-\alpha)\operatornamewithlimits{conv}
g\bigg(\frac{\cdot}{1-\alpha}\bigg)\right].
\end{equation}
\end{enumerate}
\end{thm}
\begin{proof}
\ref{i:mono:phi}:
We have $\ensuremath{\varphi^{\alpha}_{\mu}}(x)=$
\footnotesize\begin{align}
&
\inf_{u+v=x}\left(\alpha\inf_{\sum_{i}\alpha_{i}x_{i}=u/\alpha\atop{\sum_{i}\alpha_{i}=1,
\alpha_{i}\geq 0}}\left(\sum_{i}\alpha_{i}f(x_{i})
+\alpha_{i}\frac{1}{2\mu}\|x_{i}\|^2\right)+(1-\alpha)\inf_{\sum_{j}\beta_{j}y_{j}=v/(1-\alpha)
\atop{\sum_{j}\beta_{j}=1,
\beta_{j}\geq 0}}\left(\sum_{j}\beta_{j}g(y_{j})
+\beta_{j}\frac{1}{2\mu}\|y_{j}\|^2\right)\right)\nonumber\\
&\quad -\frac{1}{2\mu}\|x\|^2\nonumber\\
&=
\inf_{{\alpha\sum_i\alpha_{i}x_{i}+
(1-\alpha)\sum_{j}\beta_{j}y_{j}=x}\atop{\sum_{i}\alpha_{i}=1,
\sum_{j}\beta_{j}=1},\alpha_{i}\geq 0, \beta_{j}\geq 0}\bigg(\alpha\sum_{i}\alpha_{i}f(x_{i})
+(1-\alpha)\sum_{j}\beta_{j}g(y_{j})+\nonumber\\
&\quad \frac{1}{2\mu}\underbrace{\left(\alpha\sum_{i}\alpha_{i}\|x_{i}\|^2
+(1-\alpha)\sum_{j}\beta_{j}\|y_{j}\|^2-\bigg\|\alpha\sum_{i}\alpha_{i}x_{i}+(1-\alpha)\sum_{j}
\beta_{j}y_{j}\bigg\|^2\right)}\bigg).\nonumber
\end{align}\normalsize
The underbraced part is nonnegative because $\|\cdot\|^2$ is convex, $\sum_{i}\alpha_{i}=1,
\sum_{j}\beta_{j}=1$. It follows that
$\mu\mapsto\ensuremath{\varphi^{\alpha}_{\mu}}$ is a monotonically decreasing function on $]0,+\infty[$.
Let $\operatorname{\bar{\mu}}\in ]0,\operatorname{\bar{\lambda}}]$. Then $\lim_{\mu\uparrow\operatorname{\bar{\mu}}}\ensuremath{\varphi^{\alpha}_{\mu}}
=\inf_{\operatorname{\bar{\mu}}>\mu>0}\ensuremath{\varphi^{\alpha}_{\mu}}=$
\begin{align}
&\inf_{\operatorname{\bar{\mu}}>\mu>0}
\inf_{{\alpha\sum_i\alpha_{i}x_{i}+
(1-\alpha)\sum_{j}\beta_{j}y_{j}=x}\atop{\sum_{i}\alpha_{i}=1,
\sum_{j}\beta_{j}=1},\alpha_{i}\geq 0, \beta_{j}\geq 0}\bigg(\alpha\sum_{i}\alpha_{i}f(x_{i})
+(1-\alpha)\sum_{j}\beta_{j}g(y_{j})+\nonumber\\
&\quad \frac{1}{2\mu}\left(\alpha\sum_{i}\alpha_{i}\|x_{i}\|^2
+(1-\alpha)\sum_{j}\beta_{j}\|y_{j}\|^2-\bigg\|\alpha\sum_{i}\alpha_{i}x_{i}+(1-\alpha)\sum_{j}
\beta_{j}y_{j}\bigg\|^2\right)\bigg)\label{e:k1}\\
&=
\inf_{{\alpha\sum_i\alpha_{i}x_{i}+
(1-\alpha)\sum_{j}\beta_{j}y_{j}=x}\atop{\sum_{i}\alpha_{i}=1,
\sum_{j}\beta_{j}=1},\alpha_{i}\geq 0, \beta_{j}\geq 0}\inf_{\operatorname{\bar{\mu}}>\mu>0}\bigg(\alpha\sum_{i}\alpha_{i}f(x_{i})
+(1-\alpha)\sum_{j}\beta_{j}g(y_{j})+\nonumber\\
&\quad \frac{1}{2\mu}\left(\alpha\sum_{i}\alpha_{i}\|x_{i}\|^2
+(1-\alpha)\sum_{j}\beta_{j}\|y_{j}\|^2-\bigg\|\alpha\sum_{i}\alpha_{i}x_{i}+(1-\alpha)\sum_{j}
\beta_{j}y_{j}\bigg\|^2\right)\bigg)\\
&=
\inf_{{\alpha\sum_i\alpha_{i}x_{i}+
(1-\alpha)\sum_{j}\beta_{j}y_{j}=x}\atop{\sum_{i}\alpha_{i}=1,
\sum_{j}\beta_{j}=1},\alpha_{i}\geq 0, \beta_{j}\geq 0}
\bigg(\alpha\sum_{i}\alpha_{i}f(x_{i})
+(1-\alpha)\sum_{j}\beta_{j}g(y_{j})+\nonumber\\
&\quad \frac{1}{2\operatorname{\bar{\mu}}}\left(\alpha\sum_{i}\alpha_{i}\|x_{i}\|^2
+(1-\alpha)\sum_{j}\beta_{j}\|y_{j}\|^2-\bigg\|\alpha\sum_{i}\alpha_{i}x_{i}+(1-\alpha)\sum_{j}
\beta_{j}y_{j}\bigg\|^2\right)\bigg)\label{e:k2}\\
&=
\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\operatorname{\bar{\mu}}}
\|\cdot\|^2\bigg)\bigg(\frac{\cdot}{\alpha}\bigg)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\operatorname{\bar{\mu}}}\|\cdot\|^2\bigg)\bigg(\frac{\cdot}{1-\alpha}\bigg)\right](x)
-\frac{1}{2\operatorname{\bar{\mu}}}\|x\|^2\nonumber.
\end{align}
\noindent\ref{i:inf:phi1}: This follows from \ref{i:mono:phi}.
\noindent\ref{i:inf:phi2}:
By \ref{i:mono:phi}, we have $\lim_{\mu\rightarrow\infty}\ensuremath{\varphi^{\alpha}_{\mu}}
=\inf_{\mu>0}\ensuremath{\varphi^{\alpha}_{\mu}}$. Using similar arguments as in \eqref{e:k1}--\eqref{e:k2},
we obtain $\inf_{\mu>0}\ensuremath{\varphi^{\alpha}_{\mu}}=$
\begin{align*}
&\inf_{{\alpha\sum_i\alpha_{i}x_{i}+
(1-\alpha)\sum_{j}\beta_{j}y_{j}=x}\atop{\sum_{i}\alpha_{i}=1,
\sum_{j}\beta_{j}=1},\alpha_{i}\geq 0, \beta_{j}\geq 0}\bigg(\alpha\sum_{i}\alpha_{i}f(x_{i})
+(1-\alpha)\sum_{j}\beta_{j}g(y_{j})\bigg)\\
&=\inf_{{u+v=x}}\bigg(\alpha\inf_{\sum_i\alpha_{i}x_{i}=u/\alpha
\atop{\sum_{i}\alpha_{i}=1,\alpha_{i}\geq 0}}\sum_{i}\alpha_{i}f(x_{i})
+(1-\alpha)\inf_{\sum_{j}\beta_{j}y_{j}=v/(1-\alpha)\atop{\sum_{j}\beta_{j}=1}, \beta_{j}\geq 0}\sum_{j}\beta_{j}g(y_{j})\bigg)\\
&=\inf_{{u+v=x}}\bigg(\alpha(\operatornamewithlimits{conv} f)(u/\alpha)
+(1-\alpha)(\operatornamewithlimits{conv} g)(v/(1-\alpha))\bigg),
\end{align*}
as required. To get \eqref{i:epi:limit}, we combine \eqref{e:pointwise} and
\cite[Proposition 7.4(c)]{rockwets}.
\end{proof}
In order to study the limit behavior when $\mu\downarrow 0$, a lemma helps.
We omit its simple proof.
\begin{lem}\label{l:concave:e}
The Moreau envelope function respects the inequality$$e_{\mu}(\alpha f_{1}+(1-\alpha)f_{2})\geq \alpha e_{\mu}f_{1}+(1-\alpha)e_{\mu}f_{2}.$$
\end{lem}
\begin{thm}\label{t:u:0}
Let $0<\mu<\operatorname{\bar{\lambda}}$. One has
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:three:c}
\begin{equation}\label{e:three:c}
\alpha e_{\mu}f+(1-\alpha) e_{\mu}g \leq\ensuremath{\varphi^{\alpha}_{\mu}}\leq \alpha h_{\mu}f+(1-\alpha)h_{\mu}g
\leq\alpha f+(1-\alpha) g \text{ and }
\end{equation}
\item when $\mu\downarrow 0$, the pointwise limit and epi-graphical limit agree with
\begin{equation}\label{e:lim:0}
\lim_{\mu\downarrow 0}\ensuremath{\varphi^{\alpha}_{\mu}}=\sup_{\mu>0}\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha f+(1-\alpha)g.
\end{equation}
Furthermore, the convergence in \eqref{e:lim:0}
is uniform on compact subsets of $\operatorname{\mathbb{R}}^n$ when $f,g$ are continuous.
\end{enumerate}
\end{thm}
\begin{proof}
Apply Lemma~\ref{l:concave:e} with $f_{1}=-e_{\mu}f, f_{2}=-e_{\mu}g$ to obtain
$e_{\mu}(\alpha(-e_{\mu}f)+(1-\alpha)(-e_{\mu}g))\geq \alpha e_{\mu}(-e_{\mu}f)+(1-\alpha)
e_{\mu}(-e_{\mu}g).$ Then
\begin{equation}\label{e:1}
\ensuremath{\varphi^{\alpha}_{\mu}}\leq \alpha (-e_{\mu}(-e_{\mu}f))+(1-\alpha)(-e_{\mu}(-e_{\mu}g))=
\alpha h_{\mu}f+(1-\alpha)h_{\mu}g.
\end{equation}
On the other hand,
$e_{\mu}(\alpha(-e_{\mu}f)+(1-\alpha)(-e_{\mu}g))\leq \alpha(-e_{\mu}f)+(1-\alpha)(-e_{\mu}g)$
so
\begin{equation}\label{e:2}
\ensuremath{\varphi^{\alpha}_{\mu}}\geq \alpha e_{\mu}f +(1-\alpha) e_{\mu}g.
\end{equation}
Combining \eqref{e:1} and \eqref{e:2} gives
$$\alpha e_{\mu}f +(1-\alpha) e_{\mu}g \leq\ensuremath{\varphi^{\alpha}_{\mu}}\leq \alpha h_{\mu}f+(1-\alpha)h_{\mu}g
\leq \alpha f+(1-\alpha) g,$$
which is \eqref{e:three:c}. Equation \eqref{e:lim:0} follows from \eqref{e:three:c} by sending $\mu\downarrow 0$.
The pointwise and epigraphical limits agree because of
\cite[Proposition 7.4(d)]{rockwets}.
Now assume that $f,g$ are continuous.
Since both $e_{\mu}f$ and $f$ are continuous and $e_{\mu}f\uparrow f$ as $\mu\downarrow 0$,
Dini's theorem says that $e_{\mu}f\uparrow f$ uniformly on compact subsets
of $\operatorname{\mathbb{R}}^n$. The same can be said about
$e_{\mu}g\uparrow g$.
Hence, the convergence in \eqref{e:lim:0} is uniform
on compact subsets of $\operatorname{\mathbb{R}}^n$ by \eqref{e:three:c}.
\end{proof}
To study the epi-continuity of proximal average, we recall the following two standard notions.
\begin{df} A sequence of functions $(f_{k})_{k\in\operatorname{\mathbb{N}}}$ is eventually prox-bounded
if there exists $\lambda>0$ such that $\liminf_{k\rightarrow\infty}e_{\lambda}f_{k}(x)>-\infty$
for some $x$. The supremum of all such $\lambda$ is then
the threshold of eventual prox-boundedness of the sequence.
\end{df}
\begin{df}
A sequence of functions $(f_{k})_{k\in \operatorname{\mathbb{N}}}$ converges continuously to $f$ if
$f_{k}(x_{k})\rightarrow f(x)$ whenever $x_{k}\rightarrow x$.
\end{df}
The following key result is implicit in the proof of \cite[Theorem 7.37]{rockwets}.
We provide its proof for completeness.
Define $\operatorname{\mathcal{N}_{\infty}}=\{N\subset\operatorname{\mathbb{N}}|\ \operatorname{\mathbb{N}}\setminus N \text{ is finite}\}.$
\begin{lem}\label{l:env:cont}
Let $(f_{k})_{k\in\operatorname{\mathbb{N}}}$ and $f$ be proper, lsc functions on $\operatorname{\mathbb{R}}^n$.
Suppose that $(f_{k})_{k\in\operatorname{\mathbb{N}}}$ is eventually prox-bounded,
$\bar{\lambda}$ is the threshold of eventual prox-boundedness,
and $f_{k}\epi f$. Suppose also that
$\mu_{k}, \mu\in ]0,\bar{\lambda}[$, and $\mu_{k}\rightarrow\mu$.
Then $f$ is prox-bounded with threshold $\lambda_{f}\geq \bar{\lambda}$, and
$e_{\mu_{k}}f_{k}$ converges continuously to $e_{\mu}f$.
In particular,
$e_{\mu_{k}}f_{k}\epi e_{\mu}f$, and $e_{\mu_{k}}f_{k}\pw e_{\mu}f$.
\end{lem}
\begin{proof} Let $\varepsilon \in ]0,\operatorname{\bar{\lambda}}[$. The eventual prox-boundedness of $(f_{k})_{k\in\operatorname{\mathbb{N}}}$
means that
there exist $b\in\operatorname{\mathbb{R}}^n$,
$\beta\in\operatorname{\mathbb{R}}$ and $N\in\operatorname{\mathcal{N}_{\infty}}$ such that
$$(\forall k\in N)(\forall w\in \operatorname{\mathbb{R}}^n)\ f_{k}(w)
\geq \beta-\frac{1}{2\varepsilon}\|b-w\|^2.$$
Let $\mu\in ]0,\varepsilon[$.
Consider any $x\in\operatorname{\mathbb{R}}^n$ and any sequence $x_{k}\rightarrow x$
in $\operatorname{\mathbb{R}}^n$, and any sequence $\mu_{k}\rightarrow\mu$ in $]0,\operatorname{\bar{\lambda}}[$.
Since $f_{k}\epi f$, the functions
$f_{k}+(1/2\mu_{k})\|\cdot-x_{k}\|^2$ epi-converge to $f+(1/2\mu)\|\cdot-x\|^2$.
Take $\delta\in ]\mu,\varepsilon[$. Because
$\mu_{k}\rightarrow\mu$, there exists $N'\subseteq N$, $N'\in\operatorname{\mathcal{N}_{\infty}}$
such that $\mu_{k}\in ]0,\delta[$ when $k\in N'$.
Then $\forall k\in N'$,
\begin{align*}
f_{k}(w)+\frac{1}{2\mu_{k}}\|x_{k}-w\|^2 &\geq \beta-\frac{1}{2\varepsilon}\|b-w\|^2+ \frac{1}{2\delta}\|x_{k}-w\|^2\\
&=\beta-\frac{1}{2\varepsilon}\|b-w\|^2+ \frac{1}{2\delta}\|(x_{k}-b)+(b-w)\|^2\\
&\geq \beta+\bigg(\frac{1}{2\delta}-\frac{1}{2\varepsilon}\bigg)
\|b-w\|^2-\frac{1}{\delta}\|x_{k}-b\|\|b-w\|.
\end{align*}
In view of $x_{k}\rightarrow x$, the sequence $(\|x_{k}-b\|)_{k\in\operatorname{\mathbb{N}}}$ is bounded,
say by $\rho>0$. We have
$$
(\forall k\in N')\ f_{k}(w)+\frac{1}{2\mu_{k}}\|x_{k}-w\|^2 \geq h(w):=
\beta+\bigg(\frac{1}{2\delta}-\frac{1}{2\varepsilon}\bigg)\|b-w\|^2-\frac{\rho}{\delta}
\|b-w\|.
$$
The function $h$ is level-bounded because $\delta<\varepsilon$. Hence, by
\cite[Theorem 7.33]{rockwets},
$$\lim_{k\rightarrow\infty}\inf_{w}\bigg(f_{k}(w)+\frac{1}{2\mu_{k}}
\|x_{k}-w\|^2\bigg)
=\inf_{w}\bigg(f(w)+\frac{1}{2\mu}\|x-w\|^2\bigg),$$
i.e., $e_{\mu_{k}}f_{k}(x_{k})\rightarrow e_{\mu}f(x)$.
Also, $e_{\mu}f(x)$ is finite, so $\lambda_{f}\geq \mu$.
Since $\varepsilon\in ]0,\operatorname{\bar{\lambda}}[$ and $\mu\in ]0,\varepsilon[$ were
arbitrary, the result holds whenever $\mu\in ]0,\operatorname{\bar{\lambda}}[$. This in turn implies
$\lambda_{f}\geq\operatorname{\bar{\lambda}}$.
\end{proof}
For the convenience of analyzing the full epi-continuity,
below we write the proximal average $\ensuremath{\varphi^{\alpha}_{\mu}}$ explicitly in the form
$\varphi^{\alpha}_{\mu}(f,g)$.
\begin{thm}[full epi-continuity of proximal average]
Let the sequences of functions $(f_{k})_{k\in \operatorname{\mathbb{N}}}$, $(g_{k})_{k\in\operatorname{\mathbb{N}}}$ on $\operatorname{\mathbb{R}}^n$
be eventually prox-bounded
with threshold of eventual prox-boundedness $\bar{\lambda}>0$.
Let $(\mu_{k})_{k\in\operatorname{\mathbb{N}}}$ be a sequence and $\mu$ in
$]0,\bar{\lambda}[$ and let $(\alpha_{k})_{k\in\operatorname{\mathbb{N}}}$ be a sequence and $\alpha$
in
$[0,1]$. Suppose that $f_{k}\epi f$, $g_{k}\epi g$,
$\mu_{k}\rightarrow\mu$, and $\alpha_{k}\rightarrow\alpha$.
Then $\varphi^{\alpha_{k}}_{\mu_{k}}(f_{k},g_{k})\epi\varphi^{\alpha}_{\mu}(f,g)$.
\end{thm}
\begin{proof} Consider any $x\in\operatorname{\mathbb{R}}^n$ and any sequence $x_{k}\rightarrow x$.
By \cite[Example 11.26]{rockwets},
$$e_{\mu_{k}}f_{k}(\mu_{k}x_{k})=\frac{\mu_{k}\|x_{k}\|^2}{2}-
\bigg(f_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*(x_{k}).$$
Lemma~\ref{l:env:cont} shows that
\begin{align*}
\lim_{k\rightarrow\infty}
\bigg(f_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*(x_{k})
& = \lim_{k\rightarrow\infty}\bigg(\frac{\mu_{k}\|x_{k}\|^2}{2}-e_{\mu_{k}}f_{k}(\mu_{k}x_{k})\bigg)
=\frac{\mu\|x\|^2}{2}-e_{\mu}f(\mu x)\\
&=\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*(x).
\end{align*}
Therefore, the functions $\left(f_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\right)^*$ converge
continuously to $\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)^*$.
It follows that
$$\alpha_{k}\bigg(f_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*+
(1-\alpha_{k})\bigg(g_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*$$
converges continuously
to
$$\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*,$$
so epi-converges.
Then by Wijsman's theorem \cite[Theorem 11.34]{rockwets},
$$\bigg[\alpha_{k}\bigg(f_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*+
(1-\alpha_{k})\bigg(g_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*\bigg]^*$$
epi-converges to
$$\bigg[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\bigg]^*.$$
Since $(\mu, x)\mapsto \frac{1}{2\mu}\|x\|^2$ is continuous
on $]0,+\infty[\times\operatorname{\mathbb{R}}^n$, we have that
$$\varphi^{\alpha_{k}}_{\mu_{k}}(f_{k},g_{k})
=\bigg[\alpha_{k}\bigg(f_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*+
(1-\alpha_{k})\bigg(g_{k}+\frac{1}{2\mu_{k}}\|\cdot\|^2\bigg)^*\bigg]^*
-\frac{1}{2\mu_{k}}\|\cdot\|^2$$
epi-converges to
$$\varphi^{\alpha}_{\mu}(f,g)
=\bigg[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\bigg]^*
-\frac{1}{2\mu}\|\cdot\|^2.$$
\end{proof}
\begin{cor}[epi-continuity of the proximal average]\label{t:alpha:epi}
Let $0<\mu<\operatorname{\bar{\lambda}}$.
Then the function
$\alpha\mapsto \ensuremath{\varphi^{\alpha}_{\mu}}$ is continuous with respect to the epi-topology. That is,
$\forall (\alpha_{k})_{k\in\operatorname{\mathbb{N}}}$ and $\alpha$ in $[0,1]$,
$$\alpha_{k}\rightarrow\alpha \quad \Rightarrow \quad \varphi^{\alpha_{k}}_{\mu}\epi\ensuremath{\varphi^{\alpha}_{\mu}}.$$
In particular, $\ensuremath{\varphi^{\alpha}_{\mu}}\epi h_{\mu}g$ when $\alpha\downarrow 0$, and
$\ensuremath{\varphi^{\alpha}_{\mu}}\epi h_{\mu}f$ when $\alpha\uparrow 1$.
\end{cor}
\section{Optimal value and minimizers of the proximal average}\label{s:opti}
\subsection{Relationship of infimum and minimizers among
$\ensuremath{\varphi^{\alpha}_{\mu}}$, $f$ and $g$.}
\begin{prop}\label{p:minimizer} Let $0<\mu<\operatorname{\bar{\lambda}}$. One has
\begin{enumerate}[label=\rm(\alph*)]
\item \label{e:env:conv}
\begin{align*}
\inf\ensuremath{\varphi^{\alpha}_{\mu}} &= \inf[\alpha e_{\mu}f+(1-\alpha)e_{\mu}g], \text{ \emph{and} }\\
\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}} &=\operatornamewithlimits{argmin} [\alpha e_{\mu}f+(1-\alpha)e_{\mu}g];
\end{align*}
\item \label{e:hull:arith}
\begin{align*}
\alpha\inf f+(1-\alpha)\inf g& \leq \inf\ensuremath{\varphi^{\alpha}_{\mu}}
\leq\inf[\alpha h_{\mu}f+(1-\alpha)h_{\mu}g]
\leq \inf[\alpha f+(1-\alpha) g].
\end{align*}
\end{enumerate}
\end{prop}
\begin{proof}
For \ref{e:env:conv}, apply Theorem~\ref{t:prox}\ref{i:env:conhull}
and $\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}=\operatornamewithlimits{argmin} e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}$.
For \ref{e:hull:arith}, apply Theorem~\ref{t:u:0}\ref{i:three:c}
and $\inf e_{\mu}f=\inf f$, and $\inf e_{\mu}g=\inf g$.
\end{proof}
\begin{thm} Suppose that $\operatornamewithlimits{argmin} f\cap \operatornamewithlimits{argmin} g\neq\varnothing$ and
$\alpha\in ]0,1[$.
Then the following hold:
\begin{enumerate}[label=\rm(\alph*)]
\item \label{i:arithmin}
\begin{equation}\label{e:arith}
\min(\alpha f+(1-\alpha)g)=\alpha\min f+(1-\alpha)\min g, \text{ \emph{and} }
\end{equation}
\begin{equation}\label{e:arith:minimizer}
\operatornamewithlimits{argmin}(\alpha f+(1-\alpha)g)=\operatornamewithlimits{argmin} f\cap \operatornamewithlimits{argmin} g;
\end{equation}
\item \label{i:proxmin}
\begin{equation}\label{e:minvalue}
\min\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha\min f+(1-\alpha)\min g, \text{ \emph{and} }
\end{equation}
\begin{equation*}
\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}=\operatornamewithlimits{argmin} f\cap \operatornamewithlimits{argmin} g.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof} Pick $x\in \operatornamewithlimits{argmin} f\cap \operatornamewithlimits{argmin} g$. We have
\begin{equation}\label{e:minvalue:two}
\inf[\alpha f+(1-\alpha)g]=\alpha f(x)+(1-\alpha) g(x)=\alpha\min f+(1-\alpha)\min g.
\end{equation}
\ref{i:arithmin}: Equation \eqref{e:minvalue:two} gives \eqref{e:arith} and
\begin{equation*}\label{e:setinclus}
(\operatornamewithlimits{argmin} f\cap\operatornamewithlimits{argmin} g)\subseteq \operatornamewithlimits{argmin} (\alpha f+(1-\alpha)g).
\end{equation*}
To see the converse inclusion in \eqref{e:arith:minimizer},
let $x\in\operatornamewithlimits{argmin}(\alpha f+(1-\alpha)g)$. Then \eqref{e:arith} gives
$$\alpha \min f+(1-\alpha)\min g=\min(\alpha f+(1-\alpha)g)=\alpha f(x)+(1-\alpha)g(x),$$
from which
$$\alpha (\min f-f(x))+(1-\alpha)(\min g-g(x))=0.$$
Since $\min f\leq f(x), \min g\leq g(x)$, we obtain
$\min f= f(x), \min g= g(x)$, so $x\in\operatornamewithlimits{argmin} f\cap\operatornamewithlimits{argmin} g$. Thus,
$\operatornamewithlimits{argmin} (\alpha f+(1-\alpha)g)\subseteq (\operatornamewithlimits{argmin} f\cap\operatornamewithlimits{argmin} g)$. Hence,
\eqref{e:arith:minimizer} holds.
\noindent\ref{i:proxmin}: Equation \eqref{e:minvalue} follows from Proposition~\ref{p:minimizer} and Theorem~\ref{t:u:0}\ref{i:three:c}. This also gives
$$(\operatornamewithlimits{argmin} f\cap \operatornamewithlimits{argmin} g)\subseteq \operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}.$$
To show $(\operatornamewithlimits{argmin} f\cap \operatornamewithlimits{argmin} g)\supseteq \operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}$, take any $x\in \operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}$.
By \eqref{e:minvalue} and Theorem~\ref{t:u:0}\ref{i:three:c}, we have
$$\alpha\min f+(1-\alpha)\min g=\ensuremath{\varphi^{\alpha}_{\mu}}(x)\geq \alpha e_{\mu}f(x)+(1-\alpha)e_{\mu}g(x),$$
from which
$$\alpha(e_{\mu}f(x)-\min f)+(1-\alpha)(e_{\mu}g(x)-\min g)\leq 0.$$
Since $\min f=\min e_{\mu}f$ and $\min g=\min e_{\mu}g$, it follows that
$e_{\mu}f(x)=\min e_{\mu}f$ and $e_{\mu}g(x)=\min e_{\mu}g$, so
$x\in(\operatornamewithlimits{argmin} e_{\mu}f\cap\operatornamewithlimits{argmin} e_{\mu}g)=(\operatornamewithlimits{argmin} f\cap\operatornamewithlimits{argmin} g)$
because of $\operatornamewithlimits{argmin} e_{\mu}f=\operatornamewithlimits{argmin} f$ and $\operatornamewithlimits{argmin} e_{\mu}g=\operatornamewithlimits{argmin} g$.
\end{proof}
To explore further optimization properties of $\ensuremath{\varphi^{\alpha}_{\mu}}$, we need the following three
auxiliary results.
\begin{lem}\label{l:box} Suppose that $f_{1}, f_{2}:\operatorname{\mathbb{R}}^n\rightarrow\overline{\mathbb{R}}$ are
proper and lsc, and that
$f_{1}\Box f_{2}$ is exact. Then
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:box:inf}
\begin{equation}\label{e:epis:inf}
\inf (\ensuremath{f_{1}\Box f_{2}})=\inf f_{1}+\inf f_{2}, \text{ and }
\end{equation}
\item\label{i:box:min}
\begin{equation}\label{e:epis:min}
\operatornamewithlimits{argmin} (\ensuremath{f_{1}\Box f_{2}})=\operatornamewithlimits{argmin} f_{1}+\operatornamewithlimits{argmin} f_{2}.
\end{equation}
\end{enumerate}
\end{lem}
\begin{proof} Equation \eqref{e:epis:inf} follows from
\begin{align*}
\inf\ensuremath{f_{1}\Box f_{2}} &=\inf_{x}\inf_{x=y+z}[f_{1}(y)+f_{2}(z)]\\
&=\inf_{y,z}[f_{1}(y)+f_{2}(z)]=\inf f_{1}+\inf f_{2}.
\end{align*}
To see \eqref{e:epis:min}, we first show
\begin{equation}\label{e:setsum}
\operatornamewithlimits{argmin} (\ensuremath{f_{1}\Box f_{2}})\subseteq\operatornamewithlimits{argmin} f_{1}+\operatornamewithlimits{argmin} f_{2}.
\end{equation}
If $\operatornamewithlimits{argmin} (\ensuremath{f_{1}\Box f_{2}})=\varnothing$, the inclusion holds trivially. Let us
assume that $\operatornamewithlimits{argmin} (\ensuremath{f_{1}\Box f_{2}})\neq \varnothing$ and
let $x\in\operatornamewithlimits{argmin} (\ensuremath{f_{1}\Box f_{2}})$. Since $\ensuremath{f_{1}\Box f_{2}}$ is exact, we have $x=y+z$ for some $y,z$ and
$\ensuremath{f_{1}\Box f_{2}}(x)=f_{1}(y)+f_{2}(z)$. In view of \eqref{e:epis:inf},
\begin{align*}
f_{1}(y)+f_{2}(z) &=\ensuremath{f_{1}\Box f_{2}}(x)=\min \ensuremath{f_{1}\Box f_{2}}=\inf f_{1}+\inf f_{2},
\end{align*}
from which
$$(f_{1}(y)-\inf f_{1})+(f_{2}(z)-\inf f_{2})=0.$$
Then $f_{1}(y)=\inf f_{1}, f_{2}(z)=\inf f_{2}$, which gives
$y\in\operatornamewithlimits{argmin} f_{1}, z\in\operatornamewithlimits{argmin} f_{2}$. Therefore,
$x\in \operatornamewithlimits{argmin} f_{1}+\operatornamewithlimits{argmin} f_{2}$. Next, we show
\begin{equation}\label{e:setsum2}
\operatornamewithlimits{argmin} (\ensuremath{f_{1}\Box f_{2}})\supseteq\operatornamewithlimits{argmin} f_{1}+\operatornamewithlimits{argmin} f_{2}.
\end{equation}
If one of $\operatornamewithlimits{argmin} f_{1}, \operatornamewithlimits{argmin} f_{2}$ is empty, the inclusion holds
trivially. Assume that
$\operatornamewithlimits{argmin} f_{1}\neq\varnothing$ and $\operatornamewithlimits{argmin} f_{2}\neq\varnothing$.
Take
$y\in\operatornamewithlimits{argmin} f_{1}, z\in\operatornamewithlimits{argmin} f_{2}$, and put $x=y+z$. The definition of
$\Box$ and \eqref{e:epis:inf} give
$$\ensuremath{f_{1}\Box f_{2}}(x)\leq f_{1}(y)+f_{2}(z)=\min f_{1}+\min f_{2}=
\inf \ensuremath{f_{1}\Box f_{2}},$$
which implies $x\in\operatornamewithlimits{argmin}(\ensuremath{f_{1}\Box f_{2}})$. Since $y\in\operatornamewithlimits{argmin} f_{1}$, $z\in\operatornamewithlimits{argmin} f_{2}$
were arbitrary,
\eqref{e:setsum2} follows. Combining \eqref{e:setsum} and \eqref{e:setsum2}
gives \eqref{e:epis:min}.
\end{proof}
\begin{lem}\label{l:epimulti}
Let $f_{1}:\operatorname{\mathbb{R}}^n\rightarrow\overline{\mathbb{R}}$ be proper and lsc, and let $\beta>0$. Then
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:epimul:inf}
$$\inf\left[\beta f_{1}\left(\frac{\cdot}{\beta}\right)\right]=\beta\inf f_{1},\text{ and }
$$
\item \label{i:epimul:min}
$$\operatornamewithlimits{argmin} \left[\beta f_{1}
\left(\frac{\cdot}{\beta}\right)\right]=\beta\operatornamewithlimits{argmin} f_{1}.$$
\end{enumerate}
\end{lem}
\begin{lem}\label{l:convexhull:f} Let $f_{1}:\operatorname{\mathbb{R}}^n\rightarrow\overline{\mathbb{R}}$ be proper and lsc.
Then the following
hold:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:convexh:inf}
$$\inf (\operatornamewithlimits{conv} f_1) =\inf f_{1};$$
\item\label{i:convexh:min}
if, in addition, $f_{1}$ is coercive, then
$$\operatornamewithlimits{argmin} (\operatornamewithlimits{conv} f_{1})
=\operatornamewithlimits{conv}(\operatornamewithlimits{argmin} f_{1}),$$
and $\operatornamewithlimits{argmin} (\operatornamewithlimits{conv} f_{1})\neq\varnothing$.
\end{enumerate}
\end{lem}
\begin{proof} Combine \cite[Comment 3.7(4)]{benoist} and
\cite[Corollary 3.47]{rockwets}.
\end{proof}
We are now ready for the main result of this section.
\begin{thm}\label{t:shifted}
Let $0<\mu<\operatorname{\bar{\lambda}}$, and
let $\ensuremath{\varphi^{\alpha}_{\mu}}$ be defined as in \eqref{e:prox:def}.
Then the following hold:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:shifted:inf}
\begin{align*}
&\inf\left(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\right)\\
&=\alpha\inf\left(
f+\frac{1}{2\mu}\|\cdot\|^2\right)+(1-\alpha)
\inf \left(
g+\frac{1}{2\mu}\|\cdot\|^2\right);
\end{align*}
\item\label{i:shifted:min}
\begin{align*}
&\operatornamewithlimits{argmin}\left(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\right)\\
& =\alpha\operatornamewithlimits{conv} \left[\operatornamewithlimits{argmin}\left(
f+\frac{1}{2\mu}\|\cdot\|^2\right)\right]+(1-\alpha)
\operatornamewithlimits{conv} \left[\operatornamewithlimits{argmin}\left(
g+\frac{1}{2\mu}\|\cdot\|^2\right)\right]\neq\varnothing.
\end{align*}
\end{enumerate}
\end{thm}
\begin{proof} Theorem~\ref{t:prox}\ref{i:epi:sum} gives
\begin{align*}
& \ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\\
&=\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\right]
\Box \left[(1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right],
\end{align*}
in which the inf-convolution $\Box$ is exact.
\noindent\ref{i:shifted:inf}: Using Lemma \ref{l:box}\ref{i:box:inf} and Lemma \ref{l:convexhull:f}\ref{i:convexh:inf},
we deduce
\begin{align*}
&\inf\left(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\right)\\
&=\inf \left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\right]
+\inf \left[(1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right]\\
&=\alpha\inf \left[\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\right]
+(1-\alpha)\inf \left[\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\right]\\
&=\alpha\inf\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)
+(1-\alpha)\inf
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg).
\end{align*}
\noindent\ref{i:shifted:min}: Note that
$f+\frac{1}{2\mu}\|\cdot\|^2$ and
$g+\frac{1}{2\mu}\|\cdot\|^2$ are coercive
because of $0<\mu<\operatorname{\bar{\lambda}}$.
Using Lemma~\ref{l:box}\ref{i:box:min}--Lemma~\ref{l:convexhull:f}\ref{i:convexh:min},
we deduce
\begin{align*}
&\operatornamewithlimits{argmin}\left(\ensuremath{\varphi^{\alpha}_{\mu}}+\frac{1}{2\mu}\|\cdot\|^2\right)\\
&=\operatornamewithlimits{argmin} \left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\right]
+\operatornamewithlimits{argmin} \left[(1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right]\\
&=\alpha\operatornamewithlimits{argmin} \left[\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\right]
+(1-\alpha)\operatornamewithlimits{argmin} \left[\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\right]\\
&=\alpha\operatornamewithlimits{conv}\left[\operatornamewithlimits{argmin} \bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\right]
+(1-\alpha)\operatornamewithlimits{conv}\left[\operatornamewithlimits{argmin}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\right].
\end{align*}
Finally, these three sets of minimizers are nonempty by
Lemma~\ref{l:convexhull:f}\ref{i:convexh:min}.
\end{proof}
\begin{rem} \emph{Theorem~\ref{t:shifted}\ref{i:shifted:min}} is just a rewritten form of
$$\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}(0)=\alpha\operatornamewithlimits{conv}[\operatorname{Prox}_{\mu}f(0)]+(1-\alpha)\operatornamewithlimits{conv}[\operatorname{Prox}_{\mu}g(0)].$$
\end{rem}
In view of
Theorem~\ref{t:go:infinity}\ref{i:inf:phi2},
when $\operatorname{\bar{\lambda}}=\infty$, as $\mu\rightarrow\infty$ the pointwise limit is
$$\ensuremath{\varphi^{\alpha}_{\mu}}\pw \left[
\alpha\operatornamewithlimits{conv} f\bigg(\frac{\cdot}{\alpha}\bigg)\Box (1-\alpha)\operatornamewithlimits{conv}
g\bigg(\frac{\cdot}{1-\alpha}\bigg)\right],$$
and the epi-limit is
$$\ensuremath{\varphi^{\alpha}_{\mu}}\epi \operatorname{cl}\left[
\alpha\operatornamewithlimits{conv} f\bigg(\frac{\cdot}{\alpha}\bigg)\Box (1-\alpha)\operatornamewithlimits{conv}
g\bigg(\frac{\cdot}{1-\alpha}\bigg)\right].$$
We conclude this section with a result on minimization of this limit.
\begin{prop}\label{p:coercive}
Suppose that both $f$ and $g$ are coercive. Then the following hold:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:coercive:1}
$\alpha\operatornamewithlimits{conv} f\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
g\left(\frac{\cdot}{1-\alpha}\right)$ is proper, lsc and convex;
\item\label{i:coercive:2}
$$\min \left[\alpha\operatornamewithlimits{conv} f\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
g\left(\frac{\cdot}{1-\alpha}\right)\right]=
\alpha\min f+(1-\alpha)\min g;$$
\item\label{i:coercive:3}
\begin{align*}
& \operatornamewithlimits{argmin}\left[\alpha\operatornamewithlimits{conv} f\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
g\left(\frac{\cdot}{1-\alpha}\right)\right]\\
& =
\alpha\operatornamewithlimits{conv}\operatornamewithlimits{argmin} f+(1-\alpha)\operatornamewithlimits{conv}\operatornamewithlimits{argmin} g\neq\varnothing.
\end{align*}
\end{enumerate}
\end{prop}
\begin{proof} Since both $f$ and $g$ are coercive, by \cite[Corollary 3.47]{rockwets},
$\operatornamewithlimits{conv} f$ and $\operatornamewithlimits{conv} g$ are lsc, convex and coercive.
As
$$(\alpha f^*+(1-\alpha) g^*)^*=\operatorname{cl}\left[\alpha\operatornamewithlimits{conv} f\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
g\left(\frac{\cdot}{1-\alpha}\right)\right]$$
and $\operatorname{dom} f^*=\operatorname{\mathbb{R}}^n=\operatorname{dom} g^*$, the closure operation on the right-hand side is superfluous.
This establishes \ref{i:coercive:1}.
Moreover, the
infimal convolution
\begin{equation}\label{e:coercive0}
\alpha\operatornamewithlimits{conv} f\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
g\left(\frac{\cdot}{1-\alpha}\right)
\end{equation}
is exact.
For
\ref{i:coercive:2}, \ref{i:coercive:3}, it suffices to apply Lemma~\ref{l:box} to
\eqref{e:coercive0} for the functions $\alpha\operatornamewithlimits{conv} f\left(\frac{\cdot}{\alpha}\right)$ and
$(1-\alpha)\operatornamewithlimits{conv} g\left(\frac{\cdot}{1-\alpha}\right)$, followed by invoking
Lemma~\ref{l:epimulti} and Lemma~\ref{l:convexhull:f}.
\end{proof}
\subsection{Convergence in minimization}
We need the following result on coercivity.
\begin{lem}\label{l:psi}
Let $0<\mu<\operatorname{\bar{\lambda}}$, and let $\psi:\operatorname{\mathbb{R}}^n\rightarrow\operatorname{\mathbb{R}}$ be a convex function. If
$f\geq \psi, g\geq\psi$, then $\ensuremath{\varphi^{\alpha}_{\mu}}\geq \psi$.
\end{lem}
\begin{proof}
Recall
$\ensuremath{\varphi^{\alpha}_{\mu}}(x)=$
$$\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right](x)
-\frac{1}{2\mu}\|x\|^2.
$$
As $f+\frac{1}{2\mu}\|\cdot\|^2\geq \psi+\frac{1}{2\mu}\|\cdot\|^2$ and the latter is convex,
we have
$$\operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)\geq \psi+\frac{1}{2\mu}\|\cdot\|^2;$$
similarly,
$$\operatornamewithlimits{conv}\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\geq \psi+\frac{1}{2\mu}\|\cdot\|^2.$$
Then
\begin{align*}
&\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\\
&\geq \alpha\bigg(\psi+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)
\Box
(1-\alpha)\bigg(\psi+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\\
&=\psi+\frac{1}{2\mu}\|\cdot\|^2,
\end{align*}
in which we have used the convexity of $\psi+\frac{1}{2\mu}\|\cdot\|^2$.
The result follows.
\end{proof}
\begin{thm} Let $0<\mu<\operatorname{\bar{\lambda}}$. One has the following.
\begin{enumerate}[label=\rm(\alph*)]
\item \label{i:coercive0}If $f, g$ are bounded from below,
then $\ensuremath{\varphi^{\alpha}_{\mu}}$ is bounded from below.
\item\label{i:coercive1}
If $f, g$ are level-coercive, then $\ensuremath{\varphi^{\alpha}_{\mu}}$ is level-coercive.
\item\label{i:coercive2}
If $f,g$ are coercive, then $\ensuremath{\varphi^{\alpha}_{\mu}}$ is coercive.
\end{enumerate}
\end{thm}
\begin{proof}
\ref{i:coercive0}: Put $\psi=\min\{\inf f, \inf g\}$ and apply Lemma~\ref{l:psi}.
\noindent\ref{i:coercive1}:
By \cite[Theorem 3.26(a)]{rockwets}, there exist $\gamma\in (0,\infty)$, and $\beta\in\operatorname{\mathbb{R}}$
such that $f\geq \psi, g\geq \psi$ with $\psi=\gamma\|\cdot\|+\beta$.
Apply Lemma~\ref{l:psi}.
\noindent\ref{i:coercive2}: By \cite[Theorem 3.26(b)]{rockwets}, for
every $\gamma\in (0,\infty)$, there exists $\beta\in\operatorname{\mathbb{R}}$
such that $f\geq \psi, g\geq \psi$ with $\psi=\gamma\|\cdot\|+\beta$.
Apply Lemma~\ref{l:psi}.
\end{proof}
\begin{thm} Suppose that the proper, lsc functions $f, g$ are level-coercive. Then
for every $\operatorname{\bar{\alpha}}\in [0,1]$, we have
\begin{align*}
\lim_{\alpha\rightarrow\operatorname{\bar{\alpha}}}\inf\ensuremath{\varphi^{\alpha}_{\mu}} & =\inf \varphi^{\bar{\alpha}}_{\mu} \text{ (finite)}, \text{ and }
\\
\limsup_{\alpha\rightarrow\operatorname{\bar{\alpha}}}\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}} & \subseteq\operatornamewithlimits{argmin} \varphi^{\bar{\alpha}}_{\mu}.
\end{align*}
Moreover, $(\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}})_{\alpha\in[0,1]}$ lies in a bounded set.
Consequently,
$$\lim_{\alpha\downarrow 0}\inf\ensuremath{\varphi^{\alpha}_{\mu}}=\inf g, \text{ and } \limsup_{\alpha\downarrow 0}\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}\subseteq\operatornamewithlimits{argmin} g;$$
$$\lim_{\alpha\uparrow 1}\inf\ensuremath{\varphi^{\alpha}_{\mu}}=\inf f, \text{ and }
\limsup_{\alpha\uparrow 1}\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}}\subseteq\operatornamewithlimits{argmin} f.$$
\end{thm}
\begin{proof} By assumption, there exist $\gamma>0$ and $\beta\in\operatorname{\mathbb{R}}$ such that
$f\geq\gamma\|\cdot\|+\beta, g\geq\gamma\|\cdot\|+\beta$. Lemma~\ref{l:psi}
shows that $\ensuremath{\varphi^{\alpha}_{\mu}}\geq \gamma\|\cdot\|+\beta$ for every $\alpha\in [0,1]$.
Since $\gamma\|\cdot\|+\beta$ is level-bounded,
$(\ensuremath{\varphi^{\alpha}_{\mu}})_{\alpha\in [0,1]}$ is uniformly level-bounded (so
eventually level-bounded). Corollary~\ref{t:alpha:epi} says that
$\alpha\mapsto \ensuremath{\varphi^{\alpha}_{\mu}}$ is epi-continuous on $[0,1]$.
As $\lambda_{f}=\lambda_{g}=\infty$,
$\ensuremath{\varphi^{\alpha}_{\mu}}$ and $\varphi^{\bar{\alpha}}_{\mu}$ are proper and lsc for every $\mu>0$. Hence
\cite[Theorem 7.33]{rockwets} applies.
\end{proof}
\begin{thm} Suppose that the proper, lsc functions $f, g$ are level-coercive and
$\operatorname{dom} f\cap\operatorname{dom} g\neq\varnothing$. Then
\begin{align*}
\lim_{\mu\downarrow 0}\inf\ensuremath{\varphi^{\alpha}_{\mu}} & =\inf (\alpha f+(1-\alpha)g), \text{ and }
\\
\limsup_{\mu\downarrow 0}\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}} & \subseteq\operatornamewithlimits{argmin} (\alpha f+(1-\alpha)g).
\end{align*}
Moreover, $(\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}})_{\mu>0}$ lies in a bounded set.
\end{thm}
\begin{proof}
Note that each $\ensuremath{\varphi^{\alpha}_{\mu}}$ is proper and lsc, and $f+g$ is proper and lsc.
By Theorem~\ref{t:u:0}, when $\mu\downarrow 0$, $\ensuremath{\varphi^{\alpha}_{\mu}}$ epi-converges to
$f+g$.
By assumption, there exist $\gamma>0$ and $\beta\in\operatorname{\mathbb{R}}$ such that
$f\geq\gamma\|\cdot\|+\beta, g\geq\gamma\|\cdot\|+\beta$. Lemma~\ref{l:psi}
shows that $\ensuremath{\varphi^{\alpha}_{\mu}}\geq \gamma\|\cdot\|+\beta$ for every $\mu\in ]0,\infty[$.
Since $\gamma\|\cdot\|+\beta$ is level-bounded,
$(\ensuremath{\varphi^{\alpha}_{\mu}})_{\mu\in ]0,\infty[}$ is uniformly level-bounded (so
eventually level-bounded).
It remains to
apply \cite[Theorem 7.33]{rockwets}.
\end{proof}
\begin{thm} Suppose that the proper and lsc functions $f, g$ are coercive.
Then
for every $\operatorname{\bar{\mu}}\in ]0,\infty]$, we have
\begin{align}\label{e:bcoercive}
\lim_{\mu\uparrow\operatorname{\bar{\mu}}}\inf\ensuremath{\varphi^{\alpha}_{\mu}} & =\inf \varphi^{\alpha}_{\bar{\mu}} \text{ (finite)}, \text{ and }
\nonumber\\
\limsup_{\mu\uparrow \operatorname{\bar{\mu}}}\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}} & \subseteq\operatornamewithlimits{argmin} \varphi^{\alpha}_{\bar{\mu}}.
\end{align}
Moreover, $(\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}})_{\mu>0}$ lies in a bounded set.
Consequently,
\begin{align}\label{e:coercive:inf}
\lim_{\mu\uparrow \infty}\inf\ensuremath{\varphi^{\alpha}_{\mu}} & =\alpha\min f+(1-\alpha)\min g, \text{ and }
\nonumber \\
\limsup_{\mu\uparrow \infty}\operatornamewithlimits{argmin}\ensuremath{\varphi^{\alpha}_{\mu}} & \subseteq
(\alpha\operatornamewithlimits{conv}\operatornamewithlimits{argmin} f+(1-\alpha)\operatornamewithlimits{conv}\operatornamewithlimits{argmin} g).
\end{align}
\end{thm}
\begin{proof} Note that each $\ensuremath{\varphi^{\alpha}_{\mu}}$ is proper and lsc for $\mu\in]0,\infty[$.
When $\mu=\infty$, Proposition~\ref{p:coercive} gives that
the epi-limit is proper, lsc and convex. By Theorem~\ref{t:go:infinity}\ref{i:mono:phi},
when $\mu\uparrow\operatorname{\bar{\mu}}$, $\ensuremath{\varphi^{\alpha}_{\mu}}$ monotonically decreases to $\varphi^{\alpha}_{\bar{\mu}}$.
Since $\varphi^{\alpha}_{\bar{\mu}}$ is lsc, $\ensuremath{\varphi^{\alpha}_{\mu}}$ epi-converges to $\varphi^{\alpha}_{\bar{\mu}}$.
By assumption, for every $\gamma>0$ there exists $\beta\in\operatorname{\mathbb{R}}$ such that
$f\geq\gamma\|\cdot\|+\beta, g\geq\gamma\|\cdot\|+\beta$. Lemma~\ref{l:psi}
shows that $\ensuremath{\varphi^{\alpha}_{\mu}}\geq \gamma\|\cdot\|+\beta$ for every $\mu\in ]0,\infty[$.
Since $\gamma\|\cdot\|+\beta$ is level-bounded,
$(\ensuremath{\varphi^{\alpha}_{\mu}})_{\mu\in ]0,\infty[}$ is uniformly level-bounded (so
eventually level-bounded). Hence \eqref{e:bcoercive} follows from
\cite[Theorem 7.33]{rockwets}.
Combining \eqref{e:bcoercive}, Theorem~\ref{t:go:infinity}
and Proposition~\ref{p:coercive} yields \eqref{e:coercive:inf}.
\end{proof}
\section{Subdifferentiability of the proximal average}\label{s:subd}
In this section, we focus on the subdifferentiability and differentiability of the proximal average.
Following Benoist and Hiriart-Urruty \cite{benoist},
we say that a family of points $\{x_{1},\ldots, x_{m}\}$ in $\operatorname{dom} f$
is called by $x\in\operatorname{dom}\operatornamewithlimits{conv} f$ if
$$x=\sum_{i=1}^{m}\alpha_{i}x_{i}, \text{ and } \operatornamewithlimits{conv} f(x)=\sum_{i=1}^{m}\alpha_{i}f(x_{i}),$$
where $\sum_{i=1}^{m}\alpha_{i}=1$ and $(\forall i)\ \alpha_{i}>0$.
The following result is the central one of this section.
\begin{thm}[subdifferentiability of the proximal average]\label{t:vphi:sub}
Let
$0<\mu<\operatorname{\bar{\lambda}}$,
let $x\in\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}$ and $x=y+z$.
Suppose the following conditions hold:
\begin{enumerate}[label=\rm(\alph*)]
\item \label{i:function1}
\begin{align*}
&\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right](x)\\
&=\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{y}{\alpha}\right)+(1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{z}{1-\alpha}\right),
\end{align*}
\item \label{i:function2}$\{y_{1},\ldots,y_{l}\}$ are called by
$y/\alpha$ in $\operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)$, and
\item \label{i:function3}
$\{z_{1},\ldots,z_{m}\}$ are called by
$z/(1-\alpha)$ in $\operatornamewithlimits{conv}\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)$.
\end{enumerate}
Then
\begin{align*}
\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x) &=
\partial_{L}\ensuremath{\varphi^{\alpha}_{\mu}}(x)
=\partial_{C}\ensuremath{\varphi^{\alpha}_{\mu}}(x)
\\
&=\left[\cap_{i=1}^{l}\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
\left(y_{i}\right)\right]\cap
\left[\cap_{j=1}^{m}\partial\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\left(z_{j}\right)\right]-
\frac{x}{\mu}.
\end{align*}
\end{thm}
\begin{proof} By Theorem~\ref{t:prox}\ref{i:epi:sum}, the Clarke regularity of
$\ensuremath{\varphi^{\alpha}_{\mu}}$ and the sum rule of limiting subdifferentials, we have
$\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x)=\partial_{C}\ensuremath{\varphi^{\alpha}_{\mu}}(x)=\partial_{L}\ensuremath{\varphi^{\alpha}_{\mu}}(x)=$
\begin{align}\label{e:diff:conv}
&\partial_{L}\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right](x)
-\frac{x}{\mu}\\
&=\partial \left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right](x)
-\frac{x}{\mu}.\nonumber
\end{align}
Using the subdifferential formula for infimal convolution \cite[Proposition 16.61]{convmono} or \cite[Corollary 2.4.7]{zalinescu2002convex}, we obtain
\begin{align*}
& \partial\left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{\alpha}\right)\Box (1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{\cdot}{1-\alpha}\right)\right](x)\\
&=
\partial \left[\alpha\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{y}{\alpha}\right)\right]\cap
\partial \left[(1-\alpha)\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\left(\frac{z}{1-\alpha}\right)\right]\\
&=
\partial\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)(\bar{y})\cap
\partial\operatornamewithlimits{conv}
\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)(\bar{z}),
\end{align*}
where $\bar{y}=\frac{y}{\alpha}$, $\bar{z}=\frac{z}{1-\alpha}$.
The subdifferential formula for the convex hull of a coercive function
\cite[Corollary 4.9]{benoist} or \cite[Theorem 3.2]{rabier} gives
\begin{equation}\label{e:convf}
\partial\operatornamewithlimits{conv}\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\big(\bar{y}\big)=
\cap_{i=1}^{l}\partial\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)\big(y_{i}\big),
\end{equation}
\begin{equation}\label{e:convg}
\partial\operatornamewithlimits{conv}\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\big(\bar{z}\big)=
\cap_{j=1}^{m}\partial\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)\big(z_{j}\big).
\end{equation}
Therefore, the result follows by combining \eqref{e:diff:conv}, \eqref{e:convf} and \eqref{e:convg}.
\end{proof}
\begin{cor}
Let $0<\mu<\operatorname{\bar{\lambda}}$,
let $\alpha_{i}>0, \beta_{j}>0$ with
$\sum_{i=1}^{l}\alpha_{i}=1, \sum_{j=1}^{m}\beta_{j}=1$ and let
$\alpha\in ]0,1[$.
Suppose that
\begin{equation*}\label{e:x=yz}
x=\alpha\sum_{i=1}^{l}\alpha_{i}y_{i}+(1-\alpha)\sum_{j=1}^{m}\beta_{j}z_{j},
\end{equation*}
and
\begin{equation}\label{e:commonsub}
\left[\cap_{i=1}^{l}\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
\left(y_{i}\right)\right]\cap
\left[\cap_{j=1}^{m}\partial\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\left(z_{j}\right)\right]
\neq \varnothing.
\end{equation}
Then
\begin{align*}
\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x) &=
\partial_{L}\ensuremath{\varphi^{\alpha}_{\mu}}(x)
=\partial_{C}\ensuremath{\varphi^{\alpha}_{\mu}}(x)\nonumber\\
&=\left[\cap_{i=1}^{l}\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
\left(y_{i}\right)\right]\cap
\left[\cap_{j=1}^{m}\partial\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\left(z_{j}\right)\right]-
\frac{x}{\mu}.
\end{align*}
\end{cor}
\begin{proof} We will show that
\begin{equation}\label{e:called:y0}
\operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)\left(\sum_{i=1}^{l}\alpha_{i}y_{i}\right)
=\sum_{i=1}^{l}\alpha_{i}
\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i}).
\end{equation}
By \eqref{e:commonsub}, there exists
$$y^*\in \left[\cap_{i=1}^{l}\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
\left(y_{i}\right)\right]\cap
\left[\cap_{j=1}^{m}\partial\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\left(z_{j}\right)\right].$$
For every $y_{i}$, we have
$$(\forall u\in\operatorname{\mathbb{R}}^n)\ \left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(u)\geq \left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})
+\scal{y^*}{u-y_{i}}.$$ Multiplying each inequality by $\alpha_i$, followed by summing
them up, gives
$$(\forall u\in\operatorname{\mathbb{R}}^n)\ \left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(u)\geq \sum_{i=1}^{l}\alpha_{i}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})
+\scal{y^*}{u-\sum_{i=1}^{l}\alpha_{i}y_{i}}.$$
Then
$$(\forall u\in\operatorname{\mathbb{R}}^n)\ \operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(u)\geq \sum_{i=1}^{l}\alpha_{i}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})
+\scal{y^*}{u-\sum_{i=1}^{l}\alpha_{i}y_{i}},$$
from which
\begin{equation}\label{e:called:y1}
\operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)\left(\sum_{i=1}^{l}\alpha_{i}y_{i}\right)
\geq \sum_{i=1}^{l}\alpha_{i}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i}).
\end{equation}
Since $\operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(\sum_{i=1}^{l}\alpha_{i}y_{i})
\leq \sum_{i=1}^{l}\alpha_{i}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})$
always holds, \eqref{e:called:y0} is established.
Moreover, \eqref{e:called:y0}
and \eqref{e:called:y1} imply
\begin{equation}\label{e:called:y2}
y^*\in \partial \operatornamewithlimits{conv}\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
\left(\sum_{i=1}^{l}\alpha_{i}y_{i}\right).
\end{equation}
Similar arguments give
\begin{equation}\label{e:called:z0}
\operatornamewithlimits{conv}\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\left(\sum_{j=1}^{m}\beta_{j}z_{j}\right)
=\sum_{j=1}^{m}\beta_{j}
\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)(z_{j}),
\end{equation}
and
\begin{equation}\label{e:called:z1}
y^*\in \partial \operatornamewithlimits{conv}\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)\left(\sum_{j=1}^{m}\beta_{j}z_{j}\right).
\end{equation}
Put
$x=y+z$ with $y=\alpha\sum_{i=1}^{l}\alpha_{i}y_{i}$ and $z=(1-\alpha)
\sum_{j=1}^{m}\beta_{j}z_{j}.$
Equations \eqref{e:called:y2} and \eqref{e:called:z1} guarantee
assumption \ref{i:function1} of Theorem~\ref{t:vphi:sub}; \eqref{e:called:y0} and \eqref{e:called:z0} guarantee assumptions \ref{i:function2} and \ref{i:function3}
of Theorem~\ref{t:vphi:sub}, respectively.
Hence, Theorem~\ref{t:vphi:sub} applies.
\end{proof}
\begin{cor}
Let $0<\mu<\operatorname{\bar{\lambda}}$.
Suppose that
$$\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(x)\cap \partial\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)(x)\neq\varnothing.$$
Then
\begin{align*}
\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x) &=
\partial_{L}\ensuremath{\varphi^{\alpha}_{\mu}}(x)
=\partial_{C}\ensuremath{\varphi^{\alpha}_{\mu}}(x)\\
&=\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
(x)\cap
\partial\left(g+\frac{1}{2\mu}\|\cdot\|^2\right)(x)-
\frac{x}{\mu}.
\end{align*}
\end{cor}
Armed with Theorem~\ref{t:vphi:sub}, we now turn to the differentiability of $\ensuremath{\varphi^{\alpha}_{\mu}}$.
\begin{df}
A function $f_{1}:\operatorname{\mathbb{R}}^n\rightarrow\overline{\mathbb{R}}$ is almost differentiable
if $\hat{\partial} f_{1}(x)$ is a singleton
for every $x\in \operatorname{int}(\operatorname{dom} f_{1})$, and $\hat{\partial} f_{1}(x)=\varnothing$
for every $x\in\operatorname{dom} f_{1}\setminus\operatorname{int}(\operatorname{dom} f_{1})$, if any.
\end{df}
\begin{lem}\label{l:sumrule}
Let $f_{1},f_{2}:\operatorname{\mathbb{R}}^n\rightarrow\overline{\mathbb{R}}$ be proper, lsc functions
and let $x\in\operatorname{dom} f_{1}\cap\operatorname{dom} f_{2}$. If $f_{2}$ is
continuously differentiable at $x$, then
$$\partial (f_{1}+f_{2})(x)
\subset\hat{\partial}(f_{1}+f_{2})(x)=
\hat{\partial}f_{1}(x)+\triangledown f_{2}(x).$$
\end{lem}
\begin{lem}\label{l:diff:hypo}
Let $f_{1}:\operatorname{\mathbb{R}}^n\rightarrow \overline{\mathbb{R}}$ be proper, lsc and $\mu$-proximal, and
let $x\in\operatorname{int}\operatorname{dom} f_{1}$. If $\hat{\partial}f_{1}(x)$ is a singleton, then
$f_{1}$ is differentiable at $x$.
\end{lem}
\begin{proof} Observe that
$f_{2}=f_{1}+\frac{1}{2\mu}\|\cdot\|^2$ is convex, and
$$\partial f_{2}(x)=\hat{\partial}f_{2}(x)
=\hat{\partial}f_{1}(x)+\frac{x}{\mu}.$$
When $\hat{\partial}f_{1}(x)$ is a singleton, $\partial f_{2}(x)$
is a singleton. This implies that $f_{2}$ is differentiable at $x$
because $f_{2}$ is convex and $x\in\operatorname{int}\operatorname{dom} f_{2}$.
Hence, $f_{1}$ is differentiable at $x$.
\end{proof}
\begin{cor}[differentiability of the proximal average] \label{c:interior:diff}
Let $0<\mu<\operatorname{\bar{\lambda}}$.
Suppose that either $f$ or $g$ is almost differentiable (in particular, if $f$ or $g$ is differentiable
at every point of its domain). Then
$\ensuremath{\varphi^{\alpha}_{\mu}}$ is almost differentiable. In particular, $\ensuremath{\varphi^{\alpha}_{\mu}}$ is differentiable on the interior of
its domain
$\operatorname{int}\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}$.
\end{cor}
\begin{proof} Without loss of generality, assume that
$f$ is almost differentiable.
By Lemma~\ref{l:sumrule},
\begin{equation}\label{e:frechet}
\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)
\left(y_{i}\right)\subset \hat{\partial} f(y_{i})+\frac{y_{i}}{\mu}.
\end{equation}
It follows that
$\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})$ is
at most single-valued whenever
$\hat{\partial}f(y_{i})$ is single-valued.
With the same notation as in Theorem~\ref{t:vphi:sub}, we consider two cases.
{\sl Case 1:} $x\in\operatorname{bdry}\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}$. As $x=\alpha(y/\alpha)+(1-\alpha)(z/(1-\alpha))$,
we must have
$y/\alpha\in(\operatorname{bdry}\operatornamewithlimits{conv} \operatorname{dom} f)$ and $z/(1-\alpha)\in\operatorname{bdry}(\operatornamewithlimits{conv} \operatorname{dom} g)$;
otherwise $x\in\operatorname{int}(\alpha\operatornamewithlimits{conv}\operatorname{dom} f+(1-\alpha)\operatornamewithlimits{conv}\operatorname{dom} g)
=\operatorname{int}\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}$,
which is a contradiction. Then the family of $\{y_{1},\ldots, y_{m}\}$ called
by $y/\alpha$ must be from $\operatorname{bdry}\operatorname{dom} f$. As $f$ is almost differentiable,
$\hat{\partial}f(y_{i})=\varnothing$, then $\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x)=\varnothing$ by
Theorem~\ref{t:vphi:sub} and \eqref{e:frechet}.
{\sl Case 2:} $x\in\operatorname{int}(\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}})$. As $\ensuremath{\varphi^{\alpha}_{\mu}}$ is $\mu$-proximal,
$\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x)\neq\varnothing$. We claim that
the family of $\{y_{1},\ldots, y_{m}\}$ called by
$y/\alpha$ in Theorem~\ref{t:vphi:sub} are necessarily from $\operatorname{int}\operatorname{dom} f$.
If not, then
$\partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})=\varnothing$
because of \eqref{e:frechet} and
$\hat{\partial}f(y_{i})=\varnothing$ for $y_{i}\in\operatorname{bdry}(\operatorname{dom} f)$.
Then Theorem~\ref{t:vphi:sub} implies
$\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x)=\varnothing$, which is a contradiction.
Now $\{y_{1},\ldots, y_{m}\}$ are from $\operatorname{int}\operatorname{dom} f$ and $f$ is almost
differentiable, so $(\forall i)\ \hat{\partial}f(y_{i})$
is a singleton. Using \eqref{e:frechet} again and
$\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x)\neq\varnothing$,
we see that $(\forall i)\ \partial\left(f+\frac{1}{2\mu}\|\cdot\|^2\right)(y_{i})$
is a singleton. Hence, $\hat{\partial}\ensuremath{\varphi^{\alpha}_{\mu}}(x)$ is a singleton by
Theorem~\ref{t:vphi:sub}.
Case 1 and Case 2 together show that $\ensuremath{\varphi^{\alpha}_{\mu}}$ is almost differentiable.
Finally, $\ensuremath{\varphi^{\alpha}_{\mu}}$ is differentiable on
$\operatorname{int}\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}$ by Lemma~\ref{l:diff:hypo}.
\end{proof}
\begin{cor} Let $0<\mu<\operatorname{\bar{\lambda}}$.
Suppose that either $f$ or $g$ is almost differentiable and that either $\operatornamewithlimits{conv}\operatorname{dom} f=\operatorname{\mathbb{R}}^n$
or $\operatornamewithlimits{conv}\operatorname{dom} g=\operatorname{\mathbb{R}}^n$. Then
$\ensuremath{\varphi^{\alpha}_{\mu}}$ is differentiable on $\operatorname{\mathbb{R}}^n$.
\end{cor}
\begin{proof}
By Theorem~\ref{t:prox}\ref{i:dom:convhull}, $\operatorname{dom}\ensuremath{\varphi^{\alpha}_{\mu}}=\operatorname{\mathbb{R}}^n$.
It suffices to apply Corollary~\ref{c:interior:diff}.
\end{proof}
We end this section with a result on Lipschitz continuity of the gradient of $\ensuremath{\varphi^{\alpha}_{\mu}}$.
\begin{prop} Suppose that $f$ (or $g$) is differentiable with
a Lipschitz continuous gradient and $\mu$-proximal.
Then, for every $\alpha\in ]0,1[$, the function $\ensuremath{\varphi^{\alpha}_{\mu}}$ is differentiable
with a Lipschitz continuous gradient.
\end{prop}
\begin{proof}
As $f$ is $\mu$-proximal and differentiable with a Lipschitz continuous
gradient,
the function $f+\frac{1}{2\mu}\|\cdot\|^2$ is convex and differentiable with a Lipschitz
continuous gradient. By \cite[Proposition 12.60]{rockwets},
$\big(f+\frac{1}{2\mu}\|\cdot\|^2\big)^*$ is strongly convex,
so $$\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*$$ is strongly convex.
By \cite[Proposition 12.60]{rockwets} again,
$$\left[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\right]^*$$
is convex and differentiable with a Lipschitz continuous gradient.
Since
$\ensuremath{\varphi^{\alpha}_{\mu}}=$
$$\left[\alpha\bigg(f+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*+
(1-\alpha)\bigg(g+\frac{1}{2\mu}\|\cdot\|^2\bigg)^*\right]^*
-\frac{1}{2\mu}\|\cdot\|^2,$$
we see that $\ensuremath{\varphi^{\alpha}_{\mu}}$ is differentiable with a Lipschitz continuous gradient.
\end{proof}
\section{The proximal average for quadratic functions}\label{s:quad}
In this section, we illustrate the above results for quadratic functions.
For an $n\times n $ symmetric matrix $A$, define the quadratic function
$\ensuremath{\,\mathfrak{q}}_{A}:\operatorname{\mathbb{R}}^n\rightarrow\operatorname{\mathbb{R}}$ by $x\mapsto \frac{1}{2}\scal{x}{Ax}.$
We use $\ensuremath{\, \lambda_{\min}} A$ to denote the smallest eigenvalue of $A$.
\begin{lem}\label{l:quad} For an $n\times n$ symmetric matrix $A$, one has
\begin{enumerate}[label=\rm(\alph*)]
\item\label{i:quad1}
$\ensuremath{\,\mathfrak{q}}_{A}$ is prox-bounded with threshold
\begin{equation}\label{e:boundp}
\lambda_{\ensuremath{\,\mathfrak{q}}_{A}}=\frac{1}{\max\{0,-\ensuremath{\, \lambda_{\min}} A\}}>0
\end{equation}
and $\mu$-proximal for every $0<\mu\leq \lambda_{\ensuremath{\,\mathfrak{q}}_{A}}$;
\item \label{e:boundp1}
the prox-bound $\lambda_{\ensuremath{\,\mathfrak{q}}_{A}}=+\infty$ if and only if $A$ is positive semidefinite;
\item\label{i:quad2} if $0<\mu<\lambda_{\ensuremath{\,\mathfrak{q}}_{A}}$, then
\begin{equation}\label{e:quadenv}
e_{\mu}\ensuremath{\,\mathfrak{q}}_{A}=\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[\operatorname{Id}-(\mu A+\operatorname{Id})^{-1}]};\text { and }
\end{equation}
\begin{equation*}\label{e:quadprox}
\operatorname{Prox}_{\mu}\ensuremath{\,\mathfrak{q}}_{A}=(\mu A+\operatorname{Id})^{-1}.
\end{equation*}
\end{enumerate}
\end{lem}
\begin{proof}
\ref{i:quad1}: As $A$ can be diagonalized,
$\ensuremath{\,\mathfrak{q}}_{A}\geq \ensuremath{\, \lambda_{\min}} A \ensuremath{\,\mathfrak{q}}_{\operatorname{Id}}$. Apply \cite[Exercise 1.24]{rockwets} to
obtain \eqref{e:boundp}. When $0<\mu\leq \lambda_{\ensuremath{\,\mathfrak{q}}_{A}}$,
$A+\frac{1}{\mu}\operatorname{Id}$ has nonnegative eigenvalues,
so $\ensuremath{\,\mathfrak{q}}_{A}+\frac{1}{\mu}\ensuremath{\,\mathfrak{q}}_{\operatorname{Id}}$ is convex.
\noindent\ref{e:boundp1}: This follows from \ref{i:quad1}.
\noindent\ref{i:quad2}: When $0<\mu<\lambda_{\ensuremath{\,\mathfrak{q}}_{A}}$, the function
$\ensuremath{\,\mathfrak{q}}_{A}+\frac{1}{\mu}\ensuremath{\,\mathfrak{q}}_{\operatorname{Id}}$ is strictly convex. To find
\begin{equation}\label{e:quade}
e_{\mu}\ensuremath{\,\mathfrak{q}}_{A}(x)=\inf_{w}\left(\ensuremath{\,\mathfrak{q}}_{A}(w)+\frac{1}{\mu}\ensuremath{\,\mathfrak{q}}_{\operatorname{Id}}(x-w)\right),
\end{equation}
one directly takes the derivative with respect to $w$ to find
\begin{equation}\label{e:quadp}
\operatorname{Prox}_{\mu}\ensuremath{\,\mathfrak{q}}_{A}(x)=(\mu A+\operatorname{Id})^{-1}(x).
\end{equation}
Substitute \eqref{e:quadp} into \eqref{e:quade} to get \eqref{e:quadenv}.
\end{proof}
\begin{ex} Let $A_{1}, A_{2}$ be two $n\times n$ symmetric matrices and
let $0<\mu<\operatorname{\bar{\lambda}}=\min\{\lambda_{\ensuremath{\,\mathfrak{q}}_{A_{1}}},\lambda_{\ensuremath{\,\mathfrak{q}}_{A_{2}}}\}$.
Then the following hold:
\begin{enumerate}[label=\rm(\alph*)]
\item\label{e:proxq1}
$\ensuremath{\varphi^{\alpha}_{\mu}}=\ensuremath{\,\mathfrak{q}}_{\mu^{-1}A_{3}}$ with
$$A_{3}=[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]^{-1}-\operatorname{Id},$$
and
$$\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1};$$
\item \label{e:proxq2}
$\lim_{\alpha\downarrow 0}\ensuremath{\varphi^{\alpha}_{\mu}}=\ensuremath{\,\mathfrak{q}}_{A_{2}}$ and $\lim_{\alpha\uparrow 1}\ensuremath{\varphi^{\alpha}_{\mu}}=\ensuremath{\,\mathfrak{q}}_{A_{1}};$
\item\label{e:proxq3}
$\lim_{\mu\downarrow 0}\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha\ensuremath{\,\mathfrak{q}}_{A_{1}}+(1-\alpha)\ensuremath{\,\mathfrak{q}}_{A_{2}};$
\item \label{e:proxq3.5} when $\operatorname{\bar{\lambda}}<\infty$,
$$\lim_{\mu\uparrow \operatorname{\bar{\lambda}}}\ensuremath{\varphi^{\alpha}_{\mu}}=
\ensuremath{\,\mathfrak{q}}_{\alpha^{-1}(A_{1}+\operatorname{\bar{\lambda}}^{-1}\operatorname{Id})}\operatorname{\mathbb{B}}ox \ensuremath{\,\mathfrak{q}}_{(1-\alpha)^{-1}(A_{2}+\operatorname{\bar{\lambda}}^{-1}\operatorname{Id})}-
\ensuremath{\,\mathfrak{q}}_{\operatorname{\bar{\lambda}}^{-1}\operatorname{Id}};$$
\item \label{e:proxq4}
when both $A_{1}, A_{2}$ are positive definite, $\operatorname{\bar{\lambda}}=+\infty$,
$$\lim_{\mu\uparrow \infty}\ensuremath{\varphi^{\alpha}_{\mu}}=\ensuremath{\,\mathfrak{q}}_{(\alpha A_{1}^{-1}+(1-\alpha)A_{2}^{-1})^{-1}}.$$
\end{enumerate}
\end{ex}
\begin{proof}
\ref{e:proxq1}: By Lemma~\ref{l:quad},
\begin{align*}
& -\alpha e_{\mu}\ensuremath{\,\mathfrak{q}}_{A_{1}}-(1-\alpha)e_{\mu}\ensuremath{\,\mathfrak{q}}_{A_{2}}\\
&=-\alpha \ensuremath{\,\mathfrak{q}}_{\mu^{-1}[\operatorname{Id}-(\mu A_{1}+\operatorname{Id})^{-1}]}-(1-\alpha)\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[\operatorname{Id}-(\mu A_{2}+\operatorname{Id})^{-1}]}\\
&=\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})-\operatorname{Id}]}.
\end{align*}
Thus, applying Lemma~\ref{l:quad} again,
\begin{align*}
\ensuremath{\varphi^{\alpha}_{\mu}} &=-e_{\mu}(\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}-\operatorname{Id}]})
\\
&=-\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[\operatorname{Id}-(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})^{-1}]}\\
&=\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})^{-1}-\operatorname{Id}]}.
\end{align*}
Again, using Lemma~\ref{l:quad},
\begin{align*}
e_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}} &= e_{\mu}\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})^{-1}-\operatorname{Id}]}\\
&=\ensuremath{\,\mathfrak{q}}_{\mu^{-1}[\operatorname{Id}-(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})]},
\end{align*}
so
$$\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}.$$
\noindent\ref{e:proxq2}: Note that the matrix function $A\mapsto A^{-1}$ is continuous whenever
$A$ is invertible. Then \ref{e:proxq2} is immediate because
$$\lim_{\alpha\downarrow 0}(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})^{-1}
=((\mu A_{2}+\operatorname{Id})^{-1})^{-1}=\mu A_{2}+\operatorname{Id}, \text{ and }
$$
$$\lim_{\alpha\uparrow 1}(\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1})^{-1}
=((\mu A_{1}+\operatorname{Id})^{-1})^{-1}=\mu A_{1}+\operatorname{Id}.
$$
\noindent\ref{e:proxq3}: It suffices to show
$$\lim_{\mu\downarrow 0}\frac{[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]^{-1}-\operatorname{Id}}{\mu}=\alpha A_{1}+(1-\alpha)A_{2},$$
equivalently,
\begin{equation}\label{e:matrix}
\small\lim_{\mu\downarrow 0}\frac{[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]^{-1}-[\alpha(\mu A_{1}+\operatorname{Id})+(1-\alpha)(\mu A_{2}+\operatorname{Id})]}{\mu}=0.
\end{equation}
Since $\lim_{\mu\downarrow 0}[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]
=\operatorname{Id}$, \eqref{e:matrix} follows from the following calculation:
\begin{align*}
&[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]\cdot
\nonumber\\
&\frac{[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]^{-1}-[\alpha(\mu A_{1}+\operatorname{Id})+(1-\alpha)(\mu A_{2}+\operatorname{Id})]}{\mu}\\
&= \frac{\operatorname{Id}-[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]
[\alpha(\mu A_{1}+\operatorname{Id})+(1-\alpha)(\mu A_{2}+\operatorname{Id})]}{\mu}\\
&=-\alpha(1-\alpha)[(\mu A_{1}+\operatorname{Id})^{-1}(A_{2}-A_{1})
+(\mu A_{2}+\operatorname{Id})^{-1}(A_{1}-A_{2})]\\
& \rightarrow -\alpha(1-\alpha)[(A_{2}-A_{1})
+(A_{1}-A_{2})]= 0.
\end{align*}
\noindent\ref{e:proxq3.5}: The matrices $A_{1}+\operatorname{\bar{\lambda}}^{-1}\operatorname{Id}$ and $A_{2}+\operatorname{\bar{\lambda}}^{-1}\operatorname{Id}$
are positive semidefinite, so the convex hulls are superfluous.
\noindent\ref{e:proxq4}: As $\mu\rightarrow\infty$, we have
\begin{align*}
&\frac{[\alpha(\mu A_{1}+\operatorname{Id})^{-1}+(1-\alpha)(\mu A_{2}+\operatorname{Id})^{-1}]^{-1}-\operatorname{Id}}{\mu}\\
&=\left[\alpha\left(A_{1}+\frac{\operatorname{Id}}{\mu}\right)^{-1}
+(1-\alpha)\left(A_{2}+\frac{\operatorname{Id}}{\mu}\right)^{-1}\right]-\frac{\operatorname{Id}}{\mu}\\
&\rightarrow (\alpha A_{1}^{-1}+(1-\alpha)A_{2}^{-1})^{-1}.
\end{align*}
\end{proof}
\begin{rem} When both $A_{1}, A_{2}$ are positive semidefinite matrices,
we refer the reader to \emph{\cite{respos}}.
\end{rem}
\section{The general question is still unanswered}\label{s:theg}
According to Theorem~\ref{prop:convcomb}, suppose that $0<\mu<\operatorname{\bar{\lambda}}$,
$0<\alpha<1$
and $\operatorname{Prox}_{\mu}f$ and $\operatorname{Prox}_{\mu}g$ are convex-valued. Then
there exists a proper, lsc function
$\ensuremath{\varphi^{\alpha}_{\mu}}$ such that
$\operatorname{Prox}_{\mu}\ensuremath{\varphi^{\alpha}_{\mu}}=\alpha\operatorname{Prox}_{\mu}f+(1-\alpha)\operatorname{Prox}_{\mu}g$.
When the proximal mapping is not convex-valued, the situation is subtle.
We illustrate this by revisiting Example~\ref{e:proximal:fk}.
Recall that for $\varepsilon_{k}>0$, the function
$$f_{k}(x)=\max\{0,(1+\varepsilon_{k})(1-x^2)\}$$
has
$$\operatorname{Prox}_{1/2}f_{k}(x)=\begin{cases}
x &\text{ if $x\geq 1$,}\\
1 &\text{ if $0<x<1$,}\\
\{-1,1\} &\text{ if $x=0$,}\\
-1 &\text{ if $-1<x<0$,}\\
x &\text{ if $x\leq -1$.}
\end{cases}
$$
With $\alpha=1/2$, we have
\begin{equation}\label{e:prox:half}
(\alpha \operatorname{Prox}_{1/2}f_{1}+(1-\alpha)\operatorname{Prox}_{1/2}f_{2})(x)
=\begin{cases}
x &\text{ if $x\geq 1$,}\\
1 &\text{ if $0<x<1$,}\\
\{-1,0,1\} &\text{ if $x=0$,}\\
-1 &\text{ if $-1<x<0$,}\\
x &\text{ if $x\leq -1$.}
\end{cases}
\end{equation}
Because $\operatorname{Prox}_{1/2}f_{i}(0)$ is not convex-valued, $(\alpha \operatorname{Prox}_{1/2}f_{1}+(1-\alpha)\operatorname{Prox}_{1/2}f_{2})(0)$ is neither
$\operatorname{Prox}_{1/2}f_{1}(0)$ nor $\operatorname{Prox}_{1/2}f_{2}(0)$, although
$\operatorname{Prox}_{1/2}f_{1}(0)=\operatorname{Prox}_{1/2}f_{2}(0)$.
One can verify that \eqref{e:prox:half} is indeed
$\operatorname{Prox}_{1/2}g(x)$ where
$$g(x)=\begin{cases}
0 &\text{ if $x>1$,}\\
-x(x-1)-x^2+1 &\text{ if $0<x\leq 1$,}\\
-x(x+1)-x^2+1 &\text{ if $-1<x\leq 0$,}\\
0 &\text{ if $x\leq -1$.}
\end{cases}
$$
Regretfully, we do not have a systematic way to find $g$
when $\operatorname{Prox}_{\mu}g$ is not convex-valued.
The challenging question is still open:
\emph{Is a convex combination of proximal mappings of possibly nonconvex functions
always a proximal mapping?}
\section*{Acknowledgment}
Xianfu Wang was partially supported by the Natural Sciences and
Engineering Research Council of Canada.
{}
\end{document} |
\begin{document}
\begin{abstract}
This work studies the Cauchy problem for the energy-critical inhomogeneous Hartree equation with inverse square potential
$$i\partial_t u-\mathcal K_\lambda u=\pm |x|^{-\tau}|u|^{p-2}(I_\alpha *|\cdot|^{-\tau}|u|^p)u, \quad \mathcal K_\lambda=-\Delta+\frac\lambda{|x|^2}$$
in the energy space $H_\lambda^1:=\{f\in L^2,\quad\sqrt{\mathcal{K}_\lambda}f\in L^2\}$. In this paper, we develop a well-posedness theory and investigate the blow-up of solutions in $H_\lambda^1$. Furthermore we present a dichotomy between energy bounded and non-global existence of solutions under the ground state threshold.
To this end, we use Caffarelli-Kohn-Nirenberg weighted interpolation inequalities and some equivalent norms considering $\mathcal K_\lambda$, which make it possible to control the non-linearity involving the singularity $|x|^{-\tau}$ as well as the inverse square potential. The novelty here is the investigation of the energy critical regime which remains still open and the challenge is to deal with three technical problems: a non-local source term, an inhomogeneous singular term $|\cdot|^{-\tau}$, and the presence of an inverse square potential.
\end{abstract}
\maketitle
\tableofcontents
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\section{Introduction}
In this paper we are concerned with the Cauchy problem for the inhomogeneous generalized Hartree equation with inverse square potential
\begin{equation}
\begin{cases}\label{S}
i\partial_t u-\mathcal K_\lambda u=\epsilon |x|^{-\tau}|u|^{p-2}(I_\alpha *|\cdot|^{-\tau}|u|^p)u,\\
u(x,0)=u_0(x), \quad (x,t)\in \mathbb{R}^n \times \mathbb{R},
\end{cases}
\end{equation}
where $p>2$, $\epsilon=\pm1$, and $\mathcal K_\lambda:=-\Delta+\frac\lambda{|x|^{2}}$ satisfying $\lambda>-\frac{(n-2)^2}{4}$.
Here the case $\epsilon =1$ is \textit{defocusing}, while the case $\epsilon =-1$ is \textit{focusing}.
The Riesz potential is defined on $\mathbb{R}^n$ by
$$I_\alpha:=\frac{\Gamma(\frac{n-\alpha}2)}{\Gamma(\frac\alpha2)\pi^\frac{n}22^\alpha}\,|\cdot|^{\alpha-n},\quad 0<\alpha<n.$$
The assumption on $\lambda$ comes from the sharp Hardy inequality \cite{abde},
\begin{equation}\label{prt}
\frac{(n-2)^2}4\int_{\mathbb{R}^n}\frac{|f(x)|^2}{|x|^2}\,dx\leq \int_{\mathbb{R}^n}|\nabla f(x)|^2 dx,
\end{equation}
which guarantees that $\mathcal K_\lambda$ is the positive self-adjoint extension of $-\Delta+\lambda/|x|^{2}$.
It is known that in the range $-\frac{(n-2)^2}4 <\lambda< 1-\frac{(n-2)^2}4$, the extension is not unique (see \cite{ksww,ect}). In such a case, one picks the Friedrichs extension (see \cite{ksww,pst}).
The problem \eqref{S} arises in various physical contexts. In the linear regime ($\epsilon=0$), the considered Schr\"odinger equation models quantum mechanics \cite{ksww,haa}. In the non-linear regime without potentials, namely $\lambda=0\neq\epsilon$, the equation \eqref{S} is of interest in the mean-field limit of large systems of non-relativistic bosonic atoms and molecules in a regime where the number of bosons is very large, but the interactions
between them are weak \cite{fl,hs,pg,mpt}. The homogeneous problem associated to the considered problem \eqref{S}, specifically, when $\lambda=\tau=0$, has several physical origins such as quantum mechanics \cite{pg,pgl} and Hartree-Fock theory \cite{ll}. The particular case $p=2$ and $\lambda=\tau=0$ is called the standard Hartree equation. It is a classical limit of a field equation describing a quantum mechanical non-relativistic many-boson system interacting through a two body potential \cite{gvl}.
Now, let us return to the mathematical aspects of the generalized Hartree equation \eqref{S}.
Recall the critical Sobolev index.
If $u(x,t)$ is a solution of \eqref{S}, so is the family
$$u_\delta(x,t):=\delta^{\frac{2-2\tau+\alpha}{2(p-1)}} u(\delta x, \delta^2 t),$$ with the re-scaled initial data $u_{\delta,0}:=u_{\delta}(x,0)$ for all $\delta>0$.
Then, it follows that
\begin{equation*}
\|u_{\delta,0}\|_{\dot H^1}=\delta^{1-\frac n2 +\frac{2-2\tau+\alpha}{2(p-1)}}\|u_0\|_{\dot H^1}.
\end{equation*}
If $p=1+\frac{2-2\tau+\alpha}{n-2}$, the scaling preserves the $\dot H^1$ norm of $u_0$, and in this case, \eqref{S} is referred to as the energy-critical inhomogeneous generalized Hartree equation.
Moreover, the solution to \eqref{S} satisfies the mass and energy conservation,
where the mass conservation is
\begin{equation}
\mathcal M[u(t)]:=\int_{\mathbb{R}^n} |u(x,t)|^2 dx = \mathcal M [u_0],
\end{equation}
and the energy conservation is
\begin{equation}
\mathcal E[u(t)]:= \int_{\mathbb{R}^n}\Big(|\nabla u|^2 +\lambda |x|^{-2} |u|^2\Big)\,dx+ \frac{\epsilon}{p}\mathcal P[u(t)]=\mathcal E[u_0],
\end{equation}
where the potential energy reads
$$\mathcal P[u(t)]:=\int_{\mathbb{R}^n} |x|^{-\tau}\big(I_\alpha *|\cdot|^{-\tau}|u|^p\big)|u|^p dx.$$
To the best of our knowledge, this paper is the first one dealing with the energy-critical inhomogeneous Hartree equation with inverse square potential, precisely \eqref{S} with $\lambda\neq0$.
The main contribution is to develop a local well-posedness theory in the energy-critical case, as well as to investigate the blow-up of the solution in energy space for the inhomogeneous generalized Hartree equation \eqref{S}.
Precisely, the local theory is based on the standard contraction mapping argument via the availability of Strichartz estimates. More interestingly, we take advantage of some equivalent norms considering the operator $\mathcal{K}_\lambda$, namely $\|\sqrt{\mathcal{K}_\lambda}u\|_r\simeq\|u\|_{\dot W^{1,r}}$, which makes it possible to apply the contraction mapping principle without directly handling with the operator.
In the focusing regime ($\epsilon=-1$), we prove that the solution blows up in finite time without assuming the classical conditions such as radial symmetry or $|x|u_0 \in L^2$.
The blow-up phenomenon is expressed in terms of the non-conserved potential energy, which may give a criteria in the spirit of \cite{vdd}, which implies in particular the classical phenomena under the ground state threshold in the spirit of \cite{km}.
In this paper, we deal with three technical problems posed by the equation \eqref{S}: a non-local source term, the inhomogeneous singular term $|\cdot|^{-\tau}$, and the presence of an inverse square potential.
Indeed, in order to deal with the singular term $|\cdot|^{-\tau}$ in Lebesgue spaces, the method used in the literature decomposes the integrals on the unit ball and its complement (see, for example, \cite{mt}).
However, this is no more sufficient to conclude in the energy critical case. For $\lambda=0$, the first author used some Lorentz spaces with the useful property $|\cdot|^{-\tau}\in L^{\frac{n}{\tau},\infty}$.
To overcome these difficulties, we make use of some Caffarelli-Kohn-Nirenberg weighted interpolation inequalities which is different from the existing approaches.
Before stating our results, we introduce some Sobolev spaces defined in terms of the operator $\mathcal K_\lambda$ as the completion of $C^\infty_0(\mathbb{R}^n)$ with respect to the norms
\begin{align*}
\|u\|_{\dot W^{1,r}_\lambda}&:=\|\sqrt{\mathcal K_\lambda} u\|_{L^r} \quad \textnormal{and} \quad \|u\|_{W^{1,r}_\lambda}:=\|\langle \sqrt{\mathcal K_\lambda}\rangle u\|_{L^r},
\end{align*}
where $\langle \cdot\rangle:=(1+|\cdot|^2)^{1/2}$ and $L^r:=L^r(\mathbb{R}^n)$. We denote also the particular Hilbert cases $\dot W^{1,2}_\lambda=\dot H^1_\lambda$ and $W^{1,2}_\lambda=H^1_\lambda$.
We note that by the definition of the operator $\mathcal K_\lambda$ and the Hardy estimate \eqref{prt}, one has
\begin{align*}
\|u\|_{\dot H^1_\lambda}&:=\|\sqrt{\mathcal K_\lambda}u\|=\big(\|\nabla u\|^2+\lambda\||x|^{-1}u\|^2\big)^\frac12\simeq \|u\|_{\dot H^1},
\end{align*}
where we write for simplicity $\|\cdot\|:=\|\cdot\|_{L^2(\mathbb{R}^n)}$.
\subsection{Well-posedness in the energy-critical case}
The theory of well-posedness for the inhomogeneous Hartree equation ($\lambda=0$ in \eqref{S}) has been extensively studied in recent years and is partially understood. (See, for example, \cite{mt,sa, kls, sk} and references therein). For related results on the scattering theory, see also \cite{sx} for spherically symmetric datum and \cite{cx} in the general case.
Our first result is the following well-posedness in the energy-critical case.
\begin{thm}\label{loc}
Let $n\ge3$, $\lambda >-\frac{(n-2)^2}{4}$ and $2\kappa=n-2-\sqrt{(n-2)^2+4\lambda}$.
Assume that
\begin{equation}\label{1.6}
0<\alpha<n, \quad 2\kappa < n-2-\frac{2(n-2)}{3n-2+2\sqrt{9n^2+8n-16}}
\end{equation}
and
\begin{equation}\label{1.7}
\frac{\alpha}{2}-\frac{n+2+\sqrt{9n^2+8n-16}}{2}<\tau < \frac{\alpha}{2}-\max\{\frac{n-4}{2}, \frac{n-4}{n},\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\}.
\end{equation}
Then, for $u_0 \in H_{\lambda}^1(\mathbb{R}^n)$, there exist $T>0$ and a unique solution
$$u\in C([0,T]; H_\lambda^1) \cap L^q([0,T];W_{\lambda}^{1,r})$$
to \eqref{S} with $p=1+\frac{2-2\tau+\alpha}{n-2}$
for any admissible pair $(q,r)$ in Definition \ref{dms}.
Furthermore, the continuous dependence on the initial data holds.
\end{thm}
We also provide the small data global well-posedness and scattering results as follows:
\begin{thm}\label{glb}
Under the same conditions as in Theorem \ref{loc} and the smallness assumption on $\|u_0\|_{H_{\lambda}^1}$, there exists a unique global solution
$$u\in C([0,\infty); H_\lambda^1) \cap L^q([0,\infty);W_{\lambda}^{1,r})$$
to \eqref{S} with $p=1+\frac{2-2\tau+\alpha}{n-2}$ for any admissible pair $(q,r)$.
Furthermore, the solution scatters in $H_\lambda^1$, i.e., there exists $\phi\in H_\lambda^1$ such that
$$\lim_{t\to\infty} \|u(t)-e^{-it\mathcal{K}_{\lambda}}\phi\|_{H_{\lambda}^1}=0.$$
\end{thm}
\subsection{Blow-up of energy solutions}
We now turn our attention to blow-up of the solution to \eqref{S} under the ground state threshold, in the focusing regime. A particular global solution of \eqref{S} with $\epsilon=-1$ is the stationary solution to \eqref{S}, namely
\begin{equation}\label{E}
-\Delta \varphi+\frac{\lambda}{|x|^2}\varphi=|x|^{-\tau}|\varphi|^{p-2}(I_\alpha *|\cdot|^{-\tau}|\varphi|^p)\varphi,\quad 0\neq \varphi\in {H^1_\lambda}.
\end{equation}
Such a solution called ground state plays an essential role in the focusing regime.
The following result is the existence of ground states to \eqref{E}.
\begin{thm}\label{gag}
{Let $n\geq3$, $\lambda>-\frac{(n-2)^2}4$ and $p=1+\frac{2-2\tau+\alpha}{n-2}$. Assume that
\begin{equation}\label{as}
0<\alpha<n \quad \text{and} \quad 0<\tau< 1+\frac{\alpha}{n}.
\end{equation}\label{inte}
Then, the following inequality holds:
\begin{equation}\label{gagg}
\int_{\mathbb{R}^n} |x|^{-\tau}|u|^p (I_\alpha \ast |\cdot|^{-\tau}|u|^p) \leq C_{N,\tau,\alpha,\lambda} \big\|\sqrt{\mathcal{K}_{\lambda} }u\big\|^{2p}.
\end{equation}}
Moreover, there exists $\varphi\in H_{\lambda}^1$ a ground state solution to \eqref{E}, which is a minimizer of the problem
\begin{equation}\label{min}
\frac1{C_{N,\tau,\alpha,\lambda}}=\inf\Big\{\frac{\|\sqrt{\mathcal K_\lambda} u\|^{2p}}{\mathcal P[u]},\quad0\neq u\in H^1_\lambda\Big\}.
\end{equation}
\end{thm}
\begin{rems}
\begin{enumerate}
\item[1.] Theorem \ref{gag} does not require to assume that $p\geq2$;
\item[2.] $C_{N,\tau,\alpha,\lambda}$ denotes the best constant in the inequality \eqref{gagg};
\item[3.] compared with the homogeneous regime $\tau=0$, the infimum in \eqref{min} is never attained for $\lambda>0$, see \cite{kmvzz}.
\end{enumerate}
\end{rems}
Here and hereafter, we denote by $\varphi$ a ground state solution of \eqref{E} and define the scale invariant quantities
\begin{align*}
\mathcal{ME}[u_0]:=\frac{\mathcal E[u_0]}{\mathcal E[\varphi]},\quad
\mathcal{MG}[u_0]:=\frac{\|\sqrt{\mathcal K_\lambda} u_0\|}{\|\sqrt{\mathcal K_\lambda} \varphi\|},\quad
\mathcal{MP}[u_0]:=\frac{\mathcal P[u_0]}{\mathcal P[\varphi]}.
\end{align*}
The next theorem gives a blow-up phenomenon in the energy-critical focusing regime under the ground state threshold.
\begin{thm}\label{t1}
Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state solution to \eqref{E} and $u\in C_{T^*}(H^1_\lambda)$ be a maximal solution of the focusing problem \eqref{S}.
If
\begin{equation} \label{ss'}
\sup_{t\in[0,T^*)}\mathcal I[u(t)]<0,
\end{equation}
then $u$ blows-up in finite or infinite time.
Here, $\mathcal I[u]:=\|\sqrt{\mathcal K_\lambda} u\|^2-\mathcal{P}[u]$.
\end{thm}
\begin{rems}
\begin{enumerate}
\item[1.]
$u$ blows-up in infinite time means that it is global and there is $t_n\to\infty$ such that $\|\sqrt{\mathcal K_\lambda} u(t_n)\|\to\infty$;
\item[2.]
the threshold is expressed in terms of the potential energy $\mathcal P[u]$, which is a non conserved quantity;
\item[3.]
the theorem here doesn't require the classical assumptions such as spherically symmetric data or $|x|u_0\in L^2$;
\item[4.]
a direct consequence of the variance identity is that the energy solution to \eqref{S} blows-up in finite time if $|x|u_0\in L^2$ and \eqref{ss'} is satisfied;
\end{enumerate}
\end{rems}
The next result is a consequence of Theorem \ref{t1}.
\begin{cor}\label{t2}
Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state of \eqref{E} and $u_0\in H^1_\lambda$ such that
\begin{equation} \label{t11}
\mathcal{ME}[u_0]<1.
\end{equation}
If we assume that
\begin{equation}\label{t13}
\mathcal{MG}[u_0]>1,\end{equation}
then the energy solution of \eqref{S} blows-up in finite or infinite time.
\end{cor}
\begin{rems}
\begin{enumerate}
\item[1.]
The assumptions of the above result are simpler to check than \eqref{ss'}, because they are expressed in terms of conserved quantities;
\item[2.]
the above ground state threshold has a deep influence in the NLS context since the pioneering papers \cite{km,Holmer};
\end{enumerate}
\end{rems}
Finally, we close this subsection with some additional results which gives the boundedness of the energy solution.
\begin{prop}\label{s}
Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state solution to \eqref{E} and $u\in C_{T^*}(H^1_\lambda)$ be a maximal solution of the focusing problem \eqref{S}.
If \begin{equation} \label{ss}
\sup_{t\in[0,T^*)}\mathcal{MP}[u(t)]<1,
\end{equation}
then
$u$ is bounded in $H^1_\lambda$.
\end{prop}
The next is a consequence of Proposition \ref{s}.
\begin{cor}\label{s2}
Under the assumptions in Theorem \ref{loc} and $\epsilon=-1$, let $\varphi$ be a ground state of \eqref{E} and $u_0\in H^1_\lambda$ satisfying \eqref{t11}.
If
\begin{equation}\label{t12}
\mathcal{MG}[u_0]<1, \end{equation}
then the energy solution of \eqref{S} is bounded.
\end{cor}
\begin{rem}
\begin{enumerate}
\item[1.]
the global existence and energy scattering under the assumptions \eqref{ss} in Proposition \ref{s} or \eqref{t11}-\eqref{t12} in Corollary \ref{s2} are investigated in a paper in progress.
\end{enumerate}
\end{rem}
The rest of this paper is organized as follows. In Section 2 we introduce some useful properties that we need. Section 3 develops a local theory and a global one for small datum. In Section 4, the existence of ground states is established. Section 5 establishes blow-up of solutions under the ground state threshold and the boundedness of energy solutions. In the appendix, a Morawetz type estimate is proved.
Throughout this paper, the letter $C$ stands for a positive constant which may be different at each occurrence. We also denote $A \lesssim B$ to mean $A \leq CB$ with unspecified constants $C>0$.
\section{Preliminaries}
In this section, we introduce some useful properties which will be utilized throughout this paper.
We also recall the Strichartz estimates.
Let us start with the Hardy-Littlewood-Sobolev inequality \cite{el} which is suitable for dealing with the non-local source term in \eqref{S}:
\begin{lem}\label{hls}
Let $n\geq1$ and $0 <\alpha < n$.
\begin{enumerate}
\item[1.]
Let $s\geq1$ and $r>1$ such that $\frac1r=\frac1s+\frac\alpha n$. Then,
$$\|I_\alpha*g\|_{L^s}\leq C_{n,s,\alpha}\|g\|_{L^r}.$$
\item[2.]
Let $1<s,r,t<\infty$ be such that $\frac1r +\frac1s=\frac1t +\frac\alpha n$. Then,
$$\|f(I_\alpha*g)\|_{L^t}\leq C_{n,t,\alpha}\|f\|_{L^r}\|g\|_{L^s}.$$
\end{enumerate}
\end{lem}
The following lemma is a weighted version of the Sobolev embedding, that is, a special case of Caffarelli-Kohn-Nirenberg weighted interpolation inequalities \cite{sgw,csl}:
{\begin{lem}\label{ckn}
Let $n\geq1$ and
$$1< p\leq q<\infty, \quad -\frac nq<b\leq a<\frac n{p'} \quad \text{and} \quad a-b-1=n\Big(\frac1q-\frac1p\Big).$$
Then,
$$\||x|^{b}f\|_{L^q}\leq C\||x|^a\nabla f\|_{L^p}.$$
\end{lem}}
Now, we describe several properties related to the operator $\mathcal K_\lambda.$
Since $\|f\|_{H^1} \simeq \|f\|_{H_\lambda^1}$, one has the following compact Sobolev injection (\cite[Lemma 3.1]{cg}):
\begin{lem}\label{compact}
Let $n\geq3$, $0<\tau<2$ and $2<r<\frac{2(n-\tau)}{n-2}$. Then, the following injection is compact:
$$H^1_\lambda\hookrightarrow\hookrightarrow L^{r}(|x|^{-\tau}\,dx) .$$
\end{lem}
We also have the following equivalent norms to Sobolev ones, see \cite{kmvzz} and \cite[Remark 2.2]{cg}:
\begin{lem}\label{2.2}
Let $n\geq3$, $\lambda>-\frac{(n-2)^2}4$, $1<r<\infty$ and $2\kappa=n-2-\sqrt{(n-2)^2+4\lambda}$. Then,
\begin{enumerate}
\item[1.]
if $\frac{1+\kappa}n<\frac1r<\min\{1,1-\frac\kappa n\}$, then $\|f\|_{\dot W^{1,r}}\lesssim \|f\|_{\dot W_\lambda^{1,r}}$
\item[2.]
if $\max\{\frac{1}n,\frac{\kappa}n\}<\frac1r<\min\{1,1-\frac\kappa n\}$, then $\|f\|_{\dot W_\lambda^{1,r}}\lesssim\|f\|_{\dot W^{1,r}}$
\end{enumerate}
\end{lem}
Finally, we recall the Strichartz estimates. As we shall see, the availability of these estimates is the key role in the proof of Theorem \ref{loc}.
\begin{defi}\label{dms}
Let $n\ge3$. We say that $(q,r)$ is an admissible pair if it satisfies
\begin{equation*}
2\le q \le \infty, \quad 2\le r \le \frac{2n}{n-2} \quad \text{and} \quad \frac2q+\frac{n}{r}=\frac{n}{2}.
\end{equation*}
\end{defi}
\begin{prop}\cite{bpst,zz,df}\label{str}
Let $n\geq3$, $\lambda>-\frac{(n-2)^2}4$. Then, there exists $C>0$ such that
\begin{enumerate}
\item[1.]
$\|e^{-it\mathcal K_\lambda}f\|_{L_t^q(L_x^r)}\leq C\|f\|,$
\item[2.]
$\|\int_0^{t}e^{-i(t-s)\mathcal K_\lambda}F(\cdot,s)\,ds\|_{L_t^q(L_x^r)}\leq C\|F\|_{L_t^{\tilde q'}L_x^{\tilde r'}}.$
\end{enumerate}
\end{prop}
Finally, one gives a classical Morawetz estimate proved in the appendix. Let $\phi:\mathbb{R}^n\to\mathbb{R}$ be a smooth function and define the variance potential
$$V_\phi(t):=\int_{\mathbb{R}^n}\phi(x)|u(x,t)|^2\,dx,$$
and the Morawetz action
$$M_\phi(t):=2\Im\int_{\mathbb{R}^n} \bar u(\partial_j\phi\,\partial_ju)\,dx=2\Im\int_{\mathbb{R}^n} \bar u(\nabla\phi\cdot\nabla u)\,dx.$$
\begin{prop}\label{mrwz}
Let $\phi:\mathbb{R}^n \rightarrow \mathbb{R}$ be a radial, real-valued multiplier, $\phi=\phi(|x|)$.
Then, for any solution $u\in C([0,T];H_\lambda^1)$ of the generalized Hartree equation \eqref{S} in the focusing sign with initial data $u_0\in H_{\lambda}^{1}$, the following virial-type identities hold:
\begin{equation*}
V'_\phi(t)=
2\Im\int_{\mathbb{R}^n} \bar u\nabla\phi\cdot\nabla u dx
\end{equation*}
and
\begin{align*}
V''_\phi(t)=M_\phi'(t)&=4\sum_{k,l=1}^{n}\int_{\mathbb{R}^n}\partial_l\partial_k\phi\,\mathrm{Re}(\partial_ku\partial_l\bar u)dx-\int_{\mathbb{R}^n}\Delta^2\phi|u|^2dx+4\lambda\int_{\mathbb{R}^n}\nabla\phi\cdot x\frac{|u|^2}{|x|^4}dx\\
&\qquad-\frac{2(p-2)}{p}\int_{\mathbb{R}^n}\Delta\phi|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
&\qquad\qquad -\frac{4\tau}p\int_{\mathbb{R}^n}x\cdot\nabla\phi|x|^{-\tau-2}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
&\qquad\qquad -\frac{4(n-\alpha)}p\sum_{k=1}^n\int_{\mathbb{R}^n}|x|^{-\tau}|u|^{p}\partial_k\phi(\frac{x_k}{|\cdot|^2}I_\alpha*|\cdot|^{-\tau}|u|^p)dx.
\end{align*}
\end{prop}
{\section{Well-posedness in the energy space}}
In this section, we develop a local theory in the energy space $H_{\lambda}^1$, Theorem \ref{loc}. Moreover, we prove Theorem \ref{glb} about the global theory for small datum. Let us first denote the source term
\begin{align*}
\mathcal N&:=\mathcal N[u]:=|x|^{-\tau}|u|^{p-2}(I_\alpha *|\cdot|^{-\tau}|u|^p)u.
\end{align*}
\subsection{Nonlinear estimates} We first establish some nonlinear estimates for $\mathcal{N}[u]$. These nonlinear estimates will play an important role in proving the well-posedness results applying the contraction mapping principle. Before stating the nonlinear estimates, we introduce some notations.
We set
$$\mathcal A=\{(q,r):(q,r) \,\,\text{is}\,\, \text{admissible}\},$$
and then define the norm
$$\|u\|_{\Lambda(I)}=\sup_{(q,r)\in \mathcal A}\|u\|_{L_t^q(I;L_x^r)}$$
and its dual weighted norm
$$\|u\|_{\Lambda'(I)}=\sup_{(\tilde q,\tilde r)\in \mathcal A}\|u\|_{L_t^{\tilde q'}(I;L_x^{\tilde r'})}$$
for any interval $I\subset \mathbb{R}.$
\begin{lem}\label{non}
Let $n\ge3$, $\lambda >-\frac{(n-2)^2}{4}$ and $p=1+\frac{2-2\tau+\alpha}{n-2}$.
Assume that
\begin{equation}\label{ass1}
0<\alpha<n, \quad 2\kappa < \frac{5n-4-\sqrt{9n^2+8n-16}}{2}
\end{equation}
and
\begin{equation}\label{ass2}
\frac{\alpha}{2}-\frac{n+2+\sqrt{9n^2+8n-16}}{2}<\tau < \frac{\alpha}{2}-\max\{\frac{n-4}{2}, \frac{n-4}{n},\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\}.
\end{equation}
Then, {there is an admissible pair $(q,r)$, such that}
\begin{equation}\label{non1}
\|\sqrt{\mathcal{K}_{\lambda}}\,\mathcal{N}[u]\|_{\Lambda'(0,T)} \leq C \|u\|_{L_t^{q}(I;\dot W_{\lambda}^{1,r})}^{2p-1}
\end{equation}
and
\begin{equation}\label{non2}
\|\mathcal{N}[u]-\mathcal{N}[v]\|_{\Lambda'(0,T)} \leq C (\|u\|_{L_t^q(I;\dot W_{\lambda}^{1,r})}^{2p-2} +\|v\|_{L_t^q(I;\dot W_{\lambda}^{1,r})}^{2p-2})\|u-v\|_{L_t^q(I;L_x^r)}.
\end{equation}
\end{lem}
\begin{proof}
It is sufficient to show that there exist $(q,r)$ for which
\begin{equation}\label{non11}
\|\nabla\mathcal{N}[u]\|_{L_t^2(I;L_x^{\frac{2n}{n+2}})} \leq C \|\nabla u\|_{L_t^{q}(I;L_x^r)}^{2p-1}
\end{equation}
and
\begin{equation}\label{non22}
\|\mathcal{N}[u]-\mathcal{N}[v]\|_{L_t^2(I;L_x^{\frac{2n}{n+2}})} \leq C \|\nabla u\|_{L_t^q(I;L_x^r)}^{2p-2} \|u-v\|_{L_t^q(I;L_x^r)}
\end{equation}
hold for $\lambda, \alpha, \kappa, \tau, p$ given as in the lemma.
Indeed, by applying the equivalent norms to Sobolev ones (see Lemma \ref{2.2}) we obtain the desired estimates \eqref{non1} and \eqref{non2} if
\begin{equation}\label{ka}
\max\{\frac1n,\frac{\kappa}n\}<\frac{n+2}{2n}<\min\{1,\frac{n-\kappa}n\}, \quad \frac{1+\kappa}{n}<\frac1{r}<\min\{1,\frac{n-\kappa}{n}\}.
\end{equation}
Here, one can easily see that the first condition in \eqref{ka} is always satisfied.
Now we start to prove \eqref{non11}.
Let us set
\begin{equation}\label{st}
\frac1q=\frac{1}{2(2p-1)}, \quad \frac{n-2}{2n}\leq \frac1r \leq \frac12, \quad \frac{2}q+\frac{n}{r}=\frac{n}{2}, \quad 0<\alpha<n.
\end{equation}
We first see that
\begin{align*}
\|&\nabla\mathcal{N}[u]\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim\big\||x|^{-\tau-1}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\big\|_{L_x^{\frac{2n}{n+2}}}+\big\||x|^{-\tau}|u|^{p-1}(I_\alpha \ast|\cdot|^{-\tau-1}|u|^{p})\big\|_{L_x^{\frac{2n}{n+2}}}\\
&+\||x|^{-\tau}|u|^{p-2}|\nabla u|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\|_{L_x^{\frac{2n}{n+2}}}+\||x|^{-\tau}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}|u|^{p-1}\nabla u)\|_{L_x^{\frac{2n}{n+2}}}\\
&:=A_1+A_2+A_3+A_4.
\end{align*}
The first term $A_1$ is bounded by using Lemmas \ref{hls} and \ref{ckn},
in turn,
\begin{align*}
\big\||x|^{-\tau-1}|u|^{p-1}|I_\alpha\ast|\cdot|^{-\tau}|u|^p|\big\|_{L_x^{\frac{2n}{n+2}}}
&\lesssim \||x|^{-\tau-1}|u|^{p-1}\|_{L_x^{a_1}} \||x|^{-\tau}|u|^p\|_{L_x^{b_1}} \\
&=\||x|^{-\frac{\tau+1}{p-1}}u\|^{p-1}_{L_x^{(p-1)a_1}}\||x|^{-\frac{\tau}{p}}u\|^{p}_{L_x^{pb_1}}\\
&\lesssim \|\nabla u\|_{L_x^r}^{2p-1},
\end{align*}
if
\begin{equation}\label{c7}
0<\frac1{a_1},\frac1{b_1}<1, \quad \frac1{a_1}+\frac1{b_1}=\frac{n+2}{2n}+\frac{\alpha}{n},
\end{equation}
\begin{equation}\label{c8}
0<\frac{1}{(p-1)a_1}\leq \frac1r\leq1,\quad 0\leq\frac{\tau+1}{p-1}<\frac{n}{(p-1)a_1} ,\quad \frac{\tau+1}{p-1}-1=\frac{n}{(p-1)a_1}-\frac{n}{r}
\end{equation}
\begin{equation}\label{c9}
0<\frac{1}{pb_1}\leq \frac1r\leq 1,\quad 0 \leq \frac{\tau}{p}<\frac{n}{pb_1},\quad \frac{\tau}{p}-1=\frac{n}{pb_1}-\frac{n}{r}.
\end{equation}
Similarly,
\begin{align*}
A_2&=\big\||x|^{-\tau}|u|^{p-1}(I_\alpha \ast|\cdot|^{-\tau-1}|u|^{p})\big\|_{L_x^{\frac{2n}{n+2}}}\\
&\leq \||x|^{-\tau}|u|^{p-1}\|_{L_x^{a_2}}\||x|^{-\tau-1}|u|^p\|_{L_x^{b_2}}\\
&\lesssim \|\nabla u\|^{2p-1}_{L_x^r}
\end{align*}
if
\begin{equation}\label{c10}
0<\frac1{a_2}, \frac{1}{b_2}<1, \quad \frac{1}{a_2}+\frac{1}{b_2}=\frac{n+2}{2n}+\frac{\alpha}{n},
\end{equation}
\begin{equation}\label{c11}
0<\frac1{(p-1)a_2}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-1}<\frac{n}{(p-1)a_2}, \quad \frac{\tau}{p-1}-1=\frac{n}{(p-1)a_2}-\frac{n}{r},
\end{equation}
\begin{equation}\label{c12}
0<\frac{1}{pb_2}\leq \frac1r \leq 1, \quad 0\leq \frac{\tau+1}{p}<\frac{n}{pb_2}, \quad \frac{\tau+1}{p}-1=\frac{n}{pb_2}-\frac{n}{r}.
\end{equation}
The third term $A_3$ is bounded by using Lemma \ref{hls}, H\"older's inequality and Lemma \ref{ckn}
in turn as
\begin{align*}
\big\||x|^{-\tau}|u|^{p-2}|\nabla u||I_\alpha \ast|\cdot|^{-\tau}|u|^{p}|\big\|_{L_x^{\frac{2n}{n+2}}}
&\lesssim \||x|^{-\tau}|u|^{p-2}|\nabla u|\|_{L_x^{a_1}} \||x|^{-\tau}|u|^p\|_{L_x^{b_1}}\\
&\leq \||x|^{-\tau}|u|^{p-2}\|_{L_x^{a_3}}\|\nabla u\|_{L_x^r} \||x|^{-\tau}|u|^p\|_{L_x^{b_1}}\\
&=\||x|^{-\frac{\tau}{p-2}}u\|^{p-2}_{L_x^{(p-2)a_3}} \|\nabla u\|_{L_x^r} \||x|^{-\frac{\tau}{p}}u\|^p_{L_x^{pb_1}}\\
&\lesssim \|\nabla u\|_{L_x^r}^{2p-1}
\end{align*}
if
\begin{equation}\label{c1}
0<\frac1{a_1}, \frac1{b_1}<1, \quad \frac{1}{a_1}+\frac{1}{b_1}=\frac{n+2}{2n}+\frac{\alpha}{n},\quad \frac{1}{a_1}=\frac{1}{a_3}+\frac{1}{r},
\end{equation}
\begin{equation}\label{c2}
0<\frac{1}{(p-2)a_3} \leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-2}<\frac{n}{(p-2)a_3}, \quad \frac{\tau}{p-2}-1=\frac{n}{(p-2)a_3}-\frac{n}{r},
\end{equation}
\begin{equation}\label{c3}
0<\frac1{pb_1}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p}<\frac{n}{pb_1}, \quad \frac{\tau}{p}-1=\frac{n}{pb_1}-\frac{n}{r}.
\end{equation}
Similarly,
\begin{align*}
A_4&=\||x|^{-\tau}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}|u|^{p-1}\nabla u)\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim \||x|^{-\tau}|u|^{p-1}\|_{L_x^{a_2}}\||x|^{-\tau}|u|^{p-1}\nabla u\|_{L_x^{b_2}}\\
&\leq \||x|^{-\frac{\tau}{p-1}}|u|\|^{p-1}_{L_x^{(p-1)a_2}}\||x|^{-\frac{\tau}{p-1}}u\|^{p-1}_{L_x^{(p-1)b_4}}\|\nabla u\|_{L_x^r}\\
&\lesssim \|\nabla u\|_{L_x^r}^{2p-1}
\end{align*}
if
\begin{equation}\label{c4}
0<\frac1{a_2}, \frac{1}{b_2}<1,\quad \frac1{a_2}+\frac1{b_2}=\frac{n+2}{2n}+\frac{\alpha}{n},\quad \frac{1}{b_2}=\frac{1}{b_4}+\frac1r
\end{equation}
\begin{equation}\label{c5}
0<\frac{1}{(p-1)a_2}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-1}\leq \frac{n}{(p-1)a_2}, \quad \frac{\tau}{p-1}-1=\frac{n}{(p-1)a_2}-\frac{n}{r},
\end{equation}
\begin{equation}\label{c6}
0<\frac1{(p-1)b_4}\leq \frac1r\leq 1, \quad 0\leq \frac{\tau}{p-1}<\frac{n}{(p-1)b_4}, \quad \frac{\tau}{p-1}-1=\frac{n}{(p-1)b_4}-\frac{n}{r}.
\end{equation}
On the other hand, in order to show \eqref{non22}, we first use the following simple inequality
\begin{align*}
\big|\mathcal N[u]-\mathcal N[v]\big| &\lesssim \Big||x|^{-\tau}(|u|^{p-2}+|v|^{p-2})|u-v|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\Big|
\\&\qquad\quad+\Big||x|^{-\tau}|v|^{p-1}\big(I_\alpha \ast|\cdot|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|\big)\Big|.
\end{align*}
From this, we see that
\begin{align*}
\|\mathcal N[u]-\mathcal N[v]\|_{L_x^{\frac{2n}{n+2}}} &\leq \||x|^{-\tau}(|u|^{p-2}+|v|^{p-2})|u-v|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\|_{L_x^{\frac{2n}{n+2}}}\\
&\qquad+\||x|^{-\tau}|v|^{p-1}\big(I_\alpha \ast|\cdot|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|\big)\|_{L_x^{\frac{2n}{n+2}}}\\
&:=B_1+B_2.
\end{align*}
Replacing $\nabla u$ with $u-v$ in the process of dealing with $A_3$, we get
\begin{align*}
B_1&=\||x|^{-\tau}(|u|^{p-2}+|v|^{p-2})|u-v|(I_\alpha\ast|\cdot|^{-\tau}|u|^p)\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim \Big(\||x|^{-\tau}|u|^{p-2}|u-v|\|_{L_x^{a_1}}+\||x|^{-\tau}|v|^{p-2}|u-v|\|_{L_x^{a_1}}\Big)\||x|^{-\tau}|u|^p\|_{L_x^{b_1}}\\
&\lesssim \Big(\||x|^{-\frac{\tau}{p-2}}u\|_{L_x^{(p-2)a_3}}^{p-2}+ \||x|^{-\frac{\tau}{p-2}}u\|_{L_x^{(p-2)a_3}}^{p-2}\Big)\|u-v\|_{L_x^r}\||x|^{-\frac{\tau}{p}}u\|_{L_x^{pb_1}}^p\\
&\lesssim \big(\|\nabla u\|_{L_x^{r}}^{2p-2}+\|\nabla v\|_{L_x^{r}}^{2p-2}\big)\|u-v\|_{L_x^r}
\end{align*}
under the conditions \eqref{c1}, \eqref{c2} and \eqref{c3}.
Similarly, replacing $\nabla u$ with $u-v$ in estimating $A_4$, we also have
\begin{align*}
B_2&=\||x|^{-\tau}|u|^{p-1}(I_\alpha\ast|\cdot|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|)\|_{L_x^{\frac{2n}{n+2}}}\\
&\lesssim \||x|^{-\tau}|u|^{p-1}\|_{L_x^{a_2}}\||x|^{-\tau}(|u|^{p-1}+|v|^{p-1})|u-v|\|_{L_x^{b_2}}\\
&\lesssim \||x|^{-\frac{\tau}{p-1}}u\|_{L_x^{(p-1)a_2}}^{p-1} \Big(\||x|^{-\frac{\tau}{p-1}}u\|_{L_x^{(p-1)b_4}}^p+\||x|^{-\frac{\tau}{p-1}}v\|_{L_x^{(p-1)b_4}}^p\Big)\|u-v\|_{L_x^r}\\
&\lesssim \big(\|\nabla u\|_{L_x^{r}}^{2p-2}+\|\nabla v\|_{L_x^{r}}^{2p-2}\big)\|u-v\|_{L_x^r}
\end{align*}
under the conditions \eqref{c4}, \eqref{c5} and \eqref{c6}.
Now it remains to eliminate some redundant pairs, we then show that there exists an admissible pair $(q,r)$ satisfying the assumptions in the lemma.
The third conditions of \eqref{c8} and \eqref{c9} can be rewritten with respect to $a_1$ and $b_1$, respectively, as follows:
\begin{equation}\label{c19}
\frac{n}{a_1}=\frac{(p-1)n}{r}+\tau-p+2, \quad \frac{n}{b_1}=\frac{pn}{r}+\tau-p.
\end{equation}
Inserting these into the second condition of \eqref{c7} implies
\begin{equation}\label{a}
\frac{(2p-1)n}{r}=\alpha+2p-2\tau-1+\frac{n}{2}.
\end{equation}
Here, we note that this equation is equivalent to the second condition of \eqref{c10} by using \eqref{c18}.
Inserting \eqref{c19} into the conditions in \eqref{c7}, \eqref{c8} and \eqref{c9}, these conditions are summarized as follows:
\begin{equation}\label{c15}
\frac{p-\tau-2}{p-1}<\frac{n}{r}<\frac{p-\tau-2+n}{p-1}, \quad \frac{p-\tau}{p}<\frac{n}{r}<\frac{p-\tau+n}{p},
\end{equation}
\begin{equation}\label{c13}
\tau-p+2\leq 0, \quad \frac{n}{r}\leq n, \quad \frac{p-\tau-2}{p-1} \leq 1 <\frac{n}{r}
\end{equation}
\begin{equation}\label{c14}
\tau-p\leq 0 , \quad \frac{p-\tau}{p}\leq 1 < \frac{n}{r}
\end{equation}
Since $\tau>0$, the first inequalities of the last conditions in \eqref{c13} and \eqref{c14} are redundant.
The first condition in \eqref{c14} is also redundant by the first one in \eqref{c13}.
Also, the third conditions of \eqref{c11} and \eqref{c12} can be rewritten as
\begin{equation}\label{c18}
\frac{n}{a_2}=\frac{(p-1)n}{r}+\tau-p+1, \quad \frac{n}{b_2}=\frac{pn}{r}+\tau-p+1.
\end{equation}
Inserting these into the conditions \eqref{c10}, \eqref{c11} and \eqref{c12}, these conditions are summarized as
\begin{equation}\label{c16}
\frac{p-\tau-1}{p-1}<\frac{n}{r}<\frac{p-\tau-1+n}{p-1}, \quad \frac{p-\tau-1}{p}<\frac{n}{r}<\frac{p-\tau-1+n}{p}
\end{equation}
\begin{equation}\label{c17}
\tau-p+1\leq 0, \quad \frac{n}{r}\leq n, \quad \frac{p-\tau-1}{p-1}\leq 1 <\frac{n}{r}, \quad \frac{p-\tau-1}{p}\leq 1 < \frac{n}{ r}.
\end{equation}
Here, since $\tau>0$, the first inequalities of the last two conditions in \eqref{c17} are redundant.
The first condition in \eqref{c17} is also eliminated by the first one of \eqref{c13}.
Finally, the first two conditions of \eqref{c2} and \eqref{c6} are summarized by inserting the third conditions of \eqref{c2} and \eqref{c6} as
\begin{equation}\label{c20}
\frac{p-\tau-2}{p-2}<\frac{n}r\leq n, \quad \tau-p+2\leq0, \quad \frac{p-\tau-2}{p-2}\leq 1<\frac{n}{r},
\end{equation}
\begin{equation}\label{c21}
\frac{p-\tau-1}{p-1}<\frac{n}{r}\leq n, \quad \tau-p+1\leq 0, \quad \frac{p-\tau-1}{p-1}\leq 1 <\frac{n}{r}.
\end{equation}
Here, the second condition in \eqref{c21} is eliminated by the second one in \eqref{c20}.
Since $p>2$ and $\tau>0$, all lower bounds of $n/r$ in \eqref{c15}, \eqref{c16}, \eqref{c20} and \eqref{c21} are eliminated by $1$.
Moreover, by using $p>2$ the upper bounds of $n/r$ in the second condition of \eqref{c15} and the first one of \eqref{c16} are also eliminated by the upper one of $n/r$ in the second condition of \eqref{c16}.
As a result, combining all the above conditions, we get
\begin{equation}\label{rr}
1<\frac{n}{r}<\min\Big\{\frac{p-\tau-2+n}{p-1}, \frac{p-\tau-1+n}{p}\Big\}, \quad 0<\tau\leq p-2.
\end{equation}
On the other hand, substituting the first condition into the third one in \eqref{st} implies
\begin{equation}\label{r}
\frac{n}{r}=\frac{n}{2}-\frac1{2p-1}.
\end{equation}
Note that \eqref{a} is exactly the same as $p=1+\frac{2-2\tau+\alpha}{n-2}$ by substituting \eqref{r} into \eqref{a}.
Eliminating $r$ by inserting \eqref{r} into the second conditions of \eqref{ka} and \eqref{st}, and the first one of \eqref{rr}, we then get
\begin{equation}\label{c25}
1+\kappa < \frac{n}{2}-\frac1{2p-1}<\min\{n,n-\kappa\}, \quad \frac{n-2}{2}\leq \frac{n}{2}-\frac1{2p-1}\leq \frac{n}2,
\end{equation}
\begin{equation}\label{c22}
p>\frac{n}{2(n-2)}, \quad \tau <n-1+\min\Big\{(p-1)\Big(\frac{2p}{2p-1}-\frac{n}{2}\Big), p\Big(\frac{2p}{2p-1}-\frac{n}{2}\Big)\Big\}.
\end{equation}
Here, the first condition in \eqref{c25} can be divided into two inequalities
\begin{equation}\label{c26}
p>\frac12+\frac{1}{n-2-2\kappa}, \quad \max\big\{-\frac{n}{2},-\frac{n}{2}+\kappa\big\}<\frac1{2p-1},
\end{equation}
in which the second condition is redundant since the maximum value is always negative.
Since $p>2$ and $n\ge3$, the last condition in \eqref{c25} and the first condition in \eqref{c22} are redundant.
Moreover, since $\frac{2p}{2p-1}-\frac{n}{2}<0$, the last condition in \eqref{c22} is reduced to
\begin{equation}\label{c23}
\tau <n-1+ p\Big(\frac{2p}{2p-1}-\frac{n}{2}\Big).
\end{equation}
In order to eliminate $\alpha$, inserting $p=1+\frac{2-2\tau+\alpha}{n-2}$ into the last condition in \eqref{st}, we also have
\begin{equation}\label{c24}
\frac{n}{2}-\frac{(n-2)p}{2}<\tau<n-\frac{(n-2)p}{2}.
\end{equation}
Now we make the lower bounds of $\tau$ less than the upper ones of $\tau$ in \eqref{rr}, \eqref{c23} and \eqref{c24} to obtain
\begin{align*}
\nonumber
\max\Big\{2, &\frac{5n-4-\sqrt{9n^2+8n-16}}{4(n-2)},\frac{n+4}{n}, \frac{n-2}{2(n-1)}\Big\} \leq \\
&\qquad\qquad\qquad\qquad\quad p \leq \min\Big\{\frac{2n}{n-2}, \frac{5n-4+\sqrt{9n^2+8n-16}}{4(n-2)} \Big\},
\end{align*}
which is reduced to
\begin{equation}\label{c27}
\max\Big\{2,\frac{n+4}{n},\frac12+\frac1{n-2-2\kappa}\Big\} < p < \frac{5n-4+\sqrt{9n^2+8n-16}}{4(n-2)}
\end{equation}
by using $p>2$, $n\ge3$ and combining the first condition in \eqref{c26}.
The assumption \eqref{ass2} follows from inserting $p=1+\frac{2-2\tau+\alpha}{n-2}$ into \eqref{c27}. In fact, \eqref{c27} is expressed with respect to $\tau$, as follows:
\begin{equation}
\frac{\alpha}{2}-\frac{n-4+\sqrt{9n^2+8n-16}}{8}<\tau < \frac{\alpha}{2}-\max\{\frac{n-4}{2}, \frac{n-4}{n},\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\}.
\end{equation}
Finally, we make the lower bound of $\tau$ less than the upper ones of $\tau$ to deduce
\begin{equation*}
2\kappa < \frac{5n-4-\sqrt{9n^2+8n-16}}{2},
\end{equation*}
which implies the assumption \eqref{ass1}.
Indeed, to obtain \eqref{ass1}, we can compute as follows:
\begin{eqnarray*}
&&\frac{n-4+\sqrt{9n^2+8n-16}}{8}>\frac{\kappa}{n-2-2\kappa}-\frac{n}{4}\\
&\Leftrightarrow&\frac{n-2-2\kappa}{8}>\frac{\kappa}{3n-4+\sqrt{9n^2+8n-16}}\times\frac{\sqrt{9n^2+8n-16}-(3n-4)}{\sqrt{9n^2+8n-16}-(3n-4)}\\
&\Leftrightarrow&n-2>\frac{\sqrt{9n^2+8n-16}+5n-4}{4(n-1)}\kappa.
\end{eqnarray*}
This is equivalent to
\begin{align*}
\kappa<\frac{4(n-1)(n-2)}{5n-4+\sqrt{9n^2+8n-16}}&=\frac{4(n-1)(n-2)\big\{5n-4-\sqrt{9n^2+8n-16}\big\}}{(5n-4)^2-9n^2-8n+16}\\
&=\frac{5n-4-\sqrt{9n^2+8n-16}}{4}.
\end{align*}
This ends the proof.
\end{proof}
\subsection{Local well-posedness in the energy space}
By Duhamel's principle, we first write the solution of the Cauchy problem \eqref{S} as fixed points of the function
$$\Phi(u)=e^{-it\mathcal{K}_{\lambda}}u_0 + i\epsilon\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}} \mathcal N[u](s,\cdot) ds$$
where $\mathcal N[u]=|x|^{-\tau}|u|^{p-2}(I_\alpha \ast |\cdot|^{-\tau}|u|^p)u$.
For appropriate values of $T,M,N>0$, we shall show that $\Phi$ defines a contraction map on
$$X(T,M,N)=\{u \in C_t(I;H_\lambda^1) \cap L_t^{q}(I;W^{1,r}_{\lambda}): \sup_{t\in I} \|u\|_{H_{\lambda}^1}\leq M, \|u\|_{\mathcal W_{\lambda}(I)}\leq N\}$$
equipped with the distance
$$d(u,v)=\|u-v\|_{\Lambda(I)}.$$
Here, $I=[0,T]$ and $(q,r)$ is given as in Proposition \ref{str}.
We also define
$$\|u\|_{\mathcal W_{\lambda}(I)}:= \|u\|_{\Lambda(I)} + \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}$$
and
$$\|u\|_{\mathcal W{'}_{\lambda}(I)}:= \|u\|_{\Lambda'(I)} + \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda'(I)}.$$
We now show that $\Phi$ is well defined on $X$. By Proposition \ref{str}, we get
\begin{equation}\label{w1}
\|\Phi(u)\|_{\mathcal W_{\lambda}(I)}\leq C\|e^{-it\mathcal K_{\lambda}}u_0\|_{\mathcal W_{\lambda}(I)} +C\big\|\mathcal N[u]\big\|_{\mathcal W{'}_{\lambda}(I)}
\end{equation}
and
\begin{equation*}
\sup_{t\in I}\|\Phi(u)\|_{H_{\lambda}^1}\leq \|u_0\|_{H_{\lambda}^1}+\sup_{t \in I}\Big\|\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}} \mathcal N[u](\cdot,s) ds\Big\|_{H_{\lambda}^1}.
\end{equation*}
Here, for the second inequality we used the fact that $e^{it\mathcal K_{\lambda}}$ is a unitary on $L^2$.
Since $\|\langle \sqrt{\mathcal K_{\lambda}} \rangle u \|\lesssim \|u\| + \|\sqrt{\mathcal K_{\lambda}}u\|$, using the fact $e^{it\mathcal K_{\lambda}}$ is a unitary on $L^2$ again, and then applying the dual estimate of the first one in Proposition \ref{str}, we see that
$$ \sup_{t\in I} \Big\| \int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}} \mathcal N[u](\cdot,s) ds \Big\|_{H_{\lambda}^1} \lesssim \|\mathcal{N}[u]\|_{\Lambda'(I)} +\|\sqrt{\mathcal{K}_{\lambda}}\mathcal{N}[u]\|_{\Lambda'(I)}. $$
Hence,
\begin{equation*}
\sup_{t\in I}\|\Phi(u)\|_{H_\lambda^1} \leq C \|u_0\|_{H_{\lambda}^1}+C\|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}'(I)}.
\end{equation*}
On the other hand, using Lemma \ref{non}, we get
\begin{align}\label{w2}
\nonumber
\|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}'(I)}&\leq C \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}^{2p-1} + \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}^{2p-2}\|u\|_{\Lambda(I)} \\
\nonumber
&\leq C \|\sqrt{\mathcal K_{\lambda}}u\|_{\Lambda(I)}^{2p-2}\|u\|_{\mathcal{W}_{\lambda}(I)}\\
&\leq C N^{2p-1}
\end{align}
if $u \in X$, and for some $\varepsilon>0$ small enough which will be chosen later we get
\begin{equation}\label{sm}
\|e^{it\mathcal{K}_{\lambda}}u_0\|_{\mathcal{W}_{\lambda}(I)}\leq \varepsilon
\end{equation}
which holds for a sufficiently small $T>0$ by the dominated convergence theorem.
We now conclude that
\begin{equation*}
\|\Phi(u)\|_{\mathcal{W}_{\lambda}(I)} \leq \varepsilon + CN^{2p-1} \quad \textnormal{and} \quad \sup_{t \in I}\|\Phi(u)\|_{H_{\lambda}^1} \leq C \|u_0\|_{H_\lambda^1} + CN^{2p-1}.
\end{equation*}
Hence we get $\Phi(u)\in X$ for $u \in X$ if
\begin{equation}\label{w3}
\varepsilon + CN^{2p-1} \leq N \quad \textnormal{and} \quad C\|u_0\|_{H_{\lambda}^1} + CN^{2p-1} \leq M.
\end{equation}
Next we show that $\Phi$ is a contraction on $X$.
Using the same argument used in \eqref{w1}, we see
\begin{equation*}
\|\Phi(u)-\Phi(v)\|_{\Lambda(I)} \leq C \|\mathcal{N}[u]-\mathcal{N}[v]\|_{\Lambda'(I)}.
\end{equation*}
By applying Lemma \ref{non} (see \eqref{non2}), we see
\begin{align*}
\|\mathcal{N}[u]-\mathcal{N}[v]\|_{\Lambda'(I)}&\leq C\big(\|\sqrt{\mathcal{K}_{\lambda}} u\|_{\Lambda(I)}^{2p-2}+\|\sqrt{\mathcal{K}_{\lambda}} v\|_{\Lambda(I)}^{2p-2}\big)\|u-v\|_{\Lambda(I)} \\
&\leq C N^{2p-2}\|u-v\|_{\Lambda(I)}
\end{align*}
as in \eqref{w2}.
Hence, for $u,v \in X$ we obtain $d(\Phi(u), \Phi(v))\leq C N^{2p-2}d(u,v)$.
Now by taking $M=2C\|u_0\|_{H_\lambda^1}$ and $N=2\varepsilon$ and then choosing $\varepsilon>0$ small enough so that \eqref{w3} holds and $CN^{2p-2}\leq 1/2$, it follows that $\Phi$ is a contraction on $X$.
Therefore, we have proved that there exists a unique local solution with $u \in C_t(I;H_{\lambda}^1) \cap L_t^q(I;W_{\lambda}^{1,r})$ for any admissible pair $(q,r)$.
\subsection{Global well-posedness in the energy space for small data}
Using the first estimate in Proposition \ref{str}, we observe that \eqref{sm} is satisfied also if $\|u_0\|_{H_{\lambda}^1}$ is sufficiently small,
\begin{equation*}
\|e^{-it\mathcal K_{\lambda}}u_0\|_{\mathcal{W}_\lambda(I)} \leq C \|u_0\|_{H_{\lambda}^1}\leq \varepsilon
\end{equation*}
from which one can take $T=\infty$ in the above argument to obtain a global unique solution.
The continuous dependence of the solution $u$ with respect to the initial data $u_0$ follows clearly in the same way:
\begin{align*}
d(u,v) &\lesssim d(e^{-it\mathcal{K}_{\lambda}}u_0,e^{-it\mathcal{K}_{\lambda}}v_0) + d\Big(\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[u]ds,\int_0^t e^{-i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[v]ds \Big) \\
&\lesssim \|u_0 - v_0\| + \frac12 d(u,v)
\end{align*}
which implies
\begin{align*}
d(u,v) \lesssim \|u_0-v_0\|_{H_{\lambda}^1}.
\end{align*}
Here, $u,v$ are the corresponding solutions for initial data $u_0, v_0$, respectively.
\subsection{Scattering in the energy space for small data}
To prove the scattering property, we first note that
\begin{align*}
\|e^{it_2 \mathcal{K}_{\lambda}}u(t_2)-e^{it_2 \mathcal{K}_{\lambda}}u(t_1)\|_{H_{\lambda}^1} = \Big\|\int_{t_1}^{t_2} e^{is\mathcal{K}_{\lambda}}\mathcal{N}[u]\Big\|_{H_{\lambda}^1}\\
\lesssim \|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}{'}([t_1 ,t_2])} \\
\lesssim \|u\|^{2p-1}_{\mathcal{W}_{\lambda}([t_1,t_2])} \quad \rightarrow\quad 0
\end{align*}
as $t_1, t_2 \rightarrow {\infty}$.
This implies that $\phi :=\lim_{t\rightarrow {\infty}} e^{it\mathcal{K}_{\lambda}}u(t)$ exists in $H_\lambda^1$.
Furthermore,
$$u(t)-e^{-it\mathcal{K}_{\lambda}}\phi=i\int_t^{\infty} e^{i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[u]ds,$$
and hence
\begin{align*}
\|u(t)-e^{-it{\mathcal K}_\lambda}\phi\|_{H^1_\lambda}&=\Big\|\int_t^{\infty} e^{i(t-s)\mathcal{K}_{\lambda}}\mathcal{N}[u] ds\Big\|_{H_{\lambda}^1}\\
&\lesssim\|\mathcal{N}[u]\|_{\mathcal{W}_{\lambda}'([t,\infty))} \\
&\lesssim \|u\|^{2p-1}_{\mathcal{W}_{\lambda}([t,\infty))} \quad \rightarrow \quad 0
\end{align*}
as $t\rightarrow {\infty}$.
{The scattering is proved.}
\section{Ground states and Gagliardo-Nirenberg estimate}
In this section, we prove Theorem \ref{gag} dealing with the existence of ground state solutions to \eqref{E} and the Gagliardo-Nirenberg type estimate \eqref{gagg}.
\subsection{Gagliardo-Nirenberg estimate}
Using the Hardy-Littlewood-Sobolev inequality (Lemma \ref{hls}), we first see
\begin{equation}\label{ie}
\int_{\mathbb{R}^n} |x|^{-\tau}|u|^p (I_\alpha \ast |\cdot|^{-\tau}|u|^p) dx \lesssim \big\||x|^{-\tau}|u|^p\big\|^2_{{\frac{2n}{\alpha+n}}}
\end{equation}
if $0<\alpha<n.$
Applying Lemma \ref{ckn} to the right-hand side of \eqref{ie} with $b=-\frac{\tau}{p}$, $q=\frac{2np}{\alpha+n}$, $a=0$ and $p=2$, we get
\begin{equation}\label{5.100'}
\big\||x|^{-\tau}|u|^p\big\|^2_{{\frac{2n}{\alpha+n}}}= \big\||x|^{-\frac{\tau}{p}}u\big\|^{2p}_{{\frac{2np}{\alpha+n}}} \lesssim \|\nabla u\|^{2p}
\end{equation}
if
\begin{equation}\label{iee}
0<\frac{\alpha+n}{2np} \leq \frac12 <1, \quad -\frac{\alpha+n}{2p}<-\frac{\tau}{p} \leq 0, \quad \frac{\tau}{p}-1=\frac{\alpha+n}{2p}-\frac{n}2.
\end{equation}
Finally, using the equivalent norm to Sobolev one (see Lemma \ref{2.2}), we obtain the desired estimate \eqref{inte} if $\frac{1+\kappa}{n}<\frac12<\min\{1, 1-\frac{\kappa}{n}\}$ which does not affect the assumptions in \eqref{as}.
Now it remains to derive the assumptions in \eqref{as}.
We note that the last equality in \eqref{iee} is equivalent to $p=1+\frac{2-2\tau+\alpha}{n-2}$.
Using $p=1+\frac{2-2\tau+\alpha}{n-2}$, the requirements \eqref{iee} can be written as
\begin{equation}\label{ie1}
\alpha+n>0, \quad \alpha+n \ge n\tau, \quad 0\leq 2\tau<\alpha+n,
\end{equation}
which are reduced to $0<\tau\leq 1+\frac{\alpha}{n}$ since $\alpha+n\geq n\tau>2\tau>0$, as desired.
\subsection{Existence of ground states}
By using \eqref{gagg}, we first set $J(u)=\|\sqrt{\mathcal K_\lambda}u\|^{2p}/\mathcal P[u]$ and take a sequence $\{u_n\}_{n\in\mathbb{N}}$ in $H_{\lambda}^1$ such that
\begin{align*}
\gamma := \frac1{C_{n,\tau,\alpha,\lambda}}
=\lim_{n\rightarrow \infty} \frac{\|\sqrt{\mathcal{K}_{\lambda}}u_n\|^{2p}}{\mathcal{P}[u_n]}.
\end{align*}
By the scaling $u(x) \mapsto u^{\delta,\mu}(x)=\delta u(\mu x)$ for $\delta, \mu \in \mathbb{R}$, we have
\begin{align*}
\|u^{\delta,\mu}\|^2&=\delta^2 \mu^{-n}\|u\|^2 \\
\|\sqrt{\mathcal K_{\lambda}}u^{\delta,\mu}\|^2&= \|\nabla u^{\delta,\mu}\|^2+\lambda\big\|\frac{u^{\delta,\mu}}{|x|}\big\|^2 \\
&=\delta^2 \mu^{2-n}\Big(\|\nabla u\|^2+\lambda\big\|\frac{u}{|x|}\big\|^2\Big)=\delta^2 \mu^{2-n}\|\sqrt{\mathcal K_\lambda}u\|^2 \\
\int_{\mathbb{R}^n}|x|^{-\tau}|u^{\delta,\mu}|^p(I_\alpha \ast |\cdot|^{-\tau}|u^{\delta,\mu}|^p) dx &= \delta^{2p}\mu^{2\tau-n-\alpha}\int_{\mathbb{R}^n}|x|^{-\tau}|u|^p(I_\alpha \ast |\cdot|^{-\tau}|u|^p) dx,
\end{align*}
which implies that $J(u^{\delta,\mu})=J(u)$ by $p=1+\frac{2-2\tau+\alpha}{n-2}$.
Let $\psi_n = u_n^{\delta_n,\mu_n}$ where
$$\delta_n= \frac{\|u_n\|^{\frac{n}2-1}}{\|\sqrt{\mathcal K_\lambda} u_n\|^{\frac{n}{2}}}, \quad \mu_n=\frac{\|u_n\|}{\|\sqrt{\mathcal K_\lambda} u_n\|}.$$
Then, we have
$$\|\psi_n\|=\|\sqrt{\mathcal K_\lambda} \psi_n\|=1 \quad \textnormal{and} \quad \gamma=\lim_{n\rightarrow\infty} J(\psi_n)=\lim_{n\rightarrow \infty} \frac1{\mathcal P[\psi_n]}.$$
Now we take $\psi \in H_\lambda^1$ so that $\psi_n \rightharpoonup \psi$ in $H_\lambda^1$ and we will show that
$$\frac{1}{\mathcal P[\psi_n]} \rightarrow \frac1{\mathcal P[\psi]} \quad \textnormal{as} \quad n\rightarrow \infty.$$
By using Lemma \ref{hls} {via \eqref{5.100'}}, we have
\begin{align}
\nonumber
&\int_{\mathbb{R}^n} |x|^{-\tau}|\psi_n|^p (I_\alpha \ast |\cdot|^{-\tau}|\psi_n|^p)- |x|^{-\tau}|\psi|^p (I_\alpha \ast |\cdot|^{-\tau}|\psi|^p)dx \\
\nonumber
&\quad =\int_{\mathbb{R}^n} |x|^{-\tau}|\psi|^p \big(I_\alpha \ast |\cdot|^{-\tau}(|\psi_n|^p - |\psi|^p)\big) dx\\
\nonumber
&\qquad \qquad \qquad+ \int_{\mathbb{R}^n}|x|^{-\tau}(|\psi_n|^p-|\psi|^p)(I_\alpha \ast |\cdot|^{-\tau}|\psi_n|^p)dx\\
\nonumber
&\quad\lesssim \Big(\big\||x|^{-\tau}|\psi|^p\big\|_{{\frac{2n}{\alpha+n}}} +\big\||x|^{-\tau}|\psi_n|^p\big\|_{{\frac{2n}{\alpha+n}}}\Big)
\big\||x|^{-\tau}(|\psi_n|^p-|\psi|^p)\big\|_{{\frac{2n}{\alpha+n}}}\\
\label{dif}
&\quad\lesssim \big\||x|^{-\tau}(|\psi_n|^p-|\psi|^p)\big\|_{{\frac{2n}{\alpha+n}}}.
\end{align}
Using the following simple inequality
$$|u|^p-|v|^p \lesssim |u-v|(|u|^{p-1}+|v|^{p-1}), \quad p\ge1$$
and H\"older's inequality, the last term in \eqref{dif} is bounded as
\begin{align}\label{i}
\nonumber
\big\||x|^{-\tau}(|\psi_n|^p-|\psi|^p)\big\|_{{\frac{2n}{\alpha+n}}}
&\lesssim (\|\psi_n\|_{{(p-1)a_1}}^{p-1}+\|\psi\|_{{(p-1)a_1}}^{p-1}) \||x|^{-\tau}|\psi-\psi_n|\|_{{a_2}}\\
&\lesssim (\|\psi\|_{H_\lambda^1}^{p-1}+\|\psi_n\|_{H_\lambda^1}^{p-1})\||x|^{-\tau}|\psi-\psi_n|\|_{{a_2}}
\end{align}
if $0<\tau<2$ and
\begin{equation}\label{gagc}
\frac{\alpha+n}{2n}=\frac{1}{a_1}+\frac{1}{a_2}, \quad \frac{n-2}{2n}\leq\frac{1}{(p-1)a_1}\leq\frac12.
\end{equation}
{Indeed, for the last inequality we used the Sobolev embedding, $H^1(\mathbb{R}^n) \hookrightarrow L^q({\mathbb{R}^n})$ for $2\leq q \leq \frac{2n}{n-2}$ if $n\ge3$.}
Thanks to the compactness of the Sobolev injection, Lemma \ref{compact}, under the condition
\begin{equation}\label{gagcc}
\frac{n-2}{2(n-\tau)}<\frac{1}{a_2}<\frac12,
\end{equation}
we then get $1/{\mathcal P[\psi_n]} \rightarrow 1/{\mathcal P[\psi]}=\gamma$ as $n \rightarrow \infty$.
We need to check that there exist $a_1$ and $a_2$ satisfying \eqref{gagc}, \eqref{gagcc} and the assumptions in Theorem \ref{gag}, but we will postpone this until the end of the proof.
By the lower semi-continuity of the norm, we see
$$\|\psi\|\leq 1 \quad \textnormal{and} \quad \|\sqrt{\mathcal K_\lambda} \psi\|\leq 1,$$
from which $J(\psi)<\gamma$, and hence $\|\psi\|=\|\sqrt{\mathcal K_\lambda}\psi\|=1.$
Consequently,
$$\psi_n \rightarrow \psi \quad \textnormal{in} \quad H_\lambda^1 \quad \textnormal{and} \quad \gamma=J(\psi)=\frac1{\mathcal P[\psi]}.$$
$\psi$ satisfies \eqref{E} because the minimizer satisfies the Euler equation
$$\partial_\epsilon J(\psi+\epsilon \eta)_{|\epsilon=0}=0, \quad \forall \eta \in C_0^{\infty} \cap H_\lambda^1.$$
It remains to check the existence of $a_1$ and $a_2$ satisfying the conditions \eqref{gagc}, \eqref{gagcc} under the assumptions in Theorem \ref{gag}.
Substituting the first condition in \eqref{gagc} into the second one of \eqref{gagc} with $p=1+\frac{2-2\tau+\alpha}{n-2}$, we see
{\begin{equation}\label{gagc2}
\frac{\alpha+n}{2n}-\frac{2-2\tau+\alpha}{2(n-2)}\leq\frac1{a_2} \leq \frac{\alpha+n}{2n}-\frac{2-2\tau+\alpha}{2n}.
\end{equation}}
To eliminate $a_2$, we make the lower bounds of $1/a_2$ of \eqref{gagcc} and \eqref{gagc2} less than the upper ones of $1/a_2$ of \eqref{gagcc} and \eqref{gagc2}.
{Indeed, starting the process from the lower bound in \eqref{gagc2}, we arrive at $n\tau<\alpha+n$, which is satisfied by the assumption \eqref{as}.
Similarly, from the lower bound in \eqref{gagcc}, we arrive at $0<\tau<\frac{n+2}{2}$, but this is eliminated by \eqref{as} using the facts that $n\ge3$ and $\tau<2$.}
\section{Blow-up of the energy solutions}
In this section, we prove Theorem \ref{t1} which provides a criterion for blow-up phenomena in the energy-critical focusing regime under the threshold of the ground state. As a consequence, we establish Corollary \ref{t2}. Moreover, we prove Proposition \ref{s} and Corollary \ref{s2} about energy bounded solutions.
\subsection{Criterion for blow-up}
In order to prove Theorem \ref{t1}, we use proof by contradiction through the following inequality which will be proved:
\begin{equation}\label{qq}
V_{R}''\leq4\,\mathcal I[u]+\frac C{R^{2\tau}}+\frac C{R^2},
\end{equation}
where $\mathcal I [u] = \|\sqrt{\mathcal K_\lambda} u\|^2 - \mathcal P[u]$ and $R\gg1$.
Indeed, taking $u_0\in H^1_\lambda$ with \eqref{ss'} and assuming that $u$ is global, for $R\gg1$,
$$V_{R}''\leq4\,\mathcal I[u]+\frac C{R^{2\tau}}+\frac C{R^2}<-c<0$$
if there is no sequence $t_n\to\infty$ such that $\|\sqrt{\mathcal K_\lambda} u(t_n)\|\to \infty$, which is a contradiction.
Before starting to prove \eqref{qq}, we first define $\phi_R(\cdot):=R^2\phi(\frac{\cdot}{R})$, $R>0$, where the radial function $\phi\in C_0^\infty(\mathbb{R}^n)$ satisfies
$$\phi(|x|)=\phi(r):=\left\{
\begin{array}{ll}
\frac{r^2}2,\quad\mbox{if}\quad r\leq1 ;\\
0,\quad\mbox{if}\quad r\geq2,
\end{array}
\right.\quad\mbox{and}\quad \phi''\leq1.$$
Then, $\phi_R$ satisfies
$$\phi_R''\leq1,\quad \phi_R'(r)\leq r,\quad\Delta \phi_R\leq N$$
and, for $|x|\leq R$
\begin{align}\label{calc}
\nabla\phi_R(x)=x,\quad\Delta\phi_R(x)=N.
\end{align}
By recalling the definition of $V(t)$ and $M(t)$ in Section 2, we denote the localized variance and Morawetz action as
\begin{align*}
V_R(t):=\int_{\mathbb{R}^n}\phi_R(x)|u(x,\cdot)|^2\,dx, \quad V_R'(t)=M_R(t):=2\Im\int_{\mathbb{R}^n}\bar u\nabla \phi_R \cdot \nabla udx.
\end{align*}
By Proposition \ref{mrwz}, we divide $M_R'$ into two parts, $A$ and $B$, as $M_R'(t)=A + B$
where
$$A=4\sum_{k,l=1}^{N}\int_{\mathbb{R}^n}\partial_l\partial_k\phi_R\operatorname{Re}(\partial_ku\partial_l\bar u)dx-\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2dx+4\lambda\int_{\mathbb{R}^n}\nabla\phi_R\cdot x\frac{|u|^2}{|x|^4}dx$$
and
\begin{align}
\nonumber
B&=-\frac{2(p-2)}{p}\int_{\mathbb{R}^n}\Delta\phi_R|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
\nonumber
&\quad\qquad -\frac{4\tau}p\int_{\mathbb{R}^n}x\cdot\nabla\phi_R|x|^{-\tau-2}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
\nonumber
&\qquad\qquad\qquad -\frac{4(N-\alpha)}p\sum_{k=1}^N\int_{\mathbb{R}^n}|x|^{-\tau}|u|^{p}\partial_k\phi_R(\frac{x_k}{|\cdot|^2}I_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
\label{123}
&=:B_1 + B_2+ B_3.
\end{align}
Using the following radial relations
\begin{equation}\label{''}
\partial_k=\frac{x_k}r\partial_r,\quad\partial_l\partial_k=\Big(\frac{\delta_{lk}}r-\frac{x_lx_k}{r^3}\Big)\partial_r+\frac{x_lx_k}{r^2}\partial_r^2
\end{equation}
and the Cauchy-Schwarz inequality via the properties of $\phi$,
it follows that
\begin{align}
\nonumber
A&= 4\int_{\mathbb{R}^N} |\nabla u|^2 \frac{\phi_R'}{r}dx + 4 \int_{\mathbb{R}^N}
|x\cdot \nabla u|^2 \big(\frac{\phi_R''}{r^2}-\frac{\phi_R'}{r^3}\big) dx \\
\nonumber
&\qquad \qquad\qquad \qquad\qquad \qquad\quad-\int_{\mathbb{R}^N} \Delta^2\phi_R|u|^2dx + 4 \int_{\mathbb{R}^N} \frac{|u|^2}{r^3}\phi_R' dx \\
\nonumber
&\leq4\int_{\mathbb{R}^n}|\nabla u|^2\frac{\phi_R'}r\,dx+4\int_{\mathbb{R}^n}\frac{|x\cdot\nabla u|^2}{r^2}\big(1-\frac{\phi_R'}r\big)dx\\
\nonumber
&\qquad \qquad \qquad \qquad\qquad \qquad\quad -\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2\,dx+4\lambda\int_{\mathbb{R}^n}\frac{|u|^2}{r^3}\phi_R'dx\nonumber\\
\label{(I)}
&\leq4\int_{\mathbb{R}^n}|\nabla u|^2dx-\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2dx+4\lambda\int_{\mathbb{R}^n}\frac{|u|^2}{r^2}dx.
\end{align}
On the other hand, to handle the part $B$, we split the integrals in $B$ into the regions $|x|<R$ and $|x|>R$.
Then, by \eqref{calc}, the first two terms in $B$ are written as
\begin{align}
\nonumber
B_1+B_2&=\frac{2N(2-p)-4\tau}{p}\int_{|x|<R}|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
\nonumber
&\qquad \qquad \qquad \qquad +O\bigg(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\bigg)\\
&=\frac{2(N(2-p)-2\tau)}p\mathcal{P}[u]+O\bigg(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\bigg).\label{12}
\end{align}
For the third term $B_3$, with calculus done in \cite[Lemma 4.5]{st4}, we have
\begin{align}
B_3&=\frac{2(\alpha-N)}{p}\int_{|y|<R}\int_{|x|<R}I_\alpha(x-y)|y|^{-\tau}|u(y)|^p|x|^{-\tau}|u(x)|^{p}\,dx\,dy\nonumber\\
&\qquad \qquad \qquad \qquad \qquad \qquad +O\bigg(\int_{|x|>R}(I_\alpha*|\cdot|^{-\tau}|u|^p)|x|^{-\tau}|u|^pdx\bigg)\nonumber\\
&=\frac{2(\alpha-N)}{p}\int_{|x|<R}|x|^{-\tau}|u(x)|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)dx \nonumber\\
&\qquad \qquad \qquad \qquad \qquad \qquad+O\bigg(\int_{|x|>R}(I_\alpha*|\cdot|^{-\tau}|u|^p)|x|^{-\tau}|u|^pdx\bigg)\nonumber\\
&=\frac{2(\alpha-N)}{p}\mathcal P[u]+O\bigg(\int_{|x|>R}|x|^{-\tau}|u|^p(I_\alpha*|\cdot|^{-\tau}|u|^p)dx\bigg).\label{372}
\end{align}
Combining \eqref{(I)}, \eqref{123}, \eqref{12} and \eqref{372}, we then obtain
\begin{align}
M_R'&\leq-\int_{\mathbb{R}^n}\Delta^2\phi_R|u|^2\,dx+4\int_{\mathbb{R}^n}|\nabla u|^2+4\lambda\int_{\mathbb{R}^n}\frac{|u|^2}{r^2}\,dx
-4\mathcal P[u]\nonumber\\
&\qquad \qquad \qquad \qquad \qquad \qquad \quad +O\left(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)\,dx\right)\nonumber\\
&\leq4\Big(\|\sqrt{\mathcal K_\lambda} u\|^2-\mathcal P[u]\Big)+O\left(\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)\,dx\right)+O(R^{-2}).\nonumber
\end{align}
Here, for the last inequality we used the fact that $|\partial^{\nu}\phi_R|\lesssim R^{2-|\nu|}$.
Now, using Lemma \ref{hls} and Lemma \ref{ckn} with $b=-\frac{\tau}{p}$, $q=\frac{2Np}{\alpha+N}$, $a=0$ and $p=2$, we obtain
\begin{eqnarray*}
\int_{|x|>R}|x|^{-\tau}|u|^{p}(I_\alpha*|\cdot|^{-\tau}|u|^p)\,dx
&\lesssim& \||x|^{-\tau}|u|^{p}\|^2_{\frac{2N}{\alpha+N}}\\
&\lesssim& R^{-2\tau}\|u\|_{\frac{2Np}{\alpha+N}}^{2p}\\
&\lesssim& R^{-2\tau}\|\nabla u\|^{2p}
\end{eqnarray*}
if
\begin{equation}\label{q}
0<\frac{\alpha+n}{2np}\leq \frac12<1 , \quad -\frac{\alpha+n}{2p}<-\frac{\tau}{p}\leq 0, \quad \frac{\tau}{p}-1=\frac{\alpha+n}{2p}-\frac{n}{2}.
\end{equation}
Here we note that the last equality in \eqref{q} is equivalent to $p=1+\frac{2-2\tau+\alpha}{n-2}$.
Using $p=1+\frac{2-2\tau+\alpha}{n-2}$, the requirement \eqref{q} can be written as \eqref{ie1},
which reduces to $0<\tau\leq 1+\frac{\alpha}{n}$ since $\alpha+n\geq n\tau>2\tau>0$.
Consequently, for large $R\gg1$, using the equivalent norm to Sobolev one (see Lemma \ref{2.2}), we get
\begin{equation*}
M_R'\leq4\,\mathcal I[u]+\frac C{R^{2\tau}}\|\sqrt{\mathcal K_\lambda} u\|^{2p}+\frac C{R^2},
\end{equation*}
as desired.
\subsection{The boundedness of the energy solution}
Now, we prove Proposition \ref{s}.
Specifically, the energy bound is demonstrated by combining the conservation law with the following lemma, known as a coercivity (or energy trapping) result, obtained through the assumption \eqref{ss}.
\begin{lem}\label{bnd'}
Let $\varphi\in H_\lambda^1$ be a ground state solution to \eqref{E}.
Assume that there is $0<c<1$ satisfying
$$\mathcal P[u]<c \mathcal P[\varphi], \quad u\in H_\lambda^1.$$
Then there exists a constant $c_\varphi>0$ such that
\begin{align*}
\|\sqrt{\mathcal K_\lambda} u\|^2&<c_\varphi\mathcal E[u].
\end{align*}
\end{lem}
\begin{proof}
Thanks to \eqref{E}, we first see
\begin{equation}\label{p}
\mathcal P[\varphi]:=\int_{\mathbb{R}^n} |x|^{-\tau}|\varphi|^p(I_\alpha *|\cdot|^{-\tau}|\varphi|^p)dx=\|\sqrt{\mathcal K_\lambda}\varphi\|^2.
\end{equation}
Applying the Gagliardo-Nirenberg inequality, Theorem \ref{gag}, we have
\begin{align*}
(\mathcal P[u])^p \leq (\mathcal P[u])^{p-1}\cdot C_{N,\tau,\alpha,\lambda} \|\sqrt{\mathcal K_\lambda} u\|^{2p}\leq \Big(\frac{\mathcal P[u]}{\mathcal P[\varphi]}\Big)^{p-1}\|\sqrt{\mathcal K_\lambda} u\|^{2p}.
\end{align*}
Here, for the second inequality, we used the fact that
\begin{equation}\label{poh}
C_{N,\tau,\alpha,\lambda}=\frac{\mathcal P[\varphi]}{\|\sqrt{\mathcal K_\lambda}\varphi\|^{2p}}=({\mathcal P[\varphi]})^{1-p}
\end{equation}
by using \eqref{p}.
Therefore, we obtain
\begin{eqnarray*}
\mathcal P[u]
&\leq&\Big(\frac{\mathcal P[u]}{\mathcal P[\varphi]}\Big)^\frac{p-1}p\|\sqrt{\mathcal K_\lambda} u\|^2,
\end{eqnarray*}
which implies that
\begin{align*}
\mathcal E[u]&=\|\sqrt{\mathcal K_\lambda} u\|^2-\frac1p\mathcal P[u]\\
&\geq\Big(1-\frac{1}p\Big(\frac{\mathcal P[u]}{\mathcal P[\varphi]}\Big)^\frac{p-1}p\Big)\|\sqrt{\mathcal K_\lambda} u\|^2\\
&\geq\Big(1-\frac{c^{(p-1)/p}}p\Big)\|\sqrt{\mathcal K_\lambda} u\|^2.
\end{align*}
{This concludes the proof.}
\end{proof}
\subsection{Energy bounded/non-global solutions}
Finally, we prove Corollaries \ref{t2} and \ref{s2}, which present the dichotomy between energy bounded and non-global solutions.
\subsubsection{Energy bounded solutions}
First, Corollary \ref{s2} follows from the invariance of \eqref{t11} and \eqref{t12} under the flow of \eqref{S}.
We first define a function $f:[0,T^\ast) \rightarrow \mathbb{R}$ as
\begin{equation}\label{def}
f(t)= t- \frac{C_{N,\tau,\alpha,\lambda}}{p} t^{p}.
\end{equation}
Since $p>1$, the function $f(t)$ has a maximum value $f(t_1)=\frac{p-1}{p}C_{N,\tau,\alpha,\lambda}^{-\frac1{p-1}}$ at $t_1= (C_{N,\tau,\alpha,\lambda})^{-\frac1{p-1}}$.
We note here that $t_1=\|\sqrt{\mathcal K_\lambda} \varphi\|^2$ by \eqref{poh} and \eqref{p}.
Using the Gagliardo-Nirenberg type inequality, Theorem \ref{gag}, we see
\begin{align}
\mathcal E[u]=\|\sqrt{\mathcal K_\lambda} u\|^2 - \frac1p \mathcal P[u] &\geq\|\sqrt{\mathcal K_\lambda} u\|^2-\frac{C_{N,\tau,\alpha,\lambda}}{p}\|\sqrt{\mathcal K_\lambda} u\|^{2p}\label{xxx}\\
&=f\big(\|\sqrt{\mathcal K_\lambda} u\|^2\big).\nonumber
\end{align}
By the assumption \eqref{t11} with \eqref{poh} and \eqref{p}, we also see
\begin{equation}\label{x}
\mathcal E[u_0]<\mathcal E[\varphi] =f(t_1),
\end{equation}
which implies
\begin{equation}\label{xx}
f\big(\|\sqrt{\mathcal K_\lambda}u\|^2\big) \leq \mathcal E[u]= \mathcal E[u_0] <f(t_1).
\end{equation}
Since $\|\sqrt{\mathcal K_\lambda}u_0\|^2<\|\sqrt{\mathcal K_\lambda}\varphi\|^2=t_1$ by the assumption \eqref{t12}, and by the continuity in time with \eqref{xx}, we get
$$\|\sqrt{\mathcal K_\lambda}u(t)\|^2 < t_1, \quad \forall t \in [0,T^\ast),$$
which is equivalent to $$\mathcal{MG}[u(t)]<1, \quad \forall t \in [0,T^\ast).$$
Therefore \eqref{t11} and \eqref{t12} are invariant under the flow \eqref{S} and this implies that $T^\ast=\infty$,
which concludes the proof.
\subsubsection{Blow-up }
To prove Corollary \ref{t2}, we use the same function $f(t)$ defined in \eqref{def}.
By the assumption \eqref{t13} with \eqref{poh} and \eqref{p}, we have
$$\|\sqrt{\mathcal K_\lambda} u_0\|^2>\|\sqrt{\mathcal K_\lambda}\varphi\|^2=t_1.$$
Thus, the continuity in time with \eqref{xx} gives
$$ \|\sqrt{\mathcal K_\lambda} u (t)\|^2>t_1,\quad \forall\, t\in [0,T^*) .$$
Hence, $\mathcal{MG}[u(t)]>1$ on $[0,T^*)$, and this and \eqref{t11} are invariant under the flow \eqref{S}.
Finally, by using $\mathcal{E}[u(t)]>1$, $\mathcal{MG}[u(t)]>1$ and the identity $p\mathcal E[\varphi]=(p-1)\|\sqrt{\mathcal K_\lambda}\varphi\|^2$, we obtain for all $t\in[0,T^\ast)$
\begin{align*}
\mathcal I[u(t)]&=\|\sqrt{\mathcal K_\lambda} u\|^2-\mathcal P[u]\\
&=p\mathcal E[u]-(p-1)\|\sqrt{\mathcal K_\lambda} u\|^2\\
&< p\mathcal E[\varphi]-(p-1)\|\sqrt{\mathcal K_\lambda}\varphi\|^2<0,
\end{align*}
which concludes the proof by using Theorem \ref{t1}.
\section{Appendix: Morawetz estimate}
In this section, we present a virial identity (Proposition \ref{mrwz}) that exhibits the convexity property in time for certain quantities associated with solutions of the generalized Hartree equation \eqref{S}. This identity serves as the basis for studying blow-up phenomena. The virial identity for the free nonlinear Schr\"odinger equation was first established by Zakharov \cite{Za} and Glassey \cite{G}. When the free equation is perturbed by an electromagnetic potential, Fanelli and Vega \cite{FV} derived the corresponding virial identities for the linear Schr\"odinger and linear wave equations. The proof relies on the standard technique of Morawetz multipliers, introduced in \cite{M} for the Klein-Gordon equation. The identity we present here is the same as that in \cite{sx}, with the addition of a term corresponding to the contribution from the inverse square potential.
\begin{proof}[Proof of Proposition \ref{mrwz}]
Let $u \in C_t([0,T];H_\lambda^1)$ be a solution to the focusing case of equation \eqref{S}
\begin{align}\label{S1}
i\partial_t u
&=-\Delta u + \frac\lambda{|x|^2}u -|x|^{-\tau}|u|^{p-2}\Big(I_\alpha*|\cdot|^{-\tau}|u|^p\Big)u \\
\nonumber
&=-\Delta u + \frac\lambda{|x|^2}u -\mathcal N.
\end{align}
Multiplying \eqref{S1} by $2 \bar u$ and taking the imaginary part, we obtain
\begin{equation*}
-2 \Im{(\bar u \Delta u)} = \partial_t(|u|^2).
\end{equation*}
Using this, we can compute
$$V_\phi'(t)=2\Im\int_{\mathbb{R}^n} \bar u \nabla\phi\cdot\nabla u dx=2\sum_{k=1}^N \Im\int_{\mathbb{R}^n} \bar u \partial_k\phi\cdot \partial_k u dx.$$
In order to consider the second derivative of $V_\xi$, we need to compute
\begin{equation}\label{sec}
V''_{\phi}(t)=2\sum_{k=1}^N \int_{\mathbb{R}^n} \partial_k\phi\cdot \partial_t \Im(\bar u \partial_k u) dx.
\end{equation}
Using \eqref{S1}, we have
\begin{align}
\partial_t\Im(\bar u \partial_k u )
&=\operatorname{Re}(i\partial_t u \partial_k\bar u )-\operatorname{Re}(i\bar{u} \partial_k \partial_t u )\nonumber\\
&=\operatorname{Re}\big(\partial_k\bar u (-\Delta u + \frac\lambda{|x|^2}u -\mathcal N)\big)-\operatorname{Re}\big(\bar u \partial_k(-\Delta u + \frac\lambda{|x|^2}u -\mathcal N)\big)\nonumber\\
&=\operatorname{Re}\big(\bar u \partial_k\Delta u -\Delta u\partial_k\bar u \big)+\operatorname{Re}\big(\bar u \partial_k\mathcal N-\mathcal N\partial_k\bar u \big)+\lambda\operatorname{Re}\big(\frac{u}{|x|^2}\partial_k\bar u-\bar u \partial_k(\frac{u }{|x|^2}) \big).\label{vr}
\end{align}
Here, for the last term, we see
\begin{equation}\label{aa}
\operatorname{Re}\big(\bar u \partial_k(\frac{u }{|x|^2})-\frac{u}{|x|^2}\partial_k\bar u \big)=-2\frac{x_k}{|x|^4}{|u|^2}.
\end{equation}
For the first two terms, we will apply the following lemma; its proof, omitted here, can be found in the proof of \cite[Proposition 2.12]{sx}.
\begin{lem}\label{lem}
Let $\phi:\mathbb{R}^N \rightarrow \mathbb{R}$ be a radial, real-valued multiplier with $\phi=\phi(|x|)$.
Then, for $\mathcal N$ defined as $\mathcal N = -|x|^{-\tau}|u|^{p-2}\big(J_\alpha*|\cdot|^{-\tau}|u|^p\big)u$, we have
\begin{align*}
&\operatorname{Re}\int_{\mathbb{R}^N}(\bar u \partial_k\Delta u -\Delta u\partial_k\bar u )+(\bar u \partial_k\mathcal N-\mathcal N\partial_k\bar u ) dx\\
&\qquad\qquad =\sum_{l=1}^{N}2\int_{\mathbb{R}^n}\partial_l\partial_k\phi\,\mathbb{R}e(\partial_ku\partial_l\bar u)dx-\frac12\int_{\mathbb{R}^n}\Delta^2\phi|u|^2dx\\
&\qquad \qquad\qquad-\frac{(p-2)}{p}\int_{\mathbb{R}^n}\Delta\phi|x|^{-\tau}|u|^p(J_\alpha*|\cdot|^{-\tau}|u|^{p})dx\\
&\qquad\qquad \qquad \qquad -\frac{2\tau}p\int_{\mathbb{R}^n}x\cdot\nabla\phi|x|^{-\tau-2}|u|^{p}(J_\alpha*|\cdot|^{-\tau}|u|^p)dx\\
& \qquad \qquad\qquad \qquad \qquad -\frac{2(N-\alpha)}p\int_{\mathbb{R}^n}\partial_k \phi|x|^{-\tau}|u|^{p}(\frac{x_k}{|\cdot|^2}J_\alpha*|\cdot|^{-\tau}|u|^p)dx.
\end{align*}
\end{lem}
Therefore, by combining \eqref{sec}, \eqref{vr}, Lemma \ref{lem} and \eqref{aa}, we finish the proof.
\end{proof}
\section{Declarations}
$\!\!\!\!\!\!\bullet$ The authors have no relevant financial or non-financial interests to disclose.\\
$\bullet$ The authors have no competing interests to declare that are relevant to the content of this article.\\
$\bullet$ All authors certify that they have no affiliations with or involvement in any organization or entity with any financial interest or non-financial interest in the subject matter or materials discussed in this manuscript.\\
$\bullet$ The authors have no financial or proprietary interests in any material discussed in this article.\\
$\bullet$ The data that support the findings of this study are available from the corresponding author
upon reasonable request.
\end{document} |
\begin{document}
\title{Weak density of orbit equivalence classes of free group actions}
\author{Lewis Bowen\footnote{email:lpbowen@math.utexas.edu} }
\maketitle
\begin{abstract}
It is proven that the orbit-equivalence class of any essentially free probability-measure-preserving action of a free group $G$ is weakly dense in the space of actions of $G$.
\end{abstract}
\noindent
{\bf Keywords}: orbit equivalence, free groups, weak equivalence.\\
{\bf MSC}: 37A20, 37A15\\
\noindent
\section{Introduction}
The results of this paper are motivated by orbit-equivalence theory and weak equivalence of group actions. Let us first recall some terminology before delving into background material.
Let $(X,\mu)$ be a standard non-atomic probability space and $\mathrm{Aut}(X,\mu)$ the group of all measure-preserving automorphisms of $(X,\mu)$ in which we identify two automorphisms if they agree on a conull subset. Let $G$ be a countable group. By an {\em action of $G$} we will mean a homomorphism $a:G \to \mathrm{Aut}(X,\mu)$. In particular, all actions in this paper are probability-measure-preserving. Let $A(G,X,\mu)$ denote the set of all such actions. It admits a natural topology as follows. First, let us recall that $\mathrm{Aut}(X,\mu)$ is a Polish group with the weak topology (see \S \ref{sec:prelim} for details). We endow the product space $\mathrm{Aut}(X,\mu)^G$ with the product topology and view $A(G,X,\mu)$ as a subspace $A(G,X,\mu) \subset \mathrm{Aut}(X,\mu)^G$ with the induced topology. It is well-known that $A(G,X,\mu)$ is a Polish space \cite{Ke10}.
\subsubsection{Weak containment}
If $a \in A(G,X,\mu)$ and $T \in \mathrm{Aut}(X,\mu)$, define $a^T \in A(G,X,\mu)$ by $a^T(g)=Ta(g)T^{-1}$. Let $[a]_{MC}=\{a^T:~T \in \mathrm{Aut}(X,\mu)\} \subset A(G,X,\mu)$ be the {\em measure-conjugacy class of $a$}.
Given two actions $a,b \in A(G,X,\mu)$ we say {\em $a$ is weakly contained in $b$} (denoted $a\prec b$) if $a$ is contained in the closure of the measure-conjugacy class of $b$ (i.e., $a \in \overline{[b]_{MC}}$). We say {\em $a$ is weakly equivalent to $b$} if $a\prec b$ and $b\prec a$. These notions were introduced by A. Kechris \cite{Ke10} as an analog of weak containment of unitary representations.
We can also think of weak equivalence as describing the manner in which the Rohlin Lemma fails for non-amenable groups. Recall that the Rohlin Lemma states that any pmp ${\mathbb Z}$-action is approximately periodic. This fundamental fact is critically important in much classical ergodic theory. It has been extended to amenable groups \cite{OW80}. Moreover, the Rohlin Lemma is essentially equivalent to the statement that if $G$ is infinite and amenable then any essentially free action $a\in A(G,X,\mu)$ weakly contains all actions of $G$ (i.e., $[a]_{MC}$ is dense in $A(G,X,\mu)$) \cite{Ke11}. By contrast, any non-amenable group admits an essentially free strongly ergodic action (e.g., Bernoulli shift actions) \cite{Sc81, KT08}. By definition, the closure of the measure-conjugacy class of a strongly ergodic action cannot contain any non-ergodic action. So each non-amenable group admits at least two non-weakly-equivalent essentially free actions. It is an open problem whether any non-amenable group admits at least two {\em ergodic} non-weakly-equivalent actions. However M. Abert and G. Elek \cite{AE11} made use of profinite actions to show that there is an explicit large family of residually finite groups $G$ that admit an uncountable family of ergodic non-weakly-equivalent actions. This family includes non-abelian free groups.
\subsubsection{Orbit-equivalence}
We say two actions $a,b \in A(G,X,\mu)$ are {\em orbit-equivalent} if there exists $T\in \mathrm{Aut}(X,\mu)$ which takes orbits to orbits: $T(a(G)x) = b(G)T(x)$ for a.e. $x\in X$. We say that $a\in A(G,X,\mu)$ is {\em essentially free} if for a.e. $x\in X$ the stabilizer of $x$ in $G$ is trivial: $\{g\in G:~a(g)x=x\} = \{e_G\}$.
If $G$ is amenable then every two essentially free ergodic actions of $G$ are orbit equivalent \cite{OW80}. On the other hand, I. Epstein proved that if $G$ is non-amenable then $G$ admits an uncountable family of essentially free non-orbit-equivalent ergodic pmp actions \cite{Ep09, IKT09}. This followed a series of earlier results that dealt with various important classes of non-amenable groups \cite{GP05, Hj05, Io09, Ki08, MS06, Po06}. In \cite{IKT09} it is shown that essentially free mixing actions of any non-amenable group $G$ cannot be classified by orbit-equivalence up to countable structures.
The main result of this paper shows that, although there are uncountably many essentially free non-orbit-equivalent ergodic pmp actions of any non-abelian free group, the orbit-equivalence class of any such action is dense in the space of all actions.
\subsubsection{Results}
Our main result is:
\begin{thm}\label{thm:main}
Let $G$ be a free group with at most countably many generators. Let $a \in A(G,X,\mu)$ be essentially free and let $[a]_{OE}$ be the set of all actions $b \in A(G,X,\mu)$ which are orbit-equivalent to $a$. Then $[a]_{OE}$ is dense in $A(G,X,\mu)$.
\end{thm}
By contrast we can use rigidity results to show that many groups do not satisfy Theorem \ref{thm:main}. For this purpose, let us recall that if $(K,\kappa)$ is a probability space then any countable group $G$ acts on the product space $(K,\kappa)^G$ by
$$(gx)(f)=x(g^{-1}f),\quad x\in K^G, g,f \in G.$$
This action is called the {\em Bernoulli shift over $G$} with base space $(K,\kappa)$.
\begin{thm}\label{thm:counter}
Let $G$ be any countably infinite group satisfying at least one of the following conditions:
\begin{enumerate}
\item $G=G_1\times G_2$ where $G_1,G_2$ are both infinite, $G_1$ is nonamenable and $G$ has no nontrivial finite normal subgroups;
\item $G$ is the mapping class group of the genus $g$ $n$-holed surface for some $(g,n)$ with $3g +n - 4>0$ and $(g,n) \notin \{(1,2),(2,0)\}$;
\item $G$ has property (T) and every nontrivial conjugacy class of $G$ is infinite.
\end{enumerate}
Let $(X,\mu)$ be a standard non-atomic probability space and let $a \in A(G,X,\mu)$ be isomorphic to the Bernoulli action $G \curvearrowright ([0,1],\lambda)^G$ where $\lambda$ is the Lebesgue measure on the unit interval $[0,1]$. Then $[a]_{OE}$ is not dense in $A(G,X,\mu)$.
\end{thm}
Before proving this, we need a lemma.
\begin{defn}
Let $a\in A(G,X,\mu)$ and $\alpha \in \mathrm{Aut}(G)$. Observe that the composition $a \circ \alpha \in A(G,X,\mu)$. We say that two actions $a,b \in A(G,X,\mu)$ are {\em conjugate up to automorphisms} if there exists $\alpha \in \mathrm{Aut}(G)$ and $T \in \mathrm{Aut}(X,\mu)$ such that $b = (a \circ \alpha)^T$.
\end{defn}
\begin{lem}
Let $G$ be any countable group, $(K,\kappa)$ a standard probability space and $G \curvearrowright^a (K,\kappa)^G$ the Bernoulli shift action. (So $(gx)(f)=x(g^{-1}f)$ for $x\in K^G$ and $g,f \in G$). Then any action of $G$ which is conjugate to $a$ up to automorphisms is measurably conjugate to $a$.
\end{lem}
\begin{proof}
Suppose $\alpha \in \mathrm{Aut}(G)$. It suffices to show that $a$ is measurably conjugate to $a \circ \alpha$. For this purpose, define $T:K^G \to K^G$ by $T(x)(g)=x(\alpha^{-1}(g))$. Then for any $g,f \in G$ and $x\in K^G$,
$$ T(f x)(g) = (fx)(\alpha^{-1}(g)) = x(f^{-1}\alpha^{-1}(g)) = x(\alpha^{-1}( \alpha(f^{-1})g)) = (Tx)(\alpha(f^{-1})g) = \alpha(f)(Tx)(g).$$
This shows that $T$ intertwines $a$ and $a\circ \alpha$ as required.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:counter}]
Using the previous lemma and \cite[Corollary 1.3]{Po08}, \cite[Theorem 1.4]{Ki08} and \cite[Corollary 0.2]{Po06} we obtain that
if $G$ and $a$ are as above then $[a]_{OE}=[a]_{MC}$. Moreover $a$ is strongly ergodic \cite{KT08}. So there do not exist any non-ergodic actions in the closure of its measure-conjugacy class. In particular $[a]_{MC}$ is not dense in $A(G,X,\mu)$.
\end{proof}
\begin{remark}
Theorem \ref{thm:main} and the upper semi-continuity of cost \cite[Theorem 10.12]{Ke10} imply that finitely generated free groups have fixed price, a fact originally obtained by Gaboriau \cite{Ga00}.
\end{remark}
\begin{remark}
Because free groups are residually finite and therefore sofic, Theorem \ref{thm:main} implies that the orbit-equivalence relation of every essentially free $a\in A(G,X,\mu)$ is sofic. This fact was first obtained in \cite{EL10} (it can also be obtained as a consequence of property MD for free groups \cite{Ke11} which was discovered earlier in a different context in \cite{Bo03}). A new proof of this appears in \cite{BLS13}.
\end{remark}
\begin{question}
Which groups $G$ satisfy the conclusion of Theorem \ref{thm:main}? For example, do all strongly treeable groups satisfy this conclusion? Does $PSL(2,{\mathbb R})$ satisfy the conclusion?
\end{question}
\begin{question}
Are orbit-equivalence classes meager? That is, is the set $[a]_{OE}$ from Theorem \ref{thm:main} meager in $A(G,X,\mu)$? If so, then combined with ideas and results of \cite{IKT09} it should be possible to prove that if $G$ is a nonabelian free group then for any comeager subset $Y \subset A(G,X,\mu)$ it is not possible to classify actions in $Y$ by orbit-equivalence up to countable structures.
\end{question}
\subsection{A special case}
To give the reader a feeling for the proof of Theorem \ref{thm:main}, we show how to quickly prove a special case.
\begin{thm}\label{thm:easy}
Let $G$ be a non-abelian finitely generated free group and $a \in A(G,X,\mu)$ be essentially free. Let $S \subset G$ be a free generating set. Suppose that for every $s\in S$, the automorphism $a(s) \in \mathrm{Aut}(X,\mu)$ is ergodic. Then $[a]_{OE}$ is dense in $A(G,X,\mu)$.
\end{thm}
\begin{lem}\label{lem:FW}
Suppose that $T \in \mathrm{Aut}(X,\mu)$ is ergodic, $\epsilon>0$ and $\{C_i\}_{i<k}, \{D_i\}_{i<k}$ are two measurable partitions of $X$ such that for each $i < k$, $C_i$ and $D_i$ have the same measure. Then there is a $T' \in \mathrm{Aut}(X,\mu)$ with the same orbits as $T$ such that for all $i$ the measure of $T'(C_i)\vartriangle D_i$ is less than $\epsilon$.
\end{lem}
\begin{proof}
This is \cite[Lemma 7]{FW04}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:easy}]
Let $b \in A(G,X,\mu)$. We will show that $b \in \overline{[a]_{OE}}$. By Lemma \ref{lem:generator} below, it suffices to show that if $\{C_i\}_{i<k}$ is a measurable partition of $X$ and $\epsilon>0$ then there exists a measurable partition $\{D_i\}_{i<k}$ of $X$ and an action $a' \in [a]_{OE}$ such that
\begin{eqnarray}\label{eqn:suff}
| \mu(C_i \cap b_s C_j) - \mu(D_i \cap a'_s D_j)| < \epsilon
\end{eqnarray}
for every $s\in S$ and $1\le i,j < k$ (where for example $b_s=b(s)$).
By Lemma \ref{lem:FW}, for every $s\in S$ there is an automorphism $a'_s \in \mathrm{Aut}(X,\mu)$ with the same orbits as $a_s$ such that
$$\mu( a'_s(C_i) \vartriangle b_s(C_i)) < \epsilon.$$
Therefore, equation (\ref{eqn:suff}) holds with $D_i=C_i$ for all $i$. It is easy to verify that $a'$ is orbit-equivalent to $a$ (indeed $a'$ has the same orbits as $a$).
\end{proof}
The conclusion of Lemma \ref{lem:FW} does not hold in general if $T$ is non-ergodic. In order to prove Theorem \ref{thm:main} we will show instead that if the sets $\{C_i\}_{i<k}$ are sufficiently equidistributed with respect to the ergodic decomposition of $T$ then we can find an automorphism $T'$ with the same orbits as $T$ such that the numbers $\mu(C_i \cap T' C_j)$ are close to any pre-specified set of numbers satisfying the obvious restrictions.
{\bf Acknowledgements}. Thanks to Robin Tucker-Drob for pointing me to \cite[Lemma 7]{FW04}. I am partially supported by NSF grant DMS-0968762 and NSF CAREER Award DMS-0954606.
\section{The weak topology}\label{sec:prelim}
Here we review the weak topology and obtain some general results regarding weak containment. So let $(X,\mu)$ be a standard non-atomic probability space. The {\em measure algebra} of $\mu$, denoted $\textrm{MALG}(\mu)$, is the collection of all measurable subsets of $X$ modulo sets of measure zero. There is a natural distance function on the measure-algebra defined by
$$d( A,B) = \mu(A \vartriangle B)$$
for any $A,B \in \textrm{MALG}(\mu)$. Because $\mu$ is standard, there exists a dense sequence $\{A_i\}_{i=1}^\infty \subset \textrm{MALG}(\mu)$. Using this sequence we define the weak-distance between elements $T,S \in \mathrm{Aut}(X,\mu)$ by:
$$d_w(T,S) = \sum_{i=1}^\infty 2^{-i} \mu(TA_i \vartriangle SA_i).$$
The topology induced by this distance is called the {\em weak topology}. While $d_w$ depends on the choice of $\{A_i\}_{i=1}^\infty$, the topology on $\mathrm{Aut}(X,\mu)$ does not depend on this choice.
Let $G$ be a countable group. Recall that $A(G,X,\mu)$ denotes the set of all homomorphisms $a:G \to \mathrm{Aut}(X,\mu)$. We may view $A(G,X,\mu)$ as a subset of the product space $\mathrm{Aut}(X,\mu)^G$ from which it inherits a Polish topology \cite{Ke10}.
\begin{notation}
If $v \in A(G,X,\mu)$ and $g \in G$ then we write $v_g=v(g)$.
\end{notation}
\begin{lem}\label{lem:Kechris}
Let $G$ be a countable group. Let $v \in A(G,X,\mu)$ and $W \subset A(G,X,\mu)$. Then $v$ is in the closure $\overline{W}$ if and only if: for every $\epsilon>0$, for every finite Borel partition ${\mathcal P}=\{P_1,\ldots, P_n\}$ of $X$ and every finite set $F \subset G$ there exists $w\in W$ and a finite Borel partition ${\mathcal{Q}}=\{Q_1,\ldots, Q_n\}$ of $X$ such that
$$ | \mu(P_i \cap v_g P_j) - \mu(Q_i \cap w_g Q_j)| < \epsilon$$
for every $g \in F$ and $1\le i,j\le n$.
\end{lem}
\begin{proof}
This is essentially the same as \cite[Proposition 10.1]{Ke10}. It also follows from \cite[Theorem 1]{CKT12}.
\end{proof}
\begin{cor}\label{cor:fin-gen}
In order to prove Theorem \ref{thm:main}, it suffices to prove the special case in which $G$ is finitely generated.
\end{cor}
\begin{proof}
Let $G$ be a countably generated free group with free generating set $S=\{s_1,s_2,\ldots \} \subset G$. Let $a,b \in A(G,X,\mu)$ and suppose $a$ is essentially free. Let $\epsilon>0$, ${\mathcal P}=\{P_1,\ldots, P_n\}$ be a Borel partition of $X$ and $F \subset G$ be finite. By Lemma \ref{lem:Kechris} it suffices to show there exists $a'\in [a]_{OE}$ and a finite Borel partition ${\mathcal{Q}}=\{Q_1,\ldots, Q_n\}$ of $X$ such that
\begin{eqnarray}\label{eqn:a'}
| \mu(P_i \cap b_g P_j) - \mu(Q_i \cap a'_g Q_j)| < \epsilon
\end{eqnarray}
for every $g \in F$ and $1\le i,j\le n$.
Let $G_n<G$ be the subgroup generated by $\{s_1,\ldots, s_n\}$. Choose $n$ large enough so that $F \subset G_n$. Because we are assuming Theorem \ref{thm:main} is true for finitely generated free groups, there exists an action $a'' \in A(G_n,X,\mu)$ orbit-equivalent to $a|_{G_n}$ such that
\begin{eqnarray}\label{eqn:a''}
| \mu(P_i \cap b_g P_j) - \mu(Q_i \cap a''_g Q_j)| < \epsilon
\end{eqnarray}
for every $g \in F$ and $1\le i,j\le n$. By definition of orbit-equivalence, there exists an automorphism $T \in \mathrm{Aut}(X,\mu)$ such that $a''$ and $(a|_{G_n})^T$ have the same orbits.
Define $a' \in A(G,X,\mu)$ by $a'(s_i) = a''(s_i)$ if $1\le i \le n$ and $a'(s_i) = Ta(s_i)T^{-1}$ for $i>n$. Then clearly $a'$ is orbit-equivalent to $a$ and $a'$ satisfies (\ref{eqn:a'}) because of (\ref{eqn:a''}).
\end{proof}
The next result implies that we can replace the finite set $F\subset G$ appearing in the lemma above with a fixed generating set $S\subset G$. This is crucial to the whole approach because it allows us to reduce Theorem \ref{thm:main} from a problem about actions of the free group to a problem about actions of the integers.
\begin{lem}\label{lem:generator}
Let $G$ be a group with a finite symmetric generating set $S$. Let $v \in A(G,X,\mu)$ and $W \subset A(G,X,\mu)$. Suppose that for every $\epsilon>0$ for every finite Borel partition ${\mathcal P}=\{P_1,\ldots, P_n\}$ of $X$ there exists $w\in W$ and a finite Borel partition ${\mathcal{Q}}=\{Q_1,\ldots, Q_n\}$ of $X$ such that
$$ | \mu(P_i \cap v_s P_j) - \mu(Q_i \cap w_s Q_j)| < \epsilon$$
for every $s\in S$ and $1\le i,j\le n$. Then $v \in \overline{W}$.
\end{lem}
\begin{proof}
Let $\epsilon>0$, ${\mathcal P}=\{P_1,\ldots,P_n\}$ be a Borel partition of $X$ and $F \subset G$ be a finite set. By Lemma \ref{lem:Kechris} it suffices to show that there exists $w\in W$ and a finite Borel partition ${\mathcal{Q}}=\{Q_1,\ldots, Q_n\}$ of $X$ such that
\begin{eqnarray}\label{eqn:Q}
| \mu(P_i \cap v_g P_j) - \mu(Q_i \cap w_g Q_j)| < \epsilon
\end{eqnarray}
for every $g \in F$ and $1\le i,j\le n$.
In order to do this, we may assume that for some integer $r\ge 0$, $F=B(e,r)$ is the ball of radius $r$ centered at the identity in $G$ with respect to the word metric $d_S(\cdot,\cdot)$ induced by the generating set $S$.
Let ${\mathcal P}' = \bigvee_{g\in F} g{\mathcal P}$ be the common refinement of the partitions $\{g{\mathcal P}:~g \in F\}$. By hypothesis, there exists a partition ${\mathcal{Q}}'$ of $X$, a bijection $\beta: {\mathcal P}' \to {\mathcal{Q}}'$ and an action $w\in W$ such that
\begin{eqnarray}\label{eqn:weak}
| \mu(P' \cap v_s P'') - \mu( \beta(P') \cap w_s \beta(P''))| < \epsilon |{\mathcal P}'|^{-2} |F|^{-1}/4
\end{eqnarray}
for every $P', P'' \in {\mathcal P}'$ and $s\in S$.
Let $\Sigma({\mathcal P}')$ denote the sigma-algebra generated by ${\mathcal P}'$ (and define $\Sigma({\mathcal{Q}}')$ similarly). There is a unique boolean-algebra homomorphism from $\Sigma({\mathcal P}')$ to $\Sigma({\mathcal{Q}}')$ extending $\beta$. We also let $\beta$ denote this homomorphism.
Let ${\mathcal{Q}}=\{\beta(P):~P \in {\mathcal P}\}$. It is immediate that ${\mathcal{Q}}$ is a finite Borel partition of $X$. We will show that it satisfies (\ref{eqn:Q}).
\noindent {\bf Claim 1}. $|\mu(\beta(P)) - \mu(P)| < \epsilon/2$ for every $P \in \Sigma({\mathcal P}')$.
\begin{proof}[Proof of Claim 1]
Let $s\in S$. By (\ref{eqn:weak})
\begin{eqnarray*}
|\mu(\beta(P)) - \mu(P)| &\le & \sum_{P', P''} | \mu(P' \cap v_s P'') - \mu( \beta(P') \cap w_s \beta(P''))| < \epsilon/2
\end{eqnarray*}
where the sum is over all $P', P'' \in {\mathcal P}'$ with $P' \subset P$.
\end{proof}
\noindent {\bf Claim 2}. $\mu( \beta(v_gP) \vartriangle w_g\beta(P)) \le \epsilon |g|/(2|F|)$ for all $P \in {\mathcal P}$ and $g\in F$ where $|g|$ denotes the word length of $g$. Moreover equality holds only in the case $|g|=0$.
\begin{proof}[Proof of Claim 2]
We prove this by induction on the word length $|g|$. It is obviously true when $|g|=0$. So we assume there is an integer $m\ge 0$ such that the statement is true for all $g$ with $|g|\le m$. Now suppose that $|g|=m+1$ and $g\in F$. Then $g=sh$ for some $h\in F$ and $s\in S$ such that $|h|=m$. By induction,
\begin{eqnarray*}
\mu( \beta(v_gP) \vartriangle w_g\beta(P)) &=& \mu( \beta(v_{sh}P) \vartriangle w_{sh}\beta(P)) \\
&\le&\mu( \beta(v_{sh}P) \vartriangle w_s\beta(v_hP)) + \mu( w_s\beta(v_hP) \vartriangle w_{sh}\beta(P)) \\
&=& \mu( \beta(v_{sh}P) \vartriangle w_s\beta(v_hP)) + \mu( \beta(v_hP) \vartriangle w_h\beta(P)) \\
&\le& \mu( \beta(v_{sh}P) \vartriangle w_s\beta(v_hP)) + \epsilon |h|/(2|F|).
\end{eqnarray*}
Next we observe that
\begin{eqnarray*}
\mu(\beta(v_{sh}P) \vartriangle w_s\beta(v_hP)) = \sum_{P_1,P_2} \mu(\beta(P_1) \cap w_s \beta(P_2)) + \sum_{P_3,P_4} \mu(\beta(P_3) \cap w_s\beta(P_4))
\end{eqnarray*}
where the first sum is over all $P_1,P_2 \in {\mathcal P}'$ such that $P_1 \subset v_{sh}P$ and $P_2 \cap v_hP = \emptyset$ while the second sum is over all $P_3,P_4 \in {\mathcal P}'$ such that $P_3 \cap v_{sh}P = \emptyset$ and $P_4 \subset v_hP$.
By (\ref{eqn:weak}) if $(i,j)=(1,2)$ or $(i,j)=(3,4)$ as above then
\begin{eqnarray*}
\mu(\beta(P_i) \cap w_s \beta(P_j)) < \mu(P_i \cap v_s P_j) + \epsilon |{\mathcal P}'|^{-2}/(4|F|) = \epsilon |{\mathcal P}'|^{-2}/(4|F|).
\end{eqnarray*}
Therefore,
\begin{eqnarray*}
\mu(\beta(v_{sh}P) \vartriangle w_s\beta(v_hP)) < \epsilon/(2|F|).
\end{eqnarray*}
This implies the claim.
\end{proof}
Next we verify (\ref{eqn:Q}) with $Q_i = \beta(P_i)$:
\begin{eqnarray*}
| \mu(P_i \cap v_g P_j) - \mu(Q_i \cap w_g Q_j)| &=& | \mu(P_i \cap v_g P_j) - \mu(\beta(P_i) \cap w_g \beta(P_j))| \\
&<& | \mu(P_i \cap v_g P_j) - \mu(\beta(P_i) \cap \beta(v_g P_j))| + \epsilon |g|/(2|F|)\\
&=& | \mu(P_i \cap v_g P_j) - \mu(\beta(P_i \cap v_g P_j))| + \epsilon |g|/(2|F|) \\
&<& \epsilon/2 + \epsilon |g|/(2|F|) \le \epsilon.
\end{eqnarray*}
The first inequality follows from Claim 2 and the second inequality from Claim 1.
\end{proof}
\section{A combinatorial lemma}
The next lemma will be used to rearrange partial orbits of a single transformation. Roughly speaking it states that any partial orbit which is roughly equidistributed with respect to some partition can be rearranged so as to approximate the local statistics of any given Markov chain on the partition.
\begin{lem}\label{lem:combinatorial}
Let $\mathcal{A}$ be a finite set, $\pi$ be a probability distribution on $\mathcal{A}$ and ${\mathcal{J}}$ be a self-coupling of $\pi$ (so ${\mathcal{J}}$ is a probability distribution on $\mathcal{A}\times \mathcal{A}$ such that the projection of ${\mathcal{J}}$ to either factor is $\pi$). Let $0<\epsilon<1$ and $N>0$ be an integer and $\phi:\{1,\ldots, N\} \to \mathcal{A}$ a map such that if $\pi'$ is the empirical distribution of $\phi$ then $\| \pi' - \pi\|_\infty < \epsilon$. (By definition, $\pi' = \phi_* u_N $ where $u_N$ is the uniform probability distribution on $\{1,\ldots, N\}$). We assume $\min_{a,b \in \mathcal{A}} {\mathcal{J}}(a,b) >2 |\mathcal{A}|\epsilon + |\mathcal{A}|^2/N$.
Then there exists a bijection $\sigma=\sigma_\phi:\{1,\ldots, N-1\} \to \{2,\ldots, N\}$ such that if $\Gamma(\sigma)$ is the graph with vertices $\{1,\ldots,N\}$ and edges $\{ (i,\sigma(i)):~ 1\le i \le N-1\}$ then
\begin{itemize}
\item $\Gamma(\sigma)$ is connected (so it is isomorphic to a line graph)
\item if $\Phi_\sigma:\{1,\ldots, N-1\} \to \mathcal{A} \times \mathcal{A}$ is the map $\Phi_\sigma(i) = ( \phi(i), \phi(\sigma(i)))$ and ${\mathcal{J}}_\sigma=(\Phi_\sigma)_*u_{N-1}$ is the empirical distribution of $\Phi_\sigma$ then
$$\| {\mathcal{J}}_\sigma - {\mathcal{J}}\|_\infty < 2|\mathcal{A}|\epsilon + 3|\mathcal{A}|^2/N.$$
\end{itemize}
\end{lem}
\begin{proof}
\noindent {\bf Claim 1}. There exists a self-coupling ${\mathcal{J}}'$ of $\pi'$ such that
$$\| {\mathcal{J}}' - {\mathcal{J}} \|_\infty < 2 |\mathcal{A}|\epsilon + |\mathcal{A}|^2/N$$
and ${\mathcal{J}}'$ takes values only in ${\mathbb Z}[1/N]$.
\begin{proof}[Proof of Claim 1]
Let $a\in \mathcal{A}$. For $b,c\in \mathcal{A} \setminus \{a\}$, let ${\mathcal{J}}'(b,c)$ be the closest number in ${\mathbb Z}[1/N]$ to ${\mathcal{J}}(b,c)$. Define
\begin{eqnarray*}
{\mathcal{J}}'(a,c) &=& \pi'(c) - \sum_{t \in \mathcal{A}\setminus \{a\}} {\mathcal{J}}'(t,c) \\
{\mathcal{J}}'(b,a) &=& \pi'(b) - \sum_{t \in \mathcal{A}\setminus \{a\}} {\mathcal{J}}'(b,t) \\
{\mathcal{J}}'(a,a) &=& \pi'(a) - \sum_{t \in \mathcal{A} \setminus \{a\}} {\mathcal{J}}'(a,t) = \pi'(a) - \sum_{t \in \mathcal{A} \setminus \{a\}} {\mathcal{J}}'(t,a).
\end{eqnarray*}
It is straightforward to check that
$$\| {\mathcal{J}}' - {\mathcal{J}} \|_\infty < \epsilon +|\mathcal{A}|( \epsilon + |\mathcal{A}|/N) \le 2 |\mathcal{A}|\epsilon + |\mathcal{A}|^2/N.$$
Because $\min_{a,b \in \mathcal{A}} {\mathcal{J}}(a,b) >2 |\mathcal{A}|\epsilon + |\mathcal{A}|^2/N$, this implies ${\mathcal{J}}'$ is positive everywhere. So it is a self-coupling of $\pi'$.
\end{proof}
\noindent {\bf Claim 2}. There exists a bijection $\tau:\{1,\ldots, N-1\} \to \{2,\ldots, N\}$ such that if $\Phi_\tau:\{1,\ldots, N\} \to \mathcal{A} \times \mathcal{A}$ is the map $\Phi_\tau(i) = ( \phi(i), \phi(\tau(i)))$ and ${\mathcal{J}}_\tau= (\Phi_\tau)_*u_N$ is the empirical distribution of $\Phi_\tau$ then $\|{\mathcal{J}}_\tau-{\mathcal{J}}'\|_\infty \le 1/N$.
\begin{proof}[Proof of Claim 2]
Because ${\mathcal{J}}'$ is a self-coupling of $\pi'$ taking values in ${\mathbb Z}[1/N]$ there exist partitions ${\mathcal P}=\{P_{a,b}\}_{a,b\in \mathcal{A}},{\mathcal{Q}}=\{Q_{a,b}\}_{a,b\in \mathcal{A}}$ of $\{1,\ldots, N\}$ such that:
\begin{itemize}
\item $|P_{a,b}| = |Q_{a,b}| = N {\mathcal{J}}'(a,b)$ for every $a,b \in \mathcal{A}$;
\item $P_{a,b} \subset \phi^{-1}(a)$ and $Q_{a,b} \subset \phi^{-1}(b)$ for every $a,b \in \mathcal{A}$.
\end{itemize}
Next we choose bijections $\beta_{a,b}:P_{a,b} \to Q_{a,b}$ for all $a,b \in \mathcal{A}$. Define $\tau:\{1,\ldots, N-1\} \to \{2,\ldots, N\}$ by $\tau(i) = \beta_{a,b}(i)$ if $i\in P_{a,b}$ and $\beta_{a,b}(i) \ne 1$. If $i \in P_{a,b}$ (for some $a,b$) and $\beta_{a,b}(i)=1$ then we define $\tau(i)=N$. This satisfies the claim.
\end{proof}
Let $\tau:\{1,\ldots, N-1\} \to \{2,\ldots, N\}$ be a bijection satisfying the conclusion of Claim 2 with the property that the number of connected components of the graph $\Gamma(\tau)$ is as small as possible given that $\tau$ satisfies Claim 2.
\noindent {\bf Claim 3}. $\Gamma(\tau)$ has at most $|\mathcal{A}|^2$ connected components.
\begin{proof}[Proof of Claim 3]
To obtain a contradiction, suppose $\Gamma(\tau)$ has more than $|\mathcal{A}|^2$ connected components. Then there exists $1\le i <j \le N-1$ such that $i$ and $j$ are in different connected components of $\Gamma(\tau)$, $\phi(i)=\phi(j)$ and $\phi(\tau(i))=\phi(\tau(j))$. Let us define $\tau':\{1,\ldots, N-1\} \to \{2,\ldots, N\}$ by
\begin{displaymath}
\tau'(k) = \left\{ \begin{array}{ll}
\tau(k) & k \notin \{i,j\} \\
\tau(j) & k=i \\
\tau(i) & k=j
\end{array}\right.
\end{displaymath}
Observe that $\tau'$ also satisfies Claim 2 and $\Gamma(\tau')$ has one fewer connected component than $\Gamma(\tau)$ contradicting the choice of $\Gamma(\tau)$.
\end{proof}
Let $1\le i_1 < i_2 < \ldots < i_k \le N$ be a maximal set of indices such that for $t \ne s$, $i_t$ and $i_s$ are in different connected components of $\Gamma(\tau)$. Define the bijection $\sigma:\{1,\ldots,N-1\} \to \{2,\ldots, N\}$ by
\begin{displaymath}
\sigma(t) = \left\{ \begin{array}{ll}
\tau(t) & t \notin \{i_1,\ldots, i_k\} \\
\tau(i_{s+1}) & t=i_s~\textrm{ (indices mod $k$)}
\end{array}\right.
\end{displaymath}
Observe that $\Gamma(\sigma)$ is connected and, since $k\le |\mathcal{A}|^2$,
$$ \left|{\mathcal{J}}_\sigma(a,b) - {\mathcal{J}}_\tau(a,b) \right| = \left|\frac{ \#\{ 1\le i \le N-1:~ \phi(i) = a, \phi(\sigma(i)) = b\} }{N} - {\mathcal{J}}_\tau(a,b) \right| \le |\mathcal{A}|^2/N.$$
This implies the lemma.
\end{proof}
\section{Proof of Theorem \ref{thm:main}}
From here on, it will be convenient to work with observables instead of partitions. So instead of considering partitions ${\mathcal P}$ of a space $X$ we consider measurable maps $\phi:X \to \mathcal{A}$ where $\mathcal{A}$ is a finite set. Of course, $\{\phi^{-1}(\{a\}):~a\in \mathcal{A}\}$ is the partition of $X$ represented by $\phi$.
\begin{lem}\label{lem:hard}
Let $(X,\mu)$ be a standard probability space and $T \in \mathrm{Aut}(X,\mu)$ be aperiodic. Let $\psi: X \to \mathcal{A}$ be a measurable map into a finite set. Let $\mu = \int \nu~d\omega(\nu)$ be the ergodic decomposition of $\mu$ with respect to $T$ (so $\omega$ is a probability measure on the space of $T$-invariant ergodic Borel probability measures on $X$). Suppose that for some $1/6>\epsilon>0$,
$$\omega\Big(\big\{ \nu:~ \|\psi_*\nu - \psi_*\mu \|_\infty > \epsilon \big\}\Big) < \epsilon.$$
Suppose also that ${\mathcal{J}}$ is a self-coupling of $\psi_*\mu$ (i.e. ${\mathcal{J}}$ is a probability measure on $\mathcal{A} \times \mathcal{A}$ whose projections are both equal to $\psi_*\mu$) and
$$\min_{a,b \in \mathcal{A}} {\mathcal{J}}(a,b) >2|\mathcal{A}|\epsilon.$$
Then there exists $T' \in \mathrm{Aut}(X,\mu)$ such that $T$ and $T'$ have the same orbits (a.e.) and if $\Phi: X \to \mathcal{A} \times \mathcal{A}$ is the map $\Phi(x)=(\psi(x),\psi(T'x))$ then
$$ \| \Phi_* \mu - {\mathcal{J}}\|_\infty \le 9|\mathcal{A}|\epsilon.$$
\end{lem}
\begin{proof}
By the pointwise ergodic theorem, there exists a Borel set $X' \subset X$ and an integer $M>0$ such that
\begin{itemize}
\item $\mu(X')>1-\epsilon$
\item for every $x\in X'$, every $a\in \mathcal{A}$ and every $K_1,K_2 \ge M$,
$$\left| \frac{ \#\{ -K_1 \le j \le K_2:~ \psi(T^j x) = a \} }{K_1+K_2+1} - \psi_*\mu(a) \right| < \epsilon.$$
\end{itemize}
Without loss of generality, we may assume $M$ is large enough so that
$$\min_{a,b \in \mathcal{A}} {\mathcal{J}}(a,b) >2 |\mathcal{A}|\epsilon + |\mathcal{A}|^2/(2M+1)$$
and $3|\mathcal{A}|^2/(2M+1) < \epsilon$.
Let $Y \subset X$ be a complete section with $\mu(Y)\le \epsilon/(2M+1)$. By a complete section we mean that for $\mu$-a.e. $x\in X$ the orbit of $x$ intersects $Y$ nontrivially. The existence of such a complete section is proved in \cite[Chapter II, Lemma 6.7]{KM04}. Without loss of generality, we will assume that if $y\in Y$ then $Ty \notin Y$.
For any integer $N\ge 1$ let us say that a map $\eta:\{1,\ldots, N\} \to \mathcal{A}$ is {\em $\epsilon$-good} if
$$\| \eta_*u_N - \psi_*\mu \|_\infty < \epsilon$$
and
$$\min_{a,b \in \mathcal{A}} {\mathcal{J}}(a,b) >2 |\mathcal{A}|\epsilon + |\mathcal{A}|^2/N$$
where $u_N$ is the uniform probability measure on $\{1,\ldots, N\}$. For each $\epsilon$-good map $\eta:\{1,\ldots,N\} \to \mathcal{A}$ choose a map $\sigma_\eta:\{1,\ldots, N-1\} \to \{2,\ldots, N\}$ as in Lemma \ref{lem:combinatorial}.
For $x\in X$, let $\alpha(x)$ be the smallest nonnegative integer such that $T^{-\alpha(x)}x \in Y$, let $\beta(x)$ be the smallest nonnegative integer such that $T^{\beta(x)}x \in Y$ and let $\psi_x:\{1,\ldots, \alpha(x) + \beta(x)+1\} \to \mathcal{A}$ be the map $\psi_x(j) = \psi(T^{j-\alpha(x)-1} x)$. So
$$\psi_x = \big(\psi(T^{-\alpha(x)}x), \psi(T^{-\alpha(x)+1}x), \ldots, \psi(T^{\beta(x)}x) \big).$$
Note that $\psi_x$ is $\epsilon$-good if $x \in X'$, $\alpha(x) \ge M$ and $\beta(x) \ge M$. In this case, let $\sigma_x=\sigma_{\psi_x}$ and ${\mathcal{J}}_x={\mathcal{J}}_{\sigma_x}$ (with notation as in Lemma \ref{lem:combinatorial}).
Now we can define $T'$ as follows. If either $x \in Y$ or $\psi_x$ is not $\epsilon$-good then we define $T'x=Tx$. Otherwise we set $T'x = T^{\sigma_x(\alpha(x)+1)-\alpha(x)-1}(x)$. Because each $\sigma_x$ is a bijection and the graph $\Gamma(\sigma_x)$ is connected it immediately follows that $T'$ and $T$ have the same orbits.
By Kac's Theorem (see for example \cite[Theorem 4.3.4]{Do11}),
$$\int_Y \beta(Tx)+1~d\mu(x) = 1.$$
Therefore the set $Y'$ of all $x \in Y$ such that $\beta(Tx)+1 \ge 2M+1 $ satisfies
$$\int_{Y'} \beta(Tx)+1~d\mu(x) = 1 - \int_{Y\setminus Y'} \beta(Tx)+1~d\mu(x) \ge 1 - \mu(Y)(2M+1) \ge 1-\epsilon.$$
Let $X''$ be the set of all $T^jx$ for $x \in Y'$ and $M \le j \le \beta(Tx)-M$. Then
$$\mu(X'') = \int_{Y'}( \beta(Tx) -2M +1) ~d\mu(x) \ge 1-\epsilon - \mu(Y')(2M) \ge 1-2\epsilon.$$
Let $X''' = X' \cap X''$. So $\mu(X''') \ge 1-3\epsilon$. Observe that if $x\in X'''$ then $\alpha(x)\ge M$ and $\beta(x)\ge M$ so $\psi_x$ is $\epsilon$-good. Finally, let $Y''$ be the set of all $y\in Y'$ such that $T^jy \in X'''$ for some $1\le j \le \beta(Ty)+1$. Then
$$\int_{Y''} \beta(Tx)+1~d\mu(x) \ge \mu(X''') \ge 1-3\epsilon.$$
Moreover, if $y \in Y''$ then $\psi_{Ty}$ is $\epsilon$-good (this uses our hypothesis that if $y\in Y'' \subset Y$ then $Ty \notin Y$). Let $Z$ be the set of all $T^jy$ for $y \in Y''$ and $0\le j \le \beta(Ty)$. So
$$\mu(Z) = \int_{Y''} \beta(Tx)+1 ~d\mu(x) \ge 1-3\epsilon.$$
Recall that $\Phi:X \to \mathcal{A} \times \mathcal{A}$ is defined by $\Phi(x) = (\psi(x), \psi(T'x))$. Let $\mu_{Z}$ denote the unnormalized restriction of $\mu$ to $Z$. Then
\begin{eqnarray*}
\| \Phi_*\mu - {\mathcal{J}} \|_\infty &\le& \| \Phi_*\mu - \Phi_* \mu_{Z} \|_\infty + \|\Phi_*\mu_{Z} - {\mathcal{J}} \|_\infty\\
&\le& 3\epsilon + \|\Phi_*\mu_{Z} - {\mathcal{J}} \|_\infty.
\end{eqnarray*}
Next observe that
$$\Phi_*\mu_Z = \int_{Y''} [\beta(Tx)+1] {\mathcal{J}}_{Tx}~d\mu(x).$$
By Lemma \ref{lem:combinatorial}, for $x\in Y''$
$$\|{\mathcal{J}}_{Tx} - {\mathcal{J}}\|_\infty \le 2|\mathcal{A}|\epsilon + 3|\mathcal{A}|^2/(2M+1) \le 3|\mathcal{A}|\epsilon.$$
So
$$ \|\Phi_*\mu_{Z} - {\mathcal{J}} \|_\infty \le (3|\mathcal{A}|\epsilon)/(1-3\epsilon) \le 6|\mathcal{A}|\epsilon$$
(because $\epsilon<1/6$) and therefore,
$$\| \Phi_*\mu - {\mathcal{J}} \|_\infty \le 3\epsilon + 6|\mathcal{A}|\epsilon \le 9|\mathcal{A}|\epsilon.$$
\end{proof}
In the next lemma, we prove the existence of a ``good observable'' $\psi$.
\begin{lem}\label{lem:good-partition}
Let $G$ be a countable group and $S \subset G$ a finite set of elements of infinite order. Let $T\in A(G,X,\mu)$ be an essentially free action of $G$. For each $s \in S$ let $\mu = \int \nu ~d\omega_s(\nu)$ be the ergodic decomposition of $\mu$ with respect to $T_s$ (so $\omega_s$ is a Borel probability measure on the space of all $T_s$-invariant ergodic Borel probability measures).
Let $\pi$ be a probability measure on a finite set $\mathcal{A}$. Also let $0<\epsilon<1/2$. Then there exists a measurable map $\psi:X \to \mathcal{A}$ such that for every $s \in S$,
$$\omega_s(\{ \nu:~ \|\psi_*\nu - \pi \|_\infty > 3\epsilon \}) < \epsilon.$$
\end{lem}
\begin{proof}
Given a measurable map $\psi:X \to \mathcal{A}$, an element $s\in S$ and an integer $N\ge 1$ let $\psi_{s,N}:X \to \mathcal{A}^N$ be the map
$$\psi_{s,N}(x) = (\psi(T_s x), \psi(T^2_s x), \ldots, \psi(T^N_s x) ).$$
According to Abert-Weiss \cite{AW11}, the action $T$ weakly contains the Bernoulli shift action $G {\curvearrowright} (\mathcal{A},\pi)^G$. This immediately implies that for any integer $N\ge 1$ there exists a measurable map $\psi:X \to \mathcal{A}$ such that for every $s \in S$
$$\| (\psi_{s,N})_*\mu - \pi^N \|_\infty < \frac{\epsilon^2}{2}.$$
Given a sequence $y \in \mathcal{A}^N$, let $E[y]$ denote its empirical distribution. To be precise, $E[y]$ is the probability measure on $\mathcal{A}$ defined by
$$E[y](a) = \#\{1\le i \le N:~ y(i) = a\}/N.$$
By the law of large numbers we may choose $N$ large enough so that
$$\pi^N(\{ y \in \mathcal{A}^N:~ \| E[y] - \pi \|_\infty > \epsilon\}) < \epsilon^2/2.$$
Therefore,
$$\mu\left(\left\{x \in X:~ \| E[\psi_{s,N}(x)] - \pi \|_\infty > \epsilon\right\}\right) = (\psi_{s,N})_*\mu(\{ y \in \mathcal{A}^N:~ \| E[y] - \pi \|_\infty > \epsilon\})< \epsilon^2$$
for every $s\in S$. Let $Z=\left\{x \in X:~ \| E[\psi_{s,N}(x)] - \pi \|_\infty > \epsilon\right\}$. So
$$\int \nu(Z)~d\omega_s(\nu) = \mu(Z)<\epsilon^2.$$
This implies $\omega_s(\{\nu:~ \nu(Z)>\epsilon\})<\epsilon$.
Next we claim that if a probability measure $\nu$ satisfies $\nu(Z) \le \epsilon$ then $\| \psi_*\nu - \pi \|_\infty \le 3\epsilon$. Indeed,
$$\psi_*\nu = E_* (\psi_{s,N})_*\nu.$$
So if $\nu(Z)\le \epsilon$ then $\|\psi_* \nu - E_*(\psi_{s,N})_*(\nu\upharpoonright Z^c) \|_\infty \le \epsilon$ (where $Z^c = X \setminus Z$ is the complement of $Z$) and $\| \pi - E_*(\psi_{s,N})_*(\nu\upharpoonright Z^c) \|_\infty \le \frac{\epsilon}{1-\epsilon}$ by definition of $Z$. So $\| \psi_* \nu - \pi\|_\infty \le 3\epsilon$ (since we assume $\epsilon < 1/2$).
Since $\omega_s(\{\nu:~ \nu(Z)>\epsilon\})<\epsilon$, we now have
$$\omega_s(\{\nu:~ \|\psi_*\nu - \pi\|_\infty > 3\epsilon\}) < \epsilon.$$
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:main}]
Let $G$ be a finitely generated free group with free generating set $S \subset G$. Let $a,b\in A(G,X,\mu)$ and assume $a$ is essentially free. It suffices to show that $b\in \overline{[a]_{OE}}$. By Lemma \ref{lem:generator} it suffices to show that for every finite set $\mathcal{A}$, measurable map $\phi:X \to \mathcal{A}$ and $\epsilon>0$ there exists a measurable map $\psi:X \to \mathcal{A}$ and $a'\in [a]_{OE}$ such that
$$\| (\psi \vee \psi \circ a'_s)_*\mu - (\phi \vee \phi \circ b_s)_*\mu \|_\infty \le 10|\mathcal{A}|\epsilon\quad \forall s\in S$$
where, for example, $\phi \vee \phi \circ b_s:X \to \mathcal{A} \times \mathcal{A}$ is the map
$$\phi\vee \phi \circ b_s(x) = (\phi(x), \phi(b_sx)).$$
After replacing $\mathcal{A}$ with the essential range of $\phi$ if necessary, we may assume that $\phi_*\mu(c)>0$ for every $c\in \mathcal{A}$. We claim that there exists a self-coupling ${\mathcal{J}}_s$ of $\phi_*\mu$ such that ${\mathcal{J}}_s(c,d) >0$ for all $c,d \in \mathcal{A}$ and
$$\|(\phi \vee \phi \circ b_s)_*\mu - {\mathcal{J}}_s\|_\infty < \epsilon.$$
Indeed, the self-coupling
$${\mathcal{J}}_s = (1-\epsilon) (\phi \vee \phi \circ b_s)_*\mu + \epsilon (\phi \times \phi)_*(\mu \times \mu)$$
has this property. After choosing $\epsilon$ smaller if necessary we may assume that $\epsilon<1/6$ and
$$\min_{s\in S} \min_{c,d \in \mathcal{A}} {\mathcal{J}}_s(c,d) > 2|\mathcal{A}|\epsilon.$$
Let $\mu=\int \nu~d\omega_s$ be the ergodic decomposition of $\mu$ with respect to $a_s$. By Lemma \ref{lem:good-partition} there exists a measurable map $\psi:X \to \mathcal{A}$ such that
$$\omega_s(\{\nu:~\| \phi_*\mu - \psi_* \nu\|_\infty > \epsilon\})< \epsilon$$
for every $s\in S$. By Lemma \ref{lem:hard} for every $s\in S$ there exists $a'_s \in \mathrm{Aut}(X,\mu)$ such that $a'_s$ and $a_s$ have the same orbits and
$$\| (\psi \vee \psi \circ a'_s)_*\mu - {\mathcal{J}}_s \|_\infty \le 9|\mathcal{A}|\epsilon.$$
Because $a'_s$ and $a_s$ have the same orbits for every $s\in S$ it follows that the homomorphism $a':G \to \mathrm{Aut}(X,\mu)$ defined by $\{a'_s\}_{s\in S}$ is orbit-equivalent to $a$. In other words, $a' \in [a]_{OE}$. Also
$$\| (\psi \vee \psi \circ a'_s)_*\mu - (\phi \vee \phi \circ b_s)_*\mu\|_\infty \le \|(\psi \vee \psi \circ a'_s)_*\mu - {\mathcal{J}}_s \|_\infty + \|{\mathcal{J}}_s - (\phi \vee \phi \circ b_s)_*\mu\|_\infty \le 10|\mathcal{A}|\epsilon.$$
This proves the special case in which $G$ is finitely generated. The general case follows from Corollary \ref{cor:fin-gen}.
\end{proof}
{\small
}
\end{document} |
\begin{document}
\title{Tropical Dynamic Programming for \ Lipschitz Multistage Stochastic Programming}
\begin{abstract}
We present an algorithm called Tropical Dynamic Programming (TDP) which builds upper and lower approximations of the Bellman value functions in risk-neutral Multistage Stochastic Programming (MSP), with independent noises of finite supports.
To tackle the curse of dimensionality, popular parametric variants of Approximate Dynamic Programming approximate the Bellman value function as linear combinations of basis functions. Here, Tropical Dynamic Programming builds upper (resp. lower) approximations of a given value function as min-plus linear (resp. max-plus linear) combinations of ``basic functions''. At each iteration, TDP adds a new basic function to the current combination following a deterministic criterion introduced by Baucke, Downward and Zakeri in 2018 for a variant of Stochastic Dual Dynamic Programming.
We prove, for every Lipschitz MSP, the asymptotic convergence of the generated approximating functions of TDP to the Bellman value functions on sets of interest. We illustrate this result on MSP with linear dynamics and polyhedral costs.
\end{abstract}
\section{Introduction}
In this article we study multistage stochastic optimal control problems in the
hazard-decision framework (hazard comes first, decision second). Starting from a
given state $x_0$, a decision maker observes the outcome $w_1$ of a
random variable $\mathbf{W_1}$, then decides on a control $u_0$ which induces a
\emph{known} cost $c_0^{w_1}\np{x_0, u_0}$ and the system evolves to a future
state $x_1$ from a \emph{known} dynamic: $x_1 = f_0^{w_1}\np{x_0, u_0}$. Having observed
a new random outcome, the decision maker makes a new decision based on
this observation which induces a known cost, then the system evolves to a known
future state, and so on until $T$ decisions have been made. At the last step, there
are constraints on the final state $x_T$ which are modeled by a final cost function
$\psi$. The decision maker aims to minimize the average cost of her decisions.
Multistage Stochastic optimization Problems (MSP) can be formally described by
the following optimization problem
\begin{equation}
\label{MSP}
\begin{aligned}
& \min_{\np{\mathbf{X}, \mathbf{U}}} \mathbb{E} \left[ \sum_{t=0}^{T-1} \cost[\mathbf{W_{t+1}}]{\mathbf{X_t}, \mathbf{U_t}} + \psi\np{\mathbf{X_T}} \right], \\
& \text{s.t.} \ \mathbf{X_0} = x_0 \ \text{given}, \forall t\in \ce{0,T-1}, \\
& \mathbf{X_{t+1}} = \dyn[\mathbf{W_{t+1}}]{\mathbf{X_t}, \mathbf{U_t}}, \\
& \sigma\np{\mathbf{U_t}} \subset \sigma\np{ \mathbf{W_1}, \ldots , \mathbf{W_{t+1}}},
\end{aligned}
\end{equation}
where $\np{\mathbf{W_{t}}}_{t\in \ce{1,T}}$ is a given sequence of independent random variables each with values in some measurable set $(\mathbb{W}_t,\mathcal{W}_t)$. We refer to the random variable $\mathbf{W_{t+1}}$ as a \emph{noise} and throughout the remainder of the article we assume the following on the sequence of noises.
\begin{assumption}
\label{whitenoise}
Each random variable $\mathbf{W_t}$ in Problem \eqref{MSP} has finite support and the sequence of random variables $\np{\mathbf{W_{t}}}_{t\in \ce{1,T}}$ is independent.
\end{assumption}
One approach to solving MSP problems is by dynamic programming, see for example
\cite{Be2016,Ca.Ch.Co.De2015,Pf.Pi2014,Sh.De.Ru2009}. For some integers
$n,m \in \mathbb{N}$, denote by $\mathbb{X} = \mathbb{R}^n$ the \emph{state space} and $\mathbb{U} = \mathbb{R}^m$ the
\emph{control space}. Both $\mathbb{X}$ and $\mathbb{U}$ are endowed with their euclidean
structure and borelian structure. We define the pointwise Bellman operators
$\pB{}{}$ and the average Bellman operators $\aB{}{}$ for every
$t \in \ic{0,T-1}$. For each possible realization $w\in \mathbb{W}_{t+1}$ of
the noise $\mathbf{W_{t+1}}$, for every function $\phi : \mathbb{X} \to \overline{\mathbb{R}}$ taking
extended real values in $\overline{\mathbb{R}} = \mathbb{R} \cup\na{\pm\infty}$, the function
$\pB{\phi}{\cdot}: \mathbb{X} \to \overline{\mathbb{R}}$ is defined by
\begin{equation*}
\forall x\in \mathbb{X}\; , \enspace \pB{\phi}{x} = \min_{u\in \mathbb{U}} \Bp{\cost{x,u} + \phi\bp{\dyn{x,u}}}
\; .
\end{equation*}
Now, the average Bellman operator $\aB{}{}$ is the mean of all the pointwise Bellman operators with respect to the probability law of
$\mathbf{W_{t+1}}$. That is, for every $\phi : \mathbb{X} \to \mathbb{R}b$, we have that
\begin{equation*}
\forall x \in \mathbb{X}
\; , \enspace \aB{\phi}{x} = \mathbb{E} \bc{ \pB[\mathbf{W_{t+1}}]{\phi}{x}} =
\mathbb{E}\bc{ \min_{u\in \mathbb{U}}\Bigl(\cost[\mathbf{W_{t+1}}]{x,u} + \phi\bp{\dyn[\mathbf{W_{t+1}}]{x,u} }\Bigr)}
\; .
\end{equation*}
The average Bellman operator can be seen as a one stage operator which computes
the value of applying the best (average) control at a given state $x$. Note
that in the hazard-decision framework assumed here, the control is taken after
observing the noise. Now, the Dynamic Programming approach states that in order to solve
MSP Problems~\eqref{MSP}, it suffices to solve the following system of
\emph{Bellman equations}~\eqref{BellmanEquations},
\begin{equation}
\label{BellmanEquations}
V_T = \psi \quad\text{and}\quad
\forall t \in \ic{0,T-1}, V_t = \aB{V_{t+1}}{}
\; .
\end{equation}
Solving the Bellman equations means computing recursively backward in time the
\emph{(Bellman) value functions} $V_t$. Finally, the value $V_0(x_0)$ is the solution of the multistage Problem~\ref{MSP}.
Grid-based approach to compute the value functions suffers from the so-called
curse of dimensionality. Assuming that the value functions
$\na{V_t}_{t\in \ic{0,T}}$ are convex, one approach to bypass this difficulty is
proposed by Pereira and Pinto \cite{Pe.Pi1991} with the Stochastic Dual Dynamic
Programming (SDDP) algorithm which computes piecewise affine approximations of
each value function $V_t$. At a given iteration $k\in \mathbb{N}^*$ of SDDP, for every
time step $t\in \ce{0,T}$, the value function $V_t$ is approximated by
$\underline{V}_t^k = \max_{\phi \in \underline{F}_t^k} \phi$ where $\underline{F}_t^k$ is a finite set of
affine functions. Then, given a realization of the noise process
$\np{W_t}_{t\in \ce{1,T}}$, the decision maker computes an optimal trajectory
associated with the approximations $\np{\underline{V}_t^k}_{t\in \ce{0,T}}$ and adds a new
mapping $\phi_t^{k+1}$ (named cut) to the current collection $\underline{F}_t^k$
which defines $\underline{V}_t^k$, that is
$\underline{F}_t^{k+1} = \underline{F}_t^k \cup \left\{ \phi_t^{k+1} \right\}$. Although SDDP
does not involve discretization of the state space, one of its computational
bottleneck is the lack of efficient stopping criterion: SDDP easily builds lower
approximations of the value function but upper approximations are usually
computed through a costly Monte-Carlo scheme.
In order to build upper approximations of the value functions, Min-plus methods
were studied (\emph{e.g.} \cite{Mc2007,Qu2014}) for optimal control problems in
continuous time. When the value functions $\na{V_t}_{t\in \ic{0,T}}$ are convex
(or more generally, semiconcave), discrete time adaptations of Min-plus methods
build for each $t\in \ic{1,T}$ approximations of convex value function $V_t$ as
finite infima of convex quadratic forms. That is, at given iteration
$k\in \mathbb{N}$, we consider upper approximations defined as
$\overline{V}_t^k = \min_{\phi \in \overline{F}_t^k} \phi$, where
$\overline{F}_t^k$ is a finite set of convex quadratic forms. Then, a sequence of
\emph{trial points} $\np{x_t^k}_{t\in \ce{0,T}}$ are drawn (\emph{e.g.}
uniformly on the unit sphere as in \cite{Qu2014}) and for every $t\in \ce{0,T{-}1}$
a new function $\phi_t^{k+1}$ is added,
$\overline{F}_t^{k+1} = \overline{F}_t^k \cup \left\{ \phi_t^{k+1}\right\}$. The function
$\phi_t^{k+1}$ should be compatible with the Bellman equation, in particular it
should be \emph{tight}, \emph{i.e.} the Bellman equations should be satisfied at
the trial point,
\[
\aB{\phi_{t+1}^{k+1}}{x_t^k} = \phi_t^{k+1}\np{x_t^k}.
\]
In~\cite{Ak.Ch.Tr2018}, the authors present a common framework for a
deterministic version of SDDP and a discrete time version of Min-plus
algorithms. Moreover, the authors give sufficient conditions on the way the trial
points have to be sampled in order to obtain asymptotic convergence of either
upper or lower approximations of the value functions. Under these conditions,
the main reason behind the convergence of these algorithms was shown to be that
the Bellman equations \eqref{BellmanEquations} are asymptotically satisfied on
all cluster points of possible trial points. In this article, we would like to
extend the work of \cite{Ak.Ch.Tr2018} by introducing a new algorithm called
Tropical Dynamic Programming (TDP).
In \cite{Ba.Do.Za2018,Ph.de.Fi2013}, approximation schemes are studied where lower
approximations are given as suprema of affine functions and upper
approximations are given as polyhedral functions. We aim in this article to
extend, with TDP, the approach of \cite{Ba.Do.Za2018,Ph.de.Fi2013} considering more
generally that lower approximations are max-plus linear combinations of some
\emph{basic functions} and upper approximations are min-plus linear combinations
of other \emph{basic functions} where basic functions are defined later. TDP can
be seen as a tropical variant of parametric approximations used in Adaptive
Dynamic Programming (see \cite{Be2019,Po2011}) where the value functions are
approximated by linear combinations of basis functions. In this article, we
will:
\begin{enumerate}
\item Extend the deterministic framework of \cite{Ak.Ch.Tr2018} to Lipschitz
MSP defined in \Cref{MSP} and introduce TDP, see Section~\ref{sec:TDP}.
\item Ensure that upper and lower approximations converge to the true value
functions on a common set of points, see Section~\ref{sec:convergence}. The main
result of Section~\ref{sec:convergence} generalizes to any min-plus/max-plus
approximation scheme the result of \cite{Ba.Do.Za2018} which was stated for a
variant of SDDP.
\item Explicitly give several numerically efficient ways to build upper and
lower approximations of the value functions, as min-plus and max-plus linear
combinations of some simple functions, see Section~\ref{sec:numerique}.
\end{enumerate}
\section{Tropical Dynamical Programming on Lipschitz MSP}
\label{sec:TDP}
\subsection{Lipschitz MSP with independent finite noises}
For every time step $t\in \ce{1,T}$, we denote by $\supp{\mathbf{W_{t}}}$ the
\emph{support} of the discrete random variable $\mathbf{W}_{t}$ \footnote{The
support of the discrete random variable $\mathbf{W}_t$ is equal to
the set $\left\{ w\in \mathbb{W}_t \mid \mathbb{P}\np{\mathbf{W_t}= w} > 0 \right\}$.} and
for a given subset $X \subset \mathbb{X}$, we denote by $\pi_X$ the euclidean projector
on $X$. State and control constraints for each time $t$ are modeled in the cost
functions which may possibly take infinite values outside of some given
sets. Now, we introduce a sequence of sets
$\na{X_t}_{t \in \ic{0,T}}$ which only depend on the problem data and make the following compactness assumption:
\begin{assumption}[Compact state space]
\label{compactStates}
For every time $t \in \ce{0,T}$, we assume that the set $X_t$ is a nonempty compact set in $\mathbb{X}$ where
the sequence of sets $\na{X_t}_{t \in \ic{0,T}}$ is defined, for all $t\in \ce{0,T-1}$, by
\begin{equation}
\label{Xt}
X_t := \bigcap_{w\in \supp{\mathbf{W_{t+1}}}} \pi_{\mathbb{X}}\np{ \mathop{\mathrm{dom}}\, \cost{}{} }
\; ,
\end{equation}
and for $t=T$ by $X_T = \mathop{\mathrm{dom}} \psi$.
\end{assumption}
For each noise $w\in \supp{\mathbf{W_{t+1}}}$, $t\in \ce{0,T-1}$, we also introduce the \emph{constraint set-valued mapping} $\mathcal{U}_t^w : \mathbb{X} \rightrightarrows \mathbb{U}$ defined for every $x \in \mathbb{X}$ by
\begin{equation}
\label{constraintset-valued mapping}
\mathcal{U}_t^w\np{x} := \bset{ u \in \mathbb{U}}{ \cost{x,u} < +\infty \ \text{and} \ \dyn{x,u} \in X_{t+1}}
\; .
\end{equation}
We will assume that the data of Problem~\eqref{MSP} is Lipschitz in the sense defined below. Let us stress that we do not assume structure on the dynamics or costs like linearity or convexity, only that they are Lipschitz.
\begin{assumption}[Lipschitz MSP]
\label{LipschitzMSP}
For every time $t\in \ce{0,T-1}$, we assume that for each
$w\in \supp{\mathbf{W_{t+1}}}$, the dynamic $\dyn{}{}$, the cost $\cost{}{}$
are Lipschitz continuous on $\mathop{\mathrm{dom}}\, \cost{}{}$ and the set-valued mapping
constraint $\mathcal{U}_t^w$ is Lipschitz continuous on $X_t$, \emph{i.e.} for some
constant $L_{\mathcal{U}_t^w} >0$, for every $x_1, x_2 \in X_t$, we have
\begin{equation}
d_{\mathcal{H}}\bp{\mathcal{U}_t^w\np{x_1}, \mathcal{U}_t^w\np{x_2}} \leq L_{\mathcal{U}_t^w} \lVert x_1 - x_2 \rVert.\footnotemark
\end{equation}
\end{assumption}
Computing a (sharp) Lipschitz constant for the set-valued mapping
$\mathcal{U}_t^w : \mathbb{X} \rightrightarrows \mathbb{U}$ is difficult. However, when the graph of the
set-valued mapping $\mathcal{U}_t^w$ is polyhedral, as in the linear-polyhedral framework
studied in \Cref{sec:numerique}, one can compute a Lipschitz constant for $\mathcal{U}_t^w$.
We make the following assumption in order to ensure that the domains of the
value functions $V_t$ are chosen by the decision maker. It can be seen as a
recourse assumption. \footnotetext{The Hausdorff distance $d_{\mathcal{H}}$
between two nonempty compact sets $X_1, X_2$ in $\mathbb{X}$ is defined by
\[
d_{\mathcal{H}}\np{X_1, X_2} = \max\np{ \max_{x_1 \in X_1} d\np{x_1, X_2}, \max_{x_2\in X_2} d\np{X_1,x_2}} = \max\np{\max_{x_1 \in X_1} \min_{x_2\in X_2} d\np{x_1,x_2}, \max_{x_2\in X_2} \min_{x_1\in X_1} d\np{x_1,x_2}}.
\]}
\begin{assumption}[Recourse assumption]
\label{recourse}
Given $t\in \ce{0,T{-}1}$, for every noise realization
$w \in \supp{\mathbf{W_t}}$ the set-valued mapping
$\mathcal{U}_t^w : \mathbb{X} \rightrightarrows \mathbb{U}$ defined in \eqref{constraintset-valued
mapping} is nonempty compact valued.
\end{assumption}
\emph{A priori}, it might be difficult to compute the domain of each value
function $V_t$. However, under the recourse \Cref{recourse}, we have that
$\mathop{\mathrm{dom}} V_t = X_t$ and thus the domain of each value function is known to the
decision maker.
\begin{lemma}[Known domains of $V_t$]
Under Assumptions~\ref{whitenoise} and~\ref{recourse}, for every
$t\in \ce{0,T}$, the domain of $V_t$ is equal to $X_t$.
\end{lemma}
\begin{proof}
We make the proof by backward induction on time. At time $t=T$, we have
$V_T = \psi$ and thus $\mathop{\mathrm{dom}} V_T = \mathop{\mathrm{dom}} \psi = X_T$. Now, for a given
$t\in \ce{0,T{-}1}$, we assume that $\mathop{\mathrm{dom}} V_{t+1} = X_{t+1}$ and we
prove that $\mathop{\mathrm{dom}} V_t = X_t$.
First, fix $x \in X_t$. Then, for every $w\in \supp{\mathbf{W_{t+1}}}$,
using~\Cref{recourse}, $\mathcal{U}_t^w\np{x}$ is nonempty and thus $V_t\np{x} <
+\infty$. Moreover, by Assumptions~\ref{LipschitzMSP}
and~\ref{recourse} the optimization problem
\[
\min_{u\in \mathbb{U}}\Bigl(\cost{x,u} + V_{t+1}\bp{\dyn{x,u}}\Bigr) = \min_{u\in \mathcal{U}_t^w\np{x}} \Bigl(\cost{x,u} + V_{t+1}\bp{\dyn{x,u}}\Bigr),
\]
consists in the minimization of a continuous function in $u$ over a nonempty compact set. Denote by $u^w \in \mathcal{U}_t^w\np{x}$ a minimizer of this optimization problem. We have, denoting by
$\na{p_w}_{w\in \supp{\mathbf{W_{t+1}}}}$ the discrete probability law of the random variable
$\mathbf{W_{t+1}}$, that
\begin{align}
V_t\np{x}
& = \aB{V_{t+1}}{x} \notag
\\
& = \mathbb{E}\bc{ \pB[\mathbf{W_{t+1}}]{V_{t+1}}{x}} \notag
\\
& = \sum_{w\in \supp{\mathbf{W_{t+1}}}} p_w
\inf_{u\in \mathbb{U}} \Bigl(\cost{x,u} + V_{t+1}\bp{\dyn{x,u}}\Bigr) \notag
\\
& = \sum_{w\in \supp{\mathbf{W_{t+1}}}} p_w \Bigl(\cost{x,u^w} + V_{t+1}\bp{\dyn{x,u^w}}\Bigr)
\notag \; .
\end{align}
As every term in the right hand side of the previous equation is finite, we
have $V_t\np{x} < + \infty$ and thus $x\in \mathop{\mathrm{dom}} V_t$.
Second, fix $x\notin X_t$. Then, there exists an element
$w \in \supp{\mathbf{W_{t+1}}}$ such that $\cost{x,u} = +\infty$ for every
control $u \in \mathbb{U}$. We therefore have that $V_t\np{x} = +\infty$ and
$x \not\in \mathop{\mathrm{dom}} V_t$.
We conclude that $\mathop{\mathrm{dom}} V_t = X_t$ which ends the proof.
\end{proof}
In \Cref{sec:numerique}, it will be crucial for numerical efficiency to have a
good estimation of the Lipschitz constant of the function
$\aB{V_{t+1}^k}{}$.
We now prove that under Assumptions~\ref{LipschitzMSP}
and~\ref{recourse}, the operators $\aB{}{}$ preserve Lipschitz
regularity. Given a $L_{t+1}$-Lipschitz function $\phi$ and
$w\in \supp{\mathbf{W_{t+1}}}$, in order to compute a Lipschitz constant of the
function $\pB{\phi}{\cdot}$ we exploit the fact that the set-valued constraint
mapping $\mathcal{U}_t^w$ and the data of Problem \ref{MSP} are Lipschitz in the sense of
Assumptions~\ref{LipschitzMSP}. This was mostly already done in
\cite{Ak.Ch.Tr2018}, but for the sake of completeness, we will slightly adapt
its statement and proof.
\begin{proposition}[$\aB{}{}$ is Lipschitz regular]
\label{lipschitz_regularity}
Let $\phi : \mathbb{X} \to \mathbb{R}b$ be given. Under Assumptions~\ref{whitenoise} to
\ref{recourse}, if for some $L_{t+1}> 0$, $\phi$ is $L_{t+1}$-Lipschitz on
$X_{t+1}$, then the function $\aB{\phi}{}$ is $L_t$-Lipschitz on $X_t$ for
some constant $L_t > 0$ which only depends on the data of Problem~\ref{MSP} and
$L_{t+1}$.
\end{proposition}
\begin{proof}
Let $\phi : \mathbb{X} \to \mathbb{R}b$ be a $L_{t+1}$-Lipschitz function on $X_{t+1}$. We
will show that for each $w\in \supp{\mathbf{W_{t+1}}}$, the mapping
$\pB{\phi}{\cdot}$ is $L_w$-Lipschitz for some constant $L_w$ which only
depends on the data of problem \eqref{MSP}. Fix
$w \in \supp{\mathbf{W_{t+1}}}$ and $x_1, x_2 \in X_t$. Denote by $u_2^*$ an
optimal control at $x_2$ and $w$, that is
$u_2^* \in \mathop{\arg\min}_{u \in \mathcal{U}_t^w\np{x_2}}\Bigl( \cost{x_2,u} + \phi\bp{\dyn{x_2,u}}\Bigr)$,
or equivalently, $u_2^*$ satisfies
\begin{equation}
\cost{x_2,u_2^*} + \phi\bp{\dyn{x_2,u_2^*}} = \pB{\phi}{x_2}.
\label{eq:u2-optimal}
\end{equation}
Then, for every $u_1\in \mathcal{U}_t^w(x_1)$ we successively have
\begin{align*}
\pB{\phi}{x_1}
& \le \cost{x_1,u_1} + \phi\bp{\dyn{x_1,u_1}}
\tag{as $u_1\in \mathcal{U}_t^w(x_1)$ is admissible}
\\
& \leq \pB{\phi}{x_2} + \cost{x_1,u_1} + \phi\bp{\dyn{x_1,u_1}} - \pB{\phi}{x_2}
\\
& = \pB{\phi}{x_2} + \bp{\cost{x_1,u_1} - \cost{x_2,u_2^*}} +
\Bigl(\phi\bp{\dyn{x_1, u_1}} - \phi\bp{\dyn{x_2, u_2^*}}\Bigr)
\tag{using~\eqref{eq:u2-optimal}}
\\
& \leq \pB{\phi}{x_2} + L\bp{ \bnorm{x_1 - x_2} + \bnorm{u_1 - u_2^*}},
\tag{by \Cref{LipschitzMSP}} \label{eq:lipschitz_regularity1}
\end{align*}
where $L = \max\np{L_{\cost{}{}}, L_{t+1}L_{\dyn{}{}}}$.
Now, as the set-valued mapping $\mathcal{U}_t^w$ is $L_{\mathcal{U}_t^w}$-Lipschitz, there exists
$\tilde{u}_1 \in \mathcal{U}_t^w\np{x_1}$ such that
\[
\lVert \tilde{u}_1 - u_2^*\rVert \leq L_{\mathcal{U}_t^w} \lVert x_1 - x_2 \rVert.
\]
Hence, setting $L_w := \max\np{L_{\cost{}{}}, L_{t+1}L_{f_t^w}} \np{1 + L_{\mathcal{U}_t^w}}$,
we obtain
\[
\pB{\phi}{x_1} - \pB{\phi}{x_2} \leq L_w \lVert x_1 - x_2 \rVert.
\]
Reverting the role of $x_1$ and $x_2$ we get the converse inequality. Hence,
we have shown that, for every $w \in \supp{\mathbf{W_{t+1}}}$, the mapping
$\pB{\phi}{}$ is $L_w$-Lipschitz. Thus, setting $L_t = \np{\sum_{w}p_w L_w}$,
we have
\begin{align*}
\big\lvert \aB{\phi}{x_1} - \aB{\phi}{x_2} \big\rvert
& \leq \sum_{w\in \supp{\mathbf{W_{t+1}}}}
p_w \big\lvert \pB{\phi}{x_1} - \pB{\phi}{x_2} \big\rvert
\\
& \leq \Bigl(\sum_{w\in \supp{\mathbf{W_{t+1}}}} p_w L_w \Bigr)
\norm{x_1 - x_2}\; ,
\end{align*}
as $\pB{\phi}{}$ is $L_w$-Lipschitz.
We obtain that the mapping $\aB{\phi}{}$ is $L_t$-Lipschitz continuous
on $\mathop{\mathrm{dom}} V_t$ and this concludes the proof.
\end{proof}
The explicit constant $L_t$ computed in the proof of
Proposition~\ref{lipschitz_regularity} does not exploit any possible structure
of the data, \emph{e.g.} linearity. In the presence of such structure or
possible decomposition, it is possible to greatly reduce the value of the $L_t$
constant. However, in the sequel, we only care for the regularity result given
in Proposition~\ref{lipschitz_regularity} and computing sharper bounds under
some specific structure is left for future works.
Using the fact that the final cost function $\psi = V_T$ is Lipschitz on $X_T$, by successive applications of
Proposition~\ref{lipschitz_regularity}, one gets the following corollary.
\begin{corollary}[The value functions of a Lipschitz MSP are Lipschitz continuous]
\label{Lipschitz_Vt}
For every time step $t\in \ce{0,T}$, the value function $V_t$ is
$L_{V_t}$-Lipschitz continuous on $X_t$ where $L_{V_t} > 0$ is a constant which
only depends on the data of Problem~\ref{MSP}.
\end{corollary}
\subsection{Tight and valid selection functions}
We formally define now what we call \emph{basic functions}. In the sequel, the
notation in bold $Fb_t$ will stand for a set of basic functions and
$F_t$ will stand for a subset of $Fb_t$.
\begin{definition}[Basic functions]
Given $t\in \ce{0,T}$, a \emph{basic function} $\phi : \mathbb{X} \to \mathbb{R}b$ is a
$L_{V_t}$-Lipschitz continuous function on $X_t$, where the constant
$L_{V_t}>0$ is defined in Corollary~\ref{Lipschitz_Vt}.
\end{definition}
In order to ensure the convergence of the scheme detailed in the introduction,
at each iteration of the TDP algorithm a basic function which is \emph{tight}
and \emph{valid} in the sense below is added to the current sets of basic
functions. The idea behind these assumptions is to ensure that the Bellman
equations \eqref{BellmanEquations} will gradually be satisfied: it is too
numerically hard to find functions satisfying the Bellman equations
\eqref{BellmanEquations}, however tightness and validity can be checked
efficiently and this will be enough to ensure asymptotic convergence of our TDP
algorithm.
There is a dissymmetry for the validity assumption which depends on whether the
decision maker wants to build upper or lower approximations of the value
functions. In \S\ref{subsec:TDP}, we will assume that the decision maker has, at
her disposal, two sequences of selection functions
$\np{\uSelection[]{}}_{t\in \ce{0,T}}$ and
$\np{\lSelection[]{}}_{t\in \ce{0,T}}$. The former to select basic functions for the
upper approximations and the latter for the lower approximations of $V_t$. We write
$\mathbb{Z}Selection[]{}$ to denote either $\uSelection[]{}$ or $\lSelection[]{}$
and denote by $\overline{\mathcal{V}}_{\overline{F}_t}$ (resp. $\underline{\mathcal{V}}_{\underline{F}_t}$) the pointwise
infimum (resp. pointwise supremum) of basic functions in $\overline{F}_t$ (resp. in
$\underline{F}_t$) when approximating from above (resp. below) a mapping $V_t$. The
Figure~\ref{fig:selection_u_sddp} illustrates the formal definition of selection
functions given below. Given a set $Z$, we denote by $\mathcal{P}\np{Z}$ its
\emph{power set}, \emph{i.e.} the set of all subsets included in $Z$.
\begin{definition}[Selection functions]
\label{CompatibleSelection}
Let a time step $t \in \ce{0,T-1}$ be fixed. A \emph{selection function} or
simply \emph{selection function} is a mapping $\mathbb{Z}Selection[]{}$ from
$\mathcal{P}\np{Fb_{t+1}}{\times}X_t$ to $Fb_t$ satisfying the
following properties
\begin{itemize}
\item \textbf{Tightness}: for every set of basic functions
$F_{t+1} \subset Fb_{t+1}$ and $x\in X_t$, the mappings
$\mathbb{Z}Selection{F_{t+1}, x}$ and $\aB{\mathcal{V}_{F_{t+1}}}{\cdot}$ coincide at
point $x$, that is
\[
\mathbb{Z}Selection{F_{t+1}, x}\np{x} = \aB{\mathcal{V}_{F_{t+1}}}{x}.
\]
\item \textbf{Validity}: for every set of basic functions
$F_{t+1} \subset Fb_{t+1}$ and for every $x\in X_t$ we have
\begin{align}
\uSelection{F_{t+1}, x} \geq \aB{\mathcal{V}_{F_{t+1}}}{\cdot}, \tag{when building upper approximations} \\
\lSelection{F_{t+1}, x} \leq \aB{\mathcal{V}_{F_{t+1}}}{\cdot}. \tag{when building lower approximations}
\end{align}
\end{itemize}
For $t= T$, we also say that $S_T: X_T \to Fb_T$ is a \emph{selection function}
if the mapping $S_T$ is \emph{tight} and \emph{valid} with a modified definition of
\emph{tight} and \emph{valid} defined now. The mapping $S_T$ is said to be
\emph{valid} if, for every $x\in X_T$, the function $S_T\left( x \right)$
remains above (resp. below) the value function at time $T$ when building upper
approximations (resp. lower approximations). The mapping $S_T$ is said to be
\emph{tight} if it coincides with the value function at point $x$, that is for
every $x\in X_T$ we have
\[
S_T\left( x\right)\left(x\right) = V_T\np{x}.
\]
\end{definition}
\begin{remark}
Note that the validity and tightness assumptions at time $t=T$ are stronger than at times $t<T$: as the final cost function is a known data, we are allowed to enforce conditions directly on the value function $V_T$ and not just on the image of the current approximations at time $t+1$ as is the case when $t<T$.
\end{remark}
\begin{figure}
\centering
\caption{Illustration of a selection function.}
\label{fig:selection_u_sddp}
\end{figure}
\subsection{The problem-child trajectory}
From the previous section, given a set of basic functions and a point in $\mathbb{X}$, a
selection function is used to compute a new basic function. We explain in this section
the algorithm used to select the points which are used for computing new basic functions.
In this section we present how to build a trajectory of states, without
discretization of the whole state space. Selection functions for both upper and
lower approximations of $V_t$ will be evaluated along it. This trajectory of
states, coined \emph{problem-child} trajectory, was introduced by Baucke,
Downward and Zackeri in 2018 (see \cite{Ba.Do.Za2018}) for a variant of SDDP first
studied by Philpott, de Matos and Finardi in 2013 (see \cite{Ph.de.Fi2013}).
We present in Algorithm~\ref{PC} a generalized problem-child trajectory, it is
the sequence of states on which we evaluate selection functions.
\begin{algorithm}
\caption{Problem-child trajectory}
\label{PC}
\begin{algorithmic}
\REQUIRE{Two sequences of functions from $\mathbb{X}$ to $\mathbb{R}b$,
$\overline{\phi}_0,\ldots, \overline{\phi}_T$ and $\underline{\phi}_0,\ldots, \underline{\phi}_T$ with respective
domains equal to $\mathop{\mathrm{dom}} V_t$.} \ENSURE{A sequence of states
$\np{x_0^*, \ldots, x_T^*}$.} \STATE{Set $x_0^* := x_0$.}
\FOR{$t \in \ce{0,T{-}1}$} \FOR{$w\in \supp{\mathbf{W_{t+1}}}$} \STATE{Compute
an optimal control $u_t^w$ for $\underline{\phi}_{t+1}$ at $x_t^*$ for the given $w$
\begin{equation}
\label{eq:pc_control}
u_t^w \in \mathop{\arg\min}_{u\in \mathbb{U}} \Bigl( \cost{x_t^*,u} + \underline{\phi}_{t+1}\bp{\dyn{x_t^*,u}} \Bigr).
\end{equation}}
\ENDFOR
\STATE{Compute ``the worst'' noise $w^*\in \supp{\mathbf{W_{t+1}}}$,
\emph{i.e.} the one which maximizes the ``future'' gap
\[
w^* \in \mathop{\arg\max}_{w \in \supp{\mathbf{W_{t+1}}}} \bp{ \overline{\phi}_{t+1} -\underline{\phi}_{t+1} }\bp{\dyn{x_t^*,u_t^w}}.
\]}
\STATE{Compute the next state dynamics for noise $w^*$ and associated optimal control
$u_t^{w^*}$:
$$x_{t+1}^* = \dyn[w^*]{x_t^*,u_t^{w^*}}\; .$$}
\ENDFOR
\end{algorithmic}
\end{algorithm}
One can interpret the problem child trajectory as the worst (for the noises)
optimal trajectory (for the controls) of the lower approximations. It is worth
mentioning that the problem-child trajectory is deterministic. The approximations of
the value functions will be refined along the problem-child trajectory only,
thus avoiding a discretization of the state space. The main computational
drawback of such approach is the need to solve Problem \eqref{eq:pc_control}
$\lvert \supp{\mathbf{W_{1}}} \rvert \cdot \ldots \cdot \lvert
\supp{\mathbf{W_{T}}} \rvert$ times. Except on special instances like the
linear-quadratic case, one cannot expect to find a closed form expression for
solutions of Equation~\eqref{eq:pc_control}. However,
we will see in \Cref{sec:numerique} examples
where Problem \eqref{eq:pc_control} can be solved by Linear Programming or
Quadratic Programming. Simply put, if one can solve efficiently the
deterministic problem \eqref{eq:pc_control} and if at each time step the set
$\supp{\mathbf{W_{t}}}$ remains of small cardinality, then
using the problem-child trajectory and the Tropical Dynamical
Algorithm presented below in \Cref{subsec:TDP}, one can solve MSP problems with
finite independent noises efficiently. This might be an interesting framework in
practice if at each step the decision maker has a few different forecasts on
which her inputs are significantly different.
\subsection{Tropical Dynamic Programming}
\label{subsec:TDP}
\begin{algorithm}[H]
\caption{Tropical Dynamic Programming (TDP)}
\label{TDP}
\begin{algorithmic}
\REQUIRE{For every $t\in \ce{0,T}$, two compatible selection functions
$\uSelection[]{}$ and $\lSelection[]{}$. A sequence of independent random
variables $\np{\mathbf{W_t}}_{t\in \ce{1,T}}$, each with finite support.}
\ENSURE{For every $t\in \ce{0,T}$, two sequences of sets
$\np{\overline{F}_t^k}_{k\in\mathbb{N}}$, $\np{\underline{F}_t^k}_{k\in \mathbb{N}}$ and the associated
functions $\overline{V}_t^k = \inf_{\phi \in \overline{F}_t^k} \phi$ and
$\underline{V}_t^k = \sup_{\phi \in \underline{F}_t^k} \phi$.}
\STATE{Define for every
$t\in \ce{0,T}$, $\overline{F}_t^0 := \emptyset$ and $\underline{F}_t^0 := \emptyset$.}
\FOR{$k\geq 0$}
\STATE{\emph{Forward phase}}
\STATE{Compute the problem-child
trajectory $\np{x_t^k}_{t\in \ce{0,T}}$ for the sequences
$\np{\overline{\mathcal{V}}_{\overline{F}_t^k}}_{t\in \ce{0,T}}$ and
$\np{\underline{\mathcal{V}}_{\underline{F}_t^k}}_{t\in \ce{0,T}}$ using Algorithm~\ref{PC}.}
\STATE{\emph{Backward phase}}
\STATE{At $t = T$, compute new basic functions
$\overline{\phi}_T := \overline{S}_T \left( x_T^{k}\right)$ and
$\underline{\phi}_T := \underline{S}_T \np{ x_T^{k}}$.}
\STATE{Add them to current collections,
$\overline{F}_T^{k+1} := \overline{F}_T^{k} \cup \left\{ \overline{\phi}_T \right\}$ and
$\underline{F}_T^{k+1} := \underline{F}_T^{k} \cup \left\{ \underline{\phi}_T \right\}$.}
\FOR{$t$
from $T{-}1$ to $0$}
\STATE{Compute new basic functions:
$\overline{\phi}_t := \uSelection{\overline{F}^{k+1}_{t+1}, x_t^{k}}$ and
$\underline{\phi}_t := \lSelection{\underline{F}^{k+1}_{t+1}, x_t^{k}}$.} \STATE{Add them
to the current collections:
$\overline{F}_t^{k+1} := \overline{F}_t^{k} \cup \left\{ \overline{\phi}_t \right\}$ and
$\underline{F}_t^{k+1} := \underline{F}_t^{k} \cup \left\{ \underline{\phi}_t \right\}$.} \ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
\section{Asymptotic convergence of TDP along the problem-child trajectory}
\label{sec:convergence}
In this section, we will assume that Assumptions~\ref{whitenoise} to
\ref{recourse} are satisfied. We recall that, under Assumption~\ref{recourse},
the sequence of sets $\na{X_t}_{t\in \ic{0,T}}$ defined in Equation~\eqref{Xt}
is known and for all $t\in \ce{0,T}$ the domain of $V_t$ is equal to $X_t$.
We denote by $\np{x_t^k}_{k\in \mathbb{N}}$ the sequence of trial points generated by TDP algorithm at time $t$ for
every $t\in \ce{0,T}$, and by $\np{u_t^k}_{k\in \mathbb{N}}$ and $\np{w_t^k}_{k\in \mathbb{N}}$
the optimal control and worst noises sequences associated for each time $t$
with $x_t^k$ in the problem-child trajectory in Algorithm~\ref{PC}.
Now, observe that for every $t\in \ce{0,T}$, the approximations of $V_t$ generated by TDP, $\np{\overline{V}_t^k}_{k\in \mathbb{N}}$ and $\np{\underline{V}_t^k}_{k \in \mathbb{N}}$, are respectively non increasing and non decreasing. Moreover, for every index $k\in \mathbb{N}$ we have
\[
\underline{V}_t^k \leq V_t \leq \overline{V}_t^k.
\]
We refer to \cite[Lemma 7]{Ak.Ch.Tr2018} for a proof. Observing that the basic functions are all $L_{V_t}$-Lipschitz continuous on $X_t$ one can prove using Arzel\`a-Ascoli Theorem the following proposition.
\begin{proposition}[Existence of an approximating limit]
\label{ExistenceLimits}
Let $t\in \ce{0,T}$ be fixed, the sequences of
functions $\left( \underline{V}_t^k \right)_{k\in \mathbb{N}}$ and $\left( \overline{V}_t^k \right)_{k\in \mathbb{N}}$ generated by Algorithm~\ref{TDP} converge
uniformly on $X_t$ to two functions $\underline{V}_t^*$ and $\overline{V}_t^*$. Moreover, $\underline{V}_t^*$ and $\overline{V}_t^*$ are $L_{V_t}$-Lipschitz continuous on $X_t$ and satisfy
\(
\underline{V}_t^* \leq V_t \leq \overline{V}_t^* \;.
\)
\end{proposition}
\begin{proof}
Omitted as it is slight rewriting of \cite[Proposition 9]{Ak.Ch.Tr2018}.
\end{proof}
If we extract a converging subsequence of trial points, then using compactness,
extracting a subsubsequence if needed, one can find a subsequence of
trial points, and associated controls that jointly converge.
\begin{lemma}
\label{lem:conv_commune}
Fix $t \in \ce{0,T-1}$ and denote by $\np{x_t^k}_{k\in \mathbb{N}}$ the sequence of
trial points generated by Algorithm~\ref{TDP} and by $\np{u_t^k}_{k\in \mathbb{N}}$ the
sequence of associated optimal controls. There exists an increasing function
$\sigma : \mathbb{N} \to \mathbb{N}$ and a state-control ordered pair
$\np{x_t^*, u_t^*} \in X_t{\times}\mathbb{U}$ such that
\begin{equation}
\left\{
\begin{aligned}
& x_t^{\sigma(k)} \underset{k\to +\infty}{\longrightarrow} x_t^*, \\
& u_t^{\sigma(k)} \underset{k\to +\infty}{\longrightarrow} u_t^*.
\end{aligned}
\right.
\end{equation}
\end{lemma}
\begin{proof}
Fix a time step $t\in \ce{0,T{-}1}$. First, by construction of the problem-child
trajectories, the sequence $\np{x_t^k}_{k\in \mathbb{N}}$ remains in the subset
$X_t$ that is $x_t^k \in X_t$ for all $k\in \mathbb{N}$.
Second, we show that the sequence of controls $\np{u_t^k}_{k\in \mathbb{N}}$ is
included in a compact subset of $\mathbb{U}$. Under Assumption~\ref{compactStates},
$X_t$ is a nonempty compact subset of $\mathbb{X}$. For every
$w\in \supp{\mathbf{W_{t+1}}}$ the set-valued mapping $\mathcal{U}_t^w$ is Lipschitz
continuous on $X_t$ under Assumption~\ref{LipschitzMSP}, hence upper
semicontinuous on $X_t$.\footnote{The compact valued set-valued mapping
$\mathcal{U}_t^w : \mathbb{X} \rightrightarrows \mathbb{U}$ is \emph{upper semicontinuous} on $X_t$ if,
for all $x_t \in X_t$, if an open set $U \subset \mathbb{U}$ contains $\mathcal{U}_t^w\np{x_t}$
then $\left\{ x \in \mathbb{X} \mid \mathcal{U}_t^w\np{x} \subset U \right\}$ contains a
neighborhood of $x_t$.} Moreover, under recourse Assumption~\ref{recourse},
$\mathcal{U}_t^w$ is nonempty compact valued. Thus, by \cite[Proposition 11
p.112]{Au.Ek1984}, its image $\mathcal{U}_t^w\np{X_t}$ of the compact $X_t$ is a nonempty
compact subset of $\mathbb{U}$. Finally as the random variable $\mathbf{W_{t+1}}$ has
a finite support under Assumption~\ref{whitenoise}, the set
$U_t := \cup_{w \in \supp{\mathbf{W_{t+1}}}} \, \mathcal{U}_t^w\np{X_t}$ is a compact
subset of $\mathbb{U}$. The sequence $\np{u_t^k}_{k\in \mathbb{N}}$ remains in $U_t$ and
therefore we conclude that it remains in a compact subset of $\mathbb{U}$.
Finally, as the sequence $\np{x_t^k, u_t^k}_{k\in \mathbb{N}}$ is included in the compact subset $X_t{\times}U_t$
of $\mathbb{X}{\times}\mathbb{U}$, one can extract a converging subsequence, hence the result.
\end{proof}
Lastly, we will use the following elementary lemma, whose proof is omitted.
\begin{lemma}
\label{lem:unif_conv}
Let $\np{g^k}_{k\in \mathbb{N}}$ be a sequence of functions that converges uniformly on
a compact $K$ to a function $g^*$. If $\np{y^k}_{k\in \mathbb{N}}$ is a sequence of
points in $K$ that converges to $y^* \in K$ then one has
\[
g^k\np{y^k} \underset{k\rightarrow + \infty}{\longrightarrow} g^*\np{y^*}.
\]
\end{lemma}
We now state the main result of this article. For a fixed $t\in \ce{0,T}$,
as the Bellman value function $V_t$ is always sandwiched between the sequences of
upper and lower approximations, if the gap between upper and lower approximations
vanishes at a given state value $x$, then upper and lower approximations will both
converge to $V_t(x)$.
Note that, even though an MSP is a stochastic optimization problem, the
convergence result below is not. Indeed, we have assumed (see \Cref{whitenoise}) that
the noises have finite supports, thus under careful selection of scenarios as done
by the Problem-child trajectory, we get a ``sure'' convergence.
\begin{theorem}[Vanishing gap along problem-child trajectories]
Denote by $\np{\overline{V}_t^k}_{k\in \mathbb{N}}$ and $\np{\underline{V}_t^k}_{k\in \mathbb{N}}$ the
approximations generated by the Tropical Dynamic Programming algorithm. For
every $k\in \mathbb{N}$ denote by $\np{x_t^k}_{0 \leq t \leq T}$ the current
Problem-child trajectory.
Then, under Assumptions~\ref{whitenoise} to~\ref{recourse},
we have that
\[
\overline{V}_t^k\np{x_t^k} - \underline{V}_t^k\np{x_t^k} \underset{k\to +\infty}{\longrightarrow} 0
\quad\text{and}\quad \overline{V}_t^*\np{x_t^*} = \underline{V}_t^*\np{x_t^*}
\; ,
\]
for every accumulation point $x_t^*$ of the sequence $\np{x_t^k}_{k\in \mathbb{N}}$.
\end{theorem}
\begin{proof}
We prove by backward recursion that, for every $t\in \ce{0,T}$,
for every accumulation point $x_t^*$ of the sequence $\np{x_t^k}_{k\in \mathbb{N}}$, we have
\begin{equation}
\label{ConvergingBonds}
\overline{V}_t^*\np{x_t^*} = \underline{V}_t^*\np{x_t^*}.
\end{equation}
By a direct consequence of the tightness of the selection functions one has
that for every $k\in \mathbb{N}$,
$\overline{V}_T^k\np{x_T^k} = V_T\np{x_T^k} =
\underline{V}_T^k\np{x_T^k}$. Thus, the equality~\eqref{ConvergingBonds} holds
for $t=T$ by Lemma~\ref{lem:unif_conv}.
Now assume that for some $t\in \ce{0,T{-}1}$, for every accumulation point
$x_{t+1}^*$ of $\np{x_{t+1}^k}_{k\in \mathbb{N}}$ we have
\begin{equation}
\label{HR_sto}
\overline{V}_{t+1}^*\np{x_{t+1}^*} = \underline{V}_{t+1}^*\np{x_{t+1}^*}.
\end{equation}
On the one hand, for every index $k\in \mathbb{N}$ one has
\begin{align}
\underline{V}_t^{k+1}\np{x_t^k}
& = \aB{\underline{V}_{t+1}^{k+1}}{x_t^k}, \tag{\text{Tightness}}
\\
& \geq \aB{\underline{V}_{t+1}^{k}}{x_t^k}, \tag{\text{Monotonicity}} \\
& = \mathcal{B}esp{ \pB[\mathbf{W_{t+1}}]{\underline{V}_{t+1}^{k}}{x_t^k}}
\tag{\text{by definition of $\aB{}{}$}}
\\
& = \mathcal{B}esp{ \cost[\mathbf{W_{t+1}}]{x_t^k, u_t^{\mathbf{W_{t+1}}}} +
\underline{V}_{t+1}^{k}\bp{\dyn[\mathbf{W_{t+1}}]{x_t^k, u_t^{\mathbf{W_{t+1}}}}}}
\tag{by \Cref{eq:pc_control}}
\\
& = \sum_{w\in \supp{\mathbf{W_{t+1}}}}
\mathbb{P}\bc{\mathbf{W_{t+1}} = w}
\mathcal{B}p{ \cost{x_t^k, u_t^w} + \underline{V}_{t+1}^{k}\bp{\dyn{x_t^k,u_t^w}}} \notag
\; .
\end{align}
On the other hand, for every index $k\in \mathbb{N}$ one has
\begin{align}
\overline{V}_t^{k+1}\np{x_t^k}
& = \aB{\overline{V}_{t+1}^{k+1}}{x_t^k}, \tag{\text{Tightness}}
\\
& = \mathcal{B}esp{ \pB[\mathbf{W_{t+1}}]{\overline{V}_{t+1}^{k+1}}{x_t^k}} \notag
\\
& \leq \mathcal{B}esp{ \cost[\mathbf{W_{t+1}}]{x_t^k, u_t^{\mathbf{W_{t+1}}}} +
\overline{V}_{t+1}^{k+1}\bp{\dyn[\mathbf{W_{t+1}}]{x_t^k, u_t^{\mathbf{W_{t+1}}}}}}
\tag{\text{Def. of pointwise $\pB{}{}$}}
\\
& \leq \mathcal{B}esp{ \cost[\mathbf{W_{t+1}}]{x_t^k, u_t^{\mathbf{W_{t+1}}}} +
\overline{V}_{t+1}^{k}\bp{\dyn[\mathbf{W_{t+1}}]{x_t^k, u_t^{\mathbf{W_{t+1}}}}}}
\tag{\text{Monotonicity}}
\\
& = \sum_{w\in \supp{\mathbf{W_{t+1}}}} \mathbb{P}\nc{\mathbf{W_{t+1}} = w}
\mathcal{B}p{\cost{x_t^k, u_t^{w}} + \overline{V}_{t+1}^{k}\bp{\dyn{x_t^k,u_t^{w}}}} \notag
\; .
\end{align}
By definition of the problem-child trajectory, recall that $u_t^k := u_t^{w_t^k}$,
thus we have $x_{t+1}^k := \dyn[w_t^k]{x_t^k, u_t^k}$ and for every $k\in \mathbb{N}$
\begin{align*}
0 \leq \overline{V}_t^{k+1}\np{x_t^k} - \underline{V}_t^{k+1}\np{x_t^k}
& \leq \sum_{w\in \supp{\mathbf{W_{t+1}}}} \bprob{\mathbf{W_{t+1}} = w}
\mathcal{B}p{ \np{\overline{V}_{t+1}^{k} - \underline{V}_{t+1}^k}\bp{\dyn{x_t^k,u_t^w}}}
\\
& \leq \overline{V}_{t+1}^{k}\np{x_{t+1}^k} - \underline{V}_{t+1}^{k}\np{x_{t+1}^k}
\; .
\end{align*}
Thus, we get that for every function $\sigma : \mathbb{N} \to \mathbb{N}$
\begin{equation}
\label{Presque_sto}
0 \leq \overline{V}_t^{\sigma(k)+1}\np{x_t^{\sigma(k)}} - \underline{V}_t^{\sigma(k)+1}\np{x_t^{\sigma(k)}}
\leq \overline{V}_{t+1}^{\sigma(k)}\np{x_{t+1}^{\sigma(k)}} - \underline{V}_{t+1}^{\sigma(k)}\np{x_{t+1}^{\sigma(k)}}
\; .
\end{equation}
By Lemma~\ref{lem:conv_commune} and continuity of the dynamics, there exists an
increasing function $\sigma : \mathbb{N} \to \mathbb{N}$ such that the sequence of future
states
$x_{t+1}^{\sigma(k)} =
\dyn[w_{t+1}^{\sigma(k)}]{x_t^{\sigma(k)},u_t^{\sigma(k)}}$, $k\in \mathbb{N}$,
converges to some future state $x_{t+1}^* \in X_{t+1}$. Thus, by
Lemma~\ref{lem:unif_conv} applied to the $2L_{V_{t+1}}$-Lipschitz functions
$g^k := \overline{V}_{t+1}^{\sigma(k)} - \underline{V}_{t+1}^{\sigma(k)}$,
$k\in \mathbb{N}$ and the sequence $y^k := x_{t+1}^{\sigma(k)}$, $k\in \mathbb{N}$ we have that
\begin{equation*}
\overline{V}_{t+1}^{\sigma(k)}\np{x_{t+1}^{\sigma(k)}} - \underline{V}_{t+1}^{\sigma(k)}\np{x_{t+1}^{\sigma(k)}}
\underset{k\rightarrow + \infty}{\longrightarrow}
\overline{V}_{t+1}^{*}\np{x_{t+1}^{*}} - \underline{V}_{t+1}^{*}\np{x_{t+1}^{*}}
\; .
\end{equation*}
Likewise, by Lemma~\ref{lem:unif_conv} applied to the $2L_{V_t}$-Lipschitz
functions
$g^k := \overline{V}_{t}^{\sigma(k)+1} - \underline{V}_{t}^{\sigma(k)+1}$,
$k\in \mathbb{N}$ and the sequence $y^k := x_{t}^{\sigma(k)}$, $k\in \mathbb{N}$ we have that
\begin{equation*}
\overline{V}_{t}^{\sigma(k)+1}\np{x_{t}^{\sigma(k)}} - \underline{V}_{t}^{\sigma(k)+1}\np{x_{t}^{\sigma(k)}}
\underset{k\rightarrow + \infty}{\longrightarrow}
\overline{V}_{t}^{*}\np{x_{t}^{*}} - \underline{V}_{t}^{*}\np{x_{t}^{*}}
\; .
\end{equation*}
Thus, taking the limit in $k$ in \Cref{Presque_sto}, we have that
\begin{equation*}
\label{IncreasingGap*_sto}
0 \leq \overline{V}_t^*\np{x_t^*} - \underline{V}_t^*\np{x_t^*} \leq \overline{V}_{t+1}^{*}\np{x_{t+1}^{*}} - \underline{V}_{t+1}^{*}\np{x_{t+1}^{*}}.
\end{equation*}
By induction hypothesis \eqref{HR_sto} we have that
$\overline{V}_{t+1}^{*}\np{x_{t+1}^{*}} - \underline{V}_{t+1}^{*}\np{x_{t+1}^{*}} = 0$. Thus,
we have shown that
\[
\overline{V}_t^*\np{x_t^*} = \underline{V}_t^*\np{x_t^*}
\; .
\]
This concludes the proof.
\end{proof}
\section{Illustrations in the linear-polyhedral framework}
\label{sec:numerique}
In this section, we first present a class of Lipschitz MSP that we call
\emph{linear-polyhedral} MSP where dynamics are linear and costs are polyhedral,
\emph{i.e.} functions with convex polyhedral epigraph. Second, we give three
selection functions, one which generates polyhedral lower approximations (see
\S\ref{sec:SDDP}) and two which generate upper approximations, one as infima of
$U$-shaped functions (see \S\ref{sec:U}) and one as infima of $V$-shaped
functions (see \S\ref{sec:V}).
In Table~\ref{recapitulatif_numerique} we illustrate the flexibility made
available by TDP to the decision maker to approximate value functions.
Implementations were done in the programming language Julia 1.4.2 using
the optimization interface JuMP 0.21.3, \cite{Du.Hu.Lu2017}. The code
is available online (\url{https://github.com/BenoitTran/TDP})
as a collection of Julia Notebooks.
\begin{table}[h]
\centering
\begin{tabular}{|c||c|c|c|c|}
\hline
Selection mapping & Tight & Valid & Averaged & Computational difficulty \\
\hline
SDDP & \ding{51} & \ding{51} & \ding{51} & $\mathrm{Card}\np{\mathbf{W_{t+1}}}$ LPs \\
\hline
U & \ding{51} & \ding{55} & \ding{51} & $\mathrm{Card}\np{\mathbf{W_{t+1}}} \cdot \mathrm{Card}\np{F}$ QPs \\
\hline
V & \ding{51} & \ding{51} & \ding{55} & one LP \\
\hline
\end{tabular}
\caption{\label{recapitulatif_numerique} Summary of the three selection functions
presented in Section~\ref{sec:numerique}.}
\end{table}
\subsection{Linear-polyhedral MSP}
We want to solve MSPs where the dynamics are linear and the costs are
polyhedral. That is, we want to solve optimization problems of the form
\eqref{MSP} where for each time step $t \in \ce{0,T{-}1}$ the state dynamics
is linear, $\dyn{x,u} = A^w_t x + B^w_t u$ for some matrices $A_t^w$ and $B_t^w$ of
coherent dimensions and the cost is polyhedral:
\begin{equation}
\label{eq:polyhedralcosts}
\cost{x,u} = \max_{i \in I_t} \bscal{c^{i,w}_t}{(x;u)} + d_t^{i,w} + \delta_{P_t^w}(x,u)
\; ,
\end{equation}
where $I_t$ is a finite set, $c^{i,w}_t \in \mathbb{X}{\times}\mathbb{U}$, $d_t^{i,w}$ is a scalar
and $P_t^w$ is a convex polyhedron. The final cost function $\psi$ is of the
form $\psi(x) = \max_{i \in I_T} \nscal{c^{i}_T}{x} + d_T^{i}+ \delta_{X_{T}}$
where ${X_{T}}$ is a nonempty convex polytope. We assume that
Assumptions~\ref{whitenoise}, \ref{compactStates} and \ref{recourse} are
satisfied.
\begin{proposition}[Linear-polyhedral MSP are Lipschitz MSP]
\label{linpol_are_lipschitz}
Linear-polyhedral MSP are Lipschitz MSP in the sense of Assumption~\ref{LipschitzMSP}.
\end{proposition}
\begin{proof}
By construction, the costs $\cost{}{}$ and the dynamics $\dyn{}{}$ are
Lipschitz continuous with explicit constants. We show that for every
$t\in \ce{0,T{-}1}$ and each $w\in \supp{\mathbf{W_{t+1}}}$, the constraint
set-valued mapping $\mathcal{U}_t^w{}{}$ is Lipschitz continuous. From \cite[Example
9.35]{Ro.We2009}, it is enough to show that the graph of $\mathcal{U}_t^w{}{}$ is a convex
polyhedron. By assumption $\mathop{\mathrm{dom}}\, \cost{}{}$ is a convex polyhedron and by
recourse $\mathrm{Graph}\,\, \mathcal{U}_t^w{}{}$ is nonempty. As a nonempty intersection of convex
polyhedra is a convex polyhedron, we only have to show that
$\nset{\np{x,u} \in \mathbb{X}{\times}\mathbb{U}}{\dyn{x,u} \in X_{t+1}}$ is a convex
polyhedron as well.
Using Equation~\eqref{Xt} we have that $X_{t+1}$ is given by
$X_{t+1} = \cap_{w \in \supp{\mathbf{W_{t+2}}}}\pi_{\mathbb{X}} \bp{ \mathop{\mathrm{dom}}
\,c_{t+1}^w}$, which is the nonempty intersection of convex
polyhedra. Thus, $X_{t+1}$ is a convex polyhedron which implies that
there exists a matrix $Q_{t+1}$ and a vector $b_{t+1}$ such that
$X_{t+1} = \bset{{x}\in \mathbb{X}}{ Q_{t+1} x \leq b_{t+1}}$.
Therefore, we obtain that the two following sets coincide
$$
\bset{ \np{x,u}\in \mathbb{X}{\times}\mathbb{U}}{\dyn{x,u} \in X_{t+1}}
=
\bset{ \np{x,u}\in \mathbb{X}{\times}\mathbb{U}}{Q_{t+1}A_t^wx + Q_{t+1}B_t^wu \le b_{t+1}}
\; .
$$
The latter being convex polyhedral we obtain that the former is convex polyhedral.
This ends the proof.
\end{proof}
Now, observe that as linear-polyhedral MSP are Lipschitz MSP, by
Corollary~\ref{Lipschitz_Vt}, the value function $V_t$ is $L_{V_t}$-Lipschitz
continuous on $X_t$ for all $t\in \ic{0,T}$. Moreover, under the recourse assumption~\ref{recourse} we
can show that the Bellman operators $\na{\aB{}{}}_{t\in \ic{0,T{-}1}}$ preserve polyhedrality in the
sense defined below.
\begin{lemma}[$\aB{}{}$ preserves polyhedrality]
\label{polypreserved}
For every $t\in \ce{0,T{-}1}$, if $\phi : \mathbb{X} \to \overline{\mathbb{R}}$ is a \emph{polyhedral
function}, \emph{i.e.} its epigraph is a convex polyhedron, then
$\aB{\phi}{}$ is a polyhedral function as well.
\end{lemma}
\begin{proof}
For every $w\in \supp{\mathbf{W_{t+1}}}$, we have shown in the proof of
Proposition~\ref{linpol_are_lipschitz} that the graph of $\mathcal{U}_t^w{}{}$ is a convex
polyhedron. Thus,
$\np{x,u} \mapsto \cost{x,u} + \phi\bp{\dyn{x,u}} + \delta_{\mathrm{Graph}\,
\mathcal{U}_t^w{}{}}\np{x,u}$ is convex polyhedral and by \cite[Proposition
5.1.8.e]{Bo.Le2006}, $\pB{\phi}{}$ is polyhedral as well. Finally, under
Assumption~\ref{whitenoise}, we deduce that
$\aB{\phi}{} := \sum_{w \in \supp{\mathbf{W_{t+1}}}} \pB{\phi}{}$ is
polyhedral as a finite sum of polyhedral functions. This ends the proof.
\end{proof}
\subsection{SDDP lower approximations}
\label{sec:SDDP}
Stochastic Dual Dynamic Programming is a popular algorithm which was introduced
by Pereira and Pinto in 1991 (see \cite{Pe.Pi1991}) and studied extensively
since then, \emph{e.g.} \cite{Ah.Ca.da2019,Ba.Do.Za2018,Gu2014,Ph.Gu2008,Ph.de.Fi2013,Sh2011,Zo.Ah.Su2018}.
Lemma~\ref{polypreserved} is the main intuitive justification of using SDDP in
linear-polyhedral MSPs: if the final cost function is polyhedral, as the operators
$\na{\aB{}{}}_{t\in \ic{0,T{-}1}}$ preserve polyhedrality, by backward induction on time, we obtain
that the value function $V_t$ is polyhedral for every $t\in \ce{0,T}$. Hence, the
decision maker might be tempted to construct polyhedral approximations of $V_t$
as well.
We now present a way to generate polyhedral lower approximations of
value functions, as done in the literature of SDDP, by defining a proper selection
mapping. When the value functions are convex, it builds lower approximations as suprema
of affine cuts. We put SDDP in TDP's framework by constructing a lower selection function.
First, for every time step $t\in \ce{0,T}$, define the set of basic functions,
\[
\underline{F}b_t^{\mathrm{SDDP}} :=
\bset{\nscal{a}{\cdot} + b + \delta_{X_t}}{\np{a,b} \in \mathbb{X}{\times}\mathbb{R} \ \text{s.t.} \ \norm{a} \leq L_{V_t}}
\; .
\]
At time $t = T$, given a trial point $x \in X_T$, we define
$\underline{S}_{T}^{\text{SDDP}}\np{ x} = \nscal{a_x}{\cdot -x} + b_x$, where $a_x$
is a subgradient of the convex polyhedral function $\psi$ at $x$ and
$b_x = \psi\np{x}$. Tightness and validity of $\underline{S}_{T}^{\text{SDDP}}$
follows from the given expression. Now, for $t \in \ic{0,T-1}$, we compute a
tight and valid cut for $\pB{}{}$ for each possible value of the noise $w$ then
average it to get a tight and valid cut for $\aB{}{}$. The details are given in
Algorithm~\ref{SDDP_Selection}.
\begin{algorithm}
\caption{\label{SDDP_Selection}SDDP Selection function $\underline{S}_{t}^{\mathrm{SDDP}}$ for $t<T$}
\begin{algorithmic}
\REQUIRE{A set of basic functions $\underline{F}_{t+1} \subset \underline{F}b_{t+1}^{\mathrm{SDDP}}$ and a trial point $x_t \in X_t$.}
\ENSURE{A tight and valid basic function $\underline{\phi}_t \in \underline{F}b_{t}^{\mathrm{SDDP}}$.}
\FOR{$w\in \supp{\mathbf{W_{t+1}}}$}
\STATE{Solve by linear programming $b^w := \pB{\underline{\mathcal{V}}_{\underline{F}_{t+1}}}{x_t}$ and compute a subgradient $a^w$ of $\pB{\underline{\mathcal{V}}_{\underline{F}_{t+1}}}{}$ at $x_t$.}
\ENDFOR
\STATE{Set $\underline{\phi}_t := \langle a, \cdot \rangle + b + \delta_{X_t}$ where $a := \sum_{w\in \supp{\mathbf{W_{t+1}} }} p_w a^w$ and $b = \sum_{w\in \supp{\mathbf{W_{t+1}}}} p_w b^w$.}
\end{algorithmic}
\end{algorithm}
We say that $\mathbb{Z}Selection[w]{}$ is a selection function for $\pB{}{}$, for a given
noise value $w \in \supp{\mathbf{W_{t+1}}}$ if
Definition~\ref{CompatibleSelection} is satisfied when replacing $\aB{}{}$ by
$\pB{}{}$. We now prove that $\underline{S}_{t}^{\text{SDDP}}$ is a selection
function, \emph{i.e.} it is tight and valid in the sense of
Definition~\ref{CompatibleSelection}. It follows from the general fact that by
averaging functions which are tight and valid for the pointwise Bellman
operators $\pB{}{}$, $w \in \supp{\mathbf{W_{t+1}}}$, one gets a tight and
valid function for the average Bellman operator $\aB{}{}$. Note that, as the average
of affine functions is still an affine function, the set of basic functions
$\underline{F}b_t^{\mathrm{SDDP}}$ is stable by averaging.
\begin{lemma}
\label{pointwisetoaverage}
Let a time step $t \in \ce{0,T{-}1}$ be fixed and let be given for every noise
value $w\in \supp{\mathbf{W_{t+1}}}$ a selection function
$\mathbb{Z}Selection[w]{}$ for $\pB{}{}$. Then, the mapping $\mathbb{Z}Selection[]{}$ defined
by $\mathbb{Z}Selection[]{} = \mathbb{E}\nc{\mathbb{Z}Selection[\mathbf{W_{t+1}}]{}}$ is a selection
mapping for $\aB{}{}$.
\end{lemma}
\begin{proof}
Fix $t\in \ce{0,T{-}1}$. Given a trial point $x\in X_t$ and a set of basic
functions $F$, the pointwise tightness (resp. validity) equality
(resp. inequality) is satisfied for every realization $w$ of the noise
$\mathbf{W_{t+1}}$, that is
\begin{align}
& \mathbb{Z}Selection[w]{F, x}\np{x} = \pB{\mathcal{V}_{F}}{x}, \tag{Pointwise tightness} \\
& \mathbb{Z}Selection[w]{F, x} \geq \pB{\overline{\mathcal{V}}_{F}}{} , \tag{Pointwise validity when building upper approximations} \\
& \mathbb{Z}Selection[w]{F, x} \leq \pB{\underline{\mathcal{V}}_{F}}{} \tag{Pointwise validity when building lower approximations}.
\end{align}
Recall that
$\aB{\mathcal{V}_{F}}{x} = \mathbb{E} \nc{\pB[\mathbf{W_{t+1}}]{\mathcal{V}_{F}}{x}}$, thus
taking the expectation in the above equality and inequalities, one gets the
lemma.
\end{proof}
\begin{proposition}[SDDP Selection function]
For every $t\in \ce{0,T}$, the mapping $\underline{S}_{t}^{\mathrm{SDDP}}$ is
a selection function in the sense of Definition~\ref{CompatibleSelection}.
\end{proposition}
\begin{proof}
For $t=T$, for every $x_T\in X_T$, by construction we have
\[
\underline{S}_{T}^{\mathrm{SDDP}}\np{x_T} = \psi\np{x_T} = V_T\np{x_T}.
\]
Thus, $\underline{S}_{T}^{\mathrm{SDDP}}$ is tight and it is valid as
$\underline{S}_{T}^{\mathrm{SDDP}}\np{x_T} = \langle a, \cdot - x_T \rangle +
\psi\np{x_T}$ is an affine minorant of the convex function $\psi$ which is
exact at $x_T$. Now, fix $t \in\ic{0,T{-}1}$, a set of basic functions
$\underline{F}_t \subset \underline{F}b_{t}^{\mathrm{SDDP}}$ and a trial point $x_t \in
X_t$. By construction, $\underline{S}_{t}^{\mathrm{SDDP}}$ is tight as we have
\[
\underline{S}_{t}^{\mathrm{SDDP}}\bp{\underline{F}_t, x_t}\np{x_t} =
\nscal{a}{x_t - x_t} + \mathcal{B}esp{ \pB[\mathbf{W_{t+1}}]{\underline{\mathcal{V}}_{\underline{F}_t}}{x_t}} = \aB{\underline{\mathcal{V}}_{\underline{F}_t}}{x_t}.
\]
Moreover, for every $w\in \supp{\mathbf{W_{t+1}}}$, $a^w$ (see
Algorithm~\ref{SDDP_Selection}) is a subgradient of $\pB{\underline{\mathcal{V}}_{\underline{F}_t}}{}$
at $x_t$. Thus as $a$ is equal to $\nesp{a^{\mathbf{W_{t+1}}}}$ it is a
subgradient of $\aB{\underline{\mathcal{V}}_{\underline{F}_t}}{}$ at $x_t$. Hence, the mapping
$\underline{S}_{t}^{\mathrm{SDDP}}$ is valid.
\end{proof}
\subsection{$U$-upper approximations}
\label{sec:U}
We have seen in Lemma~\ref{pointwisetoaverage}, that in order to construct a
selection function for $\aB{}{}$, it suffices to construct a selection function
for each pointwise Bellman operator $\pB{}{}$. In order to do so, for upper
approximations we exploit the min-additivity of the pointwise Bellman operators
$\pB{}{}$. That is, given a set of functions $F$, we use the following
decomposition
\[
\forall t\in \ce{0,T{-}1}, \forall x\in \mathbb{X}, \forall w \in \supp{\mathbf{W_{t+1}}}, \ \pB{\inf_{\phi \in F} \phi}{x} = \inf_{\phi\in F} \pB{\phi}{x}.
\]
This is a decomposition of the computation of $\pB{\overline{\mathcal{V}}_{F}}{}$ which is
possible for upper approximations but not for lower approximations as for
minimization problems, the Bellman operators (average or pointwise) are min-plus
linear but generally not max-plus linear.
However, in linear-polyhedral MSP, the value functions are
polyhedral. Approximating from above value function $V_t$ by infima of convex
quadratics is not suited: in particular, one cannot ensure validity of a
quadratic at a kink of the polyhedral function $V_t$. Still, we present a
selection function which is tight but not valid. In the numerical experiment of
Figure~\ref{figure:U-SDDP}, we illustrate that the selection function defined below
might not be valid, but the error is still reasonable. Yet, this will motivate
the use of other basic functions more suited to the linear-polyhedral framework,
as done in \S\ref{sec:V}.
We consider basic functions that are $U$-shaped, \emph{i.e.} of the form
$\frac{c}{2}\lVert x - a \rVert^2 + b$ for some constant $c>0$, vector $a$ and
scalar $b$. We call such a function a \emph{$c$-function}. We now fix a sequence
of constants $\np{c_t}_{t\in \ce{0,T}}$ such that $c_t > L_{V_t}$. For every
time $t\in \ce{0,T}$, define the set of basic functions
\[
\overline{F}b_t^{\mathrm{U}} =
\bset{ \frac{c_t}{2}\lVert x - a \rVert^2 + b + \delta_{X_t} }
{\np{a,b} \in \mathbb{X}{\times}\mathbb{R} }
\; .
\]
At time $t=T$, we select the $c_T$-quadratic mapping which is equal to $\psi$ at point
$x\in X_T$ and has same (sub)gradient at $x$, \emph{i.e.}
$\overline{S}_{T}^{\text{U}}\np{x} = \frac{c_T}{2}\lVert \cdot - a \rVert^2 + b$
where $a = x - \frac{1}{c_T}\lambda$ and
$b = \psi\np{x} - \frac{1}{2c_T}\lVert \lambda \rVert^2$ with $\lambda$ being a
subgradient of $\psi$ at $x$.
The mapping $\overline{S}_{t}^{\text{U}}$ defined in Algorithm~\ref{U_Selection}
is tight but not necessarily valid, see an illustration in
Figure~\ref{figure:U-SDDP}. As with SDDP, in order to build a tight selection
function at $t<T$ for $\aB{}{}$ we first compute a tight selection function for
each $\pB{}{}$, $w\in \supp{\mathbf{W_{t+1}}}$, which can be done numerically by
quadratic programming.
\begin{algorithm}
\caption{\label{U_Selection}U Selection function $\overline{S}_{t}^{\text{U}}$ for $t<T$}
\begin{algorithmic}
\REQUIRE{A set of basic functions $\overline{F}_{t+1} \subset \overline{F}b_{t+1}^{\mathrm{U}}$ and a trial point $x_t \in X_t$.}
\ENSURE{A tight basic function $\overline{\phi}_t \in \overline{F}b_{t}^{\mathrm{U}}$.}
\FOR{$w\in \supp{\mathbf{W_{t+1}}}$}
\STATE{Solve by quadratic programming $v^w := \pB{\overline{\mathcal{V}}_{\overline{F}_{t+1}}}{x_t} = \inf_{\overline{\phi} \in \overline{F}_{t+1}}\pB{\overline{\phi}}{x_t}$ and compute $a^w = x_t - \frac{1}{c_t}\lambda$ and $b^w = v^w - \frac{1}{2c_t} \lVert \lambda \rVert^2$ with $\lambda$ being a subgradient of $\pB{\overline{\mathcal{V}}_{\overline{F}_{t+1}}}{}$ at $x_t$.}
\ENDFOR
\STATE{Set $\overline{\phi}_t := \frac{c_t}{2}\lVert \cdot - a \rVert^2 + b + \delta_{X_t}$ where $a := \mathbb{E}\nc{a^{\mathbf{W_{t+1}}}}$ and $b := \mathbb{E}\nc{ \frac{c_t}{2}\lVert x_t - a^{\mathbf{W_{t+1}}} \rVert^2 + b^{\mathbf{W_{t+1}}}} - \frac{c_t}{2}\lVert x_t - a \rVert^2$.}
\end{algorithmic}
\end{algorithm}
\begin{figure}
\centering
% NOTE(review): the original caption text was lost in this source; restore it from the published version.
\caption{U-SDDP approximations of the value functions.}
\label{figure:U-SDDP}
\end{figure}
\subsection{$V$-upper approximations}
\label{sec:V}
We have seen in \S\ref{sec:U} that $U$-shaped basic functions may not be suited
to approximate polyhedral functions. In \cite{Ph.de.Fi2013}, upper
approximations which were polyhedral as well were introduced. In this section we
propose upper approximations of $V_t$ as infima of $V$-shaped functions. Even
though when $V_t$ is polyhedral the approach of \cite{Ph.de.Fi2013} seems the
most natural, their approximations cannot be easily expressed as a pointwise infima
of basic functions.
In future works we will add a max-plus/min-plus projection
step to TDP in order to broaden the possibilities of converging approximations
available to the decision maker. In particular, polyhedral upper approximations
as in~\cite{Ph.de.Fi2013} will be covered.
In this section, by introducing a new tight and valid selection function, we
would like to emphasize on the flexibility already available to the decision
maker by adopting the framework of TDP.
We consider $V$-shaped functions, \emph{i.e.} functions of the form
$L \lVert x - a \rVert_1 + b$ with $a \in \mathbb{X} = \mathbb{R}^n$ and $b\in \mathbb{R}$ and a
constant $L>0$. We define for every time step $t\in \ce{0,T}$, the set of basic
functions
\[\overline{F}b_{t}^{\mathrm{V}} := \mathcal{B}set{ \frac{L_{V_t}}{\sqrt{n}} \norm{\cdot - a}_1 + b}
{ \np{a,b} \in \mathbb{X}{\times}\mathbb{R} }
\; .
\]
At time $t = T$, we compute a $V$-shaped function at $\psi\np{x}$, \emph{i.e.}
given a trial point $x \in X_T$, using the expression
$\overline{S}_{T}^{\mathrm{V}}\np{x} = \frac{L_{V_T}}{\sqrt{n}} \lVert \cdot - x
\rVert_1 + \psi\np{x}$. For time $t \in\ic{0,T{-}1}$, the selection function is
given in Algorithm~\ref{V_Selection}. The main difference with the previous
cases treated in~\S\ref{sec:SDDP} and in~\S\ref{sec:U} is that $V$-shaped functions are not
stable by averaging, as the average of several $V$-shaped functions is a polyhedral function.
\begin{algorithm}
\caption{\label{V_Selection}V Selection function $\overline{S}_{t}^{\mathrm{V}}$ for $t<T$}
\begin{algorithmic}
\REQUIRE{A set of basic functions $\overline{F}_{t+1} \subset \overline{F}b_{t+1}^{\mathrm{V}}$ and a trial point $x_t \in X_t$.}
\ENSURE{A tight and valid basic function $\overline{\phi}_t \in \overline{F}b_{t}^{\mathrm{V}}$.}
\STATE{Solve by linear programming $b := \aB{\overline{\mathcal{V}}_{\overline{F}_{t+1}}}{x_t}$.}
\STATE{Set $\overline{\phi}_t := \frac{L_{V_t}}{\sqrt{n}} \lVert \cdot - x_t \rVert_1 + b$.}
\end{algorithmic}
\end{algorithm}
\begin{proposition}[V Selection function]
For every $t\in \ce{0,T}$, the mapping $\overline{S}_{t}^{\mathrm{V}}$
described in Algorithm~\ref{V_Selection}
is a selection function in the sense of Definition~\ref{CompatibleSelection}.
\end{proposition}
\begin{proof}
At time $t=T$, for every $x_T \in X_T$, we have $\overline{S}_{T}^{\mathrm{V}}\np{x_T} = \frac{L_{V_T}}{\sqrt{n}} \lVert \cdot - x_T \rVert_1 + \psi\np{x_T}$. Thus, $\overline{S}_{T}^{\mathrm{V}}\np{x_T}\np{x_T} = \psi\np{x_T}$ and $\overline{S}_{T}^{\mathrm{V}}$ is a tight mapping. As the polyhedral function $\psi(x) = \max_{i \in I_T} \langle c^{i}_T, x \rangle + d_T^i + \delta_{X_{T}}$ is $L_{V_T}$-Lipschitz continuous, by Cauchy-Schwarz inequality, for every $x \in X_T$ and $i\in I_T$, we have
\[
\langle c^{i}_T, x - x_T \rangle \leq \lVert c^{i}_T \rVert_2 \lVert x - x_T \rVert_2 \leq L_{V_T} \frac{1}{\sqrt{n}} \lVert x-x_T \rVert_1.
\]
Adding $\langle c^i_T, x_T \rangle + d_T^i$ on both sides of the last inequality and taking the maximum over $i \in I_T$ we have that
\[
\psi\np{x} = \max_{i \in I_T} \langle c^i_T, x \rangle + d_T^i \leq L_{V_T} \frac{1}{\sqrt{n}} \lVert x-x_T \rVert_1 + \psi\np{x_T} = \overline{S}_{T}^{\mathrm{V}}\np{x_T}\np{x},
\]
which gives that $\overline{S}_{T}^{\mathrm{V}}$ is a valid mapping.
Now, fix $t < T$, we show that the mapping $\overline{S}_{t}^{\mathrm{V}}$ is
tight and valid as well. By construction, for every set of basic functions
$\overline{F}_{t+1} \subset \overline{F}b_{t+1}^{\mathrm{V}}$ and trial point
$x_t \in X_t$, we have
\[
\overline{S}_{t}^{\mathrm{V}}\np{\overline{F}_{t+1}, x_t}\np{x_t} = b = \aB{\overline{\mathcal{V}}_{\overline{F}_{t+1}}}{x_t}.
\]
Hence, $\overline{S}_{t}^{\mathrm{V}}$ is a tight mapping.
We check that $\overline{S}_{t}^{\mathrm{V}}$ is a valid mapping.
First, as each basic function $\phi \in \overline{F}_{t+1}$ is
$L_{V_{t+1}}$-Lipschitz continuous on $X_t$, we show that
$\overline{\mathcal{V}}_{\overline{F}_{t+1}}$ is $L_{V_{t+1}}$-Lipschitz continuous on $X_t$ as
well. Given $x_1, x_2 \in X_t$, we have
\begin{align*}
\lvert \overline{\mathcal{V}}_{\overline{F}_{t+1}}\np{x_1} - \overline{\mathcal{V}}_{\overline{F}_{t+1}}\np{x_2} \rvert & = \lvert \inf_{\phi \in \overline{F}_{t+1}} \phi\np{x_1} - \inf_{\phi \in \overline{F}_{t+1}} \phi\np{x_2} \rvert \\
& \leq \sup_{\phi \in \overline{F}_{t+1}} \lvert \phi\np{x_1} - \phi\np{x_2} \rvert \\
& \leq L_{V_{t+1}} \lVert x_1 - x_2 \rVert.
\end{align*}
As the Bellman operator $\aB{}{}$ is Lipschitz regular in the sense of Proposition~\ref{lipschitz_regularity}, $\aB{\overline{\mathcal{V}}_{\overline{F}_{t+1}}}{}$ is $L_{V_t}$-Lipschitz continuous.
Second, by min-additivity of the Bellman operator $\aB{}{}$, we have that
\[
\aB{\overline{\mathcal{V}}_{\overline{F}_{t+1}}}{x} = \aB{\inf_{\phi \in \overline{F}_{t+1}}\phi}{x} = \inf_{\phi \in \overline{F}_{t+1}} \aB{\phi}{x}.
\]
Recall that by Lemma~\ref{polypreserved}, the Bellman operator $\aB{}{}$ preserves polyhedrality. As $\phi \in \overline{F}_{t+1}$ is polyhedral, $\aB{\phi}{}$ is polyhedral as well and as in the case $t=T$, \emph{mutatis mutandis} we have that $\overline{S}_{t}^{\mathrm{V}}$ is valid.
\end{proof}
\begin{figure}
\centering
% NOTE(review): the original caption was truncated in this source; restore the full text from the published version.
\caption{V-SDDP approximations of the value functions.}
\label{figure:V-SDDP}
\end{figure}
\section*{Conclusion}
\begin{itemize}
\item TDP generates simultaneously \emph{monotonic} approximations $\np{\underline{V}_t^k}_k$ and $\np{\overline{V}_t^k}_k$ of $V_t$.
\item Each approximation is either a \emph{min-plus} or \emph{max-plus linear} combinations of basic functions.
\item Each basic function should be \emph{tight} and \emph{valid}.
\item The approximations are refined iteratively along the Problem-child trajectory \emph{without discretizing the state space}.
\item The \emph{gap} between upper and lower approximation \emph{vanishes along the Problem-child trajectory}.
\item TDP generalizes a similar approach done in \cite{Ph.de.Fi2013} and proved by \cite{Ba.Do.Za2018} for a variant of SDDP in convex MSPs.
\end{itemize}
\section*{Perspectives}
\begin{itemize}
\item Consider an additional min-plus/max-plus projection step of suprema/infima of basic functions.
\item Extensive numerical comparisons with existing methods, namely classical SDDP and the upper approximations obtained by Fenchel duality of \cite{Le.Ca.Ch.Le.Pa2018}.
\item Extend the scope of TDP to encompass Partially Observed Markov Decision Processes. A first attempt to do so can be found in Appendix~\ref{TDP_POMDP}.
\end{itemize}
\appendix
\section{Tropical Dynamic Programming for POMDP}
\label{TDP_POMDP}
In this section, we present an on-going work to apply TDP on Partially Observed Markov Decision Processes (POMDP).
\subsection{Recalls on POMDP}
Formally, a POMDP is described (in the finite settings) by a finite set of states $\mathbb{X} = \na{\state_1, . . . , \state_{|\mathbb{X}|}}$,
a finite set of actions $\mathbb{C}ONTROL = \na{\control_1, . . . , \control_{|\mathbb{C}ONTROL|}}$, a finite set of observations
$\mathbb{O} = \na{o_1, . . . , o_{|\mathbb{O}|}}$, transition probabilities of the Markov chain
\begin{equation}
\TransitionState{\control}{\state_i}{\state_j}{t}
= \nprobc{\State_{t+1}=\state_j}{\State_t=\state_i,\mathbb{C}ontrol_t=\control}
\; ,
\end{equation}
and conditional law of the observations
\begin{equation}
{O}Law{o}{\state}{\control}{t+1}
= \nprobc{{O}_{t+1}=o}{\State_{t+1}=\state,\mathbb{C}ontrol_t=\control}
\; ,
\end{equation}
a real-valued cost function $L_t(\state, \control)$ for any $t \in \ic{0,T-1}$, a final cost $K(x)$
and an initial probability law in the simplex of $\mathbb{R}^{|\mathbb{X}|}$ called the initial belief $b_0$.
We assume here that the state space, the control space and the observation space dimensions do not vary with time,
but for the sake of clarity we will use the notation $\mathbb{X}_t$ to designate the state space at time $t$ even if it is equal to $\mathbb{X}$, and the same for the control and observation spaces.
Under Markov assumptions, we can use at time $t$ a probability distribution
$b_t$, whose name is a reminder of belief, over current states as a
sufficient statistic for the history of actions and observations up to time
$t$. The space of beliefs is the simplex of $\mathbb{R}^{|\mathbb{X}|}$, denoted
$\Delta_{|\mathbb{X}|}$. The belief dynamics, at time $t$, driven by action
$\control_t$ and observation $o_{t+1}$ is given by the equation
\begin{align}
b_{t+1}
& = \tau_t \np{b_{t}, \control_t, o_{t+1}}
\intertext{with $b_{t+1} \in \Delta_{|\mathbb{X}|}$ given by}
b_{t+1}\np{\state_{t+1}}
& =
\beta_{t+1} {O}Law{o_{t+1}}{\state_{t+1}}{\control_t}{t+1}
\mathcal{B}p{\sum_{x_t \in \mathbb{X}_{t}} b\np{\state_t} \TransitionState{\control_t}{\state_t}{\state_{t+1}}{t}}
\quad \forall \state_{t+1} \in \mathbb{X}_{t+1}
\; ,
\end{align}
where $\beta_{t+1}$ is a normalization constant to ensure that
$b_{t+1}\in \Delta_{|\mathbb{X}|}$, that is
\[
\beta_{t+1}^{-1} = \sum_{\state_{t+1} \in \mathbb{X}_{t+1}} {O}Law{o_{t+1}}{\state_{t+1}}{\control_t}{t+1}
\mathcal{B}p{\sum_{x_t \in \mathbb{X}_t} \TransitionState{\control_t}{\state_t}{\state_{t+1}}{t} b\np{\state_t}} \; .
\]
To simplify the notation we introduce the (sub-stochastic) matrix defined as follows
\[
{M^{\control_t, o_{t+1}}_t}\np{\state_t,\state_{t+1}} =
{O}Law{o_{t+1}}{\state_{t+1}}{\control_t}{t+1}
\TransitionState{\control_t}{\state_t}{\state_{t+1}}{t}
\quad \forall (\state_t,\state_{t+1}) \in \mathbb{X}_t{\times}\mathbb{X}_{t+1}
\; ,
\]
where we have $\sum_{o_{t+1}} \sum_{\state_{t+1}} {M^{\control_t, o_{t+1}}_t}\np{\state_t,\state_{t+1}}=1$.
Using matrix notations, where beliefs are represented by row vector and $\mathbf{1}$ is a column vector full of ones, we can rewrite the beliefs
dynamics as
\[
\tau_t \np{b_{t}, \control_t, o_{t+1}} =
\frac{ b_t {M^{\control_t, o_{t+1}}_t}}{ b_t {M^{\control_t, o_{t+1}}_t} \mathbf{1}}
\in \Delta_{|\mathbb{X}|}
\; .
\]
In general, the objective of the optimization problem is to generate a policy that minimizes the expected finite-horizon cost
for the controlled Markov chain $\na{\State_t^u}_{t\in \mathbb{N}N}$ with transition matrix $P^{\control}$. That is,
consider the minimization problem
\begin{equation}
J(b_0) = \min_{\mathbb{C}ontrol_0,\ldots,\mathbb{C}ontrol_{T-1}} \mathcal{B}espc{\sum_{t=0}^{T-1} L_t\np{\State_t,\mathbb{C}ontrol_t} + K\np{\State_T}}
{b_0}
\; .
\label{pb:pomdp}
\end{equation}
It is classical to derive a Bellman equation for the beliefs given by the Bellman operators for $t\in \ic{0,T-1}$
\begin{equation}
\label{eq:bellman-pomdp}
\mathcal{B}_t\np{V} = \inf_{\control \in \mathbb{C}ONTROL} \mathcal{B}^{\control}_t \np {V}
\; ,
\end{equation}
where for each $\control \in \mathbb{C}ONTROL$ and $t \in \ic{0,T-1}$, the Bellman operator $\mathcal{B}^{\control}_{t}$ is defined by
\begin{align}
\label{eq:bellman-u-pomdp}
\mathcal{B}^{\control}_{t}\np{V}\np{b}
& =
b L_t^\control
+ \sum_{o \in \mathbb{O}_{t+1}}
\np{b M^{\control, o}_t \mathbf{1}}
V
\mathcal{B}p{
\frac{ b M^{\control, o}_t}
{b M^{\control, o}_t \mathbf{1}}}
\; ,
\end{align}
where $L_t^u$ is the column vector $\bp{L_t^u(\state_t)}_{\state_t \in \mathbb{X}_t}$. Note that
the mapping $o_{t+1} \in \mathbb{O}_{t+1} \mapsto \np{b_t M^{\control_t, o_{t+1}}_t \mathbf{1}}$ is a
probability distribution on $\mathbb{O}_{t+1}$ ($\sum_{o \in \mathbb{O}_{t+1}} b_t M^{\control_t, o_{t+1}}_t \mathbf{1}=1$).
The Bellman operator can be also written as
\begin{align}
\mathcal{B}^{\control}_{t}\np{V}\np{b}
& =
b L_t^u
+ \sum_{b' \in \Delta_{|\mathbb{X}|}} \overline{P}^u_t(b, b') V \np{b'}
\; ,
\end{align}
where $\overline{P}^u_t$ is a controlled Markov chain transition matrix in the belief space. Indeed
\begin{equation}
\overline{P}^u_t(b, b')
=
\begin{cases}
\np{b M^{\control, o}_t \mathbf{1}} & \text{when } b' =
\frac{ b {M^{\control, o}_t}}{ b {M^{\control, o}_t} \mathbf{1}} \text{ with } o \in \mathbb{O}_{t+1} \; ,
\\
0 & \text{ if not}\; ,
\end{cases}
\end{equation}
which is a classical Bellman equation of a controlled Markov chain but with a state space in the belief space.
We conclude this section with the following proposition.
\begin{proposition} The value functions $\na{V_t}_{t\in \ic{0,T}}$ solutions of the Bellman Equation
\begin{equation}
\forall b \in\mathbb{R}R_{+}^{|\mathbb{X}|}\quad {V}_T(b) = b K \quad\text{and}\quad
\forall t \in \ic{0,T{-}1}\quad
{V}_t(b) = \inf_{\control \in \mathbb{C}ONTROL} {\mathcal{B}}^{\control}_{t}\np{{V}_{t+1}}\np{b}
\; ,
\label{eq:bellman-eq}
\end{equation}
where the operator ${\mathcal{B}}^{\control}_{t}$ is given by Equation~\eqref{eq:bellman-u-pomdp},
are such that $V_0(b_0)$ is the optimal value of the minimization problem given by Equation~\eqref{pb:pomdp}.
\end{proposition}
\subsection{The Bellman operator defined in Equation~\eqref{eq:bellman-pomdp} propagates Lipschitz mappings}
\begin{proposition} For $t\in \ic{0,T{-}1}$, assume that the mappings $L_t(\control,\cdot)$
satisfy $\norm{L_t(\control,\cdot)}_{\infty} \le {\cal L}$\footnote{Since the state space is finite, we
identify mappings $\phi:\mathbb{X}\to \mathbb{R}R$ with vectors in $\mathbb{R}R^{|\mathbb{X}|}$.}
for all $\control \in \mathbb{C}ONTROL$ and assume that the mapping $K$ satisfies $\sup_{\state \in \mathbb{X}} |K(\state)|= {\cal K}< + \infty$.
Then the solutions of the Bellman Equation~\eqref{eq:bellman-eq} are Lipschitz mappings.
\end{proposition}
\begin{proof}\quad
\noindent $\bullet$ We consider the operator $\widetilde{\mathcal{B}}^{\control}_{t}$
defined for mappings $\widetilde{V}: \mathbb{R}R_{+}^{|\mathbb{X}|} \to \mathbb{R}R$ by
\begin{align}
\widetilde{\mathcal{B}}^{\control}_{t}\np{\widetilde{V}}\np{c}
& =
c L_t^\control
+ \sum_{o \in \mathbb{O}_{t+1}}
\widetilde{V}
\bp{c M^{\control, o}_t} \quad \forall c \in \mathbb{R}R^{|\mathbb{X}|}
\; ,
\end{align}
where $L^u_t$ stands for the column vector $\np{L_t(x,u)}_{x \in \mathbb{X}_t}$
and we recall that beliefs are row vectors. We consider $\na{\widetilde{V_t}}_{t\in \ic{0,T}}$ solution of the
Bellman Equation
\begin{equation}
\forall c \in\mathbb{R}R_{+}^{|\mathbb{X}|}\quad \widetilde{V}_T(c) = cK \quad\text{and}\quad
\forall t \in \ic{0,T{-}1}\quad
\widetilde{V}_t(c) = \inf_{\control \in \mathbb{C}ONTROL} \widetilde{\mathcal{B}}^{\control}_{t}\np{\widetilde{V}_{t+1}}\np{c}
\; .
\label{eq:modified-bellman-pomdp}
\end{equation}
First, we straightforwardly obtain by backward induction that the value functions
$\np{\widetilde{V}_t}_{t \in \ic{0,T}}$ are homogeneous of degree $1$. Second
we prove that the operator $\widetilde{\mathcal{B}}^{\control}_{t}$ preserves Lipschitz
regularity. We proceed as follows. Consider $c$ and $c'$ in
$\mathbb{R}R_{+}^{|\mathbb{X}|}$ and suppose that
$| \widetilde{V}(c)-\widetilde{V}(c')| \le {\cal V} \norm{c'-c}_1$. Then we
have that
\begin{align*}
\widetilde{\mathcal{B}}^{\control}_{t}\np{\widetilde{V}}\np{c'} -
\widetilde{\mathcal{B}}^{\control}_{t}\np{\widetilde{V}}\np{c}
& = (c' - c) L_t^\control
+ \sum_{o \in \mathbb{O}_{t+1}} \widetilde{V}\bp{c' M^{\control, o}_t} -
\widetilde{V}\bp{ c M^{\control, o}_t}
\\
& \le {\cal L}\norm{c'-c}_1
+ \sum_{o \in \mathbb{O}_{t+1}} {\cal V}
\norm{c' M^{\control, o}_t - cM^{\control, o}_t}_1
\\
& \le {\cal L}\norm{c'-c}_1
+ {\cal V}
\sum_{\substack{o \in \mathbb{O}_{t+1} \\ \state' \in \mathbb{X}}}
\mathcal{B}ig\lvert
\sum_{\state \in \mathbb{X}}
\bp{c'(\state)-c(\state)} M^{\control, o}_t(\state,\state')
\mathcal{B}ig\rvert
\\
& \le {\cal L}\norm{c'-c}_1
+ {\cal V}
\sum_{\state \in \mathbb{X}}
|c'(\state)-c(\state)|
\sum_{\substack{o \in \mathbb{O}_{t+1} \\ \state' \in \mathbb{X}}}
M^{\control, o}_t(\state,\state')
\\
& \le {\cal L}\norm{c'-c}_1
+ {\cal V}
\sum_{\state \in \mathbb{X}}
|c'(\state)-c(\state)|
\\
& \le
\mathcal{B}p{{\cal L} +{\cal V}} \norm{c'-c}_1
\; .
\end{align*}
As a pointwise minimum of Lipschitz mappings having the same Lipschitz constant is Lipschitz, we obtain the same Lipschitz
constant for the operators $\inf_{\control \in \mathbb{C}ONTROL}\widetilde{\mathcal{B}}^{\control}_{t}$. Then, using the fact that
$\widetilde{V}_T(c) = cK$, we obtain by backward induction that the Bellman value function $\widetilde{V}_{t}$ is
$({\cal L}(T-t) + {\cal K})$-Lipschitz
for $t\in \ic{0,T}$ where ${\cal K}= \norm{K(\cdot)}_{\infty}$.
\noindent $\bullet$ We prove now an intermediate result to link the solutions of the Bellman Equation~\eqref{eq:modified-bellman-pomdp}
to the Bellman Equation~\eqref{eq:bellman-eq}. Suppose that $\widetilde{V}$ is
$1$-homogeneous and such that $\widetilde{V}(b) = V(b)$ for all
$b \in \Delta_{|\mathbb{X}|}$. Then, we prove that
$\widetilde{\mathcal{B}}^{\control}_{t}\np{\widetilde{V}}(b)=
{\mathcal{B}}^{\control}_{t}\np{{V}}(b)$ for all $b \in \Delta_{|\mathbb{X}|}$. For
$b\in \Delta_{|\mathbb{X}|}$, we successively have that
\begin{align}
\widetilde{\mathcal{B}}^{\control}_{t}\np{\widetilde{V}}\np{b}
& =
b L_t^\control
+ \sum_{o \in \mathbb{O}_{t+1}}
\widetilde{V}
\bp{b M^{\control, o}_t}
\\
& = b L_t^\control
+ \sum_{o \in \mathbb{O}_{t+1}}
\np{b M^{\control, o}_t\mathbf{1}}
\widetilde{V}
\bp{ \frac{b M^{\control, o}_t}{b M^{\control, o}_t\mathbf{1}}}
\tag{$\widetilde{V}$ is $1$-homogeneous}
\\
& =
b L_t^\control
+ \sum_{o \in \mathbb{O}_{t+1}}
\np{b M^{\control, o}_t\mathbf{1}}
{V}
\bp{ \frac{b M^{\control, o}_t}{b M^{\control, o}_t\mathbf{1}}}
\tag{$\widetilde{V}=V$ on $\Delta_{|\mathbb{X}|}$}
\\
& = {\mathcal{B}}^{\control}_{t}\np{{V}}\np{b}
\; .
\end{align}
\noindent $\bullet$ Now we turn to solutions of Bellman Equation~\eqref{eq:bellman-eq}.
Since $\widetilde{V}_T(c) = c K$ for all $c\in \mathbb{R}R_{+}^{|\mathbb{X}|}$ and
$V_T(b) = bK$ for all $b \in \Delta_{|\mathbb{X}|}$, the two mappings $V_T$ and $\widetilde{V}_T$ coincide
on the simplex of dimension $|\mathbb{X}|$. Then gathering the previous steps we obtain that
$V_t$ and $\widetilde{V}_t$ coincide
also on the simplex of dimension $|\mathbb{X}|$ for all $t \in \ic{0,T}$.
Finally, for all $t \in \ic{0,T}$, $\widetilde{V}_t$ being $({\cal L}(T-t) + {\cal K})$-Lipschitz,
we obtain the same result for $V_t$.
\end{proof}
\subsection{Value of $\mathcal{B}_{t}\np{V_{t+1}}$ when $V_{t+1} = \min_{\alpha \in \Gamma_{t+1}} \proscal{\alpha}{b}$}
Assume that $V_{t+1}: b \mapsto \min_{\alpha \in \Gamma_{t+1}}
\proscal{\alpha}{b}$ where $\Gamma_{t+1} \subset
\mathbb{R}R^{|\mathbb{X}|}$. Then we obtain that
\begin{align}
\mathcal{B}_{t}\np{V_{t+1}}\np{b}
& =
\min_{\control \in \mathbb{C}ONTROL_t}
\mathcal{B}p{{b L_t^u}
+ \sum_{o \in \mathbb{O}_{t+1}}
\np{b M^{\control, o}_t \mathbf{1}}
V_{t+1}\mathcal{B}p{
\frac{ b {M^{\control, o}_t}}{b {M^{\control, o}_t} \mathbf{1}}
}}
\\
& =
\min_{\control \in \mathbb{C}ONTROL_t}
\mathcal{B}p{{b L_t^u}
+ \sum_{o \in \mathbb{O}_{t+1}}
\np{b M^{\control, o}_t \mathbf{1}}
\min_{\alpha \in \Gamma_{t+1}} \mathcal{B}p{
\frac{ b {M^{\control, o}_t} \alpha }{ b {M^{\control, o}_t} \mathbf{1}
}}}
\\
& =
\min_{\control \in \mathbb{C}ONTROL_t}
\mathcal{B}p{{b L_t^u}
+ \sum_{o \in \mathbb{O}_{t+1}}
{ b {M^{\control, o}_t} \alpha^{\sharp}({\control, o}) }
\tag{with $\alpha^{\sharp}({\control, o})
= \mathop{\arg\min}_{\alpha \in \Gamma_{t+1}}
\frac{ b {M^{\control, o}_t} \alpha }
{ b {M^{\control, o}_t} \mathbf{1}}$}
}
\\
& =
\min_{\control \in \mathbb{C}ONTROL_t}b
\mathcal{B}p{ L_t^u
+ \sum_{o \in \mathbb{O}_{t+1}}
{ {M^{\control, o}_t} \alpha^{\sharp}({\control, o}) }}
\\
& =
\min_{\alpha \in \Gamma_t} \proscal{\alpha}{b}
\; ,
\end{align}
with
$\Gamma_t = \bset{{ L_t^u + \sum_{o \in \mathbb{O}_{t+1}} {
{M^{\control, o}_t} \alpha^{\sharp}({\control, o})
}}} {\control \in \mathbb{C}ONTROL_t \,\text{and}\, \alpha^{\sharp}({\control,
o}) = \mathop{\arg\min}_{\alpha \in \Gamma_{t+1}} \frac{ b
{M^{\control, o}_t} \alpha } { b {M^{\control,
o}_t} \mathbf{1}}}$. We therefore obtain that the Bellman
value function at time $t$ has the same form as the Bellman value function at
time $t+1$.
We are in a context where the Bellman function that is to be computed is
polyhedral concave with a huge polyhedron. It is thus tempting to use our
algorithm with polyhedral concave upper approximations and sup of quadratic or
Lipschitz mappings as lower approximations.
The \emph{Problem-child trajectory} technique is used in POMDP algorithms as a heuristic
but without a convergence proof as far as we have investigated.
\subsection{A lower bound of $\mathcal{B}_{t}\np{V_{t+1}}$}
We consider a special case where ${V}_{t+1}:\Delta_{|\mathbb{X}|} \to \mathbb{R}R$ is
given by ${V}_{t+1}(b)= \proscal{b}{\widehat{V}_{t+1}}$ and we compute
$\mathcal{B}_{t}\np{V_{t+1}}$ as follows
\begin{align*}
\mathcal{B}_{t}\np{V_{t+1}}\np{b}
& =
\min_{\control \in \mathbb{C}ONTROL_t}
\mathcal{B}p{
{b} {L_t^u}
+ \sum_{o \in \mathbb{O}_{t+1}}
b {M^{\control, o}_t} \widehat{V}_{t+1}}
\\
& =
\min_{\control \in \mathbb{C}ONTROL_t}
\mathcal{B}p{ {b}L_t^u
+ \sum_{o \in \mathbb{O}_{t+1}, \state \in \mathbb{X}_t, \state' \in \mathbb{X}_{t+1}}
{O}Law{o}{\state'}{\control}{t+1}
\TransitionState{\control}{\state}{\state'}{t}
b\np{\state}
\widehat{V}_{t+1}\np{\state'}}
\\
& =
\min_{\control \in \mathbb{C}ONTROL_t}
\mathcal{B}p{{b}{L_t^u}
+ \sum_{ \state \in \mathbb{X}_t, \state' \in \mathbb{X}_{t+1}}
\TransitionState{\control}{\state}{\state'}{t}
b\np{\state}
\widehat{V}_{t+1}\np{\state'}}
\tag{$\sum_{o} {O}Law{o}{\state'}{\control}{t+1} = 1$}
\\
& \ge
\sum_{\state \in \mathbb{X}_t} b\np{\state}
\min_{\control \in \mathbb{C}ONTROL_t} \mathcal{B}p{L_t\np{\control,\state}
+\sum_{\state' \in \mathbb{X}_{t+1}}
\TransitionState{\control}{\state}{\state'}{t}
\widehat{V}_{t+1}\np{\state'}}
\\
& =
\sum_{\state \in \mathbb{X}_t} b(\state)\widehat{V}_t(\state)
= b \widehat{V}_t
\; ,
\end{align*}
with
\begin{equation}
\widehat{V}_t(\state)
= \min_{\control \in \mathbb{C}ONTROL_t} \mathcal{B}p{L_t\np{\control,\state}
+\sum_{\state' \in \mathbb{X}_{t+1}}
\TransitionState{\control}{\state}{\state'}{t}
\widehat{V}_{t+1}\np{\state'}}
\; .
\end{equation}
Using the fact that at time $T$ we have that $V_T(b) = \proscal{b}{\widehat{V}_T}$ with ${\widehat{V}_T}=K$, we obtain that,
for all $t\in \ic{0,T}$, $V_t(b) \ge \proscal{b}{\widehat{V}_t}$, where ${\widehat{V}_t}$ is the value function of the fully observed Bellman equation associated to the POMDP.
\end{document} |
\begin{document}
\begin{abstract}
We investigate the $K$-theory of unital UCT Kirchberg algebras $\mathbb{Q}Q_S$ arising from families $S$ of relatively prime numbers. It is shown that $K_*(\mathbb{Q}Q_S)$ is the direct sum of a free abelian group and a torsion group, each of which is realized by another distinct $C^*$-algebra naturally associated to $S$. The $C^*$-algebra representing the torsion part is identified with a natural subalgebra $\mathbb{C}A_S$ of $\mathbb{Q}Q_S$. For the $K$-theory of $\mathbb{Q}Q_S$, the cardinality of $S$ determines the free part and is also relevant for the torsion part, for which the greatest common divisor $g_S$ of $\{p-1 : p \in S\}$ plays a central role as well. In the case where $\lvert S \rvert \leq 2$ or $g_S=1$ we obtain a complete classification for $\mathbb{Q}Q_S$. Our results support the conjecture that $\mathbb{C}A_S$ coincides with $\otimes_{p \in S} \mathbb{C}O_p$. This would lead to a complete classification of $\mathbb{Q}Q_S$, and is related to a conjecture about $k$-graphs.
\end{abstract}
\maketitle
\section{Introduction}
Suppose $S$ is a non-empty family of relatively prime natural numbers and consider the submonoid of $\mathbb{N}^\times$ generated by $S$. Its action on $\mathbb{Z}$ by multiplication can be represented on $\ell^2(\mathbb{Z})$ by the bilateral shift $U$ and isometries $(S_p)_{p \in S}$ defined by $U\xi_n = \xi_{n+1}$ and $S_p\xi_n = \xi_{pn}$. The associated $C^*$-algebra $C^*\bigl(U,(S_p)_{p \in S}\bigr)$ admits a universal model $\mathbb{Q}Q_S$ that is generated by a unitary $u$ and isometries $(s_p)_{p \in S}$, subject to\vspace*{-2mm}
\[\begin{array}{c} s_ps_q = s_qs_p \text{ for } q \in S,\quad s_pu=u^ps_p, \quad \text{ and } \quad \sum\limits_{m=0}^{p-1}u^ms_p^{\phantom{*}}s_p^*u^{-m} = 1. \end{array}\vspace*{-2mm}\]
By results of \cites{KOQ} or \cite{Sta1}, $\mathbb{Q}Q_S$ is isomorphic to $C^*\bigl(U,(S_p)_{p \in S}\bigr)$ and belongs to the class of unital UCT Kirchberg algebras. In view of the Kirchberg-Phillips classification theorem \cites{Kir,Phi}, the information on $S$ encoded in $\mathbb{Q}Q_S$ can therefore be read off from its $K$-theory.
In special cases, $\mathbb{Q}Q_S$ and its $K$-theory have been considered before: If $S$ is the set of all primes, then $\mathbb{Q}Q_S$ coincides with the algebra $\mathbb{Q}Q_\mathbb{N}$ from \cite{CuntzQ} and it follows that $K_i(\mathbb{Q}Q_S) = \mathbb{Z}^\infty$ for $i=0,1$ and $[1]=0$. The other extreme case, where $S=\{p\}$ for some $p \geq 2$, appeared already in \cite{Hir}: Hirshberg showed that $(K_0(\mathbb{Q}Q_{\{p\}}),[1],K_1(\mathbb{Q}Q_{\{p\}})) = (\mathbb{Z} \operatornamelus \mathbb{Z}/(p-1)\mathbb{Z},(0,1),\mathbb{Z})$. This result was recovered later in \cite{KatsuraIV} and \cites{CuntzVershik} as a byproduct. Note that $\mathbb{Q}Q_{\{p\}}$ coincides with Katsura's algebra $\mathbb{C}O(E_{p,1})$, see \cite{KatsuraIV}*{Example~A.6}. Moreover, Larsen and Li analyzed the situation for $p=2$ in great detail, see \cite{LarsenLi}. The similarities and differences among these known cases raise several questions:
\begin{enumerate}[(i)]
\item Is $K_1(\mathbb{Q}Q_S)$ always torsion free?
\item Is $2 \in S$ the only obstruction to torsion in $K_0(\mathbb{Q}Q_S)$?
\item What is the $K$-theory of $\mathbb{Q}Q_S$ in the general case of $\lvert S \rvert \geq 2$?
\item What does $\mathbb{Q}Q_S \cong \mathbb{Q}Q_T$ reveal about the relationship between $S$ and $T$?
\end{enumerate}
Through the present work, we provide a complete description in the case of $\lvert S \rvert=2$, for which the $K$-theory of $\mathbb{Q}Q_S$ satisfies
\[(K_0(\mathbb{Q}Q_S),[1],K_1(\mathbb{Q}Q_S)) = (\mathbb{Z}^2 \operatornamelus \mathbb{Z}/g_S\mathbb{Z},(0,1),\mathbb{Z}^2 \operatornamelus \mathbb{Z}/g_S\mathbb{Z}),\]
where $g_S=\gcd(\{p-1 : p \in S\})$, see Theorem~\ref{thm:main result}~(c). Thus we see that the first two questions from above have a negative answer (for instance, consider $S=\{3,5\}$ and $S=\{5,6\}$, respectively). More generally, we completely determine $K_*(\mathbb{Q}Q_S)$ in the case of $\lvert S \rvert \leq 2$ or $g_S = 1$, see Theorem~\ref{thm:main result}, and conclude that $\mathbb{Q}Q_S \cong \mathbb{Q}Q_T$ if and only if $\lvert S \rvert=\lvert T \rvert$ and $g_S=g_T$ in this case. In addition, Theorem~\ref{thm:main result} substantially reduces the problem in the remaining case of $\lvert S \rvert \geq 3$ and $g_S >1$. Thereby we also make progress towards a general answer to the remaining questions (iii) and (iv) from above.
In order to prove Theorem~\ref{thm:main result}, we first compare the stabilization of $\mathbb{Q}Q_S$ to the $C^*$-algebra $C_0(\mathbb{R}) \rtimes N \rtimes H$, where $N=\mathbb{Z}\bigl[\{\frac{1}{p} : p \in S\}\bigr]$, $H$ is the subgroup of $\mathbb{Q}_+^\times$ generated by $S$, and the action comes from the natural $ax+b$-action of $N \rtimes H$ on $\mathbb{R}$, see Section~\ref{sec:comp with real dyn}. This approach is inspired by methods of Cuntz and Li from \cite{CLintegral2}. However, the final part of their strategy is to use the Pimsner-Voiculescu sequence iteratively, see \cite{CLintegral2}*{Remark~3.16}, and depends on having free abelian $K$-groups, which does not work in our situation. Instead, we show that $K_*(\mathbb{Q}Q_S)$ decomposes as a direct sum of a free abelian group and a torsion group, both arising in a natural way from two distinguished $C^*$-algebras related to $\mathbb{Q}Q_S$, see Theorem~\ref{thm:decomposition of K-theory} and Corollary~\ref{cor:torsion and free part K-theory}. The determination of the torsion free part of $K_*(\mathbb{Q}Q_S)$ uses a homotopy argument, and thereby benefits heavily from the comparison with real dynamics. This allows us to prove that the rank of the torsion free subgroup of $K_i(\mathbb{Q}Q_S)$ equals $2^{\lvert S \rvert-1}$ for both $i=0,1$, see Proposition~\ref{prop:K-theory torsion free part}.
The torsion subgroup of $K_*(\mathbb{Q}Q_S)$ is realized by the semigroup crossed product $M_{d^\infty} \rtimes^e_\alpha H^+$, where $d$ is the product of all primes dividing some element of $S$, $H^+$ is the submonoid of $\mathbb{N}^\times$ generated by $S$, and the action $\alpha$ is inherited from a semigroup crossed product description of $\mathbb{Q}Q_S$, see Corollary~\ref{cor:torsion and free part K-theory}. Appealing to the recently introduced machinery for equivariantly sequentially split $*$-homomorphisms from \cite{BarSza1}, we show that $M_{d^\infty} \rtimes^e_\alpha H^+$ is a unital UCT Kirchberg algebra, just like $\mathbb{Q}Q_S$, see Corollary~\ref{cor:UHF into BD seq split cr pr}. Quite intriguingly, this paves the way to identify $M_{d^\infty} \rtimes^e_\alpha H^+$ with the subalgebra $\mathbb{C}A_S = C^*(\{u^ms_p : p \in S, 0 \leq m \leq p-1\})$ of $\mathbb{Q}Q_S$, see Corollary~\ref{cor:subalgebra for torsion part}. That is why we decided to name $\mathbb{C}A_S$ the \emph{torsion subalgebra}. This $C^*$-algebra is interesting in its own right as, for instance, it admits a model as the boundary quotient $\mathbb{Q}Q(U)$ of a particular right LCM submonoid $U$ of $\mathbb{N} \rtimes H^+$, see Proposition~\ref{prop:A_S as BQ of U}. As explained in Remark~\ref{rem:A_S as BQ of U}, this gives rise to a remarkable diagram for the semigroup $C^*$-algebras and boundary quotients related to the inclusion of right LCM semigroups $U \subset \mathbb{N} \rtimes H^+$.
With regards to the $K$-theory of $\mathbb{C}A_S$ and hence $\mathbb{Q}Q_S$, the $k$-graph description for finite $S$ obtained in Corollary~\ref{cor:tor subalgebra via k-graphs} is more illuminating: The canonical $k$-graph for $\mathbb{C}A_S$ has the same skeleton as the standard $k$-graph for $\bigotimes_{p \in S}\mathbb{C}O_p$, but uses different factorization rules, see Remark~\ref{rem:Lambda_S flip}. It is apparent from the given presentation that $\mathbb{C}A_S$ is isomorphic to $\mathbb{C}O_p$ for $S=\{p\}$. If $S$ consists of two relatively prime numbers $p$ and $q$, then a result from \cite{Evans} shows that $\mathbb{C}A_S$ coincides with $\mathbb{C}O_p \otimes \mathbb{C}O_q$. For the remaining cases, we extract vital information on $\mathbb{C}A_S$ by applying Kasparov's spectral sequence \cite{kasparov} (see also \cite{barlak15}) to the $H^+$-action $\alpha$ on $M_{d^{\infty}}$, see Theorem~\ref{thm:K-theory for A_S}. More precisely, we obtain that $\mathbb{C}A_S$ is isomorphic to $\bigotimes_{p \in S}\mathbb{C}O_p$ if $\lvert S \rvert \leq 2$ or $g_S=1$. In the latter case, it actually coincides with $\mathbb{C}O_2$. Additionally, we show that the order of every element in $K_*(\mathbb{C}A_S)$ divides $g_S^{2^{\lvert S \rvert - 2}}$. As we remark at the end of this work, the same results can be obtained by employing the $k$-graph representation of $\mathbb{C}A_S$ and using Evans' spectral sequence \cite{Evans} for the $K$-theory of $k$-graph $C^*$-algebras. In view of these results, it is very plausible that $\mathbb{C}A_S$ always coincides with $\bigotimes_{p \in S}\mathbb{C}O_p$. This would be in accordance with Conjecture~\ref{conj:k-graph}, which addresses independence of $K$-theory from the factorization rules for $k$-graphs under certain constraints. 
If $\mathbb{C}A_S \cong \bigotimes_{p \in S}\mathbb{C}O_p$ holds for all $S$, then we get a complete classification for $\mathbb{Q}Q_S$ with the rule that $\mathbb{Q}Q_S$ and $\mathbb{Q}Q_T$ are isomorphic if and only if $\lvert S \rvert = \lvert T \rvert$ and $g_S=g_T$, see Conjecture~\ref{conj:K-theory of QQ_S}.
At a later stage, the authors learned that Li and Norling obtained interesting results for the multiplicative boundary quotient for $\mathbb{N} \rtimes H^+$ by using completely different methods, see \cite{LN2}*{Subsection~6.5}. Briefly speaking, the multiplicative boundary quotient related to $\mathbb{Q}Q_S$ is obtained by replacing the unitary $u$ by an isometry $v$, see Subsection~\ref{subsec:BQ for ADS} for details. As a consequence, the $K$-theory of the multiplicative boundary quotient does not feature a non-trivial free part. It seems that $\mathbb{C}A_S$ is the key to reveal a deeper connection between the $K$-theoretical structure of these two $C^*$-algebras. As this is beyond the scope of the present work, we only note that
the inclusion map from $\mathbb{C}A_S$ into $\mathbb{Q}Q_S$ factors through the multiplicative boundary quotient as an embedding of $\mathbb{C}A_S$ and the natural quotient map. The results of \cite{LN2} together with our findings indicate that this embedding might be an isomorphism in $K$-theory. This idea is explored further in \cite{Sta3}*{Section~5}.
The paper is organized as follows: In Section~\ref{sec:prelim}, we set up the relevant notation and list some useful known results in Subsection~\ref{subsec:notation and basics}. We then link $\mathbb{Q}Q_S$ to boundary quotients of right LCM semigroups, see Subsection~\ref{subsec:BQ for ADS}, and $a$-adic algebras, see Subsection~\ref{subsec:a-adic algs}. These parts explain the central motivation behind our interest in the $K$-theory of $\mathbb{Q}Q_S$. In addition, the connection to $a$-adic algebras allows us to apply a duality theorem from \cite{KOQ}, see Theorem~\ref{thm:duality}, making it possible to invoke real dynamics. This leads to a decomposition result for $K_*(\mathbb{Q}Q_S)$ presented in Section~\ref{sec:K-theory}, which essentially reduces the problem to determining the $K$-theory of $\mathbb{C}A_S$. The structure of the torsion subalgebra $\mathbb{C}A_S$ is discussed in Section~\ref{sec:torsion part}. Finally, the progress on the classification of $\mathbb{Q}Q_S$ we obtain via a spectral sequence argument for the $K$-theory of $\mathbb{C}A_S$ is presented in Section~\ref{sec:classification}.
\subsection*{Acknowledgments}
The first named author was supported by SFB~$878$ \emph{Groups, Geometry and Actions}, GIF Grant~$1137$-$30.6/2011$, ERC through AdG~$267079$, and the Villum Fonden project grant `Local and global structures of groups and their algebras' (2014-2018). The second named author was supported by RCN through FRIPRO~$240913$. A significant part of the work was done during the second named author's research stay at Arizona State University, and he is especially grateful to the analysis group at ASU for their hospitality. He would also like to thank the other two authors of this paper for their hospitality during two visits to M\"{u}nster. The third named author was supported by ERC through AdG~$267079$ and by RCN through FRIPRO~$240362$. We are grateful to Alex Kumjian for valuable suggestions.
\section{Preliminaries}\label{sec:prelim}
\subsection{Notation and basics}\label{subsec:notation and basics}
Throughout this paper, we assume that $S \subset \mathbb{N}^\times \setminus \{1\}$ is a non-empty family of relatively prime numbers. We write $p|q$ if $q \in p\mathbb{N}^\times$ for $p,q \in \mathbb{N}^\times$. Given $S$, we let $P := \{p \in \mathbb{N}^\times : p \text{ prime and } p|q \text{ for some } q \in S\}$. Also, we define $d:= \prod_{p \in P}p$ (which is a supernatural number in case $S$ is infinite, see Remark~\ref{rem:supernatural}) and $g_S$ to be the greatest common divisor of $\{p-1 : p \in S\}$, i.e.\ $g_S := \gcd(\{p-1 : p \in S\})$.
Recall that $\mathbb{N}^\times$ is an Ore semigroup with enveloping group $\mathbb{Q}_+^\times$, that is, $\mathbb{N}^\times$ embeds into $\mathbb{Q}_+^\times$ (in the natural way) so that each element $r \in \mathbb{Q}_+^\times$ can be displayed as $r = p^{-1}q$ with $p,q \in \mathbb{N}^\times$. The subgroup of $\mathbb{Q}_+^\times$ generated by $S$ is denoted by $H$. Note that the submonoid of $\mathbb{N}^\times$ generated by $S$, which we refer to as $H^+$, forms a positive cone inside $H$. As the elements in $S$ are relatively prime, $H^+$ is isomorphic to the free abelian monoid in $\lvert S \rvert$ generators. Finally, we let $H_k$ be the subgroup of $H$ generated by the $k$ smallest elements of $S$ for $1 \leq k \leq \lvert S \rvert$, and define $H_k^+$ as the analogous submonoid of $H^+$.
Though the natural action of $H^+$ on $\mathbb{Z}$ given by multiplication is irreversible, it has a natural extension to an action of $H$ by automorphisms, namely by acting upon the ring extension $\mathbb{Z}\bigl[\{ \frac{1}{p} : p \in S\}\bigr] = \mathbb{Z}\bigl[\{ \frac{1}{p} : p \in P\}\bigr]$, that will be denoted by $N$. Within this context we will consider the collection of cosets
\[\begin{array}{c} \mathcal{F} := \{m+ h\mathbb{Z} : m \in \mathbb{Z}, h \in \mathbb{N}^\times, \frac{1}{h} \in N\}. \end{array}\]
\begin{defn}\label{def:Q_S}
$\mathbb{Q}Q_S$ is defined to be the universal $C^*$-algebra generated by a unitary $u$ and isometries $(s_p)_{p \in S}$ subject to the relations:
\[\begin{array}{llllll} \textnormal{(i)} & s_p^*s_q^{\phantom{*}} = s_q^{\phantom{*}}s_p^*, \quad & \textnormal{(ii)} & s_pu = u^ps_p, \quad \text{and} \quad & \textnormal{(iii)} & \sum\limits_{m=0}^{p-1} e_{m+p\mathbb{Z}} = 1
\end{array}\]
for all $p,q \in S, p \neq q$, where $e_{m+p\mathbb{Z}} = u^ms_p^{\phantom{*}}s_p^*u^{-m}$.
\end{defn}
Observe that the notation $e_{m+p\mathbb{Z}}$ is unambiguous, i.e.\ it does not depend on the representative $m$ of the coset $m+p\mathbb{Z}$, as
\[
u^{m+pn}s_p^{\phantom{*}}s_p^*u^{-m-pn} \stackrel{(ii)}{=} u^ms_p^{\phantom{*}}u^{n-n}s_p^*u^{-m} = u^ms_p^{\phantom{*}}s_p^*u^{-m}.
\]
\begin{rem}\label{rem:Q_S basic I}
Let us briefly discuss the defining relations for $\mathbb{Q}Q_S$:
\begin{enumerate}[a)]
\item Condition~(i) is known as the double commutation relation for the isometries $s_p$ and $s_q$ with $p \neq q$. In particular, they commute as $s_p^*s_q^{\phantom{*}} = s_q^{\phantom{*}}s_p^*$ implies that $(s_ps_q)^*s_q^{\phantom{*}}s_p^{\phantom{*}} = 1$, which forces $s_ps_q=s_qs_p$. Thus the family $(s_p)_{p \in S}$ gives rise to a representation of the monoid $H^+$ by isometries and we write $s_h$ for $s_{p_1}\cdots s_{p_n}$ whenever $h = p_1 \cdots p_n \in H^+$ with $p_i \in S$. In fact, $u$ and $(s_p)_{p \in S}$ yield a representation of $\mathbb{Z} \rtimes H^+$ due to Definition~\ref{def:Q_S}~(i),(ii).
\item $\mathbb{Q}Q_S$ can also be defined as the universal $C^*$-algebra generated by a unitary $u$ and isometries $(s_p)_{p \in H^+}$ subject to (ii), (iii) and
\[
\textnormal{(i') } s_ps_q = s_{pq} \text{ for all } p,q \in H^+.
\]
By a), we only need to show that (i') implies (i) for $p \neq q$. Note that (i') and (ii) imply that (iii) holds for all $p \in H^+$. In addition, (iii) implies the following: If $r \in S$ and $k \in \mathbb{Z}$ satisfy $s_r^*u^ks_r^{\phantom{*}} \neq 0$, then $k \in r\mathbb{Z}$. As $pq=qp$ and $p\mathbb{Z} \cap q\mathbb{Z} = pq\mathbb{Z}$, we get
\[\begin{array}{lclclclcl}
s_p^*s_q^{\phantom{*}}
&\stackrel{(iii)}{=}& \sum\limits_{k=0}^{pq-1} s_p^*u^ks_{pq}^{\phantom{*}}s_{pq}^*u^{-k}s_q^{\phantom{*}}
&\stackrel{(i')}{=}& \sum\limits_{k=0}^{pq-1} (s_p^*u^ks_p^{\phantom{*}})s_q^{\phantom{*}}s_p^*(s_q^*u^{-k}s_q^{\phantom{*}})
&=& s_q^{\phantom{*}}s_p^*.
\end{array}\]
\end{enumerate}
\end{rem}
\begin{rem}\label{rem:can rep}
The $C^*$-algebra $\mathbb{Q}Q_S$ has a canonical representation on $\ell^2(\mathbb{Z})$: Let $(\xi_n)_{n \in \mathbb{Z}}$ denote the standard orthonormal basis for $\ell^2(\mathbb{Z})$. If we define $U\xi_n := \xi_{n+1}$ and $S_p\xi_n := \xi_{pn}$, then it is routine to verify that $U$ and $(S_p)_{p \in S}$ satisfy (i)--(iii) from Definition~\ref{def:Q_S}. $\mathbb{Q}Q_S$ is known to be simple, see \cite{Sta1}*{Example~3.29~(a) and Proposition~3.2} for proofs and Proposition~\ref{prop:Q_S as O-alg} for the connection to \cite{Sta1}. Therefore, the representation from above is faithful and $\mathbb{Q}Q_S$ can be regarded as a subalgebra of $B(\ell^2(\mathbb{Z}))$.
\end{rem}
\begin{rem}\label{rem:Q_S for 2 and N}
For the case of $S=\{\text{all primes}\}$, the algebra $\mathbb{Q}Q_S$ coincides with $\mathbb{Q}Q_\mathbb{N}$ as introduced by Cuntz in \cite{CuntzQ}. Moreover, $S=\{2\}$ yields the \emph{$2$-adic ring $C^*$-algebra of the integers} that has been studied in detail by Larsen and Li in \cite{LarsenLi}.
\end{rem}
\begin{defn} \label{def:D_S}
The commutative subalgebra of $\mathbb{Q}Q_S$ generated by the projections $e_{m+h\mathbb{Z}}= u^ms_h^{\phantom{*}}s_h^*u^{-m}$ with $m \in \mathbb{Z}$ and $h \in H^+$ is denoted by $\mathcal{D}_S$.
\end{defn}
\begin{rem}\label{rem:Q_S basic II}
We record the following observations:
\begin{enumerate}[a)]
\item In view of Remark~\ref{rem:can rep}, $e_{m+h\mathbb{Z}}$ can be regarded as the orthogonal projection from $\ell^2(\mathbb{Z})$ onto $\ell^2(m+h\mathbb{Z})$.
\item With regards to a), the projections $e_{m+h\mathbb{Z}}$ correspond to certain cosets from $\mathcal{F}$. However, projections arising as sums of such elementary projections may lead to additional cosets: If $h \in \mathbb{N}^\times$ belongs to the submonoid generated by $P$, then there is $h' \in H^+$ so that $h'=h\ell$ for some $\ell \in \mathbb{N}^\times$. Therefore, we get
\[\begin{array}{c} e_{m+h\mathbb{Z}} = \sum\limits_{k=0}^{\ell-1} e_{m+hk + h'\mathbb{Z}}\end{array}\]
and $e_{m+h\mathbb{Z}} \in \mathcal{D}_S$ for all such $h$. In fact, $\mathcal{F}$ equals the collection of all cosets for which the corresponding projection appears in $\mathcal{D}_S$, that is, the projection is expressible as a finite sum of projections $e_{m_i+h_i\mathbb{Z}}$ with $m_i \in \mathbb{Z}$ and $h_i \in H^+$.
\end{enumerate}
\end{rem}
\begin{defn}\label{def:B_S}
The subalgebra of $\mathbb{Q}Q_S$ generated by $\mathcal{D}_S$ and $u$ is denoted by $\mathcal{B}_S$.
\end{defn}
\begin{rem}\label{rem:BD subalg B_S}
The $C^*$-algebra $\mathcal{B}_S$ is isomorphic to the Bunce-Deddens algebra of type $d^\infty$. If $p \in H^+$ and $(e_{i, j}^{(p)})_{0 \leq i,j \leq p-1}$ denote the standard matrix units in $M_p(\mathbb{C})$, then there is a unital $*$-homomorphism $M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}) \to \mathcal{B}_S$ mapping $e^{(p)}_{m,n} \otimes u^k$ to $e_{m+p\mathbb{Z}}u^{m-n+pk}$. Given another $q \in H^+$, the so constructed $*$-homomorphisms associated with $p$ and $pq$ are compatible with the embedding $\iota_{p,pq}\colon M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}) \to M_{pq}(\mathbb{C}) \otimes C^*(\mathbb{Z})$ given by $e_{i, j}^{(p)} \otimes 1 \mapsto \sum_{k=0}^{q-1}e_{i+pk, j+pk}^{(pq)} \otimes 1$ and $1 \otimes u \mapsto 1 \otimes u^q$. The inductive limit associated with $(M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}),\iota_{p,pq})_{p,q \in H^+}$, where $H^+ \subset \mathbb{N}^\times$ is a directed set in the usual way, is isomorphic to the Bunce-Deddens algebra of type $d^\infty$. Moreover, under this identification, the natural UHF subalgebra $M_{d^\infty}$ of the Bunce-Deddens algebra corresponds to the $C^*$-subalgebra of $\mathcal{B}_S$ generated by all elements of the form $e_{m+p\mathbb{Z}}u^{m-n}$ with $p\in H^+$ and $0\leq m,n \leq p-1$.
There is a natural action $\alpha$ of $\mathbb{Z} \rtimes H^+$ on $\mathcal{B}_S$ given by $\alpha_{(k,p)}(x) = u^ks_pxs_p^*u^{-k}$ for $(k,p) \in \mathbb{Z} \rtimes H^+$. Under the above identification, $M_{d^\infty} \subset \mathcal{B}_S$ is invariant under the restricted $H^+$-action, as for $p,q \in H^+$ and $0\leq m,n \leq p-1$,
\[
s^{\phantom{*}}_qe^{\phantom{*}}_{m+p\mathbb{Z}}u^{m-n}s_q^* = s_q^{\phantom{*}} u^ms^{\phantom{*}}_ps_p^*u^{-n} s_q^* = u^{qm}s_{pq}^{\phantom{*}}s_{pq}^*u^{-qn} =e^{\phantom{*}}_{qm+pq\mathbb{Z}}u^{qm-qn}.
\]
\vspace*{0mm}
\end{rem}
Another way to present the algebra $\mathbb{Q}Q_S$ is provided by the theory of semigroup crossed products. Recall that, for an action $\beta$ of a discrete, left cancellative semigroup $T$ on a unital $C^*$-algebra $B$ by $*$-endomorphisms, a unital, covariant representation of $(B, \beta, T)$ is given by a unital $*$-homomorphism $\pi$ from $B$ to some unital $C^*$-algebra $C$ and a semigroup homomorphism $\varphi$ from $T$ to the isometries in $C$ such that the covariance condition $\varphi(t)\pi(b)\varphi(t)^* = \pi(\beta_t(b))$ holds for all $b \in B$ and $t \in T$. The semigroup crossed product $B \rtimes^{e}_\beta T$ is then defined as the $C^*$-algebra generated by a universal unital, covariant representation $(\iota_B,\iota_T)$ of $(B, \beta, T)$. We refer to \cite{LacRae} for further details. Note that if $T$ is a group, then this crossed product agrees with the full group crossed product $B \rtimes_\beta T$. Semigroup crossed products may be pathological or extremely complicated in some cases. But we will only be concerned with crossed products of left Ore semigroups acting by injective endomorphisms so that we maintain a close connection to group crossed products, see \cite{Lac}. With respect to $\mathbb{Q}Q_S$, we get isomorphisms
\begin{equation}\label{eq:B_S and Q_S as crossed products}
\mathbb{Q}Q_S \cong \mathcal{D}_S \rtimes^e_\alpha \mathbb{Z} \rtimes H^+ \cong \mathcal{B}_S \rtimes^e_\alpha H^+ \quad\text{and}\quad \mathcal{B}_S \cong \mathcal{D}_S\rtimes_\alpha \mathbb{Z},
\end{equation}
see \cite{Sta1}*{Proposition~3.18 and Theorem~A.5}. Remark~\ref{rem:BD subalg B_S} reveals that the canonical subalgebra $M_{d^\infty} \subset \mathcal{B}_S$ and the isometries $(s_p)_{p \in H^+}$ give rise to a unital, covariant representation of $(M_{d^{\infty}},\alpha,H^+)$ on $\mathbb{Q}Q_S$. We will later see that this representation is faithful so that we can view $M_{d^\infty} \rtimes^e_\alpha H^+$ as a subalgebra of $\mathbb{Q}Q_S$, see Corollary~\ref{cor:subalgebra for torsion part}.
\subsection{Boundary quotients} \label{subsec:BQ for ADS}
The set $S \subset \mathbb{N}^\times\setminus\{1\}$ itself can be thought of as a data encoding a dynamical system, namely the action $\theta$ of the free abelian monoid $H^+ \subset \mathbb{N}^\times$ on the group $\mathbb{Z}$ given by multiplication. $\theta_h$ is injective for $h \in H^+$ and surjective only if $h=1$. Furthermore, as every two distinct elements $p$ and $q$ in $S$ are relatively prime, we have $\theta_p(\mathbb{Z})+\theta_q(\mathbb{Z})=\mathbb{Z}$. Hence $(\mathbb{Z},H^+,\theta)$ forms an \emph{irreversible algebraic dynamical system} in the sense of \cite{Sta1}*{Definition 1.5}, compare \cite{Sta1}*{Example~1.8~(a)}. In fact, dynamics of this form were one of the key motivations for \cite{Sta1}. In order to compare the $C^*$-algebra $\mathbb{C}O[\mathbb{Z},H^+,\theta]$ from \cite{Sta1}*{Definition 3.1} with $\mathbb{Q}Q_S$, let us recall the definition:
\begin{defn}\label{def:O-alg for IADS}
Let $(G,P,\theta)$ be an irreversible algebraic dynamical system. Then $\mathbb{C}O[G,P,\theta]$ is the universal $C^*$-algebra generated by a unitary representation $u$ of the group $G$ and a representation $s$ of the semigroup $P$ by isometries subject to the relations:
\[\begin{array}{lrcl}
(\text{CNP }1) & s_{p}u_{g} &\hspace*{-2.5mm}=\hspace*{-2.5mm}& u_{\theta_{p}(g)}s_{p},\vspace*{2mm}\\
(\text{CNP }2) & s_{p}^{*}u_gs_{q} &\hspace*{-2.5mm}=\hspace*{-2.5mm}& \begin{cases}
u_{g_1}s_{\gcd(p,q)^{-1}q}^{\phantom{*}}s_{\gcd(p,q)^{-1}p}^{*}u_{g_2}& \text{ if } g = \theta_p(g_1)\theta_q(g_2),\\ 0& \text{ else,}\end{cases}\vspace*{2mm}\\
(\text{CNP }3) & 1 &\hspace*{-2.5mm}=\hspace*{-2.5mm}& \sum\limits_{[g] \in G/\theta_{p}(G)}{e_{g,p}} \hspace*{2mm}\text{ if } [G : \theta_{p}(G)]< \infty,
\end{array}\]
where $e_{g,p} = u_{g}s_{p}s_{p}^{*}u_{g}^{*}$.
\end{defn}
Clearly, Definition~\ref{def:Q_S}~(ii) is the same as (CNP $1$). As $[\mathbb{Z}:h\mathbb{Z}] = h < \infty$ for every $h \in H^+$, (iii) corresponds to (CNP $3$) once we use Remark~\ref{rem:Q_S basic I}~a) and note that it is enough to have the summation relation for a set of generators of $H^+$. The case of distinct $p,q \in S$ and $g=0$ in (CNP $2$) yields (i). On the other hand, a slight modification of the argument in Remark~\ref{rem:Q_S basic I}~b) with $s_p^*u^ms_q$ in place of $s_p^*s_q$ establishes (CNP $2$) based on (i)--(iii).
Thus we arrive at:
\begin{prop}\label{prop:Q_S as O-alg}
The $C^*$-algebras $\mathbb{Q}Q_S$ and $\mathbb{C}O[\mathbb{Z},H^+,\theta]$ are canonically isomorphic.
\end{prop}
According to \cite{Sta1}*{Corollary~3.28 and Example~3.29~(a)}, $\mathbb{Q}Q_S$ is therefore a unital UCT Kirchberg algebra. While classification of $\mathbb{C}O[G,P,\theta]$ by $K$-theory was achieved in \cite{Sta1} for irreversible algebraic dynamical systems $(G,P,\theta)$ under mild assumptions, and even generalized to \emph{algebraic dynamical systems} in \cite{bsBQforADS}, the range of the classifying invariant remained a mystery beyond the case of a single group endomorphism, where the techniques of \cite{CuntzVershik} apply. It thus seemed natural to go back to examples of dynamical systems involving $P = \mathbb{N}^k$ and try to understand the invariant in this case. In other words, our path led back to $\mathbb{Q}Q_S$, and the present work aims at making progress precisely in this direction.
\vspace*{1em}
\noindent There is also an alternative way of constructing $\mathbb{Q}Q_S$ directly from either of the semigroups $\mathbb{N} \rtimes H^+$ or $\mathbb{Z} \rtimes H^+$ using the theory of boundary quotients of semigroup $C^*$-algebras. To begin with, let us note that $(\mathbb{N} \rtimes H^+,N \rtimes H)$ forms a quasi lattice-ordered group. Hence we can form the Toeplitz algebra $\mathcal{T}(\mathbb{N} \rtimes H^+,N \rtimes H)$ using the work of Nica, see \cite{Nic}. But $\mathbb{Z} \rtimes H^+$ has non-trivial units, so it cannot be part of a quasi lattice-ordered pair. In order to treat both semigroups within the same framework, let us instead employ the theory of semigroup $C^*$-algebras from \cite{Li1}, which generalizes Nica's approach tremendously.
We note that both $\mathbb{N} \rtimes H^+$ and $\mathbb{Z} \rtimes H^+$ are cancellative, countable, discrete semigroups with unit. Moreover, they are \emph{right LCM} semigroups, meaning that the intersection of two principal right ideals is either empty or another principal right ideal (given by a right least common multiple for the representatives of the two intersected ideals). Thus their semigroup $C^*$-algebras both enjoy a particularly nice and tractable structure, see \cites{BLS1,BLS2}. Additionally, both are left Ore semigroups with amenable enveloping group $N \rtimes H \subset \mathbb{Q} \rtimes \mathbb{Q}_+^\times$. However, we would like to point out that $\mathbb{N} \rtimes H^+$ and $\mathbb{Z} \rtimes H^+$ are not left amenable (but right amenable) as they fail to be left reversible, see \cite{Li1}*{Lemma~4.6} for details.
Roughly speaking, semigroup $C^*$-algebras have the flavor of Toeplitz algebras. In particular, they tend to be non-simple except for very special situations. Still, we might hope for $\mathbb{Q}Q_S$ to be a quotient of $C^*(\mathbb{N} \rtimes H^+)$ or $C^*(\mathbb{Z} \rtimes H^+)$ obtained through some systematic procedure. This was achieved in \cite{LacRae} for $\mathbb{N} \rtimes \mathbb{N}^\times$, i.e.\ $S$ consisting of all primes, by showing that the boundary quotient of $\mathcal{T}(\mathbb{N} \rtimes \mathbb{N}^\times,\mathbb{Q} \rtimes \mathbb{Q}_+^\times) = C^*(\mathbb{N} \rtimes \mathbb{N}^\times)$ in the sense of \cite{CrispLaca} coincides with $\mathbb{Q}Q_\mathbb{N}$. Recently, this concept of a boundary quotient for a quasi lattice-ordered group from \cite{CrispLaca} was transferred to semigroup $C^*$-algebras in the context of right LCM semigroups, see \cite{BRRW}*{Definition 5.1}:
\begin{defn}\label{def:FS and boundary quotient}
Let $T$ be a right LCM semigroup. A finite set $F \subset T$ is called a \emph{foundation set} if, for all $t\in T$, there is $f \in F$ satisfying $tT \cap fT \neq \emptyset$. The \emph{boundary quotient} $\mathbb{Q}Q(T)$ of a right LCM semigroup $T$ is the quotient of $C^*(T)$ by the relation
\begin{equation}\label{eq:BQ}
\begin{array}{c}\prod\limits_{f \in F} (1-e_{fT}) = 0 \quad \text{ for all foundation sets } F.\end{array}
\end{equation}
\end{defn}
To emphasize the relevance of this approach, let us point out that right LCM semigroups are much more general than quasi lattice-ordered groups. For instance, right cancellation may fail, so right LCM semigroups need not embed into groups.
On the one hand, this notion of a quotient of a semigroup $C^*$-algebra seems suitable as $\mathbb{N} \rtimes H^+$ and $\mathbb{Z} \rtimes H^+$ are right LCM semigroups. On the other hand, the abstract condition~\eqref{eq:BQ} prohibits an immediate identification of $\mathbb{Q}Q_S$ with $\mathbb{Q}Q(\mathbb{N} \rtimes H^+)$ or $\mathbb{Q}Q(\mathbb{Z} \rtimes H^+)$. This gap has been bridged successfully through \cite{bsBQforADS}:
\begin{prop}\label{prop:Q_S as BQ}
There are canonical isomorphisms $\mathbb{Q}Q_S \cong \mathbb{Q}Q(\mathbb{Z} \rtimes H^+) \cong \mathbb{Q}Q(\mathbb{N} \rtimes H^+)$.
\end{prop}
\begin{proof}
For $\mathbb{Z} \rtimes H^+$, \cite{bsBQforADS}*{Corollary~4.2} shows that $\mathbb{Q}Q(\mathbb{Z} \rtimes H^+) \cong \mathbb{C}O[\mathbb{Z},H^+,\theta]$, and hence $\mathbb{Q}Q_S \cong \mathbb{Q}Q(\mathbb{Z} \rtimes H^+)$ by Proposition~\ref{prop:Q_S as O-alg}. Noting that $H^+$ is directed, this can also be seen immediately from \cite{bsBQforADS}*{Remark~2.2 and Proposition~4.1}. For $\mathbb{N}\rtimes H^+$, we infer from \cite{bsBQforADS}*{Example~2.8~(b)} that it suffices to consider \emph{accurate foundation sets} $F$ for \eqref{eq:BQ} by \cite{bsBQforADS}*{Proposition~2.4}, that is, $F$ consists of elements with mutually disjoint principal right ideals. Now $F \subset \mathbb{N} \rtimes H^+$ is an accurate foundation set if and only if it is an accurate foundation set for $\mathbb{Z} \rtimes H^+$. Conversely, given an accurate foundation set $F' =\{(m_1,h_1),\dots,(m_n,h_n)\} \subset \mathbb{Z} \rtimes H^+$, we can replace each $m_i$ by some $m_i' \in m_i+h_i\mathbb{N}$ with $m_i' \in \mathbb{N}$ to get an accurate foundation set $F \subset \mathbb{N} \rtimes H^+$ which uses the same right ideals as $F'$. This allows us to conclude that $\mathbb{Q}Q(\mathbb{Z} \rtimes H^+)$ and $\mathbb{Q}Q(\mathbb{N} \rtimes H^+)$ are isomorphic.
\end{proof}
The fact that $\mathbb{Q}Q(\mathbb{N} \rtimes H^+)$ and $\mathbb{Q}Q(\mathbb{Z} \rtimes H^+)$ coincide is not at all surprising if we take into account \cite{BaHLR} and view $C^*(\mathbb{Z} \rtimes H^+)$ as the \emph{additive boundary quotient} of $\mathbb{N} \rtimes H^+$. Where there is an additive boundary, there is also a multiplicative boundary, see the boundary quotient diagram in \cite{BaHLR}*{Section~4}: The \emph{multiplicative boundary quotient} of $C^*(\mathbb{N} \rtimes H^+)$ is obtained by imposing the analogous relation to (iii) from Definition~\ref{def:Q_S}, i.e.\ $\sum_{k=0}^{p-1} e_{k+p\mathbb{N}} = 1$ for each $p \in S$. In comparison with Definition~\ref{def:Q_S}, the essential difference is that the semigroup element $(1,1)$ is implemented by a proper isometry $v_{(1,1)}$ instead of a unitary $u$. This multiplicative boundary quotient has been considered in \cite{LN2}*{Subsection~6.5}. As it turns out, its $K$-theory is hard to compute for larger $S$ as it leads to increasingly complicated extension problems of abelian groups. It is quite remarkable that there seems to be a deep common theme underlying the structure of the $K$-theory for both the multiplicative boundary quotient and $\mathbb{Q}Q_S$.
\subsection{\texorpdfstring{The $a$-adic algebras}{The a-adic algebras}} \label{subsec:a-adic algs}
Our aim is to compute the $K$-theory of $\mathbb{Q}Q_S$, and for this we need to make use of a certain duality result \cite{KOQ}*{Theorem~4.1} that allows us to translate our problem into real dynamics. This will be explained in the next section, but let us first recall the definition and some facts about $a$-adic algebras from \cite{KOQ} and \cite{Oml}, see also \cite{hr}*{Sections~10 and~25} for more on $a$-adic numbers.
Let $a=(a_k)_{k\in\mathbb{Z}}$ be a sequence of numbers in $\mathbb{N}^\times\setminus\{1\}$, and define the \emph{$a$-adic numbers} as the abelian group of sequences
\[\begin{array}{c}
\Omega_a = \left\{ x\in\prod\limits_{k=-\infty}^{\infty}\{0,1,\dotsc,a_k-1\} : \text{there exists $\ell\in\mathbb{Z}$ such that $x_k=0$ for all $k<\ell$}\right\}
\end{array}\]
under addition with carry (that is, like a doubly infinite odometer). The family of all subgroups $\{x\in\Omega_a:x_k=0\text{ for }k<\ell\}$ forms a neighborhood basis of the identity. This induces a topology that makes $\Omega_a$ a totally disconnected, locally compact Hausdorff group. The \emph{$a$-adic integers} form the compact open subgroup
\begin{equation}\label{eq:D-spectrum}
\Delta_a=\{x\in\Omega_a:x_k=0\text{ for }k<0\}\subset\Omega_a.
\end{equation}
For $k \in \mathbb{Z}$, define the sequence $e_k \in \Omega_a$ by $(e_k)_\ell=\delta_{k \ell}$. For $k\geq 1$, we may associate the rational number $(a_{-1}a_{-2}\dotsm a_{-k})^{-1}$ with $e_{-k}$ to get an injective group homomorphism from the non-cyclic subgroup
\[\begin{array}{c}
N_a=\left\{\frac{j}{a_{-1}a_{-2}\cdots a_{-k}}:j\in\mathbb{Z}, k\geq 1\right\} \subset \mathbb{Q}
\end{array}\]
into $\Omega_a$ with dense range. Note that $N_a$ contains $\mathbb{Z} \subset \mathbb{Q}$, and by identifying $N_a$ and $\mathbb{Z} \subset N_a$ with their images under the embedding into $\Omega_a$, it follows that $N_a \cap\Delta_a = \mathbb{Z}$.
The subgroups $N_a \cap\{x\in\Omega_a:x_k=0\text{ for }k<\ell\}$ for $\ell\in\mathbb{Z}$ give rise to a subgroup topology of $N_a$, and $\Omega_a$ is the Hausdorff completion (i.e.\ inverse limit completion) of $N_a$ with respect to this filtration. Therefore, the class of $a$-adic numbers $\Omega_a$ comprises all groups that are Hausdorff completions of non-cyclic subgroups of $\mathbb{Q}$. Loosely speaking, the negative part of the sequence $a$ determines a subgroup $N_a$ of $\mathbb{Q}$, and the positive part determines a topology that gives rise to a completion of $N_a$. Given a sequence $a$, let $a^*$ denote the dual sequence defined by $a^*_k=a_{-k}$, and write $N_a^*$ and $\Omega_a^*$ for the associated groups.
Let $H_a$ be any non-trivial subgroup of $\mathbb{Q}^\times_+$ acting on $N_a$ by continuous multiplication, meaning that for all $h\in H_a$, the map $N_a\to N_a$, $x\mapsto hx$ is continuous with respect to the topology described above. The largest subgroup with this property is generated by the primes dividing infinitely many terms of both the positive and negative tail of the sequence $a$, see \cite{KOQ}*{Corollary~2.2}, so we must assume that this subgroup is non-trivial (which holds in the cases we study). Then $H_a$ also acts on $\Omega_a$ by multiplication, and therefore $N_a\rtimes H_a$ acts on $\Omega_a$ by an $ax+b$-action.
\begin{defn}\label{def:Q_a,H}
For a sequence $a=(a_k)_{k \in \mathbb{Z}}$ in $\mathbb{N}^\times\setminus\{1\}$ and a non-trivial subgroup $H_a$ of $\mathbb{Q}^\times_+$ acting by continuous multiplication on $N_a$, the crossed product $\overline{\mathbb{Q}Q}(a,H_a) := C_0(\Omega_a)\rtimes N_a\rtimes H_a$ is called the \emph{$a$-adic algebra} of $(a,H_a)$.
\end{defn}
Clearly, interchanging $a$ and $a^*$ and manipulating the position of $a_0$ will not affect any structural property on the level of algebras. In fact, for our purposes, it will usually be convenient to assume that $a=a^*$. Therefore, we will often use the positive tail of the sequence $a$ in the description of $N_a$, and think of $N_a$ as the inductive limit of the system $\left\lbrace (\mathbb{Z} ,\cdot a_k) : k \geq 0 \right\rbrace$ via the isomorphism induced by
\begin{equation}\label{eq:N_a ind lim}
\begin{gathered}
\begin{xy}
\xymatrix{
\mathbb{Z} \ar[rr]^{\cdot a_k} \ar[rd]_(0.3){\cdot \tfrac{1}{a_0a_1a_2\dotsm a_{k-1}}} & & \mathbb{Z} \ar[ld]^(0.3){\cdot \tfrac{1}{a_0a_1a_2\dotsm a_{k-1}a_k}}\\
& N_a &
}
\end{xy}
\end{gathered}
\end{equation}
\begin{rem}\label{rem:a-adic stability}
By \cite{KOQ}*{Corollary~2.8}, the $a$-adic algebra $\overline{\mathbb{Q}Q}(a,H_a)$ is always a non-unital UCT Kirchberg algebra, hence it is stable by Zhang's dichotomy, see \cite{Z} or \cite{rordam-zd}*{Proposition~4.1.3}.
An immediate consequence of \eqref{eq:N_a ind lim} is that
\[
\begin{array}{c}
C_0(\Omega_a)\rtimes N_a \cong \overline{\bigcup\limits_{k=0}^\infty C(\frac{1}{a_0\dotsm a_k}\Delta_a)\rtimes \frac{1}{a_0\dotsm a_k}\mathbb{Z}}.
\end{array}
\]
Moreover, by writing $\frac{1}{a_0\dotsm a_k}\Delta_a=\Delta_a\sqcup(\frac{a_0\dotsm a_k - 1}{a_0\dotsm a_k}+\Delta_a)\sqcup\dotsb\sqcup(\frac{1}{a_0\dotsm a_k}+\Delta_a)$ as a disjoint union of cosets and checking how the translation action of $\frac{1}{a_0\dotsm a_k}\mathbb{Z}$ interchanges the components of this decomposition, one sees that
\[
\begin{array}{c}
C(\frac{1}{a_0\dotsm a_k}\Delta_a)\rtimes \frac{1}{a_0\dotsm a_k}\mathbb{Z} \cong M_{a_0\dotsm a_k}\left(C(\Delta_a)\rtimes\mathbb{Z}\right).
\end{array}
\]
In particular, the natural embeddings of the increasing union above translates into embeddings into the upper left corners. Hence, it follows that $C_0(\Omega_a)\rtimes N_a$ is also stable.
\end{rem}
\begin{rem}\label{rem:supernatural}
A supernatural number is a function $d\colon\{\text{all primes}\} \to \mathbb{N} \cup \{\infty\}$, such that $\sum_{\text{$p$ prime}}d(p) = \infty$, and often written as a formal product $\prod_{\text{$p$ prime}} p^{d(p)}$. It is well known that there is a one-to-one correspondence between supernatural numbers and non-cyclic subgroups of $\mathbb{Q}$ containing $1$, and that the supernatural numbers form a complete isomorphism invariant both for the UHF algebras and the Bunce-Deddens algebras, see \cite{Gl} and \cite{BD}.
Every sequence $a=(a_k)_{k\geq 0}$ defines a function $d_a\colon\{\text{all primes}\} \to \mathbb{N} \cup \{\infty\}$ given by $d_a(p)=\sup\{n \in \mathbb{N} : p^n|a_0a_1 \dotsm a_k \text{ for some } k\geq 0\}$. More intuitively, $d_a$ is thought of as the infinite product $d_a=a_0a_1a_2\dotsm$. Moreover (see e.g.\ \cite{KOQ}*{Lemma~5.1}), we have
\begin{equation}\label{eq:a-adic int}
\begin{array}{c}
\Delta_a\cong\prod\limits_{p\in d_a^{-1}(\infty)}\mathbb{Z}_p\times\prod\limits_{p\in d_a^{-1}(\mathbb{N})}\mathbb{Z}/p^{d_a(p)}\mathbb{Z},
\end{array}
\end{equation}
and thus the supernatural numbers are a complete isomorphism invariant for the homeomorphism classes of $a$-adic integers.
\end{rem}
Now, as in Section~\ref{sec:prelim}, let $S$ be a set consisting of relatively prime numbers, and let $H^+$ and $H$ denote the submonoid of $\mathbb{N}^\times$ and the subgroup of $\mathbb{Q}^\times_+$ generated by $S$, respectively. The sequence $a_S$ is defined as follows: Since $H^+$ is a subset of $\mathbb{N}^\times$, its elements can be sorted into increasing order $1<a_{S,0}<a_{S,1}<\dotsb$, where $a_{S,0}=\min S$. Finally, we set $a_{S,k}=a_{S,-k}$ for $k<0$. If $S$ is a finite set, an easier way to form a suitable sequence $a_S$ is to let $q$ denote the product of all elements of $S$, and set $a_{S,k}=q$ for all $k\in\mathbb{Z}$. In both cases, $a_S^*=a_S$ and $N_{a_S}=N$. Henceforth we fix such a sequence $a_S$ and denote $\Omega_{a_S}$ and $\Delta_{a_S}$ by $\Omega$ and $\Delta$, respectively. The purpose of self-duality of $a_S$ is to have $N^*=N$, making the statement of Theorem~\ref{thm:duality} slightly more convenient by avoiding the explicit use of $N^*$. The sequences $a_S$ are the ones associated with supernatural numbers $d$ for which $d(p) \in \{0,\infty\}$ for every prime $p$. In this case \eqref{eq:a-adic int} implies that
\[\begin{array}{c}
\Delta \cong\prod\limits_{p \in P}\mathbb{Z}_p \quad\text{and}\quad
\Omega \cong\prod\limits_{p \in P}\hspace*{-2mm}' \hspace*{2mm} \mathbb{Q}_p=\prod\limits_{p \in P}(\mathbb{Q}_p,\mathbb{Z}_p),
\end{array}\]
where the latter denotes the restricted product with respect to $\left\lbrace \mathbb{Z}_p : p\in P \right\rbrace$.
\begin{rem}\label{rem:spec of D_S}
The spectrum of the commutative subalgebra $\mathcal{D}_S$ of $\mathbb{Q}Q_S$ from Definition~\ref{def:D_S} coincides with the $a$-adic integers $\Delta$ described in \eqref{eq:D-spectrum}. Indeed, for every $X=m+h\mathbb{Z}\in\mathcal{F}$, the projection $e_X$ in $\mathcal{D}_S$ corresponds to the characteristic function on the compact open subset $m+h\Delta$ of $\Delta$. Moreover, this correspondence extends to an isomorphism between the $C^*$-algebra $\mathcal{B}_S\cong\mathcal{D}_S\rtimes\mathbb{Z}$ of Definition~\ref{def:B_S} and $C(\Delta)\rtimes\mathbb{Z}$, which is equivariant for the natural $H^+$-actions on the algebras, both denoted by $\alpha$.
\end{rem}
Let us write $e$ for the projection in $\overline{\mathbb{Q}Q}(a_S,H)$ representing the characteristic function on $\Delta$ in $C_0(\Omega)$. It is explained in \cite{Oml}*{Section~11.6} that $e$ is a full projection, and thus, by using Remark~\ref{rem:spec of D_S} together with \eqref{eq:B_S and Q_S as crossed products}, we have
\begin{equation}\label{eq:full corner Q_S}
e\overline{\mathbb{Q}Q}(a_S,H)e
\cong (C(\Delta) \rtimes \mathbb{Z}) \rtimes^e_\alpha H^+
\cong (\mathcal{D}_S \rtimes \mathbb{Z}) \rtimes^e_\alpha H^+
\cong \mathcal{B}_S \rtimes^e_\alpha H^+
\cong \mathbb{Q}Q_S.
\end{equation}
In fact, since $N$ coincides with $(H^+)^{-1}\mathbb{Z}$, the above also follows from \cite{Lac}. Moreover, the argument in \cite{Oml} does not require $H$ to be non-trivial, so it can be used together with Remark~\ref{rem:spec of D_S} and \eqref{eq:B_S and Q_S as crossed products} to get
\begin{equation}\label{eq:full corner B_S}
e(C_0(\Omega) \rtimes N)e
\cong C(\Delta) \rtimes \mathbb{Z}
\cong \mathcal{D}_S \rtimes \mathbb{Z}
\cong \mathcal{B}_S.
\end{equation}
Hence, by applying Remark~\ref{rem:a-adic stability} we arrive at the following result:
\begin{prop}\label{prop:stable Q_S}
The stabilization of $\mathbb{Q}Q_S$ is isomorphic to $\overline{\mathbb{Q}Q}(a_S,H)$, and the stabilization of $\mathcal{B}_S$ is isomorphic to $C_0(\Omega)\rtimes N$.
\end{prop}
Therefore Proposition~\ref{prop:stable Q_S} gives an alternative way to see that $\mathbb{Q}Q_S$ is a unital UCT Kirchberg algebra, which is also a consequence of Proposition~\ref{prop:Q_S as O-alg}.
\section{Comparison with real dynamics}\label{sec:comp with real dyn}
Let $S$ and $H$ be as specified in Section~\ref{sec:prelim}, let $a=(a_k)_{k \in \mathbb{Z}}$ be a sequence in $\mathbb{N}^\times \setminus\{1\}$, and let $H_a$ be a non-trivial subgroup of $\mathbb{Q}_+^\times$ that acts on $N_a$ by continuous multiplication. For convenience, we will assume $a^*= a$ so that $N_a^*=N_a$. Moreover, $N_a$ acts by translation and $H_a$ acts by multiplication on $\mathbb{R}$, respectively, giving rise to an $ax+b$-action of $N_a\rtimes H_a$ on $\mathbb{R}$. Let $\widehat{N}_a$ denote the Pontryagin dual of $N_a$. By \cite{KOQ}*{Theorem~3.3}, the diagonal embedding $N_a \to\mathbb{R}\times\Omega_a$ has discrete range, and gives an isomorphism
\[(\mathbb{R}\times\Omega_a)/N_a \cong \widehat{N}_a.\]
By applying Green's symmetric imprimitivity theorem, see e.g.\ \cite{Wil}*{Corollary~4.11}, we obtain that
\[C_0(\Omega_a) \rtimes N_a \sim_M C_0(\mathbb{R}) \rtimes N_a,\]
and this Morita equivalence is equivariant for the actions of $H_a$ by multiplication on one side and inverse multiplication on the other. The inverse map on $H_a$ does not have any impact on the crossed products, and thus
\[\overline{\mathbb{Q}Q}(a,H_a) \sim_M C_0(\mathbb{R}) \rtimes N_a \rtimes H_a.\]
All the above is explained in detail in \cite{KOQ}*{Proof of Theorem~4.1}. Moreover, recall that UCT Kirchberg algebras are either unital or stable, so by using Proposition~\ref{prop:stable Q_S} we get:
\begin{thm}\label{thm:duality}
The $a$-adic algebra $\overline{\mathbb{Q}Q}(a,H_a)$ is isomorphic to $C_0(\mathbb{R}) \rtimes N_a \rtimes H_a$. In particular, the stabilization of $\mathbb{Q}Q_S$ is isomorphic to $C_0(\mathbb{R}) \rtimes N \rtimes H$.
\end{thm}
\begin{rem}\label{rem:generality}
It follows from Theorem~\ref{thm:duality}, based on \cite{KOQ}*{Theorem~4.1}, that any $a$-adic algebra $\overline{\mathbb{Q}Q}(a,H_a)$ is isomorphic to a crossed product $C_0(\mathbb{R})\rtimes N_a \rtimes H_a$. Recall that $N_a$ can be any non-cyclic subgroup of $\mathbb{Q}$ and $H_a$ can be any non-trivial subgroup of $\mathbb{Q}_+^\times$ that acts on $N_a$ by multiplication. In the present work, we limit our scope to the case where $N_a$ and $H_a$ can be obtained from a family $S$ of relatively prime numbers for the benefit of a more concise exposition. In a forthcoming project, we aim at establishing analogous results to the ones proven here for all $a$-adic algebras.
\end{rem}
\begin{rem}\label{rem:ind limit C_0(R) cross N}
By employing the description of $N_a$ from \eqref{eq:N_a ind lim}, we can write $C_0(\mathbb{R}) \rtimes N_a$ as an inductive limit. For $k \geq 0$, define the automorphism $\gamma_k$ of $C_{0}(\mathbb{R})$ by
\[\begin{array}{c}
\gamma_0(f)(s)=f(s-1) \text{ and }
\gamma_{k+1}(f)(s)=f\bigl(s-\frac{1}{a_0a_1\dotsm a_k}\bigr),\quad f\in C_0(\mathbb{R}). \end{array}\]
Under the identification in \eqref{eq:N_a ind lim}, these automorphisms give rise to the natural $N_a$-action on $C_0(\mathbb{R})$, where $\gamma_k$ corresponds to the generator for the $k$th copy of $\mathbb{Z}$. For $k \geq 0$, let $u_k \in \mathcal{M}(C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z})$ denote the canonical unitary implementing $\gamma_k$ and consider the $*$-homomorphism $\phi_k\colon C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z} \to C_0(\mathbb{R}) \rtimes_{\gamma_{k+1}} \mathbb{Z}$ given by $\phi_k(f) = f$ and $\phi_k(fu_k) = fu_{k+1}^{a_k}$ for every $f\in C_0(\mathbb{R})$. The inductive limit description \eqref{eq:N_a ind lim} of $N_a$ now yields an isomorphism $\varphi\colon\varinjlim \left\lbrace C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z},\phi_k \right\rbrace \stackrel{\cong}{\longrightarrow} C_0(\mathbb{R}) \rtimes N_a$.
\end{rem}
\begin{rem}\label{rem:duality B_S}
A modification of \cite{CuntzQ}*{Lemma~6.7}, using the inductive limit description from Remark~\ref{rem:ind limit C_0(R) cross N}, shows that $C_0(\mathbb{R}) \rtimes N_a$ is stable. Hence, it follows from the above together with Remark~\ref{rem:a-adic stability} that $C_0(\Omega_a) \rtimes N_a$ is isomorphic to $C_0(\mathbb{R}) \rtimes N_a$. In particular, Proposition~\ref{prop:stable Q_S} shows that the stabilization of $\mathcal{B}_S$ is isomorphic to $C_0(\mathbb{R}) \rtimes N$.
\end{rem}
We will make use of this fact below.
\begin{lem} \label{lem:torsion algebra Morita}
Let $\widetilde{\alpha}$ and $\beta$ denote the actions of $H_a$ on $C_0(\Omega_a)\rtimes N_a$ and $C_0(\mathbb{R})\rtimes N_a$, respectively. Then $\beta^{-1}$ is exterior equivalent to an action $\widetilde{\beta}$ for which there is an $\widetilde{\alpha}$ - $\widetilde{\beta}$-equivariant isomorphism $C_0(\Omega_a) \rtimes N_a \stackrel{\cong}{\longrightarrow} C_0(\mathbb{R}) \rtimes N_a$.
\end{lem}
\begin{proof}
The respective actions $\widetilde{\alpha}$ and $\beta^{-1}$ of $H_a$ are Morita equivalent by \cite{KOQ}*{Proof of Theorem~4.1}. Moreover, both $C^*$-algebras are separable and stable, see Remark~\ref{rem:duality B_S}. Therefore, \cite{Com}*{Proposition on p.~16} implies that the actions are also outer conjugate, and the statement follows.
\end{proof}
In the following, we denote by $\iota_{N_a}\colon C_0(\mathbb{R}) \into C_0(\mathbb{R}) \rtimes N_a$ the canonical embedding, which is equivariant for the respective $H_a$-actions $\beta$ (and also $\beta^{-1}$). We conclude this section by proving that $\iota_{N_a}$ induces an isomorphism between the corresponding $K_1$-groups.
\begin{prop} \label{prop:KtheoryA0}
The canonical embedding $\iota_{N_a}\colon C_0(\mathbb{R}) \into C_0(\mathbb{R})\rtimes N_a$ induces an isomorphism between the corresponding $K_1$-groups.
\end{prop}
\begin{proof}
Recall the isomorphism $\varphi\colon\varinjlim \left\lbrace C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z},\phi_k \right\rbrace \stackrel{\cong}{\longrightarrow} C_0(\mathbb{R}) \rtimes N_a$ from Remark~\ref{rem:ind limit C_0(R) cross N}. For $k \geq 0$, let $\iota_k\colon C_0(\mathbb{R})\to C_0(\mathbb{R})\rtimes_{\gamma_k}\mathbb{Z}$ be the canonical embedding. As $\iota_{k+1} = \phi_k \circ \iota_k$, we obtain the following commutative diagram
\begin{equation}\label{eq:ind lim C_0(R)CrossN_a}
\begin{gathered}
\begin{xy}
\xymatrix{
C_0(\mathbb{R}) \ar[r]^{\iota_{N_a}} \ar[d]_{\phi_{k,\infty} \circ \ \iota_k} & C_0(\mathbb{R}) \rtimes N_a \\
\varinjlim \left\lbrace C_0(\mathbb{R}) \rtimes_{\gamma_m} \mathbb{Z},\phi_m \right\rbrace \ar[ur]_\varphi
}
\end{xy}
\end{gathered}
\end{equation}
Here, $\phi_{k,\infty}\colon C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z} \to \varinjlim \left\lbrace C_0(\mathbb{R}) \rtimes_{\gamma_m} \mathbb{Z},\phi_m \right\rbrace$ denotes the canonical $*$-homomorphism given by the universal property of the inductive limit.
As $K_0(C_0(\mathbb{R})) = 0$, the Pimsner-Voiculescu sequence \cite{PV} for $\gamma_k \in \op{Aut}(C_0(\mathbb{R}))$ reduces to an exact sequence
\[
K_0(C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z}) \into K_1(C_0(\mathbb{R})) \stackrel{\op{id} - K_1(\gamma_k)}{\longrightarrow} K_1(C_0(\mathbb{R})) \stackrel{K_1(\iota_k)}{\onto} K_1(C_0(\mathbb{R}) \rtimes_{\gamma_k} \mathbb{Z}).
\]
For each $k\geq 0$, the automorphism $\gamma_k$ is homotopic to the identity on $\mathbb{R}$, so that $K_1(\gamma_k) = \op{id}$. It thus follows that $K_1(\iota_k)$ is an isomorphism. As $\iota_{k+1} = \phi_k \circ \iota_k$, we therefore get that $K_1(\phi_k)$ is an isomorphism as well. Hence, by continuity of $K$-theory, $K_1(\phi_{k,\infty})$ is an isomorphism. It now follows from \eqref{eq:ind lim C_0(R)CrossN_a} that $K_1(\iota_{N_a})$ is an isomorphism, which completes the proof.
\end{proof}
\section{\texorpdfstring{A decomposition of the $K$-theory of $\mathbb{Q}Q_S$}{A decomposition of the K-theory}} \label{sec:K-theory}
In this section, we show that $K_*(\mathbb{Q}Q_S)$ decomposes as a direct sum of a free abelian group and a torsion group, see Theorem~\ref{thm:decomposition of K-theory} and Corollary~\ref{cor:torsion and free part K-theory}. We would like to highlight that this is not just an abstract decomposition of $K_*(\mathbb{Q}Q_S)$, but a result that facilitates a description of the two parts by distinguished $C^*$-algebras associated to $S$, namely $M_{d^\infty} \rtimes^e_\alpha H^+$ for the torsion part, and $C_0(\mathbb{R}) \rtimes_\beta H$ for the free part. The free abelian part is then shown to have rank $2^{\lvert S\rvert - 1}$, see Proposition \ref{prop:K-theory torsion free part}, so that $\mathbb{Q}Q_S$ and $\mathbb{Q}Q_T$ can only be isomorphic if $S$ and $T$ have the same cardinality.
The following is the key tool for the proof of this section's main result, and we think it is of interest in its own right.
\begin{prop} \label{prop:splitted K-theory}
Let $k \in \mathbb{N}\cup \left\lbrace \infty \right\rbrace$, let $A$, $B$, and $C$ be $C^*$-algebras, and let $\alpha\colon \mathbb{Z}^k \curvearrowright A$, $\beta\colon \mathbb{Z}^k \curvearrowright B$, and $\gamma\colon \mathbb{Z}^k \curvearrowright C$ be actions. Let $v\colon \mathbb{Z}^k \to \mathcal U(\mathcal{M}(C))$ be a $\gamma$-cocycle and denote by $\tilde{\gamma}\colon \mathbb{Z}^k \curvearrowright C$ the induced action given by $\tilde{\gamma}_h = \operatorname{Ad}(v_h) \circ \gamma_h$ for $h \in \mathbb{Z}^k$. Let $\kappa\colon C \rtimes_{\tilde{\gamma}} \mathbb{Z}^k \stackrel{\cong}{\longrightarrow} C \rtimes_{\gamma} \mathbb{Z}^k$ be the $*$-isomorphism induced by the $\gamma$-cocycle $v$. Assume that $\varphi\colon A \to C$ is a non-degenerate $\alpha$ - $\gamma$-equivariant $*$-homomorphism and $\psi\colon B\to C$ a non-degenerate $\beta$ - $\tilde{\gamma}$-equivariant $*$-homomorphism such that $K_0(\varphi)$ and $K_1(\psi)$ are isomorphisms and $K_1(\varphi)$ and $K_0(\psi)$ are trivial. Then
\[
K_*(\varphi \rtimes \mathbb{Z}^k) \oplus K_*(\kappa \circ (\psi \rtimes \mathbb{Z}^k))\colon K_*(A \rtimes_\alpha \mathbb{Z}^k) \oplus K_*(B \rtimes_\beta \mathbb{Z}^k) \to K_*(C \rtimes_\gamma \mathbb{Z}^k)
\]
is an isomorphism.
\end{prop}
\begin{proof}
Consider the amplified action $\gamma^{(2)}\colon \mathbb{Z}^k \curvearrowright M_2(C)$ given by entrywise application of $\gamma$. Let $w\colon \mathbb{Z}^k \to \mathcal U(\mathcal{M}(M_2(C)))$ be the $\gamma^{(2)}$-cocycle given by $w_h= \operatorname{diag}(1,v_h)$ for $h \in \mathbb{Z}^k$. The induced $\mathbb{Z}^k$-action $\delta = \operatorname{Ad}(w) \circ \gamma^{(2)}$ satisfies $\delta_h(\operatorname{diag}(c,c')) = \operatorname{diag}(\gamma_h(c),\tilde{\gamma}_h(c'))$ for all $h \in \mathbb{Z}^k$ and $c,c' \in C$. Thus, $\eta = \varphi \oplus \psi\colon A \oplus B \to M_2(C)$ is a non-degenerate $\alpha \oplus \beta$ - $\delta$-equivariant $*$-homomorphism.
By additivity of $K$-theory, $K_*(\eta) = K_*(\varphi) + K_*(\psi)$. Hence, $K_*(\eta)$ is an isomorphism, as $K_0(\eta) = K_0(\varphi)$ and $K_1(\eta) = K_1(\psi)$. If $k \in \mathbb{N}$, an iterative use of the naturality of the Pimsner-Voiculescu sequence and the Five Lemma yields that $K_*(\eta \rtimes \mathbb{Z}^k)$ is an isomorphism. If $k = \infty$, it follows from continuity of $K$-theory that $K_*(\eta \rtimes \mathbb{Z}^\infty)$ is an isomorphism, since $K_*(\eta \rtimes \mathbb{Z}^k)$ is an isomorphism for every $k\in \mathbb{N}$.
Let $u \colon \mathbb{Z}^k \to \mathcal U(\mathcal{M}((A \oplus B) \rtimes_{\alpha \oplus \beta} \mathbb{Z}^k))$ and $\tilde{u} \colon \mathbb{Z}^k \to \mathcal U(\mathcal{M}(M_2(C)\rtimes_\delta \mathbb{Z}^k))$ denote the canonical representations, respectively. The covariant pair given by the natural inclusion $A \into 1_{\mathcal{M}(A)}((A \oplus B) \rtimes_{\alpha \oplus \beta} \mathbb{Z}^k) 1_{\mathcal{M}(A)}$ and the unitary representation $1_{\mathcal{M}(A)}u_h$, $h \in \mathbb{Z}^k$, gives rise to a $*$-homomorphism $\Phi_A \colon A \rtimes_\alpha \mathbb{Z}^k \to (A \oplus B) \rtimes_{\alpha \oplus \beta} \mathbb{Z}^k$. Similarly, we define $\Phi_B \colon B \rtimes_\beta \mathbb{Z}^k \to (A \oplus B) \rtimes_{\alpha \oplus \beta} \mathbb{Z}^k$. It is easy to check that $\Phi_A$ and $\Phi_B$ are orthogonal and
\[
\Phi_A \oplus \Phi_B \colon A \rtimes_\alpha \mathbb{Z}^k \oplus B \rtimes_\beta \mathbb{Z}^k \to (A \oplus B) \rtimes_{\alpha \oplus \beta} \mathbb{Z}^k
\]
is an isomorphism. Moreover, let $\tilde{\varphi} \colon A \rtimes_\alpha \mathbb{Z}^k \to M_2(C) \rtimes_\delta \mathbb{Z}^k$ be the $*$-homomorphism induced by the covariant pair in $\mathcal{M}(e_{11}(M_2(C)\rtimes_\delta \mathbb{Z}^k) e_{11})$ given by the composition of the embedding $C \into M_2(C)$ into the upper left corner with $\varphi$ and the unitary representation $e_{11}\tilde{u}_h$, $h \in \mathbb{Z}^k$. Define $\tilde{\psi} \colon B \rtimes_\beta \mathbb{Z}^k \to M_2(C) \rtimes_\delta \mathbb{Z}^k$ analogously by considering the embedding $C \into M_2(C)$ into the lower right corner. By construction, the following diagram commutes
\[
\xymatrix{
A \rtimes_\alpha \mathbb{Z}^k \oplus B \rtimes_\beta \mathbb{Z}^k \ar[drr]_{\tilde{\varphi} \oplus \tilde{\psi}} \ar[rr]_\cong^{\Phi_A \oplus \Phi_B} & & (A \oplus B) \rtimes_{\alpha \oplus \beta} \mathbb{Z}^k \ar[d]^{\eta \rtimes \mathbb{Z}^k} \\
& & M_2(C) \rtimes_\delta \mathbb{Z}^k
}\]
which shows that
\[
K_*(\tilde{\varphi}) \oplus K_*(\tilde{\psi})\colon K_*(A \rtimes_\alpha \mathbb{Z}^k) \oplus K_*(B \rtimes_\beta \mathbb{Z}^k) \to K_*(M_2(C) \rtimes_{\delta} \mathbb{Z}^k)
\]
is an isomorphism.
Let $\kappa'\colon M_2(C) \rtimes_{\delta} \mathbb{Z}^k \stackrel{\cong}{\longrightarrow} M_2(C) \rtimes_{\gamma^{(2)}} \mathbb{Z}^k$ denote the isomorphism induced by the $\gamma^{(2)}$-cocycle $w$. Then the following diagram commutes and the proof is complete:
\[
\xymatrix{
A \rtimes_\alpha \mathbb{Z}^k \ar[d]_{\tilde{\varphi}} \ar[r]^{\varphi \rtimes \mathbb{Z}^k} & C \rtimes_\gamma \mathbb{Z}^k \ar[rd]^{\op{id}_{C \rtimes_\gamma \mathbb{Z}^k} \oplus 0} \\
M_2(C) \rtimes_\delta \mathbb{Z}^k \ar[r]^{\kappa'}_\cong & M_2(C) \rtimes_{\gamma^{(2)}} \mathbb{Z}^k \ar[r]^\cong & M_2(C \rtimes_\gamma \mathbb{Z}^k) \\
B \rtimes_\beta \mathbb{Z}^k \ar[u]^{\tilde{\psi}} \ar[r]^*!/_0.5mm/{\labelstyle \psi \rtimes \mathbb{Z}^k} & C \rtimes_{\tilde{\gamma}} \mathbb{Z}^k \ar[r]^\kappa_\cong & C \rtimes_\gamma \mathbb{Z}^k \ar[u]_*!/_1.5mm/{\labelstyle 0 \oplus \op{id}_{C\rtimes_\gamma \mathbb{Z}^k}}
}
\]
\end{proof}
\begin{rem}
Proposition~\ref{prop:splitted K-theory} is true in a more general setting. In fact, $\mathbb{Z}^k$ could be replaced by any locally compact group $G$ with the following property: If $\varphi\colon A \to B$ is an $\alpha$ - $\beta$-equivariant $*$-homomorphism such that $K_*(\varphi)$ is an isomorphism, then $K_*(\varphi \rtimes G)$ is an isomorphism as well.
\end{rem}
\begin{rem}\label{rem:dilations}
Note that $K_1(M_{d^\infty}) = 0$ and the natural embedding $j\colon M_{d^\infty} \into \mathcal{B}_S$ induces an isomorphism between the corresponding $K_0$-groups. The invariance of $M_{d^\infty} \subset \mathcal{B}_S$ under the $H^+$-action $\alpha$, see Remark~\ref{rem:BD subalg B_S}, yields a non-degenerate $*$-homomorphism $j_\infty\colon M_{d^\infty,\infty} \to \mathcal{B}_{S,\infty}$ between the minimal automorphic dilations for $\alpha$, which is equivariant for the induced $H$-actions $\alpha_\infty$. From the concrete model of the minimal automorphic dilation as an inductive limit, see \cite{Lac}*{Proof of Theorem~2.1}, we conclude that $K_1(M_{d^\infty,\infty}) = 0$ and $K_0(j_\infty)$ is an isomorphism. Moreover, there is an isomorphism between $\mathcal{B}_S$ and $C(\Delta)\rtimes \mathbb{Z}$ that intertwines the actions of $H^+$, see Remark~\ref{rem:spec of D_S}. It then follows from \eqref{eq:full corner B_S} and \cite{Lac}*{Theorem~2.1} that $\alpha\colon H^+ \curvearrowright C(\Delta)\rtimes\mathbb{Z}$ dilates to $\widetilde{\alpha}\colon H \curvearrowright C_0(\Omega)\rtimes N$, where $\widetilde{\alpha}$ coincides with the $H$-action from Lemma~\ref{lem:torsion algebra Morita}. Consequently, there is an $\alpha_\infty$ - $\widetilde{\alpha}$-equivariant isomorphism $\mathcal{B}_{S,\infty} \stackrel{\cong}{\longrightarrow} C_0(\Omega)\rtimes N$.
\end{rem}
As in Section~\ref{sec:comp with real dyn}, let $\iota_N\colon C_0(\mathbb{R}) \into C_0(\mathbb{R})\rtimes N$ denote the canonical embedding. Note that $\iota_N$ is non-degenerate and equivariant with respect to the $H$-actions $\beta$ (and also $\beta^{-1}$).
\begin{thm}\label{thm:decomposition of K-theory}
The map
\[
K_*(j \rtimes^e H^+) \oplus K_*(\iota_N \rtimes H)\colon
K_*(M_{d^\infty} \rtimes^e_\alpha H^+) \oplus K_*(C_0(\mathbb{R}) \rtimes_{\beta} H) \to K_*(\mathbb{Q}Q_S),
\]
induced by the identifications $\mathcal{B}_S \rtimes^e_\alpha H^+ \cong \mathbb{Q}Q_S$ from \eqref{eq:B_S and Q_S as crossed products} and $(C_0(\mathbb{R}) \rtimes N) \rtimes_\beta H \cong \mathbb{Q}Q_S \otimes \mathcal{K}$ from Theorem~\ref{thm:duality}, is an isomorphism.
\end{thm}
\begin{proof}
By combining Remark~\ref{rem:dilations} with Lemma~\ref{lem:torsion algebra Morita}, there exist an $H$-action $\widetilde\beta$ on $C_0(\mathbb{R}) \rtimes N$ that is exterior equivalent to $\beta^{-1}$, and a non-degenerate $\alpha_\infty$ - $\widetilde\beta$-equivariant $*$-homomorphism $\psi\colon M_{d^\infty,\infty}\to C_0(\mathbb{R}) \rtimes N$, namely the one coming from the composition
\[
M_{d^\infty,\infty} \stackrel{j_\infty}{\longrightarrow}
\mathcal{B}_{S,\infty} \stackrel{\cong}{\longrightarrow}
C_0(\Omega) \rtimes N \stackrel{\cong}{\longrightarrow}
C_0(\mathbb{R}) \rtimes N.
\]
Since $K_1(j_\infty) = 0$ and $K_0(j_\infty)$ is an isomorphism by Remark~\ref{rem:dilations}, the same also holds for $K_1(\psi)$ and $K_0(\psi)$, respectively. Now Proposition~\ref{prop:KtheoryA0} gives that $K_0(\iota_N)$ is trivial and $K_1(\iota_N)$ is an isomorphism. As $\psi$ and $\iota_N$ are non-degenerate,
\begin{multline*}
K_*(\kappa \circ (\psi \rtimes H)) \oplus K_*(\iota_N \rtimes H)\colon \\
K_*(M_{d^\infty,\infty} \rtimes_{\alpha_\infty} H) \oplus K_*(C_0(\mathbb{R}) \rtimes_{\beta^{-1}} H) \to K_*((C_0(\mathbb{R})\rtimes N)\rtimes_{\beta^{-1}} H)
\end{multline*}
is an isomorphism by Proposition~\ref{prop:splitted K-theory}, where $\kappa\colon(C_0(\mathbb{R})\rtimes N)\rtimes_{\widetilde\beta} H \stackrel{\cong}{\longrightarrow} (C_0(\mathbb{R})\rtimes N) \rtimes_{\beta^{-1}} H$ denotes the isomorphism induced by a fixed $\beta^{-1}$-cocycle defining $\widetilde{\beta}$. Since $K_*(j \rtimes^e H^+)$ corresponds to $K_*(j_\infty \rtimes H)$ under the isomorphisms induced by the minimal automorphic dilations, we also get that $K_*(j \rtimes^e H^+)$ corresponds to $K_*(\kappa \circ (\psi \rtimes H))$ under the isomorphism $K_*(\mathcal{B}_S \rtimes^e_\alpha H^+) \cong K_*((C_0(\mathbb{R}) \rtimes N) \rtimes_{\beta^{-1}} H)$. As $\mathcal{B}_S \rtimes^e_\alpha H^+ \cong \mathbb{Q}Q_S$ by \eqref{eq:B_S and Q_S as crossed products} and $(C_0(\mathbb{R})\rtimes N)\rtimes_{\beta^{-1}} H \cong (C_0(\mathbb{R})\rtimes N)\rtimes_\beta H \cong \mathbb{Q}Q_S \otimes \mathcal{K}$ by Theorem~\ref{thm:duality}, the conclusion follows.
\end{proof}
We will now show that the two summands appearing in Theorem~\ref{thm:decomposition of K-theory} correspond to the torsion and the free part of $K_*(\mathbb{Q}Q_S)$, respectively.
\begin{prop}\label{prop:K-theory torsion free part}
For $i=0,1$, $K_i(C_0(\mathbb{R}) \rtimes_{\beta} H)$ is the free abelian group in $2^{\lvert S \rvert-1}$ generators.
\end{prop}
\begin{proof}
The result holds for any non-trivial subgroup $H$ of $\mathbb{Q}^\times_+$, and we prove it in full generality, not necessarily requiring $H$ to be generated by $S$. Suppose that $k$ is the (possibly infinite) rank of $H$. Let $\{ h_i : 1 \leq i \leq k \}$ be a minimal generating set for $H$. For $t\in [0,1]$ and $1 \leq i\leq k$, define $\tilde{\beta}_{h_i,t} \in \operatorname{Aut}(C_0(\mathbb{R}))$ by $\tilde{\beta}_{h_i,t}(f)(s) = f((th_i^{-1} + 1 - t)s)$. Note that $\tilde{\beta}_{h_i,t}$ is indeed an automorphism as $h_i > 0$. Since multiplication on $\mathbb{R}$ is commutative, we see that for each $t \in [0,1]$, $\left\lbrace \tilde{\beta}_{h_i,t} \right\rbrace_{1\leq i\leq k}$ defines an $H$-action. Let $\gamma\colon H \curvearrowright C_0([0,1],C_0(\mathbb{R}))$ be the action given by $\gamma_{h_i}(f)(t) = \tilde{\beta}_{h_i,t}(f(t))$. We have the following short exact sequence of $C^*$-algebras:
\[
C_0((0,1],C_0(\mathbb{R}))\rtimes_\gamma H \into C_0([0,1],C_0(\mathbb{R}))\rtimes_\gamma H \stackrel{\operatorname{ev}_0\rtimes H}{\onto} C_0(\mathbb{R})\rtimes_{\op{id}} H
\]
The Pimsner-Voiculescu sequence shows that $K_*(C_0((0,1],C_0(\mathbb{R}))\rtimes_\gamma H) = 0$, where we also use continuity of $K$-theory if $k = \infty$. The six-term exact sequence corresponding to the above extension now yields that $K_*(\operatorname{ev}_0 \rtimes H)$ is an isomorphism. A similar argument shows that $K_*(\operatorname{ev}_1 \rtimes H)$ is an isomorphism. We therefore conclude that for $i=0,1$,
\[K_i(C_0(\mathbb{R}) \rtimes_{\beta} H) \cong K_i(C_0(\mathbb{R}) \rtimes_{\op{id}} H) \cong K_i(C_0(\mathbb{R}) \otimes C^*(H)).\]
This completes the proof as $K_i(C_0(\mathbb{R}) \otimes C^*(H))$ is the free abelian group in $2^{k-1}$ generators.
\end{proof}
\begin{prop} \label{prop:UHF cross prod torsion K-th}
$K_*(M_{d^\infty} \rtimes^e_\alpha H^+)$ is a torsion group, which is finite if $S$ is finite.
\end{prop}
\begin{proof}
As in Remark \ref{rem:BD subalg B_S}, we think of $M_{d^\infty}$ as the inductive limit $(M_p(\mathbb{C}),\iota_{p,pq})_{p,q \in H^+}$ with $\iota_{p,pq}\colon M_p(\mathbb{C}) \to M_{pq}(\mathbb{C})$ given by $e_{i, j}^{(p)} \otimes 1 \mapsto \sum_{k=0}^{q-1}e_{i+pk, j+pk}^{(pq)} \otimes 1$. With this perspective, $\alpha$ satisfies $\alpha_q(e^{(p)}_{m,m}) = e^{(pq)}_{qm,qm}$ for all $p,q \in H^+$ and $0 \leq m \leq p-1$. From this, one concludes that for $q \in H^+$, $K_0(\alpha_q)$ is given by multiplication with $1/q$ on $K_0(M_{d^\infty}) \cong N$. Hence, for $p \in S$, there exists a Pimsner-Voiculescu type exact sequence, see \cite{Pas}*{Theorem~4.1} and also \cite{CunPV}*{Proof of Proposition~3.1},
\[
\xymatrix{
0 \ar[r] & K_1(M_{d^\infty} \rtimes^e_{\alpha_p} \mathbb{N}) \ar[r] & N \ar[r]^{\frac{p-1}{p}} & N \ar[r] & K_0(M_{d^\infty} \rtimes^e_{\alpha_p} \mathbb{N}) \ar[r] & 0
}
\]
This shows that $K_1(M_{d^\infty} \rtimes^e_{\alpha_p} \mathbb{N}) = 0$ and $K_0(M_{d^\infty} \rtimes^e_{\alpha_p} \mathbb{N}) \cong N / (p-1)N$. In particular, $K_*(M_{d^\infty} \rtimes^e_{\alpha_p} \mathbb{N})$ is a torsion group. If $S$ is finite, we can write $M_{d^\infty} \rtimes^e_\alpha H^+$ as an $\lvert S\rvert$-fold iterative crossed product by $\mathbb{N}$ and apply the Pimsner-Voiculescu type sequence repeatedly to get that $K_*(M_{d^\infty} \rtimes^e_\alpha H^+)$ is a torsion group. If $S$ is infinite, we may use continuity of $K$-theory to conclude the claim from the case of finite $S$.
Finiteness of $S$ implies finiteness of $K_*(M_{d^\infty} \rtimes^e_\alpha H^+)$ because $N/(p-1)N$ is finite for all $p \in S$, which follows from the forthcoming Lemma~\ref{lem:N/gN}.
\end{proof}
Using Proposition~\ref{prop:K-theory torsion free part} and \ref{prop:UHF cross prod torsion K-th}, we record the following immediate consequence of the decomposition of $K_*(\mathbb{Q}Q_S)$ given in Theorem~\ref{thm:decomposition of K-theory}.
\begin{cor}\label{cor:torsion and free part K-theory}
$K_*(\mathbb{Q}Q_S)$ decomposes as a direct sum of a free abelian group and a torsion group. More precisely, $K_*(j \rtimes^e H^+)$ is a split-injection onto the torsion subgroup and $K_*(\iota_N \rtimes H)$ is a split-injection onto the torsion free part of $K_*(\mathbb{Q}Q_S)$, respectively.
\end{cor}
\section{The torsion subalgebra}\label{sec:torsion part}
Within this section we analyze the structure of $M_{d^\infty} \rtimes^e_\alpha H^+$ and its role relative to $\mathbb{Q}Q_S$ more closely. First, we show that the inclusion $M_{d^\infty} \into \mathcal{B}_S$ is equivariantly sequentially split with respect to the $H^+$-actions $\alpha$ in the sense of \cite{BarSza1}*{Remark~3.17}, see Proposition~\ref{prop:UHF into BD is eq seq split}. According to \cite{BarSza1}, we thus get that $M_{d^\infty} \rtimes^e_\alpha H^+$ shares many structural properties with $\mathcal{B}_S \rtimes^e_\alpha H^+ \cong \mathbb{Q}Q_S$. Most importantly, $M_{d^\infty} \rtimes^e_\alpha H^+$ is a unital UCT Kirchberg algebra, see Corollary~\ref{cor:UHF into BD seq split cr pr}. By simplicity of $M_{d^\infty} \rtimes^e_\alpha H^+$, we conclude that this $C^*$-algebra is in fact isomorphic to the natural subalgebra $\mathbb{C}A_S$ of $\mathbb{Q}Q_S$ that is generated by all the isometries $u^ms_p$ with $p \in S$ and $0 \leq m \leq p-1$, see Corollary~\ref{cor:subalgebra for torsion part}. By Corollary \ref{cor:torsion and free part K-theory}, it thus follows that the canonical inclusion $\mathbb{C}A_S \into \mathbb{Q}Q_S$ induces a split-injection onto the torsion subgroup of $K_*(\mathbb{Q}Q_S)$. Due to this remarkable feature, we call $\mathbb{C}A_S$ the \emph{torsion subalgebra} of $\mathbb{Q}Q_S$.
We then present two additional interesting perspectives on the torsion subalgebra $\mathbb{C}A_S$. Firstly, $\mathbb{C}A_S$ can be described as the boundary quotient of the right LCM subsemigroup $U = \{ (m,p) : p \in H^+, 0 \leq m \leq p-1\}$ of $\mathbb{N} \rtimes H^+$ in the sense of \cite{BRRW}, see Proposition~\ref{prop:A_S as BQ of U}. This yields a commutative diagram which might be of independent interest, see Remark~\ref{rem:A_S as BQ of U}.
Secondly, the boundary quotient perspective allows us to identify $\mathbb{C}A_S$ for $k:= \lvert S \rvert < \infty$ with the $C^*$-algebra of the $k$-graph $\Lambda_{S,\theta}$ consisting of a single vertex with $p$ loops of color $p$ for every $p \in S$, see Corollary~\ref{cor:tor subalgebra via k-graphs}. Quite intriguingly, $\Lambda_{S,\theta}$ differs from the canonical $k$-graph model $\Lambda_{S,\sigma}$ for $\bigotimes_{p \in S} \mathbb{C}O_p$ only with respect to its factorization rules, see Remark~\ref{rem:Lambda_S flip}. In fact, the corresponding $C^*$-algebras coincide for $\lvert S \rvert \leq 2$, see Proposition~\ref{prop:A_S for |S|=2}. After obtaining these intermediate results, we were glad to learn from Aidan Sims that, in view of Conjecture~\ref{conj:k-graph}, it is reasonable to expect that the results for $\lvert S \rvert \leq 2$ already display the general form, i.e.\ that $\mathbb{C}A_S$ is always isomorphic to $\bigotimes_{p \in S} \mathbb{C}O_p$.
\begin{prop}\label{prop:UHF into BD is eq seq split}
The embedding $M_{d^\infty} \into \mathcal{B}_S$ is $\alpha$-equivariantly sequentially split.
\end{prop}
\begin{proof}
Let $\iota\colon M_{d^\infty} \into \prod_{p \in H^+}M_{d^\infty} \big / \bigoplus_{p \in H^+} M_{d^\infty}$ denote the canonical inclusion as constant sequences and $\bar{\alpha}$ the induced action of $H^+$ on $\prod_{p \in H^+}M_{d^\infty} \big / \bigoplus_{p \in H^+} M_{d^\infty}$ given by componentwise application of $\alpha_h$ for $h \in H^+$. Clearly, $\prod_{p \in H^+}M_{d^\infty} \big / \bigoplus_{p \in H^+} M_{d^\infty}$ is canonically isomorphic to the sequence algebra of $M_{d^\infty}$, $\prod_{n \in \mathbb{N}}M_{d^\infty} \big / \bigoplus_{n \in \mathbb{N}} M_{d^\infty}$. In particular, this isomorphism intertwines $\bar{\alpha}$ and the natural $H^+$-action on the sequence algebra induced by $\alpha$. We therefore need to construct an $\alpha$ - $\bar{\alpha}$-equivariant $*$-homomorphism $\chi\colon \mathcal{B}_S \to \prod_{p \in H^+}M_{d^\infty} \big / \bigoplus_{p \in H^+} M_{d^\infty}$ making the following diagram commute:
\begin{equation}\label{dia:UHF into BD is eq seq split}
\begin{gathered}
\begin{xy}
\xymatrix{
M_{d^\infty} \ar@<-1ex>@{^{(}->}[dr] \ar[rr]^(0.35){\iota}&&\prod\limits_{p \in H^+}M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty}\\
&\mathcal{B}_S \ar@<-1ex>@{-->}_(0.35){\chi}[ur]}
\end{xy}
\end{gathered}
\end{equation}
Recall the inductive system $(M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}),\iota_{p,pq})_{p,q \in H^+}$ from Remark~\ref{rem:BD subalg B_S} whose inductive limit is isomorphic to $\mathcal{B}_S$. The canonical subalgebra $M_p(\mathbb{C}) \subset M_p(\mathbb{C}) \otimes C^*(\mathbb{Z})$ can in this way be considered as a subalgebra of $M_{d^\infty} \subset \mathcal{B}_S$. For each $p \in H^+$, the map $\chi_p\colon M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}) \to M_p(\mathbb{C})$ given by $\sum_{k=1}^n a_k \otimes u^k \mapsto \sum_{k=1}^n a_k$ is a $*$-homomorphism. Thus, the family $(\chi_p)_{p \in H^+}$ gives rise to a $*$-homomorphism
\[
\begin{array}{c}
\chi'\colon \prod\limits_{p \in H^+} M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}) \to \prod\limits_{p \in H^+} M_{d^\infty}.\end{array}
\]
Clearly, $\chi'\bigl(\bigoplus_{p \in H^+} M_p(\mathbb{C}) \otimes C^*(\mathbb{Z})\bigr) \subset \bigoplus_{p \in H^+} M_{d^\infty}$, so $\chi'$ induces a map
\[
\begin{array}{c} \chi\colon \prod\limits_{p \in H^+} M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}) \bigr/ \bigl(\bigoplus\limits_{p \in H^+} M_p(\mathbb{C}) \otimes C^*(\mathbb{Z})\bigr) \to \prod\limits_{p \in H^+}M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty}. \end{array}
\]
Using the inductive limit description of $\mathcal{B}_S$ from Remark~\ref{rem:BD subalg B_S}, we can think of $\mathcal{B}_S$ as a subalgebra of $\prod_{p \in H^+} M_p(\mathbb{C}) \otimes C^*(\mathbb{Z}) \bigr/ \bigl(\bigoplus_{p \in H^+} M_p(\mathbb{C}) \otimes C^*(\mathbb{Z})\bigr)$. Moreover, because of the concrete realization of $M_{d^\infty}$ as the inductive limit associated with $(M_p(\mathbb{C}),\iota_{p,pq})_{p,q \in H^+}$, we have that $\chi$ restricts to the canonical embedding $\iota$ on $M_{d^\infty}$. Hence, \eqref{dia:UHF into BD is eq seq split} is commutative, when we ignore the question of equivariance, or, in other words, $\iota$ is sequentially split as an ordinary $*$-homomorphism. However, we claim that we also have a commutative diagram
\begin{equation}\label{dia:UHF into BD equivariance}
\begin{gathered}
\begin{xy}
\xymatrix{
\prod\limits_{p \in H^+}M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty} \ar^{\bar{\alpha}_p}[r] & \prod\limits_{p \in H^+}M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty} \\
\mathcal{B}_S \ar[u]^(0.4){\chi} \ar_{\alpha_p}[r] & \mathcal{B}_S \ar[u]_(0.4){\chi}
}
\end{xy}
\end{gathered}
\end{equation}
for each $p \in H^+$. Let us expand this diagram for fixed $p$ and arbitrary $q \in H^+$ to:
\begin{equation}\label{dia:UHF into BD equivariance zoom}
\begin{gathered}
\scalebox{0.8}{\begin{xy}
\xymatrix{
\prod\limits_{p \in H^+}M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty} \ar^{\bar{\alpha}_p}[rrr] &&& \prod\limits_{p \in H^+}M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty} \\
&M_q(\mathbb{C}) \ar@{_{(}->}[ul] \ar^{\alpha_p}[r] & M_{pq}(\mathbb{C}) \ar@{^{(}->}[ur] \\
&M_q(\mathbb{C}) \otimes C^*(\mathbb{Z}) \ar@{^{(}->}[dl] \ar^{\chi_q}[u] \ar_*!/_0.5mm/{\labelstyle \alpha_p}[r] & M_{pq}(\mathbb{C}) \otimes C^*(\mathbb{Z}) \ar_{\chi_{pq}}[u] \ar@{_{(}->}[dr]\\
\mathcal{B}_S \ar[uuu]^{\chi} \ar_*!/_0.5mm/{\labelstyle \alpha_p}[rrr] &&& \mathcal{B}_S \ar[uuu]_{\chi}
}
\end{xy}}
\end{gathered}
\end{equation}
It is clear that the four outer chambers are commutative, so we only need to check the centre. For every $0 \leq i,j \leq q-1$, we get
\[\chi_{pq} \circ \alpha_p (e_{i, j}^{(q)} \otimes u) = \chi_{pq}(e_{pi, pj}^{(pq)} \otimes u^p) = e_{pi, pj}^{(pq)} = \alpha_p \circ \chi_q (e_{i, j}^{(q)} \otimes u)\]
and therefore $\chi_{pq} \circ \alpha_p = \alpha_p \circ \chi_q$ on $M_q(\mathbb{C}) \otimes C^*(\mathbb{Z})$. This establishes the claim as we have $M_{d^\infty} = \varinjlim(M_q(\mathbb{C}),q\in H^+)$ and $\mathcal{B}_S = \varinjlim(M_q(\mathbb{C}) \otimes C^*(\mathbb{Z}),q\in H^+)$.
\end{proof}
\begin{cor}\label{cor:UHF into BD seq split cr pr}
The inclusion $M_{d^\infty} \rtimes^e_\alpha H^+ \to \mathcal{B}_S \rtimes^e_\alpha H^+$ is sequentially split. In particular, $M_{d^\infty} \rtimes^e_\alpha H^+$ is a UCT Kirchberg algebra.
\end{cor}
\begin{proof}
By Proposition~\ref{prop:UHF into BD is eq seq split}, we know that $M_{d^\infty} \into \mathcal{B}_S$ is $\alpha$-equivariantly sequentially split. As this inclusion preserves the units, we can use the universal property of the semigroup crossed products $M_{d^\infty} \rtimes^e_\alpha H^+$ and $\mathcal{B}_S \rtimes^e_\alpha H^+$ to obtain a commutative diagram of $*$-homomorphisms
\[
\xymatrix{
M_{d^\infty} \rtimes^e_\alpha H^+ \ar[dr] \ar[rr]^(0.4){\iota \rtimes^e H^+} && \left(\prod\limits_{p \in H^+} M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty} \right)\rtimes^e_{\bar{\alpha}} H^+\\
&\mathcal{B}_S \rtimes^e_\alpha H^+ \ar[ur]
}
\]
Again by the universal property of semigroup crossed products, there is a natural $*$-homomorphism
\[\begin{array}{c}
\psi\colon\left(\prod\limits_{p \in H^+} M_{d^\infty} \big / \bigoplus\limits_{p \in H^+} M_{d^\infty} \right)\rtimes^e_{\bar{\alpha}} H^+ \to \prod\limits_{p \in H^+} M_{d^\infty} \rtimes^e_\alpha H^+ \big /\bigoplus\limits_{p \in H^+} M_{d^\infty} \rtimes^e_\alpha H^+
\end{array}\]
such that $\psi \circ (\iota \rtimes^e H^+)$ coincides with the standard embedding. This shows that the inclusion $M_{d^\infty} \rtimes^e_\alpha H^+ \to \mathcal{B}_S \rtimes^e_\alpha H^+$ is sequentially split. It now follows from \cite{BarSza1}*{Theorem~2.9~(1)+(8)} that $M_{d^\infty} \rtimes^e_\alpha H^+$ is a Kirchberg algebra. Moreover, $M_{d^\infty} \rtimes^e_\alpha H^+$ satisfies the UCT by \cite{BarSza1}*{Theorem~2.10}. We note that this part also follows from standard techniques combined with the central result of \cite{Lac}.
\end{proof}
We will now see that simplicity enables us to identify $M_{d^\infty} \rtimes^e_\alpha H^+$ with the following natural subalgebra of $\mathbb{Q}Q_S$, whose name is justified by the next result.
\begin{defn}\label{def:torsion subalgebra}
The \emph{torsion subalgebra} $\mathbb{C}A_S$ of $\mathbb{Q}Q_S$ is the $C^*$-subalgebra of $\mathbb{Q}Q_S$ generated by $\{u^ms_p : p \in S, 0 \leq m \leq p-1\}$.
\end{defn}
Note that for $S=\{p\}$, the subalgebra $\mathbb{C}A_S$ is canonically isomorphic to $\mathbb{C}O_p$.
\begin{cor}\label{cor:subalgebra for torsion part}\label{cor:torsion subalgebra justification}
The isomorphism $\mathcal{B}_S \rtimes^e_\alpha H^+ \stackrel{\cong}{\longrightarrow} \mathbb{Q}Q_S$ from \eqref{eq:B_S and Q_S as crossed products} restricts to an isomorphism $M_{d^\infty} \rtimes^e_\alpha H^+ \stackrel{\cong}{\longrightarrow} \mathbb{C}A_S$. In particular, the canonical inclusion $\mathbb{C}A_S \into \mathbb{Q}Q_S$ induces a split-injection onto the torsion subgroup of $K_*(\mathbb{Q}Q_S)$.
\end{cor}
\begin{proof}
$\mathbb{C}A_S$ contains the copy of $M_{d^\infty} \subset \mathcal{B}_S$ described in Remark~\ref{rem:BD subalg B_S}. Together with $s_p, p \in S$, which are also contained in $\mathbb{C}A_S$, this defines a covariant representation of $(M_{d^\infty},\alpha)$ inside $\mathbb{C}A_S$. The resulting $*$-homomorphism $M_{d^\infty} \rtimes^e_\alpha H^+ \to \mathbb{C}A_S$ is surjective. By Corollary~\ref{cor:UHF into BD seq split cr pr}, $M_{d^\infty} \rtimes^e_\alpha H^+$ is simple, so this map is an isomorphism. The second claim is due to Corollary~\ref{cor:torsion and free part K-theory}.
\end{proof}
Let us continue with the representation of $\mathbb{C}A_S$ as a boundary quotient. When $\lvert S\rvert=k<\infty$, this will lead us to a $k$-graph model for $\mathbb{C}A_S$ that is closely related to the canonical $k$-graph representation for $\bigotimes_{p \in S}\mathbb{C}O_p$, see Remark~\ref{rem:Lambda_S flip}. Consider the subsemigroup $U:= \{ (m,h) \in \mathbb{N} \rtimes H^+ : 0 \leq m \leq h-1\}$ of $\mathbb{N} \rtimes H^+$. Observe that $U$ is a right LCM semigroup because
\begin{equation}\label{eq:right LCM subsemigroup}
(m,h)U \cap (m',h')U = \bigl((m,h)(\mathbb{N} \rtimes H^+) \cap (m',h')(\mathbb{N} \rtimes H^+)\bigr) \cap U
\end{equation}
for all $(m,h),(m',h') \in U$, and $\mathbb{N} \rtimes H^+$ is right LCM. We note that $U$ can be used to describe $\mathbb{N} \rtimes H^+$ as a Zappa-Sz\'{e}p product $U \bowtie \mathbb{N}$, where action and restriction are given in terms of the generator $1 \in \mathbb{N}$ and $(m,h) \in U$ by
\[1.(m,h) = \begin{cases} (m+1,h) &\text{if } m<h-1, \\ (0,h) &\text{if } m=h-1,\end{cases} \quad \quad 1\rvert_{(m,h)} = \begin{cases} 0 &\text{if } m<h-1, \text{ and}\\ 1 &\text{if } m=h-1.\end{cases}\]
In the case of $H^+=\mathbb{N}^\times$ this has been discussed in detail in \cite{BRRW}*{Subsection~3.2} and the very same arguments apply for the cases we consider here.
\begin{prop}\label{prop:A_S as BQ of U}
$\mathbb{C}A_S$ is canonically isomorphic to the boundary quotient $\mathbb{Q}Q(U)$.
\end{prop}
\begin{proof}
Recall that $\mathbb{Q}Q(U)$ is the quotient of the full semigroup $C^*$-algebra $C^*(U)$ by relation \eqref{eq:BQ}. In particular, it is generated as a $C^*$-algebra by a representation $v$ of $U$ by isometries whose range projections are denoted $v^{\phantom{*}}_{(m,h)}v_{(m,h)}^*= e^{\phantom{*}}_{(m,h)U}$ for $(m,h) \in U$.
For every $h \in H^+$, we get a family of matrix units $(v^{\phantom{*}}_{(m,h)}v_{(n,h)}^*)_{0 \leq m,n \leq h-1}$ because
\[\begin{array}{l}
v_{(n,h)}^*v^{\phantom{*}}_{(m,h)} = v_{(n,h)}^*e^{\phantom{*}}_{(n,h)U \cap (m,h)U}v_{(m,h)} = \delta_{m,n} \quad
\text{and} \quad \sum\limits_{m=0}^{h-1} e_{(m,h)U} = 1
\end{array}\]
as $\{(m,h) : 0 \leq m \leq h-1\}$ is an accurate foundation set for $U$. That is to say that, for each $u \in U$, there is $0 \leq m \leq h-1$ such that $uU \cap (m,h)U \neq \emptyset$, and $(m,h)U \cap (n,h)U = \emptyset$ unless $m=n$, see \cite{bsBQforADS} for further details. Since
\[\begin{array}{lcl}
v^{\phantom{*}}_{(m,h)}v_{(n,h)}^* &=& v^{\phantom{*}}_{(m,h)} \bigl(\sum\limits_{k=0}^{h'-1}e^{\phantom{*}}_{(k,h')U}\bigr)v_{(n,h)}^*\\
&=& \sum\limits_{k=0}^{h'-1}v^{\phantom{*}}_{(m+hk,hh')}v_{(n+hk,hh')}^*
\end{array}\]
for each $h' \in H^+$, we see that $C^*(\{v^{\phantom{*}}_{(m,h)}v_{(n,h)}^* : h \in H^+, 0 \leq m,n \leq h-1\}) \subset \mathbb{Q}Q(U)$ is isomorphic to $M_{d^\infty}$. In fact, we get a covariant representation for $(M_{d^\infty},H^+,\alpha)$ as
\[v^{\phantom{*}}_{(0,p)}v^{\phantom{*}}_{(m,h)}v_{(n,h)}^*v_{(0,p)}^* = v^{\phantom{*}}_{(pm,ph)}v_{(pn,ph)}^*.\]
Thus we get a $*$-homomorphism $\varphi\colon \mathbb{C}A_S \cong M_{d^\infty} \rtimes^e_\alpha H^+ \to \mathbb{Q}Q(U)$ given by $u^ms_h \mapsto v_{(m,h)}$, see Corollary~\ref{cor:subalgebra for torsion part}. The map is surjective, and due to Corollary~\ref{cor:UHF into BD seq split cr pr}, the domain is simple so that $\varphi$ is an isomorphism.
\end{proof}
\begin{rem}\label{rem:A_S as BQ of U}
Conceptually, it seems that there is more to Proposition~\ref{prop:A_S as BQ of U} than the proof entails: There is a commutative diagram
\begin{equation}\label{eq:right LCM inclusion diagram}
\begin{gathered}
\xymatrix{
C^*(U) \ar^(0.4){\iota}@{^{(}->}[r] \ar@{->>}_{\pi_U}[d] & C^*(\mathbb{N} \rtimes H^+) \ar@{->>}^{\pi_{\mathbb{N} \rtimes H^+}}[d] \\
\mathbb{Q}Q(U) \ar@{^{(}->}_(0.4){\varphi^{-1}}[r] & \mathbb{Q}Q(\mathbb{N} \rtimes H^+)
}
\end{gathered}
\end{equation}
with $\iota$ induced by $U \subset \mathbb{N} \rtimes H^+$ and $\varphi$ as in the proof of Proposition~\ref{prop:A_S as BQ of U}. The fact that $\iota$ is an injective $*$-homomorphism follows from \cite{BLS2}*{Proposition~3.6}: $N \rtimes H$ is amenable and hence $C^*(\mathbb{N} \rtimes H^+) \cong C^*_r(\mathbb{N} \rtimes H^+)$, see \cite{BLS1}*{Example~6.3}, and similarly $C^*(U) \cong C^*_r(U)$. Note that the bottom row of \eqref{eq:right LCM inclusion diagram} is given by $\mathcal{A}_S \into \mathcal{Q}_S$, see Proposition~\ref{prop:Q_S as BQ} and Proposition~\ref{prop:A_S as BQ of U}.
\end{rem}
By Corollary~\ref{cor:torsion subalgebra justification} and Proposition~\ref{prop:A_S as BQ of U}, the torsion part of the $K$-theory of the boundary quotient of $\mathbb{N} \rtimes H^+$ arises from the boundary quotient of the distinguished submonoid $U$, which in fact sits inside $\mathcal{Q}(\mathbb{N} \rtimes H^+)$ in the natural way.
For the remainder of this section, we will assume that $S$ is finite with cardinality $k$. This restriction is necessary in order to derive a $k$-graph model for $\mathcal{A}_S$, which we obtain via the boundary quotient representation of $\mathcal{A}_S$. Note that for $p,q \in \mathbb{N}^\times$ and $(m,n) \in \{0,\dots,p-1\} \times \{0,\dots,q-1\}$, there is a unique pair $(n',m') \in \{0,\dots,q-1\} \times \{0,\dots,p-1\}$ such that $m+pn = n'+qm'$. In other words, the map
\[\theta_{p,q}\colon \{0,\dots,p-1\} \times \{0,\dots,q-1\} \to \{0,\dots,q-1\} \times \{0,\dots,p-1\}\]
with $ (m,n) \mapsto (n',m')$ determined by $n'+qm' = m+pn$ is bijective.
\begin{rem}\label{rem:k-graph from S}
For each $p \in S$, we can consider the $1$-graph given by a single vertex with $p$ loops $(m,p), 0 \leq m \leq p-1$. If we think of the collection of these $1$-graphs as the skeleton of a $k$-graph, i.e.\ the set of all edges of length at most $1$, where the vertices for different $p$ are identified, then the maps $\theta_{p,q}$ satisfy condition~(2.8) in \cite{FS}*{Remark~2.3}, and hence define a row-finite $k$-graph $\Lambda_{S,\theta}$. Indeed, this is obvious for $k=2$. For $k \geq 3$, let $p,q,r \in S$ be pairwise distinct elements and fix $0 \leq m_t \leq t-1$ for $t = p,q,r$. We compute
\[\begin{array}{lclcl}
m_p+p(m_q+qm_r) &=& m_p+p(m^{(1)}_r+rm^{(1)}_q) &=& m^{(2)}_r+r(m^{(1)}_p+pm^{(1)}_q)\\
&=& m^{(2)}_r+r(m^{(2)}_q+qm^{(2)}_p) &=& m^{(3)}_q+q(m^{(3)}_r+rm^{(2)}_p)\\
&=& m^{(3)}_q+q(m^{(3)}_p+pm^{(4)}_r) &=& m^{(4)}_p+p(m^{(4)}_q+qm^{(4)}_r),
\end{array}\]
where $0 \leq m^{(i)}_t \leq t-1$ for $t=p,q,r$ and $i=1,\ldots,4$ are uniquely determined by the $\theta_{s,t}$ for the respective values of $s$ and $t$. The bijection from (2.8) in \cite{FS}*{Remark~2.3} now maps $((m_p,p),(m_q,q),(m_r,r))$ to $((m^{(4)}_p,p),(m^{(4)}_q,q),(m^{(4)}_r,r))$. It is easy to check that $m^{(4)}_t = m_t$ for $t=p,q,r$, which shows that condition~(2.8) in \cite{FS}*{Remark~2.3} is valid. Applying \cite{KP}*{Definition 1.5} to the case of $\Lambda_{S,\theta}$, we see that $C^*(\Lambda_{S,\theta})$ is the universal $C^*$-algebra generated by isometries $(t_{(m,p)})_{p \in S, 0 \leq m \leq p-1}$ subject to the relations:
\[\begin{array}{c} \textnormal{(i)} \ t_{(m,p)}t_{(n,q)} = t_{(n',q)}t_{(m',p)} \text{ if } m+pn = n'+qm' \quad \text{and} \quad\textnormal{(ii)} \ \sum\limits_{m=0}^{p-1} t^{\phantom{*}}_{(m,p)}t_{(m,p)}^* = 1 \end{array}\]
for all $p,q \in S$.
\end{rem}
\begin{cor}\label{cor:tor subalgebra via k-graphs}
$\mathcal{A}_S$ is isomorphic to $C^*(\Lambda_{S,\theta})$.
\end{cor}
\begin{proof}
We will work with $\mathcal{Q}(U)$ in place of $\mathcal{A}_S$ and invoke Proposition~\ref{prop:A_S as BQ of U}. Condition~(i) guarantees that $(m,p) \mapsto t_{(m,p)}$ yields a representation of $U$ by isometries as $U$ is generated by $(m,p)$ with $p \in S, 0 \leq m \leq p-1$, and $(m,p)(n,q) = (m+pn,pq) = (n',q)(m',p)$. (ii) holds for arbitrary $p \in H^+$ if we write $t_{(m,p)}$ for the product $t_{(m_1,p_1)}\cdots t_{(m_k,p_k)}$ where $(m_1,p_1)\cdots (m_k,p_k) = (m,p) \in U$ with $p_i \in S$. It is then straightforward to verify that we get a $*$-homomorphism $C^*(U) \to C^*(\Lambda_{S,\theta})$.
Now let $F \subset U$ be a foundation set and set $h := \text{lcm}(\{h' : (m',h') \in F \text{ for some } 0 \leq m' \leq h'-1\})$. Then $F_a := \{ (m,h) : 0 \leq m \leq h-1\}$ is a foundation set that refines $F$. Therefore, it suffices to establish \eqref{eq:BQ} for $F_a$ in place of $F$. But as $F_a$ is accurate, \eqref{eq:BQ} takes the form $\sum_{m=0}^{h-1} t^{\phantom{*}}_{(m,h)}t_{(m,h)}^* = 1$, which follows from (ii) as explained in the proof of Proposition~\ref{prop:A_S as BQ of U}. Thus $v_{(m,p)} \mapsto t_{(m,p)}$ defines a surjective $*$-homomorphism $\mathcal{Q}(U) \to C^*(\Lambda_{S,\theta})$. By simplicity, see Corollary~\ref{cor:UHF into BD seq split cr pr} and Proposition~\ref{prop:A_S as BQ of U}, this map is also injective.
\end{proof}
\begin{rem}\label{rem:Lambda_S flip}
Similar to $\Lambda_{S,\theta}$, we can also consider the row-finite $k$-graph $\Lambda_{S,\sigma}$ with $\sigma_{p,q}$ being the flip, i.e.\ $\sigma_{p,q}(m,n) := (n,m)$. That is to say, we keep the skeleton of $\Lambda_{S,\theta}$, but replace $\theta$ by $\sigma$. In this case, it is easy to see that $C^*(\Lambda_{S,\sigma}) \cong \bigotimes_{p \in S} \mathcal{O}_p$.
\end{rem}
With regards to the $K$-theory of $\mathcal{Q}_S$, it is interesting to ask whether $C^*(\Lambda_{S,\theta})$ and $C^*(\Lambda_{S,\sigma})$ are isomorphic or not. At least for $\lvert S \rvert \leq 2$, the answer is known to be positive.
\begin{prop} \label{prop:A_S for |S|=2}
Let $p,q \geq 2$ be two relatively prime numbers and $S = \{p,q\}$. Then $\mathcal{A}_S \cong C^*(\Lambda_{S,\theta})\cong C^*(\Lambda_{S,\sigma}) \cong \mathcal{O}_p \otimes \mathcal{O}_q$.
\end{prop}
\begin{proof}
We have seen in Corollary~\ref{cor:tor subalgebra via k-graphs} and Remark~\ref{rem:Lambda_S flip} that the UCT Kirchberg algebras $\mathcal{A}_S$ and $\mathcal{O}_p \otimes \mathcal{O}_q$ are both expressible as $C^*$-algebras associated with row-finite $2$-graphs $\Lambda_{S,\theta}$ and $\Lambda_{S,\sigma}$ sharing the same skeleton. The claim therefore follows from \cite{Evans}*{Corollary~5.3}.
\end{proof}
Concerning a generalization of Proposition~\ref{prop:A_S for |S|=2} to the case of $\lvert S \rvert \geq 3$, we learned from Aidan Sims that the following conjecture for $k$-graphs might be true:
\begin{conj}\label{conj:k-graph}
Suppose $\Lambda$ and $\Lambda'$ are row-finite $k$-graphs without sources such that $C^*(\Lambda)$ and $C^*(\Lambda')$ are unital, purely infinite and simple. If $\Lambda$ and $\Lambda'$ have the same skeleton, then the associated $C^*$-algebras are isomorphic.
\end{conj}
Note that $C^*(\Lambda)$ and $C^*(\Lambda')$ are indeed unital UCT Kirchberg algebras, as separability, nuclearity and the UCT are automatically satisfied, see \cite{KP}*{Theorem~5.5}. We will come back to Conjecture~\ref{conj:k-graph} at the end of the next section.
\section{\texorpdfstring{Towards a classification of $\mathcal{Q}_S$}{Towards a classification}}\label{sec:classification}
This final section provides a survey of the progress on the classification of $\mathcal{Q}_S$ that we achieve through the preceding sections and a spectral sequence argument for $K_*(\mathcal{A}_S)$, see Theorem~\ref{thm:main result} and Theorem~\ref{thm:K-theory for A_S}. Recall that $N = \mathbb{Z}\bigl[\{ \frac{1}{p} : p \in S\}\bigr]$ and $g_S$ denotes the greatest common divisor of $\{p-1 : p \in S\}$. We begin by stating our main result.
\begin{thm}\label{thm:main result}
Let $S \subset \mathbb{N}^\times\setminus\{1\}$ be a non-empty family of relatively prime numbers. Then the $K$-theory of $\mathcal{Q}_S$ satisfies
\[\begin{array}{c}
K_{i}(\mathcal{Q}_{S})\cong \mathbb{Z}^{2^{\lvert S \rvert-1}} \oplus K_i(\mathcal{A}_S),\quad i=0,1,\end{array}\]
where $K_i(\mathcal{A}_S)$ is a torsion group. Moreover, the following statements hold:
\begin{enumerate}[(a)]
\item If $g_S=1$, then $K_i(\mathcal{Q}_S)$ is free abelian in $2^{\lvert S \rvert-1}$ generators for $i=0,1$, and $[1]=0$.
\item If $\lvert S \rvert=1$, then $(K_{0}(\mathcal{Q}_{S}),[1],K_{1}(\mathcal{Q}_{S})) \cong (\mathbb{Z} \oplus \mathbb{Z}/g_S\mathbb{Z}, (0,1), \mathbb{Z})$.
\item If $\lvert S \rvert=2$, then $(K_{0}(\mathcal{Q}_{S}),[1],K_{1}(\mathcal{Q}_{S})) \cong (\mathbb{Z}^2 \oplus \mathbb{Z}/g_S\mathbb{Z}, (0,1), \mathbb{Z}^2 \oplus \mathbb{Z}/g_S\mathbb{Z})$.
\end{enumerate}
\end{thm}
\begin{rem}\label{rem:K-theory of Q_S mod tor}
Note that for $S = \left\lbrace p \right\rbrace$, the torsion subalgebra $\mathcal{A}_S$ is canonically isomorphic to the Cuntz algebra $\mathcal{O}_p$. Therefore, Theorem~\ref{thm:main result}~(b) recovers known results by Hirshberg \cite{Hir}*{Example~1, p.~106} and Katsura \cite{KatsuraIV}*{Example~A.6}. Indeed, it is already clear from the presentation for $\mathcal{O}(E_{p,1})$ described in \cite{KatsuraIV}*{Example~A.6} that it coincides with $\mathcal{Q}_S$. Theorem~\ref{thm:main result}~(c) shows an unexpected result for the $K$-groups of $\mathcal{Q}_S$ in the case of $S=\{p,q\}$ for two relatively prime numbers $p$ and $q$ with $g_S > 1$: $K_1(\mathcal{Q}_S)$ has torsion and is therefore, for instance, not a graph $C^*$-algebra, see \cite{RS}*{Theorem~3.2}. By virtue of (a), Theorem~\ref{thm:main result} also explains why $\mathcal{Q}_\mathbb{N}$ and $\mathcal{Q}_{2}$ have torsion free $K$-groups. More importantly, it shows that the presence of $2$ in the family $S$ is not the only way to achieve this. Indeed, $S$ can contain at most one even number. If $g_S=1$, then $S$ must contain an even number, and there are many examples, e.g.\ $S$ with $2^m+1,2n \in S$ for some $m,n \geq 1$.
\end{rem}
In view of the Kirchberg-Phillips classification theorem \cites{Kir,Phi}, we get the following immediate consequence of Theorem~\ref{thm:main result}.
\begin{cor}\label{cor:isomorphism classes for |S| at most 2}
Let $S,T \subset \mathbb{N}^\times \setminus\{1\}$ be non-empty families of relatively prime numbers. Then $\mathcal{Q}_S \cong \mathcal{Q}_T$ implies $\lvert S \rvert = \lvert T \rvert$. Moreover, the following statements hold:
\begin{enumerate}[(a)]
\item If $g_S=1=g_T$, then $\mathcal{Q}_S$ is isomorphic to $\mathcal{Q}_T$ if and only if $\lvert S \rvert=\lvert T \rvert$.
\item If $\lvert S \rvert \leq 2$, then $\mathcal{Q}_S$ is isomorphic to $\mathcal{Q}_T$ if and only if $\lvert S \rvert=\lvert T \rvert$ and $g_S=g_T$.
\end{enumerate}
\end{cor}
Observe that the decomposition of $K_*(\mathcal{Q}_S)$ claimed in Theorem~\ref{thm:main result} follows from Corollary~\ref{cor:torsion and free part K-theory}, Proposition~\ref{prop:K-theory torsion free part} and Corollary~\ref{cor:subalgebra for torsion part}. To prove our main result, it is therefore enough to establish the following theorem reflecting our present knowledge on the torsion subalgebra $\mathcal{A}_S$, an object which is certainly of interest in its own right.
\begin{thm}\label{thm:K-theory for A_S}
Let $S \subset \mathbb{N}^\times\setminus\{1\}$ be a non-empty family of relatively prime numbers. Then the following statements hold:
\begin{enumerate}[(a)]
\item If $g_S=1$, then $\mathcal{A}_S \cong \mathcal{O}_2 \cong \bigotimes_{p \in S} \mathcal{O}_p$.
\item If $S = \left\lbrace p \right\rbrace$, then $\mathcal{A}_S \cong \mathcal{O}_p$.
\item If $S = \left \lbrace p,q \right\rbrace$ with $p\neq q$, then $\mathcal{A}_S \cong \mathcal{O}_p \otimes \mathcal{O}_q$.
\item For $\lvert S \rvert\geq 3$ and $g_S > 1$, $K_i(\mathcal{A}_S)$ is a torsion group in which the order of any element divides $g_S^{2^{\lvert S \rvert-2}}$. Moreover, $K_i(\mathcal{A}_S)$ is finite whenever $S$ is finite.
\end{enumerate}
\end{thm}
Note that in the case of infinite $S$ with $g_S > 1$, part (d) still makes sense within the realm of supernatural numbers. Based on Theorem~\ref{thm:main result} and Theorem~\ref{thm:K-theory for A_S}, we suspect that the general situation is in accordance with Conjecture~\ref{conj:k-graph}:
\begin{conj}\label{conj:K-theory of QQ_S}
For a family $S \subset \mathbb{N}^\times\setminus\{1\}$ of relatively prime numbers with $\lvert S\rvert\geq 2$, $\mathcal{A}_S$ is isomorphic to $\bigotimes_{p \in S} \mathcal{O}_p$. Equivalently, $\mathcal{Q}_S$ is the unital UCT Kirchberg algebra with
\[
(K_0(\mathcal{Q}_S),[1],K_1(\mathcal{Q}_S)) = (\mathbb{Z}^{2^{\lvert S \rvert-1}} \oplus (\mathbb{Z}/g_S\mathbb{Z})^{2^{\lvert S \rvert-2}},(0,e_1),\mathbb{Z}^{2^{\lvert S \rvert-1}} \oplus (\mathbb{Z}/g_S\mathbb{Z})^{2^{\lvert S \rvert-2}}),
\]
where $e_1 = (\delta_{1,j})_j \in (\mathbb{Z}/g_S\mathbb{Z})^{2^{\lvert S \rvert-2}}$. In particular, if $S,T \subset \mathbb{N}^\times \setminus\{1\}$ are non-empty sets of relatively prime numbers, then $\mathcal{Q}_S$ is isomorphic to $\mathcal{Q}_T$ if and only if $\lvert S \rvert=\lvert T \rvert$ and $g_S=g_T$.
\end{conj}
\begin{rem}\label{rem:stable relations}
It follows from Theorem~\ref{thm:main result} and Theorem~\ref{thm:K-theory for A_S}~(d) that the $K$-theory of $\mathcal{Q}_S$ is finitely generated if and only if $S$ is finite. Consequently, when $S$ is finite the defining relations of $\mathcal{Q}_S$ from Definition~\ref{def:Q_S} are \emph{stable}, see \cite{enders1}*{Corollary~4.6} and \cite{loring1}*{Chapter~14}.
\end{rem}
For the proof of Theorem~\ref{thm:K-theory for A_S}, we will employ the isomorphism $\mathcal{A}_S \cong M_{d^\infty} \rtimes_\alpha^e H^+$ and make use of a spectral sequence by Kasparov constructed in \cite{kasparov}*{6.10}. Let us briefly review the relevant ideas and refer to \cite{barlak15} for a detailed exposition. Given a $C^*$-dynamical system $(B,\beta,\mathbb{Z}^k)$, we can consider its \emph{mapping torus}
\[
\mathcal{M}_\beta(B):=\left\lbrace f\in C(\mathbb{R}^k,B)\ : \ \beta_z (f(x))=f(x+z)\ \text{for all}\ x\in \mathbb{R}^k,\ z\in \mathbb{Z}^k \right\rbrace.
\]
It is well-known that $K_*(\mathcal{M}_\beta(B))$ is isomorphic to $K_{*+k}(B\rtimes_\beta\mathbb{Z}^k)$, see e.g.\ \cite{barlak15}*{Section~1}. The mapping torus admits a finite cofiltration
\begin{equation}
\label{cofiltrationMappingTorus}
\mathcal{M}_\beta(B)=F_k \stackrel{\pi_k}{\onto} F_{k-1} \stackrel{\pi_{k-1}}{\onto} \cdots \stackrel{\pi_1}{\onto} F_0=B \stackrel{\pi_0}{\onto} F_{-1}=0
\end{equation}
arising from the filtration of $\mathbb{R}^k$ by its skeletons
\[
\emptyset = X_{-1} \subset \mathbb{Z}^k = X_0 \subset X_1 \subset \cdots\subset X_k = \mathbb{R}^k,
\]
where $X_\ell := \{ (x_1,\ldots,x_k) \in \mathbb{R}^k : \lvert\{ 1 \leq i \leq k : x_i \in \mathbb{R}\setminus\mathbb{Z}\}\rvert \leq \ell\}$.
As for filtrations of $C^*$-algebras by closed ideals \cite{schochet}, there is a standard way relying on Massey's technique of exact couples \cites{massey1,massey2} of associating a spectral sequence to a given finite cofiltration of a $C^*$-algebra. In this way, the cofiltration \eqref{cofiltrationMappingTorus} yields a spectral sequence $(E_\ell,d_\ell)_{\ell\geq 1}$ that converges to $K_*(\mathcal{M}_\beta(B))\cong K_{*+k}(B\rtimes_\beta \mathbb{Z}^k)$. Using Savinien-Bellissard's \cite{savinienBellissard} description of the $E_1$-term, we can summarize as follows.
\begin{thm}[cf.\ {\cite{kasparov}*{6.10}}, {\cite{savinienBellissard}*{Theorem~2}} and {\cite{barlak15}*{Corollary~2.5}}]\label{thm:spec seq for cr prod}~\newline
Let $(B,\beta,\mathbb{Z}^k)$ be a $C^*$-dynamical system. There exists a cohomological spectral sequence $(E_\ell,d_\ell)_{\ell\geq 1}$ converging to $K_*(\mathcal{M}_\beta(B))\cong K_{*+k}(B\rtimes_\beta \mathbb{Z}^k)$. The $E_1$-term is given by
\[
\begin{array}{l}
E_1^{p,q}:= K_q(B) \otimes_\mathbb{Z} \Lambda^p(\mathbb{Z}^k),\text{ with}\\
d_1^{p,q}\colon E_1^{p,q}\to E_1^{p+1,q}, \quad x\otimes e\mapsto \sum\limits_{j=1}^k (K_q(\beta_j)-\operatorname{id})(x)\otimes (e_j\wedge e).
\end{array}
\]
Furthermore, the spectral sequence collapses at the $(k+1)$th page, so that $E_\infty = E_{k+1}$.
\end{thm}
By Bott periodicity, we have that $(E_\ell^{p,q+2},d_\ell^{p,q+2})=(E_\ell^{p,q},d_\ell^{p,q})$ for all $p,q \in \mathbb{Z}$. In particular, the $E_\infty$-term reduces to $E_\infty^{p,q}$ with $p\in \mathbb{Z}$ and $q=0,1$.
\begin{rem}
Let us recall the meaning of convergence of the spectral sequence $(E_\ell,d_\ell)_{\ell\geq 1}$. For $q=0,1$, consider the diagram
\[
K_q(\mathcal{M}_\beta(B))=K_q(F_k) \longrightarrow K_q(F_{k-1}) \longrightarrow \cdots \longrightarrow K_q(F_0) \longrightarrow K_q(F_{-1})=0.
\]
Define $\mathcal{F}_p K_q(\mathcal{M}_\beta(B)):=\operatorname{ker}(K_q(\mathcal{M}_\beta(B))\to K_q(F_p))$ for $p=-1,\ldots,k$, and observe that this gives rise to a filtration of abelian groups
\[
0 \into \mathcal{F}_{k-1} K_q(\mathcal{M}_\beta(B)) \into \cdots \into \mathcal{F}_{-1} K_q(\mathcal{M}_\beta(B)) = K_q(\mathcal{M}_\beta(B)).
\]
One can now show the existence of exact sequences
\begin{equation}\label{eq:exact sequences from filtration}
0 \longrightarrow \mathcal{F}_p K_{p+q}(\mathcal{M}_\beta(B)) \longrightarrow \mathcal{F}_{p-1} K_{p+q}(\mathcal{M}_\beta(B)) \longrightarrow E_{\infty}^{p,q} \longrightarrow 0,
\end{equation}
or in other words, there are isomorphisms
\[
E_{\infty}^{p,q}\cong \mathcal{F}_{p-1} K_{p+q}(\mathcal{M}_\beta(B))/\mathcal{F}_p K_{p+q}(\mathcal{M}_\beta(B)).
\]
Hence, the $E_\infty$-term determines the $K$-theory of $\mathcal{M}_\beta(B)$, and thus of $B\rtimes_\beta \mathbb{Z}^k$, up to group extension problems.
\end{rem}
Let us now turn to the $K$-theory of $M_{d^\infty} \rtimes_\alpha^e H^+$. By Laca's dilation theorem \cite{Lac}, see also Remark~\ref{rem:dilations}, we may and will determine the $K$-theory of the dilated crossed product $M_{d^\infty,\infty} \rtimes_{\alpha_\infty} H$ instead. Fix a natural number $1 \leq k \leq \lvert S \rvert$ and observe that $H_k \cong \mathbb{Z}^k$. Let $\alpha_\infty(k)$ be the $H_k$-action on $M_{d^\infty,\infty}$ induced by the $k$ smallest elements $p_1 < p_2 < \dotsb < p_k$ of $S$. It follows from the proof of Proposition~\ref{prop:UHF cross prod torsion K-th} that $K_0(\alpha_{\infty,p_\ell})$ is given by multiplication with $1/p_\ell$ on $K_0(M_{d^\infty}) \cong N$. It turns out to be more convenient to work with the action $\alpha_\infty^{-1}(k)$ given by the inverses of the $\alpha_\ell$, whose crossed product is canonically isomorphic to $M_{d^\infty,\infty} \rtimes_{\alpha_\infty(k)} H_k$.
Let $(E_\ell,d_\ell)_{\ell\geq 1}$ denote the spectral sequence associated with $\alpha^{-1}_\infty(k)$. As $K_1(M_{d^\infty})=0$, it follows directly from Theorem~\ref{thm:spec seq for cr prod} that $E_1^{p,1}=0$ for all $p \in \mathbb{Z}$. Moreover, according to Theorem~\ref{thm:spec seq for cr prod}, $d_1^{p,0}\colon N\otimes_\mathbb{Z} \Lambda^p(\mathbb{Z}^k)\to N\otimes_\mathbb{Z} \Lambda^p(\mathbb{Z}^k)$, $p \in \mathbb{Z}$, is given by
\[\begin{array}{c}
d_1^{p,0}(x\otimes e)=\sum_{\ell=1}^k (p_\ell -1)x\otimes e_\ell\wedge e=\sum_{\ell=1}^k x\otimes (p_\ell -1)e_\ell\wedge e.
\end{array}\]
In other words, $d_1^{p,0} = \op{id}_N\otimes h^p$ with
\begin{equation}\label{eq:differential formula}
\begin{array}{c}
h^p\colon \Lambda^p(\mathbb{Z}^k)\to \Lambda^{p+1}(\mathbb{Z}^k),\quad h^p(e)=\sum_{\ell=1}^k (p_\ell-1)e_\ell\wedge e.
\end{array}
\end{equation}
To obtain $E_2^{p,0}$, we therefore compute the cohomology of the complex $(\Lambda^p(\mathbb{Z}^k),h^p)_{p\in \mathbb{Z}}$. To do so, we consider $h^p$ as a matrix $A_p\in M_{\binom k {p+1} \times \binom k p}(\mathbb{Z})$, where the identification is taken with respect to the canonical bases of $\Lambda^p(\mathbb{Z}^k)$ and $\Lambda^{p+1}(\mathbb{Z}^k)$ in lexicographical ordering. The computation then mainly reduces to determining the Smith normal form of $A_p$.
\begin{thm}[Smith normal form]\label{thm:Smith normal form}
Let $A$ be a non-zero $m\times n$-matrix over a principal ideal domain $R$. There is an invertible $m\times m$-matrix $S$ and an invertible $n\times n$-matrix $T$ over $R$, so that
\[D:=SAT=\operatorname{diag}(\delta_1,\ldots,\delta_r,0,\ldots,0)\]
for some $r \leq \min(m,n)$ and non-zero $\delta_i\in R$ satisfying $\delta_i|\delta_{i+1}$ for $1 \leq i \leq r-1$. The elements $\delta_i$ are unique up to multiplication with some unit and are called \emph{elementary divisors} of $A$. The diagonal matrix $D$ is called a \emph{Smith normal form} of $A$. The $\delta_i$ can be computed as
\begin{equation}\label{eq:det divisor formula}
\begin{array}{c}
\delta_1 = d_1(A),\quad \delta_i = \frac{d_i(A)}{d_{i-1}(A)},
\end{array}
\end{equation}
where $d_i(A)$, called the \emph{$i$-th determinant divisor}, is the greatest common divisor of all $i\times i$-minors of $A$.
\end{thm}
Of course, $D$ can only be a diagonal matrix if $m=n$. The notation in Theorem~\ref{thm:Smith normal form} is supposed to mean that $D$ is the $m\times n$ matrix over $R$ with the $\min(m,n)\times \min(m,n)$ left upper block matrix being $\operatorname{diag}(\delta_1,\ldots,\delta_r,0,\ldots,0)$ and all other entries being zero.
For each $1\leq k\leq\lvert S\rvert$, set $g_k :=\gcd(\{p_\ell-1 : \ell=1,\cdots,k\})$.
\begin{lem}\label{lem:ker d^p / im d^(p-1)}
The group $\operatorname{ker}(h^p)/\operatorname{im}(h^{p-1})$ is isomorphic to $(\mathbb{Z}/g_k\mathbb{Z})^{\binom{k-1}{p-1}}$ for $1 \leq p \leq k$ and vanishes otherwise.
\end{lem}
\begin{proof}
For $p\in \mathbb{Z}$, let $D_p=S_pA_pT_p$ denote the Smith normal form of $A_p$ with elementary divisors $\delta^{(p)}_1,\ldots,\delta^{(p)}_{r_p}$. As $\Lambda^p(\mathbb{Z}^k) = 0$ unless $0 \leq p\leq k$, $\operatorname{ker}(h^p)/\operatorname{im}(h^{p-1})$ vanishes if $p < 0$ or $p \geq k+1$.
If $p=0$, then $h^0\colon \mathbb{Z} \to \mathbb{Z}^k$ is given by $A_0 = (p_1-1,\ldots,p_k-1)$. Thus we have $r_0=1$ and $\delta^{(0)}_1=g_k$. Moreover, $h^0$ is injective, so $\operatorname{ker}(h^0)/\operatorname{im}(h^{-1})=0$.
Likewise, $p=k$ is simple as $h^{k-1}\colon \mathbb{Z}^k \to \mathbb{Z}$ is given by $A_{k-1} = (p_1-1,\ldots,p_k-1)^t$ and $h^k$ is zero because $\Lambda^{k+1}(\mathbb{Z}^k) = 0$. Therefore, $r_{k-1}=1$ and $\delta^{(k-1)}_1=g_k$, and hence
\[
\operatorname{ker}(h^k)/\operatorname{im}(h^{k-1}) = \mathbb{Z}/g_k\mathbb{Z}=(\mathbb{Z}/g_k\mathbb{Z})^{\binom{k-1}{k-1}}.
\]
As this completes the proof for $p\leq0$ and $p\geq k$, we will assume $1\leq p\leq k-1$ from now on.
We start by showing that for $\ell=1,\cdots,k$, the matrix $A_p$ contains a $\binom{k-1}{p}\times \binom{k-1}p$ diagonal matrix with entries $\pm (p_\ell-1)$ (obtained by deleting suitable rows and columns). This will allow us to conclude that $A_p$ has a $j \times j$-minor equal to $\pm(p_\ell-1)^j$ for each $j=1,\ldots,\binom{k-1}p$. Thus we obtain that $d_j(A_p)$ divides $g_k^j$ for $j=1,\ldots,\binom{k-1}{p}$:
First, keep only those columns of $A_p$ which correspond to basis elements $e_{i_1}\wedge\ldots\wedge e_{i_p} \in \Lambda^p(\mathbb{Z}^k)$ satisfying $\ell \neq i_j$ for all $j=1,\ldots,p$. As this amounts to choosing $p$ elements out of $k-1$ without order and repetition, we are left with $\binom{k-1}p$ columns (out of $\binom k p$). Next, we restrict to those rows which correspond to basis elements $e_{i_1}\wedge\ldots\wedge e_{i_{p+1}} \in \Lambda^{p+1}(\mathbb{Z}^k)$ satisfying $\ell=i_j$ for some (necessarily unique) $j=1,\ldots,p+1$. Here again $\binom{k-1}p$ rows (out of $\binom k {p+1}$) remain. The resulting matrix describes the linear map
\[
\Lambda^p(\mathbb{Z}^k) \supset \mathbb{Z}^{\binom{k-1}p}\to \mathbb{Z}^{\binom{k-1}p} \subset \Lambda^{p+1}(\mathbb{Z}^k),\ e_{i_1}\wedge\ldots\wedge e_{i_p}\mapsto (p_\ell-1)\cdot e_\ell\wedge e_{i_1}\wedge\ldots\wedge e_{i_p},
\]
which is nothing but a diagonal matrix of size $\binom{k-1}p$ with entries $\pm(p_\ell-1)$. As explained above, we thus obtain that $d_j(A_p)|g_k^j$ for $j=1,\ldots,\binom{k-1}{p}$.
We will now show that the converse holds as well, i.e.\ $g_k^j|d_j(A_p)$ for $j=1,\ldots,\binom{k-1}{p}$. Note that every $1\times 1$-minor is either zero or $p_\ell-1$ for some $\ell=1,\ldots,k$. This shows that $d_1(A_p)=g_k$ for $1 \leq p \leq k-1$. Let $1\leq j\leq \binom{k-1}{p}-1$ and assume that $d_j(A_p)=g_k^j$. Let $L$ be any $(j+1)\times (j+1)$-matrix arising from $A_p$ by deleting rows and columns. By the Laplace expansion theorem, the determinant of $L$ is given as a linear combination of some of its $j\times j$-minors. The coefficients in the linear combination all are entries of $L$. The occurring minors are all $j\times j$-minors of $A_p$. Hence, $g_k^j|\operatorname{det}(L)$ by assumption. In fact, we have $g_k^{j+1}|\operatorname{det}(L)$ because all entries in $A_p$ are divisible by $g_k$. Altogether, $d_j(A_p)=g_k^j$ for $j=1,\ldots,\binom{k-1}{p}$ and we have shown that for $p=1,\ldots,k-1$, $r_p\geq \binom{k-1}{p}$ and $\delta^{(p)}_j=g_k$ for $j=1,\ldots,\binom{k-1}p$.
Since $A_p$ and $D_p$ have isomorphic kernel and image, our considerations show that
\[\begin{array}{lcr}
\binom{k-1}p \leq \operatorname{rank}(\operatorname{im}(h^p)) &\quad\text{ and }\quad&
\operatorname{rank}(\operatorname{ker}(h^p)) \leq \binom k p -\binom{k-1}p=\binom{k-1}{p-1}.
\end{array}\]
By $h^{p+1}\circ h^p=0$, we conclude that $\operatorname{rank}(\operatorname{ker}(h^{p+1})) = \operatorname{rank}(\operatorname{im}(h^p)) = \binom{k-1}p$ which implies $r_p=\binom{k-1}p$. Moreover, $h^{p}\circ h^{p-1}=0$ forces $T_p^{-1}S_{p-1}^{-1}(\operatorname{im}(D_{p-1}))\subset\operatorname{ker}(D_p)$ or, equivalently, $\operatorname{im}(D_{p-1})\subset\operatorname{ker}(D_pT_p^{-1}S_{p-1}^{-1})$. Since
\[\operatorname{im}(D_{p-1})=g_k\mathbb{Z}^{\binom{k-1} {p-1}}\oplus\{0\}^{\binom k p - \binom{k-1} {p-1}}\]
has the same rank as $\operatorname{ker}(D_pT_p^{-1}S_{p-1}^{-1})$,
it means that
\[\operatorname{ker}(D_pT_p^{-1}S_{p-1}^{-1})=\mathbb{Z}^{\binom{k-1}{p-1}}\oplus\{0\}^{\binom k p -\binom{k-1}{p-1}}.\]
Moreover, $S_{p-1}$ is an automorphism of $\mathbb{Z}^{\binom k p}$ that restricts both to an isomorphism $\operatorname{ker}(A_p) \stackrel{\cong}{\longrightarrow} \operatorname{ker}(D_pT_p^{-1}S_{p-1}^{-1})$ and to an isomorphism $\operatorname{im}(A_{p-1}) \stackrel{\cong}{\longrightarrow} \operatorname{im}(D_{p-1})$.
Hence,
\[\begin{array}{lclcl}
\operatorname{ker}(h^p) / \operatorname{im}(h^{p-1}) &=& \operatorname{ker}(A_p) / \operatorname{im}(A_{p-1}) & \cong & \operatorname{ker}(D_p T_p^{-1}S_{p-1}^{-1}) / \operatorname{im}(D_{p-1}) \vspace*{2mm}\\
&&&\cong& (\mathbb{Z}/g_k\mathbb{Z})^{\binom{k-1}{p-1}}.
\end{array}\]
\end{proof}
Lemma~\ref{lem:ker d^p / im d^(p-1)} now allows us to compute the $E_2$-term of the spectral sequence associated to $\alpha_{\infty}^{-1}(k)\colon H_k\curvearrowright M_{d^\infty,\infty}$ by appealing to the following simple, but useful observation.
\begin{lem}\label{lem:N/gN}
The group $N/g_SN$ is isomorphic to $\mathbb{Z}/g_S\mathbb{Z}$. Moreover, for every $1\leq k\leq \lvert S\rvert$, the group $N/g_kN$ is isomorphic to a subgroup of $\mathbb{Z}/g_k\mathbb{Z}$.
\end{lem}
\begin{proof}
Recall that $S$ consists of relatively prime numbers, $N=\mathbb{Z}\bigl[\{\frac{1}{p}:p\in S\}\bigr]$ and let us simply write $g$ for $g_S=\gcd(\{p-1:p\in S\})$. The map
\[\begin{array}{c} N/gN \to \mathbb{Z}/g\mathbb{Z}, \quad \frac{1}{r}+gN \mapsto s + g\mathbb{Z}, \end{array}\]
where $r$ is a natural number and $s$ is the unique solution in $\{0,1,\dotsc,g-1\}$ of $rs=1 \pmod{g}$, defines a group homomorphism. To see this, note first that for every $p\in P$ there is a $q\in S$ such that $p|q$, i.e.\ $\gcd{(q-1,p)}=1$. Therefore, $\gcd{(g,p)}=1$ for all $p\in P$. If $\frac{1}{r}\in N$, then all the prime factors of $r$ come from $P$, and it follows that $\gcd{(g,r)}=1$. Thus, the above map is well-defined and extends by addition to the whole domain. Moreover, every $s$ appearing as a solution is relatively prime with $g$, meaning that the kernel is $gN$, i.e.\ the map is injective. Finally, the inverse map is given by $1+g\mathbb{Z}\mapsto 1+gN$.
For the second part, set $g_k'=g_k/\max{(\gcd{(g_k,r)})}$, where the maximum is taken over all natural numbers $r$ such that $\frac{1}{r}\in N$, i.e.\ $g_k'$ is the largest number dividing $g_k$ so that $\gcd{(g_k',r)}=1$ for all such $r$. Then $g_k'N=g_kN$ and a similar proof as above shows that $N/g_k N=N/g_k'N\cong\mathbb{Z}/g_k'\mathbb{Z}$.
\end{proof}
\begin{prop}\label{prop:E for alpha(k)}
For every $1 \leq k \leq \lvert S \rvert$, the respective group $E_2^{p,0}$ is isomorphic to a subgroup of $(\mathbb{Z}/g_k\mathbb{Z})^{\binom{k-1}{p-1}}$ for $1 \leq p \leq k$, and vanishes otherwise. $E_2^{p,1}$ vanishes for $p \in \mathbb{Z}$.
\end{prop}
\begin{proof}
Note that $N$ is torsion free and hence a flat module over $\mathbb{Z}$. Thus, an application of Lemma~\ref{lem:ker d^p / im d^(p-1)} yields
\[\begin{array}{lclclcl}
E_2^{p,0} &\hspace*{-1mm}=\hspace*{-1mm}& \operatorname{ker}(\op{id}_{N}\otimes h^p)/\operatorname{im}(\op{id}_{N}\otimes h^{p-1}) &\hspace*{-1mm}\cong\hspace*{-1mm}& N\otimes_\mathbb{Z} \operatorname{ker}(h^p)/\operatorname{im}(h^{p-1}) \vspace*{2mm}\\
&\hspace*{-1mm}\cong\hspace*{-1mm}& N \otimes_\mathbb{Z} (\mathbb{Z}/g_k\mathbb{Z})^{\binom {k-1}{p-1}} &\hspace*{-1mm}\cong\hspace*{-1mm}& (N \otimes_\mathbb{Z} \mathbb{Z}/g_k\mathbb{Z})^{\binom {k-1}{p-1}} &\hspace*{-1mm}\cong\hspace*{-1mm}& (N/g_kN)^{\binom {k-1}{p-1}}
\end{array}\]
and Lemma~\ref{lem:N/gN} shows that $N/g_kN$ is isomorphic to a subgroup of $\mathbb{Z}/g_k\mathbb{Z}$. The second claim follows from the input data.
\end{proof}
\begin{rem}\label{rem:apps for E2term lemma}
Assume that $g_k=1$ for some $1 \leq k \leq \lvert S \rvert$, $k < \infty$, and let $k \leq \ell \leq \lvert S \rvert$ be a natural number. If $(E_i^{p,q})_{i \geq 1}$ denotes the spectral sequence associated with $\alpha_\infty^{-1}(\ell)$, then Proposition~\ref{prop:E for alpha(k)} yields $E_2^{p,0}=0$ for all $p \in \mathbb{Z}$.
\end{rem}
\begin{proof}[Proof of Theorem~\ref{thm:K-theory for A_S}]
Let $k \geq 1$ be finite with $k \leq \lvert S \rvert$. The main idea is to use the $E_\infty$-term of the spectral sequence associated with $\alpha^{-1}_\infty(k)$ to compute $K_*(M_{d^\infty} \rtimes^e_{\alpha(k)} H^+_k)$, up to certain group extension problems, by employing convergence of this spectral sequence, see Theorem~\ref{thm:spec seq for cr prod}. Recall the general form \eqref{eq:exact sequences from filtration} of the extension problems involved. Since $\mathcal{F}_k K_q(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) =0$ and hence $\mathcal{F}_{k-1} K_q(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) \cong E_{\infty}^{k,q-k}$, we face $k$ iterative extensions of the form:
\begin{equation}\label{eq:iterative extensions}
\begin{array}{ccccc}
E_{\infty}^{k,q-k} &\hspace*{-2mm}\into\hspace*{-2mm}& \mathcal{F}_{k-2} K_q(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) &\hspace*{-2mm}\onto\hspace*{-2mm}& E_{\infty}^{k-1,q-k+1} \vspace*{2mm}\\
\mathcal{F}_{k-2} K_q(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) &\hspace*{-2mm}\into\hspace*{-2mm}& \mathcal{F}_{k-3} K_q(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) &\hspace*{-2mm}\onto\hspace*{-2mm}& E_{\infty}^{k-2,q-k+2} \vspace*{0mm}\\
\vdots&&\vdots&&\vdots \vspace*{0mm}\\
\mathcal{F}_{0} K_{q}(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) &\hspace*{-2mm}\into\hspace*{-2mm}& \mathcal{F}_{-1} K_{q}(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) &\hspace*{-2mm}\onto\hspace*{-2mm}& E_{\infty}^{0,q}.
\end{array}
\end{equation}
Using
\[\begin{array}{lcl}
\mathcal{F}_{-1} K_{q}(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) &=& K_{q}(\mathcal{M}_{\alpha_\infty}(M_{d^\infty,\infty})) \\
&\cong& K_{k+q}(M_{d^\infty,\infty} \rtimes_{\alpha_\infty(k)} H_k) \cong K_{k+q}(M_{d^\infty} \rtimes^e_{\alpha(k)} H^+_k),
\end{array}\]
we will thus arrive at $K_{k+q}(M_{d^\infty} \rtimes^e_{\alpha(k)} H^+_k)$, see Theorem~\ref{thm:spec seq for cr prod}. Recall that by Bott periodicity, $E_\infty^{p,q+2} \cong E_\infty^{p,q}$ for all $q \in \mathbb{Z}$. In addition, we know from Proposition~\ref{prop:E for alpha(k)} that for $p \in \mathbb{Z}$, the group $E_\infty^{p,1}$ is trivial, and $E_\infty^{p,0}$ vanishes unless $1 \leq p \leq k$, in which case it is a subquotient of $E_2^{p,0}$, and thus a subquotient of $(\mathbb{Z}/g_k\mathbb{Z})^{\binom {k-1}{p-1}}$.
Assume now that $g=1$. Clearly, this holds exactly if $g_k = 1$ for some $k \geq 1$. For such $k \geq 1$, the corresponding $E_\infty$-term is trivial, yielding $K_*(M_{d^\infty} \rtimes^e_{\alpha(k)} H^+_k) = 0$. Using continuity of $K$-theory if necessary, we obtain that $\mathbb{C}A_S \cong M_{d^\infty} \rtimes^e_{\alpha} H^+$ has trivial $K$-theory. It follows from Kirchberg-Phillips classification that $\mathbb{C}A_S \cong \mathbb{C}O_2$. This proves (a).
If $S = \{p\}$, $\mathbb{C}A_S \cong \mathbb{C}O_p$ by the definition of $\mathbb{C}A_S$, and (b) follows.
Claim (c) is nothing but Proposition~\ref{prop:A_S for |S|=2}.
Lastly, let us prove claim (d). Let $k \geq 2$, and denote by $(E_\ell,d_\ell)_{\ell \geq 1}$ the spectral sequence associated with $\alpha^{-1}_\infty(k)$. Recall that only those $E_\infty^{\ell,q-\ell}$ with $1 \leq \ell \leq k$ and $q-\ell \in 2\mathbb{Z}$ may be non-trivial subgroups of $(\mathbb{Z}/g_k\mathbb{Z})^{\binom{k-1}{\ell-1}}$. Keeping track of the indices, we get
\[\begin{array}{c}
\sum\limits_{\substack{1 \leq \ell \leq k:\\ \ell \text{ even}}} \binom {k-1} {\ell-1} =2^{k-2} = \sum\limits_{\substack{1 \leq \ell \leq k:\\ \ell \text{ odd}}} \binom {k-1} {\ell-1}.
\end{array}\]
This allows us to conclude that the order of every element in $K_i(M_{d^\infty} \rtimes^e_{\alpha(k)} H^+_k)$ divides $g_k^{2^{k-2}}$. This concludes the proof, as $g_S = g_k$ for $k \leq \lvert S\rvert$ sufficiently large.
\end{proof}
\begin{rem}\label{rem:K-theory for M_d^infty}
It is possible to say something about the case $\lvert S \rvert=3$, though the answer is incomplete: Noting that $E_{\infty}^{2,-2} $ is a subgroup of $(\mathbb{Z}/g\mathbb{Z})^2$, $E_{\infty}^{3,-2}$ and $E_{\infty}^{1,0}$ are subgroups of $\mathbb{Z}/g\mathbb{Z}$, and the remaining terms vanish, we know that $K_1(M_{d^\infty} \rtimes^e_\alpha H^+) \cong E_\infty^{2,-2}$ and $K_0(M_{d^\infty} \rtimes^e_\alpha H^+)$ fits into an exact sequence
\[E_\infty^{3,-2} \into K_0(M_{d^\infty} \rtimes^e_\alpha H^+) \onto E_\infty^{1,0}.\]
But we cannot say more without additional information here.
\end{rem}
\begin{rem}
By considering $\mathbb{C}A_S$ as the $k$-graph $C^*$-algebra $C^*(\Lambda_{S,\theta})$ for finite $S$, see Corollary~\ref{cor:tor subalgebra via k-graphs}, one could probably also apply Evans' spectral sequence \cite{Evans}*{Theorem~3.15} to obtain Theorem~\ref{thm:K-theory for A_S} by performing basically the same proof. In fact, Evans' spectral sequence is the homological counterpart of the spectral sequence used here.
\end{rem}
\section*{References}
\begin{biblist}
\bib{barlak15}{article}{
author={Barlak, Sel\c {c}uk},
title={On the spectral sequence associated with the Baum-Connes Conjecture for $\mathbb Z^n$},
note={\href {http://arxiv.org/abs/1504.03298}{arxiv:1504.03298v2}},
}
\bib{BarSza1}{article}{
label={BaSz},
author={Barlak, Sel\c {c}uk},
author={Szab\'{o}, G\'{a}bor},
title={Sequentially split $*$-homomorphisms between $C^*$-algebras},
note={\href {http://arxiv.org/abs/1510.04555}{arxiv:1510.04555v2}},
}
\bib{BD}{article}{
author={Bunce, John W.},
author={Deddens, James A.},
title={A family of simple $C^*$-algebras related to weighted shift operators},
journal={J. Funct. Anal.},
volume={19},
year={1975},
pages={13--24},
}
\bib{BaHLR}{article}{
author={Brownlowe, Nathan},
author={an Huef, Astrid},
author={Laca, Marcelo},
author={Raeburn, Iain},
title={Boundary quotients of the {T}oeplitz algebra of the affine semigroup over the natural numbers},
journal={Ergodic Theory Dynam. Systems},
volume={32},
year={2012},
number={1},
pages={35--62},
issn={0143-3857},
doi={\href {http://dx.doi.org/10.1017/S0143385710000830}{10.1017/S0143385710000830}},
}
\bib{BLS1}{article}{
author={Brownlowe, Nathan},
author={Larsen, Nadia S.},
author={Stammeier, Nicolai},
title={On $C^*$-algebras associated to right LCM semigroups},
journal={Trans. Amer. Math. Soc.},
year={2016},
doi={\href {http://dx.doi.org/10.1090/tran/6638}{10.1090/tran/6638}},
}
\bib{BLS2}{article}{
author={Brownlowe, Nathan},
author={Larsen, Nadia S.},
author={Stammeier, Nicolai},
title={$C^*$-algebras of algebraic dynamical systems and right LCM semigroups},
note={\href {http://arxiv.org/abs/1503.01599}{arxiv:1503.01599v1}},
}
\bib{BRRW}{article}{
author={Brownlowe, Nathan},
author={Ramagge, Jacqui},
author={Robertson, David},
author={Whittaker, Michael F.},
title={Zappa-{S}z\'{e}p products of semigroups and their $C^*$-algebras},
journal={J. Funct. Anal.},
volume={266},
year={2014},
number={6},
pages={3937--3967},
issn={0022-1236},
doi={\href {http://dx.doi.org/10.1016/j.jfa.2013.12.025}{10.1016/j.jfa.2013.12.025}},
}
\bib{bsBQforADS}{article}{
label={BrSt},
author={Brownlowe, Nathan},
author={Stammeier, Nicolai},
title={The boundary quotient for algebraic dynamical systems},
journal={J. Math. Anal. Appl.},
volume={438},
year={2016},
number={2},
pages={772--789},
doi={\href {http://dx.doi.org/10.1016/j.jmaa.2016.02.015}{10.1016/j.jmaa.2016.02.015}},
}
\bib{Com}{article}{
author={Combes, Fran\c {c}ois},
title={Crossed products and {M}orita equivalence},
journal={Proc. Lond. Math. Soc. (3)},
volume={49},
year={1984},
number={2},
pages={289--306},
issn={0024-6115},
doi={\href {http://dx.doi.org/10.1112/plms/s3-49.2.289}{10.1112/plms/s3-49.2.289}},
}
\bib{CrispLaca}{article}{
author={Crisp, John},
author={Laca, Marcelo},
title={Boundary quotients and ideals of {T}oeplitz {$C^*$}-algebras of {A}rtin groups},
journal={J. Funct. Anal.},
volume={242},
year={2007},
number={1},
pages={127--156},
issn={0022-1236},
doi={\href {http://dx.doi.org/10.1016/j.jfa.2006.08.001}{10.1016/j.jfa.2006.08.001}},
}
\bib{CunPV}{article}{
author={Cuntz, Joachim},
title={A class of $C^*$-algebras and topological Markov chains II. Reducible chains and the Ext-functor for $C^*$-algebras},
journal={Invent. Math.},
year={1981},
pages={25--40},
volume={63},
doi={\href {http://dx.doi.org/10.1007/BF01389192}{10.1007/BF01389192}},
}
\bib{CuntzQ}{article}{
author={Cuntz, Joachim},
title={$C^*$-algebras associated with the $ax+b$-semigroup over $\mathbb {N}$},
conference={ title={$K$-theory and noncommutative geometry}, },
book={ series={EMS Ser. Congr. Rep.}, publisher={Eur. Math. Soc., Z\"urich}, },
date={2008},
pages={201--215},
doi={\href {http://dx.doi.org/10.4171/060-1/8}{10.4171/060-1/8}},
}
\bib{CLintegral2}{article}{
author={Cuntz, Joachim},
author={Li, Xin},
title={{$C^*$}-algebras associated with integral domains and crossed products by actions on adele spaces},
journal={J. Noncommut. Geom.},
volume={5},
year={2011},
number={1},
pages={1--37},
issn={1661-6952},
doi={\href {http://dx.doi.org/10.4171/JNCG/68}{10.4171/JNCG/68}},
}
\bib{CuntzVershik}{article}{
author={Cuntz, Joachim},
author={Vershik, Anatoly},
title={{$C^*$}-algebras associated with endomorphisms and polymorphisms of compact abelian groups},
year={2013},
issn={0010-3616},
journal={Comm. Math. Phys.},
volume={321},
number={1},
doi={\href {http://dx.doi.org/10.1007/s00220-012-1647-0}{10.1007/s00220-012-1647-0}},
publisher={Springer-Verlag},
pages={157-179},
}
\bib{enders1}{article}{
author={Enders, Dominic},
title={Semiprojectivity for Kirchberg algebras},
note={\href {http://arxiv.org/abs/1507.06091}{arxiv:1507.06091v1}},
}
\bib{Evans}{article}{
author={Evans, D. Gwion},
title={On the {$K$}-theory of higher rank graph {$C\sp *$}-algebras},
journal={New York J. Math.},
volume={14},
year={2008},
pages={1--31},
issn={1076-9803},
note={\href {http://nyjm.albany.edu:8000/j/2008/14_1.html}{nyjm:8000/j/2008/14\textunderscore 1}},
}
\bib{FS}{article}{
author={Fowler, Neal J.},
author={Sims, Aidan},
title={Product systems over right-angled {A}rtin semigroups},
journal={Trans. Amer. Math. Soc.},
volume={354},
year={2002},
number={4},
pages={1487--1509},
issn={0002-9947},
doi={\href {http://dx.doi.org/10.1090/S0002-9947-01-02911-7}{10.1090/S0002-9947-01-02911-7}},
}
\bib{Gl}{article}{
author={Glimm, James G.},
title={On a certain class of operator algebras},
journal={Trans. Amer. Math. Soc.},
volume={95},
year={1960},
pages={318--340},
}
\bib{hr}{book}{
author={Hewitt, Edwin},
author={Ross, Kenneth A.},
title={Abstract harmonic analysis. Vol.~I},
series={Grundlehren Math. Wiss.},
volume={115},
edition={2},
note={Structure of topological groups, integration theory, group representations},
publisher={Springer-Verlag, Berlin-New York},
date={1979},
pages={ix+519},
isbn={3-540-09434-2},
}
\bib{Hir}{article}{
author={Hirshberg, Ilan},
title={On $C^*$-algebras associated to certain endomorphisms of discrete groups},
journal={New York J. Math.},
volume={8},
year={2002},
pages={99--109},
issn={1076-9803},
note={\href {http://nyjm.albany.edu:8000/j/2002/8_99.html}{nyjm.albany.edu:8000/j/2002/8\textunderscore 99}},
}
\bib{KOQ}{article}{
author={Kaliszewski, Steve},
author={Omland, Tron},
author={Quigg, John},
title={Cuntz-Li algebras from $a$-adic numbers},
journal={Rev. Roumaine Math. Pures Appl.},
volume={59},
date={2014},
number={3},
pages={331--370},
issn={0035-3965},
}
\bib{kasparov}{article}{
author={Kasparov, Gennadi},
title={Equivariant $KK$-theory and the Novikov conjecture},
journal={Invent. Math.},
volume={91},
date={1988},
number={1},
pages={147--201},
issn={0020-9910},
doi={\href {http://dx.doi.org/10.1007/BF01404917}{10.1007/BF01404917}},
}
\bib{KatsuraIV}{article}{
author={Katsura, Takeshi},
title={A class of {$C^*$}-algebras generalizing both graph algebras and homeomorphism {$C^*$}-algebras. {IV}. {P}ure infiniteness},
journal={J. Funct. Anal.},
volume={254},
year={2008},
number={5},
pages={1161--1187},
issn={0022-1236},
doi={\href {http://dx.doi.org/10.1016/j.jfa.2007.11.014}{10.1016/j.jfa.2007.11.014}},
}
\bib{Kir}{article}{
author={Kirchberg, Eberhard},
title={The classification of purely infinite C*-algebras using Kasparov's theory},
journal={to appear in Fields Inst. Commun, Amer. Math. Soc., Providence, RI},
}
\bib{KP}{article}{
author={Kumjian, Alex},
author={Pask, David},
title={Higher rank graph {$C^*$}-algebras},
journal={New York J. Math.},
volume={6},
year={2000},
pages={1--20},
issn={1076-9803},
note={\href {http://nyjm.albany.edu:8000/j/2000/6_1.html}{nyjm:8000/j/2000/6\textunderscore 1}},
}
\bib{Lac}{article}{
author={Laca, Marcelo},
title={From endomorphisms to automorphisms and back: dilations and full corners},
journal={J. Lond. Math. Soc. (2)},
volume={61},
year={2000},
number={3},
pages={893--904},
doi={\href {http://dx.doi.org/10.1112/S0024610799008492}{10.1112/S0024610799008492}},
}
\bib{LacRae}{article}{
author={Laca, Marcelo},
author={Raeburn, Iain},
title={Semigroup crossed products and the {T}oeplitz algebras of nonabelian groups},
journal={J. Funct. Anal.},
volume={139},
year={1996},
number={2},
pages={415--440},
issn={0022-1236},
doi={\href {http://dx.doi.org/10.1006/jfan.1996.0091}{10.1006/jfan.1996.0091}},
}
\bib{LarsenLi}{article}{
author={Larsen, Nadia S.},
author={Li, Xin},
title={The $2$-adic ring {$C^*$}-algebra of the integers and its representations},
journal={J. Funct. Anal.},
volume={262},
year={2012},
number={4},
pages={1392--1426},
doi={\href {http://dx.doi.org/10.1016/j.jfa.2011.11.008}{10.1016/j.jfa.2011.11.008}},
}
\bib{Li1}{article}{
author={Li, Xin},
title={Semigroup {$C^*$}-algebras and amenability of semigroups},
journal={J. Funct. Anal.},
volume={262},
year={2012},
number={10},
pages={4302--4340},
issn={0022-1236},
doi={\href {http://dx.doi.org/10.1016/j.jfa.2012.02.020}{10.1016/j.jfa.2012.02.020}},
}
\bib{LN2}{article}{
author={Li, Xin},
author={Norling, Magnus D.},
title={Independent resolutions for totally disconnected dynamical systems {II}: {$C^*$}-algebraic case},
journal={J. Operator Theory},
volume={75},
year={2016},
number={1},
pages={163--193},
note={\href {http://www.theta.ro/jot/archive/2016-075-001/2016-075-001-009.pdf}{2016-075-001/2016-075-001-009}},
}
\bib{loring1}{book}{
author={Loring, Terry A.},
title={Lifting solutions to perturbing problems in $C^*$-algebras},
series={Fields Inst. Monogr.},
volume={8},
publisher={Amer. Math. Soc., Providence, RI},
date={1997},
pages={x+165},
isbn={0-8218-0602-5},
}
\bib{massey1}{article}{
author={Massey, William S.},
title={Exact couples in algebraic topology.~I, II},
journal={Ann. of Math. (2)},
volume={56},
date={1952},
pages={363--396},
issn={0003-486X},
}
\bib{massey2}{article}{
author={Massey, William S.},
title={Exact couples in algebraic topology.~III, IV, V},
journal={Ann. of Math. (2)},
volume={57},
date={1953},
pages={248--286},
issn={0003-486X},
}
\bib{Nic}{article}{
author={Nica, Alexandru},
title={$C^*$-algebras generated by isometries and {W}iener-{H}opf operators},
journal={J. Operator Theory},
volume={27},
year={1992},
number={1},
pages={17--52},
issn={0379-4024},
}
\bib{Oml}{article}{
author={Omland, Tron},
title={$C^*$-algebras associated with $a$-adic numbers},
conference={ title={Operator algebra and dynamics}, },
book={ series={Springer Proc. Math. Stat.}, volume={58}, publisher={Springer, Heidelberg}, },
date={2013},
pages={223--238},
doi={\href {http://dx.doi.org/10.1007/978-3-642-39459-1_11}{10.1007/978-3-642-39459-1\textunderscore 11}},
}
\bib{Pas}{article}{
author={Paschke, William L.},
title={$K$-theory for actions of the circle group on $C^*$-algebras},
journal={J. Operator Theory},
volume={6},
number={1},
year={1981},
pages={125--133},
}
\bib{Phi}{article}{
author={Phillips, N. Christopher},
title={A classification theorem for nuclear purely infinite simple $C^*$-algebras},
journal={Doc. Math.},
volume={5},
year={2000},
pages={49--114},
issn={1431-0635},
}
\bib{PV}{article}{
author={Pimsner, Mihai V.},
author={Voiculescu, Dan-Virgil},
title={Exact sequences for $K$-groups and Ext-groups of certain cross-product $C^*$-algebras},
journal={J. Operator Theory},
volume={4},
number={1},
year={1980},
pages={93--118},
}
\bib{RS}{article}{
author={Raeburn, Iain},
author={Szyma{\'n}ski, Wojciech},
title={Cuntz-Krieger algebras of infinite graphs and matrices},
journal={Trans. Amer. Math. Soc.},
volume={356},
number={1},
pages={39--59},
year={2004},
doi={\href {http://dx.doi.org/10.1090/S0002-9947-03-03341-5}{10.1090/S0002-9947-03-03341-5}},
}
\bib{rordam-zd}{article}{
author={R{\o }rdam, Mikael},
title={Classification of nuclear, simple $C^*$-algebras},
conference={ title={Classification of nuclear $C^*$-algebras. Entropy in operator algebras} },
book={ series={Encyclopaedia Math. Sci.}, volume={126}, publisher={Springer, Berlin}, },
date={2002},
pages={1--145},
doi={\href {http://dx.doi.org/10.1007/978-3-662-04825-2_1}{10.1007/978-3-662-04825-2\textunderscore 1}},
}
\bib{savinienBellissard}{article}{
author={Savinien, Jean},
author={Bellissard, Jean},
title={A spectral sequence for the $K$-theory of tiling spaces},
journal={Ergodic Theory Dynam. Systems},
volume={29},
date={2009},
number={3},
pages={997--1031},
issn={0143-3857},
doi={\href {http://dx.doi.org/10.1017/S0143385708000539}{10.1017/S0143385708000539}},
}
\bib{schochet}{article}{
author={Schochet, Claude L.},
title={Topological methods for {$C^*$}-algebras.~I. Spectral sequences},
year={1981},
journal={Pacific J. Math.},
volume={96},
number={1},
pages={193--211},
note={\href {http://projecteuclid.org/euclid.pjm/1102734956}{euclid.pjm/1102734956}},
}
\bib{Sta1}{article}{
author={Stammeier, Nicolai},
title={On {$C^*$}-algebras of irreversible algebraic dynamical systems},
journal={J. Funct. Anal.},
volume={269},
year={2015},
number={4},
pages={1136--1179},
doi={\href {http://dx.doi.org/10.1016/j.jfa.2015.02.005}{10.1016/j.jfa.2015.02.005}},
}
\bib{Sta3}{article}{
author={Stammeier, Nicolai},
title={A boundary quotient diagram for right LCM semigroups},
note={\href {http://arxiv.org/abs/1604.03172}{arxiv:1604.03172}},
}
\bib{Wil}{book}{
author={Williams, Dana P.},
title={Crossed products of $C^*$-algebras},
series={Math. Surveys Monogr.},
volume={134},
publisher={Amer. Math. Soc., Providence, RI},
date={2007},
pages={xvi+528},
isbn={978-0-8218-4242-3},
isbn={0-8218-4242-0},
doi={\href {http://dx.doi.org/10.1090/surv/134}{10.1090/surv/134}},
}
\bib{Z}{article}{
author={Zhang, Shuang},
title={Certain $C^*$-algebras with real rank zero and their corona and multiplier algebras.~I},
journal={Pacific J. Math.},
volume={155},
number={1},
year={1992},
pages={169--197},
note={\href {http://projecteuclid.org/euclid.pjm/1102635475}{euclid.pjm/1102635475}},
}
\end{biblist}
\end{document} |
\begin{document}
\title{Atom-Resonant Heralded Single Photons by ``Interaction-Free Measurement''}
\author{Florian Wolfgramm, Yannick A. de Icaza Astiz, Federica A. Beduini, Alessandro Cer\`{e}, and Morgan W. Mitchell}
\affiliation{ICFO - Institut de Ciencies Fotoniques, Mediterranean
Technology Park, 08860 Castelldefels (Barcelona), Spain}
\date{1 February 2010}
\begin{abstract}
We demonstrate the generation of rubidium-resonant heralded single
photons for quantum memories. Photon pairs are created by
cavity-enhanced down-conversion and narrowed in bandwidth to 7~MHz
with a novel atom-based filter operating by ``interaction-free
measurement'' principles. At least 94\% of the heralded photons
are atom-resonant as demonstrated by a direct absorption
measurement with rubidium vapor. A heralded auto-correlation
measurement shows $g_c^{(2)}(0)=0.040 \pm 0.012$, i.e.,
suppression of multi-photon contributions by a factor of 25
relative to a coherent state. The generated heralded photons can
readily be used in quantum memories and quantum networks.
\end{abstract}
\pacs{42.50.Dv, 42.50.Ar, 42.65.Lm, 42.65.Yj}
\maketitle
\PRLsection{Introduction.} The availability of single photons is a
crucial requirement in quantum information, quantum communication
and quantum metrology. For quantum networks, the photons
(\emph{flying qubits}) should be resonant with atoms
(\emph{stationary qubits}) for storage and/or processing. For this
reason, it has been an important goal of quantum optics to produce
high-purity single photons capable of interaction with atoms.
While there exist a number of different single-photon sources,
most of these do not fulfill all necessary requirements
\footnote{A review of narrow-band generation methods and their
limitations is provided in Reference
\cite{Neergaard-Nielsen2007}}. The most widely used heralded
single-photon source, spontaneous parametric down-conversion
(SPDC) \cite{Grangier1986,Fasel2004}, produces photons with a
spectral width orders of magnitude larger than typical atomic
natural linewidths. Passive filtering of SPDC photons is possible
and has been demonstrated \cite{Piro2010}, but shows low count
rates that are not sufficient for many tasks. Cavity-enhancement
of the down-conversion process has established itself in recent
years as a method to not only enhance the total photon rate, but
at the same time to enhance the emission into the spectral and
spatial modes of the cavity, producing high-purity photon states
at high rates
\cite{Ou1999,Kuklewicz2006,Neergaard-Nielsen2006,Neergaard-Nielsen2007,
Scholz2007,Wolfgramm2008,Bao2008,Scholz2009}.
\\
Bocquillon et al. \cite{Bocquillon2009} identify two critical
figures of merit for heralded single-photon sources. The first,
$g_{S,I}^{(2)}(\tau)$, describes the cross-correlation of signal
and idler beams, a measure of reliability of the heralding
mechanism. The second, $g_c^{(2)}(\tau)$, describes the
conditional auto-correlation of the signal beam, a measure of the
single-photon character of the heralded state. $g_c^{(2)}(0)<1$
indicates non-classical behavior; $g_c^{(2)}(0)=0$ for an ideal
source.
\\
Experiments using a cavity-enhanced, but unfiltered, source
\cite{Scholz2009} have demonstrated $g^{(2)}(0)<1$, but work in a
regime where many longitudinal frequency modes, spread over tens
of GHz, contribute to the signal. A cavity-enhanced source with
optical-cavity filtering of the heralding (idler) beam and
homodyne detection of the signal produced highly non-classical
states: 70\% of the heralded pulses contained a single photon in
the mode to which the detection was sensitive
\cite{Neergaard-Nielsen2006,Neergaard-Nielsen2007}. Undetected
modes, however, contained photons spread over a large bandwidth. A
recent experiment reports nearly 10\% efficient atom-storage of
beams from filtered cavity-enhanced SPDC, implying at least 10\%
atom-resonance, but made no measurement of $g_c^{(2)}(0)$
\cite{Jin2010}. To date, no SPDC single-photon source has
demonstrated atom-resonance of more than a small fraction of its
output.
\\
Here we demonstrate the generation of atom-resonant heralded
single photons with high spectral purity: within the detection
window of 400--1000~nm (450~THz), at least $94\%$ of the photons
are in a single, 7~MHz-bandwidth mode at the D$_1$ line of
$^{87}$Rb. Multi-photon contamination is at most $4\%$. We achieve
this using an atom-based filter, inspired by the
``interaction-free measurement'' (IFM) strategy of Elitzur and
Vaidman \cite{Elitzur1993} (also known as ``quantum
interrogation'' \cite{Kwiat1999}). The IFM proposal is based on a
balanced Mach-Zehnder interferometer in which due to destructive
interference one of the two output ports is dark. The presence of
an opaque object in either interferometer arm changes the
interference and thus increases the probability of a photon
exiting through the dark port. In our filtering scheme the object
is a hot atomic vapor which is opaque at the transition
frequencies. This guarantees the frequency of photons exiting
through the dark port to be at an atomic transition.
\\
IFM experiments have been proposed and demonstrated in different
systems \cite{Paraoanu2006} and for a variety of applications
including imaging \cite{White1998} and quantum computing
\cite{Hosten2006,Mitchison2007,Vaidman2007}.
\\
Intrinsic stability and intrinsic atom-resonance make our system
robust and attractive for quantum networking applications. Our IFM
filtering technique could be used also with solid-state ensembles
\cite{Riedmatten2008,Hedges2010}.
\\
\PRLsection{Experimental setup.} The experiment combines a
cavity-enhanced down-conversion source locked to a rubidium
transition, described in detail in \cite{Wolfgramm2008,
Wolfgramm2010} and an intrinsically atom-resonant narrow-band
filter, described in \cite{Cere2009}. The setup is shown
schematically in Fig.~\ref{img:Setup}.
\\
A single-frequency diode laser is locked to the
5$^{2}$S$_{1/2}$(F=2)$\rightarrow$5$^{2}$P$_{1/2}$(F'=1)
transition of the D$_1$ line of $^{87}$Rb. Part of the laser
output is frequency doubled and pumps the cavity-enhanced
down-conversion system at a typical pump power of 25~mW. Type-II
phase-matched down conversion takes place in a 2~cm-long
phase-matched periodically poled potassium titanyl phosphate
(PPKTP) crystal. Another KTP crystal (neither pumped nor
phase-matched) inside the cavity is temperature tuned to achieve
simultaneous resonance of signal and idler modes. The type-II
process generates photon pairs with mutually perpendicular
polarizations. This allows for straightforward separation of
signal and idler photon and also for easy generation of
polarization entanglement. A locking beam from the same diode
laser, and therefore also at the same rubidium transition
frequency, is used to stabilize the cavity length. In this way, we
guarantee the presence of frequency-degenerate cavity modes at the
atomic transition frequency. After leaving the cavity, the
generated photon pairs are coupled into a single-mode fiber.
\begin{figure}
\caption{Experimental setup. SHG, second harmonic generation
cavity; PPKTP, phase-matched nonlinear crystal; KTP, compensating
crystal; YVO4, Yttrium Vanadate crystal; HWP, half wave plate;
APD, avalanche photo-diode. (a)--(c), different measurement
scenarios for the signal photon detection.}
\label{img:Setup}
\end{figure}
The pumped, nonlinear cavity acts as a sub-threshold optical
parametric oscillator and generates resonant pairs of modes. With
the cavity locked and doubly-resonant for signal and idler modes,
the output spectrum is determined by the 148~GHz phase-matching
bandwidth of the down-conversion process. Within this envelope the
spectrum consists of hundreds of non-degenerate frequency modes
spaced by the free spectral range of 490~MHz, centered around the
degenerate mode at the rubidium transition frequency.
To achieve filtering that guarantees a high ratio between
degenerate and non-degenerate modes, e.g., a signal-to-noise ratio
of 90\%, requires an extinction ratio of several thousand, over a
bandwidth of hundreds of GHz. In principle this filtering can be
achieved with optical cavities. Consecutive cavities with
incommensurate free spectral ranges have been used in other
experiments
\cite{Neergaard-Nielsen2006,Neergaard-Nielsen2007,Piro2010}, but
do not appear to reach high rejection ratios. Neergaard-Nielsen et
al. report a 20\% discrepancy in effective signal detection
efficiency, and give ``insufficient suppression of uncorrelated
frequency modes in the series of trigger filters'' as a likely
explanation \cite{Neergaard-Nielsen2007}. A small misalignment or
aberration would be sufficient to couple into higher modes and
spoil the extinction ratio.
In contrast, our filter operates by principles of
``interaction-free measurement'' \cite{Elitzur1993,Kwiat1995a} and
combines extremely broadband optics (birefringent polarizers) with
extremely narrow-band optics (atoms) with a large angular
acceptance, thus practically insensitive to mode misalignment.
As shown in Fig.~\ref{img:Setup}, a YVO$_4$ crystal separates
horizontally and vertically polarized photons by 1 mm. The
polarization modes travel parallel to each other through a hot
rubidium cell of isotopically pure $^{87}$Rb, optically pumped by
a single-frequency laser resonant to the F=2$\rightarrow$F'=3
transition of the D$_2$ line of $^{87}$Rb (not shown). Due to
Doppler shifts, the optical pumping only affects a portion of the
thermal velocity distribution, and creates a circular dichroism
with a sub-Doppler linewidth of about 80~MHz. A second YVO$_4$
crystal introduces a second relative displacement, which can
re-combine or further separate the photons, depending on
polarization. Separated photons are collected, while re-combined
photons are blocked. A half wave plate is used to switch between
the ``active'' configuration, in which only photons that change
polarization in the cell are collected, and the ``inactive''
configuration, in which photons that do not change are collected.
In the ``active'' configuration, the system acts as an IFM
detector for polarized atoms: a photon is collected only if it
experiences a polarization change, i.e., if it is resonant with
the optically pumped atoms, which absorb one circular component of
the photon polarization state. Neighboring modes of the degenerate
mode at the rubidium transition are already 490~MHz detuned and
therefore outside of the filter linewidth of 80~MHz. The
out-of-band extinction ratio is $\geq$35~dB. The filter
transmission is optimized by adjusting the overlap between pump
and single-photon mode, the rubidium vapor temperature and the
magnitude of a small orienting applied magnetic field. The
temperature is set to 65$^{\circ}$C, which corresponds to an
atomic density of $5\cdot 10^{11}$ cm$^{-3}$. The measured filter
transmission of 10.0\% for horizontal polarization and 9.5\% for
vertical polarization is limited by pump power and in principle
can reach 25\% \cite{Cere2009}. To avoid contamination of the
single-photon mode by scattered pump light, the pump enters the
vapor cell at a small angle and counter-propagating to the
single-photon mode. Interference filters centered on 795~nm
further reject the 780~nm pump light with an extinction ratio of
$>$$10^{5}$. The measured contribution from pump photons is below
the detectors' dark count rate. Each output is coupled into
single-mode fiber. One is detected directly on a fiber-coupled
avalanche photo diode (APD, Perkin Elmer SPCM-AQ4C). The other is
used for subsequent experiments. Photon detections are recorded by
a counting board (FAST ComTec P7888) for later analysis.
\\
\PRLsection{Time-correlation measurements.} First, the time
distribution of the difference in arrival time between signal and
idler photons is analyzed in absence of the filter
(Fig.~\ref{img:Setup}(a)). We follow the theory developed in
\cite{Fasel2004,Herzog2008,Scholz2009,Bocquillon2009}. The
cross-correlation function between signal and idler modes is
\begin{equation}
g_{S,I}^{(2)}(\tau) \equiv \frac{\langle E_S^{\dag}(t+\tau)
E_I^{\dag}(t) E_I(t) E_S(t+\tau) \rangle}{\langle E_I^{\dag}(t)
E_I(t) \rangle
\langle E_S^{\dag}(t+\tau) E_S(t+\tau) \rangle},
\end{equation}
where $E_{S,I}$ are the operators of the signal and idler fields.
In the case of doubly-resonant cavity-enhanced down-conversion it
takes this form:
\begin{equation}
\begin{split}
g_{S,I}^{(2)}(\tau) \propto {} & \Bigg| \sum_{m_S, m_I = 0}^\infty \frac{\sqrt{\gamma_S \, \gamma_I \, \omega_S \, \omega_I}}{\Gamma_S + \Gamma_I} \\
& \times
\begin{cases}
e^{-2 \pi \Gamma_S (\tau-(\tau_0/2))}{\rm sinc}{(i \pi \tau_0 \Gamma_S)} \hspace{3 mm} \hspace{0.5 mm} \tau \geqslant \frac{\tau_0}{2}\\
e^{+2 \pi \Gamma_I (\tau-(\tau_0/2))}{\rm sinc}{(i \pi \tau_0 \Gamma_I)} \hspace{4 mm} \hspace{0.5 mm} \tau < \frac{\tau_0}{2}
\end{cases} \hspace{-4 mm} \Bigg|^2,
\end{split}
\label{eq:cross}
\end{equation}
where $\gamma_{S,I}$ are the cavity damping rates for signal ($S$)
and idler ($I$), $\omega_{S,I}$ are the central frequencies,
$\tau_0$ is difference between the transit times of a signal and
idler photon through the SPDC crystal, $\Gamma_{S,I} \equiv
\gamma_{S,I}/2+i m_{S,I} \Delta \omega_{S,I}$ with mode indices
$m_{S,I}$ and free spectral ranges $\Delta \omega_{S,I}$
\cite{Scholz2009,Herzog2008}. Due to compensation, $\Delta\omega_S
= \Delta\omega_I \equiv \Delta\omega$ in our cavity.
We first measure the $g_{S,I}^{(2)}(\tau)$-function with the
filter in the ``inactive'' configuration at a much reduced pump
power. The histogram of the difference in arrival time between
detection events in the two APDs is shown in
Fig.~\ref{img:Unfiltered}.
\begin{figure}
\caption{Arrival time histogram of unfiltered photon pairs;
experimental data (upper bars) and theory (lower bars). The
frequency-comb structure is reflected by a comb-like structure in
the temporal domain. The visibility of the experimental data is
limited by the time resolution of the counting electronics.}
\label{img:Unfiltered}
\end{figure}
The blue bars represent the coincidence event detections within
time bins of 1~ns, the resolution of the counting board. The black
bars, drawn inverted for better visibility, show the theoretical
prediction based on Eq.~(\ref{eq:cross}). The height of the theory
histogram, the only free parameter, has been set to match the
height of the data. Experimental and theoretical results are in
excellent agreement. The comb-like structure of the histogram is a
consequence of interference between different frequency modes. The
temporal spacing between neighboring peaks corresponds to the
cavity round-trip time $1/\Delta\omega \approx$ 2.04~ns.
\\
When the filter is ``active'', the arrival time difference
histogram shows a smooth double-exponential shape, without
multi-mode interference (Fig.~\ref{img:Filtered}).
\begin{figure}
\caption{Arrival time histogram of filtered photon pairs;
experimental data (upper bars) and theory (lower bars). The
disappearance of the comb structure in the filtered case indicates
the single-mode character of the filtered fields.}
\label{img:Filtered}
\end{figure}
This already indicates that only a single frequency mode is
transmitted through the filter. The theory (lower black bars) is
given by Eq.~(\ref{eq:cross}) for a single mode
($\Gamma_{S,I}$=$\gamma_{S,I}/2$). The data shows a very low
background noise level. Throughout, raw data are shown; background
coincidences have not been subtracted.
\\
In this experiment we are interested in time correlations, but it
is interesting to ask if other kinds of correlations and possible
entanglement, e.g. in polarization or in frequency, are also
preserved by the filter. By design, the filter should transmit
nearly equally different frequency and polarization components of
the selected cavity mode, preserving correlations: absorptive and
refractive effects vary on the scale of the 80~MHz absorption
linewidth, large relative to the 7~MHz of the cavity mode. Also,
the axial magnetic field scrambles any linear birefringence or
dichroism, giving equal response for the two linear polarizations.
Preliminary results indicate that the degree of polarization as
well as the entanglement in a polarization entangled state are not
changed significantly by the filter. A detailed study of this will
be the subject of a future publication.
\PRLsection{Atom-resonance.} To measure the atom-resonant
fraction, we let the filtered photons of the signal arm propagate
through a rubidium vapor cell (Fig.~\ref{img:Setup}(b)). At room
temperature, the cell's optical density (OD) is low (0.3)
corresponding to a transmission of 74\% and coincidences between
the detection events on the two APDs are observed
(Fig.~\ref{img:Cell}, upper green bars). By heating the rubidium
cell, an optical density of 6, or 0.25\% resonant transmission, is
reached. The coincidences drop to the background level
(Fig.~\ref{img:Cell}, lower black bars).
\begin{figure}
\caption{Arrival time histogram of filtered photon pairs after
passing the signal photons through a rubidium vapor cell at an
optical density of 0.3 (upper green bars) and at an optical
density of 6 (lower black bars).}
\label{img:Cell}
\end{figure}
Within a coincidence window of 40~ns, the ratio of raw OD 0.3
coincidences to raw OD 6 coincidences is 11.6:1, indicating
rubidium resonance of at least 94\% of the photons.
\PRLsection{Suppression of multi-photon events.} The signal
auto-correlation function, given a trigger detection of the idler,
is \cite{Fasel2004, Bocquillon2009, Scholz2009}
\begin{equation}
g_c^{(2)}(\tau)=\frac{\langle E_S^{\dag}(t+\tau) E_S^{\dag}(t)
E_S(t) E_S(t+\tau) \rangle}{\langle E_S^{\dag}(t) E_S(t) \rangle
\langle E_S^{\dag}(t+\tau) E_S(t+\tau) \rangle}. \label{eq:auto}
\end{equation}
The crucial figure of merit is the value of the auto-correlation
function {of signal photons} $g_c^{(2)}(\tau)$ at $\tau=0$. We
measure $g_c^{(2)}(0)$ as follows: the signal mode is split by a
50/50 beam splitting fiber and the coincidences between the idler
detector (APD1) and the two signal detectors (APD2 and APD3) are
analyzed (Fig.~\ref{img:Setup}(c)). The detection of an idler
photon defines a coincidence window of 40~ns, symmetrical around
the detection time. Individual and coincident detections in this
time window give singles counts $N_2, N_3$, while detections at
both APD2 and APD3 give the coincidence count $N_{23}$. $N_{23}$
corresponds to unwanted multi-photon contributions which are very
low in our experiment. To accurately estimate $N_{23}$, we measure
for large coincidence windows of up to 2000~ns, extrapolate down
to 40~ns, and multiply by two, to account for possible bunching
\cite{Bocquillon2009,Scholz2009}. We then calculate
\begin{equation}
g_c^{(2)}(0) \approx \frac{N_{23}N_1}{N_2 N_3},
\end{equation}
where $N_1$ is the number of idler trigger events
\cite{Grangier1986,Fasel2004,Bocquillon2009}. We note that this
gives an upper limit for $g_c^{(2)}(0)$, due to the conservative
bunching factor and the finite time window. We find
$g_c^{(2)}(0)\le 0.040 \pm 0.012$, 80 standard deviations below
the classical limit of 1.
\PRLsection{Summary.} Using an ultra-bright cavity-enhanced
down-conversion source and an atom-based filter operating by
``interaction-free measurement'' principles, we have generated for
the first time narrow-band, high-spectral purity, atom-resonant
heralded single photons from SPDC. Of the generated photons, 94\%
are resonant to a rubidium transition frequency. A
$g_c^{(2)}$-measurement shows an upper limit of
$g_c^{(2)}(0)=0.040\pm0.012$ corresponding to a reduction of
multiple photon events by a factor of at least 25 compared to a
coherent state. The source is an ideal tool for atom-photon
interactions at the single-photon level, for quantum memories in
EIT media \cite{Eisaman2005} and solid-state systems
\cite{Riedmatten2008,Hedges2010} and single-photon single-atom
interfaces \cite{Tey2008,Piro2010}.
\begin{acknowledgments}
We acknowledge useful discussions with P. Kwiat, H. de Riedmatten
and C. Vitelli. This work was supported by the Spanish Ministry of
Science and Innovation under the Consolider-Ingenio 2010 Project
``Quantum Optical Information Technologies'' and the ILUMA project
(No. FIS2008-01051) and by an ICFO-OCE collaborative research
program. F.~W. is supported by the Commission for Universities and
Research of the Department of Innovation, Universities and
Enterprises of the Catalan Government and the European Social
Fund.
\end{acknowledgments}
\end{document}
\begin{document}
\title{SFCM-R: A novel algorithm for the hamiltonian sequence problem}
\author{ Cícero A. de Lima \footnote{email: cicero.lima.id@gmail.com, orcid: 0000-0002-3117-3065}}
\maketitle
\begin{abstract}
A hamiltonian sequence is a path walk $P$ that can be a hamiltonian path or hamiltonian circuit. Determining whether such a hamiltonian sequence exists in a given graph $G=(V,E)$ is an NP-Complete problem. In this paper, a novel algorithm for the hamiltonian sequence problem is proposed. The proposed algorithm assumes that $G$ has potential forbidden minors that prevent a potential hamiltonian sequence $P^\prime$ from being a hamiltonian sequence. The algorithm's goal is to degenerate such potential forbidden minors in a two-phase process. In the first phase, the algorithm passes through $G$ in order to construct a potential hamiltonian sequence $P^\prime$ with the aim of degenerating these potential forbidden minors. The algorithm, in turn, tries to reconstruct $P^\prime$ in the second phase by using a goal-oriented approach.
\end{abstract}
\section{Introduction}
\label{intro}
A hamiltonian sequence is a path walk $P$ that can be a hamiltonian path or hamiltonian circuit. Determining whether such a sequence exists in a given graph is an NP-Complete problem (\cite{1}; \cite{9}). Several algorithms have been proposed to find hamiltonian sequences in a graph $G$. For example, Held and Karp (\cite{2}) proposed an algorithm that runs in $O(n^2 2^n)$ to compute a hamiltonian path by using dynamic programming. In 2014, Björklund (\cite{3}) proposed a randomized algorithm that runs in $O(1.657^n)$ to compute a hamiltonian circuit in undirected graphs.
The currently best known exact algorithm for the hamiltonian sequence problem runs in $O^{*}(2^{n-\Theta(\sqrt{n / \log{n}})})$ (\cite{10}). Despite the progress made in the hamiltonian sequence problem, a substantial improvement in the area of exact algorithms for this problem remains an open problem. Unfortunately, exact algorithms for the hamiltonian sequence problem, which is determining if hamiltonian path or hamiltonian circuit exists in a graph $G$, still run in exponential time complexity.
In this paper, a novel algorithm is proposed to solve the hamiltonian sequence problem. The goal of the proposed algorithm is to construct a potential hamiltonian sequence $P^\prime$, assuming that $G$ may have potential forbidden minors that prevent a potential hamiltonian sequence $P^\prime$ from being a hamiltonian sequence. Thus, these potential forbidden minors need to be degenerated in some state $k$ in a two-phase process by using a goal-oriented approach. Our algorithm outputs a valid hamiltonian sequence by reconstructing $P^\prime$, or aborts itself, if it is forced to use probability instead of the proposed goal-oriented approach. Hence, this study presents new techniques to solve the hamiltonian sequence problem.
The rest of the paper is organized as follows. In section 2 we list some technical conventions and provide a solid foundation for a better understanding of this paper. Finally, in section 3 we present the details of the proposed algorithm and prove its correctness.
\section{Preliminary}
\label{sec:1}
In this section, some concepts about graph theory are described. Also, this section provides a concise background needed for a better understanding of the paper.
A graph $G=(V,E)$ consists of a set $V$ of vertices and a set $E$ of edges. The vertex set of a graph $G$ is referred to as $V(G)$, its edge set as $E(G)$. The number of elements of any set $X$ is written as $|X|$. Each edge $e \in E$ is undirected and joins two vertices $u, v \in V$, denoted by $e=uv$. To represent adjacency between two vertices we use the notation $u \sim v$. $u \sim S$ is used to represent the adjacency between $u$ and at least one vertex $w \in S$, $S \subseteq V$. The set of neighbors of a vertex $v \in V(G)$ is denoted by $N(v)$. $G[U]$ is a subgraph of $G$ \textit{induced} by $U \subseteq V$ that contains all the edges $xy \in E$ with $x,y \in U$. $G-U$ is a subgraph obtained from $G$ by deleting all the vertices $U \cap V$ and their incident edges. If $|U|=1$ and $U=\{v\}$, we write $G-v$ rather than $G-\{v\}$. $\omega(G)$ is the number of components of a graph $G$. If $v$ is an articulation point then we will have $\omega(G-v) > \omega(G)$. The graph $G \setminus e$ is obtained from $G$ by contracting the edge $e$ and replacing its endpoints $x$, $y$ with a new vertex $v_e$, which becomes adjacent to all the former neighbors of $x$ or $y$. Formally, the resulting graph $G \setminus e$ is a graph $(V^\prime, E^\prime)$ with vertex set $V^\prime = \{(V \setminus \{x,y\}) \cup \{v_e\}\}$ and an edge set $E^\prime=\{ vw \in E \mid \{v,w\} \cap \{x,y\} = \emptyset \} \cup \{v_e w \mid xw \in E \setminus \{e\}$ or $ yw \in E \setminus \{e\} \}$ (\cite{4}).
A minor of a graph $G$ is any subgraph obtainable from $G$ by means of a sequence of vertex and edge deletions and edge contractions. A class or a family $F$ of graphs $G$ contains all graphs $G$ that possess some common characterization. Many families of graphs are minor-closed, that is, for every $G$ in $F$ every minor $G^\prime$ of $G$ also belongs to $F$. Every minor-closed family has a finite set $X$ of excluded minors (\cite{4}). For example, a major step towards deciding whether a given graph is planar is provided by Kuratowski's theorem, which states that if $G \in P$, where $P$ is the family of planar graphs, then $G$ contains no minor belonging to $X=\{K_{5},K_{3,3}\}$ (\cite{4}).
Many methods were studied to test the planarity of a graph $G$. One interesting method to determine if a graph $G$ is planar was proposed by Schmidt. This method incrementally builds planar embeddings of every 3-connected component of $G$. Schmidt studied a far-reaching generalization of canonical orderings to non-planar graphs of Lee Mondshein's PhD thesis (\cite{6}) and proposed an algorithm that computes the so-called Mondshein sequence in $O(m)$ (\cite{5}). The Mondshein sequence generalizes canonical orderings and later became independently known under the name \textit{non-separating ear decomposition} (\cite{5}).
\begin{definition}
An ear decomposition of a 2-connected graph $G=(V,E)$ is a decomposition $G=(P_0, P_1,... P_k)$ such that $P_0$ is a cycle and every $P_i,1 \leq i \leq k$ is a path that intersects $P_0 \cup P_1 \cup ... \cup P_{i-1}$ in exactly its endpoints. Each $P_i$ is called an ear. (\cite{5})
\end{definition}
Mondshein proposed to order the vertices of a graph in a sequence such that, for any $i$, the vertices from 1 to $i$ induce essentially a 2-connected graph while the remaining vertices from $i$ + 1 to $n$ induce a connected graph. For conciseness, we will stick with the following short ear-based definition of the Mondshein sequence. (\cite{5})
\begin{definition}
Let $G$ be a graph with edge $ru$. Assuming that $ru \cup tr$ is part of the outer face of G. A \textit{Mondshein sequence avoiding ru} is an ear decomposition $D$ of $G$ such that (1) $r \in P_0$, (2) $P_{birth(u)}$ is the last long ear, contains u as its only inner vertex and doesn't contain $ru$ and (3) $D$ is non-separating. (\cite{5})
\end{definition}
An ear decomposition $D$ that satisfies the conditions (1), (2) and (3) is said to avoid $ru$, so $ru$ is forced to be added last in $D$, right after the ear containing $u$ as an inner vertex (\cite{5}). If we negate the constraints (1), (2) and (3), we form the \textit{forbidden condition} of Schmidt's algorithm, seeing that such an algorithm can't ignore them. Otherwise, Schmidt's algorithm fails to produce a valid output.
\begin{definition}
A \textit{forbidden condition} $F$ of an algorithm $A$ is a set $F=\{f_0 ... f_n\}$ of sufficient conditions that makes $A$ fail to produce a valid output.
\end{definition}
Before continuing, let \textsc{Validator} be a generic hamiltonian sequence validator function that outputs \textit{true} if $P=v_i .. v_k$ with $1 \leq i \leq k$ is a hamiltonian sequence of $G$ by performing subsequent $G-v_i$ operations.
\inicioAlgoritmo{Hamiltonian sequence validator}
\Input $G=(V,E)$ , $P=v_i .. v_k$
\Output \textbf{true},\textbf{false}
\Function{Validator}{}
\State \textit{output} $\gets$ \textbf{false}
\For {\textbf{each} \textit{$v_i$} $\in P$}
\If {$\omega(G-v_i) > \omega(G)$}
\State \textbf{break}
\EndIf
\State $G-v_i$
\EndFor
\If {$|V| = 0$}
\State \textit{output} $\gets$ \textbf{true}
\EndIf
\State \Return {output}
\EndFunction
\fimAlgoritmo{}
Conditions like $|P| \neq |V(G)|$ or $v_i$ being an articulation point make \textsc{Validator} output \textit{false}. Unfortunately, such invalid conditions are useful only to test if $P$ is a hamiltonian sequence or not. There are some sufficient conditions available for a graph to possess a hamiltonian sequence (\cite{7}; \cite{8}) but there's no known non-exhaustive algorithm for a hamiltonian sequence characterization test that constructs a valid hamiltonian sequence by performing subsequent $G-v_i$ operations and throwing an error, if $G$ doesn't have any hamiltonian sequence. Likewise, there's no known forbidden condition for the hamiltonian sequence problem. At the same time, finding a hamiltonian sequence $P$ by relying on exhaustive methods is not feasible. The lack of a known forbidden condition for a hamiltonian sequence characterization test motivated this research.
In this paper, a novel algorithm called SFCM-R is proposed to solve the hamiltonian sequence problem in a different way. SFCM-R is a type of what we call \textit{Synchronization-based Forbidden Condition Mirroring (SFCM)} algorithm, which is formally defined as follows.
\begin{definition}
Let $G=(V,E)$ be a graph. The Synchronization-based Forbidden Condition Mirroring (SFCM) algorithm is an algorithm with a configuration $g: W \times F \rightarrow A$, that consists of: (1) a finite set of scenes $W = W_i...W_n$,$W_0=G$, $W_i \equiv ... \equiv W_n$, $0 \leq i \leq n$ associated to a finite set of synchronizable forbidden conditions $F=F_i... F_n$; and (2) a pair $(W_i,F_i)$, with $W_i \in W$ and $F_i \in F$, associated to each mirrorable algorithm in $A = A_i ... A_n$.
\end{definition}
\begin{definition}\emph{(Synchronizable forbidden condition)}
If $F_i \in F$ and $F_k \in F$ of $A_i \in A$ and $A_k \in A$, respectively, are conceptually equivalent, then both $F_i \in F$ and $F_k \in F$ are synchronizable forbidden conditions that will be synchronized eventually when both $A_i$ and $A_k$ are executed.
\end{definition}
\begin{definition}\emph{(Mirrorable algorithm)}
If $F_i \in F$ and $F_k \in F$ are synchronizable forbidden conditions, then both $A_i \in A$ and $A_k \in A$ are conceptually equivalent mirrorable algorithms that will be mirrored eventually when both $A_i$ and $A_k$ are executed.
\end{definition}
Before continuing, a trivial example of how the proposed algorithm works in practice is presented for a better understanding of this paper. Let's convert the Schmidt's algorithm to a SFCM algorithm that we call SFCM-S algorithm. Let $W_0=G$ be a 2-connected scene that Schmidt's algorithm takes as input and $W_1$ be a scene called Schmidt Scene that SFCM-S takes as input. The description of Schmidt scene is as follows.
\paragraph{\textbf{Schmidt Scene}}{Each $P_k \in D$, with $D$ being an ear decomposition of $G$, is a component and the $ru$ edge is a forbidden \textit{ru-component} that needs to be degenerated in some state $k$.}
Notice that ru-component is a potential forbidden minor of Schmidt Scene that needs to be added last by SFCM-S in order to not make such algorithm fail to produce a valid output. As the $ru$ edge could be also considered a potential forbidden minor of Schmidt's algorithm, all we need to do is to make the SFCM-S imitate the behaviour of Schmidt's algorithm so that the forbidden conditions of both algorithms will be completely \textit{synchronized} eventually.
As the only difference between SFCM-S and Schmidt's algorithm is that they're \textit{conceptually equivalent}, they will be also completely \textit{mirrored} eventually.
In this paper, we use a variation of the same approach to construct a hamiltonian sequence path $P$. In this case, $W_0=G$ is the scene that an unknown non-exhaustive hamiltonian sequence characterization test, that performs subsequent $G-v_i$ operations, takes as input. $W_1$ is the scene called \textit{minimal scene} that the proposed algorithm for hamiltonian sequence problem, that we call SFCM-R, takes as input. Such unknown non-exhaustive algorithm will be called \textit{real scene algorithm} or RS-R. Throughout this paper, we also refer to an exhaustive version of real scene algorithm as RS-E.
We assume that every state of SFCM-R has potential forbidden minors that make such algorithm fail to produce a valid hamiltonian sequence. In addition, we also assume at first that, if $W_0$ has a hamiltonian sequence, these potential forbidden minors will be degenerated in some state $k$ of SFCM-R.
In summary, the goal of the SFCM-R is to synchronize the forbidden condition of SFCM-R and the forbidden condition of an unknown non-exhaustive hamiltonian sequence characterization test by using an imitation process. The minimal scene description is based on invalid conditions that make \textsc{Validator} have \textit{false} as output on \textit{minimal state}. In other words, such invalid conditions belong only to \textit{minimal scene}, not to \textit{real scene} or simply $G$, which is the scene that RS-R takes as input.
\section{SFCM-R algorithm}
\label{sec:2}
In this section, SFCM-R is explained in detail. Before continuing, we need to define formally the minimal scene and some important functions. The minimal scene is formally defined as follows.
\begin{definition}
A minimal scene is a rooted graph $G=(V,E)$ v with a set $L=(L_{w})_{w\in V}$ of labels associated with each vertex $w$, a root vertex $\text{$v_{0}$} $ and an ordered set $\Omega=(\tau_i ... \tau_n)$ of tiers $\tau_i \supset H$.
\end{definition}
Let $G=(V,E)$ v be a minimal scene and $G=(V,E)$ be a real scene. Let $u$ and $v$ be vertices such that $v,u \in V$. By convention, $v$ is the current state's vertex and $u$, a potential successor $u \in N(v)$. $V - v$ is performed whenever a vertex $u$ is defined as a successor of $v$. It will be written as $v \to u=T$. When $u$ is defined as an invalid successor, it will be written as $v \to u=F$. $P_{u}$ is a path from $v_i$ to $u$. As we need to find a vertex $u$ such that $v \to u=T$ holds for $u$, SFCM-R analyses a subscene $H^\prime \supset H$ that will be denoted as \emph{tier}.
\begin{definition}
Let $G=(V,E)$ v be a minimal scene. A tier $\tau_i$ is a subscene $\tau_i \supset H$ such that $\tau_i=H[V - X_i]$, $X_i=(S_0 \cup ... \cup S_{i+1})$, $S_0=\{\text{$v_{0}$} \}$ and $S_k$ being a set of nodes with depth $k$ of a breadth-first search traversal tree of $H$.
\end{definition}
A set $\Omega$ of tiers is defined by the function called \textsc{maximum-induction}. When \textsc{maximum-induction} outputs a valid set $\Omega$, the next step is to get the vertices labelled according to the function \textsc{Lv-label} that outputs a set $L=(L_{w})_{w\in V}$, which is the set of labels associated with each vertex $w$. By convention, $v_{LABEL}$ is a vertex labelled as $v_{LABEL}$ and $N_{v_{LABEL}}(w)$ represents a set of vertices $w^\prime \in N(w)$ labelled as $v_{LABEL}$. $H^\prime_\text{$v_{0}$} $ is the root of $H^\prime \supseteq H$ and $H^\prime_v$ is the $v$ of its current state. The notation $H_{v_{LABEL}}$ represents a set of all vertices labelled as $v_{LABEL}$.
\inicioAlgoritmo{Maximum induction of $H$}
\Input $G=(V,E)$ v
\Output Set $\Omega=(\tau_i ... \tau_n)$ of tiers
\Function{Maximum-induction}{}
\State $\Omega \gets \emptyset$
\State $A \gets \{\text{$v_{0}$} \}$
\State $X \gets \{\text{$v_{0}$} \}$
\Repeat
\State $B \gets \emptyset$
\For{\textbf{each} $v \in A$}
\For{\textbf{each} $u \in \{N(v) - X\}$}
\State $B=B \cup \{u\}$
\EndFor
\EndFor
\If{$B \neq \emptyset$}
\State $X \gets X \cup B$
\If{$\{V-X\} \neq \emptyset$}
\State $\Omega \gets \Omega \cup H[V-X]$
\EndIf
\State $A \gets B$
\ElsIf{$|V| \neq |X|$}
\State \textbf{throw} error
\EndIf
\Until{$|V| = |X|$}
\State \Return $\Omega$
\EndFunction
\fimAlgoritmo{}
Notice that a tier $\tau_i \in \Omega$ can potentially have $\tau_i \equiv H \equiv G$ in some state $k$ of SFCM-R, RS-R, and RS-E. Because of that, we need to identify all the stumbling blocks that may happen to break a potential hamiltonian sequence $P$ on each tier and degenerate them. These points are denoted as \textit{hamiltonian breakpoints} or simply \text{$v_{B}$}\space, because SFCM-R assumes that they're potential forbidden minors that prevent $P$ from being a hamiltonian sequence due to the fact that a tier can potentially have $\tau_i \equiv H^\prime$ holding for $\tau_i$, with $H^\prime \supset H$ being the scene $H^\prime \supset H$ of the current state of SFCM-R.
Before continuing, we'll briefly describe what each label means. Let $w$ be a vertex $w \in V$. If $w$ is an articulation point of $\tau \in \Omega$, it will be labelled as $\text{$v_{A}$}$ or a \textit{minimal articulation} vertex. If $\tau \in \Omega$ and $d(w)=1$, it will be labelled as a \textit{minimal leaf} or \text{$v_{L}$}. Every $ w^\prime \in N(\text{$v_{B}$})$, $w^\prime \neq \text{$v_{B}$}$, will be labelled as a \textit{minimal degeneration} vertex or $\text{$v_{D}$}$. Every $w \notin \{\text{$v_{D}$}, \text{$v_{B}$}\}$ such that $w \neq \text{$v_{B}$}$ and $|N_{\text{$v_{D}$}}(w)| \geq 2$ is a \textit{minimal intersection} vertex or $\text{$v_{I}$}$. Every non-labelled vertex will be labelled as $\text{$v_{N}$}$. A vertex labelled as $\text{$v_{A}$}$ or $\text{$v_{L}$}$ is a $\text{$v_{B}$}$ vertex. On the other hand, a vertex labelled as $\text{$v_{A}$}\text{$v_{N}$}$ is not considered a hamiltonian breakpoint.
\inicioAlgoritmo{$L_v$ labelling}
\Input $G=(V,E)$ v
\Output $L$
\Function{Lv-label}{}
\State $L \gets \emptyset$
\For{\textbf{each} $\tau \in \Omega$}
\State $X \gets $ every $w^\prime$ such that $\omega(\tau-w^\prime) > \omega(\tau)$
\For{\textbf{each} $w \in V(\tau)$}
\If {$d(w) \neq 2$ in $H$}
\If{$w \in X$}
\State $L_w \gets L_w \cup \{\text{$v_{A}$}\}$
\EndIf
\If{$d(w)=1$ in $\tau$}
\State $L_w \gets L_w \cup \{\text{$v_{L}$}\}$
\EndIf
\Else
\If{$w \in X$}
\State $L_w \gets L_w \cup \{\text{$v_{A}$}\text{$v_{N}$}\}$
\EndIf
\EndIf
\EndFor
\EndFor
\For{\textbf{each} $w \text{ labelled as \text{$v_{B}$}} \in V(H)$}
\If {$L_w = \{\text{$v_{A}$}, \text{$v_{L}$}\}$}
\State $L_w \gets L_w - \{\text{$v_{L}$}\}$
\EndIf
\EndFor
\For{\textbf{each} $w \in N(\text{$v_{B}$})$}
\If {$w \neq \text{$v_{B}$}$}
\State $L_w \gets \{\text{$v_{D}$}\}$
\EndIf
\EndFor
\For{\textbf{each} non-labelled $w$}
\If {$|N_{\text{$v_{D}$}}(w)| \geq 2$}
\State $L_w \gets L_w \cup \{\text{$v_{I}$}\}$
\EndIf
\EndFor
\For{\textbf{each} non-labelled $w$}
\State $L_w \gets L_w \cup \{\text{$v_{N}$}\}$
\EndFor
\State \Return $L$
\EndFunction
\fimAlgoritmo{}
Now, a concise description of minimal scene is finally provided below.
\paragraph{\textbf{Minimal Scene}}
Every $\text{$v_{A}$}$ vertex is an articulation point of $H$ and every \text{$v_{L}$}\space vertex is a potential articulation point. In addition, every \text{$v_{B}$}\space vertex is part of an isolated $C_\text{$v_{B}$}$ component such that $C_\text{$v_{B}$}= \text{$v_{B}$} \cup N_{\text{$v_{D}$}}(\text{$v_{B}$})$. Thus, if we have $H-\text{$v_{B}$}$ then we will have $|C_\text{$v_{B}$}| - 1$ \text{$v_{D}$}-components. \text{$v_{I}$}\space vertices are potential intersection points between $C_\text{$v_{B}$}$ components. \text{$v_{I}$}\space, \text{$v_{N}$}\space and \text{$v_{A}$}\text{$v_{N}$}\space vertices aren't part of any $C_\text{$v_{B}$}$ component. The function $A_v(\text{$v_{B}$},H)$ defined below returns $T$ if \text{$v_{B}$}\space is a \textit{virtual articulation} of $H$ or $F$, otherwise. \text{$v_{B}$}\space is a virtual articulation only if $|N_\text{$v_{D}$}(\text{$v_{B}$})| \geq 2$.
\begin{equation}
\label{eq:1}
A_v(\text{$v_{B}$},H)=\left\{\begin{array}{lr}
T, &
|N_{\text{$v_{D}$}}(\text{$v_{B}$})| \geq 2 \\
F, & \text{ otherwise }
\end{array}\right\}
\end{equation}
The term \textit{virtual} indicates that some definition belongs only to minimal scene, not to real scene. Thus, we'll define an additional function $A(w, H)$ that returns \textit{true} if $\omega(H-w) > \omega(H)$. For conciseness, Every real articulation point of $H$ is labelled as \text{$v_{H}$}\space and $C_\text{$v_{H}$}=\text{$v_{H}$} \cup N(\text{$v_{H}$})$ represents a $C_\text{$v_{H}$}$ component.
In order to keep a valid state, every $C_\text{$v_{B}$}$ needs to be mapped and degenerated in some state $k$ of the first or second phase. If \text{$v_{B}$}\space could be degenerated in a state $k$, then \text{$v_{B}$}\space is called \textit{b-treatable}. We need to assume at first that $\forall $ \text{$v_{B}$}\space $ \in V(H)$, there exists a state $k$ in which \text{$v_{B}$}\space will be \textit{b-treatable}.
\begin{definition}
Let $w$ be a vertex with $\{\text{$v_{B}$}\} \in L_w$. Let $v,w$ and $z$ be vertices of $H$. A b-treatable \text{$v_{B}$}\space or $\text{$v_{B}$}^T$ is a vertex $w$ reachable from $v$ through a path $P=v ... z$ with $z \sim w$ such that we have $\{\text{$v_{B}$}\} \notin L_w$ when we recalculate its label in subscene $H-P$
\end{definition}
If \text{$v_{B}$}\space could be degenerated, \text{$v_{B}$}\space can't be considered a \text{$v_{B}$}\space \textit{b-consistent} anymore.
\begin{definition}
A \text{$v_{B}$}\space \textit{b-consistent} or $\text{$v_{B}$}^C$ is a consistent \text{$v_{B}$}\space that can't be degenerated in current state.
\end{definition}
As we don't know a detailed description about the unknown forbidden condition of RS-R, we will stick with a conceptually equivalent definition of hamiltonian sequence problem that relates real scene to minimal scene explicitly. The \textit{\text{$v_{B}$}\space path problem} is as follows.
\begin{definition}\emph{(\text{$v_{B}$}\space path problem)} Given a scene $G=(V,E)$ v, is there a simple path $P$ that visits all vertices with $P$ such that $P=P_{\text{$v_{B}$}_{i}} ... P_{\text{$v_{B}$}_{k}} \cup P_u$ with $|P| = |V(H)|$?
\end{definition}
As the \text{$v_{B}$}\space path problem is similar to the hamiltonian sequence problem, it must be NP-complete. In the minimal scene, if we have a path $P=P_\text{$v_{B}$} - \text{$v_{B}$}$ that degenerates $C_\text{$v_{B}$}$ in $H-P$, such $P$ will be part of another $P_{\text{$v_{B}$}^\prime}$ fragment. In the first phase of SFCM-R, we pass through $H$ with the aim of degenerating $C_\text{$v_{B}$}$ components in order to create a potential hamiltonian sequence $L_e$, which is a sequence of path fragments.
It means that the following theorem, which is a sufficient condition to make \textsc{Validator} output \textit{false}, will be partially ignored in the first phase. Such phase is called the mapping phase, which is represented by the \textsc{Mapping} function (see Sect.~\ref{sec:3}).
\begin{theorem}
\label{thm:1}
Let $G=(V,E)$ be a graph. If $P=v_i ... v_k$ with $1 \leq i \leq k$, $1 \leq k \leq |V|$ is a hamiltonian sequence of $G$, then $v_i \to v_{i+1} =T$ holds for $v$ if and only if $\omega(G-v_i) \leq \omega(G)$.
\end{theorem}
\begin{proof}
If we have $\omega(G-v_i) > \omega(G)$, at least one component is not reachable from $u = v_{i+1}$. Therefore, $|P| \neq |V|$ holds for $P$, which is not a hamiltonian sequence, since at least one vertex is not reachable from $u = v_{i+1}$.
\end{proof}
Because of that, both the first and second phases of SFCM-R need to enforce basic constraints related to the real scene in order to not ignore Theorem \ref{thm:1} completely. Before continuing, we will define two properties that a subscene $H^\prime \supset H$ may have.
\begin{prop}{\emph{($|H^{n}|$ property)}}
The property $|H^{n}|$ indicates that $H^\prime$ is a component of $H^\prime \supset H[V-H_\text{$v_{H}$}]$ that has $n$ vertices $w \in Z$ with $Z=\{ w^\prime \in V(H[V]) : (|N(w^\prime) \cap H_\text{$v_{H}$}| \geq 1) \wedge (w^\prime \neq \text{$v_{H}$})\}$. The value of $|H^{n}|$ is equal to $\alpha = \left|\bigcup_{w \in Z} \{N(w) \cap H_\text{$v_{H}$}\}\right|$; $|H_\text{$v_{H}$}^{n}|$ returns a set $\beta=\bigcup_{w \in Z} \{N(w) \cap H_\text{$v_{H}$}\}$.
\end{prop}
\begin{prop}{\emph{($|H^{c}|$ property)}}
The property $|H^c| = F$ indicates that $H^\prime \supset H[V-H_\text{$v_{H}$}]$ is a creatable component of $H[V]$, which implies that it still doesn't exist in $H[V]$. $|H^{c}|=T$ indicates that $H^\prime$ is a component that exists in $H[V]$.
\end{prop}
The two basic constraints are as follows.
\begin{constraint}
\label{cst:1}
If $H_\text{$v_{H}$} \neq 0$, $H$ can't have a creatable component $H^\prime \supset H[V-H_\text{$v_{H}$}]$ with $|H^n|=1$,$\{V(H^\prime) \cap \{ v \cup N(v) \cup \text{$v_{0}$} \cup N(\text{$v_{0}$} ) \}\} = \emptyset$ and $|H^c|=F$.
\end{constraint}
\why{If $H^\prime$ is created and reached by either $v$ or $\text{$v_{0}$} $, $\omega(G-w) > \omega(G)$ may hold for $G - w$ with $w \in \{v , \text{$v_{0}$} \}$. Such situation is invalid since it can potentially make SFCM-R ignore Theorem \ref{thm:1} completely. SFCM-R assumes that every $w$ is reachable from either $v$ or \text{$v_{0}$} , without ignoring Theorem \ref{thm:1} completely.}
\begin{constraint}
\label{cst:2}
If $H_\text{$v_{H}$} \neq 0$, $H[V-H_\text{$v_{H}$}]$ can't have a component $H^\prime \supseteq H[V-H_\text{$v_{H}$}]$ with $\text{$v_{0}$} \in V(H^\prime)$, $|H^n|=0$ and $|H^c|=T$.
\end{constraint}
\why{
In this case, $\text{$v_{0}$} $ can't reach other components. Such situation is invalid since it can make SFCM-R ignore Theorem \ref{thm:1} completely. SFCM-R assumes that every $w$ is reachable from either $v$ or \text{$v_{0}$} , without ignoring Theorem \ref{thm:1} completely.
}
In addition, SFCM-R can't have an exponential complexity. Otherwise, we're implicitly trying to solve this problem by imitating RS-E. Such situation is clearly invalid seeing that SFCM-R needs to try to imitate the behaviour of RS-R. That's why every $v \to u = T$\space choice must be \textit{goal-oriented} in both phases of SFCM-R. In other words, both phases must be goal-oriented. Throughout this paper, we prove that both phases are imitating the behaviour of RS-R. Such proofs shall be presented with an appropriate background (see Sect.~\ref{sec:6} and \ref{sec:13}).
\begin{definition}
Goal-oriented choice is a non-probabilistic $v \to u = T$
\space choice that involves minimal scene directly and real scene partially.
\end{definition}
As SFCM-R passes through $H$ instead of $G$, we're considering minimal scene directly. In addition, the real scene is considered partially since some basic constraints related to RS-R are evaluated by SFCM-R. Throughout this paper, constraints followed by an intuitive description shall be presented in this order by convention. All the goal-oriented strategies developed through this research shall be presented along with an appropriate background (see Sect.~\ref{sec:11}).
Notice that we can assume that RS-R generates only \textit{consistent} $C_\text{$v_{H}$}$ components in order to construct a valid hamiltonian sequence (if it exists), or throws an error when $G$ doesn't have any hamiltonian sequence in order to abort itself. For that reason, we represent the real scene algorithm as follows.
\inicioAlgoritmo{Non-mirrorable RS-R algorithm }
\Input $G=(V,E)$ , $v$, $P$
\Output Hamiltonian sequence $P$
\Function{Hamiltonian-sequence}{}
\State $A \gets N(v)$
\State $X \gets $ every $w^\prime$ such that $A(w^\prime, H)=T$
\For {\textbf{each} $ u \in A$}
\If {$u \in X$ \textbf{or} $v \to u=F$}
\State $X \gets X \cup \{u\}$
\EndIf
\EndFor
\State $A \gets A - X$
\If{$A \neq \emptyset$}
\State $v \to u=T$ with $u \in A$
\State $P \gets P \cup \{u\}$
\State \textsc{Hamiltonian-sequence($G$, $u$, $P$)}
\Else
\State $P \gets \{v\} \cup P$
\If{$|P| \neq |V(H)|$}
\State \textbf{throw} error
\EndIf
\EndIf
\State \Return P
\EndFunction
\fimAlgoritmo{}
As this version of RS-R doesn't have any explicit relationship with the proposed minimal scene, it needs to be modified to properly represent a \textit{mirrorable} real scene algorithm, which is the real scene algorithm we want to directly mirror in the reconstruction phase. Such modification shall be presented with an appropriate background.
For conciseness, we use RS-R to represent the non-mirrorable RS-R algorithm in order to avoid confusion unless the term \textit{mirrorable} is explicitly written. The reason is that the correctness of SFCM-R implies that both non-mirrorable RS-R algorithm and mirrorable RS-R algorithm are conceptually equivalent mirrorable algorithms, which consequently implies that there's no specific reason to differentiate one from another throughout this paper.
In summary, the main goal of SFCM-R is to imitate the behaviour of RS-R in order to avoid using probability, which is a known behaviour of RS-E. That's why the second phase, which is called the reconstruction phase and is represented by the function \textsc{Reconstruct}, aborts the process if it's forced to use probability while reconstructing $P$. Such reconstruction process is explained in Section~\ref{sec:7}.
\subsection{Mapping phrase}
\label{sec:3}
In this section, the mapping phrase is explained. This phrase outputs a \textit{non-synchronized hamiltonian sequence} that is called $L_e$ set. Such set is used by reconstruction phrase, which tries to reconstruct a hamiltonian sequence by modifying $L_e$ in order to output a valid hamiltonian sequence (if it exists). The mapping task is done by the \textsc{Mapping} function. This function takes both $G=(V,E)$ v and $G=(V^\prime, E^\prime)$ as input by reference along with additional parameters ($L_e$, $v$, $\eta$, $\varepsilon$, $m$, $\kappa$, $S$) by reference and keeps calling itself recursively until reaching its base case.
\begin{definition}
Let $G=(V,E)$ v be a minimal scene. A non-synchronized hamiltonian sequence is a sequence $L_e=(e_i ... e_{n})$, $L_e \supseteq E(H)$ of path fragments.
\end{definition}
By convention, the $(x,y)$ notation will be used to represent non-synchronized edges $xy$ created by \textsc{Mapping}. $(w,\square)$ is a non-synchronized edge $e \in L_e$ with $w \in e$. $\{L_e \cap (w,\square)\}$ is an ordered set that contains each $e \in L_e$ with $w \in e$. The mapping task performed by \textsc{Mapping} has the following structure:
\paragraph{Base case}
(1) $|V(H)|=0$ or (2) $\varepsilon > \eta$ forms the base case of the recursion. If the base case is reached by the first condition, then we assume at first that every $\text{$v_{B}$}^C$ will be $\text{$v_{B}$}^T$ in some state $k$.
\paragraph{Degeneration state}
The current state of \textsc{Mapping}, in which the main operations are as follows: (1) perform $V-v$; (2) perform $v \to u = T$; (3) perform a recursive \textsc{Mapping} call; and (4) throw an error exception.
\\
In degeneration state, some constraint must make $v \to v_{LABEL} = T$ hold for at least one $v_{LABEL}$. If we don't have any $u = v_{LABEL}$ with $v \to v_{LABEL} = T$, we have to undo one step and try an alternative choice in current scene until $\varepsilon > \eta$, with $\varepsilon$ being the local error counter of \textsc{Mapping} and $\eta$ being the local error counter limit of \textsc{Mapping}.
The \textsc{Sync-Error} procedure is called by \textsc{Mapping} whenever it finds an inconsistency. Such procedure increments both $\varepsilon$ and $\kappa$ by reference, with $\kappa$ being a global error counter of \textsc{Mapping}. If $\varepsilon > \eta$ , the current subscene must be discarded by \textsc{Mapping} and the degeneration state is changed to another $v$ in an earlier valid subscene. On the other hand, if $\kappa > m$, with $m$ being the global error counter limit of \textsc{Mapping}, the mapping process must be aborted.
\inicioAlgoritmo{Pre-synchronization error handler}
\Input $G=(V,E)$ v, $\eta$, $\varepsilon$, $m$, $\kappa$, $\textit{throw-error}$
\Procedure{Sync-Error}{}
\State $\varepsilon \gets \varepsilon + 1$
\State $\kappa \gets \kappa + 1$
\If{$\kappa > m$}
\State Abort mapping process
\ElsIf{$\varepsilon > \eta$}
\State Discard $H$
\EndIf
\If {$\textit{throw-error}=\textbf{true}$}
\State \textbf{throw} error
\EndIf
\EndProcedure
\fimAlgoritmo{}
Every constraint must be checked into $H[V - v]$. In order to check if a constraint holds for $u=v_{LABEL}$, \textsc{Mapping} must update the labelling of $H$ by calling \textsc{Lv-label}$(H[V - v])$ function only. Some constraints have nested constraints that induce $H^{\prime} = H[V-v]$ by a set $U \supset V$. These nested constraints also need to be checked into $H^\prime[U]$ by calling \textsc{Lv-label}$(H^{\prime}[U])$ function only.
The only case that requires the current subscene to be completely changed is when $H[V-v]_\text{$v_{H}$} \neq \emptyset$. In this case, we have to perform a \textit{Context Change (CC)} operation into a new subscene $H^\prime \supset H$, due to the fact that $\text{$v_{H}$}$ must be reachable by $v$ or $\text{$v_{0}$} $, without ignoring Theorem \ref{thm:1} completely. Because of that, a creatable component $H^{\prime} \supset H[V-H_\text{$v_{H}$}-v]$ such that $|H^n|=1$, $\{ V(H^\prime) \cap \{N(\text{$v_{0}$} ) \cup \text{$v_{0}$} \}\} \neq \emptyset$, $|H^c|=F$ needs to be explicitly created by \textsc{Mapping} since the minimal scene is not aware of the existence of real articulation points. We call this creatable component $H{\text{\begin{math}\star\end{math}}}$. After $H{\text{\begin{math}\star\end{math}}}$ is created, it needs to be configured by the following operations: $V(H^{\prime}) = V(H^{\prime}) \cup \{\text{$v_{0}$} ,|H_\text{$v_{H}$}^n|\}$, $H^{\prime}_v=\text{$v_{0}$} $, $H^{\prime}_\text{$v_{0}$} =|H_\text{$v_{H}$}^n|$. When it's processed by \textsc{Mapping}, the current labelling of $H$ becomes obsolete. Because of that, $H$ also needs to perform a CC operation.
Notice that an edge $\text{$v_{0}$} v$ is added temporarily whenever \textsc{Mapping} makes a CC operation, which is done by the function \textsc{Context-change}, in order to make both \textsc{Lv-Label}($H$) and \textsc{Maximum-induction}($H$) work correctly. That's because such $v$ will act like a vertex $u$ that was chosen by $\text{$v_{0}$} \to v = T$ in an imaginary state with $H[V-\text{$v_{0}$} ]_\text{$v_{H}$}=\emptyset$, which makes $H{\text{\begin{math}\star\end{math}}}$ and the degeneration state behave like $v = \text{$v_{0}$} $ in maximal $H \equiv G$,$\text{$v_{0}$} \in V(H)$.
\inicioAlgoritmo{Context Change (CC) Operation}
\Input $G=(V,E)$ v, $w \in V(H)$, $v \in V(H)$
\Output Scene $G=(V,E)$ v
\Function{Context-change}{}
\If {\text{First context change of $H$}}
\For {\textbf{each} $y \in V(H)$}
\State $y.LAST \gets \textit{null}$
\State $y.SPLIT \gets \textit{true}$
\EndFor
\EndIf
\State $H_{\text{$v_{0}$} } \gets w$
\State $\textit{edge\_created} \gets false$
\State $e \gets \textit{null}$
\If {$w \neq v$ \textbf{and} $w \notin N(v)$}
\State $e \gets vw$
\State $E(H) \gets E(H) \cup \{e\}$
\State $\textit{edge\_created} \gets true$
\EndIf
\State $\Omega \gets \textsc{Maximum-induction}(H) $
\State $L \gets \textsc{Lv-Label}$ $(H)$
\If {\textit{edge\_created}}
\State $E(H) \gets E(H) - \{e\}$
\EndIf
\State \Return $H$
\EndFunction
\fimAlgoritmo{}
The constraints considered in this phrase are defined as follows.
\begin{constraint}
\label{cst:3}
{\large{$v \to \text{$v_{D}$}=T$}},
if $\text{$v_{D}$} \sim \text{$v_{A}$}$ and $N_\text{$v_{A}$}(\text{$v_{A}$})=\emptyset$. \end{constraint}
\why{
As \text{$v_{A}$}\space is considered an isolated component by minimal scene, it can't influence the labelling of any $\text{$v_{A}$}{^\prime}$ directly.
}
\begin{constraint}
\label{cst:4}
{\large{$v \to \text{$v_{D}$}=T$}},
if we have at least one $\text{$v_{A}$}^T$ for $H[V-\text{$v_{D}$}-P]$ with $P$ being a \text{$v_{H}$}-path $P = w_i ... w_k$ generated by $H[V-\text{$v_{D}$}]$ such that $w_1=\text{$v_{D}$}$, $1 \leq i \leq k$, and $d(w) = 2$ with $w \in P$ such that $w \neq w_1$.
\end{constraint}
\why{
In this case, \text{$v_{D}$}\space is part of a degeneration process. As $P$ is a mandatory path of subdivisions, $H[V-\text{$v_{D}$}-P]$ is performed in order to check if $w_k$ also behaves like \text{$v_{D}$}\space since $v=w_k$ will hold for $v$ eventually.
}
\begin{constraint}
\label{cst:5}
{\large{$v \to \text{$v_{L}$}=T$}},
if we have at least one $\text{$v_{A}$}^T$ for $H[V-\text{$v_{L}$}-P]$ with $P$ being a \text{$v_{H}$}-path $P = w_i ... w_k$ generated by $H[V-\text{$v_{L}$}]$ such that $w_1=\text{$v_{L}$}$, $1 \leq i \leq k$, and $d(w) = 2$ with $w \in P$ such that $w \neq w_1$.
\end{constraint}
\why{
A \text{$v_{L}$}\space is a leaf on its minimal state, that can act like a $\text{$v_{D}$}^\prime$ with $d(\text{$v_{D}$}^\prime)=1$ that degenerates a $C_\text{$v_{A}$}$ such that $\text{$v_{D}$}^\prime \in C_\text{$v_{A}$}$. In this case, \text{$v_{L}$}\space is behaving like a leaf $w$ of RS-R such that $w \in C_\text{$v_{H}$}$ instead of a \text{$v_{B}$}\space vertex since it's part of a degeneration process.
}
\begin{constraint}
\label{cst:6}
{\large{$v \to \text{$v_{D}$}=T$}},
if $\text{$v_{D}$} \sim \text{$v_{A}$}$ and $\text{$v_{D}$} \notin \tau \wedge (A(\text{$v_{A}$},\tau)=T) \wedge (|N_\text{$v_{A}$}(\text{$v_{D}$})|=1)$. \end{constraint}
\why{
In this case, \text{$v_{D}$}\space doesn't influence the labelling of any \text{$v_{A}$}\space vertex directly since $\text{$v_{D}$} \notin \tau$ and $|N_\text{$v_{A}$}(\text{$v_{D}$})|=1$.
}
\begin{constraint}
\label{cst:7}
{\large{$v \to \text{$v_{D}$}=T$}},
If $\text{$v_{D}$} \sim \text{$v_{L}$} \wedge \text{$v_{D}$} \to \text{$v_{L}$}=T$.
\end{constraint}
\why{
If $\text{$v_{D}$} \sim \text{$v_{L}$}$ and $\text{$v_{D}$} \to \text{$v_{L}$}=T$ we assume that $\text{$v_{D}$}\to \text{$v_{L}$}=T$ may be the next choice.
}
\begin{constraint}
\label{cst:8}
{\large{$v \to \text{$v_{D}$}=T$}},
if there exists a \text{$v_{H}$}-path $P = w_i ... w_k$,$w_k \sim \text{$v_{L}$}$, generated by $H[V-\text{$v_{D}$}]$ such that: (1) $w_1=\text{$v_{D}$}$, $1 \leq i \leq k$, $d(w) = 2$ with $w \in P$ such that $w \neq w_1$; and (2) $w_k \to \text{$v_{L}$}=T$ in $H[V-\text{$v_{D}$}-P]$.
\end{constraint}
\why{
If there exists $P$, which is a mandatory path of subdivisions, we check if $w_k \to \text{$v_{L}$}=T$ holds for $\text{$v_{L}$}$ since $v=w$ with $w=\text{$v_{L}$}$ will hold for $v$ eventually.
}
\begin{constraint}
\label{cst:9}
{\large{$v \to \text{$v_{D}$}=T$}},
If $\text{$v_{D}$} \sim \text{$v_{A}$}$, and (1) $A_v(\text{$v_{A}$},H)=F$ in $H$ or (2) $A_v(\text{$v_{A}$},H)=F$ in $H[V-\text{$v_{D}$}]$.
\end{constraint}
\why{
In this case, we have $0 \leq |N_\text{$v_{D}$}(\text{$v_{A}$})| \leq 1$. Thus, such \text{$v_{A}$}\space is not a consistent virtual articulation since we have $0 \leq |C_\text{$v_{A}$}| - 1 \leq 1$.
}
\begin{constraint}
\label{cst:10}
{\large{$v \to \text{$v_{D}$}=T$}},
if $|H_\text{$v_{A}$}| = 0$.
\end{constraint}
\why{
If $|H_\text{$v_{A}$}| = 0$ and $|H_\text{$v_{D}$}| \neq 0$, then $|H_\text{$v_{L}$}| \neq 0$. In such a state, \textsc{Mapping} tries to make \text{$v_{L}$}\space behave like leaves $w$ of the real scene such that $w \in C_\text{$v_{H}$}$.
}
\begin{constraint}
\label{cst:11}
If there's no other valid choice for $v$, we have {\large{$v \to \text{$v_{A}$}\text{$v_{N}$}=T$}}, {\large{$v \to \text{$v_{I}$}=T$}}, and {\large{$v \to \text{$v_{N}$}=T$}} .
\end{constraint}
\why{
Vertices labelled as $\text{$v_{I}$}$, $\text{$v_{N}$}$ and $\text{$v_{A}$}\text{$v_{N}$}$ aren't part of any $C_\text{$v_{B}$}$ directly.
}
\subsubsection{Goal}
\label{sec:4}
The goal of mapping phrase is to output a valid $L_e$ set ready to be reconstructed in next phrase. As a consequence, if \textsc{Mapping} generates an inconsistent $\text{$v_{H}$}$\space that prevents $L_e$ from being a hamiltonian sequence, \textsc{Reconstruct} will be able to degenerate such inconsistency and generate another $\text{$v_{H}$}^\prime$ to change parts of $L_e$ until we have a valid hamiltonian sequence (if it exists) by correcting parts of mapping process. We call this process \textit{$C_\text{$v_{H}$}$ attaching} or \textit{minimal scene attachment}, because inconsistent $C_\text{$v_{H}$}$ components are degenerated by considering minimal scene directly and real scene partially. Such process is done in reconstruction phrase by using a goal-oriented approach (see Sect.~\ref{sec:10}).
\begin{definition}
A $C_\text{$v_{H}$}$ attaching is when we choose a vertex $u$ with $u \in C_\text{$v_{H}$}$ before $C_\text{$v_{H}$}$ makes a scene $H^\prime \supseteq H$ become inconsistent in the current state of SFCM-R. A $C_\text{$v_{H}$}$ is attached when: (1) $H^\prime_\text{$v_{H}$}=\emptyset$ holds for $H^\prime-u$; or (2) there exists a \text{$v_{H}$}-path $P$ generated by $H^\prime-u$ that doesn't generate any inconsistency in $H^\prime-P$.
\end{definition}
\begin{definition}
A \text{$v_{H}$}-path is a path $P = P_{\text{$v_{H}$}_i}...P_{\text{$v_{H}$}_k}$, generated by $H[V-v]$ with $H[V-v]_\text{$v_{H}$} \neq \emptyset$, such that $1 \leq i \leq k$,$1 \leq k \leq |V|$, in which every $\text{$v_{H}$}_i$ reaches $\text{$v_{H}$}_{i+1}$ properly.
\end{definition}
The key to constructing a valid $L_e$ is to take into account the priority order of each choice. The priority order plays an important role in this phrase since it will contribute to make the mapping phrase imitate the behaviour of RS-R. The priority order relies on the label of $u$. If the priority is $n$ times higher than an arbitrary constant $i$, it will be denoted as $v^{i+n}_{LABEL}$. The highest priority is to make a $\text{$v_{B}$}^C$ be $\text{$v_{B}$}^T$. So we will have $\text{$v_{L}$}^{i+4}$ and $\text{$v_{D}$}^{i+3}$. \text{$v_{L}$}\space has the highest priority because it can potentially make $|H_\text{$v_{A}$}|$ increase since it's considered a potential real articulation point according to minimal scene's description. If \textsc{Mapping} can't make any $\text{$v_{B}$}^C$ be $\text{$v_{B}$}^T$ in its current state, we want to perform a CC operation instead of undoing states. Thus, we will have $\text{$v_{A}$}\text{$v_{N}$}^{i+2}$ since these vertices can generate \text{$v_{H}$}\space articulations with a considerable probability due to $d(\text{$v_{A}$}\text{$v_{N}$})=2$. If we don't have any $C_\text{$v_{B}$} \sim v$, we have $\text{$v_{N}$}^{i+1}$ in order to make \textsc{Mapping} reach different regions of $H$. The lowest priority is for $\text{$v_{I}$}$. So we have $\text{$v_{I}$}^{i}$ for vertices labelled as \text{$v_{I}$}.
Notice that we don't have any constraint that makes $v \to \text{$v_{A}$}=T$ hold for $v$, since it can disconnect the minimal scene according to its description. Even so, we will have $v \to \text{$v_{A}$}=T$ in some state $k$ of SFCM-R if \textsc{Reconstruct} outputs a valid hamiltonian sequence. It means that the constraints related to vertices labelled as \text{$v_{A}$}\space can't be evaluated directly in this phrase.
\subsubsection{Algorithm}
\label{sec:5}
In this section, the pseudocode of \textsc{Mapping} is explained. Every line number mentioned in this section refers to the pseudocode of \textsc{Mapping}, which is as follows.
\inicioAlgoritmo{Mapping of $H$}
\Input $G=(V,E)$ v, $G=(V^\prime, E^\prime)$, $L_e$,$v$,$\eta$, $\varepsilon$, $m$, $\kappa$,$S$
\Output Set $L_e=e_0 ... e_n$ of non-synchronized edges
\Function{Mapping}{}
\If{$|V(H)| \neq 1$}
\If {$v.SPLIT$ \textbf {and} $H[V-v]_\text{$v_{H}$} \neq \emptyset$}
\If {constraint \ref{cst:1} or \ref{cst:2} doesn't hold for $H[V-v]$}
\State \textsc{Sync-Error($H$, $\eta$, $\varepsilon$, $m$,$\kappa$, \textbf{true}})
\EndIf
\State $H-v$
\State Set and configure $H{\text{\begin{math}\star\end{math}}}$ in $H[V-H_\text{$v_{H}$}]$
\EndIf
\If{$H{\text{\begin{math}\star\end{math}}}$ was set and configured}
\Try
\State $H{\text{\begin{math}\star\end{math}}} \gets$ \textsc{Context-change($H{\text{\begin{math}\star\end{math}}}$, $H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $,$H{\text{\begin{math}\star\end{math}}}_v$)}
\State \textsc{Mapping($H{\text{\begin{math}\star\end{math}}}$,$G$,$L_e$,$H{\text{\begin{math}\star\end{math}}}_v$,$\eta$, 0, $m$,$\kappa$,$S$)}
\State Update $H$
\If{ $w \in V(H)$ with $w \equiv H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $}
\State $v.SPLIT \gets \textit{false}$
\EndIf
\State Restore $v$ and $w \in V(H)$ with $w \equiv H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $
\State $\text{$v_{0}$} \gets w$
\State $H \gets$ \textsc{Context-change($H$,$\text{$v_{0}$} $, $v$)}
\State \textsc{Mapping($H$,$G$,$L_e$,$v$,$\eta$, $\varepsilon$, $m$,$\kappa$,$S$)}
\If {$N(v) = \emptyset$ in $H$}
\State $H-v$
\EndIf
\EndTry
\Catch{error}
\State \textsc{Sync-Error($H$, $\eta$, $\varepsilon$, $m$, $\kappa$, \textbf{true}})
\EndCatch
\Else
\If{$N(v) \neq \emptyset$ in $H$}
\State $\textit{found} \gets \textit{false}$
\State $v.SPLIT \gets \textit{false}$
\If {$v.U$ is not set}
\State $L \gets \textsc{Lv-Label}(H[V-v])$
\State $v.U \gets \text{ every } v_{LABEL} \in N(v) \text{ such that } v \to v_{LABEL} = T$ in $H[V-v]$
\EndIf
\While {there exists a non-visited $u$}
\State $u \gets$ a non-visited \textit{u} $\in v.U$ with highest priority chosen randomly
\Try
\State \textsc{Select($G$, $v$, $u$, $L_e$, $S$)}
\State \textsc{Mapping($H$,$G$,$L_e$,$u$,$\eta$,$\varepsilon$, $m$,$\kappa$,$S$)}
\State {$\textit{found} \gets \textit{true}$}
\State \textbf{break}
\EndTry
\Catch {error}
\State \textsc{Sync-Error($H$, $\eta$, $\varepsilon$, $m$,$\kappa$, \textbf{false}})
\State Undo modifications in $H$, $L_e$, and $S$
\EndCatch
\EndWhile
\If {$\textit{found} = \textit{false}$}
\State \textsc{Sync-Error($H$, $\eta$, $\varepsilon$, $m$, $\kappa$, \textbf{true}})
\EndIf
\Else
\State $H-v$
\EndIf
\EndIf
\Else
\State \textsc{Select($G$, $v$, $v$, $L_e$, $S$)}
\EndIf
\State \Return $L_e$
\EndFunction
\fimAlgoritmo
Firstly, a \textsc{Context-change}(\text{$v_{0}$} ,\text{$v_{0}$} ,$H$) call is needed to calculate $\Omega$ and $L$ of $H$ such that $|H_\text{$v_{H}$}|=0$,$H_\text{$v_{0}$} =\text{$v_{0}$} $ ,$H_v=\text{$v_{0}$} $ before the first \textsc{Mapping} call. When \textsc{Mapping} is called, if $H[V-v]_\text{$v_{H}$} \neq \emptyset$ , \textsc{Mapping} must remove $v$ from $V(H)$ in order to create a valid H{\text{\begin{math}\star\end{math}}}\text{ }component with $|H^c|=T$ (line 7). In addition, every $w \in V(H{\text{\begin{math}\star\end{math}}})$ must be a deep copy of the corresponding $w \in V(H)$ because we treat vertices as objects in order to facilitate the understanding of the proposed pseudocode.
It's important to mention that \textsc{Mapping} needs to call the \textsc{Context-change} function before \textsc{Mapping} calls itself recursively if $H{\text{\begin{math}\star\end{math}}}$ was set (lines 10 and 17). The new $\text{$v_{0}$} $ is set to $w \equiv H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $ with $w \in V(H)$ (line 16). Every vertex $x \equiv x^\prime$, $x \in V(H)$, $x^\prime \in V(H{\text{\begin{math}\star\end{math}}})$, $x^\prime \in S$ that was removed from $H{\text{\begin{math}\star\end{math}}}$ by a $v \to u = T$\space operation made by \textsc{Select} function (lines 33 and 45) must also be removed from $H$ before a CC operation (line 12), including $H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $, despite the fact that it is restored in $H{\text{\begin{math}\star\end{math}}}$. This rule doesn't apply to vertices removed from $H$ when $N(v) = \emptyset$ (lines 20 and 43) since such $v$ may be part of another $H{\text{\begin{math}\star\end{math}}}$ component in different recursive calls. If $w \in V(H)$ with $w \equiv H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $, the split property $v.SPLIT$ is set to \textit{false} (line 14). In this case, $H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $ was not explicitly reached by any $v \to u = T$\space operation made by \textsc{Select}. As we're ignoring Theorem \ref{thm:1} partially, we need to force $v$ to be changed in the next recursive call in order to make \textsc{Mapping} create a new $H{\text{\begin{math}\star\end{math}}}$ since a new $H{\text{\begin{math}\star\end{math}}}_\text{$v_{0}$} $ may happen to be explicitly reached by a $v \to u = T$\space operation made by \textsc{Select} in a new $H{\text{\begin{math}\star\end{math}}}$.
If $H[V-v]_\text{$v_{H}$} = \emptyset$, we need to follow the constraints and priorities mentioned earlier to set $v.U$, which is the set of possible successors of $v$ (lines 27 to 29), and set $v.SPLIT$ to \textit{false} (line 26). In this case, if $v \to v_{LABEL}=T$ holds for at least one $u=v_{LABEL}$, we must: (1) perform $v \to u = T$; (2) perform $S \gets S \cup v$ in order to update $H$ properly; (3) add a non-synchronized edge $(v.LAST,v)$ to $L_e$; and (4) perform $u.LAST \gets v$. \textsc{Mapping} needs to call \textsc{Select} in order to do such operations by reference when the context remains unchanged (lines 33 and 45).
\begin{observation} Notice that, as $v \to u = T$\space performs $V-v$ by convention, $G=(V^\prime, E^\prime)$ is not changed. The reason is that we use $G$ to make \textsc{Mapping} keep track of adjacency between $v.LAST$ and $v$ in the maximal $H \equiv G$.
\end{observation}
If $v \to v_{LABEL}=F$ happens to hold for $w=v_{LABEL}$ with $w \in v.U$ due to an error thrown by \textsc{Sync-Error} (line 38), \textsc{Mapping} must undo modifications made in $H$, $L_e$ and $S$ to restore its state before choosing a new unvisited $u \in v.U$ as successor (line 39). On the other hand, if $v \to v_{LABEL}=F$ holds for every $v_{LABEL} \in v.U$, we need to undo the last step and try an alternative, incrementing both $\kappa$ and $\varepsilon$ by calling \textsc{Sync-Error} (line 41). Every error found in mapping phrase must increment $\kappa$ and $\varepsilon$. If $\varepsilon > \eta$, the current subscene $H$ must be discarded by \textsc{Mapping}, that needs to perform undo operations to choose another $v$ in an earlier valid subscene. On the other hand, if $\kappa > m$, the process must be aborted. In this phrase, a vertex can't have more than two incident edges since $L_e$ must be an ordered sequence of path fragments. Therefore, \textsc{Select} must remove the first element of $S=\{L_e \cap (w,\square)\}$ from $L_e$ by reference if $|S| > 2$. \textsc{Select} is as follows.
\inicioAlgoritmo{Non-synchronized edge handler}
\Input $G=(V^\prime, E^\prime)$, $v$, $u$, $L_e$, $S$
\Procedure{Select}{}
\State $v \to u = T$
\State $S \gets S \cup v$
\If {$v=\text{$v_{0}$} $}
\State Restore \text{$v_{0}$}
\EndIf
\If{$v.LAST \in N(v)$ in $G$ \textbf{and} $v \neq u$}
\State $L_e \gets L_e \cup (v.LAST,v)$
\State Remove the first element of $S=\{L_e \cap (w,\square)\}$ from $L_e$ if $|S| > 2$
\EndIf
\State $u.LAST \gets v$
\EndProcedure
\fimAlgoritmo
In addition, \textsc{Mapping} must never remove $w = \text{$v_{0}$} $ from $H$ except in two cases. The first case is before a CC operation that makes $w \neq \text{$v_{0}$} $ hold for $w$ (line 17). The second case is when $N(v)=\emptyset$ and $v=\text{$v_{0}$} $ (lines 20 and 43). Also, \textsc{Mapping} can't have $v \to \text{$v_{0}$} =T$ with $v \neq \text{$v_{0}$} $ unless $v \sim \text{$v_{0}$} $, $d(v)=1$, $d(\text{$v_{0}$} )=1$ and $|V(H)|=2$. Such restriction imitates the way that RS-R reaches \text{$v_{0}$} from $v$.
\subsubsection{Proof of correctness}
\label{sec:6}
This section is dedicated to the proof of correctness of mapping phrase. It's important to mention that SFCM-R can only use goal-oriented choices. Because of that, we need to prove that \textsc{Mapping} is goal-oriented. Consider the following lemmas.
\begin{lemma}
\label{lem:1}
Let $G=(V,E)$ v be a scene. $|H_\text{$v_{A}$} \cap H[V-v]_{\text{$v_{H}$}}|\leq |H[V-v]_\text{$v_{H}$}|$ holds for every $H$ with $|V| > 4$ and $H_\text{$v_{A}$} \cap H[V-v]_\text{$v_{H}$} \neq \emptyset$.
\end{lemma}
\begin{proof}
Let $G=(V,E)$ v be a scene such that $H$ is a minimal hamiltonian graph, $E=w_iw_{i+1} ... w_{n-1}w_n$ and $w_i=w_n$. Suppose that $|V|=4$. For every $H[V-v]$ with $v \in V(H)$, we have $|H[V-v]_\text{$v_{H}$}|=|V| - 3$, which is the maximum value possible of $|H[V-v]_\text{$v_{H}$}|$. If we call \textsc{Context-Change}(\text{$v_{0}$} ,\text{$v_{0}$} ,$H$) with \text{$v_{0}$} being an arbitrary vertex $w \in V$, the first $\tau \in \Omega$ will have $|\tau_\text{$v_{H}$}|=0$ since $|V(\tau)|=1$. Let's add a vertex $w^\prime$ and an edge $w^\prime w_i$ to $H$, set $w^\prime$ to \text{$v_{0}$} and call \textsc{Context-Change}(\text{$v_{0}$} ,\text{$v_{0}$} ,$H$). As $V(\tau) = V(H) - N(\text{$v_{0}$} ) - \text{$v_{0}$} $ holds for first tier $\tau \in \Omega$, $|V| - 4$ is the maximum value possible of $|\tau_\text{$v_{H}$}|$. Notice that: (1) if we had $d(\text{$v_{0}$} )=2$, $d(w)=1$ would hold for every $w \in V(\tau)$; and (2) if we had $d(\text{$v_{0}$} )=3$, $|\Omega|=0$. Thus, $|H_\text{$v_{A}$}|=0$ holds for $H$ when $|V| \leq 4$. Now suppose that $|V| > 4$. Let's connect $w_i$ with every vertex except $\text{$v_{0}$} =w^\prime$ and call \textsc{Context-Change}(\text{$v_{0}$} ,\text{$v_{0}$} ,$H$) again. In this case, $H_\text{$v_{A}$} = H[V-v]_{\text{$v_{H}$}}$ will hold for $H$ with $v=\text{$v_{0}$} $ since $d(w)>2$ in $H$ holds for every $w \in \tau_{\text{$v_{H}$}}$. Notice that if we remove at least one edge $w_i x$ with $x \neq \text{$v_{0}$} $ and call \textsc{Context-Change}(\text{$v_{0}$} ,\text{$v_{0}$} ,$H$) again, $H_\text{$v_{A}$} < H[V-v]_{\text{$v_{H}$}}$ will hold for $H$ with $v=\text{$v_{0}$} $ since $d(x)=2$ in $H$ holds for $x \in \tau_{\text{$v_{H}$}}$ in this case. Therefore, $|H_\text{$v_{A}$} \cap H[V-v]_{\text{$v_{H}$}}|\leq |H[V-v]_\text{$v_{H}$}|$ holds for every $H$ with $|V| > 4$ and $H_\text{$v_{A}$} \cap H[V-v]_\text{$v_{H}$} \neq \emptyset$.
\end{proof}
\begin{lemma}
\label{lem:2}
Let $G=(V,E)$ v be a scene. $\varepsilon$ such that $\varepsilon < |V|$ aborts the mapping task process only if at least one valid $u$ for every $v$ found is known.
\end{lemma}
\begin{proof}
If $H[V-v]_\text{$v_{H}$}=0$ and at least one valid $u$ for every $v$ found is known in the mapping task, $\varepsilon = |N(v)|$ will force \textsc{Mapping} to abort the process when there are only invalid $u$ vertices for $v$. If $H[V-v]_\text{$v_{H}$} \neq 0$, $\varepsilon$ such that $\varepsilon \leq |N(v)|$ aborts the mapping task when there are only invalid $u$ vertices for $v$, since at least one invalid $u \in N(v)$ may be part of different $H{\text{\begin{math}\star\end{math}}}$ components set by \textsc{Mapping}. As $|N(v)| \leq |V| - 1$, $\varepsilon$ such that $\varepsilon < |V|$ aborts the mapping task process only if at least one valid $u$ for every $v$ found is known.
\end{proof}
The following theorem we want to prove states that \textsc{Mapping} is goal-oriented with $\eta=|V|$ and $m=\frac{|V|^{2}-|V|}{2}$, even when it doesn't reach its base case. As a consequence, \textsc{Mapping} may require some attempts with a different vertex set as \text{$v_{0}$} to reach its base case in order to output a set $L_e$ that maps the majority of the vertices $w \in V(H)$ (if it exists).
\begin{observation}
The proof of the following theorem assumes that \textsc{Mapping} takes as input a connected $G=(V,E)$ v with $H_\text{$v_{H}$}=\emptyset$. The reason is that \textsc{Mapping} needs to enforce constraints \ref{cst:1} and \ref{cst:2}, which doesn't imply that SFCM-R will fail when $H_\text{$v_{H}$} \neq \emptyset$ and also doesn't imply that \textsc{Mapping} needs to take a connected $G=(V,E)$ v with $H_\text{$v_{H}$}=\emptyset$ in order to be goal-oriented. Thus, if we need to reconstruct a hamiltonian path in a scene $H$ with $H_\text{$v_{H}$} \neq \emptyset$, we need to reconstruct multiple hamiltonian path fragments for each $H^\prime \supset H$,$|H^n|=1$ generated by $H[H-H_\text{$v_{H}$}]$ separately in different instances of SFCM-R.
\end{observation}
Because \textsc{Mapping} is goal-oriented by the following theorem, even when it doesn't reach its base case, SFCM-R also assumes that both \textsc{Mapping} and RS-R have pre-synchronized forbidden conditions.
\begin{theorem}
\label{thm:2}
\textsc{Mapping} is goal-oriented with $\eta=|V|$ and $m=\frac{|V|^{2}-|V|}{2}$.
\end{theorem}
\begin{proof}
Let $G=(V,E)$ v be a connected minimal scene with $H_\text{$v_{H}$}=\emptyset$ that \textsc{Mapping} takes as input, and $\lfloor F \rceil$ be the unknown negated forbidden condition of RS-R.
As not every $C_\text{$v_{A}$}$ will happen to be $C_\text{$v_{H}$}$, then $\text{$v_{L}$}^{i+4}$ and $\text{$v_{D}$}^{i+3}$ can potentially cancel the appearance of non-mandatory $C_\text{$v_{H}$}$ components and consequently retard $\varepsilon$ growth rate since $\text{$v_{A}$} \in V(\tau_i)$ is a potential \text{$v_{H}$}\space of \textsc{Mapping}. As $\lfloor F \rceil$ also cancels the appearance of non-mandatory $C_\text{$v_{H}$}$ components, \textsc{Mapping} is imitating RS-R by giving the degeneration process a high priority.
Even if $C_\text{$v_{A}$}$ happens to be a $C_\text{$v_{H}$}$, such $C_\text{$v_{A}$}$ will not influence the labelling of any other $\text{$v_{A}$}^\prime$ directly since such $C_\text{$v_{H}$}$ forces \textsc{Mapping} to perform a CC operation. $\lfloor F \rceil$ also forces RS-R to perform a CC-like operation in order to pass through a potential forbidden minor $X \supset H$, directly or indirectly. As $\lfloor F \rceil$ is optimal, $X$ can be used by $\lfloor F \rceil$ to decide whether the real scene has a hamiltonian sequence. In this specific case, such $X$ is an inconsistent component with $0 \leq |H^n| \leq 1$, $|H^c| \in \{T,F\}$ in a state $x$, in which RS-R decides to abort itself. Therefore, if $H_\text{$v_{H}$} \neq \emptyset$ in RS-R context, then $v_i \in P$ holds for every valid $v_i$ found with $P$ such that $P=P^\prime \cup X^\prime$ with $P^\prime$ being a \text{$v_{H}$}-path and $X^\prime \supseteq V(X)$.
As $P$ can be split into potential independent forbidden minors in RS-R context by $\lfloor F \rceil$, \textsc{Mapping} is imitating RS-R by:
\begin{enumerate}[(1)]
\item forcing $C_\text{$v_{A}$}$ components to be isolated through the degeneration process; and
\item performing a CC operation in case of $H_\text{$v_{H}$}[V-v] \neq \emptyset$ when both constraints \ref{cst:1} and \ref{cst:2} hold for $H[V-v]$.
\\
\end{enumerate}
Notice that as $\lfloor F \rceil$ detects both potential non-mandatory $C_\text{$v_{H}$}$ components and potential independent forbidden minors that don't exist in current minimal scene context of real scene, directly or indirectly, \textsc{Mapping} is still imitating RS-R when a vertex $w \neq \text{$v_{A}$}$ happens to be \text{$v_{H}$}\space without ignoring both constraints \ref{cst:1} and \ref{cst:2}.
If we have $u=\text{$v_{D}$}$ and $u \in N(\text{$v_{L}$})$, \textsc{Mapping} is forcing such $\text{$v_{L}$}$ to be a leaf of real scene. Notice that if $z=\text{$v_{L}$}$, $z \sim v$ happens to be a real leaf with $d(z)=1$, \textsc{Mapping} can prevent $z$ from being a potential independent forbidden minor $X$ since $d(z)=1$. Even so, $z$ could potentially create non-mandatory $C_\text{$v_{H}$}$ components. As $\lfloor F \rceil$ cancels the appearance of a leaf in order to prevent it from creating non-mandatory $C_\text{$v_{H}$}$ components, \textsc{Mapping} is also imitating RS-R by $\text{$v_{L}$}^{i+4}$ because:
\begin{enumerate}[(1)]
\item not every $\text{$v_{L}$}$ will turn to be a leaf; and
\item $u=\text{$v_{L}$}$ can also cancel the appearance of non-mandatory $C_\text{$v_{H}$}$ components and potential independent forbidden minor $X$ by either preventing $\text{$v_{L}$}$ from being a real leaf or degenerating $C_\text{$v_{A}$}$ components.
\\
\end{enumerate}
Thus, $\text{$v_{L}$}^{i+4}$ can also retard $\varepsilon$ growth rate. In addition, as $\lfloor F \rceil$ needs to ensure that at least one $v \sim \text{$v_{L}$}$ will reach \text{$v_{L}$}\space by $v \to \text{$v_{L}$}=T$ due to the fact that $\text{$v_{L}$}$ is a potential leaf, \textsc{Mapping} is imitating RS-R by giving $\text{$v_{L}$}$ the highest priority.
If $u=\text{$v_{A}$}\text{$v_{N}$}$ due to $\text{$v_{A}$}\text{$v_{N}$}^{i+2}$, and $z^\prime = \text{$v_{A}$}\text{$v_{N}$}$,$z^\prime \sim v$ happens to be a \text{$v_{H}$}\space with $d(z^\prime)=2$, \textsc{Mapping} can prevent $z^\prime$ from being a potential independent forbidden minor $X$ since $d(z^\prime)=1$ will hold for $u=z^\prime$ when \textsc{Mapping} is passing through $z^\prime$. Even so, it could potentially generate non-mandatory $C_\text{$v_{H}$}$ components due to $d(z^\prime)=2$. As:
\begin{enumerate}[(1)]
\item these non-mandatory $C_\text{$v_{H}$}$ components can be degenerated by $\text{$v_{L}$}^{i+4}$ and $\text{$v_{D}$}^{i+3}$; and
\item not every $\text{$v_{A}$}\text{$v_{N}$}$ will turn to be a \text{$v_{H}$}\space with $d(\text{$v_{H}$})=2$;
\end{enumerate}
\textsc{Mapping} is imitating RS-R by giving $\text{$v_{A}$}\text{$v_{N}$}$ an intermediary priority in order to prevent $\text{$v_{A}$}\text{$v_{N}$}$ from generating non-mandatory $C_\text{$v_{H}$}$ components.
\\
If we have $u=\text{$v_{I}$}$ due to $\text{$v_{I}$}^{i}$, $\text{$v_{I}$}$ can delay the appearance of $C_\text{$v_{H}$}$ components by forcing \textsc{Mapping} to cancel the appearance of non-mandatory $C_\text{$v_{H}$}$ components since $u=\text{$v_{I}$}$ prevents $\text{$v_{I}$}$ from being transformed into a \text{$v_{A}$}. In addition, $\text{$v_{I}$}$ can also retard $\varepsilon$ growth rate by forcing \textsc{Mapping} to give the degeneration process a higher priority due to $\text{$v_{I}$} \sim \text{$v_{D}$}$, $\text{$v_{D}$}^{i+3}$ and $\text{$v_{L}$}^{i+4}$.
Notice that $\text{$v_{I}$}$ can also retard $\varepsilon$ growth rate by maximizing the following equation, which is the sum of $abs(|V(A_i)| - |V(B_i)|)$ from state $i=0$ to current state $x$, with $A_i=H^{*}$ being the component set in line 7 in state $i$ of \textsc{Mapping}, and $B_i=H_i[V(H_i)-\{V(A_i) - A_{i_\text{$v_{0}$} } \}]$, $B_{i_\text{$v_{0}$} } \equiv A_{i_\text{$v_{0}$} }$ in the same state $i$.
\begin{equation}
\label{eq:3}
{\begin{array}{rcll} \text{maximize} \hphantom{00} && {\displaystyle \sum_{i=0}^{x} abs\left(|V(A_i)| - |V(B_i)|\right)} \\[12pt] \text{subject to} \hphantom{00}&& {\displaystyle A_{i_\text{$v_{0}$} } \equiv B_{i_\text{$v_{0}$} }} \end{array}}
\end{equation}
The reason is that \text{$v_{I}$}\space can potentially reduce the local connectivity $l(w,\text{$v_{0}$} )$ of at least one $w \in C_\text{$v_{B}$}$ where $\text{$v_{I}$} \sim C_\text{$v_{B}$}$. If so, $\sum_{i=0}^{x} abs(|V(A_i)| - |V(B_i)|)$ tends to be maximized by $u=\text{$v_{I}$}$, specially when $\text{$v_{I}$}$ forces at least one $\text{$v_{D}$} \sim \text{$v_{I}$}$ of such $C_\text{$v_{B}$}$ to be a subdivision of $H$, which could increase the success rate of CC operations made by \textsc{Mapping} when $v \in C_\text{$v_{B}$}$, seeing that:
\begin{enumerate}[(1)]
\item $\text{$v_{I}$}$ has the lowest priority; and
\item not every $w \in C_\text{$v_{B}$}$ with $\text{$v_{I}$} \sim C_\text{$v_{B}$}$ will have its local connectivity $l(w,\text{$v_{0}$} )$ reduced, because of the higher priority given to degeneration process.
\end{enumerate}
Notice that $\varepsilon$ such that $\varepsilon>|V|$ suggests that the current scene $H^\prime \supseteq H$ of \textsc{Mapping} has regions $R$ of vertices with a small local connectivity $l(w,\text{$v_{0}$} )$,$w \in R$. As \textsc{Mapping} minimizes indirectly the appearance of $C_\text{$v_{H}$}$ components by decreasing both $|V|$, $|H_\text{$v_{A}$}|$, and consequently $|H_\text{$v_{I}$}|$, the appearance of such regions can be minimized. That's because the appearance of mandatory $C_\text{$v_{H}$}$ components is maximized by minimizing the following equation, which is the summation from state $i=0$ to current state $x$ of an equation that, by Lemma \ref{lem:1}, relates the maximization of $|H[V-v]_\text{$v_{H}$}|$ to $|H_\text{$v_{A}$}|$. As a consequence, \textsc{Mapping} can make the success rate of CC operations increase, and retard $\varepsilon$ growth rate through its degeneration process.
\begin{equation}
{\begin{array}{rcll} \text{minimize} \hphantom{00} && {\displaystyle \sum_{i=0}^{x} |H_i[V-v]_{\text{$v_{H}$}}| - |H_{i_ \text{$v_{A}$}} \cap H_i[V-v]_{\text{$v_{H}$}}|} \\[12pt] \text{subject to} \hphantom{00}&& {\displaystyle H_{i_\text{$v_{A}$}} \cap H_i[V-v]_{\text{$v_{H}$}} \neq \emptyset} \end{array}}
\end{equation}
The success rate of CC operations also can be increased by $u=\text{$v_{D}$}$ with $\text{$v_{D}$} \in C_\text{$v_{A}$}$ when: (1) $A_v(\text{$v_{A}$},H)=F$ in $H$; or (2) $A_v(\text{$v_{A}$},H)=F$ in $H[V-\text{$v_{D}$}]$. The reason is that such $\text{$v_{A}$}$ can potentially create both independent potential forbidden minors $X$ and non-mandatory $C_\text{$v_{H}$}$ components, with $\text{$v_{D}$} \in X$ and $\text{$v_{D}$} \in C_\text{$v_{H}$}$. When \textsc{Mapping} passes through such independent potential forbidden minors $X$ and non-mandatory $C_\text{$v_{H}$}$ components before passing through $\text{$v_{A}$}$, it could cancel the appearance of them, and consequently make the success rate of CC operations increase when $0 \leq |C_\text{$v_{A}$} - 1| \leq 1$. If so, such \text{$v_{A}$}\space will behave like an isolated component. As $\lfloor F \rceil$ also cancels the appearance of both independent potential forbidden minors $X$ and non-mandatory $C_\text{$v_{H}$}$ components, \textsc{Mapping} is imitating RS-R in this case, even if such $\text{$v_{A}$}$ is not explicitly independent in minimal scene.
If we have $u=\text{$v_{N}$}$ due to $\text{$v_{N}$}^{i+1}$, we can also increase the success rate of CC operations, since it doesn't influence any $C_\text{$v_{B}$}$ to be $C_\text{$v_{H}$}$ directly. Because of that, it can prevent $|H_\text{$v_{A}$}|$ and $|H_\text{$v_{I}$}|$ from growing, which delays the appearance of $C_\text{$v_{H}$}$ components. Even if $\text{$v_{D}$} \sim \text{$v_{N}$}$,$\text{$v_{D}$} \in C_\text{$v_{B}$}$, both $\text{$v_{D}$}^{i+3}$ and $\text{$v_{L}$}^{i+4}$ can prevent $w \in C_\text{$v_{B}$}$ from having $l(w,\text{$v_{0}$} )$ reduced. Thus, $\text{$v_{N}$}$ can also retard $\varepsilon$ growth rate.
\\
In addition, notice that even if \textsc{Mapping} generates non-mandatory $C_\text{$v_{H}$}$ components in regions $R$ of vertices with small local connectivity $l(w,\text{$v_{0}$} )$,$w \in R$, no error is thrown when $v$ or \text{$v_{0}$} has none or more than one different vertices as successor unless constraint \ref{cst:1} or \ref{cst:2} doesn't hold for $H[V-v]$. Such flexibility also makes the success rate of CC operations increase and can retard $\varepsilon$ growth rate. Furthermore, \textsc{Mapping} can throw an error with $\varepsilon$ being very small when $H$ has regions with a small connectivity, since \textsc{Mapping} doesn't make $v \to u = T$\space operations when $H[V-v]_\text{$v_{H}$}\neq 0$.
Even so, $\lfloor F \rceil$ can't ignore minimal scene constraints completely. If RS-R ignores minimal scene constraints completely, we have:
\begin{enumerate}[(1)]
\item at least one $\text{$v_{B}$} \in V(\tau_i)$ in every scenario with $H_\text{$v_{B}$} \neq \emptyset$ would happen to be an inconsistency of real scene in at least one of its states. If so, $\lfloor F \rceil$ in every scenario with $H_\text{$v_{B}$} \neq \emptyset$ would be ignoring Theorem \ref{thm:1} completely in at least one state of RS-R, which is invalid.
\item at least one $v \in V(H)$ in every scenario with $H_\text{$v_{B}$} = \emptyset$ would happen to have $v \to u=F$, for every $u \sim v$, in at least one of its states, even when Theorem \ref{thm:1} is not being ignored completely, which is invalid.
\end{enumerate}
Thus, \textsc{Mapping} ignoring Theorem \ref{thm:1} partially is not a sufficient condition to prove that \textsc{Mapping} is not imitating RS-R.
As every constraint of \textsc{Mapping} can potentially retard $\varepsilon$ growth rate, \textsc{Mapping} can potentially distort its potentially-exponential error rate curve. $\lfloor F \rceil$ also distorts the potentially-exponential error rate curve of RS-R, which is represented by the number of times that $v \to u=F$ holds for $u$, since $\lfloor F \rceil$ predicts, directly or indirectly, when the error rate curve of RS-R will grow exponentially in order to make RS-R abort itself. As a consequence, $\varepsilon$ growth rate must be distorted by \textsc{Mapping} in order to make $\varepsilon$ converge to $k$ such that $k < |V|$ in order to prevent it from aborting itself, which is not a sufficient condition to prove that \textsc{Mapping} is not imitating RS-R.
If $\varepsilon$ happens to converge to $k$ such that $k \geq |V|$, \textsc{Mapping} would be failing to make $\varepsilon$ growth rate retard. In this case, \textsc{Mapping} would be using probability explicitly when it doesn't discard its current scene since:
\begin{enumerate}[(1)]
\item by Lemma \ref{lem:2}, it doesn't know at least one valid $u$ for a $v$ in the worst case scenario; and
\item it is tending to ignore Theorem \ref{thm:1} completely as every constraint is failing to make $\varepsilon$ growth rate retard.
\end{enumerate}
When \textsc{Mapping} discards its current scene due to $\varepsilon$ converging to $k$ such that $k \geq |V|$, it is still imitating RS-R. The reason is that we can assume that $\lfloor F \rceil$ needs to construct a valid hamiltonian sequence fragment starting from $u$ by calling \textsc{Hamiltonian-Sequence} recursively in order to check if $v \to u = F$ holds for $u$, directly or indirectly, since RS-R performs only $v \to u = T$ operations. If $\lfloor F \rceil$ can't construct such valid hamiltonian sequence fragment starting from $u$, it'll also discard $G$ without aborting RS-R in order to return $v \to u=F$ to its caller, that, in turn, either increments its error count by one or makes $v \to u=F$ hold for the remaining $u$. If $v \to u=F$ holds for every $u \sim v$ and $\lfloor F \rceil$ makes RS-R throw a non-catchable exception, $\lfloor F \rceil$ is predicting when its error rate curve distortion is about to be degenerated in order to abort RS-R.
If Lemma \ref{lem:2} holds for \textsc{Mapping}, $m = \vartheta$, with $\vartheta=\frac{|V|^{2}-|V|}{2}$ being the number of times that \textsc{Mapping} checks if $v \to u = T$\space holds for every $u$ found when it is not aborted in the worst case scenario. That's because, if Lemma \ref{lem:2} holds for \textsc{Mapping}, for each vertex $v_i$ found by \textsc{Mapping} with $i$ such that $1 \leq i \leq |V|$, \textsc{Mapping} needs to check if $v_i \to u=T$ holds for $u \sim v_i$ at most $|V|-i$ times.
If $m>\vartheta$, \textsc{Mapping} would be failing to retard $\varepsilon$ growth rate. In this case, \textsc{Mapping} would be using probability explicitly if it doesn't abort itself since:
\begin{enumerate}[(1)]
\item By Lemma \ref{lem:2}, at least one $v$ would have an unknown successor; and
\item It is tending to ignore Theorem \ref{thm:1} completely since every constraint is failing to make $\varepsilon$ growth rate retard.
\end{enumerate}
However, when \textsc{Mapping} aborts itself due to $m>\vartheta$, \textsc{Mapping} is still imitating RS-R since it enforces the stop condition of RS-R by aborting itself, seeing that the first instance of RS-R also checks if $v \to u = T$\space holds for every $u$ found $\frac{|V|^{2}-|V|}{2}$ times when it is not aborted in the worst case scenario. As a consequence, $m =\vartheta$ must hold for $m$ in order to prevent \textsc{Mapping} from aborting itself, which is not a sufficient condition to prove that \textsc{Mapping} is not imitating RS-R.
In addition, notice that \textsc{Mapping} can produce an incomplete $L_e$, without aborting itself and without reaching its base case, when \textsc{Sync-Error} throws a non-catchable exception. Even so, \textsc{Mapping} is still imitating the behaviour of RS-R since $\lfloor F \rceil$ can abort RS-R without visiting every vertex from real scene when $v \to u=F$ holds for every $u \sim v$. Furthermore, we can assume that:
\begin{enumerate}[(1)]
\item $\lfloor F \rceil$ can change the first $v=y$ of the first \textsc{Hamiltonian-Sequence} call, when $y$ is preventing $\lfloor F \rceil$ from constructing a valid hamiltonian sequence $S$ in order to not make RS-R fail to produce a valid output, with $S=v_i ... v_k$ such that $|S|=|V|$, $1 \leq k \leq |V|$, $1 \leq i \leq k$, $v_1 \neq y$; or
\item $\lfloor F \rceil$ can split $H$ into different components with $|H^n|=1$ when it wants to create a hamiltonian path $S = S_1 \cup S_2$ such that $S_1 = v ... r$, $S_2 = v ... r^\prime$, $r \in V(H)$,$r^\prime \in V(H)$. In this case, when $r$ or $r^\prime$ is reached, $\lfloor F \rceil$ creates a new instance of RS-R to reach the remaining dead end, which consequently forces the current instance RS-R to reach its base case instead of trying to enforce constraints \ref{cst:1} and \ref{cst:2}.
\end{enumerate}
Therefore, \textsc{Mapping} producing an incomplete $L_e$ is not a sufficient condition to prove that \textsc{Mapping} is not imitating RS-R.
\\
As \textsc{Mapping} imitates RS-R, even when it does not reach its base case, \textsc{Mapping} ignoring Theorem \ref{thm:1} partially is not a sufficient condition to make \textsc{Mapping} imitate RS-E. Thus, it suggests that:
\begin{enumerate}[(1)]
\item $\lfloor F \rceil$ can generate at least one hamiltonian sequence $S = e_{i}e_{i+1} ... e_{k-1}e_{k}$ (if it exists) of $H$ such that $\{S \cap L_e\} \neq \emptyset$; and
\item $\lfloor F \rceil$ can also generate at least one path $S^\prime \neq \emptyset$, $\{S^\prime \cap L_e\} \neq \emptyset$, that makes RS-R not enforce constraints \ref{cst:1} and \ref{cst:2} explicitly in at least one of its states when $\lfloor F \rceil$ wants to either:
\begin{enumerate}[-]
\item make RS-R abort itself in the absence of at least one constructable hamiltonian sequence $S = e_{i}e_{i+1} ... e_{k-1}e_{k}$; or
\item construct a hamiltonian path $S = S_1 \cup S_2$ such that $S_1 = v ... r$, $S_2 = v ... r^\prime$, $r \in V(H)$,$r^\prime \in V(H)$ by creating a new instance of RS-R to reach $r$ when $r^\prime$ is reached (or vice-versa) in order to force the current instance of RS-R to reach its base case instead of trying to enforce constraints \ref{cst:1} and \ref{cst:2}.
\end{enumerate}
\end{enumerate}
Thus, \textsc{Mapping} is goal-oriented with $\eta=|V|$ and $m=\frac{|V|^{2}-|V|}{2}$.
\end{proof}
\subsection{Reconstruction phrase}
\label{sec:7}
In this section, the reconstruction phrase is explained. The reconstruction task is done by the \textsc{Reconstruct} function, that takes the following parameters as input by reference: $G=(V,E)$ v, $L_e$, $H^{*}$, $\phi$, $P_{x_1}$, $P_{x_2}$. The edge $\phi$ is a non-synchronized edge $(x_1 x_2) \in L_e$ where $x_1$ and $x_2$ are initially the last vertices of two expandable paths $P^\prime=(x_1)$ and $P^{\prime\prime}=(x_2)$, respectively. In addition, we need to assume that $x_1=v$ and $x_2=\text{$v_{0}$} $ in this phrase in order to check if both constrains \ref{cst:1} and \ref{cst:2} hold for $H[V-v]$. $P_{x_1}=P^\prime$ will be the current path we're expanding and $P_{x_2}=P^{\prime\prime}$, the other path. As for every $u$, $v$ must be added to either $P_{x_1}$ or $P_{x_2}$, $x_1$ and $x_2$ must be properly updated in order to represent the last vertices of $P_{x_1}$ and $P_{x_2}$, respectively.
The term \textit{expansion call} is used throughout this paper whenever we make a recursive call to \textsc{Reconstruct}. Every expansion call restores the initial state of both $H$ and $L_e$. Some conventions are used in this section. The \textit{synchronized} edges will be written as $[v,u]$. The edge $[w,\square]$ is a synchronized edge $e \in L_e$ with $w \in e$.
\begin{definition}
A synchronized edge is either: (1) a non-synchronized edge $(v,u)$ that got converted to $[v,u]$ by \textsc{Reconstruct}; or (2) an edge $[v,u]$ added to $L_e$ by \textsc{Reconstruct}.
\end{definition}
The notation $d^{*}(x)$ is used to represent the degree of a vertex $x$ of a scene $H^{*}$, which is a clone of $H^\prime \supseteq H$ scene of the current state of \textsc{Reconstruct}, such that $V(H^{*})=V(H^\prime)$ and $E(H^{*})=L_e \cap E(H^\prime)$.
$P_v(u)$ function is used by \textsc{Reconstruct} to pass through $H$ by using paths of $H^{*}$, starting from $v \in \{x_1,x_2\}$ until it reaches $z=u$ such that $d^{*}(z)=1$. During this process, it performs successive $H-v$ operation, converts edges from $(v,u)$ to $[v,u]$, and updates $P_v$. When $z$ is reached, it returns $z$.
$[v,u]$ cannot be removed from $H$ unless by undoing operations performed by \textsc{Reconstruct}.
\subsubsection{Goal}
\label{sec:8}
The goal of reconstruction phrase is to reconstruct a hamiltonian sequence (if it exists) by passing through $H$ in order to attach inconsistent $C_\text{$v_{H}$}$ components. If such hamiltonian sequence is reconstructed, $H^{*}$ will be a path graph corresponding to a valid hamiltonian sequence of the maximal $H \equiv G$. In order to do that, some edges may need to be added to $L_e$ to merge a component $H^{*}_{\prime}$ with $v \in V(H^{*}_{\prime})$ to another component $H^{*}_{\prime\prime}$ so that $P_v(u)$ can reach vertices $u \in V(H^{*}_{\prime\prime})$ properly.
Notice that if \textsc{Reconstruct} passes through $H^{\prime}$ in a scene $H^{\prime\prime}$, with $H^{\prime}$ being a scene in a state $k$ of \textsc{Mapping} and $H^{\prime\prime}$ being the current scene of \textsc{Reconstruct} such that $V(H^{\prime})\cap V(H^{\prime\prime}) \neq \emptyset$, some edges $(v,u) \in L_e$ could be removed from $H$ to make both constraints \ref{cst:1} and \ref{cst:2} hold for $H[V-v]$. However, this is not a sufficient condition to prove that \textsc{Mapping} is not imitating the behaviour of RS-R. (see Sect.~\ref{sec:13}). Therefore, \textsc{Reconstruct} can make both constraints \ref{cst:1} and \ref{cst:2} hold for $H[V-v]$, even if some edges are removed from $L_e$.
The problem is that \textsc{Reconstruct} must decide when to abort the reconstruction process. Because of that, the non-existence of a sequence of $C_\text{$v_{H}$}$ attachments that needs to be made in order to convert $L_e$ to a hamiltonian sequence is part of the forbidden condition of SFCM-R. That's because the following is an immediate corollary of Theorem \ref{thm:2}.
\begin{corollary}
\label{clr:1}
If \textsc{Mapping} outputs $L_e$, such set will be formed by path fragments that generate in RS-R context both (1) potential independent forbidden minors and (2) potential non-mandatory $C_\text{$v_{H}$}$ components.
\end{corollary}
It means that if such sequence of $C_\text{$v_{H}$}$ attachments exists for the current $L_e$, and it is not properly enforced by \textsc{Reconstruct}, then it can be considered a possible sufficient condition to make the \textit{mirrorable real scene algorithm}, which is a modified version of RS-R that we want to mirror in this phrase, fail to produce a valid output. We call the output of modified RS-R \textit{hamiltonian sequence given $L_e$}, because it takes a non-synchronized hamiltonian sequence $L_e = L_e - S$ as input, with $S$ being a set of non-synchronized edges removed from $L_e$ by \textsc{Reconstruct}.
\begin{definition}
Let $G=(V,E)$ v be a minimal scene. A hamiltonian sequence given $L_e$ is a simple path $P=v_i ... v_k$ with $1 \leq i \leq k$ of $H$, that visits all vertices, such that $P \cap \{ w \in P : |\{ L_e \cap (w, \square) \}| \geq 1 \} \neq \emptyset$.
\end{definition}
The modified version of real scene algorithm is as follows.
\inicioAlgoritmo{Mirrorable RS-R algorithm}
\Input $G=(V,E)$ v, $P_{x_1}$, $P_{x_2}$, $v \in \{x_1,x_2\}$, $L_e$
\Output Hamiltonian sequence $P^\prime$
\Function{Hamiltonian-sequence}{}
\State $A \gets N(v)$
\State $U \gets \{ u \in A : (v,u) \in L_e \}$
\State $X \gets \emptyset$
\If{constraints \ref{cst:1} or \ref{cst:2} doesn't hold for $H[V-v]$}
\State $A \gets \emptyset$
\EndIf
\For {\textbf{each} $u \in A$}
\If {$(v,u) \in U$}
\If{constraints \ref{cst:1} or \ref{cst:2} doesn't hold for $H[V-\{v,u\}]$}
\State $X \gets X \cup \{u\}$
\State $L_e \gets L_e - (v,u)$
\Else
\State $A \gets \{u\}$
\State $X \gets \emptyset$
\State \textbf{break}
\EndIf
\Else
\If{$v \to u = F$}
\State $X \gets X \cup \{u\}$
\EndIf
\EndIf
\EndFor
\State $A \gets A - X$
\If{$A \neq \emptyset$}
\State $v \to u=T$ with $u \in A$
\If{$(v,u) \in L_e$}
\State Convert $(v,u)$ to $[v,u]$
\Else
\State Remove an edge $(u,\square)$ from $L_e$ if $|\{L_e \cap (u,\square)\}| > 2$
\State $L_e \gets L_e \cup [v,u]$
\EndIf
\State Update $P_{v}$
\State $u \gets w \in \{x_1,x_2\}$
\State \textsc{Hamiltonian-sequence($H$, $P_{x_1}$, $P_{x_2}$, $u$,$L_e$)}
\Else
\If{$|P_{x_1} \cup P_{x_2}| \neq |V(H)| $}
\State \textbf{throw} error
\EndIf
\EndIf
\State $B \gets P_{x_2} \textit{ in reverse order}$
\State $P^\prime \gets B \cup P_{x_1}$
\State \Return $P^\prime$
\EndFunction
\fimAlgoritmo{}
In addition, \textsc{Reconstruct} may have inconsistent subscenes $H^\prime \supset H$ with non-attachable $C_\text{$v_{H}$}$ components. It means that if we try to attach every inconsistent $C_\text{$v_{H}$}$ by modifying $L_e$ aggressively, we could end up with SFCM-R imitating RS-E. Remember that SFCM-R must not use exhaustive methods to reconstruct the hamiltonian sequence since we want to mirror a non-exhaustive algorithm. Therefore, we need to use a goal-oriented approach in order to attach inconsistent $C_\text{$v_{H}$}$ properly without relying on probability and find a valid sequence of $C_\text{$v_{H}$}$ attachments. (see Sect.~\ref{sec:10})
\subsubsection{Algorithm}
\label{sec:9}
In this section, the pseudocode of \textsc{Reconstruct} is explained. Every line number mentioned in this section refers to the pseudocode of \textsc{Reconstruct}. Initially, \textsc{Reconstruct} takes the following parameters as input: $G=(V,E)$ v, $H^{*}$, $\phi=(\text{$v_{0}$} ,\square)$, $L_e$, $P_{x_1}=\{x_1\}$ and $P_{x_2}=\{x_2\}$, with $x_2=\text{$v_{0}$} \in \phi$ and $x_1=\{w \in \phi : w \neq \text{$v_{0}$} \}$.
\textsc{Reconstruct} passes through $H$ by using paths of $H^{*}$, performs subsequent $H-v$ operations by expanding $P_{x_1}$ or $P_{x_2}$ paths alternatively with $v$ such that $v \in \{x_1,x_2\}$ (line 7), and connects components of $H^{*}$ by adding a synchronized edge $[v,u]$ (line 27). During this process, it needs to remove some inconsistent edges $(v,u) \in L_e$ in its current state considering the following cases.
\begin{enumerate}[I.]
\item The first case is when we have $(v, \text{$v_{H}$})$.
\item The second case is when $H_{\text{$v_{H}$}} \neq \emptyset$ and $(v,u)$ doesn't enforce both constraints \ref{cst:1} and \ref{cst:2}.
\item The third case is when $(v,x_1)$ or $(v,x_2)$, since both $P_{x_1}$ and $P_{x_2}$ are concatenated to form the output of mirrorable RS-R algorithm.
\end{enumerate}
Notice that I or II could be ignored in hamiltonian path context since both $P_{x_1}$ and $P_{x_2}$ can have non-adjacent dead ends. As \textsc{Reconstruct} considers these two cases inconsistencies, we need to use specific goal-oriented strategies if we want to reconstruct a hamiltonian path (see Sect.~\ref{sec:12}).
\inicioAlgoritmo{Reconstruction of a hamiltonian sequence given $L_e$ (Simplified)}
\Input $G=(V,E)$ v, $H^{*}$, $L_e$, $\phi$, $P_{x_1}$, $P_{x_2}$
\Output Set $L_e$ of synchronized edges
\Function{Reconstruct}{}
\State $S_0 \gets (\emptyset)$
\While{reconstruction of $L_e$ is not done}
\Try
\If{constraint \ref{cst:1} or \ref{cst:2} doesn't hold for $H[V-v]$}
\State \textbf{throw} error
\EndIf
\State $v \gets P_v(u)$ with $d^{*}(u)=1$
\State $S_0 \gets (\emptyset)$
\State $S_1 \gets (\emptyset)$
\State $S_2 \gets (\emptyset)$
\For {\textbf{each} non-synchronized $e \in L_e$}
\If{ $w \in e$ with $w$ being a valid non-visited $w \sim v$}
\If{$d^{*}(w) = 1$}
\State $S_1 \gets S_1 \cup e$
\EndIf
\If{$d^{*}(w)=2$}
\State $S_2 \gets S_2 \cup e$
\EndIf
\EndIf
\EndFor
\For {\textbf{each} non-mapped $w$ with $d^{*}(w)=0$,$w \sim v$}
\State $e \gets (w,w)$
\State $L_e \gets L_e \cup \{e,e\}$
\State $S_0 \gets S_0 \cup e$
\EndFor
\State $S_2 \gets S_2 \cup S_0$
\State $S \gets S_1 \cup S_2$
\If {$S \neq \emptyset$}
\State $u \gets w$ with $(w,\square) \in S$
\If {$v \to u=T$}
\State $L_e \gets L_e - S_0$
\State $L_e \gets L_e \cup [v,u]$
\State $v \gets q \in \{ x_1, x_2 \}$
\EndIf
\Else
\State \textbf{throw} error
\EndIf
\EndTry
\Catch{error}
\State $L_e \gets L_e - S_0$
\State Undo $k$ states
\State Use goal-oriented strategies
\EndCatch
\EndWhile
\State \Return $L_e$
\EndFunction
\fimAlgoritmo{}
If \textsc{Reconstruct} finds a valid $v$ with $d^{*}(v)=1$, the next step is to choose $w \sim v$ (line 24), which will be the successor of $v$, by using the following conventions in an ordered manner.
\begin{enumerate}[1.]
\item If $S_1 \neq \emptyset$, choose $w^\prime \sim v$ of the first element $(w^\prime,\square) \in S_1$.
\item If $S_1 = \emptyset$, remove the first element $y=(w^{\prime\prime},\square) \in S_2$ from $L_e$ in order to make $d^{*}(w^{\prime\prime})=1$, $w^{\prime\prime} \sim v$ hold for $w^{\prime\prime}$, then choose $w^{\prime\prime}$ such that $z = \{L_e \cap (w^{\prime\prime},\square)\} - y$, $w^{\prime\prime} \in z$.
\item If $z$ is removed from $L_e$ because of I, II or III in the next state, perform $L_e \cup y$ and remove $z$ from $L_e$ instead of $y$. Then, choose $w^{\prime\prime}$ such that $y = \{L_e \cap (w^{\prime\prime},\square)\} - z$,$w^{\prime\prime} \in y$.
\end{enumerate}
\begin{observation} Whenever a goal-oriented strategy removes either $(v,\square)$ or $[v,\square]$ from $L_e$, and makes $d^{*}(v)=1$ hold for $v$, \textsc{Reconstruct} must use the conventions of this section in order to choose a non-visited $u$.
\end{observation}
Notice that \textsc{Reconstruct} temporarily changes $d^{*}(w)$ when $d^{*}(w)=0$ (line 19) in order to force $P_v$ to use the aforementioned conventions even when $w$ is a non-mapped vertex.
If an inconsistency is found during this process, an error needs to be thrown by \textsc{Reconstruct} (lines 6 and 30). Every inconsistent $C_\text{$v_{H}$}$ component must be attached by goal-oriented strategies (line 34). Because of that, \textsc{Reconstruct} undoes modifications in $H$, $L_e$, $P_{x_1}$, and $P_{x_2}$ (line 33), in order to go back to an earlier $v$ state to be able to use some goal-oriented strategy to attach inconsistent $C_\text{$v_{H}$}$ components. The reconstruction process continues until either the reconstruction of $L_e$ is done (line 3) or a goal-oriented strategy aborts the reconstruction process.
As an example of hamiltonian sequence reconstructed by SFCM-R, Figure \ref{fig:1} shows an arbitrary graph $H$ mapped by \textsc{Mapping} function with $\text{$v_{0}$} =23$ on the left side. On the right side, we can see the non-synchronized hamiltonian sequence of $H$ reconstructed by \textsc{Reconstruct}.
\begin{figure}
\caption{Example of minimal scene mapping with $v=\text{$v_{0}$}$}
\label{fig:1}
\end{figure}
In this figure, purple edges represent synchronized edges added by \textsc{Reconstruct} to connect components of $H^{*}$. The red edges represent non-synchronized edges that got converted to synchronized edges by \textsc{Reconstruct}. The green edges represent synchronized edges $[v,w]$ that were added to $L_e$ in order to attach an inconsistent $C_\text{$v_{H}$}$ component with $w \in C_\text{$v_{H}$}$. $x$ is the final state of reconstruction process.
\subsection{Goal-oriented approach}
\label{sec:10}
In this section, the goal-oriented approach is presented and can be used in a non-probabilistic goal-oriented implementation of reconstruction phrase. The main goal of using a goal-oriented approach is to prevent SFCM-R from imitating RS-E during the reconstruction process. Before continuing, we define a structure that we use to help \textsc{Reconstruct} to make goal-oriented choices. Such structure will be called \textit{real-scene perception network (RSPN)}, and we use it to store information related to goal-oriented strategies.
\begin{definition}
Real scene perception network (RSPN) is a directed tree-like goal-oriented network that starts at \textbf{RSPN} node, which has the following children set \{\textbf{\textit{A}} ,\textbf{\textit{C}} ,\textbf{\textit{J}} ,\textbf{\textit{N}}\}, where $\textbf{\textit{A}} = \{a_i ... a_n\}$ is the attachment node, $\textbf{\textit{C}} = \{ c_i ... c_n\}$ is the current state node, $\textbf{\textit{J}} = \{ j_i ... j_k\}$ is the ordering node, and $\textbf{\textit{N}} = \{ n_i ... n_k\}$ is the region node.
\end{definition}
It's very important to store some information about goal-oriented strategies since the only difference between an expansion process from another is the way we pass through $H$ by using edges $e \in L_e$, which can lead to the creation of different attachable $C_\text{$v_{H}$}$. Because \textsc{Reconstruct} has conventions to pass through $H$ by using paths of $H^{*}$, RSPN and strategies can be useful to change $L_e$ relying on knowledge related to real scene instead of probability in order to give such conventions more flexibility.
\\
Before continuing, two rates need to be defined.
\begin{definition}
The \textit{negativity rate} $\gamma$ is the sum of $f_\gamma(x=0, a_{\gamma_i})$ from states $i=0$ to current state $z$ and represents the rate of how likely is the current state $z$ of reconstruction process to be inconsistent.
\begin{equation}
{\begin{array}{rcll} f_\gamma(x, a_{\gamma_i}) = \displaystyle \frac{1}{(1 - a_{\gamma_i})\sqrt{2 \pi}} e^{\displaystyle -\frac{x^2}{(1-a_{\gamma_i})} } \hphantom{00} &&
0 \leq a_{\gamma_i} < 1,\;\; x \leq 0
\end{array}}
\end{equation}
\begin{equation}
\gamma = \sum^z_{i=0}f_{\gamma}(x=0, a_{\gamma_i})
\end{equation}
\end{definition}
\begin{definition}
The \textit{tolerance rate} $t$ is the sum of \textit{degree of tolerance} over $\gamma$ from states $i=0$ to current state $z$ of reconstruction process.
\begin{equation}
t =\sum^z_{i=0} f_{\gamma}(x=0, a_{\gamma_i}) + t_i
\end{equation}
\end{definition}
As \textsc{Reconstruct} undoes $k$ states to attach inconsistent $C_\text{$v_{H}$}$ components, $\gamma$ growth rate must be adjusted whenever a specific strategy fails to attach a $C_\text{$v_{H}$}$ properly. A \textit{tolerance policy} $\lfloor T \rceil$ is needed to adjust $\gamma$ and $t$ in order to select and trigger a goal-oriented strategy in an appropriate moment. $\lfloor T \rceil$ must also prevent SFCM-R from imitating RS-E by making what we call the \textit{curve distortion ring} $\ltimes(\gamma, t)$ be disintegrated in some state of \textsc{Reconstruct}. $\ltimes(\gamma, t)$ is disintegrated when it returns $F$.
\begin{equation}
\ltimes (\gamma, t)=\left\{\begin{array}{lr}
T, &
\text{if } t-\gamma > 0 \\
F, & \text{ otherwise }
\end{array}\right\}
\end{equation}
The disintegration of $\ltimes(\gamma, t)$ made by $\lfloor T \rceil$ is used to make \textsc{Reconstruct} perform a new expansion call. These expansion calls, in turn, make SFCM-R be more prone to degenerate itself in case of successive negative events that make \textsc{Reconstruct} tend to imitate RS-E explicitly, which is invalid (refer to section \ref{sec:11} to understand how this process works). Therefore, $\lfloor T \rceil$ needs to adjust $a_{\gamma_i}$ and $t_i$ of every state $i$ by using a set of actions in order to accomplish the aforementioned goals.
\inicioAlgoritmo{Tolerance policy $\lfloor T \rceil$ }
\State $s \gets$ current state of \textsc{Reconstruct}
\State Adjust $t_s$ and $a_{\gamma_s}$
\State Update RSPN if needed
\State $\textit{S} \gets \emptyset$ \{ set of goal-oriented strategies\}
\State Populate \textit{S}
\While{$s$ is inconsistent}
\For{\textbf{each} goal-oriented strategy $s^\prime \in S$}
\State Trigger $s^\prime$ inside \textsc{Reconstruct} environment
\EndFor
\State $s \gets$ current state of \textsc{Reconstruct}
\State Adjust $t_s$ and $a_{\gamma_s}$
\State Update RSPN if needed
\If{$s$ is consistent}
\State \textbf{break}
\EndIf
\State Populate \textit{S}
\EndWhile
\State Go back to \textsc{Reconstruct} environment
\fimAlgoritmo{}
Therefore, one of the main goals of $\lfloor T \rceil$ is to keep a balance between: (1) retarding the growth rate of both $f(x,a_{\gamma_i})$ and $\gamma$ by triggering goal-oriented strategies that attach inconsistent $C_\text{$v_{H}$}$ components properly; and (2) not retarding the growth rate of both $f(x,a_{\gamma_i})$ and $\gamma$ when some goal-oriented strategy fails to attach inconsistent $C_\text{$v_{H}$}$ components; in order for \textsc{Reconstruct} to be able to continue the reconstruction process without imitating RS-E. Later in this paper, we will prove that a potential hamiltonian sequence can be reconstructed by SFCM-R if $\lfloor T \rceil$ is optimizable (see Sect.~\ref{sec:13}).
\begin{definition}
Let $G=(V,E)$ be a minimal scene. A tolerance policy $\lfloor T \rceil$ is optimizable if it computes the following constrained optimization problem, with $S$ being a set that contains every inconsistent state $i$ found with $\ltimes (\gamma_i, t_i) = F$, without making \textsc{Reconstruct} fail to produce a valid output while behaving like a non-exhaustive algorithm.
\begin{equation}
\underset {t_i} {\operatorname{arg\,min}} \sum_{i \in S} (t_i - \gamma_i)\;\;\;\;{\text{subject to:}}\;\;\; t_i, \gamma_i \in \mathbb{R},\;\; \gamma_i > t_i
\end{equation}
\end{definition}
\subsubsection{Quantum-inspired explanation}
To understand how this process can prevent SFCM-R from imitating RS-E, we can intuitively think of the retardation of $\gamma$ growth rate process as the following simplified quantum-inspired process. In this process, we assume that a distortion ring $\ltimes$ has $N$ distortion particles $p^{+}$, that can behave like their own anti-distortion particles $p^{-}$, and vice-versa. When $p^{-}$ and $p^{+}$ collide, they annihilate each other.
Let $N_{\ltimes}$ be the number of distortion particles $p^{+}$ expected to be observed in distortion ring $\ltimes$, and $N_{\overline{\ltimes}}$ be an unknown non-observed number of anti-distortion particles $p^{-}$ in distortion ring $\ltimes$. In addition, let $E_T(x) = E^{+}(x) + E^{-}(x)$ be the sum of Electromagnetic (EM) waves emitted in $\ltimes$ as a function of time, $E^{+}(x)$ be imaginary EM waves that are expected to be emitted from observed $p^{+}$ particles as a function of time, and $E^{-}(x)$ be imaginary EM waves with opposite charge that are expected to be emitted from observable $p^{-}$ particles as a function of time. To simplify, we assume that $N_{\ltimes}$ is equivalent to the positive amplitude peak of $E^{+}(x)$ and $N_{\overline{\ltimes}}$ is equivalent to the positive amplitude peak of $E^{-}(x)$.
The idea here is to consider a consistent state $s$ of reconstruction process $p^{+}$ particles, an inconsistent state $s^\prime$ of reconstruction process $p^{-}$ particles, and \textsc{Reconstruct} the observer of both $p^{-}$ and $p^{+}$. The following equations are used in this explanation.
\begin{equation}
E^{+}(x) = \sin(\beta x)
\end{equation}
\begin{equation}
E^{-}(x) = \delta_\gamma \sin\left(\frac{3\beta x}{4}\right)
\end{equation}
\begin{equation}
E_T(x) = E^{+}(x) + E^{-}(x)
\end{equation}
\begin{equation}
\alpha, \beta = 2
\end{equation}
In addition, we use the following sigmoid function to represent $a_{\gamma_i}$, which is the $\gamma$ growth rate in state $i$. For conciseness, we assume that $\lfloor T \rceil$ updates both $t_i$ and $a_{\gamma_i}$ whenever $\delta_\gamma$ is changed to avoid repetition.
\begin{equation}
a_{\gamma_i} = f(\delta_\gamma) = \frac{1}{1 + e^{-\delta_\gamma^{2}}}, \qquad 0 \leq f(\delta_\gamma) < 1
\end{equation}
A distortion ring $\ltimes$ is represented by a circle whose center is the point $(C_\ltimes, 0)$
, as illustrated in next figure. $C_\ltimes$ is represented by the following equation.
\begin{equation}
C_\ltimes=\frac{4\pi}{\beta}
\end{equation}
The force of $\ltimes$ (or simply $F_\ltimes$) is equal to the amplitude $A$ of the second inner wave of $\ltimes$ from $E_T(x)$. If $A > 0$ is a positive peak amplitude, we have an observable distortion ring $\ltimes$ (in blue) with $F_\ltimes = A$. If $A$ is a negative peak amplitude or $A=0$, we have an observable anti-distortion ring $\overline{\ltimes}$ (in red) with $F_{\overline{\ltimes}} = -A$.
By convention, we use a dashed blue line for $E^{+}(x)$, a solid red line for $E^{-}(x)$, and a solid blue line for $E_T(x)$. Figure \ref{fig:2} shows $\ltimes$ with $F_\ltimes > 0$ and $E^{+}(x)>0$.
\begin{figure}
\caption{A distortion ring $\ltimes$ with $F_\ltimes > 0$ and $E^{+}(x) > 0$}
\label{fig:2}
\end{figure}
Before continuing, consider the following corollary of Theorem \ref{thm:2}.
\begin{corollary}
\label{clr:4}
\textsc{Mapping} performs an error curve distortion conceptually equivalent to $\lfloor F \rceil$, in order to distort its potentially-equivalent error rate curve, even when \textsc{Sync-Error} throws a non-catchable error or makes \textsc{Mapping} abort itself.
\end{corollary}
By Corollary \ref{clr:4}, we can assume that, neglecting some technical complexities, the observable collision rate between $p^{-}$ and $p^{+}$ can be maximized in a way that it favours $p^{+}$ over $p^{-}$. In other words, we want to observe $N_{\ltimes} > N_{\overline{\ltimes}}$ in order to be sure that we have a consistent observable $\ltimes$. Because of that, $E^{+}(x)$ represents the EM waves emitted from a total of $R_T = N_{\ltimes} - N_{\overline{\ltimes}}$, $R_T > 0$, particles, which is the residue expected after such collision process. In other words, we want to observe an imbalance between $p^{-}$ and $p^{+}$ particles, even if the observed $R_T$ happens to change due to the principle of superposition of states in quantum mechanics as \textsc{Reconstruct} passes through $H$.
An ideal scenario is represented in Figure \ref{fig:3}.
\begin{figure}
\caption{Ideal distortion ring $\ltimes$ with $E_T(x) > 0$ and $\delta_\gamma=0$}
\label{fig:3}
\end{figure}
In this scenario, the curve of $f_\gamma(x, a_{\gamma_i})$ is illustrated in Figure \ref{fig:4}. The blue line from $s_0$ to $\gamma=s_n$ represents the curvature of the distorted curve that \textsc{Mapping} created, that is expected to exists by Corollary \ref{clr:4}, given an optimizable $\lfloor T \rceil$ and a scene $H$ with at least one hamiltonian sequence.
As the observable collision rate between $p^{-}$ and $p^{+}$ can be maximized in a way that it favours $p^{+}$ over $p^{-}$, an optimizable $\lfloor T \rceil$ assumes that \textsc{Reconstruct} is expected to terminate its execution by observing $\ltimes$ and projecting a curvature of a non-exponential curve between $s_0$ and $s_n$. In other words, \textsc{Reconstruct} is expected to terminate its execution without aborting itself, with a small $N_{\overline{\ltimes}}$. Because of that, an optimizable $\lfloor T \rceil$ considers the curve of $f_\gamma(x, a_{\gamma_i})$ the curvature of a potential error rate curve of a \textsc{Reconstruct} instance that runs without aborting itself in the worst case scenario.
\begin{figure}
\caption{Curve of $f(x, a_{\gamma_i})$}
\label{fig:4}
\end{figure}
Figure \ref{fig:5} shows $E_T(x)$ after the observation of particles $p^{-}$ in a state with $\delta_\gamma=\frac{\alpha}{4}$. Notice that the increase of $E^{-}(x)$ was not enough for the amplitude peaks of $E^{-}(x)$ to be greater than the amplitude peaks of $E^{+}(x)$ and $E_T(x)$.
\begin{figure}
\caption{Distortion ring $\ltimes$ observed with $E^{-}(x) < E^{+}(x)$}
\label{fig:5}
\end{figure}
The $f_\gamma(x, a_{\gamma_i})$ curve in this scenario is illustrated in Figure \ref{fig:6}. Such a curve represents the curvature of a non-exponential curve, which is a desired curvature since we want to imitate RS-R.
\begin{figure}
\caption{Curve of $f(x, a_{\gamma_i})$}
\label{fig:6}
\end{figure}
Therefore, if \textsc{Reconstruct} finds a valid hamiltonian sequence in a state with $E^{-}(x) < E^{+}(x)$, $\lfloor T \rceil$ can make $E_T(x)$ collapse to $E^{+}(x)$ by setting $\delta_\gamma=0$, which is the configuration of an ideal distortion ring $\ltimes$.
Figure \ref{fig:9} shows an anti-distortion ring with $E^{-}(x) > E^{+}(x)$ in a state $k$ of \textsc{Reconstruct} with $F_{\overline{\ltimes}}= 0$, $F_{\overline{\ltimes}} \geq t - \gamma$. Notice that the amplitude peaks of $E^{-}(x)$ are greater than the amplitude peaks of $E^{+}(x)$, after the observation of an unexpected number of $p^{-}$ particles.
\begin{figure}
\caption{Anti-distortion ring $\overline{\ltimes}$ with $E^{-}(x) > E^{+}(x)$}
\label{fig:9}
\end{figure}
As illustrated in Figure \ref{fig:9}, \textsc{Reconstruct} observed that the maximization of the collision rate between $p^{-}$ and $p^{+}$ didn't favour $p^{+}$ over $p^{-}$, since $\ltimes$ was spotted behaving like a $\overline{\ltimes}$. In other words, the expected imbalance between $p^{+}$ and $p^{-}$ was not observed, which means that $\ltimes$ is disintegrated. Because of that, the non-exponential curvature of $f_\gamma(x, a_{\gamma_i})$ became unstable.
As $f_\gamma(x, a_{\gamma_i})$ became unstable, $\lfloor T \rceil$ could also make the real error rate function of \textsc{Reconstruct} collapse to $+\infty$ in order to force the current instance of \textsc{Reconstruct} to ``jump'' into an imaginary state of RS-E and, at the same time, make $f(x, a_{\gamma_i})$ project a curvature of an exponential curve. Such a curve is illustrated in the following figure.
\begin{figure}
\caption{Curve of $f(x, a_{\gamma_i})$}
\label{fig:11}
\end{figure}
As a consequence, $\lfloor T \rceil$ can: (1) negate the definitions of $E^{-}(x)$ and $E^{+}(x)$; and (2) make $E_T(x)$ collapse to $E^{+}(x)$ by setting $\delta_\gamma=0$. If so, we will have an ideal anti-distortion ring with almost the same inner structure of the observed anti-distortion ring $\overline{\ltimes}$ showed in Figure \ref{fig:9}. Figure \ref{fig:10} shows such ideal anti-distortion ring.
\begin{figure}
\caption{An ideal anti-distortion ring $\overline{\ltimes}$}
\label{fig:10}
\end{figure}
In this scenario, $\lfloor T \rceil$ can force a new expansion call, which makes SFCM-R be more prone to degenerate itself in case of successive negative events that make \textsc{Reconstruct} be tending to imitate RS-E explicitly. This is a desired behaviour of $\lfloor T \rceil$ since SFCM-R can't imitate RS-E explicitly.
In conclusion, $\lfloor T \rceil$ is essentially taking advantage of Corollary \ref{clr:4} since it implies that the observable collision rate between $p^{-}$ and $p^{+}$ can be maximized in a way that it favours $p^{+}$ over $p^{-}$ , given an optimizable $\lfloor T \rceil$ and a scene $H$ with at least one hamiltonian sequence. Therefore, the main goal of $\lfloor T \rceil$ is to force \textsc{Reconstruct} to imitate RS-R in order to minimize the observation of $p^{-}$ particles, which represent inconsistent states, and consequently try to prevent \textsc{Reconstruct} from behaving like RS-E explicitly.
\subsubsection{General goal-oriented strategies}
\label{sec:11}
In this section, we present the goal-oriented strategies that SFCM-R needs to use to reconstruct a hamiltonian sequence. We call them general goal-oriented strategies due to the fact that they can be used to reconstruct both hamiltonian paths or hamiltonian circuits. The goal-oriented strategies proposed in this section are primarily focused on keeping $H$ connected while preventing SFCM-R from imitating RS-E. Because of that, we assume that every goal-oriented strategy presented in this section is enforcing both constraints \ref{cst:1} and \ref{cst:2}. Please refer to section \ref{sec:12} to see specific strategies for hamiltonian path, that allow $P_{x_1}$ and $P_{x_2}$ to have non-adjacent dead ends when it's needed.
\begin{observation} The strategies proposed in section \ref{sec:11} and \ref{sec:12} don't have necessarily an order of activation. It depends on how $\lfloor T \rceil$ is implemented, and specific signs that suggest that a specific strategy should be triggered by $\lfloor T \rceil$ in \textsc{Reconstruct} environment.
\end{observation}
Before continuing, we need to define some conventions. Every inconsistency $\text{$v_{H}$} \in H_\text{$v_{H}$}$ must be added to current state node $\textbf{\textit{C}}$ when $\ltimes (\gamma, t) = T$. If $\ltimes (\gamma, t) = F$, every $\text{$v_{H}$} \in H_\text{$v_{H}$}$ must be added as a child of $\text{$v_{H}$}_i$ node in expansion call $i$. Such $\text{$v_{H}$}_i$ node is called \textit{static \text{$v_{H}$}\space articulation} and it must be child of $\textbf{\textit{J}}$. Every $C_\text{$v_{H}$}$ attached by adding an edge $[v,w]$ to $L_e$ with $w \in C_\text{$v_{H}$}$, must be added to attachment node $\textbf{\textit{A}}$.
The first strategy is to have $\phi=(\text{$v_{H}$},\square)$, with $\text{$v_{H}$}$ being a \text{$v_{H}$}\space added to $\text{$v_{H}$}_i$ of $\textbf{\textit{J}}$, for every new expansion call made when $\ltimes (\gamma, t) = F$. As an example, the figure below shows the node $\textbf{\textit{J}}$ of RSPN. We can see on the left side an expansion call $k-1$ with a node $j_0 = \text{$v_{H}$}_0=\{w_1, w_2,w_3\}$ that was created in expansion call $k-3$, and another node $j_1 = \text{$v_{H}$}_{1}=\{w_{4}\}$ that was created in expansion call $k-2$. The same figure shows $\textbf{\textit{J}}$ in expansion call $k$ with a node $j_2 = \text{$v_{H}$}_2=\{w_3\}$ that was created in expansion call $k-1$. In such a case, $\textbf{\textit{J}}$ was updated since $w_3$ can't be part of two ordering constraints at the same time due to the fact that every vertex is visited once in hamiltonian sequence context.
Therefore, $w_3$ was removed from $\text{$v_{H}$}_0$ in expansion call $k-1$. As $w_3$ and $w_4$ are the unique nodes of $\text{$v_{H}$}_{2}$ and $\text{$v_{H}$}_{1}$ respectively, we can enforce the ordering between $w_{3}$ and $w_{4}$ in expansion call $k$. In this case, $j_2=\text{$v_{H}$}_{2}$ and $j_1=\text{$v_{H}$}_{1}$ are \textit{active}. Such enforcement could result in non-synchronized edge removal operations in current expansion. In addition, if $j_i=\text{$v_{H}$}_i$ has only one child, $j_i = \text{$v_{H}$}_i$ can't be changed anymore.
\begin{figure}
\caption{RSPN's node $\textbf{\textit{J}}$}
\label{fig:13}
\end{figure}
By Strategy \ref{str:1}, if SFCM-R runs in exponential time, we'll no longer have a consistent minimal scene mapping. Such situation forces \textsc{Reconstruct} to choose by probability. As \textsc{Reconstruct} can't choose by probability, $\lfloor T \rceil$ will be forced to make SFCM-R abort itself since $\gamma$ will grow exponentially by using the following strategy. Therefore, this strategy forces the number of expansion calls to not grow exponentially.
\begin{strategy}
\label{str:1}
Make a new expansion call $i$ with $\phi=(\text{$v_{H}$},\square)$ such that $\text{$v_{H}$} \in H_\text{$v_{H}$}$ when $\ltimes (\gamma, t) = F$ and add every vertex $\text{$v_{H}$} \in H_{\text{$v_{H}$}}$ to a child $j_i = \text{$v_{H}$}_i$ of node $\textbf{\textit{J}}$. Update $\textbf{\textit{J}}$ and enforce ordering between $j_i = \text{$v_{H}$}_i$ and $j_k = \text{$v_{H}$}_{k}$ with $k > i$, if both are active. If (1) such ordering can't be enforced or (2) $\text{$v_{H}$}_i=\emptyset$, $\gamma$ must grow exponentially in order to make SFCM-R abort itself.
\end{strategy}
It's important to mention that SFCM-R assumes that both \textsc{Mapping} and RS-R have pre-synchronized forbidden conditions. It means that $\lfloor T \rceil$ must avoid making SFCM-R abort itself by Strategy \ref{str:1}. Also notice that high peaks of $\gamma$ can theoretically make $\textbf{\textit{J}}$ store an inconsistent ordering as the number of expansion calls grows. Even if it happens, $\textbf{\textit{J}}$ can't be changed arbitrarily.
Therefore, $\lfloor T \rceil$ must try to retard $\gamma$ growth rate faster instead of making SFCM-R abort itself, in order to: (1) prevent a new expansion call; or (2) add another inconsistent $H_\text{$v_{H}$}$ set to $\textbf{\textit{J}}$ that either postpones the activation of static \text{$v_{H}$}\space points or causes less non-synchronized edge removal operations when $\ltimes (\gamma, t) = F$.
\begin{strategy}
\label{str:2}
Make a new expansion call $i$ with $\phi=(\text{$v_{H}$},\square)$ such that $\text{$v_{H}$} \in H_\text{$v_{H}$}$ when $\ltimes (\gamma, t) = F$ with $H_\text{$v_{H}$}$ being a set that either postpones the activation of static \text{$v_{H}$}\space points or causes less non-synchronized removal operations.
\end{strategy}
$\lfloor T \rceil$ can also prevent the number of expansion calls from growing exponentially by preventing \textsc{Reconstruct} from making expansion calls to expand the same $P_{x_1}$ twice. Thus, we have the following strategy.
\begin{strategy}
Every expansion call must have a different $x_1 \in \phi$
\end{strategy}
As mentioned earlier, each expansion call $i$ generates a static $j_i =\text{$v_{H}$}_i$ that must be added to node $\textbf{\textit{J}}$ of RSPN. However, SFCM-R needs to assume that the \textit{exactness rate} is enough for reconstruction process since \textsc{Reconstruct} must use paths of $H^{*}$, which is goal-oriented by Theorem \ref{thm:2}, to pass through $H$. The \textit{exactness rate} $\mu_x$ is the rate of how many non-synchronized edges got converted to synchronized edges from state $i=0$ to current state $x$. The more edges are removed from $L_e$, the lower is the exactness rate $\mu_x$. The $\lambda_{i}$ function outputs a set of $(v,\square)$ edges that were removed from $L_e$ in state $i$. $S$ is the number of edges $e \in L_e$ before the reconstruction phase.
\begin{equation}
\mu_x=\left(1-\sum_{i=0}^x \frac{|\lambda_{i}|}{S}\right)
\end{equation}
As we need to assume that the exactness rate is enough for reconstruction process, we want to restart the process considering $P_{x_2}$ as $P_{x_1}$ before making a new expansion call when $\gamma > t$. In this case, we have a \textit{path swap} since $P_{x_1}$ becomes $P_{x_2}$ and vice-versa.
\begin{strategy}
Before making a new expansion call, make a path swap in order to restart the process starting from $P_{x_2}$ path instead of $P_{x_1}$ path.
\end{strategy}
In addition, we need to use a lazy approach in order to assume that the exactness rate is enough for reconstruction process. As an example, if we undo $k$ states to attach some inconsistent $C_\text{$v_{H}$}$, we need to assume that such $C_\text{$v_{H}$}$ will be properly attached without analysing the consequences of such attachment in its region.
\begin{strategy}
Any inconsistency correction must be made by using a lazy approach.
\end{strategy}
The negativity rate can be also used when \textsc{Reconstruct} connects components of $H^{*}$ by adding $[v,u]$ successively in non-mapped regions, with $u$ such that $d^{*}(u)=0$. In this case, \textsc{Reconstruct} is tending to ignore $H^{*}$ paths completely and consequently imitate RS-E, especially when $d^{*}(u)=0$ holds for every non-visited $u$ in the absence of inconsistent $C_\text{$v_{H}$}$ components that need to be attached. Because of that, $\gamma$ growth rate must be increased in this case.
In addition, the number of times that \textsc{Reconstruct} can do it must be limited by a variable that is decreased as $\gamma$ growth rate is increased. This strategy is particularly useful to make SFCM-R degenerate itself when \textsc{Reconstruct} takes an incomplete $L_e$ as input, that was produced by \textsc{Mapping} without reaching its base.
\begin{strategy}
\label{str:6}
If $d^{*}(u)=0$ holds for every non-visited $u$ and \textsc{Reconstruct} successively connects components of $H^{*}$ by adding $[v,u]$ with $d^{*}(u)=0$, $\gamma$ growth rate must be increased. If \textsc{Reconstruct} connects such components of $H^{*}$ in the absence of inconsistent $C_\text{$v_{H}$}$ components that need to be attached, $\gamma$ growth rate must get increased drastically.
\end{strategy}
\begin{strategy}
\label{str:7}
The number of times that \textsc{Reconstruct} connects components of $H^{*}$ by adding $[v,u]$ successively, with $u$ such that $d^{*}(u)=0$ must be limited by a variable that is decreased as $\gamma$ growth rate is increased.
\end{strategy}
\begin{observation}
Consider the following corollary of Theorem \ref{thm:2}.
\begin{corollary}
\label{clr:5}
If $\lfloor T \rceil$ is optimizable and \textsc{Reconstruct} wants to reconstruct a hamiltonian path, $\lfloor T \rceil$ may need to compute $P_{x_2}$ in a new instance of SFCM-R when $P_{x_1}$ reaches a dead-end if \textsc{Reconstruct} takes an incomplete $L_e$ as input, with $L_e$ being a non-synchronized hamiltonian sequence that was produced by \textsc{Mapping} without reaching its base case.
\end{corollary}
By Corollary \ref{clr:5} , $\lfloor T \rceil$ may need to create a new instance of SFCM-R in order to prevent itself from using Strategy \ref{str:6} or \ref{str:7} to degenerate the current instance of SFCM-R in a wrong moment, due to the fact that the reconstruction of $P_{x_2}$ in a different instance of SFCM-R could make the current instance of SFCM-R reach its base case instead of trying to enforce constraints \ref{cst:1} and \ref{cst:2}.
\\
\end{observation}
We can also use $\gamma$ to make $k$ increase or decrease. For example, if \textsc{Reconstruct} tries to attach an inconsistent $\text{$v_{H}$}$ stored in node C of RSPN by undoing $k$ states in order to add a synchronized-edge $[v,w]$ such that $w \in C_\text{$v_{H}$}$ and $(v,w) \notin L_e$ and every attachment attempt keeps generating another inconsistencies for every non-visited $w \in N(v)$ found, then $\gamma$ growth rate and $k$ can be increased at the same time to prevent SFCM-R from imitating RS-E.
As a result, \textsc{Reconstruct} undoes $k^{\prime}$ states such that $k^{\prime} > k$ in order to not visit all neighbours of $v$. Therefore, $k$ must be proportional to $\gamma$ growth rate assuming that region $R$ is treatable by expanding $P_{x_1}$ or $P_{x_2}$. Such relationship between $k$ and $\gamma$ growth rate helps \textsc{Reconstruct} to attach frequently inconsistent $C_\text{$v_{H}$}$ components. On the other hand, if we can't find any attachable $C_\text{$v_{H}$}$ component by undoing $k$ states due to a high peak of $\gamma$, we can just delete the synchronized edge that is generating them, since they could happen to be attachable later.
\begin{strategy}
\label{str:8}
Undo $k$ states until we find the first inconsistent $C_\text{$v_{H}$}$ stored in $\textbf{\textit{C}}$ node attachable through $w$ with $w$ such that $S=\{ w \in N(v) : (w \sim C_\text{$v_{H}$}) \neq \emptyset \wedge (\text{w was not visited})\}$, $S \neq \emptyset$, and remove the inconsistent $[v,u]$ edge from $L_e$. Then, choose a non-visited $w$ with $w \in S$, and add a synchronized-edge $[v,w]$ such that $w \in C_\text{$v_{H}$}$ and $(v,w) \notin L_e$. If no attachable $C_\text{$v_{H}$}$ component is found in any previous states, due to a high peak of $\gamma$, then increase $\gamma$ growth rate, remove the inconsistent $[v,u]$ and go back to the former $v=y$ in order to choose another non-visited $u \sim y$.
\end{strategy}
\begin{strategy}
The variable $k$ must be proportional to $\gamma$ growth rate assuming that region $R$ is treatable by expanding $P_{x_1}$ or $P_{x_2}$.
\end{strategy}
The node $\textbf{\textit{A}}$ can have some property nodes to make \textsc{Reconstruct} keep track of an inconsistent region $R$ that \textsc{Reconstruct} wants to correct by triggering a strategy. The \textit{total cost} needed to attach an inconsistent $C_\text{$v_{H}$}$ and its appearance frequency can be used by $\lfloor T \rceil$ to detect if SFCM-R is tending to behave like RS-E. The total cost needed to attach an inconsistent $C_\text{$v_{H}$}$ can be represented by the following equation, where: $\Delta_\gamma(s, a_i) = \gamma_{s-1} - \gamma_s$; $\gamma_{s-1}$ is the value of $\gamma$ of state $s-1$; $\gamma_{s}$ is the value of $\gamma$ of an inconsistent state $s \in S$ where $a_i = C_\text{$v_{H}$}$, $a_i \in \textbf{\textit{A}}$, appeared as inconsistency; and $p(s, a_i)$ is an extra cost directly proportional to the appearance frequency of $a_i = C_\text{$v_{H}$}$ in $s \in S$.
\begin{equation}
\textbf{\textit{A}}.cost(a_i) = \sum_{s \in S} \Delta_\gamma(s, a_i) + p(s, a_i)
\end{equation}
Because of that, $\lfloor T \rceil$ needs to make $\gamma$ growth rate increase as both the total cost needed to attach an inconsistent $C_\text{$v_{H}$}$ of $R$, and its appearance frequency, tends to increase.
Thus, we have the following strategy.
\begin{strategy}
Make $\gamma$ growth rate increase, as $\textbf{\textit{A}}.cost(a_i)$ gets increased.
\end{strategy}
Furthermore, we can also use the negativity rate along with attached $C_\text{$v_{H}$}$ stored in $\textbf{\textit{A}}$ to change the variable $k$. Thus, $\textbf{\textit{A}}$ can be used by \textsc{Reconstruct} to keep track of specific regions in current expansion, serving as an extra parameter to change $k$. As an example, we can undo $k$ states until we find an arbitrary $C_\text{$v_{H}$}$ that was attached in current \textsc{Reconstruct} call.
As mentioned before, we need to store inconsistent $C_\text{$v_{H}$}$ components in node $\textbf{\textit{C}}$ before using any attaching strategy. However, SFCM-R can't imitate RS-E by trying to attach them aggressively. Thus, the following strategies could be useful to prevent SFCM-R from imitating RS-E.
\begin{strategy}
Avoid adding new $C_{\text{$v_{H}$}^\prime}$ to $\textbf{\textit{C}}$ node until we have at least one well-succeeded $C_\text{$v_{H}$}$ attaching.
\end{strategy}
\begin{strategy}
If attachment attempts always generate new $C^\prime_\text{$v_{H}$}$ components, $\gamma$ growth rate must be increased drastically. In such a case, try to attach additional $\text{$v_{H}$}^\prime$ components by adding them to $\textbf{\textit{C}}$ node and giving them a higher priority.
\end{strategy}
Also, the number of $C_\text{$v_{H}$}$ of $\textbf{\textit{C}}$ node can be limited by a variable that is decreased as $\gamma$ growth rate is increased. Such strategy forces \textsc{Reconstruct} to not try to attach $C_\text{$v_{H}$}$ components aggressively when we have successive peaks of $\gamma$.
\begin{strategy}
The number of $C_\text{$v_{H}$}$ components considered by current state must be limited by a variable that is decreased as $\gamma$ growth rate is increased.
\end{strategy}
Notice that once we have a valid attachable $C_\text{$v_{H}$}$, the remaining $C_\text{$v_{H}$}$ components can't be chosen by probability. As the choice of remaining $C^\prime_\text{$v_{H}$}$ components must be explicitly tied to a goal-oriented strategy, $\lfloor T \rceil$ can remove these $\text{$v_{H}$}$ vertices from $\textbf{\textit{C}}$ since SFCM-R assumes that $\mu_x$ is enough for reconstruction process.
\begin{strategy}
Remove every $C_\text{$v_{H}$}$ from $\textbf{\textit{C}}$ node for every $v$ after a valid attachable $C_\text{$v_{H}$}$ is found.
\end{strategy}
As mentioned earlier, if we try to attach every $C_\text{$v_{H}$}$ aggressively we can end up with SFCM-R imitating RS-E, since we can have subscenes with only invalid $C_\text{$v_{H}$}$ components. In other words, there is no guarantee that every $C_\text{$v_{H}$}$ found in every $R$ of vertices will be consistent without making any expansion call. Also, SFCM-R assumes that every vertex $w$ is reachable through $v$ or $\text{$v_{0}$}$. It means that there may exist components $C_\text{$v_{H}$}$ only attachable through $P_{x_2}$. In both cases, $P_{x_1}$ is \textit{overlapping} $P_{x_2}$ since an inconsistent region $R$ can happen to be consistent by either: (1) making a path swap in order to expand $P_{x_2}$ to correct inconsistencies; or (2) making a new expansion with a different $x_1 \in \phi$.
\begin{definition}
A path overlapping in a region $R$ of vertices is when: (1) $P_{x_2}$ needs to pass through $R$ to attach or cancel the appearance of inconsistent $C_\text{$v_{H}$}$ components found by expanding $P_{x_1}$; or (2) $\phi$ needs to be changed in order to attach or cancel the appearance of inconsistencies found by expanding $P_{x_1}$ or $P_{x_2}$.
\end{definition}
A path overlapping can occur in many cases. For example, if $H-P_{x_1}$ generates a non-reachable component $H^{\prime\prime}$ with $V(H^{\prime\prime}) \cap \{x_1,x_2\} = \emptyset$, $H^{\prime\prime}$ is clearly invalid in both hamiltonian circuit and hamiltonian path context. Also, we can have, in hamiltonian circuit context, $A(x_2, H)=T$ holding for $x_2$ by expanding $P_{x_1}$, or even worse, successive peaks of $\gamma$ in a region $R$. If we have successive peaks of $\gamma$ in a region $R$, there may exist a $C_\text{$v_{H}$}$ component frequently inconsistent by expanding $P_{x_1}$, suggesting that it may be attachable by expanding $P_{x_2}$. Another sign of path overlapping is when $A(x_1, H) = T$ holds for $x_1$ in hamiltonian circuit context, and $H-P_{x_1}$ generates a component $H^\prime$ with $x_2 \in V(H^\prime)$ and $|V(H^\prime)|$ being very small. This sign suggests that such $H^\prime$ can't be generated by $P_{x_1}$.
In such cases, the path overlapping correction strategies can be useful since we may find different $C_\text{$v_{H}$}$ components by expanding $P_{x_2}$ that can degenerate such inconsistencies without making new expansion calls. Therefore, we have the following strategy.
\begin{strategy}
If we have a path overlapping in some $R$ in $P_{x_1}$, undo $k$ states and make a path swap, so that we can pass through $R$ by expanding $P_{x_2}$. If path overlapping is corrected, make another path swap to continue the reconstruction process through former $P_{x_1}$.
\end{strategy}
As an alternative, instead of making a path swap to continue this process through former $P_{x_1}$, we can continue through $P_{x_2}$ without making a path swap.
\begin{strategy}
If we have a path overlapping in some $R$, undo $k$ states and make a path swap, so that we can pass through $R$ by expanding $P_{x_2}$. Continue through $P_{x_2}$ until we have another path overlapping.
\end{strategy}
Also, we can continue this process through $P_{x_2}$ until we have a new inconsistent $C_\text{$v_{H}$} \sim x_1$ in either current $P_{x_1}$ state or earlier states with a different $v=x_1$. If such $C_\text{$v_{H}$}$ is found, we undo the states created after the path swap and then, make another path swap to go back to $P_{x_1}$ in order to attach $C_\text{$v_{H}$}$. The goal here is to generate new inconsistent $C_\text{$v_{H}$}$ components to be attached by $P_{x_1}$ and change $P_{x_1}$ without relying on probability.
\begin{strategy}
If we have a path overlapping in some $R$, undo $k$ states and make a path swap, so that we can pass through $R$ by expanding $P_{x_2}$. If path overlapping is corrected, continue through $P_{x_2}$ until we have new inconsistent $C_\text{$v_{H}$} \sim x_1$ in either current $P_{x_1}$ state or earlier states with a different $v=x_1$. If such $C_\text{$v_{H}$}$ is found, undo the states created after the path swap and then, make another path swap to go back to former $P_{x_1}$ in order to attach such $C_\text{$v_{H}$}$.
\end{strategy}
As we're ignoring $u=\text{$v_{A}$}$ vertices in \textsc{Mapping}, we can have sequences of creatable components $H^\prime \supset H$ with $|H^n|=2$,$|H^c|=F$ when \textsc{Reconstruct} is passing through a potential \text{$v_{H}$}-path. If \textsc{Reconstruct} needs to attach an inconsistent $C_\text{$v_{H}$}$ of a potential \text{$v_{H}$}-path, we could choose an attachable $C_\text{$v_{H}$}$ of one of its endpoints in order to not make $\gamma$ growth rate get increased drastically. Such endpoints will be $C_\text{$v_{H}$}$ components that appear as inconsistency frequently.
\begin{strategy}
Undo $k$ states until we find the first attachable $C_\text{$v_{H}$}$ of an endpoint of a potential \text{$v_{H}$}-path instead of making $\gamma$ growth rate increase drastically.
\end{strategy}
Before continuing, we need to define the last type of vertex mentioned in this paper, that will be called \textit{$C_\text{$v_{H}$}$ generators} or simply $\text{$v_{G}$}$.
\begin{definition}\emph{($C_\text{$v_{H}$}$ generator)}
Let $G=(V,E)$ be a minimal scene. A vertex $w \in V$ is a $C_\text{$v_{H}$}$ generator when $|H[V-w]_\text{$v_{H}$}| > |H[V]_\text{$v_{H}$}|$.
\end{definition}
From a technical point of view, $\text{$v_{G}$}$ is not $C_\text{$v_{H}$}$. On the other hand, if we consider $\text{$v_{G}$}$ as an inconsistent $C_\text{$v_{H}$}$,$\text{$v_{H}$}=\text{$v_{G}$}$, we can degenerate it so that the unwanted $C_\text{$v_{H}$}$ components are not created by $\text{$v_{G}$}$. Also, we can degenerate it by considering such unwanted $C_\text{$v_{H}$}$ components inconsistencies if we want to change the inconsistent \text{$v_{H}$}-path that $\text{$v_{G}$}$ is about to create. As $\text{$v_{G}$}$ is not an explicit $C_\text{$v_{H}$}$, this kind of event must make $\gamma$ growth rate increase but it's particularly useful in very specific cases.
As an example, let $w$ be a vertex that for every $H^\prime \supseteq H$, $H^\prime - w$ generates two potential \text{$v_{H}$}-paths starting from $w$. It means that there's only one way to reach $w$ without having $P_{x_1}$ and $P_{x_2}$ being paths with non-adjacent dead ends. If $w$ needs to be attached as $C_\text{$v_{H}$}$, using a lazy approach here could make $\gamma$ growth rate increase. So we have to assume that either $\text{$v_{G}$}$, or such unwanted $C_\text{$v_{H}$}$ components created by $\text{$v_{G}$}$, are inconsistencies in order to attach $w$ properly.
Another example is when we have unwanted $C_\text{$v_{H}$}$ components preventing $\lfloor T \rceil$ from making minimal scene attachments through Strategy \ref{str:8}. If these unwanted $C_\text{$v_{H}$}$ are properly attached, $\lfloor T \rceil$ can prevent itself from using path overlapping correction strategies. As a result, this strategy can make \textsc{Reconstruct} undo a small number of states, which can retard $\gamma$ growth rate and consequently postpone the need of a new expansion call.
We can also use this strategy to enforce the ordering constraints of ordering node $\textbf{\textit{J}}$, or when we have signs that suggests that there exists hidden region ordering constraints. A possible sign of hidden region ordering constraints is when \textsc{Reconstruct} finds itself using path overlapping correction strategies that generate always almost the same $C_\text{$v_{H}$}$ components from $P_{x_1}$ and $P_{x_2}$ with no significant progress. In this case, \textsc{Reconstruct} would just make a new expansion call due to a high peak of $\gamma$ in order to enforce such ordering by using ordering constraints of node $\textbf{\textit{J}}$. However, $\lfloor T \rceil$ can try to use this strategy before making a new expansion call when these components are about to force either $P_{x_1}$ or $P_{x_2}$ to create a wrong region ordering.
\begin{strategy}
If there's unwanted $C_\text{$v_{H}$}$ components created by $\text{$v_{G}$}$ , assume that $\text{$v_{G}$}$ or such unwanted $C_\text{$v_{H}$}$ components are inconsistent $C_\text{$v_{H}$}$ components that need to be attached, make $\gamma$ growth rate increase and try to attach these inconsistencies.
\end{strategy}
We can also store valid sequences of minimal scene attachments in the region node $\textbf{\textit{N}}$ of RSPN whenever we find inconsistencies that cause successive peaks of $\gamma$. In this case, a useful strategy is to create a temporary expansion call with $\phi=(w,\square)$ with $w$ being frequently part of non-attachable $C_\text{$v_{H}$}$ in current expansion, store a valid sequence of attached $C_\text{$v_{H}$}$ components in $\textbf{\textit{N}}$ and enforce this sequence of attachments through $P_{x_1}$ by using a lazy approach locally. It means that $\lfloor T \rceil$ will not enforce this sequence at first. It must enforce parts of such sequence of attachments progressively only if it finds successive peaks of $\gamma$.
The goal of this strategy is to minimize the number of expansions calls since we're enforcing a known valid sequence of $C_\text{$v_{H}$}$. It's important to mention that in order to enforce such ordering, these $C_\text{$v_{H}$}$ components need to appear as inconsistency explicitly. Therefore, such strategy is an extra parameter to change $k$. As an example, \textsc{Reconstruct} can undo $k$ states until it finds a vertex $v \in C_\text{$v_{H}$}$ with $C_\text{$v_{H}$}$ being part of a valid sequence of attachments.
\begin{strategy}
If we have high peaks of $\gamma$ in a region $R$ of vertices, then create a temporary expansion call with $\phi=(w,\square)$ with $w$ being frequently part of non-attachable $C_\text{$v_{H}$}$ components in current expansion in order to find and store a valid sequence of attached $C_\text{$v_{H}$}$ components in \textbf{\textit{N}}. Next, enforce this sequence of attachments $C_\text{$v_{H}$}$ through $P_{x_1}$ or $P_{x_2}$ progressively by using a lazy approach locally. If this strategy fails, $\gamma$ growth rate must be increased drastically.
\end{strategy}
As this strategy doesn't assume that $\mu_x$ is enough to reconstruct the hamiltonian sequence in region $R$, it must be used only in very specific cases. As an example, such strategy could be used when $\lfloor T \rceil$ is about to abort the reconstruction process or detects that the number of expansion calls is increasing very fast with no significant progress whenever \textsc{Reconstruct} tries to pass through such region.
\subsubsection{Goal-oriented strategies for hamiltonian path}
\label{sec:12}
In this section, we present specific goal-oriented strategies that SFCM-R needs to use to reconstruct a hamiltonian path. As mentioned earlier, the goal-oriented strategies of section \ref{sec:11} are focused on keeping $H$ connected, considering $v \to \text{$v_{H}$}=T$ as an inconsistency. However, $P_{x_1}$ and $P_{x_2}$ may have non-adjacent dead ends in hamiltonian path. In this case, we can have up to one $v \to \text{$v_{H}$}=T$. In other words, we can have $0 \leq \Delta(H) \leq 2-d$ with $\Delta(H)$ being the number of creatable components $H^\prime \supset H$,$V(H^\prime) \cap \{x_1,x_2\} = \emptyset$,$|H^n|=1$,$|H^c|=F$, and $d=0$ being a variable that is incremented when $x_1$ or $x_2$ reaches a dead end.
Notice that the same strategies can be used in hamiltonian path context. In this context, we can ignore at least two $C_\text{$v_{H}$}$ attaching operations. If these $C_\text{$v_{H}$}$ components happen to be non-reachable by $P_{x_1}$ or $P_{x_2}$, just enforce the attachment of such invalid $C_\text{$v_{H}$}$ by using goal-strategies of section \ref{sec:11} and continue the reconstruction process.
\begin{strategy}
In the hamiltonian path context, allow $\Delta(H)$ components to exist, assuming that these components are reachable by $x_1$ or $x_2$.
\end{strategy}
As an alternative strategy, we can enforce $H$ to have $H_{\text{$v_{H}$}}=\emptyset$ until we have only non-attachable $C_\text{$v_{H}$}$ components. When it happens, allow one $v \to \text{$v_{H}$} = T$ and split the scene $H$ in two different subscenes $H^\prime$ and $H^{\prime\prime}$ with $x_1 \in V(H^\prime)$ and $x_2 \in V(H^{\prime\prime})$. In this case, $x_1$ of $H$ will be the $x_1$ of $H^\prime$ and $x_2$ of $H$ will be the $x_1$ of $H^{\prime\prime}$. The $x_2$ of $H^\prime$ and $H^{\prime\prime}$ will be the root of a creatable component with $|H^n|=1$,$|H^c|=F$ (if one exists) of $H^\prime$ and $H^{\prime\prime}$, respectively.
\begin{strategy}
In hamiltonian path context, enforce $H_{\text{$v_{H}$}}=\emptyset$ until we have only non-attachable $C_\text{$v_{H}$}$ components.
\end{strategy}
If we enforce $H_{\text{$v_{H}$}}=\emptyset$ until we have only non-attachable $C_\text{$v_{H}$}$ components, we can find possible mandatory dead ends of hamiltonian path. As an example, the figure below shows an RSPN with $j_0 = \text{$v_{H}$}_0$ being an empty child of $\textbf{\textit{J}}$. The reason is that the vertices $w_1$ and $w_2$, which were added to $\textbf{\textit{J}}$ in expansion call $k-4$, were added to $\textbf{\textit{J}}$ again in expansion call $k-1$ when $\ltimes (\gamma, t) = F$. It means that if we pass through $w_1$ or $w_2$ in expansion call $k$, $\Delta(H)$ could get increased by $\lfloor T \rceil$ at any moment since $\lfloor T \rceil$ failed to prevent $\text{$v_{H}$}_0$ from being empty.
\begin{figure}
\caption{RSPN's node \textbf{\textit{J}}}
\label{fig:14}
\end{figure}
When there's an empty $j_i = \text{$v_{H}$}_i$ node, such $\text{$v_{H}$}_i$ can be considered active. In figure 3, $w_1$ and $w_2$ forms together the only possible choice of $\text{$v_{H}$}_0$, which represents a possible mandatory dead end. So if we have $H_\text{$v_{H}$} \cap \{w_1,w_2\} \neq \emptyset$, or a creatable component $H^\prime \supset H$ with $1 \leq |H^n| \leq 2$,$|H^c|=F$, $\{w_1,w_2\} \cap V(H^\prime) \neq \emptyset$ , SFCM-R can just ignore these attachments at first since it must assume that $\Delta(H)$ could get increased by $\lfloor T \rceil$ at any moment since $\lfloor T \rceil$ failed to prevent $\text{$v_{H}$}_0$ from being empty.
\subsection{Proof of correctness}
\label{sec:13}
This section is dedicated to the proof of correctness of \textsc{Reconstruct}, which consequently proves the correctness of SFCM-R algorithm. In this section, the unknown negated forbidden condition of RS-R is refereed to as $\lfloor F \rceil$. Before continuing, consider the following corollaries of Theorem \ref{thm:2}.
\begin{corollary}
\label{clr:2}
$\lfloor F \rceil$ can't ignore the constraints of SFCM-R completely.
\end{corollary}
\begin{corollary}
\label{clr:3}
If RS-R ran without aborting itself, its potentially-exponential error rate curve was completely distorted by $\lfloor F \rceil$ in its final state.
\end{corollary}
Now, we will prove the following theorem, which states that \textsc{Reconstruct} is goal-oriented if at most $|V|-1$ expansion calls, with a different $x_1 \in \phi$, are made by using an optimizable tolerance policy $\lfloor T \rceil$.
\begin{theorem}
\label{thm:3}
\textsc{Reconstruct} is goal-oriented if at most $|V|-1$ expansion calls, with a different $x_1 \in \phi$, are made by using an optimizable tolerance policy $\lfloor T \rceil$.
\end{theorem}
\begin{proof}
Let $G=(V,E)$ be a scene, and $\lfloor T \rceil$ be an optimizable tolerance policy. As \textsc{Mapping} ignores Theorem \ref{thm:1} partially and \textsc{Reconstruct} passes through $H$ by using paths of $H^{*}$, \textsc{Reconstruct} is goal-oriented only if its error rate curve, which is the curve of $\gamma$, doesn't degenerate the error rate curve distortion made by \textsc{Mapping} (Corollary \ref{clr:4}) while enforcing both constraints \ref{cst:1} and \ref{cst:2}.
Let $F_v = P_{v}$, with $v \in \{x_1,x_2\}$, be a forbidden sequence of $H^{*}$ that makes the current state of \textsc{Reconstruct} inconsistent in $H[V-F_v]$. Let $Z=H_\text{$v_{H}$}$ be an inconsistent $H_\text{$v_{H}$}$ generated by $F_v$. If $F_v$ is found by \textsc{Reconstruct}, $\lfloor T \rceil$ (along with the proposed goal-oriented strategies and variants) makes \textsc{Reconstruct} either:
\begin{enumerate}[(1)]
\item degenerate $F_v$ by undoing $k$ states in order to attach a $C_\text{$v_{H}$}$ component such that $\text{$v_{H}$} \in Z$ through $x_1$ or $x_2$; or
\item perform a new expansion call with $\phi$ such that $\phi=(\text{$v_{H}$},\square)$ and $\text{$v_{H}$} \in Z$ in order to degenerate $F_v$ ordering by accessing $Z$ before $F_v$.
\end{enumerate}
Notice that:
\begin{enumerate}[(I)]
\item \textsc{Reconstruct} imitates \textsc{Mapping}, which is a goal-oriented by Theorem \ref{thm:2}, in order to degenerate $F_v$ since it minimizes the appearance of non-mandatory $C_\text{$v_{H}$}$ components by attaching them successfully, while using of paths $H^{*}$ to pass through $H$, which could make SFCM-R ignore its own constraints partially to imitate RS-R, that also can ignore SFCM-R constraints partially by Corollary \ref{clr:2}.
\\
\item By Corollary \ref{clr:1}, the existence of non-mandatory $C_\text{$v_{H}$}$ components and potential isolated forbidden minors doesn't imply that \textsc{Reconstruct} is ignoring $\lfloor F \rceil$ by imitating \textsc{Mapping};
\\
\item $F_v$ is not degenerated by imitating RS-E explicitly due to both $\lfloor T \rceil$, and restrictions related to the proposed goal-oriented strategies (and variants) that forces $\Delta(H)$ to be consistent while preventing \textsc{Reconstruct} from imitating RS-E explicitly;
\\
\item Assuming that $\lfloor F \rceil$ makes a recursive \textsc{Hamiltonian-Sequence} call to check if $v \to u=T$ holds for $u$, due to the fact that RS-R performs only $v \to u=T$ operations, $\lfloor F \rceil$ can discard the scene $G$ of successive recursive calls without aborting RS-R in order to return $v \to u=F$ to their callers. Each caller, in turn, either increments its error count by one or makes $v \to u=F$ hold for the remaining $u$. Thus, \textsc{Reconstruct} is imitating RS-R when \textsc{Reconstruct} is undoing $k$ states in order to attach a $C_\text{$v_{H}$}$ component such that $\text{$v_{H}$} \in Z$;
\\
\item We can also assume that $\lfloor F \rceil$ can also change the first $v=y$ of the first \textsc{Hamiltonian-Sequence} call, when $y$ is preventing $\lfloor F \rceil$ from constructing a valid hamiltonian sequence $S$ in order to not make RS-R fail to produce a valid output, with $S=v_i ... v_k$ such that $|S|=|V|$, $1 \leq k \leq |V|$,$1 \leq i \leq k$, $v_1 \neq y$. Thus, \textsc{Reconstruct} is imitating RS-R when \textsc{Reconstruct} is performing a new expansion call with $\phi$ such that $\phi=(\text{$v_{H}$},\square)$ and $\text{$v_{H}$} \in Z$ in order to degenerate $F_v$ ordering by accessing $Z$ before $F_v$.
\\
\end{enumerate}
To illustrate (I), (II), (III), (IV) and (V), assume that RS-R, SFCM-R and RS-E are thermodynamic closed isolated systems in a row, defined by $S_{\text{RS-R}}=S(\text{SFCM-R},x_i,x^\prime_j)$, $S_{\text{SFCM-R}}=S(\text{RS-R},x_i,x_j)$, and $S_{\text{RS-E}}=S(\text{RS-E},x_i,x^{\prime\prime}_j)$, respectively. $S(A,x_i=w_i,z_j)$ is a linear combination of Gaussian kernels, which illustrates non-overlapping homeomorphic imaginary surfaces in different dimensions.
\begin{equation}
{\begin{array}{rcll} \displaystyle{S(A,x_i=w_i,z_j) = \sum_{j=1}^{n} \curlywedge_j e^{-\parallel x_i - z_j \parallel}}, \hphantom{00} && w_i \in V, \curlywedge_j \geq 0, n=|V|\end{array}}
\end{equation}
$S_{SFCM-R}$ in the middle illustrates the following quantum superposition as we want.
\begin{equation}
S_{\text{SFCM-R}} = c_0 \mid S_{\text{RS-R}}\rangle + c_1 \mid S_{\text{RS-E}}\rangle
\end{equation}
\begin{equation}
c_0 = max(0, t - \gamma)
\end{equation}
\begin{equation}
c_1 = 1 - c_0
\end{equation}
Let $T_{S_{SFCM-R}}$, $T_{S_{RS-R}}$ and $T_{S_{RS-E}}$ be $\gamma$, $\nabla$ and $\infty$, respectively, with $T_S$ being the temperature of $S$ at equilibrium and $\nabla$ being an imaginary variable.
In this context, we set $\gamma$ as follows because the hidden variable $\curlywedge_j$, that corresponds to the temperature at $x_j$, is uniform in every $x_j$ when $S_{SFCM-R}$ is at equilibrium. In this sense, as $F_v$ represents an inconsistency of SFCM-R, $F_v$ makes $c_j$ and $\curlywedge_j$ increase.
\begin{equation}
\gamma = \sum \frac{\curlywedge_j}{|V|}=\curlywedge_j
\end{equation}
\begin{equation}
{\begin{array}{rcll} \curlywedge_j = \gamma + c_j, \hphantom{00} && c_j \geq 0 \end{array}}
\end{equation}
Because (I), (II), (III), (IV) and (V), we can assume that $S_{RS-E}$ and $S_{SFCM-R}$ are essentially disputing the following minimax-based game, which tests the effectiveness of $S_{SFCM-R}$ on minimizing its disorder (entropy) as $\gamma$ growth rate increases by using a systematic method, which forces $T_{S_{SFCM-R}}$ to approach $T_{S_{RS-R}}$ instead of $T_{S_{RS-E}}$ in order to $\lfloor T \rceil$ not be more prone to abort $S_{SFCM-R}$.
\begin{equation}
{\begin{array}{rcll} \vartheta = \displaystyle{ \underset {\curlywedge_j} {\operatorname{min}}\;\underset {\gamma} {\operatorname{max}}\; \operatorname{Game}(\curlywedge_j, \gamma) = \frac{1}{n} \sum_{j \in V} (\curlywedge_j - \gamma)^2} , \hphantom{00} && 0 \leq \gamma \leq \curlywedge_j, \gamma < t, n=|V| \end{array}}
\end{equation}
In other words, $S_{SFCM-R}$ wins $\vartheta$ if and only if it forces itself not to collapse to $S_{RS-E}$ successive times due to $c_0 = 0$, which can maximize the entropy of $S_{SFCM-R}$ by $T_{S_{SFCM-R}}$ approaching $\infty$. Figure \ref{fig:12} illustrates a scenario where $\vartheta$ became unfair for $S_{SFCM-R}$.
\begin{figure}
\caption{Illustration of an approximated ``anti-exponential'' curve $h(x)$ of $S_{SFCM-R}$}
\label{fig:12}
\end{figure}
In fact, by Theorem \ref{thm:2}, $\lfloor T \rceil$ must compute the value of $s \in \Phi(s)$ in order to not abort $S_{SFCM-R}$ due to $c_0=p_1$, with $\Phi(s) : S \to 2^S$, $S=\text{$v_{H}$}_i ... \text{$v_{H}$}_k$ being a set that maps attachable $C_\text{$v_{H}$}$ components in $S$ to subsets of $S$. If such a computation is possible, then the existence of $s$ implies that $\vartheta$ is unfair for $S_{RS-E}$, because $S_{SFCM-R}$ reaching $x \in [p_0 ... p_1[$ is guaranteed by $\lfloor T \rceil$ because of (I), (II), (III), (IV) and (V). Likewise, the non-existence of $s$ implies that $\vartheta$ is unfair for $S_{SFCM-R}$, because $S_{SFCM-R}$ reaching $x = p_1$ is guaranteed by $\lfloor T \rceil$.
It's worth mentioning that $\lfloor F \rceil$ also needs to minimize the entropy of $S_{RS-R}$ by using a systematic method instead of using a probabilistic approach in order to not fail to produce a valid output, directly or indirectly, since $T_{S_{RS-R}}$ needs to collapse to either $\nabla$ or $\infty$ precisely.
\begin{equation}
s \in \Phi(s) \Longrightarrow \text{(} { \underset {\curlywedge_j} {\operatorname{min}}\;\underset {\gamma} {\operatorname{max}}\; \operatorname{Game}(\curlywedge_j, \gamma)}\text{ is unfair for }S_{RS-E}\text{)}
\end{equation}
Therefore, (I), (II), (III), (IV) and (V) imply that the existence of $F_v$ is not a sufficient condition to make \textsc{Reconstruct} degenerate the error rate distortion made by \textsc{Mapping} and imitate RS-E.
Now, let $X_i$ be a set $Z$ added to $\textbf{\textit{J}}$ node in expansion call $i$ that doesn't active any static $\text{$v_{H}$}$. As \textsc{Reconstruct} passes through $H$ by using $H^{*}$ paths, we have:
\begin{enumerate}[(1)]
\item by Corollary \ref{clr:1}, paths of $H^{*}$ can generate potential independent forbidden minors; and
\item by Corollary \ref{clr:2}, $\lfloor F \rceil$ can't ignore the constraints of SFCM-R completely.
\end{enumerate}
Thus, we can assume that, if there exists $X_i=j_i$ and $X_k=j_k$,$i>k$, then there exists two ordered fragments $S_i$ and $S_k$ of a potential hamiltonian sequence, such that $S_i \cap X_i \neq \emptyset$, $S_k \cap X_k \neq \emptyset$ that $\lfloor T \rceil$ is forced to create due to $\ltimes=F$ in order to degenerate $X_i$ and $X_k$.
Otherwise, \textsc{Reconstruct} would need to imitate RS-E explicitly, since it should have used probability to imitate RS-E in order to avoid both $X_i$ and $X_k$, instead of using the proposed goal-oriented strategies (and variants) along with $\lfloor T \rceil$ to postpone the creation of both $X_i$ and $X_k$ as well as the activation of both $X_i$ and $X_k$, which is invalid because of $S_{SFCM-R}$ avoiding $\vartheta$.
Notice we can also assume that RS-R also uses an optimizable tolerance policy, directly or indirectly, since:
\begin{enumerate}[(1)]
\item by Corollary \ref{clr:3}, $\lfloor F \rceil$ must distort the potentially-exponential error rate curve of RS-R, which is represented by the number of times that $v \to u=F$ holds for $u$, by using a systematic approach in order to make RS-R fail to produce a valid output; and
\item by Corollary \ref{clr:2}, as $\lfloor F \rceil$ can ignore the constraints of SFCM-R partially, we can assume that, directly or indirectly, $\lfloor F \rceil$ can tolerate a small $\gamma$ growth rate in order to consider SFCM-R constraints progressively as a means to minimize the entropy of $S_{RS-R}$.
\end{enumerate}
Otherwise, RS-R would also need to imitate RS-E explicitly, since it wouldn't predict optimally if its error rate curve distortion would be degenerated in order to abort itself, which is invalid.
As $\lfloor F \rceil$ needs to map the ordering constraints related to potential independent forbidden minors by using RSPN in order to not make RS-R imitate RS-E, \textsc{Reconstruct} is imitating RS-R due to the fact that \textsc{Reconstruct} needs to pass through $X_i$ before $X_k$ by using $\lfloor T \rceil$.
However, if \textsc{Reconstruct} happens to pass through $X_k$ before $X_i$, there may exist a hidden region $X_l$ such that $l > k$, that updates $X_k$ in way that the ordering $X_i ... X_k$ remains preserved. As $X_l$ can be created by $\lfloor T \rceil$ in any subsequent expansion call, such event is not a sufficient condition to prove that \textsc{Reconstruct} ignores $X_i ... X_k$ ordering unless both $j_i = \text{$v_{H}$}_i$ and $j_k=\text{$v_{H}$}_k$ are active. In this case, if $v \to u = T$\space with $u \in X_k$ and $X_i \cap V(H) \neq \emptyset$, $\Delta(H)$ could get increased by $\lfloor T \rceil$ at any moment since:
\begin{enumerate}[(1)]
\item \textsc{Reconstruct} can't create any $X_l$ in subsequent expansion calls; and
\item $\lfloor T \rceil$ failed to postpone the creation of both $\text{$v_{H}$}_i$ and $\text{$v_{H}$}_k$ and the activation of both $\text{$v_{H}$}_i$ and $\text{$v_{H}$}_k$, while preventing \textsc{Reconstruct} from imitating RS-E.
\end{enumerate}
Because of that, $\lfloor T \rceil$ is forced to delete some edges $e \in L_e$ to enforce the ordering of active static $\text{$v_{H}$}$ points in order to make $\Delta(H)$ be consistent. In this case, \textsc{Reconstruct} is still using paths of $H^{*}$ even if some of edges are removed from $L_e$, which means that the existence of such removal operations is not a sufficient condition to prove that the error rate distortion made by \textsc{Mapping} is degenerated by \textsc{Reconstruct}. In addition, by Corollary \ref{clr:2}, $\lfloor F \rceil$ can't ignore the constraints of SFCM-R completely.
However, if \textsc{Reconstruct} is not able to add $[v,u]$, for at least one $v$, in an arbitrary region $R$ because of such ordering, \textsc{Reconstruct} can't pass through such region unless by using probability. In such state, \textsc{Reconstruct} is aborted by $\lfloor T \rceil$ since the error rate curve distortion made by \textsc{Mapping} (Corollary \ref{clr:4}) is about to be degenerated, which makes $\lfloor T \rceil$ trigger Strategy \ref{str:1} to disintegrate the curve distortion ring $\ltimes$ in order to make $\gamma$ grow exponentially.
That's because \textsc{Reconstruct} would need to imitate the behaviour of RS-E explicitly by ignoring $L_e$ as well as its tolerance policy completely in order to continue the reconstruction process. Notice that such state imitates the abort condition of RS-R by making $v \to u=F$ hold for every $u$, since:
\begin{enumerate}[(1)]
\item $\lfloor T \rceil$ failed to prevent \textsc{Reconstruct} from degenerating itself while preventing \textsc{Reconstruct} from imitating RS-E; and \item $\lfloor F \rceil$ can't ignore the constraints of SFCM-R completely by Corollary \ref{clr:2}.
\end{enumerate}
In addition, as a static $\text{$v_{H}$}_i$ can't have duplicated \text{$v_{H}$}\space points and the first \textsc{Reconstruct} call can update $\textbf{\textit{J}}$ node, $|V|-1$ expansion calls, with a different $x_1 \in \phi$, is a sufficient condition to activate every static $\text{$v_{H}$}$. If \textsc{Reconstruct} makes $|V|-1$ expansion calls with a different $x_1 \in \phi$, $L_e$ needs to be a hamiltonian sequence in order to not violate any region ordering. Otherwise, \textsc{Reconstruct} is aborted by Strategy \ref{str:1}. In such case, by Corollary \ref{clr:3}, \textsc{Reconstruct} also imitates the stop condition of RS-R, since a valid $u$ must exist for every $v$ found when RS-R is not aborted, and, a distorted error rate curve must exist when RS-R is not aborted.
\\
Therefore, \textsc{Reconstruct} is goal-oriented if at most $|V|-1$ expansion calls, with a different $x_1 \in \phi$, are made by using an optimizable tolerance policy $\lfloor T \rceil$.
\end{proof}
\section{Conclusion}
\label{sec:14}
In this paper, a novel algorithm to hamiltonian sequence is proposed. Such algorithm tries to reconstruct a potential hamiltonian sequence $P$ by solving a synchronization problem between the forbidden condition of an unknown non-exhaustive hamiltonian sequence characterization test, which is a set of unknown sufficient conditions that makes such test fail to produce a valid output, and the forbidden condition of the proposed algorithm, which is a set of sufficient conditions that makes the proposed algorithm fail to produce a valid output. In conclusion, this study suggests that the hamiltonian sequence problem can be treated as a synchronization problem involving the two aforementioned forbidden conditions.
\end{document} |
\begin{document}
\title{Mean value formulas on sublattices and flags of the random lattice}
\begin{abstract}
We present extensions of the Siegel integral formula (\cite{Sie}), which counts the vectors of the random lattice, to the context of counting its sublattices and flags. Perhaps surprisingly, it turns out that many quantities of interest diverge to infinity.
\end{abstract}
\section{Introduction}
We start by recalling the celebrated Siegel integral formula (\cite{Sie}), one of the cornerstones of geometry of numbers. Let $X_n = \mathrm{SL}(n, \mathbb{Z}) \backslash \mathrm{SL}(n, \mathbb{R})$ be the space of lattices of determinant $1$, and equip $X_n$ with the measure $\mu_n$ (defined up to a constant, which is to be determined by Theorem \ref{thm:siegel_int} below) that is inherited from the Haar measure of $\mathrm{SL}(n, \mathbb{R})$; in particular, $\mu_n$ is invariant under the right $\mathrm{SL}(n, \mathbb{R})$-action on $X_n$. In this setting, Siegel proved the following theorem.
\begin{theorem}[Siegel \cite{Sie}] \label{thm:siegel_int}
$\mu_n(X_n) < \infty$, so upon normalizing we may suppose $\mu_n(X_n) = 1$. Also, for $f: \mathbb{R}^n \rightarrow \mathbb{R}$ a compactly supported and bounded Borel measurable function, we have
\begin{equation*}
\int_{X_n} \sum_{x \in L \atop x \neq 0} f(x) d\mu_n(L) = \int_{\mathbb{R}^n} f(x) dx.
\end{equation*}
\end{theorem}
There are many other useful variations of Theorem \ref{thm:siegel_int}. For instance, Rogers proved the following, known as the Rogers integral formula.
\begin{theorem}[Rogers \cite{Rogers}] \label{thm:rogers_int}
Let $1 \leq k \leq n-1$. Then for $f: (\mathbb{R}^n)^k \rightarrow \mathbb{R}$ a compactly supported and bounded Borel measurable function, we have
\begin{equation*}
\int_{X_n} \sum_{x_1, \ldots, x_k \in L \atop \mathrm{rk}\,\langle x_1, \ldots, x_k \rangle = k} f(x_1, \ldots, x_k) d\mu_{n}(L) = \int_{\mathbb{R}^n} \ldots \int_{\mathbb{R}^n} f(x_1, \ldots, x_k) dx_1 \ldots dx_k.
\end{equation*}
\end{theorem}
Rogers also proved explicit formulas of this kind for the cases $\mathrm{rk}\,\langle x_1, \ldots, x_k \rangle = l$ for any $1 \leq l < k$ --- see (\cite{Rogers}) for the precise statement. This result, called the Rogers integral formula, is the essential tool for the study of random lattice vectors, since it makes it possible to study the higher moments of the lattice vector-counting function $\sum_{x\in L \backslash \{0\}} f(x)$. See Schmidt (\cite{Sch3}) for yet more variants of the Siegel integral formula, and Kim (\cite{Kim}), Shapira and Weiss (\cite{SW}), and S\"odergren and Str\"ombergsson (\cite{SS}) for some of the recent applications of these mean value theorems.
The motivation of the present paper is to explore the extensions of these results from the counting of lattice vectors to that of rank $d < n$ sublattices and of flags. In other words, we ask whether they too demonstrate any interesting statistical behavior, like the lattice vectors do.
On the practical side, we hope that such extensions will find applications in lattice-based cryptography. One of the basic implications of Theorem \ref{thm:siegel_int}, that a ball of volume $V$ contains $V$ nonzero lattice vectors on average, is already a fundamental tool for predicting and fine-tuning the decryption process. More recent results, such as the Poissonian behavior of short lattice vectors (\cite{Sod}), have also found applications in cryptanalysis (\cite{BSW}). It is therefore natural to expect that analogous results on sublattices would be useful too, since those are what the BKZ algorithm (stands for Block Korkine-Zolotarev, originally proposed by Schnorr and Euchner \cite{SE94}), the standard decryption algorithm for lattice-based systems, operates on.
Our first result is the following generalization of Theorem \ref{thm:rogers_int}. For a lattice $L \in X_n$ and $1 \leq d < n$, write $\mathrm{Gr}(L, d)$ for the set of primitive rank $d$ sublattices of $L$. For an element $A \in \mathrm{Gr}(L,d)$, define $\det A$ as follows: choose any basis $\{v_1, \ldots, v_d\}$ of $A$, and we set $\det A = \|v_1 \wedge \ldots \wedge v_d \|$, where $\| \cdot \|$ here is the standard Euclidean norm in $\wedge^d \mathbb{R}^n$. This definition is independent of the basis choice. Throughout this paper, for $A \in \mathrm{Gr}(L, d)$ and $H \geq 0$ we write
\begin{equation*}
f_{H}(A) = \begin{cases} 1 & \mbox{if $\det A \leq H$} \\ 0 & \mbox{otherwise.} \end{cases}
\end{equation*}
We also define
\begin{equation*}
a(n,d) = \frac{1}{n}\binom{n}{d}\prod_{i=1}^{d}\frac{V(n-i+1)\zeta(i)}{V(i)\zeta(n-i+1)},
\end{equation*}
where $V(i) = \pi^{i/2}/\Gamma(1+i/2)$ is the volume of the unit ball in $\mathbb{R}^i$, and $\zeta(i)$ is the Riemann zeta function evaluated at the positive integer $i$, except that we pretend $\zeta(1) = 1$ for notational convenience. Then we prove the following, the first main result of the present paper.
\begin{theorem} \label{thm:main}
Suppose $1 \leq k \leq n-1$, $1 \leq d_1, \ldots, d_k \leq n-1$ with $d_1 + \ldots + d_k \leq n-1$. Then
\begin{equation*}
\int_{X_n} \underset{A_1, \ldots, A_k\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(L, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(L, d_k)}} f_{H_1}(A_1) \ldots f_{H_k}(A_k) d\mu_n(L) = \prod_{i=1}^k a(n,d_i)H_i^n.
\end{equation*}
\end{theorem}
\begin{remark}
(i) We point out that the $k=1$ case of this theorem is proved by Thunder (\cite{Thu3}) in the more general context of number fields.
(ii) If one wants a formula that counts non-primitive sublattices as well, by a standard M\"obius inversion argument (see \cite{Sch}, or Section 7.1 of \cite{Kim2}) one can show that we could simply replace all the $a(n,d)$ by
\begin{equation*}
c(n,d) = a(n,d)\prod_{i=1}^d \zeta(n-i+1).
\end{equation*}
The same applies to our other results introduced below.
\end{remark}
It is natural to ask what happens if the sublattices $A_1, \ldots, A_k$ are not independent. In fact, if $A_i \cap A_j \neq A_i, A_j, 0$ for some $i, j$, the corresponding integral diverges, as can be seen from the next statement that we prove.
\begin{theorem}\label{thm:overlap}
Let $1 \leq r < d_1, d_2$, so that $d_1 + d_2 < n + r$. Then
\begin{equation*}
\int_{X_n} \sum_{A \in \mathrm{Gr}(L,d_1)} \sum_{B \in \mathrm{Gr}(L,d_2) \atop \mathrm{rk}\, A \cap B = r} f_{H_1}(A) f_{H_2}(B) d\mu_n(L) = \infty.
\end{equation*}
\end{theorem}
Because
\begin{equation} \label{eq:l2}
\int_{X_n} \left(\sum_{A \in \mathrm{Gr}(L, d)} f_{H}(A) \right)^2 d\mu_n(L) = \sum_{r=0}^d \int_{X_n} \sum_{A \in \mathrm{Gr}(L,d)} \sum_{B \in \mathrm{Gr}(L,d) \atop \mathrm{rk}\, A \cap B = r} f_{H}(A) f_{H}(B) d\mu_n(L),
\end{equation}
Theorem \ref{thm:overlap} has the following consequence.
\begin{corollary}
The $L^2$-norm of $\sum_{A \in \mathrm{Gr}(L, d)} f_{H}(A)$ diverges.
\end{corollary}
Thus, unfortunately, it would be difficult to study $\mathrm{Gr}(L,d)$ via the standard methods. At least we learn that the statistics of rank $d$ sublattices is radically different from that of lattice vectors. In particular, the various Poissonian properties enjoyed by random lattice vectors (see e.g. \cite{Kim}) fail spectacularly for rank $d$ sublattices. It may be possible to tweak the left-hand side of \eqref{eq:l2}, for example by restricting to counting certain pairs of elements of $\mathrm{Gr}(L,d)$, to extract some useful information, but I have been unable to do so.
At the other extreme is the case in which $A_1 \subseteq \ldots \subseteq A_k$, and we would be counting \emph{flags}. For $L \in X_n$ and $d_0 = 0 < d_1 < \ldots < d_k <d_{k+1} = n$, a flag of type $\mathfrak{d} = (d_1, \ldots, d_k)$ (rational with respect to $L$) is a sequence of sublattices
\begin{equation*}
A_1 \subseteq A_2 \subseteq \ldots \subseteq A_k \subseteq L
\end{equation*}
such that $\dim A_i = d_i$. Define
\begin{equation*}
a(n, \mathfrak{d}) = a(n,d_1)\prod_{i=1}^{k-1} \frac{n-d_{i-1}}{d_{i+1}-d_{i-1}}a(n-d_i, d_{i+1} - d_i).
\end{equation*}
(This coincides with $a(\alpha)$ in Thunder (\cite{Thu2}).) Then we have the following formula for counting rational flags.
\begin{theorem} \label{thm:flags}
The $\mu_n$-average of the number of flags $A_1 \subseteq \ldots \subseteq A_k$ of type $\mathfrak{d} = (d_1, \ldots, d_k)$ such that $\det A_i \leq H_i$ for $i = 1, \ldots, k$ is equal to
\begin{equation*}
a(n, \mathfrak{d})\prod_{i=1}^k H_i^{d_{i+1} - d_{i-1}}.
\end{equation*}
\end{theorem}
Theorem \ref{thm:flags} has the following corollary.
\begin{corollary}
The $\mu_n$-average number of flags of height $\leq H$ is equal to $\infty$.
\end{corollary}
Here, the \emph{height} of a flag $A_1 \subseteq \ldots \subseteq A_k$ is defined as the quantity
\begin{equation*}
\prod_{i=1}^k (\det A_i)^{d_{i+1}-d_{i-1}}.
\end{equation*}
(See e.g. \cite{FMT} or \cite{Thu2}.) Therefore, the average number of flags of height $\leq H$ equals
\begin{equation*}
a(n,\mathfrak{d})\int_{x_i > 0 \atop x_1 \ldots x_k \leq H} dx_1 \ldots dx_k,
\end{equation*}
but this is $\infty$ for $k \geq 2$. It may be interesting to compare this result with Theorem 5 in Thunder (\cite{Thu2}).
We end the introduction with a few words on the method of proof. It is a product of a reflection on the phenomenon in which the mean of a counting function over $X_n$ coincides with its main term for a fixed individual lattice. For instance, Theorem \ref{thm:siegel_int} implies
\begin{equation*}
\int_{X_n} \left| B(V) \cap L \backslash \{0\} \right| d\mu_n(L) = V,
\end{equation*}
which is the expected main term of an estimate of $\left| B(V) \cap L \backslash \{0\} \right|$ for a fixed $L$. Among several possible approaches to mean value formulas, we chose the one that transparently shows how this happens. A discrete analogue of Theorem 1 of Rogers (\cite{Rogers}), which bears some semblance to the Hecke equidistribution (Lemma \ref{lemma:eqdist} below), reduces the problem of averaging over $X_n$ to that of counting over a fixed lattice; and the author's recent work (\cite{Kim2}) solves this counting problem. This argument also serves to fix a recently found error in Rogers' proof of the formula named after him (\cite{Rogers}); see Section 2 below for details. For Theorems \ref{thm:overlap} and \ref{thm:flags}, we also need the standard unfolding trick for the Eisenstein series.
If so desired, an appropriate combination of these techniques allows one to handle many other integrals in which the sublattices $A_1, \ldots, A_k$ interact in different ways, e.g. something like
\begin{equation*}
\int_{X_n} \underset{A_1, A_2\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(L, d_1)} \sum_{A_2 \in \mathrm{Gr}(L, d_2)}} \sum_{B \in \mathrm{Gr}(A_1 \oplus A_2, r)} f_{H_1}(A_1)f_{H_2}(A_2)f_{H_3}(B) d\mu_n(L).
\end{equation*}
\subsection{Acknowledgments}
This work was supported by NSF grant CNS-2034176. The author thanks the referee for the careful review of the original manuscript.
\section{Some background}
Fix a positive integer $n$. For a prime $p$ and $c \in \{ 1, \ldots, n \}$, let $\mathcal{M}^{(c)}_p$ be the set of $n \times n$ integral matrices $M = (m_{ij})_{1 \leq i, j \leq n}$ such that $m_{ij} = \delta_{ij}$ for $j \neq c$, and $0 \leq m_{ic} \leq p-1$ for $i < c$, $m_{cc} = p$, and $m_{ic} = 0$ for $i > c$. To illustrate, $M$ is a matrix of the form
\begin{equation*}
\begin{pmatrix}
1 & & & x_1 & & & \\
& \ddots & & \vdots & & & \\
& & 1 & x_{c-1} & & & \\
& & & p & & & \\
& & & & 1 & & \\
& & & & & \ddots & \\
& & & & & & 1 \end{pmatrix}.
\end{equation*}
Also let $\mathcal{M}_p = \bigcup_{c=1}^n \mathcal{M}^{(c)}_p$. It is clear that $|\mathcal{M}^{(c)}_p| = p^{c-1}$, so $|\mathcal{M}_p| = 1 + p + \ldots + p^{n-1}$. For the $n \times n$ diagonal matrix
\begin{equation*}
\alpha_p = \mathrm{diag}(1, \ldots, 1, p) = \begin{pmatrix}
1 & & & \\
& \ddots & & \\
& & 1 & \\
& & & p \end{pmatrix},
\end{equation*}
$\mathcal{M}_p$ serves as a set of all distinct representatives of the right cosets of $\Gamma$ in the double coset $\Gamma \alpha_p \Gamma$, where $\Gamma = \mathrm{SL}(n, \mathbb{Z})$ --- see Chapter 3 of Shimura (\cite{Shi}). The Hecke operator on the functions on $X_n$ with respect to $\alpha_p$, which we denote by $T_p$, is defined as
\begin{align*}
T_p\rho(L) &= \frac{1}{|\Gamma \backslash \Gamma\alpha_p\Gamma|}\sum_{M \in \Gamma \backslash \Gamma\alpha_p\Gamma} \rho(p^{-1/n}ML) \\
&= \frac{1}{|\mathcal{M}_p|}\sum_{M \in \mathcal{M}_p} \rho(p^{-1/n}ML).
\end{align*}
Throughout this paper, we will also write $L$ for any representative of the coset $L \in X_n$ when it is harmless to do so, as we have done above. In other words, we write $L$ for both an element of $\mathrm{SL}(n,\mathbb{R})$ and the lattice spanned by the row vectors of $L$, to keep the notations simple.
Lemma \ref{lemma:eqdist} below, which is a discrete analogue of Theorem 1 of Rogers (\cite{Rogers}), is crucial to the present paper. However, the author (thanks to Kevin Schmitt) recently found that Rogers's argument contains an error: in p. 256 of \cite{Rogers}, he claims $\int_{X_n} \rho(\gamma L) d\mu_n = \int_{X_n} \rho(L) d\mu_n$ for any $\gamma \in \mathrm{SL}(n,\mathbb{R})$, saying that it follows ``from the known properties of the fundamental domain,'' but offering no justification otherwise. This equality is in fact not true, which can be seen by taking $n=2$, $\rho$ any function on $X_n$ vanishing in a neighborhood of the cusp, and
\begin{equation*}
\gamma = \begin{pmatrix} a & 0 \\ 0 & a^{-1} \end{pmatrix}
\end{equation*}
for large $a > 0$, for example. Fortunately, in our discrete context, the needed relation follows from the basic properties of the Hecke operators.
\begin{lemma} \label{lemma:hecke}
Let $\rho(L)$ be an integrable function on $X_n$. Then
\begin{equation*}
\int_{X_n} T_p\rho(L) d\mu_n(L) = \int_{X_n} \rho(L) d\mu_n(L).
\end{equation*}
\end{lemma}
\begin{proof}
The argument is very similar to the proof of Proposition 3.39 in \cite{Shi}. Write $\Gamma = \mathrm{SL}(n,\mathbb{Z})$ as before, and let
\begin{equation*}
\widetilde{\mathcal{M}}_p^{(c)} = \{E_{cn}M : M \in \mathcal{M}_p^{(c)}\},
\end{equation*}
where $E_{cn}$ is the permutation matrix obtained by swapping the $c$-th and $n$-th rows of the $n \times n$ identity matrix and then changing the sign of the $n$-th row (so that $\det E_{cn} = 1$). Observe that $\widetilde{\mathcal{M}}_p := \bigcup_{c=1}^n \widetilde{\mathcal{M}}_p^{(c)}$ is also a set of all representatives of the right cosets of $\Gamma$ in $\Gamma\alpha_p\Gamma$. Moreover, $\alpha_p^{-1}M \in \Gamma$ for all $M \in \widetilde{\mathcal{M}}_p$. Therefore
\begin{equation*}
\Gamma = \Gamma \cap \alpha^{-1}_p \Gamma \alpha_p \Gamma = \bigcup_{M \in \widetilde{\mathcal{M}}_p} (\Gamma \cap \alpha_p^{-1} \Gamma \alpha_p \alpha^{-1}_p M) = \bigcup_{M \in \widetilde{\mathcal{M}}_p} (\Gamma \cap \alpha_p^{-1} \Gamma \alpha_p) \alpha^{-1}_p M.
\end{equation*}
This shows that the elements $\alpha_p^{-1}M$, $M \in \widetilde{\mathcal{M}}_p$, serve as the coset representatives of $\Gamma \cap \alpha^{-1}_p \Gamma \alpha_p$ in $\Gamma$. Hence we can reinterpret $T_p$ as
\begin{equation*}
T_p\rho(L) = \frac{1}{|\mathcal{M}_p|}\sum_{N \in (\Gamma \cap \alpha_p^{-1} \Gamma \alpha_p) \backslash \Gamma} \rho(p^{-1/n}\alpha_p NL).
\end{equation*}
For a choice of the fundamental domain $P$ with respect to $\Gamma \cap \alpha_p^{-1} \Gamma \alpha_p$, we have
\begin{align*}
&\int_{X_n} T_p\rho(L) d\mu_n(L) \\
&= \frac{1}{|\mathcal{M}_p|} \int_{X_n} \sum_{N \in (\Gamma \cap \alpha_p^{-1} \Gamma \alpha_p) \backslash \Gamma} \rho(p^{-1/n}\alpha_p NL) d\mu_n(L) \\
&= \frac{1}{|\mathcal{M}_p|} \int_{P} \rho(p^{-1/n}\alpha_p L) d\mu_n(L) \\
&= \frac{1}{|\mathcal{M}_p|} \int_{p^{-1/n}\alpha_p P} \rho(L) d\mu_n(L).
\end{align*}
However, $p^{-1/n}\alpha_p P$ is a fundamental domain with respect to $\alpha_p \Gamma \alpha^{-1}_p \cap \Gamma$. Since $|\mathcal{M}_p| = [\Gamma : \Gamma \cap \alpha_p^{-1} \Gamma \alpha_p] = [\Gamma : \alpha_p \Gamma \alpha^{-1}_p \cap \Gamma]$ (see e.g. Proposition 3.6 of \cite{Shi}), this completes the proof.
\end{proof}
\begin{lemma} \label{lemma:eqdist}
Let $\rho(L)$ be a non-negative integrable function on $X_n$. Suppose $\lim_{p \rightarrow \infty} T_p \rho(L)$ exists and has the same (finite) value $I$ for all $L \in X_n$. Then $I = \int_{X_n} \rho d\mu_n$.
\end{lemma}
\begin{proof}
For any function $F$ and $h \in \mathbb{R}$, write $[F]_h := \min(F, h)$. For any $h > I$, the dominated convergence theorem implies
\begin{equation*}
\int_{X_n} \left[T_p\rho\right]_h(L) d\mu_n(L) \rightarrow I
\end{equation*}
as $p \rightarrow \infty$. Also by Lemma \ref{lemma:hecke}, we have
\begin{equation*}
\int_{X_n} [\rho]_h(L) d\mu_n(L) = \int_{X_n} T_p[\rho]_h(L) d\mu_n(L) \leq \int_{X_n} [T_p\rho]_h(L) d\mu_n(L).
\end{equation*}
Taking $p \rightarrow \infty$ and then $h \rightarrow \infty$ here, by the monotone convergence theorem we obtain the upper bound
\begin{equation*}
\int_{X_n} \rho(L) d\mu_n(L) \leq I.
\end{equation*}
On the other hand, consider the integral
\begin{align*}
&\int_{X_n} T_p\rho(L) d\mu_n(L) \\
&= \int_{X_n} \frac{1}{|\mathcal{M}_p|} \sum_{M \in \mathcal{M}_p} \rho(p^{-1/n}M L) d\mu_n(L) \\
&= \frac{1}{|\mathcal{M}_p|} \sum_{M \in \mathcal{M}_p} \int_{X_n} \rho(p^{-1/n}M L) d\mu_n(L).
\end{align*}
Combined with Fatou's lemma, this implies that
\begin{equation*}
I = \int_{X_n} \lim_{p \rightarrow \infty} T_p\rho(L) d\mu_n(L) \leq \lim_{p \rightarrow \infty} \int_{X_n} T_p\rho(L) d\mu_n(L) = \int_{X_n} \rho(L) d\mu_n(L),
\end{equation*}
again by Lemma \ref{lemma:hecke}. This completes the proof.
\end{proof}
\begin{remark}
Lemma \ref{lemma:eqdist} may remind one of the Hecke equidistribution (\cite{DM}). Of course, the latter is a much deeper statement, but Lemma \ref{lemma:eqdist} has the advantage that it applies to functions that are neither compactly supported nor bounded, which is the situation we are in.
\end{remark}
To confirm the rather strong condition for Lemma \ref{lemma:eqdist}, we need the following theorem recently established by the author (\cite{Kim2}). Here we only state the parts that we need.
\begin{theorem}[Kim \cite{Kim2}] \label{thm:fixed_L}
For a (full-rank) lattice $L \subseteq \mathbb{R}^n$, define $P(L, d, H)$ to be the number of primitive rank $d < n$ sublattices of $L$ of determinant less than or equal to $H$. Also let
\begin{equation*}
b(n, d) = \max\left(\frac{1}{d}, \frac{1}{n-d}\right).
\end{equation*}
Then
\begin{equation} \label{eq:fixed_L}
P(L, d, H) = a(n, d)\frac{H^n}{(\det L)^d} + O\left(\sum_{\gamma \in \mathbb{Q} \atop 0 \leq \gamma \leq n - b(n,d)} b_\gamma(L)H^\gamma\right),
\end{equation}
where the implied constant depends only on $n$ and $d$, the sum on the right is finite, and every $b_\gamma$ is a reciprocal of a product of the successive minima of $L$, so that the right-hand side of \eqref{eq:fixed_L} is invariant under rescaling $L$ to $\alpha L$ and $H$ to $\alpha^d H$.
Furthermore, for a sublattice $S \subseteq L$ of rank $\leq n-d$, define $P_S(L,d,H)$ to be the number of primitive rank $d$ sublattices of $L$ of determinant $\leq H$ that intersect trivially with $S$. Then $P_S(L,d,H)$ also satisfies the estimate \eqref{eq:fixed_L}; in particular, the error term is independent of $S$. The sublattices that do intersect $S$ contribute at most $O(\sum_{\gamma \leq n-1} b_\gamma H^{\gamma})$.
\end{theorem}
For a $d \times n$ matrix $A = (a_{ij})_{d \times n}$ and $c \in \{1, \ldots, n\}$, write $A^{(c)} = (a_{ij})_{d \times (c-1)}$ for the ``first'' $d \times (c-1)$ submatrix of $A$. We also define $\det A = (\det AA^T)^{1/2}$. As with $L$, let us use the same letter $A$ to refer to the rank $d$ lattice generated by the row vectors of $A$. With this convention, the definition of $\det A$ just given for the matrix $A$ is consistent with the definition of $\det A$ for $A \in \mathrm{Gr}(L,d)$ in the introduction. For $A \in \mathrm{Gr}(L,d)$, we also sometimes write $\det_L A$ when the extra clarification might be helpful.
\begin{proposition} \label{prop:mat-det}
We continue with the notations of the preceding discussion. In addition, choose a basis $\{v_1, \ldots, v_n\}$ of $L \in X_n$, and also write $L$ for the matrix whose $i$-th row is $v_i$. Let $A$ be an integral $d \times n$ matrix, $c \in \{d+1, \ldots, n\}$, and let $\bar{L}^{(c)}$ be the $(c-1) \times n$ matrix whose $i$-th row vector $\bar{v}_i$ is the projection of $v_i$ onto $\mathrm{span}\{v_c, \ldots, v_n\}^\perp$. Then, provided $\det A^{(c)} \neq 0$, $\det AL \geq \det A^{(c)}\bar{L}^{(c)}$.
\end{proposition}
\begin{proof}
We first present the proof for the case $c = n$. Write $A = (A^{(n)}; a)$, with $a = (a_{1n}, \ldots, a_{dn})^T$. Similarly, write
\begin{equation*}
L =
\begin{pmatrix}
1 & & & \mu_1 \\
& \ddots & & \vdots \\
& & 1 & \mu_{n-1} \\
& & & 1
\end{pmatrix}
\cdot
\begin{pmatrix}
& & \\
& \bar{L}^{(n)} & \\
& & \\
& v_n &
\end{pmatrix}
\end{equation*}
for some $\mu_1, \ldots, \mu_{n-1} \in \mathbb{R}$. Write $\mu = (\mu_1, \ldots, \mu_{n-1})^T$. Then
\begin{equation*}
AL = A^{(n)}\bar{L}^{(n)} + (A^{(n)}\mu + a)v_n.
\end{equation*}
Temporarily write $\mathcal{A} = A^{(n)}\bar{L}^{(n)}$, $\mathcal{B} = (A^{(n)}\mu + a)v_n$. Then
\begin{equation*}
(AL)(AL)^T = (\mathcal{A} + \mathcal{B})(\mathcal{A}^T + \mathcal{B}^T) = \mathcal{A}\mathcal{A}^T + \mathcal{B}\mathcal{B}^T,
\end{equation*}
because $\mathcal{A}\mathcal{B}^T = \mathcal{B}\mathcal{A}^T = 0$. Also observe that
\begin{equation*}
\mathcal{B}\mathcal{B}^T = \|v_n\|^2(A^{(n)}\mu + a)(A^{(n)}\mu + a)^T.
\end{equation*}
The matrix-determinant lemma now gives
\begin{equation*}
(\det AL)^2 = (\det \mathcal{A})^2(1+ \|v_n\|^2(A^{(n)}\mu + a)^T(\mathcal{A}\mathcal{A}^T)^{-1}(A^{(n)}\mu + a)) \geq (\det \mathcal{A})^2,
\end{equation*}
which yields the desired conclusion.
To prove the $c=n-1$ case, for example, observe that we can write
\begin{equation*}
\bar{L}^{(n)} = \begin{pmatrix}
1 & & & \mu'_1 \\
& \ddots & & \vdots \\
& & 1 & \mu'_{n-2} \\
& & & 1
\end{pmatrix}
\cdot
\begin{pmatrix}
& & \\
& \bar{L}^{(n-1)} & \\
& & \\
& \bar{v}_{n-1} &
\end{pmatrix}
\end{equation*}
for some $\mu'_1, \ldots, \mu'_{n-2} \in \mathbb{R}$, where $\bar{v}_{n-1}$ is the last row of $\bar{L}^{(n)}$. By repeating the argument above, we obtain $\det A^{(n)}\bar{L}^{(n)} \geq \det A^{(n-1)}\bar{L}^{(n-1)}$. The remaining cases follow by further repetitions.
\end{proof}
\section{Proof of Theorem \ref{thm:main}}
Recall that we wish to evaluate the integral
\begin{equation} \label{eq:toy2}
\int_{X_n} \underset{A_1, \ldots, A_k\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(L, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(L, d_k)}} f_{H_1}(A_1) \ldots f_{H_k}(A_k) d\mu_n(L),
\end{equation}
where we have $d_1 + \ldots + d_k := d < n$. The plan is to instead estimate the sum
\begin{equation} \label{eq:toy2sum}
\frac{1}{|\mathcal{M}_p|} \sum_{M \in \mathcal{M}_p}\underset{A_1, \ldots, A_k\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1ML) \ldots f_{p^{d_k/n}H_k}(A_kML)
\end{equation}
for a fixed $L \in X_n$ in the $p$ limit, and then use Lemma \ref{lemma:eqdist} to prove Theorem \ref{thm:main}. More precisely, we rewrite \eqref{eq:toy2sum} as
\begin{equation*}
\frac{1}{|\mathcal{M}_p|} \sum_{c=1}^{n} \sum_{M \in \mathcal{M}^{(c)}_p}\underset{A_1, \ldots, A_k\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1ML) \ldots f_{p^{d_k/n}H_k}(A_kML),
\end{equation*}
and study the inner sum for each $c$, to show that this approaches $\prod_{i=1}^k a(n,d_i)H_i^n$ as $p \rightarrow \infty$. This is independent of $L$, and hence Lemma \ref{lemma:eqdist} applies.
Throughout this section, we fix a representative of $L$ in $\mathrm{SL}(n,\mathbb{R})$, also denoted by $L$. It will also be helpful for us to identify each $A_l \in \mathrm{Gr}(\mathbb{Z}^n,d_l)$ with a choice of its matrix representative, e.g. its Hermite normal form (HNF), for explicit computations. Let $c \in \{1, \ldots, n\}$, and
\begin{equation*}
M = \begin{pmatrix}
1 & & & x_1 & & & \\
& \ddots & & \vdots & & & \\
& & 1 & x_{c-1} & & & \\
& & & p & & & \\
& & & & 1 & & \\
& & & & & \ddots & \\
& & & & & & 1 \end{pmatrix} \in \mathcal{M}^{(c)}_p.
\end{equation*}
If we write $A_l = (a^l_{ij})_{d_l \times n}$ as a matrix, then for each $l$
\begin{equation*}
A_lML =
\begin{pmatrix}
a^l_{11} & a^l_{12} & \dots & \sum_{j=1}^{c-1} a^l_{1j}x_j + pa^l_{1c} & \dots & a^l_{1n} \\
\vdots & \vdots & & \vdots & & \vdots \\
a^l_{d_l1} & a^l_{d_l2} & \dots & \sum_{j=1}^{c-1} a^l_{d_lj}x_j + pa^l_{d_lc} & \dots & a^l_{d_ln}
\end{pmatrix}
L.
\end{equation*}
Denote by $A^{(c)}_l = (a^l_{ij})_{d_l \times (c-1)}$ the first $d_l \times (c-1)$ submatrix of $A_l$, and write
\begin{equation*}
A = (a_{ij})_{d \times n} = \begin{pmatrix} A_1 \\ \vdots \\ A_k \end{pmatrix},
A^{(c)} = (a_{ij})_{d \times (c-1)} = \begin{pmatrix} A^{(c)}_1 \\ \vdots \\ A^{(c)}_k \end{pmatrix},
\end{equation*}
which are $d \times n$ and $d \times (c-1)$ matrices, respectively.
\subsection{The main term}
The main term of \eqref{eq:toy2sum} comes from the terms with $c=n$, namely
\begin{equation}\label{eq:c=n}
\frac{1}{|\mathcal M_{p}|}\sum_{M \in \mathcal{M}^{(n)}_p}\underset{A_1, \ldots, A_k\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1ML) \ldots f_{p^{d_k/n}H_k}(A_kML).
\end{equation}
Consider the map $A^{(n)}: \mathbb{F}_p^{n-1} \rightarrow \mathbb{F}_p^d$ induced by the matrix $A^{(n)}$ as above. We claim that the contribution to the above sum from the $A_l$'s for which $A^{(n)}$ is not surjective is negligible. There are two cases:
\begin{enumerate}[(i)]
\item $\det A^{(n)} \geq p$ over $\mathbb{Q}$. Then
\begin{equation*}
p \leq \det A^{(n)} \leq \prod_{l=1}^k \det A^{(n)}_l,
\end{equation*}
so there exists an $l$ such that $\det A^{(n)}_l \geq p^{d_l/n + 1/nk}$, and hence $\det A^{(n)}_l\bar{L} \gg_L p^{d_l/n + 1/nk}$, where $\bar{L}$ here is $\bar{L}^{(n)}$ in the statement of Proposition \ref{prop:mat-det}. Proposition \ref{prop:mat-det} implies $\det A_lML \gg_L p^{d_l/n + 1/nk}$. For $p$ sufficiently large, this is greater than $p^{d_l/n}H_l$, and so does not contribute to the sum.
\item $\det A^{(n)} = 0$ over $\mathbb{Q}$. Row-reduce $A$ so that the last row equals $(0, \ldots, 0, C)$ with $C \neq 0$ --- possible by assumption $\mathrm{rk}\, A = d$ --- which shows that $\det AM \geq p$. This again implies $\det A_lML \gg_L p^{d_l/n + 1/nk}$ for some $l$.
\end{enumerate}
Now if $A^{(n)}$ did induce a surjective map onto $\mathbb{F}_p^d$, then the vectors
\begin{equation*}
\left( \sum_{j=1}^{n-1} {a_{1j}x_j}, \ldots, \sum_{j=1}^{n-1} {a_{dj}x_j} \right)
\end{equation*}
are equidistributed mod $p$. Therefore computing \eqref{eq:c=n} is equivalent to computing
\begin{equation*}
\frac{p^{n-1}}{p^d|\mathcal M_{p}|}\underset{\mathrm{rk}\,_p A^{(n)} = d}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n,d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n,d_k)}} f_{p^{d_1/n}H_1}(A_1L) \ldots f_{p^{d_k/n}H_k}(A_kL)
\end{equation*}
(here $\mathrm{rk}\,_p$ means the rank modulo $p$). This is equal to
\begin{equation} \label{eq:ribbit}
\frac{p^{n-1}}{p^d|\mathcal M_{p}|}\underset{\mathrm{rk}\, A^{(n)} = d}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n,d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n,d_k)}} f_{p^{d_1/n}H_1}(A_1L) \ldots f_{p^{d_k/n}H_k}(A_kL)
\end{equation}
because $\mathrm{rk}\,_p A^{(n)} = d \Leftrightarrow \mathrm{rk}\, A^{(n)} = d$ and $p \nmid \det A^{(n)}$, and we already showed that if $p \mid \det A^{(n)}$ the corresponding sets of $A_l$'s do not contribute to the sum.
The summation in \eqref{eq:ribbit} requires that $A_k$ is independent of $A_1, \ldots, A_{k-1}$ and $(0, \ldots, 0, 1)$, since otherwise, $A_1 \oplus \ldots \oplus A_k$ contains a nonzero multiple of $(0, \ldots, 0, 1)$, which implies $\mathrm{rk}\, A^{(n)} < d$. Thus, by applying Theorem \ref{thm:fixed_L} with $S = A_1 \oplus \ldots \oplus A_{k-1} \oplus \mathrm{span}_\mathbb{Z}\{(0, \ldots, 0, 1)\}$, we can rewrite \eqref{eq:ribbit} as
\begin{align*}
&\frac{p^{n-1}}{p^d|\mathcal M_{p}|}\left(a(n,d_k)H_k^np^{d_k} + o_L(H_k^np^{d_k})\right) \cdot \\
&\underset{\mathrm{rk}\, A^{(n)} = d - d_k}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n,d_1)} \ldots \sum_{A_{k-1} \in \mathrm{Gr}(\mathbb{Z}^n,d_{k-1})}} f_{p^{d_1/n}H_1}(A_1L) \ldots f_{p^{d_{k-1}/n}H_{k-1}}(A_{k-1}L),
\end{align*}
where $A^{(n)}$ here now means
\begin{equation*}
A^{(n)} = \begin{pmatrix} A^{(n)}_1 \\ \vdots \\ A^{(n)}_{k-1} \end{pmatrix}.
\end{equation*}
Repeating the same argument with other $A_i$'s, we find that \eqref{eq:ribbit} equals
\begin{equation*}
\frac{p^{n-1}}{|\mathcal M_{p}|}\left(\prod_{i=1}^ka(n,d_i)H^n_i + o_{L,H_1, \ldots, H_k}(1)\right).
\end{equation*}
Recalling $|\mathcal M_{p}| = \sum_{i=0}^{n-1} p^i$, and taking $p \rightarrow \infty$, this gives the intended main term $\prod_{i=1}^k a(n,d_i)H^n_i$ for \eqref{eq:toy2sum}.
\subsection{Error terms, part 1}
In the rest of this section, we show that, for $c \in \{1, \ldots, n-1\}$,
\begin{equation}\label{eq:c<n}
\frac{1}{|\mathcal M_{p}|}\sum_{M \in \mathcal{M}^{(c)}_p}\underset{A_1, \ldots, A_k\ \mathrm{independent}}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1ML) \ldots f_{p^{d_k/n}H_k}(A_kML)
\end{equation}
vanishes as $p \rightarrow \infty$. This will complete the proof of Theorem \ref{thm:main}.
We first assume $c > d$, and consider the contributions from those $A_1, \ldots, A_k$ such that $\mathrm{rk}\,_p A^{(c)} = d$. By a similar argument to the $c=n$ case, the surjection of the linear map $A^{(c)} : \mathbb{F}^{c-1}_p \rightarrow \mathbb{F}^d_p$ implies that their contributions amount to
\begin{equation*}
\frac{p^{c-1}}{p^d|\mathcal M_{p}|}\underset{\mathrm{rk}\,_p A^{(c)} = d}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n,d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n,d_k)}} f_{p^{d_1/n}H_1}(A_1L) \ldots f_{p^{d_k/n}H_k}(A_kL).
\end{equation*}
We simply drop the rank condition and bound this by
\begin{equation*}
\frac{p^{c-1}}{|\mathcal M_{p}|}\left(\prod_{i=1}^k a(n,d_i)H_i^n + o_{L, H_1, \ldots, H_k}(1)\right),
\end{equation*}
which clearly vanishes as $p \rightarrow \infty$.
Continue with the assumption $c > d$, but this time suppose $\mathrm{rk}\,_p A^{(c)} < d$. If $\det A^{(c)} \geq p$ (over $\mathbb{Q}$), then we can argue exactly as in (i) in Section 3.1 above and show it does not contribute to \eqref{eq:c<n}. The case $\det A^{(c)} = 0$ will be handled below.
\subsection{Error terms, part 2}
We now assume that either $c > d$ and $\det A^{(c)} = 0$, or $c \leq d$, in which case $\det A^{(c)} = 0$ necessarily. Write $\mathrm{rk}\,_p A^{(c)} = c' < \min(c,d)$. We claim that we may assume $\mathrm{rk}\, A^{(c)} = c'$ as well. If not, then $\mathrm{rk}\, A^{(c)} > \mathrm{rk}\,_p A^{(c)}$, and thus the HNF of $A^{(c)}$ has a leading coefficient (also called a pivot) that is a nonzero multiple of $p$. But this implies that $\det A \geq p$ by the Cauchy-Binet formula, and we can again argue as in (i) in Section 3.1 to show that this $A$ contributes zero to \eqref{eq:c<n}.
Suppose in addition that $\mathrm{rk}\, A^{(c+1)} = c' + 1$. Then the HNF of $A$ has a pivot in column $c$, and it follows that the HNF of $AM$ has a pivot in column $c$ that is a multiple of $p$. This implies $\det AM \geq p$, and again we argue as in (i) in Section 3.1.
Summarizing our argument so far, it remains to consider the case in which $\det A^{(c)} = 0$, and $\mathrm{rk}\,_p A^{(c)} = \mathrm{rk}\, A^{(c)} = \mathrm{rk}\, A^{(c+1)} = c' < \min(c,d)$. For integers $1 \leq r_1 < r_2 < \ldots < r_{c'} \leq d$, let $r = (r_1, \ldots, r_{c'})$, and for a matrix $B$ with $d$ rows, denote by $B|_r$ the matrix with $c'$ rows whose $i$-th row is the $r_i$-th row of $B$. For each $r$, let us restrict \eqref{eq:c<n} to those $A$ for which the rows of $A^{(c)}|_r$ are linearly independent. Thus, we are considering the following restriction of \eqref{eq:c<n}:
\begin{equation*}
\frac{1}{|\mathcal M_{p}|}\sum_{M \in \mathcal{M}^{(c)}_p}\underset{\mathrm{rk}\, A = d,\, \mathrm{rk}\, A^{(c)}|_r = \mathrm{rk}\,_p A^{(c)} = \mathrm{rk}\, A^{(c)} = \mathrm{rk}\, A^{(c+1)} = c'}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1ML) \ldots f_{p^{d_k/n}H_k}(A_kML).
\end{equation*}
For each $A$ appearing in this sum, there exists a rational $d \times c'$ matrix $R$ such that $A^{(c)} = RA^{(c)}|_r$. Due to the rank condition $\mathrm{rk}\, A^{(c)} = \mathrm{rk}\, A^{(c+1)}$, we also must have $A^{(c+1)} = RA^{(c+1)}|_r$. In other words, $R$ and $A^{(c+1)}|_r$ determine $A^{(c+1)}$ under our current assumptions. With this understanding, we can rewrite the above sum as
\begin{align*}
&\frac{1}{|\mathcal M_{p}|} \sum_R \sum_{M \in \mathcal{M}^{(c)}_p} \\
&\underset{\mathrm{rk}\, A = d, A^{(c)} = RA^{(c)}|_r, \atop \mathrm{rk}\, A^{(c)}|_r = \mathrm{rk}\,_p A^{(c)} = \mathrm{rk}\, A^{(c)} = \mathrm{rk}\, A^{(c+1)} = c'}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1ML) \ldots f_{p^{d_k/n}H_k}(A_kML),
\end{align*}
where the sum over $R$ is over all $d \times c'$ matrices such that the inner sum is nontrivial. Similarly to the argument in Section 3.1, for each $A$ appearing in the sum, as $M$ ranges over $\mathcal{M}^{(c)}_p$, the $c$-th column of $A|_rM$
\begin{equation*}
\left( \sum_{j=1}^{c-1} a_{r_1j} x_j + pa_{r_1c}, \ldots, \sum_{j=1}^{c-1} a_{r_{c'}j} x_j + pa_{r_{c'}c} \right)^T
\end{equation*}
becomes equidistributed mod $p$. Also, multiplying by $M$ from the right keeps all the rank conditions invariant. Thus the above sum becomes
\begin{align}
\frac{p^{c-1}}{p^{c'}|\mathcal{M}_p|} \sum_R \underset{\mathrm{rk}\, A = d, A^{(c)} = RA^{(c)}|_r, \atop \mathrm{rk}\, A^{(c)}|_r = \mathrm{rk}\,_p A^{(c)} = \mathrm{rk}\, A^{(c)} = \mathrm{rk}\, A^{(c+1)} = c'}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n, d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n, d_k)}} f_{p^{d_1/n}H_1}(A_1L) \ldots f_{p^{d_k/n}H_k}(A_kL) \notag \\
= \frac{p^{c-1}}{p^{c'}|\mathcal{M}_p|}\underset{\mathrm{rk}\, A = d,\, \mathrm{rk}\, A^{(c)}|_r = \mathrm{rk}\, A^{(c)} = \mathrm{rk}\, A^{(c+1)} = c'
}{\sum_{A_1 \in \mathrm{Gr}(\mathbb{Z}^n,d_1)} \ldots \sum_{A_k \in \mathrm{Gr}(\mathbb{Z}^n,d_k)}} f_{p^{d_1/n}H_1}(A_1L) \ldots f_{p^{d_k/n}H_k}(A_kL). \label{eq:abcde}
\end{align}
It remains to estimate this sum \eqref{eq:abcde}. By dropping the rank conditions and applying Theorem \ref{thm:fixed_L}, \eqref{eq:abcde} is at most
\begin{equation*}
\frac{p^{c-c'+d-1}}{|\mathcal{M}_p|}\left(\prod_{i=1}^k a(n,d_i)H_i^{n} + o_{L,H_1, \ldots, H_k}(1)\right),
\end{equation*}
which approaches $0$ as $p \rightarrow \infty$ provided $n-c > d-c'$ (note that $n-c \geq d-c'$ always). If $n-c = d-c'$, note that the HNF of $A$ is of the form
\begin{equation*}
\begin{pmatrix} P & * \\ 0 & Q \end{pmatrix},
\end{equation*}
where $P$ is a $c' \times c$ matrix, and $Q$ is a $(d-c') \times (n-c)$ matrix, where $P$ and $Q$ are themselves HNFs and must be of full rank. Now since $n-c=d-c'$ by assumption, $Q$ is a square matrix, and therefore $Q$ must be an upper triangular matrix with nonzero diagonal entries. This implies that $A_1 \oplus \ldots \oplus A_k$ intersects nontrivially with $\mathrm{span}_\mathbb{Z}\{(0, \ldots, 0, 1)\}$. Therefore, one of the $A_l$'s intersects nontrivially with the lattice $S_l := A_1 \oplus \ldots \oplus \hat{A}_l \oplus \ldots \oplus A_{k} \oplus \mathrm{span}_\mathbb{Z}\{(0, \ldots, 0, 1)\}$ --- where $\hat{A}_l$ here indicates that $A_l$ does \emph{not} appear in the sum --- of dimension at most $d-d_l+1$. But Theorem \ref{thm:fixed_L} implies
\begin{equation*}
\sum_{A_l \in \mathrm{Gr}(\mathbb{Z}^n, d_l) \atop A_l \cap S_l \neq 0} f_{p^{d_l/n}H_l}(A_lL) = o_{L, H_1, \ldots, H_k}(p^{d_l}),
\end{equation*}
which implies that \eqref{eq:abcde} is bounded by
\begin{equation*}
o_{L,H_1, \ldots, H_k}\left(\frac{p^{c-c'+d-1}}{|\mathcal{M}_p|}\right) = o_{L,H_1, \ldots, H_k}(1),
\end{equation*}
as desired.
\section{The case of partially overlapping sublattices}
In this section we prove Theorem \ref{thm:overlap}. Recall that we are considering the integral
\begin{equation} \label{eq:toy4}
\int_{X_n} \sum_{A \in \mathrm{Gr}(L,d_1)} \sum_{B \in \mathrm{Gr}(L,d_2) \atop \mathrm{rk}\, A \cap B = r} f_{H_1}(A) f_{H_2}(B) d\mu_n(L)
\end{equation}
for $1 \leq r < d_1, d_2$, so that $d_1 + d_2 < n + r$.
Let $L \in X_n$. For a primitive sublattice $C \subseteq L$ of rank $r < n$, we identify the quotient lattice $L/C$ with the projection of $L$ onto the $(n-r)$-dimensional subspace of $\mathbb{R}^n$ orthogonal to $C$, and assign the metric induced by this projection from the metric on $L$. If $A \in \mathrm{Gr}(L,d)$ satisfies $C \subseteq A$, then it is easy to see that $A/C \in \mathrm{Gr}(L/C, d-r)$, and that it satisfies
\begin{equation*}
{\det}_L(A) = {\det}_L(C) {\det}_{L/C}(A/C).
\end{equation*}
Using this relation, we rewrite the inner sum of \eqref{eq:toy4} as
\begin{equation*}
\sum_{C \in \mathrm{Gr}(L, r)} \sum_{\bar{A} \in \mathrm{Gr}(L/C, d_1-r) \atop { \bar{B} \in \mathrm{Gr}(L/C, d_2-r) \atop \mathrm{indep}}} f_{H_1/\det_L C}(\bar{A}) f_{H_2/\det_L C}(\bar{B}).
\end{equation*}
We will interpret this expression as a pseudo-Eisenstein series, i.e. a function on $X_n$ of the form $\sum_{\gamma \in P \cap \Gamma \backslash \Gamma} f(\gamma L)$ where $\Gamma = \mathrm{SL}(n,\mathbb{Z})$ and $P$ is a parabolic subgroup of $\mathrm{SL}(n,\mathbb{R})$, and then use the unfolding trick. Fix a representative of $L \in X_n$ in $\mathrm{SL}(n,\mathbb{R})$, again denoted by $L$. In this context, a choice of $C \in \mathrm{Gr}(L, r)$ corresponds to two choices of $\gamma \in P(n-r,r,\mathbb{Z}) \backslash \mathrm{SL}(n,\mathbb{Z})$, where
\begin{equation*}
P(a,b,F) = \left\{
\begin{pmatrix}
G_a & * \\
& G_b
\end{pmatrix}
: G_a \in \mathrm{SL}(a, F), G_b \in \mathrm{SL}(b, F)
\right\}
\end{equation*}
for any ring $F$. $C$ and $\gamma$ are related by $C = \mbox{(sublattice generated by last $r$ rows of $\gamma L$)}$, and this correspondence is one-to-two due to the two possible orientations for the last $r$ rows of $\gamma L$.
Next, fixing a representative of $\gamma$, we can uniquely decompose $\gamma L$ in the form
\begin{equation} \label{eq:levi_decomp}
\begin{pmatrix} G_{n-r} & U \\ & G_r \end{pmatrix} \begin{pmatrix} \alpha^{-\frac{1}{n-r}}I_{n-r} & \\ & \alpha^{\frac{1}{r}}I_r \end{pmatrix} K',
\end{equation}
for some $G_{n-r} \in \mathrm{SL}(n-r,\mathbb{R})$, $G_{r} \in \mathrm{SL}(r,\mathbb{R})$, $U \in \mathrm{Mat}_{(n-r) \times r}(\mathbb{R})$, and $K'$ an element of a fundamental domain of $(\mathrm{SO}(n-r,\mathbb{R}) \times \mathrm{SO}(r,\mathbb{R})) \backslash \mathrm{SO}(n,\mathbb{R})$, which we fix in advance. In this context, a choice of an independent pair $\bar{A} \in \mathrm{Gr}(L/C, d_1-r)$ and $\bar{B} \in \mathrm{Gr}(L/C, d_2-r)$ corresponds to four choices --- again due to the four possibilities for the orientations --- of $\delta \in P'(n-d_1-d_2+r, d_2-r, d_1-r,\mathbb{Z}) \backslash \mathrm{SL}(n-r, \mathbb{Z})$, where
\begin{equation*}
P'(a,b,c, F) = \left\{
\begin{pmatrix}
G_a & * & * \\
& G_b & 0 \\
& & G_c
\end{pmatrix}
: G_a \in \mathrm{SL}(a, F), G_b \in \mathrm{SL}(b, F), G_c \in \mathrm{SL}(c, F)
\right\}
\end{equation*}
for any ring $F$. Indeed, $\bar{A}$ is the sublattice generated by the last $d_1-r$ rows of
\begin{equation}\label{eq:temtem}
\delta \cdot G_{n-r} \cdot \alpha^{-\frac{1}{n-r}}I_{n-r} \cdot \left(\mbox{first $n-r$ rows of $K'$} \right);
\end{equation}
this expression is independent of $U$ because $\bar A$ is orthogonal to $C$, and equivalently also to the last $r$ rows of $K'$. Similarly, $\bar{B}$ is the sublattice generated by the next-to-last $d_2-r$ rows of \eqref{eq:temtem}. But since we are only interested in the determinants of $\bar{A}$ and $\bar{B}$, in what follows we can regard them as the sublattices generated by the corresponding rows of $\delta \cdot \alpha^{-\frac{1}{n-r}}G_{n-r}$.
The considerations so far allow us to rewrite \eqref{eq:toy4} as
\begin{equation*}
\frac{1}{8}\int_{P(n-r,r,\mathbb{Z}) \backslash \mathrm{SL}(n,\mathbb{R})} \sum_{\delta} f_{H_1/\alpha}(\bar{A}) f_{H_2/\alpha}(\bar{B}) d\mu_n,
\end{equation*}
where $\delta$ is summed over $P'(n-d_1-d_2+r, d_2-r, d_1-r,\mathbb{Z}) \backslash \mathrm{SL}(n-r, \mathbb{Z})$.
Lemma \ref{lemma:measure} below, which gives a decomposition of $d\mu_n$ compatible with \eqref{eq:levi_decomp}, implies that this equals
\begin{equation*}
\mathrm{(const)}\int_0^\infty \int_{X_{n-r}} \sum_{\bar{A} \in \mathrm{Gr}(\alpha^{-\frac{1}{n-r}}G_{n-r},d_1-r) \atop {\bar{B} \in \mathrm{Gr}(\alpha^{-\frac{1}{n-r}}G_{n-r},d_2-r) \atop \mathrm{indep}}} f_{H_1/\alpha}(\bar{A}) f_{H_2/\alpha}(\bar{B}) d\mu_{n-r}(G_{n-r}) \cdot \alpha^{n-1} d\alpha,
\end{equation*}
which, by Theorem \ref{thm:main} (notice that $\det\alpha^{-\frac{1}{n-r}}G_{n-r} = \alpha^{-1}$, so we normalize accordingly), equals
\begin{equation*}
\mathrm{(const)}\int_0^\infty H_1^{n-r}H_2^{n-r}\alpha^{-n+d_1+d_2-1}d\alpha,
\end{equation*}
which is divergent, proving Theorem \ref{thm:overlap}. One way to understand this phenomenon is that, if $L \in X_n$ is heavily skewed, i.e.\ its successive minima have a huge gap, there may exist too many possibilities for $C = A \cap B$. Indeed, if we additionally required that $\alpha = \det C \geq 1$, say, then we would have instead obtained
\begin{equation*}
\mathrm{(const)}\int_1^\infty H_1^{n-r}H_2^{n-r}\alpha^{-n+d_1+d_2-1}d\alpha,
\end{equation*}
which converges, at least when $d_1+d_2 < n$.
Before we prove the needed lemma, we fix our notations related to $d\mu_n$. Recall the standard fact (see e.g. \cite{KV}) that, with respect to the $NAK$ decomposition of $\mathrm{SL}(n,\mathbb{R})$, we can write $d\mu_n$ as
\begin{equation*}
d\mu_n = \tau(n)dN \cdot dA \cdot dK,
\end{equation*}
where $\tau(n)$ is some constant, $dN = \prod_{i < j} dn_{ij}$, $dA = \prod_i \alpha_i^{-i(n-i)} d\alpha_i / \alpha_i$ upon writing $A = \mathrm{diag}(a_1, \ldots, a_n)$ and $\alpha_i = a_i/a_{i+1}$, and $dK$ is the Haar measure on $\mathrm{SO}(n,\mathbb{R})$ so that
\begin{equation*}
\int_{\mathrm{SO}(n,\mathbb{R})} dK = \prod_{i=2}^n iV(i).
\end{equation*}
To make $\int_{X_n} d\mu_n = 1$, we set
\begin{equation*}
\tau(n) = \frac{1}{n}\prod_{i=2}^n \zeta^{-1}(i).
\end{equation*}
\begin{lemma}\label{lemma:measure}
With respect to the decomposition \eqref{eq:levi_decomp} of $\mathrm{SL}(n,\mathbb{R})$, we have
\begin{equation*}
d\mu_n = \frac{n}{r(n-r)} \cdot \frac{\tau(n)}{\tau(r)\tau(n-r)}dU d\mu_r(G_r) d\mu_{n-r}(G_{n-r}) \alpha^{n-1} d\alpha dK',
\end{equation*}
where $dU = \prod_{1 \leq i \leq n-r \atop 1 \leq j \leq r} du_{ij}$ on writing $U = (u_{ij})_{1 \leq i \leq n-r \atop 1 \leq j \leq r}$, and $dK'$ is the natural measure on $(\mathrm{SO}(n-r,\mathbb{R}) \times \mathrm{SO}(r,\mathbb{R})) \backslash \mathrm{SO}(n,\mathbb{R})$ descended from the measure $dK$ on $\mathrm{SO}(n,\mathbb{R})$.
\end{lemma}
\begin{proof}
The only nontrivial part of the proof consists of comparing the diagonal parts, or the ``$A$ parts'', of the measures $d\mu_{n}, d\mu_{n-r}, d\mu_r$. We can decompose
\begin{equation*}
\begin{pmatrix}
a_1 & & & & \\
& & & & \\
& & \ddots & & \\
& & & & \\
& & & & a_n
\end{pmatrix}
=
\begin{pmatrix}
b'_1 & & & & & \\
& \ddots & & & & \\
& & b'_{n-r} & & & \\
& & & b''_1 & & \\
& & & & \ddots & \\
& & & & & b''_r
\end{pmatrix}
\begin{pmatrix}
\alpha^{\frac{-1}{n-r}} & & & & & \\
& \ddots & & & & \\
& & \alpha^{\frac{-1}{n-r}} & & & \\
& & & \alpha^{\frac{1}{r}} & & \\
& & & & \ddots & \\
& & & & & \alpha^{\frac{1}{r}}
\end{pmatrix},
\end{equation*}
where there is the relation $\prod a_i = \prod b'_i = \prod b''_i = 1$ among the entries. Write $\alpha_i = a_i/a_{i+1}, \beta'_i = b'_i/b'_{i+1}, \beta''_i = b''_i/b''_{i+1}$. Then the measures on the ``$A$ parts'' of the groups $G_n, G_{n-r}, G_r$ are, respectively,
\begin{equation*}
dA := \prod_i \alpha_i^{-i(n-i)}\frac{d\alpha_i}{\alpha_i}, \quad dB' = \prod_i \beta_i'^{-i(n-r-i)}\frac{d\beta'_i}{\beta'_i}, \quad dB'' = \prod_i \beta_i''^{-i(r-i)}\frac{d\beta''_i}{\beta''_i}.
\end{equation*}
It remains to perform the change of coordinates from the $\alpha_i$-coordinates to the $\beta'_i, \beta''_i, \alpha$-coordinates. We have $\alpha_i = \beta'_i$ for $1 \leq i \leq n-r-1$ and $\alpha_{n-r+i} = \beta''_i$ for $1 \leq i \leq r-1$, and the single nontrivial relation
\begin{equation*}
\alpha_{n-r} = \frac{b'_{n-r}}{b''_1}\alpha^{-\frac{n}{r(n-r)}}.
\end{equation*}
At this point, we can compute and find that
\begin{equation} \label{eq:subsub}
dA = dB'dB'' \cdot \prod_{i=1}^{n-r-1} \beta'^{-ri}_i \prod_{i=1}^{r-1} \beta_{i}''^{-(n-r)(r-i)} \cdot \alpha_{n-r}^{-r(n-r)}\frac{d\alpha_{n-r}}{\alpha_{n-r}}.
\end{equation}
On the other hand, from the shape of the Jacobian matrix
\begin{equation*}
\begin{pmatrix}
- & \frac{\partial\alpha_i}{\partial\beta'_1} & - \\
& \vdots & \\
- & \frac{\partial\alpha_i}{\partial\alpha} & - \\
& \frac{\partial\alpha_i}{\partial\beta''_1} &\\
- & \vdots & -
\end{pmatrix}
= \begin{pmatrix}
1 & & & * & & & \\
& \ddots & & \vdots & & & \\
& & 1 & * & & & \\
& & & \frac{\partial\alpha_{n-r}}{\partial\alpha} & & & \\
& & & * & 1 & & \\
& & & \vdots & & \ddots & \\
& & & * & & & 1
\end{pmatrix}
\end{equation*}
and the fact that
\begin{equation*}
\frac{\partial\alpha_{n-r}}{\partial\alpha} = -\frac{n}{r(n-r)}\alpha_{n-r}\alpha^{-1},
\end{equation*}
we have
\begin{equation*}
-\frac{d\alpha_{n-r}}{\alpha_{n-r}} = \frac{n}{r(n-r)}\frac{d\alpha}{\alpha} + \mbox{(terms in $d\beta'$ and $d\beta''$)},
\end{equation*}
and the $d\beta'$ and $d\beta''$ parts here do not affect the outcome of the computation. Thus we can pretend that we have
\begin{align*}
&\alpha_{n-r}^{-r(n-r)}\frac{d\alpha_{n-r}}{\alpha_{n-r}} \\
&= \frac{-n}{r(n-r)}\left(\frac{b'_{n-r}}{b''_1}\alpha^{-\frac{n}{r(n-r)}}\right)^{-r(n-r)}\frac{d\alpha}{\alpha} \\
&= \frac{-n}{r(n-r)} \frac{\prod_{i=1}^{n-r-1} \beta'^{ri}_i}{\prod_{i=1}^{r-1} \beta_{i}''^{-(n-r)(r-i)}}\alpha^{n} \frac{d\alpha}{\alpha}.
\end{align*}
Substituting this into \eqref{eq:subsub}, we obtain
\begin{equation*}
dA = \frac{-n}{r(n-r)}dB'dB''\alpha^n\frac{d\alpha}{\alpha},
\end{equation*}
which completes the proof. By reorienting $\alpha$ (which moves in the opposite direction to $\alpha_{n-r}$) we can eliminate the negative sign.
\end{proof}
\section{Average number of flags}
The same technique as in the previous section can be applied to compute the $\mu_n$-average number of flags bounded by certain constraints, even though such a formula for a fixed lattice is not known and is probably difficult to find. In this section, we compute the average number of flags such that $\det A_i \leq H_i$ for $i = 1, \ldots, k$, i.e. the quantity
\begin{equation*}
\int_{X_n} \sum_{A_1 \subseteq \ldots \subseteq A_k \subseteq L \atop \dim A_i = d_i} \prod_{i=1}^k f_{H_i}(A_i) d\mu_n,
\end{equation*}
or equivalently
\begin{equation*}
\int_{X_n} \sum_{A_1 \in \mathrm{Gr}(L, d_1)} \sum_{\bar{A}_2 \in \mathrm{Gr}(L/A_1, d_2-d_1)} \ldots \sum_{\bar{A}_k \in \mathrm{Gr}(L/A_{k-1}, d_k-d_{k-1})} f_{H_1}(A_1)f_{H_2/\det A_1}(A_2) \ldots d\mu_n,
\end{equation*}
thereby proving Theorem \ref{thm:flags}.
First consider the case $k=2$, in which we are computing
\begin{equation*}
\int_{X_n} \sum_{A_1 \in \mathrm{Gr}(L, d_1)} \sum_{\bar{A}_2 \in \mathrm{Gr}(L/A_1, d_2-d_1)} f_{H_1}(A_1)f_{H_2/\det A_1}(A_2) d\mu_n.
\end{equation*}
By the same argument as in the previous section, and Lemma \ref{lemma:measure}, this equals
\begin{align*}
&\frac{n}{d_1(n-d_1)} \cdot \frac{\tau(n)}{\tau(d_1)\tau(n-d_1)} \cdot \frac{1}{2}\mathrm{vol}\left(\frac{\mathrm{SO}(n,\mathbb{R})}{\mathrm{SO}(n-d_1,\mathbb{R}) \times \mathrm{SO}(d_1,\mathbb{R})}\right) \cdot \\
&\int \int_{X_{d_1}}d\mu_{d_1} \int_{X_{n-d_1}} \sum_{\bar{A}_2} f_{H_2/\alpha}(\bar{A}_2) d\mu_{n-d_1} f_{H_1}(\alpha)\alpha^{n-1}d\alpha.
\end{align*}
Using the fact that $\mathrm{vol}(\mathrm{SO}(n,\mathbb{R})) = \prod_{i=2}^n iV(i)$, one finds that the product of the terms on the first line here equals $na(n,d_1)$. By Theorem \ref{thm:main}, the integral part is equal to
\begin{align*}
&a(n-d_1, d_2 - d_1)\int_0^{H_1} \alpha^{n-1} \cdot \alpha^{d_2-d_1} \left(\frac{H_2}{\alpha}\right)^{n-d_1} d\alpha \\
&= a(n-d_1, d_2 - d_1)H_2^{n-d_1} \int_0^{H_1} \alpha^{d_2-1}d\alpha \\
&= \frac{a(n-d_1, d_2 - d_1)}{d_2}H_1^{d_2}H_2^{n-d_1}.
\end{align*}
This proves the $k=2$ case. For general $k$, we proceed by induction: we have
\begin{align*}
&\frac{n}{d_1(n-d_1)} \cdot \frac{\tau(n)}{\tau(d_1)\tau(n-d_1)} \cdot \frac{1}{2}\mathrm{vol}\left(\frac{\mathrm{SO}(n,\mathbb{R})}{\mathrm{SO}(n-d_1,\mathbb{R}) \times \mathrm{SO}(d_1,\mathbb{R})}\right) \cdot \\
&\int \int_{X_{d_1}}d\mu_{d_1} \int_{X_{n-d_1}} \sum_{\bar{A}_2, \ldots, \bar{A}_k} f_{H_2/\alpha}(\bar{A}_2) \ldots f_{H_k/\det A_{k-1}\alpha}(\bar{A}_k) d\mu_{n-d_1} f_{H_1}(\alpha)\alpha^{n-1}d\alpha.
\end{align*}
The first line is exactly the same as in the $k=2$ case. As for the integral, writing $\mathfrak{d}' = (d_2-d_1, \ldots, d_k-d_1)$, and using the induction hypothesis, we find that it is equal to
\begin{align*}
&a(n-d_1, \mathfrak{d}')\int_0^{H_1} \alpha^{n-1} \cdot \alpha^{d_k-d_1} \prod_{i=2}^{k}\left(\frac{H_i}{\alpha}\right)^{d_{i+1}-d_{i-1}} d\alpha \\
&= a(n-d_1, \mathfrak{d}')\prod_{i=2}^{k}{H_i}^{d_{i+1}-d_{i-1}}\int_0^{H_1} \alpha^{d_2-1} d\alpha \\
&= \frac{a(n-d_1, \mathfrak{d}')}{d_2}\prod_{i=1}^{k}{H_i}^{d_{i+1}-d_{i-1}}.
\end{align*}
Since
\begin{equation*}
a(n-d_1, \mathfrak{d}') = a(n-d_1, d_2-d_1) \prod_{i=2}^{k-1} \frac{n-d_{i-1}}{d_{i+1}-d_{i-1}}a(n-d_i, d_{i+1} - d_i),
\end{equation*}
this gives the desired result.
\begin{thebibliography}{99}
\bibitem{BSW} S. Bai, D. Stehl\'e, W. Wen. Measuring, simulating and exploiting the head concavity phenomenon in BKZ. \emph{Advances in cryptology --- ASIACRYPT 2018}. Part I, 369-404, Lecture Notes in Comput. Sci., 11272, Springer, Cham, 2018.
\bibitem{DM} S. G. Dani and G. Margulis. Limit distribution of orbits of unipotent flows and values of quadratic forms. Advances in Soviet Math., Vol 16, 1993, pp. 91-137.
\bibitem{FMT} J. Franke, Y. Manin, and Y. Tschinkel. Rational points of bounded height on Fano varieties. Invent. Math. 95 (1989), no. 2, 421-435.
\bibitem{Kim} S. Kim. Random lattice vectors in a set of size $O(n)$. Int. Math. Res. Not. (2020), 2020(5): 1385-1416.
\bibitem{Kim2} S. Kim. Counting rational points of a Grassmannian. arXiv: 1908.01245
\bibitem{KV} S. Kim and A. Venkatesh. The behavior of random reduced bases. Int. Math. Res. Not. (2018), 2018(20): 6442-6480.
\bibitem{Rogers} C. A. Rogers, Mean values over the space of lattices. Acta Math. 94 (1955), 249-287.
\bibitem{Sch} W. M. Schmidt. Asymptotic formulae for point lattices of bounded determinant and subspaces of bounded height. Duke Math. J. 35 (1968), 327-339.
\bibitem{Sch3} W. M. Schmidt. Masstheorie in der Geometrie der Zahlen. Acta Math. 102, no. 3-4 (1959): 159-224.
\bibitem{SE94} P. Schnorr and M. Euchner. Lattice basis reduction: improved practical algorithms and solving subset sum problems. \emph{Math. Programming} 66 (1994), no. 2, Ser. A, 181-199.
\bibitem{SW} U. Shapira and B. Weiss. A volume estimate for the set of stable lattices. Comptes Rendus Math\'ematique 352, no.11 (2014), pp.875-879.
\bibitem{Shi} G. Shimura. Introduction to the arithmetic theory of automorphic functions. Princeton University Press, Princeton, N.J., 1971.
\bibitem{Sie} C. L. Siegel. A mean value theorem in geometry of numbers. Ann. of Math. (2) 46, (1945). 340-347.
\bibitem{Sod} A. S\"odergren, On the Poisson distribution of lengths of lattice vectors in a random lattice. Math. Z. 269 (2011), 945-954.
\bibitem{SS} A. S\"odergren and A. Str\"ombergsson. On the generalized circle problem for a random lattice in large dimension. Adv. Math. 345 (2019), 1042-1074.
\bibitem{Thu2} J. L. Thunder. Asymptotic estimates for rational points of bounded height on flag varieties. Compositio Math. 88 (1993), no. 2, 155-186.
\bibitem{Thu3} J. L. Thunder. Higher-dimensional analogs of Hermite's constant. Michigan Mathematics Journal 45, no. 2 (1998): 301-314.
\end{thebibliography}
\end{document}
\begin{document}
\journal{Computers \& Mathematics with Applications}
\title{Discontinuous Galerkin method for coupling hydrostatic free surface flows to saturated subsurface systems}
\author[FAU]{Andreas Rupp}
\ead{rupp@math.fau.de}
\author[AWI,FAU]{Vadym Aizinger\corref{cor}}
\ead{vadym.aizinger@awi.de}
\author[FAU]{Balthasar Reuter}
\ead{reuter@math.fau.de}
\author[FAU]{Peter Knabner}
\ead{knabner@math.fau.de}
\address[FAU]{Friedrich--Alexander University of Erlangen--N\"urnberg, Department of Mathematics,
Cauerstra{\ss}e~11, 91058~Erlangen, Germany}
\cortext[cor]{Corresponding author}
\address[AWI]{Alfred Wegener Institute, Helmholtz Centre for Polar and Marine Research, Am Handelshafen 12, 27570 Bremerhaven, Germany}
\date{Received: date / Accepted: date}
\begin{abstract}
We formulate a~coupled surface/subsurface flow model that relies on hydrostatic equations with free surface in the free flow domain and on the Darcy model in the subsurface part. The model is discretized using the local discontinuous Galerkin method, and a~statement of discrete energy stability is proved for the fully non-linear coupled system.
\end{abstract}
\begin{keyword}
Darcy flow \sep hydrostatic equations \sep three-dimensional shallow water equations with free surface \sep coupled model \sep local discontinuous Galerkin method \sep discrete energy stability analysis
\end{keyword}
\maketitle
\section{Introduction}\label{sec:introduction}
The interaction between free flow and subsurface systems (the latter either saturated or unsaturated) is important for a~variety of environmental applications, e.g. infiltration of overland flow into the soil during rainfall, contaminant propagation into the subsurface, sedimentation processes, interaction of seas, lakes, rivers, or wetlands with groundwater aquifers. Mathematical models for such coupled surface/subsurface flows generally express the conservation of mass and momentum in the coupled system. Coupled models usually pose substantial challenges on various levels: Mathematical -- due to differences in PDE system types in different subdomains giving rise to well-posedness and stability issues, numerical -- due to a~pronouncedly multi-scale character of the flow, and computational -- arising from the growing algorithmic complexity and increased performance and parallel scalability demands.
Depending on the target application and the level of modeling complexity, different model combinations in the surface and subsurface subdomains have been considered in the literature; the aspects covered include:
\begin{itemize}
\setlength\itemsep{0pt}
\item modeling approaches, in particular various choices of conditions at the model interface,
\item numerical methodology focusing on sub-problem discretizations and on solution algorithms that become critically important in the case of time-dependent flows,
\item theoretical issues mainly investigating the well-posedness, stability, and accuracy of coupled formulations.
\end{itemize}
In the context of geophysical flows, one can distinguish between two main types of fluid in the free flow subdomain, water and air, although both can certainly transport various additional substances. The subsurface systems usually contain either one (water), two (water and air), or three (water, air, and, e.g. oil) distinct phases, and each of those can furthermore transport additional species. The coupled models investigating the air flows (or transport of other gases dissolved in the air, e.g. CO$_2$) usually consider Stokes model in the surface subdomain and the one- or two-phase Darcy or Richards equations in the subsurface part~\cite{Rybak_etal_15,Mosthaf_Baber_etal_11}.
The modeling efforts for flow and transport processes involving water -- such as the present study -- cover a~much greater range of models in the free surface flow subdomain. A~number of recent studies (see \cite{Spanoudaki2009,Maxwell2014} for an~intercomparison) consider coupling free surface flows represented either by the 1D/2D shallow water equations~\cite{Dawson_08} or even simpler models (e.g. the kinematic wave equation~\cite{Sochala_Ern_Piperno_09}, a~diffusion wave approximation of the Saint--Venant equation~\cite{Sulis_etal_10}) with saturated subsurface flow described by the Richards or Darcy equations.
The theoretical aspects of coupled surface/subsurface flow modeling such as the well-posedness and the stability have also attracted some attention in the last decade. The most relevant studies in the context of the present work consider a~3D Navier-Stokes/Darcy-coupling based on a~discontinuous Galerkin (DG) method or on various combinations~\cite{Chidyagwai2009,Girault2013,Cesmelioglu2013,Badea2010} of the DG and finite element methods (see overview in~\cite{Discacciati2009}).
The hydrostatic primitive equations (sometimes also called the 3D shallow water equations) employed in our work is the most commonly used model for simulating circulation in geophysical domains with free surface such as oceans, lakes, estuaries, etc. The main assumption underlying this model (and setting it apart from the incompressible Navier-Stokes equations it is derived from) is the ratio between the horizontal and the vertical dimensions of at least 20:1 \cite[Sec.~2.3]{Vreugdenhil} with similar ratios for the horizontal to the vertical velocities and accelerations. This clear separation of the horizontal from the vertical scales is a~critical aspect of the hydrostatic modeling and is reflected in the direction of the gravity force, turbulence parametrizations, computational meshes made up of thin long elements with strictly vertical lateral faces, and many other details. This system also serves as the starting point for the derivation of the well known 2D shallow water equations.
Although the hydrostatic primitive equations is a~widely used model, the aspects of well-posedness and stability of this PDE system as well as similar investigations of its discretizations are not very common and certainly appear to be neglected compared to more general models such as incompressible Navier--Stokes equations or less general ones such as 2D shallow water equations. The exceptions include works by Lions et al.~\cite{Lions1992a,Lions1992b}, Azerad~\cite{Azerad2001}, and the existence proofs for global strong solutions~\cite{Kobelkov2006,Titi2012}. Regarding the finite element analysis, one can note several works of Guill{\'e}n-Gonz{\'a}lez and co-workers treating this discretized system and its analysis as the limiting case of the Stokes system~\cite{GG2005,GG2015a,GG2015b} and our previous study of the DG method~\cite{AizingerPaper}. However, very few authors consider the problem in its full complexity and include the non-linear advection, free surface, or attempt to handle the difficulties arising from the hydrostatic approximation of the vertical velocity component. All aforementioned works except for~\cite{AizingerPaper} make the rigid lid assumption, \cite{GG2015b} introduces a~viscosity term into the continuity equation; other common simplifications include omitting the non-linear advection~\cite{GG2015a,GG2015b} and factoring out the vertical velocity~\cite{GG2005,Titi2012}.
The area of numerical modeling for subsurface applications in all its facets enjoys far more attention; this concerns the development and testing of new discretization techniques as well as their analysis. We refer the interested reader to a~recent article~\cite{DiPietro2014} for an overview.
The present study formulates a~coupled model consisting of the free surface flows represented by the three-dimensional hydrostatic equations and a~subsurface flow system modeled by Darcy's law. A~coupling condition is introduced based on a~special form of dynamic pressure, this coupling is then motivated using the weak formulation of the coupled system. The model equations are discretized using the local discontinuous Galerkin (LDG) method introduced in~\cite{DawsonAizinger2005} and further developed in~\cite{AizingerPDPN2013} for the hydrostatic free surface system and in~\cite{AizingerRSK2016,RuppKnabner2017,RuppKnabnerDawson2018} for Darcy's law. Finally, a~statement of semi-discrete energy stability is proved for the full non-linear formulation that also accounts for the dynamic free surface in the free flow domain.
The rest of the current paper is structured as follows. The next section introduces the mathematical models for the free surface and subsurface flow systems and proposes the interface conditions. In Sec.~\ref{sec:weak}, the weak problem formulation for the coupled problem is provided, and our choice of interface conditions is motivated by proving a~statement of weak steady-state stability for homogeneous boundary conditions. In Sec.~\ref{sec:discrete}, both problems are discretized using the LDG method, and a~statement of discrete stability is proved in Sec.~\ref{sec:analysis}. A~convergence study using the proposed formulation is given in Sec.~\ref{sec:numerical}, and a~conclusions section wraps up this work.
\section{Mathematical model}\label{sec:model}
\subsection{Computational domain}
A very important feature of 3D geophysical flow models is their natural an\-isotropy due to the gravity force acting in the vertical direction. This fact is usually reflected in the mathematical and numerical formulations as well as in the construction of computational domains and grids. The top boundary of most 3D surface flow domains is a dynamically changing surface whose movements correspond to time variations in the free surface elevation, although some models make a 'rigid lid' assumption to avoid increased computational costs connected with dynamically changing meshes.
Let $\Omega(t) \subset {{\rm I}\!{\rm R}}^3$ (see Figure~\ref{figure0}) be our time-dependent domain for the hydrostatic free flow equations. We define $\Pi$ as the standard orthogonal projection operator from ${\rm I}\!{\rm R}^3$ to ${\rm I}\!{\rm R}^2$ ($\Pi(x, y, z) = (x, y)$, $\forall (x, y, z) \in {\rm I}\!{\rm R}^3$), and ${\Omega_{2}} \coloneqq \Pi(\Omega(t))$. We require our top and bottom boundaries to be single-valued functions defined on ${\Omega_{2}}$ at any time (this excludes, e.g., wave breaking situations).
The \textcolor{golden}{golden} top boundary of the domain $\p \Omega_{top}(t)$ is assumed to be the only moving boundary. The \textcolor{red}{red} bottom $\p \Omega_{bot}$ and \textcolor{blue}{blue} lateral $\p \Omega_{lat}(t)$ boundaries are considered to be fixed (though the height of the lateral boundaries can vary with time according to the movements of the free surface). We also require the lateral boundaries to be strictly vertical.
$\p \Omega_{bot}$ separates the time-dependent domain $\Omega(t)$ of the hydrostatic equations from the fixed domain $\tilde{\Omega}$ of Darcy flow. Here, the \textcolor{red}{red} $\p \Omega_{bot} = \p \tilde{\Omega}_{top}$ -- i.e. the bottom boundary of the free surface flow domain is the top boundary of the Darcy domain. The \textcolor{green}{green} boundary is the bottom boundary of the Darcy domain $\p \tilde{\Omega}_{bot}$.
In the following, all 2D counterparts of 3D vectors, operators, etc. consisting of the first two components of the former will be denoted by the subscript '2' without separate definitions (e.g., $\nabla_{2} \coloneqq (\p_x, \p_y)^T$). In a~similar manner, all functions defined on domain ${\Omega_{2}}$ will be trivially evaluated on $\Omega(t)$ via a~composition with $\Pi$, i.e. $\xi(x,y,z) \coloneqq \xi(\Pi(x,y,z))$. Furthermore, all unknowns, sets, etc. associated with the Darcy domain will be marked by tilde~$\tilde \cdot$.
\begin{figure}
\caption{Vertical cross section of computational domain $\Omega(t)$ for hydrostatic equations on top of (fixed) computational domain $\tilde{\Omega}$.}
\label{figure0}
\end{figure}
\subsection{Primitive hydrostatic equations}\label{sec:hydrostatic}
The primitive hydrostatic equations with constant (unit) density describe the following properties of the free surface flow system \cite{Vreugdenhil}:
\begin{itemize}
\item 2D conservation of volume (mass) also known as the primitive continuity equation (PCE)
\begin{equation}\label{pce}
\p_t \xi \ + \ \nabla_{2} \cdot \int_{z_b}^\xi {\bf u}_{xy} \, dz \ = \ F_H,
\end{equation}
where $\xi, z_b$ are the values of the $z$ coordinate with respect to some datum at the free surface and the surface/subsurface flow interface, respectively, ${\bf u} = (u,v,w)^T$ is the velocity vector, and $F_H$ is the source term that accounts for the normal flux from/to the subsurface domain.
\item 3D conservation of momentum (in conservative form)
\begin{equation}\label{momentum_cons}
\p_t {\bf u}_{xy} \ + \ \nabla \cdot \left({\bf u}_{xy} \otimes {\bf u} \ - \ {\mathcal{D}} \nabla {\bf u}_{xy} \right) \ + \ g \nabla_{2} \xi \ - \ \left(\begin{array}{cc} 0 & -f_c\\ f_c & 0 \end{array}\right) {\bf u}_{xy} \ = \ \bF_U,
\end{equation}
where the wind stress, the atmospheric pressure gradient, and the tidal potential are combined into a body force term $\bF_U$, $f_c$ is the Coriolis coefficient, and $g$ is the acceleration due to gravity. To prevent our analysis from being obscured by nonessential details, we simplify the momentum equations by omitting the Coriolis term and by rescaling the system so that $g=1$. The omission of the Coriolis term does not, in fact, affect the final result at all since the Coriolis force is energy neutral both in the continuous and in the discrete sense and thus cancels out in the energy norm (see \cite{AizingerDiss}). In~\eqref{momentum_cons}, ${\mathcal{D}} = {\mathcal{D}}({\bf u})$ denotes the tensor of eddy viscosity coefficients that can depend on the flow velocity (see, e.g., \cite{Davies1986}) defined as
\begin{equation*}
{\mathcal{D}} = \left( \ba{cc} D_u & 0\\ 0 & D_v \ea \right), \qquad {\mathcal{D}} \nabla {\bf u}x \coloneqq \left( \ba{c} (D_u \nabla u)^T\\ (D_v \nabla v)^T \ea \right) \in \mathbb R^{2\times 3},
\end{equation*}
where $D_u$, $D_v$ and their inverses are $3 \times 3$ uniformly s.p.d. (symmetric positive definite) matrices.
In Eq.~\eqref{momentum_cons}, $\otimes$ denotes the \emph{tensor product}, and $\nabla \cdot$ is the \emph{matrix divergence} defined as
\begin{equation*}
\left(\nabla \cdot A\right)_i \coloneqq \left(\sum_{j = 1}^n \p_{x_j} (A)_{i,j}\right)_i \qquad \text{for } i = 1, \ldots, m, \; A \in \mathbb R^{m \times n}.
\end{equation*}
$D_u(\cdot, \cdot)$, $D_u^{-1}(\cdot, \cdot)$ being uniformly s.p.d. is equivalent to the existence of a constant $C_{D} \ge 1$ (independent of ${\bf u}$) such that for all $\boldsymbol x \in \mathbb R^3$
\begin{equation*}\label{REM:spdTens}
C_{D}^{-1} \| \boldsymbol x \|^2_2 \le \boldsymbol x^T D_u({\bf u})\, \boldsymbol x \le C_{D} \| \boldsymbol x \|^2_2, \qquad C_{D}^{-1} \| \boldsymbol x \|^2_2 \le \boldsymbol x^T D_u^{-1}({\bf u})\, \boldsymbol x \le C_{D}\, \| \boldsymbol x \|^2_2.
\end{equation*}
This implies $C_{D} \ge \max\{\| D_u \|_{L^\infty(\Omega(t))}, \| D_u^{-1} \|_{L^\infty(\Omega(t))} \}$. For simplicity, we also assume that $D_v, D_v^{-1}$ satisfy the above inequalities with the same constant $C_{D}$.
In the LDG framework employed in the present work, an~auxiliary variable ${\scriptstyle{\mathcal{Q}}}$ is introduced, and the second-order momentum equations \rf{momentum_cons} are re-written in mixed form
\begin{align}
\p_t {\bf u}x \ + \ \nabla \cdot \left({\bf u}x \otimes {\bf u} \ + {\scriptstyle{\mathcal{Q}}} \right) \ + \nabla_{2} \xi & = \ \bF_U,\label{eq:mixed_momentum_1}\\
{\mathcal{D}}^{-1}({\bf u})\,{\scriptstyle{\mathcal{Q}}} + \nabla {\bf u}x & = 0,\label{eq:mixed_momentum_2}
\end{align}
where $\nabla{\bf u}x$ denotes the \emph{Jacobian} of ${\bf u}x$.
Note that Eqs.~\eqref{eq:mixed_momentum_1}, \eqref{eq:mixed_momentum_2} actually represent a~system of $2+2\times3$ equations.
\item 3D conservation of volume (mass) also known as the continuity equation
\begin{equation}\label{cont_eq}
\nabla \cdot {\bf u} \ = \ 0.
\end{equation}
Note that, differently from the incompressible Navier-Stokes system, \rf{cont_eq} is not a constraint used to determine pressure but rather an~equation for $w$.
\end{itemize}
The following boundary conditions (see \cite{Vreugdenhil} for details) are specified for the free surface flow system (except for the interface boundary given in Sec.~\ref{sec:interface}):
\begin{itemize}
\item Denoting by $\bn = (n_x, n_y, n_z)^T$ an~exterior unit normal to the boundary of $\Omega(t)$ we distinguish between lateral inflow $\p \Omega_{i}(t) : = \{\p \Omega_{lat}(t) : {\bf u} \cdot \bn \le 0\}$ and lateral outflow $\p \Omega_{o}(t) \coloneqq \p \Omega_{lat}(t) \setminus \p \Omega_{i}(t)$ boundaries.
\begin{equation}\label{hydrostatic_lateral_bc}
{{\bf u}x}\big|_{\p \Omega_{lat}}\ = \ {\bf u}xh, \qquad \xi\big|_{\Pi(\p \Omega_{i}(t))} \ = \ \hat{\xi}.
\end{equation}
Even though the velocity is specified on the whole lateral boundary of ${\Omega(t)}$, the advection terms only use the normal flux boundary condition at the outflow boundary ${\bf u}xh \cdot \bnx$ (see \eqref{discrete_general_2}). This somewhat unusual placement of flux (at $\Omega_o$) and water elevation (at $\Omega_i$) allows us to compactify our discrete stability analysis and can be reversed to a~more standard configuration -- at the cost of some additional technicalities.
\item The free surface boundary conditions have the form
\begin{equation}\label{hydrostatic_surface_bc}
\nabla u(\xi) \cdot \bn \ = \ \nabla v(\xi) \cdot \bn \ = \ 0.
\end{equation}
\item Additionally, initial data for ${\bf u}$ and $\xi$ is given. Note that the initial and boundary conditions must be {\em compatible}.
\end{itemize}
Thus the free flow system that we consider in this problem consists of Eqs.~\eqref{pce}, \eqref{eq:mixed_momentum_1}, \eqref{eq:mixed_momentum_2}, \eqref{cont_eq} complemented by the corresponding initial and boundary conditions. Also note that the introduced simplifications neither affect the non-linearity of the system nor lower any analysis hurdles.
\subsection{System of equations for 3D Darcy flow}\label{sec:darcy}
Single phase flow through a porous medium $\tilde{\Omega}$ is usually modeled by Darcy's law linking the hydraulic head $\tilde h$ and the seepage velocity $\tilde{\mathbf u} = (\tilde u, \tilde v, \tilde w)$. In mixed formulation, the equations for constant (unit) density have the form:
\begin{subequations}
\begin{align}
\p_t \tilde h + \nabla \cdot \tilde{\mathbf u} & = \tilde f, \label{EQ:Darcy:cont1}\\
\tilde \D^{-1}(\tilde h) \tilde{\mathbf u} + \nabla \tilde h & = 0 \label{EQ:Darcy:cont2}
\end{align}
for~a given source term $\tilde f$ and with $\tilde \D$ and its inverse uniformly s.p.d. tensors (similarly to Sec.~\ref{sec:hydrostatic} and with the same constant $C_{D}$). The boundary conditions for the flux and head are given by
\begin{equation}\label{darcy_bc}
(\tilde{\mathbf u} \cdot \tilde \bn)\big|_{\p \tilde{\Omega}_N} = \hat u_{\tilde n}, \qquad \tilde h{\big|_{\p \tilde{\Omega}_D}} = \hat h.
\end{equation}
\end{subequations}
Here, $\tilde \bn$ denotes the outward unit normal with respect to $\tilde{\Omega}$. In addition to this, initial data $\tilde h_0$ is given. The bottom and lateral boundaries of $\tilde{\Omega}$ are either Dirichlet or Neumann boundaries, while a coupling boundary condition is imposed at the top boundary. Eqs.~\eqref{EQ:Darcy:cont1}, \eqref{EQ:Darcy:cont2} have been simplified via division by the specific storativity $\tilde \Phi(t, \mathbf x) \ge \tilde \Phi_0 > 0$.
\subsection{Interface conditions}\label{sec:interface}
Specifying the interface conditions between the sub-models is not a simple task in the context of the present study; the main difficulty is finding a~set of transition conditions that guarantee a~physically founded and mathematically well-posed system of equations for the coupled model. In our case, this task is more challenging for the free flow model due to its greater complexity (i.e., the presence of non-linear advection terms). Thus, even a~standard variational formulation of the incompressible Navier-Stokes/Darcy system includes an undetermined-sign term (see \cite{Girault2009}) on the transition boundary. By resorting to a~linear Stokes model some authors avoid this problem (see discussions of the modeling and coupling issues in \cite{Discacciati2009,Girault2013}). Another avenue to handle this problem involves modifying the momentum equations by adding the so called Temam stabilization \cite{Temam1968} term that is equal to zero in the strong sense but can be exploited in a~way that provides some additional control over the kinetic energy in the weak formulation.
In this study, no Temam stabilization is used, and the full non-linear advection is retained. We impose the following transition conditions at the boundary $\p {\Omega(t)}_{bot}$ = $\p \tilde{\Omega}_{top}$ between the free surface and subsurface flow subdomains:
\begin{itemize}
\item the continuity of the normal flux (volume/mass conservation)
\begin{equation}\label{transition_bc_1}
({\bf u} \cdot \bn)\big|_{\p {\Omega(t)}_{bot}} = - (\tilde{\mathbf u} \cdot \tilde \bn)\big|_{\p \tilde{\Omega}_{top}};
\end{equation}
\item the continuity of pressure (head), where we use a~special form of dynamic pressure in the free flow subdomain (cf.~\cite{Girault2009,Fetzer2016}) and ignore viscous terms (also see the discussion in Sec.~\ref{sec:weak-darcy})
\begin{equation}\label{transition_bc_2}
\tilde h\big|_{\p \tilde{\Omega}_{top}} = \left( \xi + \frac{{\bf u}x \cdot {\bf u}x}{2 g}\right)\bigg|_{\p {\Omega(t)}_{bot}} \quad \mbox{($g$=1 was assumed in Sec.~\ref{sec:hydrostatic} and is included here for consistency)},
\end{equation}
where we recall the hydraulic head definition: $\tilde h = z + p/(g \rho_w)$, with $z$ denoting the vertical coordinate of the point with respect to the datum, $\rho_w$ the water density, and $p$ the fluid pressure;
\item the friction law on horizontal velocity components modeled on the standard friction laws for turbulent shallow-water flows (see, e.g., \cite{Vreugdenhil}) and rather similar to the Beavers-Joseph-Saffman~\cite{Saffman1971} condition very common in coupled surface/subsurface flow applications
\begin{equation}\label{transition_bc_3}
{\mathcal{D}}_u \nabla u(z_b) \cdot \bn \ = \ -C_f({\bf u}) u(z_b), \qquad {\mathcal{D}}_v \nabla v(z_b) \cdot \bn \ = \ -C_f({\bf u}) v(z_b),
\end{equation}
where the minus sign is due to $\bn$ being an {\em exterior} unit normal to $\p {\Omega(t)}_{bot}$, and $C_f({\bf u})>0$ is the bottom friction coefficient that in shallow-water applications is usually represented by either $C_f({\bf u})=const$ for a~linear or by $C_f({\bf u}) = C'_f \,|{\bf u}(z_b)|$ with $C'_f = const$ for a~quadratic friction law.
\end{itemize}
The interface conditions specified above are modeled closely on those used in Navier-Stokes/Darcy coupled models (these are the closest analog to our setting found in the literature, see, e.g.~\cite{Fetzer2016}) with certain modifications motivated by the important differences between the incompressible Navier-Stokes and the hydrostatic model used here. The main difference reflected both in the dynamic pressure term \eqref{transition_bc_2} and in the friction formula \eqref{transition_bc_3} is the fact that a~hydrostatic system does not conserve the vertical momentum; instead, the vertical velocity is computed by the continuity equation~\eqref{cont_eq} that expresses the 3D conservation of mass/volume. This circumstance makes a~physically consistent formulation of coupling conditions for the momentum equations particularly challenging.
\section{Weak formulation of the coupled system}\label{sec:weak}
\subsection{Weak formulation of the hydrostatic equations}\label{sec:weak-swe}
To simplify notation we use from now on $\| u \|_\Omega$ for the $L^2$ norm of $u$ and $( \ . \ , \ . \ )_\Omega$, $< \ . \ , \ . \ >_\gamma$ for the $L^2$ inner products on domains $\Omega \subset {{\rm I}\!{\rm R}}^d$ and surfaces $\gamma \subset {{\rm I}\!{\rm R}}^d$, respectively. Used in conjunction with vectors or tensors, these products are to be understood as sums of componentwise $L^2$ inner products.
Next, we obtain a~weak formulation of the hydrostatic system by multiplying Eqs.~\eqref{pce}, \eqref{eq:mixed_momentum_1}, \eqref{eq:mixed_momentum_2}, \eqref{cont_eq} with some smooth test functions and integrating by parts. For the PCE, we get:
\begin{equation*}
\left(\p_t \xi, \delta \right)_{\Omega_{2}}
+ \lan \int_{z_b}^\xi {\bf u}x \, dz \cdot \bnx, \delta \ran_{\p {\Omega_{2}}}
\hspace{-2mm} - \left( \int_{z_b}^\xi {\bf u}x \, dz \cdot \nabla_{2}, \delta \right)_{\Omega_{2}}
= \left( F_H, \delta \right)_{\Omega_{2}}.
\end{equation*}
Exploiting the fact that the lateral boundaries of ${\Omega(t)}$ are strictly vertical and substituting \eqref{transition_bc_1} into $F_H$, we can rewrite the equation above in a special 2D/3D form
\begin{subequations}\label{eq:weak-hydrostatic}
\begin{equation}\label{eq:weak-h}
\left(\p_t \xi, \delta \right)_{\Omega_{2}} + \lan {\bf u}x \cdot \bnx, \delta \ran_{\p \Omega_{lat}} - \left({\bf u}x \cdot \nabla_{2}, \delta \right)_{\Omega(t)} + \lan \tilde {\bf u} \cdot \bn, \delta \ran_{\p \Omega_{bot}} = 0.
\end{equation}
Note that \eqref{eq:weak-h} is well defined for any $\xi, \delta \in H^1({\Omega_{2}})$ and a.e. $t \in [0, T]$.
A~weak form of the momentum equations given by
\begin{align}
& \left(\p_t {\bf u}x, {\gvec \varphi} \right)_{\Omega(t)} + \lan ({\bf u}x \otimes {\bf u} + {\scriptstyle{\mathcal{Q}}}) \cdot \bn + \xi \bnx, {\gvec \varphi} \ran_{\p {\Omega(t)}}
- \left( ({\bf u}x \otimes {\bf u} + {\scriptstyle{\mathcal{Q}}}) \cdot \nabla + \xi \nabla_{2}, {\gvec \varphi} \right)_{\Omega(t)} = \left( \bF_U, {\gvec \varphi} \right)_{\Omega(t)},\label{eq:weak-u} \\
& \left({\mathcal{D}}^{-1}({\bf u})\;{\scriptstyle{\mathcal{Q}}}, {\it \Psi} \right)_{\Omega(t)} + \lan {\bf u}x \otimes \bn, {\it \Psi} \ran_{\p {\Omega(t)}}\ - \ \left( {\bf u}x \otimes \nabla, {\it \Psi} \right)_{\Omega(t)} = 0.\label{eq:weak-q}
\end{align}
For the continuity equation, we get
\begin{equation}\label{eq:weak-w}
\lan {\bf u} \cdot \bn, \sigma \ran_{\p {\Omega(t)}}\ - \ \left({\bf u} \cdot \nabla, \sigma \right)_{\Omega(t)}\ = \ 0.
\end{equation}
\end{subequations}
Eqs.~\eqref{eq:weak-u}--\eqref{eq:weak-w} are well defined $\forall {\bf u}x, {\gvec \varphi} \in H^1({\Omega(t)})^2$, ${\scriptstyle{\mathcal{Q}}}, {\it \Psi} \in H^1({\Omega(t)})^{2 \times 3}$, $w, \sigma \in H^1({\Omega(t)})$, and for a.e. $t \in [0, T]$.
\subsection{Weak formulation of Darcy equation}\label{sec:weak-darcy}
For smooth test functions $\tilde{\delta}$ and $\tilde{\bphi}$, we multiply \eqref{EQ:Darcy:cont1} and \eqref{EQ:Darcy:cont2} by these test functions and integrate by parts.
\begin{subequations}\label{eq:weak-darcy}
\begin{align}
& \left(\p_t \tilde h, \tilde{\delta} \right)_{\tilde{\Omega}}
- \left(\tilde{\mathbf u} \cdot \nabla , \tilde{\delta} \right)_{\tilde{\Omega}}
+ \left\langle \tilde{\mathbf u} \cdot \tilde \bn, \tilde{\delta}\right\rangle_{\p \tilde{\Omega}}
= \left(\tilde f, \tilde{\delta} \right)_{\tilde{\Omega}},\label{eq:weak-tilde-h}\\
& \left({\mathcal{D}}^{-1}(\tilde h)\, \tilde{\mathbf u}, \tilde{\bphi} \right)_{\tilde{\Omega}}
- \left(\tilde h\, \nabla , \tilde{\bphi} \right)_{\tilde{\Omega}}
+ \left\langle \tilde h \, \tilde \bn, \tilde{\bphi} \right\rangle_{\p \tilde{\Omega}} = 0\label{eq:weak-tilde-u}.
\end{align}
\end{subequations}
The above terms are well defined for $\tilde h,\tilde{\delta} \in H^1(\tilde{\Omega})$ and $\tilde{\mathbf u},\tilde{\bphi} \in H^1(\tilde{\Omega})^3$, and for a.e. $t \in [0,T]$.
\subsection{Weak energy estimate for the coupled system}\label{sec:weak-coupled}
In this section, we formulate a~statement of weak energy stability for the coupled system to illustrate the difficulties connected with finding a~workable set of transition conditions at the coupling interface. We consider a~stationary variant of problem \eqref{eq:weak-hydrostatic}, \eqref{eq:weak-darcy} and further simplify our task by using homogeneous boundary conditions for velocities and fluxes in both the free flow and subsurface subdomains. That is $\partial \Omega_{lat} = \partial \Omega_i$ and $\partial \tilde{\Omega} \setminus \partial \tilde{\Omega}_{top} = \p \tilde{\Omega}_N$.
\noindent
Denoting by $H^1_{0,\Gamma}(\Omega)^d$ for $\Gamma \subset \p \Omega$ the space $\{f \in H^1(\Omega)^d: f\big|_{\Gamma} = 0\}$, we select the test and trial spaces as follows:
\begin{equation*}
\xi, \delta \in H^1(\Omega_2), \quad {\bf u}x, {\gvec \varphi} \in H^1_{0,\partial \Omega_{lat}}(\Omega)^2, \quad {\scriptstyle{\mathcal{Q}}}, {\it \Psi} \in H^1_{0, \partial \Omega_{top}} (\Omega)^{2\times3}, \qquad
\tilde h, \tilde \delta \in H^1(\tilde{\Omega}), \qquad \tilde{\mathbf u}, \tilde {\gvec \varphi} \in H^1_{0, \partial \tilde{\Omega} \setminus \partial \tilde{\Omega}_{top}}(\tilde{\Omega})^3.
\end{equation*}
Setting $\delta = \xi,\, {\gvec \varphi} = {\bf u}x,\, {\it \Psi} = {\scriptstyle{\mathcal{Q}}}$ in \eqref{eq:weak-h}, \eqref{eq:weak-u}, \eqref{eq:weak-q} and using the definitions of test spaces and boundary conditions, we obtain
\begin{align*}
& -({\bf u}x, \nabla_2 \xi)_{{\Omega(t)}} + \lan \tilde{\mathbf u} \cdot \bn, \xi \ran_{\p \Omega_{bot}} = 0\\
& \lan {\bf u}x ({\bf u} \cdot \bn) + C_f {\bf u}x + \xi \bnx, {\bf u}x \ran_{\p \Omega_{bot}} + \lan {\bf u}x ({\bf u} \cdot \bn) + \xi \bnx, {\bf u}x \ran_{\p \Omega_{top}}
- \left( {\bf u}x({\bf u} \cdot \nabla) + {\scriptstyle{\mathcal{Q}}} \cdot \nabla + \xi \nabla_{2}, {\bf u}x \right)_{\Omega(t)} = \left( \bF_U, {\bf u}x \right)_{\Omega(t)}, \\
& \left({\mathcal{D}}^{-1}({\bf u})\;{\scriptstyle{\mathcal{Q}}}, {\scriptstyle{\mathcal{Q}}} \right)_{\Omega(t)} + \lan {\bf u}x, {\scriptstyle{\mathcal{Q}}} \cdot \bn \ran_{\p \Omega_{bot}} - \left( {\bf u}x , \nabla \cdot {\scriptstyle{\mathcal{Q}}} \right)_{\Omega(t)} = 0.
\end{align*}
Since $\p_t \xi =0$, we have $({\bf u} \cdot \bn)\big|_{\p \Omega_{top}}=0$. Also note that the integration by parts and the continuity equation~\eqref{cont_eq} give us
\begin{equation*}
\left( {\bf u}x({\bf u} \cdot \nabla), {\bf u}x \right)_{\Omega(t)} = \f 12 \left( {\bf u}, \nabla |{\bf u}x|^2 \right)_{\Omega(t)} = \f 12 \lan {\bf u}x (\tilde {\bf u} \cdot \bn), {\bf u}x \ran_{\p \Omega_{bot}}.
\end{equation*}
Adding all equations together and using some simplifications that utilize the boundary conditions and an~integration by parts of element integral terms, we obtain the statement for energy in the free flow subdomain
\begin{align*}
&\lan \tilde{\mathbf u} \cdot \bn, \xi \ran_{\p \Omega_{bot}} + \lan C_f {\bf u}x, {\bf u}x \ran_{\p \Omega_{bot}} + \left({\mathcal{D}}^{-1}({\bf u})\;{\scriptstyle{\mathcal{Q}}}, {\scriptstyle{\mathcal{Q}}} \right)_{\Omega(t)}
+ \f 12 \lan {\bf u}x (\tilde {\bf u} \cdot \bn), {\bf u}x \ran_{\p \Omega_{bot}} = \left( \bF_U, {\bf u}x \right)_{\Omega(t)}.
\end{align*}
Setting $\tilde{\delta} = \tilde h, \tilde{\bphi} = \tilde{\mathbf u}$ in \eqref{eq:weak-tilde-h}--\eqref{eq:weak-tilde-u}, integrating by parts, adding equations, and using the boundary conditions, we get
\begin{equation*}
\left(\tilde {\mathcal{D}}^{-1}(\tilde h)\, \tilde{\mathbf u}, \tilde{\mathbf u} \right)_{\tilde{\Omega}} + \left\langle \xi + \f {{\bf u}x \cdot {\bf u}x} {2}, \tilde{\mathbf u} \cdot \tilde \bn \right\rangle_{\p \tilde{\Omega}_{top}} = \left(\tilde f, \tilde h \right)_{\tilde{\Omega}}.
\end{equation*}
Since $\bn = - \tilde \bn$ on the interface boundary, the mass flux terms cancel out; also our choice of transition condition on the pressure becomes obvious. The statement of energy stability for the coupled system then reads:
\begin{equation*}
\left({\mathcal{D}}^{-1}({\bf u})\, {\scriptstyle{\mathcal{Q}}}, {\scriptstyle{\mathcal{Q}}} \right)_{\Omega(t)} + \lan C_f {\bf u}x, {\bf u}x \ran_{\p \Omega_{bot}} + \left(\tilde {\mathcal{D}}^{-1}(\tilde h)\, \tilde{\mathbf u}, \tilde{\mathbf u} \right)_{\tilde{\Omega}} = \left( \bF_U, {\bf u}x \right)_{\Omega(t)} +\left(\tilde f, \tilde h \right)_{\tilde{\Omega}}.
\end{equation*}
\section{Discrete Formulation}\label{sec:discrete}
\subsection{Basic definitions and mathematical analysis tools}\label{sec:tools}
In the following, $\mathcal Th$ denotes a~non-overlapping $d$-dimensional polytopic partition of $\Omega \in \{{\Omega(t)}, \tilde{\Omega}, {\Omega_{2}}\}$ (see \cite[Def.~1.12]{PietroErn}). All partitions are assumed to be \emph{geometrically conformal} (in the sense of \cite[Def. 1.55]{ErnGuerm}). All proofs and arguments also hold for geometrically non-conformal meshes, but the notation becomes more cumbersome. The \emph{test} and \emph{trial} spaces for our LDG method are defined as the $d$-dimensional ($d \ge 1$) \emph{broken polynomial spaces of order $k$}
\begin{equation*}
\mathbb P_k^d(\mathcal Th) \coloneqq \left\{ \mathbf v \in L^2(\Omega)^d \, : \, \mathbf v_{\mid {\mathcal K}} \mbox{ is a polynomial of degree at most $k$, } \forall {\mathcal K} \in \mathcal Th \right\}.
\end{equation*}
Let $\mathcal F = \mathcal F(\mathcal Th)$ be the set of faces; for a~scalar function $w$ and a~vector function $\mathbf v$, we define the average $\avg{ \cdot }$ and the jump $\jump{ \cdot}$ on $\p {\mathcal K}_i \cap \p {\mathcal K}_j$ for neighboring mesh elements ${\mathcal K}_i, {\mathcal K}_j \in \mathcal Th,\, {\mathcal K}_i \neq {\mathcal K}_j$ in the following way:
\begin{align*}
&\avg{ w } = \frac{1}{2} \left( w_{\mid {\mathcal K}_i} + w_{\mid {\mathcal K}_j} \right),
&&\avg{ \mathbf v} = \frac{1}{2} \left( \mathbf v_{\mid {\mathcal K}_i} + \mathbf v_{\mid {\mathcal K}_j} \right),\\
&\jump{ w } = w_{\mid {\mathcal K}_i}\bn_{{\mathcal K}_i} + w_{\mid {\mathcal K}_j}\bn_{{\mathcal K}_j},
&& \jump{ \mathbf v } = \mathbf v_{\mid {\mathcal K}_i} \cdot \bn_{{\mathcal K}_i} + \mathbf v_{\mid {\mathcal K}_j} \cdot \bn_{{\mathcal K}_j},
&\jumpt{ \mathbf v } = \mathbf v_{\mid {\mathcal K}_i} \otimes \bn_{{\mathcal K}_i} + \mathbf v_{\mid {\mathcal K}_j} \otimes \bn_{{\mathcal K}_j},
\end{align*}
where $\bn_{\mathcal K}$ is the outward unit normal with respect to ${\mathcal K}$. Note, that a jump in a scalar variable is a vector, whereas a jump of a vector is a scalar. In addition, $\jumpt{\cdot}$ is introduced for vectors to denote a~second order tensor resulting from using the scalar jump definition component-wise.
In our analysis, we use some well-known properties of jumps:
\begin{subequations}
\begin{align}
\jump{ab} & = \; \avg{a} \jump{b} + \jump{a} \avg{b},\label{jump1}\\
\avg{ab} & = \; \avg{a} \avg{b} + \frac 14 \jump{a} \cdot \jump{b}.\label{jump2}
\end{align}
\end{subequations}
The standard mathematical analysis tools used in this work include Young's and Cauchy-Schwarz' inequalities as well as the following results (see \cite[Sec.~1.4.1--1.4.3]{PietroErn})
\begin{Definition}[Shape and contact regularity]\label{def:regularity}
A family of meshes $\mathcal Th$ is called \emph{shape and contact regular} (for short \emph{regular}) if, for all $\Delta x> 0$, $\mathcal Th$ admits a geometrically conformal, \emph{matching simplicial submesh} $\bar{\mathcal T}h$ such that
\begin{enumerate}
\item $\bar{\mathcal T}h$ is \emph{shape-regular} in the sense of \cite{CiaHB}, i.e. there exists $\lambda_1 > 0$, independent of $\Delta x$, such that for all $\bar {\mathcal K} \in \bar{\mathcal T}h$
\begin{equation*}
\lambda_1 \Delta x_{\bar {\mathcal K}} \le \rho_{\bar {\mathcal K}},
\end{equation*}
where $\rho_{\bar {\mathcal K}}$ is the diameter of the largest ball that can be inscribed in $\bar {\mathcal K}$.
\item there exists a~constant $\lambda_2 > 0$ independent of $\Delta x$ such that for all ${\mathcal K} \in \mathcal Th$ and for all $\bar {\mathcal K} \in \bar{\mathcal T}h$ with $\bar {\mathcal K} \subset {\mathcal K}$
\begin{equation*}
\lambda_2 \Delta x_{\mathcal K} \le \Delta x_{\bar {\mathcal K}},
\end{equation*}
\item there exists a~constant $\lambda_3 > 0$ independent of $\Delta x$ such that for all ${\gamma} \in \mathcal F$
\begin{equation*}
\lambda_3 \Delta x \le \Delta x_{{\gamma}}.
\end{equation*}
\end{enumerate}
\end{Definition}
\begin{Lemma}[Discrete trace inequality]\label{lem:disc:trace}
Let $(\mathcal Th)$ be a regular mesh sequence with parameters $\lambda_1, \lambda_2, \lambda_3$. Then for all $\Delta x > 0$, all $\mathbf p \in \mathbb P^d_k(\mathcal Th)$, the following holds with $C_t$ only depending on $\lambda_1, \lambda_2, \lambda_3$, $d$, and $k$:
\begin{equation*}
\Delta x^{1/2} \sum_{{\gamma} \in \mathcal F} \| \mathbf p \|_{L^2({\gamma})} \; \le \; C_t \sum_{{\mathcal K} \in \mathcal Th} \| \mathbf p \|_{L^2({\mathcal K})} \;=\; C_t \| \mathbf p \|_{L^2(\Omega)}.
\end{equation*}
For ${\gamma}$ shared by elements ${\mathcal K}_i$ and ${\mathcal K}_j$, $\| \mathbf p \|_{L^2({\gamma})}$ is assumed to contain both traces
\begin{equation*}
\| \mathbf p \|_{L^2({\gamma})} \;=\; \| \mathbf p_{\mid {\mathcal K}_i} \|_{L^2({\gamma})} + \| \mathbf p_{\mid {\mathcal K}_j} \|_{L^2({\gamma})}.
\end{equation*}
\end{Lemma}
\subsection{Computational mesh and free surface representation}
Keeping in line with the specific anisotropy of $\Omega(t)$ we construct our 3D mesh by extending a 2D triangular mesh of ${\Omega_{2}}$ in the vertical direction resulting in a~3D mesh of $\Omega(t)$ that consists of one or more layers of prismatic elements. In order to better reproduce the bathymetry and the free surface elevation of the computational domain, top and bottom faces of prisms can be non-parallel to the $xy$-plane; however, the lateral faces are assumed to be strictly vertical.
For our analysis, we introduce the following sets of elements and faces:
\begin{itemize}
\item $I_e$ - set of prismatic elements in $\Omega(t)$;
\item $I_{e, 2D}$ - set of triangular elements in ${\Omega_{2}}$;
\item $I_{e, {\mathcal K}x}$ - set of prismatic elements corresponding to 2D element ${\mathcal K}x$;
\item $I_{lat}$ - set of interior lateral faces in $\Omega(t)$;
\item $I_{horiz}$ - set of interior horizontal faces in $\Omega(t)$;
\item $I_i, I_o$ - sets of exterior inflow and outflow lateral faces in $\Omega(t)$;
\item $I_{top}$ - set of exterior faces on the top boundary of $\Omega(t)$;
\item $I_{bot}$ - set of exterior faces on the bottom (transition) boundary of $\Omega(t)$;
\item $\tilde I_e$ - set of elements in $\tilde{\Omega}$;
\item $\tilde I_{int}$ - set of interior faces in $\tilde{\Omega}$;
\item $\tilde I_{D}$ - set of faces on Dirichlet boundary of $\tilde{\Omega}$;
\item $\tilde I_{top}$ - set of faces on top (transition) boundary of $\tilde{\Omega}$;
\item $\tilde I_N$ - set of faces on Neumann boundary of $\tilde{\Omega}$.
\end{itemize}
A key feature of our 3D LDG model is the fact that all primary variables -- including the free surface elevation -- are discretized using discontinuous polynomial spaces. As a result, computed values of the free surface elevation may have jumps across inter-element boundaries. If our finite element grids were to follow exactly the computed free surface elevation field this would cause the elements in the surface layer to have mismatching lateral faces (staircase boundary). We avoid this difficulty by employing a globally continuous (piecewise linear) free surface approximation that is obtained from the computed values of the free surface elevation with the help of a smoothing algorithm (see Fig.~\ref{Mesh_smoothing}) and denote by $\Xi_s$ the free surface elevation of the smoothed mesh. It must be noted here that solely the computational mesh is modified by the smoothing algorithm whereas the computed (discontinuous) approximations to all unknowns, including the free surface elevation, are left unchanged. This approach preserves the local conservation property of the LDG method and is essential for our algorithm's stability.
\begin{figure}
\caption{Vertical cross-section of the coupled mesh and the free surface geometry approximation (solid yellow line).}
\label{Mesh_smoothing}
\end{figure}
\subsection{Semi-discrete LDG formulation for the hydrostatic equations}
Our next step is to approximate $\left(\xi(t,\cdot), {\bf u}x(t,\cdot), w(t,\cdot), {\scriptstyle{\mathcal{Q}}}(t,\cdot)\right)$, a~solution to the weak problem, with a~function $\left(\Xi(t,\cdot), {\bf U}x(t,\cdot), W(t,\cdot), {\mathcal{Q}}(t,\cdot)\right)\in {\cal H}_\Delta \times U_\Delta \times W_\Delta \times Z_\Delta$, where ${\cal H}_\Delta$, $U_\Delta$, $W_\Delta$, and $Z_\Delta$ denote finite-dimensional DG spaces.
For this purpose, we use the weak formulation with one important modification: Since the DG approximation spaces do not guarantee continuity across the inter-element boundaries, all integrands in the integrals over interior faces have to be approximated by suitably chosen numerical fluxes that preserve consistency and stability of the method. Similar treatment may be needed at the exterior boundaries as well. Then a semi-discrete finite element solution is obtained by requiring that for a.e. $t \in [0, T]$, for all ${\mathcal K} \in \mathcal Th$, and for all $(\delta,{\gvec \varphi},{\it \Psi},\sigma) \in {\cal H}_\Delta \times U_\Delta \times W_\Delta \times Z_\Delta$, the following holds:
\begin{subequations}
\begin{align}
& \hspace{-5mm} \left(\p_t \Xi, \delta \right)_{\mathcal K}x + \sum_{{\mathcal K} \in I_{e, {\mathcal K}x}} \left\{ \lan R_H, \delta \ran_{\p {\mathcal K}_{lat}} - \left({\bf U}x \cdot \nabla_{2}, \delta \right)_{\mathcal K} + \lan \mathcal TU \cdot \bn , \delta \ran_{\p {\mathcal K} \cap \p \Omega_{bot}} \right\}
= 0, \label{discrete_general_1}\\
& \hspace{-5mm} \left(\p_t {\bf U}x, {\gvec \varphi} \right)_{\mathcal K} + \lan \mathbf{R}_U + \mathbf{S}_U, {\gvec \varphi} \ran_{\p {\mathcal K}} - \left( ({\bf U}x \otimes {\bf U} + {\mathcal{Q}}) \cdot \nabla + \Xi \nabla_{2}, {\gvec \varphi} \right)_{\mathcal K}
+ \f {n_z} 2 \lan \p_t (\Xi_s-\Xi)\, {\bf U}x, {\gvec \varphi} \ran_{\p {\mathcal K} \cap \p \Omega_{top}} = \left(\bF_U, {\gvec \varphi} \right)_{\mathcal K}, \label{discrete_general_2}\\
& \hspace{-5mm} \left({\mathcal{D}}^{-1}({\bf U})\;{\mathcal{Q}}, {\it \Psi} \right)_{\mathcal K} + \lan S_Q, {\it \Psi} \ran_{\p {\mathcal K}} - \left( {\bf U}x \otimes \nabla, {\it \Psi} \right)_{\mathcal K} = 0 , \label{discrete_general_3}\\
& \hspace{-5mm} \lan R_H, \sigma\ran_{\p {\mathcal K}_{lat}} + \lan {\bf U}d \cdot \bn, \sigma \ran_{\p {\mathcal K}_{horiz} \setminus \p \Omega_{bot}} + \lan \mathcal TU \cdot \bn, \sigma \ran_{\p {\mathcal K} \cap \p \Omega_{bot}} - \left({\bf U} \cdot \nabla, \sigma \right)_{\mathcal K} = 0,\label{discrete_general_4}
\end{align}
\end{subequations}
where ${\bf U}d$ denotes the value of ${\bf U}$ taken from the element below the horizontal face, and $\p {\mathcal K}_{lat}$ and $\p {\mathcal K}_{horiz}$ are the lateral and the horizontal parts (faces) of $\p {\mathcal K}$, respectively. $R_H$, and $\mathbf{R}_U$ are the normal advective fluxes for ${\bf U}x \cdot \bnx$ and ${\bf U}x ({\bf U} \cdot \bn) + \Xi \bnx$ (also see Remark~\ref{remark-advective-fluxes}), respectively, whereas $\mathbf{S}_U$ and $S_Q$ denote the normal diffusive fluxes on element faces (Remark~\ref{remark-diffusive-fluxes}).
In our study, the following flux approximations are used:
\begin{subequations}
\begin{align}
\label{flux_s}
& \mathbf{S}_U|_{\gamma}
\coloneqq \left\{\begin{array}{ll}
\avg{{\mathcal{Q}}} \cdot \bn, & \quad\gamma \in I_{lat} \cup I_{horiz},\\
{\mathcal{Q}} \cdot \bn, & \quad\gamma \in I_i \cup I_o,\\
0,& \quad\gamma \in I_{top},\\
C_f {\bf U}x, & \quad\gamma \in I_{bot},
\end{array} \right.\quad
S_Q|_{\gamma}
\coloneqq \left\{\begin{array}{ll}
\avg{{\bf U}x} \otimes \bn, & \quad\gamma \in I_{lat} \cup I_{horiz},\\
{\bf u}xh \otimes \bn, & \quad\gamma \in I_i \cup I_o,\\
{\bf U}x \otimes \bn,& \quad\gamma \in I_{top} \cup I_{bot}.
\end{array} \right.\\
\label{flux_r}
& R_H\big|_{\gamma}
\coloneqq \left\{\begin{array}{ll}\avg{{\bf U}x} \cdot \bnx,& \quad\gamma \in I_{lat},\\
{\bf U}x \cdot \bnx, & \quad \gamma \in I_i,\\
{\bf u}xh \cdot \bnx, & \quad \gamma \in I_o, \end{array} \right. \quad
\mathbf{R}_U|_{\gamma}
\coloneqq \left\{\begin{array}{ll} \avg{{\bf U}x \otimes {\bf U}} \cdot \bn + \avg{ \Xi } \bnx + \f {\lambda_U} 2 \jumpt{{\bf U}x} \cdot \bnx,& \quad\gamma \in I_{lat},\\
\left(\avg{{\bf U}x} \otimes {\bf U}d\right) \cdot \bn + \Xi \bnx,& \quad\gamma \in I_{horiz},\\
{\bf U}x ({\bf U} \cdot \bn) + \hat{\xi} \bnx + \f {\lambda_U} 2 ({\bf U}x - {\bf u}xh),& \quad\gamma \in I_i,\\
{\bf U}x ({\bf u}h \cdot \bn) + \Xi \bnx,& \quad\gamma \in I_o,\\
{\bf U}x ({\bf U} \cdot \bn) + \Xi \bnx,& \quad\gamma \in I_{top},\\
{\bf U}x (\mathcal TU \cdot \bn) + \Xi \bnx,& \quad\gamma \in I_{bot}
\end{array} \right.
\end{align}
\end{subequations}
The value of the penalty coefficient in the momentum flux on lateral faces is closely related to that of the standard Lax-Friedrichs solver (see a~discussion in the remainder of this section) and is given by
\begin{equation}
\lambda_U \coloneqq \left\{\begin{array}{ll} \avg{ \left|{\bf U}x \cdot \bnx\right| } + \sqrt{\avg{\left|{\bf U}x \cdot \bnx\right|}^2 + 1}, & \mbox{ on interior lateral faces,}\\ \left|{\bf U}x \cdot \bnx\right| + \sqrt{\left({\bf U}x \cdot \bnx\right)^2 + 1}, & \mbox{ on inflow lateral faces.} \end{array} \right.\label{lambda_U_def}
\end{equation}
Also note that the vertical component of the normal to lateral faces is zero, thus ${\bf U} \cdot \bn = {\bf U}x \cdot \bnx$, etc. there.
\begin{Remark}[Advective fluxes] \label{remark-advective-fluxes}
Normal fluxes $R_H$ and $\mathbf{R}_U$ for the non-linear advection operator on lateral faces for the PCE~\rf{discrete_general_1} and the momentum Eqs. \rf{discrete_general_2} must be computed by solving a~Riemann problem in a~coupled way (see \cite{AizingerDiss,DawsonAizinger2005,Toro2001} for a~discussion of this issue). These fluxes are much more important for the stability of the discrete scheme than those on horizontal faces. This phenomenon is a consequence of the specific anisotropy of our problem and of the computational mesh tailored for this anisotropy: the free surface elevation has jumps across lateral faces but not across horizontal ones. On horizontal faces, the free surface elevation $\Xi$ is continuous, thus the Riemann problem simplifies to that for momentum equations only.
\end{Remark}
The largest (in absolute value) eigenvalue of the normal advective flux (see \cite{AizingerPaper}) given by
\begin{equation*}
|\lambda_{\max} ({\bf U}x)| = |{\bf U}x \cdot \bnx| + \sqrt{ ({\bf U}x \cdot \bnx)^2 + 1}
\end{equation*}
is used in the standard Lax-Friedrichs flux as the penalty coefficient.
In this work, we slightly modify this Riemann solver to reduce the technicalities involved in the stability analysis; however, the standard Lax-Friedrichs formulation works as well. The modifications amount to just omitting the penalty term in the PCE \eqref{discrete_general_1} and retaining it in the momentum equation \eqref{discrete_general_2} (cf.~\eqref{flux_r}).
For our choice of penalty coefficient given in Eq.~\eqref{lambda_U_def}, one can prove the following
\begin{Lemma}[Properties of $\lambda_U$\label{properties_lambda}]
The following inequality holds for $\lambda_U$:
\begin{equation*}
\lambda_U({\bf U}x) \; \ge \; \f {\sqrt{2} + 1}{\sqrt{2}} \left| {\bf U}x \cdot \bnx \right| + \f 1{\sqrt{2}}.
\end{equation*}
\end{Lemma}
\noindent
{\em Proof}: This property follows directly from a simple arithmetic inequality
\begin{equation*}
a + b \; \le \; \sqrt{2a^2 + 2b^2}, \qquad \forall a,b \ge 0.
\end{equation*}
\begin{Remark}[Diffusive fluxes]\label{remark-diffusive-fluxes}
Choosing diffusive fluxes $\mathbf{S}_U$ and $S_Q$ in Eqs.~\eqref{discrete_general_2} and \eqref{discrete_general_3} is simpler than solving the corresponding problem for the advective fluxes. In our analysis and implementation, those were set equal to central approximations on interior faces and to corresponding boundary conditions on the exterior ones (see~\eqref{flux_s}).
\end{Remark}
\begin{Remark}[Mesh penalty]\label{remark-mesh-penalty}
In addition to the ``usual'' DG penalty terms for primary variables, our formulation also has a~special term $\f 1 2 \p_t (\Xi_s-\Xi) {\bf U}x$ in Eq.~\eqref{discrete_general_2} that penalizes the difference between the computed (discontinuous) free surface elevation field and the smoothed (continuous) free surface mesh (Fig.~\ref{Mesh_smoothing}). This term is optional in practical applications, but it is indispensable for the proof below to go through. This underscores the importance of consistent treatment of the moving free surface geometry. The advantage of including this term is the fact that our stability analysis is not tied to any specific choice of mesh smoothing algorithm.
\end{Remark}
Incorporating our approximations for boundary conditions and the explicit forms of the modified Lax-Friedrichs fluxes as well as summing over all elements, we end up with the following system for the free flow:
\begin{subequations}
\label{discrete_formulation}
\begin{align}
& \hspace{-9mm}\sum_{{\mathcal K}x \in I_{e,2D}} \left(\p_t \Xi, \delta \right)_{\mathcal K}x + A_H({\bf U}x, \delta) = 0,\label{discrete_formulation_h}\\
& \hspace{-9mm}\sum_{{\mathcal K} \in I_e} \left(\p_t {\bf U}x, {\gvec \varphi} \right)_{\mathcal K}
+ A_U(\Xi, {\gvec \varphi})
+ B_U({\bf U}x, {\bf U}, {\gvec \varphi})
+ E_U({\mathcal{Q}}, {\gvec \varphi})
+ \!\!\!\sum_{{\gamma} \in I_{top}} \!\!\!\lan \f {n_z} 2 \p_t (\Xi_s-\Xi) {\bf U}x, {\gvec \varphi} \ran_{\gamma} \!
+ \Lambda_U({\bf U}x, {\gvec \varphi})
= \!\!\sum_{{\mathcal K} \in I_e}\left(\bF_U, {\gvec \varphi} \right)_{\mathcal K}, \label{discrete_formulation_u} \\
& \hspace{-9mm}\sum_{{\mathcal K} \in I_e} \left({\mathcal{D}}^{-1}({\bf U})\; {\mathcal{Q}}, {\it \Psi} \right)_{\mathcal K}
+ E_Q({\bf U}x, {\it \Psi}) = 0,\label{discrete_formulation_q}\\
& \hspace{-9mm}A_H({\bf U}x, \sigma)
+ \sum_{{\gamma} \in I_{horiz}} \!\!\! \lan {\bf U}d, \jump{\sigma}\ran_{\gamma}
+ \sum_{{\gamma} \in I_{top}} \!\! \lan {\bf U} \cdot \bn, \sigma \ran_{\gamma}
- \sum_{{\mathcal K} \in I_e} \left(W, \p_z \sigma \right)_{\mathcal K} = 0\label{discrete_formulation_w}
\end{align}
\end{subequations}
with forms $A_H, A_U, B_U, \Lambda_U, E_U, E_Q$ defined as
\begin{align*}
& \hspace{-5mm} A_H({\bf U}x, \sigma) \coloneqq \sum_{{\gamma} \in I_{lat}} \lan {\bf U}xa, \jump{\sigma}\ran_{\gamma}
+ \ \sum_{{\gamma} \in I_i} \lan {\bf U}x \cdot \bnx, \sigma \ran_{\gamma}\
+ \sum_{{\gamma} \in I_{bot}} \lan \mathcal TU \cdot \bn, \sigma \ran_{\gamma}
+ \sum_{{\gamma} \in I_o} \lan {\bf u}xh \cdot \bnx, \sigma \ran_{\gamma}
- \sum_{{\mathcal K} \in I_e} \left({\bf U}x \cdot \nabla_{2}, \sigma \right)_{\mathcal K}, \nonumber\\
& \hspace{-5mm} A_U(\Xi, {\gvec \varphi}) \coloneqq \sum_{{\gamma} \in I_{lat}} \lan \avg{\Xi}, \jump{{\gvec \varphi}} \ran_{\gamma}
+ \ \sum_{{\gamma} \in I_{horiz}} \lan \Xi, \jump{{\gvec \varphi}} \ran_{\gamma}\
+ \ \sum_{{\gamma} \in I_i} \lan \hat {\xi} \bnx, {\gvec \varphi} \ran_{\gamma}
+ \sum_{{\gamma} \in I_o \cup I_{top} \cup I_{bot}} \lan \Xi \bnx, {\gvec \varphi} \ran_{\gamma}\
- \ \sum_{{\mathcal K} \in I_e} \left(\Xi \nabla_{2}, {\gvec \varphi} \right)_{\mathcal K}, \nonumber\\
& \hspace{-5mm} B_U({\bf U}x, {\bf U}, {\gvec \varphi}) \coloneqq
\sum_{{\gamma} \in I_{lat}} \!\!\! \lan \avg{{\bf U}x \otimes {\bf U}}, \jumpt{{\gvec \varphi}} \ran_{\gamma}
+ \sum_{{\gamma} \in I_{horiz}} \!\!\! \lan {\bf U}xa \otimes {\bf U}d, \jumpt{{\gvec \varphi}} \ran_{\gamma}
+ \sum_{{\gamma} \in I_i} \lan {\bf U}x ({\bf U} \cdot \bng), {\gvec \varphi} \ran_{\gamma}\\
&\hspace{20mm} + \sum_{{\gamma} \in I_o} \lan {\bf U}x ({\bf u}h \cdot \bng), {\gvec \varphi} \ran_{\gamma}
+ \sum_{{\gamma} \in I_{top}} \!\!\! \lan {\bf U}x ({\bf U} \cdot \bn), {\gvec \varphi} \ran_{\gamma}
+ \sum_{{\gamma} \in I_{bot}} \lan {\bf U}x (\mathcal TU \cdot \bn), {\gvec \varphi} \ran_{\gamma}
- \sum_{{\mathcal K} \in I_e} \left({\bf U}x({\bf U} \cdot \nabla), {\gvec \varphi} \right)_{\mathcal K}, \nonumber\\
& \hspace{-5mm} \Lambda_U({\bf U}x, {\gvec \varphi}) \coloneqq
\sum_{{\gamma} \in I_{lat}} \lan \f {\lambda_U} 2 \jumpt{{\bf U}x}, \jumpt{{\gvec \varphi}}\ran_{\gamma}
+ \sum_{{\gamma} \in I_i} \lan \f {\lambda_U} 2 \,\left( {\bf U}x - {\bf u}xh \right), {\gvec \varphi} \ran_{\gamma},\nonumber\\
& \hspace{-5mm} E_U({\mathcal{Q}}, {\gvec \varphi}) \coloneqq \sum_{{\gamma} \in I_{lat} \cup I_{horiz}} \!\!\!\lan \rD {\mathcal{Q}}a, \jumpt{{\gvec \varphi}} \ran_{\gamma}
+ \sum_{{\gamma} \in I_i \cup I_o } \!\!\!\! \lan \rD {\mathcal{Q}} \cdot \bn , {\gvec \varphi} \ran_{\gamma}
+ \sum_{{\gamma} \in I_{bot}} \!\!\! \lan C_f {\bf U}x, {\gvec \varphi} \ran_{\gamma}
- \sum_{{\mathcal K} \in I_e} \!\! \left(\rD {\mathcal{Q}} \cdot \nabla, {\gvec \varphi} \right)_{\mathcal K},\\
& \hspace{-5mm} E_Q({\bf U}x, {\it \Psi}) \coloneqq \sum_{{\gamma} \in I_{lat} \cup I_{horiz}} \! \lan {\bf U}xa , \jump{{\it \Psi}} \ran_{\gamma}
+ \sum_{{\gamma} \in I_i \cup I_o} \! \! \lan {\bf u}xh , {\it \Psi} \cdot \bn \ran_{\gamma}
+ \sum_{{\gamma} \in I_{top} \cup I_{bot}} \lan {\bf U}x, {\it \Psi} \cdot \bn \ran_{\gamma}
- \ \sum_{{\mathcal K} \in I_e} \left( {\bf U}x, \nabla \cdot {\it \Psi} \right)_{\mathcal K}. \nonumber
\end{align*}
\subsection{Semi-discrete LDG formulation for the Darcy system}
Analogously to the above section, we formulate the semi-discrete Darcy system
\begin{subequations}
\label{Darcy_compact}
\begin{align}
\label{Darcy_compact_1}
& \sum_{{\mathcal K} \in \tilde I_e} \left(\p_t \mathcal TH, \mathcal TestH \right)_{\mathcal K}
+ \tilde{E}_{\mathcal TH} (\mathcal TU, \mathcal TestH)
+ \tilde{\Lambda}_{\mathcal TH}(\mathcal TH, \mathcal TestH)
= \sum_{{\mathcal K} \in \tilde I_e} \left(f, \mathcal TestH \right)_{\mathcal K},\\
\label{Darcy_compact_2}
& \sum_{{\mathcal K} \in \tilde I_e} \left({\mathcal{D}}^{-1}(\mathcal TH)\, \mathcal TU, \mathcal TestU \right)_{\mathcal K}
+ \tilde{E}_{\mathcal TU} (\mathcal TH, \mathcal TestU)
= 0
\end{align}
\end{subequations}
with forms $\tilde{E}_{\mathcal TH}, \tilde{E}_{\mathcal TU}, \tilde{\Lambda}_{\mathcal TH}$ defined as follows:
\begin{align*}
& \hspace{-7mm} \tilde{E}_{\mathcal TH} (\mathcal TU, \mathcal TestH) \coloneqq
\sum_{{\gamma} \in \tilde I_{int}} \left\langle \avg{\mathcal TU}, \jump{\mathcal TestH} \right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D} \cup \tilde I_{top} } \left\langle \mathcal TU \cdot \tilde \bn, \mathcal TestH\right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_N} \left\langle \hat u_{\tilde n}, \mathcal TestH \right\rangle_{{\gamma}}
- \sum_{{\mathcal K} \in \tilde I_e} \left(\mathcal TU \cdot \nabla, \mathcal TestH \right)_{\mathcal K}, \\
& \hspace{-7mm} \tilde{E}_{\mathcal TU} (\mathcal TH, \mathcal TestU) \coloneqq
\sum_{{\gamma} \in \tilde I_{int}} \left\langle \avg{\mathcal TH}, \jump{\mathcal TestU}\right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{top}} \left\langle \Xi+\f 12 \left({\bf U}x \cdot {\bf U}x\right), \mathcal TestU \cdot \tilde \bn \right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_N} \left\langle \mathcal TH, \mathcal TestU \cdot \tilde \bn \right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_D} \left\langle \hat h, \mathcal TestU \cdot \tilde \bn \right\rangle_{{\gamma}}
- \sum_{{\mathcal K} \in \tilde I_e} \left(\mathcal TH, \nabla \cdot \mathcal TestU \right)_{\mathcal K}, \\
& \hspace{-7mm} \tilde{\Lambda}_{\mathcal TH}(\mathcal TH, \mathcal TestH) \coloneqq
\sum_{{\gamma} \in \tilde I_{int}} \frac{\eta}{\Delta x_{{\gamma}}} \left\langle \jump{\mathcal TH}, \jump{\mathcal TestH} \right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{\Delta x_{{\gamma}}} \left\langle \mathcal TH - \hat h, \mathcal TestH\right\rangle_{{\gamma}}.
\end{align*}
The initial state $\mathcal TH(0)$ is created by the element-wise $L^2$-projection of $\tilde h_0$. Here, $\Xi = \Xi(x,y)$ denotes the free surface elevation from the free flow problem.
\section{Discrete energy stability estimate for the coupled system}\label{sec:analysis}
\begin{Theorem}[Discrete stability\label{discrete_stability}]
Let the free surface elevation of the smoothed mesh satisfy $\Xi_s\big|_{\Pi(\p \Omega_{i})}=\hat \xi$, and let $\Xi, \delta \in \mathbb P_{2k}^2(\Pi {\mathcal K})$, ${\bf U}x, {\gvec \varphi} \in \mathbb P_k^3({\mathcal K})^2$, $W, \sigma \in \mathbb P_{2k}^3({\mathcal K})$, ${\mathcal{Q}}, {\it \Psi} \in \mathbb P_k^3({\mathcal K})^{2 \times 3}$, $\mathcal TH, \mathcal TestH \in \mathbb P_{\hat k}(\tilde {\mathcal K})$, and $\mathcal TU, \mathcal TestU \in \mathbb P_{\bar k}^3(\tilde {\mathcal K})$ for some $k, \bar k, \hat k \ge 0$, a.e. $t\in[0,T]$, and all ${\mathcal K} \in \mathcal Th(\Omega(t)), \tilde {\mathcal K} \in \mathcal Th(\tilde{\Omega})$. Then scheme~\eqref{discrete_formulation}--\eqref{Darcy_compact} is stable in the following sense:
\begin{align*}
& \hspace{-8mm} \p_t \left\{ \left\| \Xi \right\|_{\Omega_{2}}^2
+ \left\| {\bf U}x \right\|^2_{\Omega(T)}
+ \left\| \mathcal TH \right\|^2_{\tilde{\Omega}} \right\}
+ \left\| \sqrt{{\mathcal{D}}^{-1}({\bf U})}\, {\mathcal{Q}} \right\|^2_{{\Omega(t)}}
+ \sum_{{\gamma} \in I_{lat}} \left\|\jumpt{{\bf U}x} \right\|^2_{\gamma}
+ \bigg\| \sqrt{\tilde \D^{-1}(\mathcal TH)}\, \mathcal TU \bigg\|^2_{\tilde{\Omega} }
+ \sum_{{\gamma} \in \tilde I_{int}} \frac{\eta}{\Delta x_{{\gamma}}} \left\| \jump{\mathcal TH} \right\|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{\Delta x_{{\gamma}}} \left\| \mathcal TH \right\|^2_{{\gamma}}\\
& \hspace{-8mm} \qquad \le C(C_t, {\Omega(t)}, \tilde{\Omega}, {\mathcal{D}}, \tilde \D, \eta, \bF_U, \tilde f, z_b, \hat{\xi}, {\bf u}xh, \hat u_{\tilde n}, \Delta x).
\end{align*}
\end{Theorem}
\noindent
{\em Proof}:
We start with the stability estimate for Darcy flow. Choosing $\mathcal TestH = \mathcal TH$, $\mathcal TestU = \mathcal TU$ and adding \eqref{Darcy_compact_1}, \eqref{Darcy_compact_2} gives
\begin{equation} \label{Darcy_stability_1}
\sum_{{\mathcal K} \in \tilde I_e} \left(\p_t \mathcal TH, \mathcal TH \right)_{\mathcal K}
+ \tilde{E}_{\mathcal TH} (\mathcal TU, \mathcal TH)
+ \tilde{\Lambda}_{\mathcal TH}(\mathcal TH, \mathcal TH)
+ \sum_{{\mathcal K} \in \tilde I_e} \left(\tilde \D^{-1}(\mathcal TH)\, \mathcal TU, \mathcal TU \right)_{\mathcal K}
+ \tilde{E}_{\mathcal TU} (\mathcal TH, \mathcal TU)
= \sum_{{\mathcal K} \in \tilde I_e} \left(f, \mathcal TH \right)_{\mathcal K}.
\end{equation}
Integration by parts of the element integral term in $\tilde{E}_{\mathcal TU}$ and the use of \eqref{jump1} leads to
\[
\tilde{E}_{\mathcal TH} (\mathcal TU, \mathcal TH) + \tilde{E}_{\mathcal TU} (\mathcal TH, \mathcal TU)
= \sum_{{\gamma} \in \tilde I_{top}} \left\langle \Xi +\f 12 ({\bf U}x \cdot {\bf U}x), \mathcal TU \cdot \tilde \bn \right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \left\langle \hat h, \mathcal TU \cdot \tilde \bn \right\rangle_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_N} \left\langle \hat u_{\tilde n}, \mathcal TH \right\rangle_{{\gamma}}.
\]
Substituting the above expression into~\eqref{Darcy_stability_1} and splitting the penalty terms results in
\begin{align*}
& \hspace{-8mm} \frac{1}{2} \p_t \left\| \mathcal TH \right\|^2_{\tilde{\Omega} }
+ \left\| \sqrt{\tilde \D^{-1}(\mathcal TH)}\, \mathcal TU \right\|^2_{\tilde{\Omega}}
+ \sum_{{\gamma} \in \tilde I_{int}} \frac{\eta}{\Delta x_{{\gamma}}} \left\| \jump{\mathcal TH} \right\|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{\Delta x_{{\gamma}}} \left\| \mathcal TH \right\|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{top}} \left\langle \Xi +\f 12 ({\bf U}x \cdot {\bf U}x), \mathcal TU \cdot \tilde \bn \right\rangle_{{\gamma}} \\
& \hspace{-8mm}\quad = \un{\left(\tilde f, \mathcal TH\right)_{\tilde{\Omega}}}_{\tilde \Upsilon_1}
+ \un{\sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{\Delta x_{{\gamma}}} \left\langle\hat h, \mathcal TH \right\rangle_{{\gamma}} }_{\tilde \Upsilon_2}
- \un{\sum_{{\gamma} \in \tilde I_{D}} \left\langle \hat h, \mathcal TU \cdot \tilde \bn \right\rangle_{{\gamma}} }_{\tilde \Upsilon_3}
- \un{\sum_{{\gamma} \in \tilde I_N} \left\langle \hat u_{\tilde n}, \mathcal TH \right\rangle_{{\gamma}}}_{\tilde \Upsilon_4}.
\end{align*}
Now we estimate terms $\tilde \Upsilon_1$--$\tilde \Upsilon_4$ using Young's inequality, uniform bounds on $\tilde \D(\mathcal TH)$, and the auxiliary results from Sec.~\ref{sec:tools} (also see \cite[p. 1382 - 1383]{RuppKnabner2017} presenting similar estimates in greater detail).
\begin{align*}
\hspace{-8mm}|\tilde{\Upsilon}_1| &
\;\le\; \f 12 \|\tilde f\|^2_{\tilde{\Omega}} + \f 12 \|\mathcal TH\|^2_{\tilde{\Omega}},\\
\hspace{-8mm}|\tilde{\Upsilon}_2| &
\;\le\; \sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{2 \Delta x_{{\gamma}}} \| \hat h \|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{2 \Delta x_{{\gamma}}} \| \mathcal TH \|^2_{{\gamma}},\\
\hspace{-8mm}|\tilde{\Upsilon}_3| & \;\le\;
\sum_{{\gamma} \in \tilde I_{D}} \frac{C_t C_{D}}{2 \Delta x_{{\gamma}}} \| \hat h \|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \frac{\Delta x_{{\gamma}}}{2 C_t C_{D}} \| \mathcal TU \|^2_{{\gamma}}
\;\le\; C(C_t, \tilde \D) \sum_{{\gamma} \in \tilde I_{D}} \Delta x_{{\gamma}}^{-1} \| \hat h \|^2_{{\gamma}}
+ \frac 12 \left\| \sqrt{\tilde \D^{-1}(\mathcal TH)}\, \mathcal TU \right\|^2_{\tilde{\Omega}},\\
\hspace{-8mm}|\tilde{\Upsilon}_4| &
\;\le\; \sum_{{\gamma} \in \tilde I_N} \frac{C_t}{2 \Delta x_{{\gamma}}} \| \hat u_{\tilde n} \|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_N} \frac{\Delta x_{{\gamma}}}{2 C_t} \| \mathcal TH \|^2_{{\gamma}}
\;\le\; C(C_t) \sum_{{\gamma} \in \tilde I_N} \Delta x_{{\gamma}}^{-1} \| \hat u_{\tilde n} \|^2_{{\gamma}}
+ \f 12 \|\mathcal TH\|^2_{\tilde{\Omega}}.
\end{align*}
Using the above estimates and noting that $\tilde \bn = - \bn$ on $\p \tilde{\Omega}_{top}$, we obtain
\begin{align}\label{EQ:Darcy:estimate}
& \f 12 \p_t \left\| \mathcal TH \right\|^2_{\tilde{\Omega}}
+ \f 12 \left\| \sqrt{\tilde \D^{-1}(\mathcal TH)}\, \mathcal TU \right\|^2_{\tilde{\Omega}}
+ \sum_{{\gamma} \in \tilde I_{int}} \frac{\eta}{\Delta x_{{\gamma}}} \left\| \jump{\mathcal TH} \right\|^2_{{\gamma}}
+ \sum_{{\gamma} \in \tilde I_{D}} \frac{\eta}{2 \Delta x_{{\gamma}}} \left\| \mathcal TH \right\|^2_{{\gamma}}
- \sum_{{\gamma} \in \tilde I_{top}} \left\langle \Xi+\f 12 ({\bf U}x \cdot {\bf U}x), \mathcal TU \cdot \bn \right\rangle_{{\gamma}} \notag\\
& \quad \le \| \mathcal TH \|^2_{\tilde{\Omega}}
+ \f 12 \| \tilde f \|^2_{\tilde{\Omega}}
+ C(C_t, \tilde \D, \eta) \sum_{{\gamma} \in \tilde I_{D}} \Delta x_{{\gamma}}^{-1} \| \hat h \|^2_{{\gamma}}
+ C(C_t) \sum_{{\gamma} \in \tilde I_N} \Delta x_{{\gamma}}^{-1} \| \hat u_{\tilde n} \|^2_{{\gamma}}.
\end{align}
\noindent
Turning to the analysis of the free surface flow sub-system, we set $\delta=\Xi, {\gvec \varphi}={\bf U}x, {\it \Psi} ={\mathcal{Q}}$ and add Eqs.~\rf{discrete_formulation_h}--\rf{discrete_formulation_q}
\begin{align}
\label{discrete_stability_1}
& \sum_{{\mathcal K}x \in I_{e,2D}} \left(\p_t \Xi, \Xi \right)_{\mathcal K}x
+ A_H({\bf U}x, \Xi)
+ \sum_{{\mathcal K} \in I_e} \left(\p_t {\bf U}x, {\bf U}x \right)_{\mathcal K}
+ A_U(\Xi, {\bf U}x)
+ B_U({\bf U}x, {\bf U}, {\bf U}x)
+ E_U({\mathcal{Q}}, {\bf U}x)
+ \Lambda_U({\bf U}x, {\bf U}x)
\nonumber\\
& \quad + \sum_{{\gamma} \in I_{top}} \!\!\! \lan \f {n_z} 2 \p_t (\Xi_s-\Xi) {\bf U}x, {\bf U}x \ran_{\gamma}
+ \sum_{{\mathcal K} \in I_e} \!\!\left({\mathcal{D}}^{-1}({\bf U})\;{\mathcal{Q}}, {\mathcal{Q}} \right)_{\mathcal K}
+ E_Q({\bf U}x, {\mathcal{Q}})
= \sum_{{\mathcal K} \in I_e}\!\!\left(\bF_U, {\bf U}x \right)_{\mathcal K}.
\end{align}
First, we deal with terms containing $\Xi$. Integration by parts and \rf{jump1} produce
\[
A_H({\bf U}x, \Xi) + A_U(\Xi, {\bf U}x)
= \sum_{{\gamma} \in I_{bot}} \lan \mathcal TU \cdot \bn, \Xi \ran_{\gamma}
+ \sum_{{\gamma} \in I_i} \lan \hat{\xi} \bnx, {\bf U}x \ran_{\gamma}
+ \sum_{{\gamma} \in I_o} \lan {\bf u}xh \cdot \bnx, \Xi \ran_{\gamma}.
\]
The step dealing with the non-linear advective terms in the momentum equation is the crucial and,
at the same time, the most technically involved step of the proof and thus will be presented in
greater detail.
First, note that
\[
- \sum_{{\mathcal K} \in I_e} \left({\bf U}x({\bf U} \cdot \nabla), {\bf U}x \right)_{\mathcal K}
\ = \ - \sum_{{\mathcal K} \in I_e} \left({\bf U}, \f 1 2 \nabla \left({\bf U}x \cdot {\bf U}x \right) \right)_{\mathcal K}.
\]
Setting $\sigma = \f 1 2 \left({\bf U}x \cdot {\bf U}x \right)$ in~\rf{discrete_formulation_w} (recalling that its test space contains products of elements from the test space of \rf{discrete_formulation_u}!), we replace the above term in the definition of $B_U$ with the boundary integral terms resulting from~\rf{discrete_formulation_w}.
\begin{align*}
\hspace{-8mm}B_U({\bf U}x, {\bf U}, {\bf U}x)
&= \sum_{{\gamma} \in I_{lat}} \bigg\{ \un{ \lan \avg{{\bf U}x \otimes {\bf U}}, \jumpt{{\bf U}x} \ran_{\gamma} }_{\Theta_1}
- \f 1 2 \lan {\bf U}xa, \jump{ {\bf U}x \cdot {\bf U}x }\ran_{\gamma} \bigg\}
+ \!\!\sum_{{\gamma} \in I_{horiz}} \!\! \bigg\{\un{\lan {\bf U}xa \otimes {\bf U}d, \jumpt{{\bf U}x} \ran_{\gamma}}_{\Theta_2}
- \f 1 2 \lan {\bf U}d, \jump{{\bf U}x \cdot {\bf U}x } \ran_{\gamma} \bigg\}\\
\hspace{-8mm}& \quad + \f 1 2 \sum_{{\gamma} \in I_o} \lan {\bf U}x\, ({\bf u}xh \cdot \bnx), {\bf U}x \ran_{\gamma}
+ \f 1 2 \sum_{{\gamma} \in I_{top} \cup I_i} \lan {\bf U}x\, ({\bf U} \cdot \bn), {\bf U}x \ran_{\gamma}
+ \f 1 2 \sum_{{\gamma} \in I_{bot}} \lan {\bf U}x\, (\mathcal TU \cdot \bn), {\bf U}x \ran_{\gamma}.
\end{align*}
Using \rf{jump1}, \rf{jump2} and noting $({\bf U} \cdot \bn)|_{{\gamma}} = ({\bf U}x \cdot \bnx)|_{{\gamma}}, \forall {\gamma} \in I_{lat}$ we find
\begin{align*}
\Theta_1 &= \lan \avg{{\bf U}x} \otimes \avg{{\bf U}} + \f 1 4 \jumpt{{\bf U}x} \jumpt{{\bf U}}^T, \jumpt{{\bf U}x} \ran_{\gamma}
= \lan \avg{{\bf U}x} \otimes \avg{{\bf U}} + \f 1 4 \left( {\bf U}x^+ - {\bf U}x^-\right) \otimes \left( {\bf U}^+ - {\bf U}^-\right), {\bf U}x^+ \otimes \bn^+ + {\bf U}x^- \otimes \bn^- \ran_{\gamma}\\
&= \lan \avg{{\bf U}x} \left( \avg{{\bf U}} \cdot \bn^+ \right) + \f 1 4\left( {\bf U}x^+ - {\bf U}x^- \right) \jump{{\bf U}}, {\bf U}x^+ - {\bf U}x^- \ran_{\gamma}
= \f 1 2 \lan \avg{{\bf U}x}, \jump{{\bf U}x \cdot {\bf U}x} \ran_{\gamma}
+ \f 1 4 \lan \jumpt{{\bf U}x} \jump{{\bf U}}, \jumpt{{\bf U}x} \ran_{\gamma}.
\end{align*}
In a~similar manner, we obtain $\Theta_2 = \f 1 2 \lan {\bf U}d, \jump{{\bf U}x \cdot {\bf U}x } \ran_{\gamma}$; this gives us
\begin{equation}\label{B_U}
B_U({\bf U}x, {\bf U}, {\bf U}x)
= \f 1 4 \sum_{{\gamma} \in I_{lat}} \lan \jumpt{{\bf U}x} \jump{{\bf U}}, \jumpt{{\bf U}x} \ran_{\gamma}
+ \f 1 2 \bigg\{ \sum_{{\gamma} \in I_o} \lan {\bf U}x\, ({\bf u}xh \cdot \bnx), {\bf U}x \ran_{\gamma}
+ \sum_{{\gamma} \in I_{top} \cup I_i} \lan {\bf U}x\, ({\bf U} \cdot \bn), {\bf U}x \ran_{\gamma}
+ \sum_{{\gamma} \in I_{bot}} \lan {\bf U}x\, (\mathcal TU \cdot \bn), {\bf U}x \ran_{\gamma} \bigg\}.
\end{equation}
The movement of the free surface is accounted for via the mesh penalty term.
Here we use Eqs. \rf{discrete_formulation_h} and \rf{discrete_formulation_w},
once again taking advantage of the higher order test spaces in them.
Noting that for any free surface boundary face $\gamma$ in our smoothed mesh
$\int_{\gamma} n_z f(x, y, z) ds = \int_{\Pi(\gamma)} f(x, y, \Xi_s) dx dy$
and applying Leibniz' Rule we proceed as follows:
\begin{align*}
&\hspace{-5mm} \sum_{{\mathcal K} \in I_e} \left(\p_t {\bf U}x, {\bf U}x \right)_{\mathcal K}
+ \ \sum_{{\gamma} \in I_{top}} \lan \f {n_z} 2 \p_t (\Xi_s-\Xi) {\bf U}x, {\bf U}x \ran_{\gamma}
= \ \f 1 2 \sum_{{\mathcal K}x \in I_{e,2D}} \!\!\! \left( \int_{z_{bot}}^{\Xi_s(t)}
\p_t |{\bf U}x|^2 dz, 1 \right)_{\mathcal K}x
+ \f 1 2 \sum_{{\mathcal K}x \in I_{e,2D}} \!\!\! \left( \p_t \Xi_s -
\p_t \Xi, |{\bf U}x(\Xi_s)|^2 \right)_{\mathcal K}x \nonumber\\
& \hspace{-5mm} \quad = \ \f 1 2 \sum_{{\mathcal K} \in I_e} \p_t \left\| {\bf U}x \right\|^2_{\mathcal K}
\ - \ \f 1 2 \sum_{{\mathcal K}x \in I_{e,2D}} \left(\p_t \Xi, |{\bf U}x(\Xi_s)|^2 \right)_{\mathcal K}x
=\ \f 1 2 \sum_{{\mathcal K} \in I_e} \p_t \left\| {\bf U}x \right\|^2_{\mathcal K}
\ - \ \f 1 2 \sum_{{\gamma} \in I_{top}} \lan {\bf U} \cdot \bn, {\bf U}x \cdot {\bf U}x \ran_{\gamma}.
\end{align*}
The last equality follows by setting $\delta = \sigma = |{\bf U}x(\Xi_s)|^2$ in
\rf{discrete_formulation_h} and
\rf{discrete_formulation_w}, respectively, and subtracting the latter from the former.
The last term in the expression above cancels a corresponding term in the estimate~\eqref{B_U} for $B_U$.
For the diffusion terms, the divergence theorem and \rf{jump1} give us
\[
E_U({\mathcal{Q}}, {\bf U}x) + E_Q({\bf U}x, {\mathcal{Q}})
= \sum_{{\gamma} \in I_i \cup I_o} \lan {\bf u}xh \rD, {\mathcal{Q}} \cdot \bn \ran_{\gamma}
+ \sum_{{\gamma} \in I_{bot}} \lan C_f {\bf U}x , {\bf U}x \ran_{\gamma}.
\]
Substituting the results of the above simplifications into \rf{discrete_stability_1} we obtain
\begin{align}
\label{discrete_stability_2}
&\f 1 2 \p_t \left\| \Xi \right\|_{\Omega_{2}}^2
+ \f 1 2 \p_t \left\| {\bf U}x \right\|^2_{\Omega(t)}
+ \left\| \sqrt{{\mathcal{D}}^{-1}({\bf U})}\, {\mathcal{Q}} \right\|^2_{\Omega(t)}
+ \sum_{{\gamma} \in I_{lat}} \lan \f {\lambda_U} 2 \jumpt{{\bf U}x}, \jumpt{{\bf U}x} \ran_{\gamma}
+ \sum_{{\gamma} \in I_i} \lan \f {\lambda_U} 2 {\bf U}x, {\bf U}x \ran_{\gamma} \nonumber\\
& \qquad + \f 1 2 \sum_{{\gamma} \in I_{bot}} \lan {\bf U}x (\mathcal TU \cdot \bn), {\bf U}x \ran_{\gamma}
+ \un{\sum_{{\gamma} \in I_{bot}} \lan C_f {\bf U}x, {\bf U}x \ran_{\gamma} }_{\ge 0}
+ \un{\f 1 2 \sum_{{\gamma} \in I_o} \lan {\bf U}x ({\bf u}xh \cdot \bnx), {\bf U}x \ran_{\gamma} }_{\ge 0} \nonumber\\
& \quad = -\un{\f 1 4 \sum_{{\gamma} \in I_{lat}} \lan \jumpt{{\bf U}x}\, \jump{{\bf U}}, \jumpt{{\bf U}x} \ran_{\gamma} }_{\Upsilon_1}
+ \un{\sum_{{\mathcal K} \in I_e} \left(\bF_U(t), {\bf U}x \right)_{\mathcal K} }_{\Upsilon_2}
- \un{ \sum_{{\gamma} \in I_i} \lan \hat{\xi} \bnx, {\bf U}x \ran_{\gamma} }_{\Upsilon_3}
- \un{\sum_{{\gamma} \in I_o} \lan {\bf u}xh \cdot \bnx, \Xi \ran_{\gamma} }_{\Upsilon_4}
+ \un{\sum_{{\gamma} \in I_i} \lan \f {\lambda_U} 2 {\bf u}xh, {\bf U}x \ran_{\gamma} }_{\Upsilon_5} \nonumber\\
& \qquad - \un{\sum_{{\gamma} \in I_i \cup I_o} \lan {\bf u}xh \rD, {\mathcal{Q}} \cdot \bn \ran_{\gamma} }_{\Upsilon_6}
- \un{\f 1 2 \sum_{{\gamma} \in I_i} \lan {\bf U}x ({\bf U} \cdot \bn), {\bf U}x \ran_{\gamma} }_{\Upsilon_7}
- \sum_{{\gamma} \in I_{bot}} \lan \mathcal TU \cdot \bn, \Xi \ran_{\gamma}.
\end{align}
In the remainder of the proof, we estimate terms $\Upsilon_1$--$\Upsilon_7$ relying on
Young's and Cauchy-Schwarz' inequalities, properties of $\Xi_s$ and ${\mathcal{D}}$, and results from Sec.~\ref{sec:tools}.
\begin{align*}
\hspace{-8mm}|\Upsilon_1| & \le \f 1 2 \sum_{{\gamma} \in I_{lat}} \lan \jumpt{{\bf U}x} \avg{|{\bf U} \cdot \bng|}, \jumpt{{\bf U}x} \ran_{\gamma}, \qquad \text{(cf. the definition of $\lambda_U$)}\\
\hspace{-8mm}|\Upsilon_2| & \le \f 1 4 \left\| \bF_U(t) \right\|_{\Omega(t)}^2
+ \; \left\| {\bf U}x \right\|_{\Omega(t)}^2,\\
\hspace{-8mm}|\Upsilon_3| & \le \f 1 {4 \alpha} \sum_{{\gamma} \in I_i} \| \hat{\xi} \|^2_{\gamma}
+ \alpha \sum_{{\gamma} \in I_i} \| {\bf U}x \|^2_{\gamma},
\; \mbox{where} \; \f 1 {4 \alpha} \sum_{{\gamma} \in I_i} \| \hat{\xi} \|^2_{\gamma}
= \f 1 {4 \alpha} \lan \Xi_s-z_b, \hat{\xi}^2 \ran_{\Pi(\p \Omega_i)}
\le \f 1 {4 \alpha} \left( \| \hat{\xi} \|^3_{\Pi(\p \Omega_i)}
+ \|z_b\|_{L^\infty(\p \Omega_i)}\| \hat{\xi} \|^2_{\Pi(\p \Omega_i)} \right), \\
\hspace{-8mm}|\Upsilon_4| & = \lan \Xi, \int^{\Xi_s}_{z_b} {\bf u}xh \cdot \bnx \, dz \ran_{\Pi(\p \Omega_o)}
\le \f{\Delta x}{C_t} \| \Xi\|^2_{\Pi(\p \Omega_o)}
+ \f{C_t}{4 \Delta x} \lan 1, \left(\int^{\Xi_s}_{z_b} {\bf u}xh \cdot \bnx \, dz \right)^2 \ran_{\Pi(\p \Omega_o)}
\le \| \Xi\|^2_{{\Omega_{2}}}
+ C(C_t, \Delta x, \p \Omega_o)\, \| {\bf u}xh \|^2_{\p \Omega_o},\\
\hspace{-8mm}|\Upsilon_5| & \le \f 1 {8 \beta} \;\sum_{{\gamma} \in I_i} \lan \lambda_U {\bf u}xh, {\bf u}xh \ran_{\gamma}
+\; \beta \sum_{{\gamma} \in I_i} \lan \lambda_U {\bf U}x, {\bf U}x \ran_{\gamma}
\; \le \; \sum_{{\gamma} \in I_i} \lan \lambda_U, \lambda_U \ran_{\gamma}
+ \;\f 1 {32 \beta} \sum_{{\gamma} \in I_i} \lan |{\bf u}xh|^2, |{\bf u}xh|^2 \ran_{\gamma}
+ \; \beta \sum_{{\gamma} \in I_i} \lan \lambda_U {\bf U}x, {\bf U}x \ran_{\gamma} \nonumber\\
\hspace{-8mm}& \le \alpha \sum_{{\gamma} \in I_i} \| {\bf U}x \|^2_{\gamma}
+ \f 1 {4 \alpha} \sum_{{\gamma} \in I_i} \lan 1, 1 \ran_{\gamma}
+ \f 1 {32 \beta} \sum_{{\gamma} \in I_i} \|{\bf u}xh\|^4_{\gamma}
+ \beta \sum_{{\gamma} \in I_i} \lan \lambda_U {\bf U}x, {\bf U}x \ran_{\gamma}
\; \mbox{with}\quad \f 1 {4 \alpha} \sum_{{\gamma} \in I_i} \lan 1, 1 \ran_{\gamma}
= \f 1 {4 \alpha} \lan 1, \hat{\xi} -z_b \ran_{\Pi(\p \Omega_i)},\\
\hspace{-8mm}|\Upsilon_6| & \le \f {C_t^2 C_{D}} {2} \sum_{{\gamma} \in I_i \cup I_o} \Delta x_{{\gamma}}^{-1} \| {\bf u}xh\|^2_{\gamma}
\;+ \; \f 1 {2 C_t^2 C_{D}} \sum_{{\gamma} \in I_i \cup I_o} \Delta x_{{\gamma}} \|{\mathcal{Q}} \|^2_{\gamma}
\;\le\; C\left(C_t, {\mathcal{D}}\right) \sum_{{\gamma} \in I_i \cup I_o} \Delta x_{{\gamma}}^{-1} \| {\bf u}xh\|^2_{\gamma}
+\; \f 1 2 \left\|\sqrt{{\mathcal{D}}^{-1}({\bf U})}\, {\mathcal{Q}} \right\|^2_{\Omega(t)},
\end{align*}
where $0 < \alpha, \beta < 1$ are some parameters that will be determined later.
Collecting the terms containing ${\bf U}x$ on the inflow faces, namely $\Upsilon_7$ and the corresponding
terms in the estimates for $\Upsilon_3$ and $\Upsilon_5$, we use the penalty term to estimate on the left hand side of
\eqref{discrete_stability_2}
\[
\sum_{{\gamma} \in I_i} \lan \f {\lambda_U} 2 {\bf U}x, {\bf U}x \ran_{\gamma}
- \; 2 \alpha \sum_{{\gamma} \in I_i} \| {\bf U}x \|^2_{\gamma}
- \; \beta \sum_{{\gamma} \in I_i} \lan \lambda_U {\bf U}x, {\bf U}x \ran_{\gamma}
- \; \f 1 2 \sum_{{\gamma} \in I_i} \lan {\bf U}x \left|{\bf U} \cdot \bn \right|, {\bf U}x \ran_{\gamma}
\ge 0,
\]
which by Lemma~\ref{properties_lambda} can be shown to hold for the following choices of $\alpha$ and $\beta$:
\[
0 \; < \; \beta \; \le \; \f 1 {2 \sqrt{2} + 2}, \qquad 0 \; < \; \alpha \;\le\; \f {1/2 - \beta} {2\sqrt{2}}.
\]
Substituting the estimates above into~\rf{discrete_stability_2} we obtain the following inequality:
\begin{align}\label{discrete_stability_3}
& \f 1 2 \p_t \left\| \Xi \right\|_{\Omega_{2}}^2
+ \f 1 2 \p_t \left\| {\bf U}x \right\|^2_{\Omega(t)}
+ \f 1 2 \left\| \sqrt{{\mathcal{D}}^{-1}({\bf U})}\, {\mathcal{Q}}(t) \right\|^2_{\Omega(t)}
+ \f 1 2 \sum_{{\gamma} \in I_{lat}}\!\!\! \lan \jumpt{{\bf U}x}, \jumpt{{\bf U}x} \ran_{\gamma}
+ \sum_{{\gamma} \in I_{bot}} \lan \Xi + \f 1 2 ( {\bf U}x \cdot {\bf U}x ), \mathcal TU \cdot \bn \ran_{\gamma}
\nonumber\\
& \qquad \le \| \Xi\|^2_{{\Omega_{2}}}
+ \left\| {\bf U}x \right\|_{\Omega(t)}^2
+ \; C(C_t, {\Omega(t)}, \bF_U, z_b, \hat{\xi}, {\bf u}xh, \Delta x).
\end{align}
The claim of our theorem follows by adding~\eqref{EQ:Darcy:estimate} to~\eqref{discrete_stability_3}.
\framebox[\width]{\Huge{ }}\\
\section{Numerical results}\label{sec:numerical}
The numerical implementation is based on our FESTUNG framework \cite{FrankRAK2015,ReuterAWFK2016,JaustRASK2018} and, specifically, utilizes the setup detailed in the companion paper~\cite{ReuterRAK2018}.
We choose a two-dimensional (in a~vertical $xz$-slice) computational domain~$\Omega(t)\cup\tilde{\Omega} \subset\mathbb R^2$ with~$\Omega(t) \coloneqq (0,100)\times(z_b,\xi(t))$, $\tilde{\Omega} \coloneqq (0,100)\times(-5,z_b)$, time interval~$J=(0,10)$, and a~sloped interface between free flow and subsurface domains~$z_b(x^1) \coloneqq 0.005 x^1$, which has a~constant normal vector~$\mathbf{\nu} = \pm 1/\sqrt{1+0.005^2} \,[-0.005, 1]^T$.
For a~given free surface elevation~$\xi$ and horizontal velocity~$u$, one can derive matching analytical functions for~$\tilde h$ using interface condition~\eqref{transition_bc_2} and for~$w$ using continuity equation~\eqref{cont_eq} and interface condition~\eqref{transition_bc_1}.
Instead of \eqref{hydrostatic_surface_bc}, we use here non-homogeneous boundary conditions at the free surface to have more freedom in our choice for~$u$ resulting in the following analytical solution
\begin{align*}
\xi(t,x) &\coloneqq\;
5 + 0.003\,\sin(0.08\, x + 0.08\, t) \,,\\
u(t,\mathbf{x}) &\coloneqq\;
r(t,x) \big( \cos(0.1\, z) - \cos\left(0.1\, z_b(x)\right) \big)\,,\\
w(t,\mathbf{x}) &\coloneqq\;
n(t,\mathbf{x}) + \varepsilon(t,x) \,,\\
\tilde h(t,\mathbf{x}) &\coloneqq\;
\xi(t,x) + \big(\sin(0.3\, z) - \sin\left(0.3\, z_b(x)\right)\big)\, m(t,x)
\end{align*}
with diffusion coefficients~${\mathcal{D}} \coloneqq 0.05 \,I$, $\tilde{\mathcal{D}} \coloneqq 0.01\, I$. $n(t,\mathbf{x})$ is chosen so that~$\partial_{x} u + \partial_{z} w = 0$ in~$\Omega(t)$:
\begin{equation*}
n(t,\mathbf{x}) \,\coloneqq\;
-\partial_{x} r(t,x) \left( \frac{1}{0.1} \sin(0.1\, z) - z \cdot \cos\left(0.1 \,z_b(x) \right) \right)
- 0.1 \cdot 0.005\, \cdot r(t,x) \, z \cdot \sin\left(0.1\, z_b(x)\right)\,,
\end{equation*}
and $\varepsilon(t,x)$ shifts~$w$ to fulfill coupling condition~\eqref{transition_bc_2}, i.e.,
\begin{equation*}
\varepsilon(t,x) \,\coloneqq\; 0.01 \left( 0.005 \, \partial_{x}\tilde h\left(t,x,z_b(x)\right) - \partial_{z} \tilde h\left(t,x,z_b(x)\right) \right) - n\left(t,x,z_b(x)\right)\,.
\end{equation*}
Functions~$r(t,x),m(t,x)$ are used to increase the spatial variability in $x$-direction and to introduce a~time dependency.
Here, we use
\begin{equation*}
r(t,x)\,\coloneqq\; \sin(0.07\, x + 0.4\, t)
\quad\text{ and }\quad
m(t,x)\,\coloneqq\; \cos(0.07\, x + 0.07\, t) \,.
\end{equation*}
We prescribe Dirichlet boundary conditions for all unknowns and derive boundary data, right hand side functions, and initial data from the analytical solution.
Using this setup, we compute the solution for a~sequence of increasingly finer meshes with element sizes~$\Delta x_j$ and evaluate errors and estimated orders of convergence for any function~$c_{\Delta}$ by
\begin{equation*}
\mathrm{Err}(c) \,\coloneqq\; \|c_{\Delta_{j-1}} - c\|_{L^2(\Omega)}\,, \qquad\qquad
\mathrm{EOC}(c) \,\coloneqq\; \ln \left(\frac{\|c_{\Delta_{j-1}} - c\|_{L^2(\Omega)}}{\|c_{\Delta_{j}} - c\|_{L^2(\Omega)}} \right)\Bigg/ \ln \left(\frac{\Delta x_{j-1}}{\Delta x_j}\right)
\end{equation*}
and list those in~Table~\ref{tab:conv:coupled}. Following our analysis, we use approximations of polynomial order $2p$ for $h$ and $w$, whereas all other unknowns are approximated with order $p$.
\begin{table}[!ht]
\small
\setlength{\tabcolsep}{4pt}
\renewcommand{\arraystretch}{1.1}
\begin{tabular}{cccccccccccccc}
\toprule
$p$ & $j$ & Err($\xi$) & $\EOC{\xi}$ & Err($u$) & $\EOC{u}$ & Err($w$) & $\EOC{w}$ & Err($\tilde h$) & $\EOC{\tilde h}$ & Err($\tilde u$) & $\EOC{\tilde u}$ & Err($\tilde w$) & $\EOC{\tilde w}$\\
\midrule
& 0 & 2.47e-01 & --- & 9.63e-01 & --- & 2.40e-01 & --- & 4.60e+00 & --- & 3.95e-01 & --- & 1.47e+00 & --- \\
& 1 & 5.52e-02 & 2.16 & 2.16e-01 & 2.16 & 1.17e-01 & 1.03 & 1.53e+00 & 1.59 & 2.94e-01 & 0.43 & 7.65e-01 & 0.94 \\
1 & 2 & 1.43e-02 & 1.95 & 5.62e-02 & 1.94 & 5.85e-02 & 1.00 & 4.08e-01 & 1.90 & 2.12e-01 & 0.47 & 3.96e-01 & 0.95 \\
& 3 & 3.59e-03 & 1.99 & 1.62e-02 & 1.80 & 2.85e-02 & 1.04 & 9.83e-02 & 2.05 & 1.09e-01 & 0.95 & 1.88e-01 & 1.08 \\
& 4 & 9.02e-04 & 1.99 & 5.90e-03 & 1.46 & 1.41e-02 & 1.01 & 2.33e-02 & 2.08 & 5.42e-02 & 1.01 & 9.27e-02 & 1.02 \\
\midrule
& 0 & 1.38e-01 & --- & 1.25e-01 & --- & 4.35e-02 & --- & 1.60e+00 & --- & 2.82e-01 & --- & 5.15e-01 & --- \\
& 1 & 4.63e-02 & 1.57 & 3.49e-02 & 1.84 & 1.96e-02 & 1.15 & 2.24e-01 & 2.84 & 7.43e-02 & 1.93 & 1.64e-01 & 1.65 \\
2 & 2 & 9.02e-03 & 2.36 & 4.98e-03 & 2.81 & 4.44e-03 & 2.14 & 3.89e-02 & 2.52 & 2.28e-02 & 1.70 & 4.39e-02 & 1.90 \\
& 3 & 2.01e-03 & 2.17 & 7.02e-04 & 2.83 & 1.51e-03 & 1.56 & 5.41e-03 & 2.85 & 5.83e-03 & 1.97 & 8.96e-03 & 2.29 \\
& 4 & 4.69e-04 & 2.10 & 1.32e-04 & 2.41 & 6.81e-04 & 1.15 & 7.04e-04 & 2.94 & 1.47e-03 & 1.99 & 1.81e-03 & 2.31 \\
\bottomrule
\end{tabular}
\caption{$L^2(\Omega)$-errors and estimated orders of convergence (EOC) for the coupled problem. On the $j$th refinement level, we used $2^{j+1} \times 2^j$ elements and time step~$\Delta \tilde{t} = \frac{1}{5} \cdot 2^{-p} \cdot 4^{-j}$ for the subsurface problem and~$\Delta t = \frac{1}{50} \cdot 2^{-p} \cdot 4^{-j}$ for the free flow problem.}
\label{tab:conv:coupled}
\end{table}
\section{Conclusions}
Our stability analysis for the discrete formulation of the coupled hydrostatic/Darcy system motivated our choice of the transition condition for the hydrostatic pressure/hydraulic head. This transition condition includes a~special form of dynamic pressure -- modified to suit the specifics of the hydrostatic model used in the free surface flow system. Further investigations (involving numerical studies and possibly also experimental validations) of this interface condition might be needed to substantiate the physical validity of our choice.
\end{document} |
\begin{document}
\baselineskip 20pt
\title{Ascertaining the Uncertainty Relations via Quantum Correlations}
\author{Jun-Li Li, Kun Du, and
Cong-Feng Qiao\footnote{Corresponding author: qiaocf@ucas.ac.cn}\\[0.5cm]
\small School of Physics, University of Chinese Academy of Sciences \\
\small YuQuan Road 19A, Beijing 100049, China\\[0.2cm]
}
\date{}
\maketitle
\begin{abstract}
We propose a new scheme to express the uncertainty principle in form of
inequality of the bipartite correlation functions for a given multipartite
state, which provides an experimentally feasible and model-independent way
to verify various uncertainty and measurement disturbance relations. By
virtue of this scheme the implementation of experimental measurement on
the measurement disturbance relation to a variety of physical systems
becomes practical. The inequality in turn also imposes a constraint on the
strength of correlation, i.e. it determines the maximum value of the
correlation function for two-body system and a monogamy relation of the
bipartite correlation functions for multipartite system.
\end{abstract}
The uncertainty principle lies at the heart of quantum mechanics and is one
of the most fundamental features which distinguish it from the classical
mechanics. The original form, $p_1q_1\sim h$, stems from a heuristic
discussion of Heisenberg on Compton scattering \cite{Heisenberg-o} where
$p_1$, $q_1$ are the determinable precisions of position and momentum, $h$ is
the Planck constant. A generalization to arbitrary pairs of observables is $
\Delta {A} \Delta {B} \geq |\langle [A,B] \rangle|/2 $, where the standard
deviation is $\Delta {X} = (\langle X^2\rangle - \langle X \rangle^2)^{1/2}$,
$X =A\ \text{or}\ B$, $\langle \cdots \rangle$ stands for expectation value,
and the commutator is defined as $[A,B] \equiv AB - BA$. This is usually
called the Heisenberg-Robertson uncertainty relation \cite{Robertson}. A
stronger version is the Robertson-Schr\"odinger uncertainty relation
\cite{Schrodinger} which takes the form of $(\Delta {A})^2 (\Delta {B})^2
\geq (\langle \{A, B\} \rangle/2 - \langle A \rangle \langle B \rangle )^2 +
|\langle [A, B] \rangle|^2/4 $ where the anticommutator is defined as
$\{A,B\} \equiv AB + BA$.
Note that in the form involving standard deviations, the uncertainty
relation represents the property of the ensemble of arbitrary quantum state
in Hilbert space and does not concern with the specific measurements. Thus
such uncertainty relation is not related to the precision of measurement on
one observable and the disturbance to its conjugate.
If we assume $\epsilon(A)$ to be the precision of the measurement on $A$ and
$\eta(B)$ to be the disturbance of the same measurement on $B$, the
Heisenberg-type relation with regard to measurement and disturbance would
read
\begin{eqnarray}
\epsilon(A) \eta(B) \geq \frac{1}{2}| \langle [A, B] \rangle | \; . \label{H-MDR}
\end{eqnarray}
Recently, Ozawa found that this form of measurement disturbance
relation (MDR) (\ref{H-MDR}) is not a universal one, and a new MDR was
proposed \cite{Ozawa-operator}, which is thought to be generally valid,
i.e.
\begin{eqnarray}
\epsilon(A)\eta(B) + \epsilon(A)\Delta B + \Delta A \, \eta(B)
\geq \frac{1}{2} |\langle [A, B] \rangle|
\; . \label{O-MDR}
\end{eqnarray}
Eq.(\ref{O-MDR}) is of fundamental importance, for example, it leads to a
totally different accuracy limit $\epsilon(A)$ for non-disturbing
measurements ($\eta(B)=0$) compared to the Heisenberg-type MDR. In quantum
information science, the uncertainty principle in general is also crucial to
the security of certain protocols in quantum cryptography \cite{QKD-UP}, and
additionally, it plays an important role in the quantum metrology
\cite{quantum-metrology}.
Despite the importance of the uncertainty principle, only the uncertainty
relation in form of standard deviations has been well verified in various
situations, e.g., see \cite{Standard-uncertainty-exp} and the references
therein. Experiments concerning both Heisenberg-type and Ozawa's MDRs have
just been performed with neutrons \cite{MDR-Neutron} and photons
\cite{MDR-Photon}. For neutrons in a given polarization state, the error and
disturbance can be statistically determined based on a method proposed by
Ozawa \cite{Method-Ozawa}. In the photon experiment, the weak measurement
model introduced in \cite{MDR-Weak-values} was employed for the measurement.
Large samples of data are necessary due to the sensitivity to the measurement
strength of a weak measurement process which is used for gathering
information of the system prior to the actual measurement
\cite{Polarization-Weak-values}. The results of \cite{MDR-Neutron} and
\cite{MDR-Photon} exhibit the validity of Ozawa's MDR but not of the
Heisenberg-type one. Since the uncertainty principle limits our ultimate ability
to reduce noise when gaining information from the state of a physical
system, its experimental verification in various systems and different
measurement interactions is still an important subject.
Here in this work, we present such a general scheme from which both the
uncertainty relation and MDR turn to the forms involving only bipartite
correlation functions. In this formalism, whilst the uncertainty relation
becomes an inequality imposed on the correlation functions of bipartite
states, the different forms of MDRs transform into strong constraints on the
shareability (monogamy) of the bipartite correlations in multipartite state.
This directly relates the key element of quantum information, i.e., the
nonlocal correlation, with the fundamental principle of quantum mechanics,
i.e., uncertainty principle, in a quantitative way. And most importantly, it
enables us to test the MDRs in a variety of physical systems.
To test the validity of the various MDRs, one has to measure the physical
observable quantities for which the different MDRs exhibit distinct
responses. Here we present our method of constructing such quantities for
qubit systems. Although the generalization to arbitrary systems is not
trivial, the various MDRs have already shown the essential differences in
two-dimensional Hilbert spaces within our scheme. The qubit systems include
spin 1/2 particle, polarizations of photons, two level atoms, etc. For the
sake of convenience we take the measurable observables to be the spin
components. A measurement of spin along arbitrary vector $\vec{a}$ in three
dimensional Euclidean space can be represented by the following operator
\begin{eqnarray}
A = \vec{\sigma} \cdot \vec{a} = |\vec{a}| \vec{\sigma} \cdot \vec{n}_a
\; . \label{operator-def}
\end{eqnarray}
Here $\vec{\sigma} = (\sigma_x, \sigma_y, \sigma_z)$ are Pauli matrices,
$\vec{n}_a = \vec{a}/|\vec{a}|$, and a general commutative relation holds for
such operators
\begin{eqnarray}
[A, B] = 2iC \; , \label{basic-commutator}
\end{eqnarray}
where $B = \vec{\sigma}\cdot \vec{b}$, $C = \vec{\sigma} \cdot \vec{c}$,
$\vec{c} = \vec{a}\times \vec{b}$. Let $|n^{\pm}_p\rangle$ be the two
eigenvectors of operator $P = \vec{\sigma} \cdot \vec{n}_p$ with eigenvalues
$\pm1$, the following complete relations hold
\begin{eqnarray}
|n^+_p\rangle \langle n^+_p| + |n^-_p\rangle \langle n^-_p|= 1 \; , \;
|n^+_p \rangle \langle n^+_p| - |n^-_p \rangle \langle n^-_p| =
\vec{\sigma} \cdot \vec{n}_p = P \; . \label{complete-relation}
\end{eqnarray}
Here $\vec{n}_p$ is a unit vector, $|n_p^{\pm}\rangle \langle n_{p}^{\pm}|
\equiv P^{\pm}$ are the projection operators. Using the Schmidt
decomposition, any bipartite pure state is unitarily equivalent to the state
\cite{QIP-Book}: $|\psi_{12} \rangle = \alpha |+\rangle|+\rangle + \beta|-
\rangle|-\rangle$ where $|\alpha|^2 + |\beta|^2 =1$, and $\alpha\geq 0$,
$\beta\geq 0$. The correlation function between two operators $A$ and $B$
for arbitrary quantum state $|\psi\rangle$ is defined as $E(A_1,B_2) =
\langle \psi|A_{1}\otimes B_2|\psi\rangle$. Here the subscripts of $A$, $B$
stand for the corresponding partite which they are acting.
For the Robertson-Schr\"odinger uncertainty relation we have the following
theorem:
\begin{theorem}
The Robertson-Schr\"odinger uncertainty relation imply the following
inequality on the correlation functions of arbitrary bipartite quantum state
\begin{eqnarray}
\left| E(A_1, P_2)\vec{b} - E(B_1, P_2)\vec{a} \right|^2 +
\left|E(C_1, P_2) \right|^2 \leq S^2 \; , \nonumber
\end{eqnarray}
where $X_i = \vec{\sigma}_i \cdot\vec{x}$, $X = A,\ B,\ \text{or}\ C$,
$\vec{c} = \vec{a} \times \vec{b}$, $P_i=\vec{\sigma}_i \cdot \vec{n}_p$,
$\vec{n}_p$ is unit vector, $i=1,2$ denote the corresponding partite, $S$ is
the parallelogram area formed by $\vec{a}$, $\vec{b}$. \label{theorem-R-S}
\end{theorem}
This theorem indicates that the correlation functions between one specific
operator ($P$) and two other operators ($A$, $B$) and their commutator ($C$)
in bipartite states are constrained by the area of parallelogram formed with
$\vec{a}$ and $\vec{b}$. The maximal attainable value of the bipartite
correlation function is $E(A_1, A_2) = |\vec{a}|^2$ which is the area of a
square with length $|\vec{a}|$. A proof of this theorem is given in Appendix
A.
As for the MDR, it is a subtle problem in quantum theory. In order to detect
the influence (disturbance) on quantity $B$ introduced in measuring $A$, one
needs to measure $B$ before and after the measurement on $A$. If the initial
state is not $B$'s eigenstate, the acquisition of information on $B$ prior
to the measurement of $A$ will inevitably change the initial state and
make the subsequent measurement process irrelevant to the initial state. To
illustrate this, a simple measurement scheme is presented in
Fig.\ref{Fig-Measure-PDM} where the measurement is performed via the
interaction of the signal system $|\psi_{1}^{\pm}\rangle$ with a meter
system $|\psi_3\rangle$ \cite{MDR-Weak-values}.
\begin{figure}
\caption{Illustration of the detection of measurement precision and disturbance.
{\bf P}
\label{Fig-Measure-PDM}
\end{figure}
The Ozawa's precision and disturbance quantities in Eq.(\ref{O-MDR}) are
defined as \cite{Ozawa-operator}
\begin{eqnarray}
\epsilon(A)^2 & \equiv & \langle [U^{\dag}_{13}(I_1 \otimes M_3) U_{13} -
A_1\otimes I_3 ]^2 \rangle \; , \label{def-precision} \\
\eta(B)^2 & \equiv & \langle [U^{\dag}_{13}(B_1 \otimes I_3) U_{13} -
B_1\otimes I_3]^2 \rangle \; . \label{def-disturbance}
\end{eqnarray}
Here the expectation values in Eqs.(\ref{def-precision},
\ref{def-disturbance}) are evaluated with the same compound state
$|\psi_{1}\rangle |\psi_{3}\rangle$, where $|\psi_1\rangle$ can be
arbitrary, i.e., $|\psi_1^{\pm}\rangle$; $|\psi_3\rangle$ is the quantum
state of the measurement apparatus; $U_{13}$ is a unitary measurement
interaction. If the measurement process is carried out via spin dependent
interaction with a qubit state (partite 3) and regarding the measurement
read out of the spin of partite 3 to be the measurement result of the signal
state $|\psi_1\rangle$, we can have $M_3 \to A_3$. It is obvious that in
determining $\eta(B)$ (Eq.(\ref{def-disturbance})), we have to measure $B_1$
before and after the measurement interaction $U_{13}$.
Our procedure to settle the measurement problem under Ozawa's definitions
goes as follows. Suppose we want to measure the MDR with respect to any
given pair of spin components of $A_1 = \vec{\sigma}_1\cdot \vec{a}$ and
$B_1 = \vec{\sigma}_1 \cdot \vec{b}$ for arbitrary state $|\psi_1\rangle$.
This state can be prepared via the following entangled state
\begin{eqnarray}
|\psi_{12}^{(m)}\rangle = \frac{1}{\sqrt{2}}
\left( |+\rangle_c|-\rangle_{c} + (-1)^m |-\rangle_c|+\rangle_c \right) \; .
\end{eqnarray}
Here, $m\in \{0,1\}$; $\vec{c} = \vec{a} \times \vec{b}$ and
$|\pm\rangle_{c}$ are the spin eigenstates along $\vec{c}$ ($|\pm\rangle$
stand for the eigenstates along $z$ if not specified). Without loss of
generality, we can set the $\vec{a}$-$\vec{b}$ plane as $x$-$z$ plane then
$\vec{c}$ is along the $y$ axis
\begin{eqnarray}
|\psi_{12}^{(1)}\rangle & = & \frac{1}{\sqrt{2}}
\left( |+-\rangle - |-+\rangle \right) \; , \\
|\psi_{12}^{(0)}\rangle &=& \frac{1}{\sqrt{2}}
\left( |++\rangle + |--\rangle \right) \; .
\end{eqnarray}
$|\psi_{12}^{(m)}\rangle$ have the following property
\begin{eqnarray}
V_1\otimes V^{-1}_2|\psi_{12}^{(m)}\rangle =
(-1)^{m}|\psi_{12}^{(m)}\rangle \; , \; m \in\{0,1\} \; ,
\label{rotation-invariant-12}
\end{eqnarray}
where $V_i =\vec{\sigma}_i \cdot\vec{v}$ is an operator acting on the $i$th
partite and $\vec{v}$ is a unit vector in the $\vec{a}$-$\vec{b}$ (i.e.,
$x$-$z$) plane. With the definition of projection operators in
Eq.(\ref{complete-relation}), an arbitrary quantum state ($|\psi_1\rangle$)
of partite 1 can be obtained via a projective measurement {\bf P} on partite
2 (see Fig.\ref{Fig-Measure-PDM})
\begin{eqnarray}
|\psi_{1}^{\pm}\rangle & = &
\frac{_2\langle n_p^{\pm} |\psi_{12}^{(m)} \rangle}{|_2\langle n_p^{\pm}
|\psi_{12}^{(m)}\rangle|} \; .
\label{project-to-psi1}
\end{eqnarray}
Here in the present situation $|_2\langle n_p^{\pm}|\psi_{12}^{(m)}\rangle|
= 1/\sqrt{2}$ and the arbitrariness of $|\psi_{1}^{\pm}\rangle$ is
guaranteed by the arbitrariness of $\vec{n}_{p}$.
The measurement precision of quantity $A$ for quantum state
$|\psi_{1}^{\pm}\rangle$ and the corresponding disturbance on another
quantity $B$ now can be written as
\begin{eqnarray}
\epsilon^{\pm}(A)^2 = \langle \psi_3|\langle \psi_1^{\pm}
|\left[ U_{13}^{\dag}(I_1\otimes A_3)U_{13} -
A_1 \otimes I_3 \right]^2 |\psi_1^{\pm}\rangle |\psi_3\rangle \; , \\
\eta^{\pm}(B)^2 = \langle \psi_3|\langle \psi_1^{\pm}|\left[ U_{13}^{\dag} ( B_1
\otimes I_3 )U_{13} -
B_1 \otimes I_3 \right]^2 |\psi_1^{\pm}\rangle |\psi_3\rangle \; .
\end{eqnarray}
With these definitions, we can derive the following relation (see the
Appendix B)
\begin{eqnarray}
& & |\vec{a}|^2 + |\vec{b}|^2 - (-1)^m [E(A_2,A_3) + E(B_1, B_2)] \nonumber \\
& = & \frac{1}{4} \left[ \epsilon^+(A)^2 + \eta^+(B)^2 +
\epsilon^-(A)^2 + \eta^-(B)^2 \right] \; , \label{MDR-Correlation}
\end{eqnarray}
where the correlation function $E(X_i, X_j) = \langle \psi_{123}|X_i \otimes
X_j|\psi_{123}\rangle$, $X = A\ \text{or}\ B$, $|\psi_{123}\rangle \equiv
U_{13}|\psi_{12}^{(m)}\rangle |\psi_{3}\rangle$, $i,j \in \{1,2,3\}$, the
subscripts of operators stand for the corresponding partite which they are
acting. The precision and disturbance of the measurement now are directly
related to the bipartite correlation functions of a tripartite state.
Eq.(\ref{MDR-Correlation}) is universally valid regardless of the
measurement interaction $U_{13}$ which brings about the tripartite state.
For arbitrary given state $|\psi_1^{\pm}\rangle$, the Heisenberg-type and
Ozawa's MDRs read
\begin{eqnarray}
\epsilon^{\pm}(A)\eta^{\pm}(B) \geq \frac{1}{2}
|\langle \psi_1^{\pm}| [A,B] |\psi_{1}^{\pm}\rangle| \; , \label{Heisenberg-MDR-PD} \\
\epsilon^{\pm}(A)\eta^{\pm}(B) + \epsilon^{\pm}(A)\Delta^{\pm}(B) + \eta^{\pm}(B)
\Delta^{\pm}(A) \geq \frac{1}{2}|\langle \psi_{1}^{\pm} |[A, B]|\psi_{1}^{\pm} \rangle|
\; . \label{Ozawa-MDR-PD}
\end{eqnarray}
An intuitive view of the above equations shows that the allowed regions for
$\epsilon$ and $\eta$ lie above the hyperbolic curves of $\epsilon^{\pm}(A)$
and $\eta^{\pm}(B)$ in the quadrant I. The constraints
Eqs.(\ref{Heisenberg-MDR-PD},\ref{Ozawa-MDR-PD}) are then transferred to the
bipartite correlation functions via Eq.(\ref{MDR-Correlation}). Thus we have
the following theorem
\begin{theorem}
For $A=\vec{\sigma} \cdot \vec{a}$, $B = \vec{\sigma} \cdot \vec{b}$, a
tripartite state can be obtained by interacting one partite of
$|\psi_{12}^{(m)}\rangle$ with a third partite 3. The Heisenberg-type and
Ozawa's MDRs imply the following different relations on the resulted
tripartite state
\begin{eqnarray}
E(A_2, A_3) + E(B_1, B_2)
\leq |\vec{a}|^2 + |\vec{b}|^2 - \kappa_{h,o}|\vec{n}_p \cdot (\vec{a}\times\vec{b})| \; .
\label{upper-Heisenberg-Ozawa}
\end{eqnarray}
Here $E(X_i, X_j)$ are the bipartite correlation functions of the tripartite
state, $\kappa_{h} = 1$ and $ \kappa_o= (\sqrt{2}-1)^2$ for Heisenberg-type
and Ozawa's MDR respectively, $\vec{n}_p$ is an arbitrary unit vector.
\label{Theorem-Heisenberg-Ozawa}
\end{theorem}
The proof of Theorem \ref{Theorem-Heisenberg-Ozawa} is presented in
Appendix C. From Theorem \ref{theorem-R-S} we know that $|\vec{a}|^2$
and $|\vec{b}|^2$ are the maximum values of $E(A_2, A_3)$ and $E(B_1,
B_2)$ in bipartite states. Now due to Theorem
\ref{Theorem-Heisenberg-Ozawa} the maximum of the sum of the two
bipartite correlations in the tripartite state is reduced by an
amount proportional to the volume of the parallelepiped with edges
$\vec{a}$, $\vec{b}$, and $\vec{n}_p$.
The experiments to test the validity of the MDRs become straightforward due
to Theorem \ref{Theorem-Heisenberg-Ozawa}. Here we present an example of the
measurement model of qubit system with the measurement interaction $U_{13}$
being the CNOT gate \cite{MDR-Weak-values} within our method. Suppose we
want to measure the precision of $Z = \sigma_z$ and the disturbance on $X =
\sigma_x$ for an arbitrary qubit state $|\psi_{1}\rangle$. Following Theorem
\ref{Theorem-Heisenberg-Ozawa}, on choosing $|\psi_{12}^{(0)}\rangle =
\frac{1}{\sqrt{2}}(|++\rangle + |--\rangle)$, the measurement interaction
CNOT gate between one partite of $|\psi_{12}^{(0)}\rangle$ and the meter
system $|\psi_3\rangle = \cos\theta_3|+\rangle + \sin\theta_3|-\rangle$ will
lead to the following tripartite state
\begin{eqnarray}
|\psi_{123}\rangle & = &
\frac{1}{\sqrt{2}}[|++\rangle(\cos\theta_3|+\rangle + \sin\theta_3|-\rangle) + \nonumber \\
& & \hspace{0.8cm} |--\rangle (\cos\theta_3|-\rangle + \sin\theta_3|+\rangle)] \; .
\end{eqnarray}
According to Theorem \ref{Theorem-Heisenberg-Ozawa}, the Heisenberg-type and
Ozawa's MDRs impose the following constraints on the bipartite correlation
functions of $|\psi_{123}\rangle$
\begin{eqnarray}
\text{Heisenberg-type MDR: }\; E(Z_2,Z_3) + E(X_1,X_2)
& \leq & 2-|\cos\theta_p| \; , \\
\text{Ozawa's MDR: }\; E(Z_2, Z_3) + E( X_1, X_2)
& \leq & 2-(\sqrt{2}-1)^2|\cos\theta_p| \; ,
\end{eqnarray}
for arbitrary $\theta_p$, the angle between $\vec{n}_p$ and $\vec{c}$. The
tightest bound happens when $\theta_p=0$. Thus a measurement of bipartite
correlation function of $E(Z_2, Z_3)$, $E(X_1, X_2)$ in the tripartite state
would be capable to verify the Heisenberg-type and Ozawa's MDR (see
Fig.\ref{Fig-violation}). That is the Heisenberg-type MDR will be violated
provided that the experimental result agrees with the solid line of $E(Z_2,
Z_3) + E(X_1, X_2)$ in Fig.\ref{Fig-violation}.
\begin{figure}
\caption{The demonstration
of Heisenberg-type and Ozawa's MDR with measurement precision of $A = Z$
and its disturbance on $B = X$. Here $K_{H,O}
\label{Fig-violation}
\end{figure}
From the above example, the procedure of our scheme can be summarized as:
(1) prepare a bipartite entangled state, (2) interact one partite of the
entangled state with a third partite, and (3) measure the bipartite
correlation functions of the resulted tripartite state. The generation of
the bipartite entangled state has already been realized in various systems,
e.g. photons \cite{photon-1,photon-2}, atoms
\cite{atom-entanglement,atom-RMP}, and high energy particles
\cite{high-energy,high-energy-2}. The further interaction of one partite of
the entangled state with a third partite can also be arbitrary, i.e.,
elastic or inelastic collisions, or via optical cavities, etc. More
importantly, we need only to measure the bipartite correlation functions of
the obtained tripartite state rather than the measurement precision and
disturbance, which may not be easy to quantify for some types of measurement
interactions. Hence, our scheme could be applied to a large number of
systems in the verification of the MDRs.
In addition to a clear discrimination between the Heisenberg-type and
Ozawa's MDRs, a more important physical consequence of the Theorem
\ref{Theorem-Heisenberg-Ozawa} is that it reveals a monogamy relation on
Bell correlations \cite{Bell-inequality, CHSH-monogamy, Bell-monogamy} in
the tripartite entangled state. According to the Theorem
\ref{Theorem-Heisenberg-Ozawa}, when measuring the precision of $B$ and the
disturbance it imposes on $A$, we will have
\begin{eqnarray}
|E(B_2, B_3) + E( A_1, A_2)|
\leq |\vec{a}|^2 + |\vec{b}|^2 - \kappa_{h,o}|\vec{n}_p \cdot (\vec{a}\times\vec{b})|
\; . \label{upper-Heisenberg2}
\end{eqnarray}
Introducing two new vectors $\vec{a}\,' = \frac{1}{2}(\vec{a} + \vec{b})$ ,
$\vec{b}' = \frac{1}{2}(\vec{b} - \vec{a})$, we can similarly define $A' =
\vec{\sigma} \cdot \vec{a}'$, $B' = \vec{\sigma} \cdot \vec{b}'$. Following
the definition of correlation function in Eq.(\ref{MDR-Correlation}), we can
get
\begin{eqnarray}
E(A_i, A_j) & = & E(A_i, A'_j) - E(A_i,B'_j) \; ,\label{Bell-tran1} \\
E(B_i, B_j) & = & E(B_i, A'_j) + E(B_i, B'_j) \; . \label{Bell-tran2}
\end{eqnarray}
Adding Eq.(\ref{upper-Heisenberg-Ozawa}) and Eq.(\ref{upper-Heisenberg2}),
and taking Eqs.(\ref{Bell-tran1},\ref{Bell-tran2}), we have
\begin{eqnarray}
& & \left| E(A_2,A'_3) - E(A_2,B'_3) + E(B_2, A'_{3})+ E(B_2, B'_{3}) + \right. \nonumber \\
& & \left. \hspace{0.1cm} E(A_1, A'_2) - E(A_1, B'_2) +
E(B_1, A'_{2}) + E(B_1, B'_{2}) \hspace{0.2cm}\right|
\leq 2K_{H,O} \; , \label{Bell-monogamy}
\end{eqnarray}
where $K_{H,O}=|\vec{a}|^2 + |\vec{b}|^2 - \kappa_{h,o}|\vec{n}_p \cdot
(\vec{a}\times\vec{b})|$. When $|\vec{a}|=|\vec{b}|=1$, $\vec{a}
\perp\vec{b}$, Eq.(\ref{Bell-monogamy}) leads to the sum of two particular
CHSH type correlations \cite{CHSH}
\begin{eqnarray}
\left|B_{\text{CHSH}}^{(23)} + B_{\text{CHSH}}^{(12)}\right| \leq 2\sqrt{2} K_{H,O} \; .
\end{eqnarray}
Here $B_{\text{CHSH}}^{(ij)} = E(A_i,A'_j) - E(A_i,B'_j) + E(B_i,A'_j) +
E(B_i,B'_j)$. The tightest bound also happens when $\theta_p=0$, which leads
to the following
\begin{eqnarray}
\text{Heisenberg-type MDR: }\;
\left|B_{\text{CHSH}}^{(23)} + B_{\text{CHSH}}^{(12)}\right| & \leq & 2\sqrt{2} \; , \\
\text{Ozawa's MDR: }\;
\left|B_{\text{CHSH}}^{(23)} + B_{\text{CHSH}}^{(12)}\right| & \leq & 2\sqrt{2}(2\sqrt{2}-1)
\; .
\end{eqnarray}
The above monogamy relations on quantum nonlocality are direct results of
the MDRs according to our theorem. Note, there are also discussions in the
literature on Bell correlations based on the entropic measures of
uncertainty relation \cite{UP-determine-nonlocal, Complementary-monogamy}.
It should be noted that the definitions of measurement precision and
disturbance in Eqs.(\ref{def-precision},\ref{def-disturbance}) by Ozawa
involve the comparisons of the same physical observable before and after the
measurement, and are thus based on practical physical motivations. However, the exact
definitions that capture the full physical contents of the measurement error
and disturbance are still under study \cite{Busch-1, Dressel,Busch-2}.
Nevertheless, Ozawa's definitions and the resulted MDRs may be regarded as
one of the best attempts to capture the quantitative descriptions of the
measurement and its back action in quantum mechanics. The method we
presented just provides a powerful tool to study the physical consequences
of the MDRs which is meaningful in judging their usefulness. For example,
our method transforms the MDRs into inequalities of correlation functions of
tripartite entangled state. In this way the importance of the MDRs manifests
in their connections with the quantum entanglement which is a key physical
resource in quantum information science and has a close relation with
quantum metrology \cite{quantum-metrology}. Meanwhile, in principle the idea
of our scheme may also be applied to other definitions of the error and
disturbance. This would enable the method to examine the meaningfulness of
the variant definitions.
In conclusion, we proposed in this work a general scheme to express the
uncertainty principle in terms of bipartite correlation functions, by which
the essential differences between the MDRs are characterized by the
inequalities constraining the correlation functions of multipartite state.
This not only builds a bridge between the MDRs and the quantum entanglement
but also provides a way to study the direct physical consequences of such
fundamental relations. The resulted inequalities reveal that both the
strength and the shareability (monogamy) of the quantum correlation are
determined by the uncertainty principle. Further studies on the uncertainty
relation and MDRs with, e.g., atoms, ions, or even high energy particles
become possible due to our scheme. The connections between MDRs and
entanglement revealed in our scheme may also shed new light on the
studies of the relations between the MDRs and the quantum cryptography,
quantum metrology, etc.
Note: after the completion of the manuscript, there has been some progress
in the study of MDRs, i.e., \cite{Weston-Pryde}, \cite{Branciard}, etc. Our
method may apply to such cases as well and these MDRs would also give
distinct constraints on quantum correlations \cite{Inprogress}.
\noindent {\bf Acknowledgments}
This work was supported in part by the National Natural Science Foundation
of China(NSFC) under the grants 10935012, 11121092, 11175249 and 11205239.
\appendix{\noindent {\bf\Large Appendix}}
\section{Proof of theorem \ref{theorem-R-S}} \label{appendix-1}
Proof of the equation of theorem \ref{theorem-R-S}:
\begin{eqnarray}
\left| E(A_1, P_2)\vec{b} - E(B_1, P_2)\vec{a} \right|^2 +
\left|E(C_1, P_2) \right|^2 \leq S^2 \; . \nonumber
\end{eqnarray}
\noindent {\bf Proof}: Following the definition of the standard
deviation, the Robertson-Schr\"odinger uncertainty relation takes the
following form
\begin{eqnarray}
(\langle A^2\rangle - \langle A\rangle^2) (\langle
B^2\rangle - \langle B\rangle^2) \geq \left( \frac{1}{2}\langle
AB + BA \rangle -
\langle A\rangle \langle B \rangle \right)^2 +
\frac{1}{4} \left| \langle [A, B] \rangle \right|^2 \; .
\label{R-S-Relation-1}
\end{eqnarray}
With the definition of operators as in Eq.(\ref{operator-def}) and the
basic commutator Eq.(\ref{basic-commutator}), Eq.(\ref{R-S-Relation-1}) can
be written as
\begin{eqnarray}
|\vec{a}|^2|\vec{b}|^2 - \langle A \rangle^2 |\vec{b}|^2 -
\langle B \rangle^2 |\vec{a}|^2 & \geq &
(\vec{a} \cdot \vec{b})^2 - 2( \vec{a} \cdot \vec{b})
\langle A \rangle \langle B\rangle + \langle C \rangle^2\;. \nonumber
\end{eqnarray}
After rearranging the terms, we have
\begin{eqnarray}
|\langle A\rangle \vec{b} -
\langle B \rangle \vec{a}|^2 + \langle C\rangle^2 \leq
|\vec{a}|^2| \vec{b}|^2 - (\vec{a} \cdot \vec{b})^2 = S^2
\;. \nonumber
\end{eqnarray}
The right-hand side of the inequality is just the determinant of the Gram
matrix of the vectors $\vec{a}$, $\vec{b}$, which is the square of the
area of the parallelogram formed by $\vec{a}$ and $\vec{b}$. The expectation
value is evaluated for a certain quantum state, which can be prepared by
projecting one party of the bipartite entangled state onto a specific
quantum state. For example, for the entangled state $|\psi_{12}
\rangle = \alpha |+\rangle_1|+\rangle_2 +
\beta|-\rangle_1|-\rangle_2$, by projecting the partite 2 onto a
specific state $|n_p^{+}\rangle_2 = \cos\frac{\theta}{2}|+\rangle +
e^{i\phi} \sin\frac{\theta}{2}|-\rangle$ (eigenstate of
$\vec{\sigma}_2 \cdot \vec{n}_{p}$ where $\vec{n}_p =
(\sin\theta\cos\phi, \sin\theta\sin\phi, \cos\theta)$), we can get
arbitrary quantum state $|\psi_1^+\rangle$
\begin{eqnarray}
|\psi_1^+\rangle = \frac{ _2\langle n_p^+|\psi_{12}\rangle}{|\, _2\langle n_p^+ |
\psi_{12}\rangle|} = \frac{1}{|\, _2\langle n_p^+|\psi_{12}\rangle|}
\left( \alpha \cos\frac{\theta}{2}| + \rangle + e^{-i\phi} \beta
\sin \frac{ \theta}{2}|-\rangle \right)\; . \label{projector-bipartite}
\end{eqnarray}
Similar expression holds for $|\psi_1^-\rangle$ when projecting with
$|n_p^-\rangle_2$. The uncertainty relation holds for arbitrary state, so
for $|\psi_1^{\pm}\rangle$
\begin{eqnarray}
& & |\langle A\rangle \vec{b} -
\langle B \rangle \vec{a}|^2 + \langle C\rangle^2 \leq S^2 \nonumber \\
& \Rightarrow & |\langle \psi_1^{\pm}| A_1 |\psi_1^{\pm} \rangle \vec{b} -
\langle \psi_1^{\pm}| B_1|\psi_1^{\pm} \rangle \vec{a} |^2 +
\langle \psi_1^{\pm}| C_1| \psi_1^{\pm} \rangle^2 \leq S^2 \; . \label{Sch-pm}
\end{eqnarray}
Here the subscript $1$ stands for party 1. Multiplying Eq.(\ref{Sch-pm}) by $|\,
_2\langle n_p^{\pm}|\psi_{12}\rangle|^2$ with
the corresponding superscript $\pm$ and adding the two inequalities,
we have
\begin{eqnarray}
|_2\langle n_p^{+}|\psi_{12}\rangle|^2
|\langle \psi_1^+|A_1 |\psi_1^+\rangle \vec{b} -
\langle \psi_1^+| B_1 |\psi_1^+\rangle \vec{a} |^2 +
| _2\langle n_p^{+}| \psi_{12}\rangle|^2 \langle \psi_1^+| C_1 | \psi_1^+ \rangle^2 + & & \nonumber \\
|_2\langle n_p^{-}|\psi_{12}\rangle|^2 |\langle \psi_1^-| A_1 | \psi_1^-\rangle \vec{b} -
\langle \psi_1^-| B_1 |\psi_1^-\rangle \vec{a}|^2 +
| _2\langle n_p^{-}|\psi_{12}\rangle|^2 \langle \psi_1^-| C_1 | \psi_1^- \rangle^2
& \leq & S^2 \; .
\end{eqnarray}
With the Cauchy--Schwarz inequality $\sum_{i}p_i\sum_{i}p_i a_i^2 \geq
(\sum_{i}p_ia_i)^2$, Eq.(\ref{projector-bipartite}), and the
following relation
\begin{eqnarray}
& & |_2\langle n_p^+|\psi_{12}\rangle|^2 |\langle \psi_1^+| A_1 |\psi_1^+\rangle| +
|_2\langle n_p^-|\psi_{12}\rangle|^2 |\langle \psi_1^-| A_1 |\psi_1^-\rangle| \nonumber \\
& = & |\langle \psi_{12}|A_1 \otimes |n_p^+\rangle_2\langle n_p^+||\psi_{12} \rangle| +
|\langle \psi_{12}|A_1 \otimes |n_p^-\rangle_2 \langle n_p^-||\psi_{12}\rangle| \nonumber \\
& \geq & \left|\langle \psi_{12}| A_1 \otimes |n_p^+\rangle_2 \langle n_p^+||\psi_{12}\rangle -
\langle \psi_{12}| A_1 \otimes |n_p^-\rangle_2 \langle n_p^-||\psi_{12}\rangle \right| \nonumber \\
& = & \left|\langle \psi_{12}| A_1 \otimes (|n_p^+\rangle_2 \langle n_p^+| - |n_p^-\rangle_2\langle n_p^-|)
|\psi_{12}\rangle \right| \nonumber \\
& = & \left|\langle \psi_{12}| A_1 \otimes P_2|\psi_{12}\rangle \right|
= \left|E(A_1, P_2)\right| \; ,
\end{eqnarray}
we can get
\begin{eqnarray}
\left| E(A_1, P_2)\vec{b} - E(B_1, P_2)\vec{a} \right|^2 +
\left| E(C_1, P_2) \right|^2 \leq S^2 \; .
\end{eqnarray}
Q.E.D.
\section{Proof of Eq.(\ref{MDR-Correlation})} \label{appendix-2}
Proof of Eq.(\ref{MDR-Correlation}):
\begin{eqnarray}
& & |\vec{a}|^2 + |\vec{b}|^2 - (-1)^m [E(A_2,A_3) + E(B_1, B_2)] \nonumber \\
& = & \frac{1}{4} \left[ \epsilon^+(A)^2 + \eta^+(B)^2 +
\epsilon^-(A)^2 + \eta^-(B)^2 \right]
\; . \nonumber
\end{eqnarray}
\noindent {\bf Proof}: For the particular state $|\psi_1^{\pm}\rangle$,
taking the definitions of Eq.(\ref{project-to-psi1}), the measurement
precisions become
\begin{eqnarray}
|_2\langle n_p^{\pm}|\psi_{12}^{(m)} \rangle|^2\epsilon^{\pm}(A)^2 =
\langle \psi_3|\langle \psi_{12}^{(m)}| P_{2}^{\pm}
\left[ U_{13}^{\dag} (I_1 \otimes I_2 \otimes A_3) U_{13} -
A_1 \otimes I_2 \otimes I_3 \right]^2
P_{2}^{\pm}|\psi_{12}^{(m)}\rangle|\psi_3\rangle \; . \nonumber
\end{eqnarray}
The corresponding disturbances are
\begin{eqnarray}
|_2\langle n_p^{\pm}|\psi_{12}^{(m)} \rangle|^2 \eta^{\pm}(B)^2 =
\langle \psi_3|\langle \psi_{12}^{(m)}| P_{2}^{\pm}
\left[ U_{13}^{\dag} (B_1 \otimes I_2 \otimes I_3) U_{13} - B_1
\otimes I_2 \otimes I_3 \right]^2
P_{2}^{\pm}|\psi_{12}^{(m)}\rangle|\psi_3\rangle \; . \nonumber
\end{eqnarray}
Using the completeness relation of the projection operators, the summation of
the precision and disturbance for $|\psi_1^+\rangle$ and
$|\psi_1^-\rangle$ gives
\begin{eqnarray}
& & |\alpha_m|^2 \epsilon^+(A)^2 +
|\beta_m|^2 \epsilon^-(A)^2 \nonumber \\
& = & \langle \psi_{3}|\langle \psi_{12}^{(m)}| \left[ U_{13}^{\dag}
(I_1\otimes I_2\otimes A_3) U_{13} -
A_1\otimes I_2 \otimes I_3 \right]^2 |\psi_{12}^{(m)}\rangle |\psi_{3}\rangle \; , \\
& & |\alpha_m|^2 \eta^+(B)^2 +
|\beta_m|^2 \eta^-(B)^2 \nonumber \\
& = & \langle \psi_{3}| \langle \psi_{12}^{(m)}| \left[ U_{13}^{\dag}
( B_1 \otimes I_2\otimes I_3) U_{13} -
B_1\otimes I_2 \otimes I_3 \right]^2 |\psi_{12}^{(m)}\rangle |\psi_{3}\rangle \; ,
\end{eqnarray}
where $\alpha_m\equiv \ _2\langle n_p^+|\psi_{12}^{(m)}\rangle$, $\beta_{m}
\equiv \ _2\langle n_p^-|\psi_{12}^{(m)}\rangle$ and $|\alpha_m|^2 +
|\beta_m|^2=1$. Due to the properties of Eq.(\ref{rotation-invariant-12}),
we have
\begin{eqnarray}
& & |\alpha_m|^2 \epsilon^+(A)^2 + |\beta_m|^2 \epsilon^-(A)^2 \nonumber \\
& = & \langle \psi_{3}|\langle \psi_{12}^{(m)}| \left[ U_{13}^{\dag}
(I_1\otimes I_2\otimes A_3) U_{13} - (-1)^m
I_1 \otimes A_2 \otimes I_3 \right]^2 |\psi_{12}^{(m)}\rangle |\psi_{3}\rangle \; , \\
& & |\alpha_m|^2 \eta^+(B)^2 +
|\beta_m|^2 \eta^-(B)^2 \nonumber \\
& = & \langle \psi_{3}| \langle \psi_{12}^{(m)}| \left[ U_{13}^{\dag}
( B_1 \otimes I_2\otimes I_3) U_{13}
-(-1)^m I_1\otimes B_2 \otimes I_3 \right]^2 |\psi_{12}^{(m)} \rangle |\psi_{3}\rangle \; .
\end{eqnarray}
The measurement interaction only involves particles $1$ and $3$; thus it
commutes with operators acting on party 2, so we have
\begin{eqnarray}
& & |\alpha_m|^2 \epsilon^+(A)^2 + |\beta_m|^2 \epsilon^-(A)^2 \nonumber \\
& = & \langle \psi_{3}|\langle \psi_{12}^{(m)}| U_{13}^{\dag}
\left(I_1\otimes I_2\otimes A_3 - (-1)^m
I_1 \otimes A_2 \otimes I_3 \right)^2
U_{13}|\psi_{12}^{(m)}\rangle |\psi_{3}\rangle \; , \label{pre-permute} \\
& & |\alpha_m|^2 \eta^+(B)^2 + |\beta_m|^2 \eta^-(B)^2 \nonumber \\
& = & \langle \psi_{3}| \langle \psi_{12}^{(m)}| U_{13}^{\dag}
\left( B_1 \otimes I_2\otimes I_3
-(-1)^m I_1\otimes B_2 \otimes I_3 \right)^2
U_{13} |\psi_{12}^{(m)} \rangle |\psi_{3}\rangle \; . \label{dis-permute}
\end{eqnarray}
Define $|\psi_{123}\rangle \equiv U_{13}|\psi_{12}^{(m)} \rangle
|\psi_3\rangle$, Eqs.(\ref{pre-permute},\ref{dis-permute}) turn to
\begin{eqnarray}
|\alpha_m|^2\epsilon^+(A)^2 + |\beta_{m}|^2\epsilon^-(A)^2
& = & \langle \psi_{123}| \left(A_3 - (-1)^{m} A_2 \right)^2
|\psi_{123}\rangle \; , \\
|\alpha_m|^2 \eta^+(B)^2 + |\beta_m|^2 \eta^-(B)^2
& = & \langle \psi_{123}| \left( B_1 - (-1)^{m} B_2\right)^2 |\psi_{123}\rangle\; .
\end{eqnarray}
From the definition of operators $A=\vec{\sigma}\cdot \vec{a}$, $B =
\vec{\sigma}\cdot \vec{b}$, and the wave function $|\psi_{12}^{(m)}\rangle$
we have chosen (this gives $|\alpha_m|^2 = |\beta_m|^2 = 1/2$), the above
equations reduce to
\begin{eqnarray}
\frac{1}{2} \left[\epsilon^+(A)^2 + \epsilon^-(A)^2\right] =
2|\vec{a}|^2 - (-1)^{m}2E(A_2,A_3) \; , \\
\frac{1}{2} \left[\eta^+(B)^2 + \eta^-(B)^2 \right] =
2|\vec{b}|^2 -(-1)^{m} 2 E(B_1,B_2) \; . \label{precision-disturbance}
\end{eqnarray}
This gives the relation Eq.(\ref{MDR-Correlation}). Q.E.D.
\section{Proof of Theorem \ref{Theorem-Heisenberg-Ozawa}} \label{appendix-3}
\noindent {\bf Proof}: Here we present the proof for $m=0$, the case of
$m=1$ can be derived similarly. For the Heisenberg-type MDR, taking $[A,B]
=2iC$ we have
\begin{eqnarray}
\epsilon^{+}(A)\eta^+(B) \geq |\langle \psi_1^+|C |\psi_1^+\rangle| \; , \;
\epsilon^{-}(A)\eta^-(B) \geq |\langle \psi_1^-|C |\psi_1^-\rangle|\; . \nonumber
\end{eqnarray}
These hyperbolic-form constraints on $\epsilon(A)$ and $\eta(B)$ with given
asymptotes are totally characterized by the distances from the vertices to
the origin of the coordinates. That is, the essence of the above
inequalities is characterized by
\begin{eqnarray}
\epsilon^{+}(A)^2 + \eta^+(B)^2 \geq 2 |\langle \psi_1^+|C|\psi_1^+\rangle| \; ,
\; \epsilon^{-}(A)^2 + \eta^-(B)^2 \geq 2 |\langle \psi_1^-|C|\psi_1^-\rangle| \; . \nonumber
\end{eqnarray}
The summation of the above two inequalities gives
\begin{eqnarray}
\epsilon^{+}(A)^2 + \eta^+(B)^2 + \epsilon^{-}(A)^2 + \eta^-(B)^2 \geq
2 (|\langle \psi_1^+|C|\psi_1^+\rangle| + |\langle \psi_1^-|C|\psi_1^-\rangle|) \; .
\end{eqnarray}
The left-hand side of the above inequality can be represented as correlation
functions via Eq.(\ref{MDR-Correlation}). The right-hand side of the
inequality can be written as
\begin{eqnarray}
& & (|\langle \psi_1^+| C_1 |\psi_{1}^+\rangle| +
|\langle \psi_1^-| C_1 |\psi_{1}^-\rangle| ) \nonumber \\
& = & 2 \left( \left|\langle \psi_{12}^{(0)}| C_1 \otimes P_2^+|\psi_{12}^{(0)}\rangle \right| +
\left| \langle \psi_{12}^{(0)}| C_1 \otimes P_2^-|\psi_{12}^{(0)}\rangle \right|\right)
\nonumber \\ & \geq & 2 \left| \langle \psi_{12}^{(0)}|C_1 \otimes P_2^+|\psi_{12}^{(0)}\rangle -
\langle \psi_{12}^{(0)}|C_{1} \otimes P_2^-|\psi_{12}^{(0)}\rangle \right| \nonumber \\
& = & 2 \left| \langle \psi_{12}^{(0)}|C_1 \otimes P_2|\psi_{12}^{(0)}\rangle \right|
\equiv 2 |E_{12}(C_1, P_2)| \; ,
\end{eqnarray}
where we have used Eq.(\ref{project-to-psi1}) and $P_2^{\pm} =
|n_p^{\pm}\rangle_2\langle n_p^{\pm}|$. It is clear that the essence of the
Heisenberg-type MDR, combining Eq.(\ref{MDR-Correlation}) and
Eq.(\ref{Heisenberg-MDR-PD}), is characterized by the following inequality
\begin{eqnarray}
E(A_2,A_3) + E(B_1,B_2) + |E_{12}(C_1,P_2)|
\leq |\vec{a}|^2 + |\vec{b}|^2\; . \label{choose-P2}
\end{eqnarray}
Here the bipartite correlation function $E_{12}$ is written with the
subscript explicitly. Eq.(\ref{choose-P2}) must be satisfied for any
given $P_2$:
\begin{eqnarray}
E(A_2,A_3) + E(B_1,B_2)
\leq |\vec{a}|^2 + |\vec{b}|^2 - |\vec{n}_p \cdot \vec{c}| \; .
\end{eqnarray}
This is just the Heisenberg upper bound for the correlations and its lower
limit is 0 for $m=0$.
From Ozawa's MDR, we have
\begin{eqnarray}
& & \epsilon^{\pm}(A) \eta^{\pm}(B)+ \epsilon^{\pm}(A)\Delta^{\pm}(B) +
\eta^{\pm}(B) \Delta^{\pm}(A)\geq
|\langle \psi_1^{\pm} |C_1|\psi_{1}^{\pm}\rangle| \nonumber \\
& \Rightarrow & \left[\epsilon^{\pm}(A) + \Delta^{\pm}(B) \right]
\left[\eta^{\pm}(B)+ \Delta^{\pm}(A)\right] \geq
|\langle \psi_1^{\pm} |C_1|\psi_{1}^{\pm}\rangle| + \Delta^{\pm}(A)\Delta^{\pm}(B)\; , \nonumber
\end{eqnarray}
where $\Delta^{\pm}(A,B)$ are the standard deviations evaluated with
$|\psi_1^{\pm}\rangle$. We see that Ozawa's MDR is just a displaced
hyperbolic curve compared with the Heisenberg-type MDR. The characteristic
distance of its vertices to the origin can be formulated as
\begin{eqnarray}
\epsilon^{\pm}(A)^2 + \eta^{\pm}(B)^2 \geq f[\Delta^{\pm}(A),\Delta^{\pm}(B),
|\langle \psi_1^{\pm} |C_1|\psi_{1}^{\pm}\rangle|] \; ,
\end{eqnarray}
where $f$ is a function of $\Delta(A)$, $\Delta(B)$ and $|\langle
C\rangle|$. In order to make this inequality universally valid, the left-hand
side has to be greater than or equal to the maximum value of the right-hand
side. The function $f$ attains its maximum value of $(2-\sqrt{2})^2|\langle
\psi_1^{\pm} |C_1|\psi_{1}^{\pm}\rangle|$ at $\Delta^{\pm}(A)^2 =
\Delta^{\pm}(B)^2 = |\langle \psi_1^{\pm} |C_1|\psi_{1}^{\pm}\rangle| $.
Similarly to the case of the Heisenberg-type MDR, we get
\begin{eqnarray}
& & (|\vec{a}|^2 + |\vec{b}|^2) - \left[ E(A_2,A_3) + E(B_1, B_2) \right] \nonumber \\
& = & \frac{1}{4}( \epsilon^{+}(A)^2 + \eta^{+}(B)^2 + \epsilon^{-}(A)^2 + \eta^{-}(B)^2)
\nonumber \\
& \geq & \frac{1}{2}(\sqrt{2}-1)^2 (|\langle \psi_1^+|C_1|\psi_1^+\rangle| +
|\langle \psi_1^-|C_1|\psi_1^-\rangle|) \nonumber \\
& \geq & (\sqrt{2}-1)^2 |E_{12}(C_1, P_2)|\; .
\end{eqnarray}
Thus the essence of Ozawa's MDR is characterized by the following
inequality
\begin{eqnarray}
E(A_2, A_3) + E(B_1, B_2) \leq |\vec{a}|^2 + |\vec{b}|^2 -
(\sqrt{2}-1)^2|\vec{n}_p \cdot \vec{c}| \; . \label{upper-Ozawa}
\end{eqnarray}
It should be noted here that the above constraint on correlations has no
lower limit because the MDRs (both Heisenberg-type and Ozawa's) do not
specify the upper limits. In qubit systems, the upper bounds for the
measurement precision and disturbance of the observables may be obtained
from the finite spectra of the observable operators. Q.E.D.
\end{document}
\begin{document}
\title{Quantum Erasure: Quantum Interference Revisited\footnote{Unedited and unillustrated version of ``Quantum Erasure", \textit{American Scientist} \textbf{91} 336-343 (2003).}}
\author{Stephen P. Walborn}
\email[]{swalborn@fisica.ufmg.br}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG
30123-970, Brazil}
\author{Marcelo O. Terra Cunha}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG
30123-970, Brazil}
\author{Sebasti\~ao P\'adua}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG
30123-970, Brazil}
\author{Carlos H. Monken}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG
30123-970, Brazil}
\date{ April 20, 2003}
\begin{abstract}
Recent experiments in quantum optics have shed light on the foundations of quantum physics. Quantum erasers - modified quantum interference experiments - show that quantum entanglement is responsible for the complementarity principle.
\end{abstract}
\pacs{03.65Bz, 42.50.Ar}
\maketitle
It may be somewhat surprising that Thomas Young's double-slit experiment - a staple in the freshman physics laboratory - would be such an invaluable testing ground for the foundations of quantum physics. Yet the quantum version of the double-slit experiment has been at the center of many debates over the fundamentals of quantum physics since the theory was born, nearly a century ago. In fact, Young's experiment embodies the very nature of quantum physics. Last year, the readers of Physics World magazine voted Young's double-slit experiment with electrons ``the most beautiful experiment" in physics. The significance of Young's experiment lies in the fact that interference is a phenomenon exhibited only by waves. The puzzle that quantum physics presents is that a particle, which is usually thought of as an indivisible, localized object, can also behave like a classical wave, which interferes and diffracts. In ``the most beautiful experiment", electrons pass through the slits like waves and are detected like particles! This interference behavior is perhaps the greatest mystery in quantum theory. In fact, Nobel Prize-winning physicist Richard Feynman has called quantum interference ``the only mystery" in quantum physics. Recently, some progress has been made in the understanding of these interference effects within the foundations of quantum theory. Experiments called quantum erasers - modified versions of Young's experiment - have shed light on the foundations of quantum physics. However, before we explain the notion of quantum erasure, we take a detour to explore the concept and the history of classical and quantum interference.
\par
In the freshman physics laboratory, the double-slit experiment is quite simple. A laser beam is directed onto two closely spaced transparent slits that are etched into an opaque microfilm. The slits and their spacing are about a tenth of a millimeter wide. The laser beam is scattered by this ``double-slit" and a pattern of alternating bright and dark stripes - commonly called interference \emph{fringes} - is projected onto a distant viewing screen. Understanding the reason for this interference is not difficult: the paths from each slit to a given observation point are not necessarily equal, so light beams traveling from each slit arrive with different phases of propagation. These light beams interfere depending upon the difference in their phases: either constructively, resulting in an interference maximum (bright stripe) or destructively, resulting in an interference minimum (dark stripe). Even if you have never set foot in a physics laboratory, you have undoubtedly observed interference. Interference effects cause many common optical phenomena, such as the color patterns seen in soap bubbles or in the oily puddles in the parking lot of a gas station.
\par
Another way to visualize interference is to imagine a water wave incident on a wall with two vertical openings. When the wave front encounters the wall, a part of the wave goes through each opening, while the rest is reflected. The two sections that pass through the slots will meet up again a distance later and combine, or interfere. If a classical particle, say a tiny dust particle or even a tennis ball, is launched at the wall, it will either go through one of the openings or bounce back. To interfere, the particle would have to ``pass through both slits at the same time''! So it is very surprising and almost unbelievable that when this particle is instead an electron, for example, it interferes like a wave.
\par
A fundamental result of quantum theory is that light is made up of tiny quanta of energy – ``particles of light" - called photons. In 1909, Geoffrey Taylor demonstrated diffraction of individual photons using the tip of a needle. Diffraction occurs when a wave passes through a tiny aperture or object. The diffraction pattern is similar to an interference pattern: maxima and minima are due to the interference of different parts of the transmitted wave that meet at the detection screen.
\par
What happens when Young's experiment is repeated using individual photons instead of an intense light beam? An attenuated light source ensures that only one photon is incident on the double slit at a time. After recording data for many photons, the resulting pattern of individual points (each corresponding to the detection of one photon) on the photosensitive screen is identical to that of an intense light beam, interference fringes and all. This seems to imply that the individual photons had ``passed through both slits at the same time" and ``interfered with themselves", a seemingly astounding feat, even for something as aloof and mysterious as the photon. To date, variations of the quantum double-slit experiment have been performed using many different types of particles, including photons, electrons, neutrons and even large carbon-60 fullerenes. All results confirm the counter-intuitive result that, at the quantum level, particles ``interfere with themselves" just like classical waves.
\section{The quantum coin toss}
To further understand why the interference of quantum particles is an unexpected result, here is a simple example. Consider the usual coin toss, of the sort that takes place at the start of an NFL football game, where the coin has the same chance of giving heads or tails. The probability (call it $P$(heads)) that a coin lands heads is thus 50 \%. Likewise, the probability that the coin gives tails is $P$(tails) = 50\%. Obviously, there are only two possible outcomes, the coin must land either heads or tails, so the total probability to give heads or tails is just the sum of the individual probabilities: $P$(heads or tails) = $P$(heads) + $P$(tails) = 100\%. The quantum double-slit is a type of ``quantum coin toss", and so we can make a similar analysis. Given a certain position on the detection screen, one can try to assign a probability $P$(slit 1) or $P$(slit 2) that a photon detected at that point on the screen passed through slit 1 or slit 2. Here comes the surprising result: unlike the coin toss, the total probability to register a photon is not equal to the sum of the individual probabilities: $P$(slit 1 or slit 2) $\neq$ $P$(slit 1) + $P$(slit 2).
\par
The physical principle responsible for this strange behavior is called \emph{superposition}, which says that wavelike events combine according to their probability amplitudes, not their probabilities. Let's denote the probability amplitude with the letter $A$. The probability amplitude for a photon to pass through slit 1 is $A$(slit 1) and $A$(slit 2) is the amplitude for the photon to pass through slit 2. One difference between a probability and a probability amplitude is that the amplitudes are now complex numbers, to incorporate the concept of phase. The total probability amplitude for a photon to pass through slit 1 or slit 2 is $A$(slit 1 or slit 2) = $A$(slit 1) + $A$(slit 2). The probability for a given event is then obtained by calculating the ``absolute square'' of the corresponding probability amplitude: $P = |A|^2$. Thus, the total probability to detect a photon is $P$(slit 1 or slit 2) = $|A$(slit 1) + $A$(slit 2)$|^2$. Computing this probability gives rise to quantities, not present in the NFL coin toss example above, which are responsible for the interference effects. Quantum particles - electrons, photons, etc - interfere because they behave according to the superposition principle, which describes the physical phenomenon of waves. Thus, when you flip a ``quantum coin'', it can give both heads and tails at the same time.
\section{Particles or waves?}
By the time Young performed his experiment in 1801, physicists had been debating the nature of light for many years. The question was: Is light made of waves or particles? Some scientists, such as Isaac Newton (1717), believed light was made up of tiny classical particles, like particles of dust. The movement of each particle traced out a trajectory, called a ray. Others, such as Dutch physicist Christian Huygens (1690), advocated a classical wave theory, like water waves or oscillations on a stretched string. Each theory was able to explain some of the phenomena observed up until that time, such as shadows, refraction and reflection. But when Thomas Young showed that a beam of light interferes with itself, which a classical particle could never do, the particle theory was laid to rest. That is, until Albert Einstein came along.
\par
At the end of the nineteenth century, German physicist Max Planck was concerned with the following problem: explain the color spectrum of radiation emitted by a ``blackbody''. A black body is basically a metal box kept at a certain temperature with a small hole allowing radiation to escape. Planck was interested in the color spectrum emitted by the box with respect to its temperature. Using classical radiation theory to describe blackbody radiation gave an inaccurate result known as the ultra-violet catastrophe. To accurately explain the radiation spectrum, Planck proposed the idea that light is made up of discrete energy units, or quanta, which we now call photons. Planck was reluctant to accept his own idea, which he thought of as a mathematical ``trick'' which happened to fit the experimental data. Planck tried vigorously to explain blackbody radiation using other physical concepts.
Shortly thereafter in 1905, Albert Einstein, in addition to publishing his seminal works on relativity and Brownian motion, applied Planck's revolutionary idea to explain the photoelectric effect, the work for which he was later granted the Nobel prize in 1921 (Planck had won the Nobel prize for his research 3 years earlier). Though Planck was the first to propose the idea of quanta, it was Einstein who embraced the idea, and his work along with Planck's forced the physics community to accept it. It was the dawn of quantum physics.
\section{Matter waves matter}
\par
Photons and other quantum particles are absorbed in discrete units of energy. The detection of a particle corresponds to a tiny point on some type of detection screen. But above we stated that quantum particles interfere with themselves just like waves. How can quantum objects have both particle and wave characteristics? In other words, how can a photon interfere with itself when passing through a double-slit but later appear as a tiny point on a photosensitive film? This paradox is known as wave-particle duality, and is one of the cornerstones of quantum theory. Wave-particle duality is often revealed through another underlying concept called the \emph{complementarity principle}.
\par
In quantum physics, physically measurable quantities (such as position, momentum, etc.) are often called \emph{observables}. The complementarity principle states that the more we know about a given observable, the less we know about its complement. For example, if we measure the exact position of an object at an instance in time, then we can have no knowledge of the object's momentum at that instance. Position and momentum are called complementary observables. To avoid any confusion with the classical and quantum aspects of the word ``particle'', we have now resorted to using the word ``object'' to describe a quantum particle - meaning a photon, an electron, a neutron, etc.
\par
The concept of position corresponds to a point in space. Imagining again a water wave, or a wave on a stretched string, with series of peaks and troughs, it is easy to see that a wave does not have a well-defined position in this sense. A wave, such as those that can be seen crashing onto a sandy beach, can be localized to within a certain region, but not to a point. A classical particle does possess a well-defined position, and using the laws of classical physics, one can calculate the particle's trajectory and know its position at all instances in time. Therefore, position is identified as a particle-like property. A wave, on the other hand, can be described in terms of its frequency, wavelength, amplitude and phase. In 1927 Louis de Broglie characterized the wavelength (now known as the de Broglie wavelength) of a quantum object with its momentum, work for which he was later granted the Nobel Prize. Consequently, in quantum physics, momentum is a wave-like property. Hence the complementarity of position and momentum leads to wave-particle duality: quantum objects can behave as either particles or waves. The observed behavior depends on what type of measurement the experimenter chooses to make: if a particle-like property such as position is measured, then the quantum object behaves like a particle. Likewise if we choose to observe a wave-like property, such as momentum, the observed behavior is wave-like. Moreover, quantum physics does not provide us with the means to make any definite statement about the properties of the quantum object before we measure it. The observation of a wave-like property does not imply that the quantum object was behaving as a wave just before the measurement.
\par
If this all sounds pretty unbelievable to you then you are in good company. Many of the founding fathers of quantum theory were not very satisfied with this state of affairs either, including Einstein, whose intellectual battles with Danish physicist Niels Bohr are the stuff that many physics books are made of. Bohr was the greatest proponent of the idea of complementarity, an idea that Einstein was reluctant to accept. Einstein could not come to terms with the idea that what we observe and consequently call``reality" seems to be based solely on the manner in which we choose to look. Moreover, he was bothered by the fact that according to quantum theory, this reality only exists while we are observing. He expressed his discontent to Abraham Pais by asking: ``Do you believe that the moon exists only when you look at it?" Einstein did not accept that quantum theory was a complete description of nature. Interestingly, it was Einstein's dissatisfaction that motivated and still motivates much of the modern research in quantum mechanics.
\par
Many of the great Einstein-Bohr dialogs took place at the Solvay conferences in the 1920's. On several occasions, Einstein thought he could poke holes in Bohr's so-called Copenhagen Interpretation of quantum theory. Throughout the history of physics, much of the discussion and debate over the nature of the world is done through examples and counter-examples of \emph{gedanken} experiments: idealized thought experiments. One of Einstein's famous examples is the following. Repeat the quantum version of Young's experiment, but this time the double slit is suspended by sensitive springs so that it is free to move back and forth. An incident photon, scattered by the slits, suffers a change in momentum, which is absorbed by the double slit apparatus, giving it a slight kick. One could then measure the recoil of the slit apparatus together with the photon's position on the detection screen and infer the photon's trajectory, a particle-like property. The trajectory of the photon itself should not be altered by this measurement, so the interference fringes - a wave-like property - should still be observed. From the spacing between the interference fringes one can calculate the (de Broglie) wavelength and thus the momentum of the photon. In such a way it should be possible to observe the characteristic interference fringes and calculate the momentum as well as know the photon's trajectory. The complementarity principle must be a hoax!
\par
Bohr later pointed out, however, that Heisenberg's uncertainty relation prevented one from seeing interference fringes and determining the photon's trajectory simultaneously. The uncertainty relation is a quantitative statement about the best precision with which one can measure complementary observables. The recoil of the double-slit apparatus (an indicator of the momentum of the photon) disturbs the system creating an uncertainty in the detection of the photon's position on the detection screen. This uncertainty is great enough to ``wash out'' or blur the interference fringes to such a degree that they no longer appear. Any attempt to measure the photon's trajectory disturbs the system and prevents the observation of interference fringes. All ideas similar to that of Einstein's have failed due to similar arguments. For many years it was thought that the uncertainty relation was the mechanism responsible for the complementarity principle. The question remained: are we able to mark the particle's path (1) without altering its trajectory and (2) in such a way that we can get around the uncertainty principle?
\section{Quantum Erasure}
Roughly twenty years ago, physicists Marlan O. Scully and Kai Dr\"uhl (at the Max-Planck Institut f\"ur Quantenoptik and University of New Mexico) shook the physics community and strengthened the foundations of quantum physics, when they introduced the idea of quantum erasure. The logic of quantum erasure is the following: if the information providing the object's trajectory can be determined without significantly perturbing it, then the interference disappears, but the ``erasure'' of this information should bring the interference back. Through the introduction of this new concept, they showed that the complementarity principle plays a much more fundamental role in quantum physics than the uncertainty relation.
\par
Later, Scully, with Berthold-Georg Englert and Herbert Walther (both at the Max-Planck Institut f\"ur Quantenoptik) proposed a way to bring this about using Rydberg atoms as the interfering objects. Rydberg atoms are excited at very high electron energy levels (for example $n=50$) with long decay times. The atoms are incident on a double-slit. Two microwave cavities, made of a pair of microwave high reflectors, are then placed one behind each slit. The microwave cavities serve as path markers. When an atom passes through a cavity it emits a photon, which remains in the cavity. In this process, the atom's trajectory is not disturbed. By simply looking to see which cavity contains the photon, it would be possible to know where the atom has been. So far the Scully-Englert-Walther experiment has never been realized in the laboratory.
However, we have succeeded in performing an experiment that is analogous to their proposal and much easier to implement experimentally. However, first we must digress briefly to explain the concept of polarization.
\par
The electromagnetic field, that is light, as well as the photon, has an internal property called polarization. In classical optics, light is viewed as a transverse electromagnetic wave and polarization refers to the direction in which it oscillates. A field that oscillates in a specific manner is said to be polarized. A field with linear polarization oscillates back and forth along a certain direction, perpendicular to the propagation direction, while a field with circular polarization oscillates in a circular pattern. Right-circular polarized light oscillates in the clockwise direction, while left-circular polarized light oscillates in the counter-clockwise direction. A circular polarized light beam can be described as a superposition of horizontally and vertically polarized beams that are a quarter cycle (or quarter wavelength) out of phase with each other. For right-circular polarization the vertical component is a quarter cycle ahead of the horizontal component, while for left-circular polarization the vertical component is a quarter cycle behind the horizontal component. Other commonly used polarization directions are the diagonal directions, $45^\circ$ and $-45^\circ$. The diagonal directions are superpositions of horizontal and vertical components just like the right- and left-circular polarizations, only now the horizontal and vertical components are in phase ($45^\circ$) or one-half cycle out of phase ($-45^\circ$) with each other.
Optical components called wave plates are used to change the polarization, while the propagation direction of the electromagnetic field is left untouched. A quarter-wave plate can be used to convert a linearly polarized beam into a circularly polarized beam. Another commonly used optical components is a polarizer, which acts as polarization filter, allowing only light with a given polarization to pass. For example, if a circularly polarized beam is directed onto a horizontal polarizer, the beam which exits is horizontally polarized and half as intense as the input beam. Polarizing sunglasses use this concept to eliminate glare from reflective surfaces.
\par
Now imagine that we repeat Young's experiment with photons polarized linearly in the vertical direction, and we observe interference fringes on a distant screen. Suppose now that we insert two quarter-wave plates, one behind each slit, in such a way that plate 1 transforms the vertically polarized photons into right-circularly polarized photons, while plate 2 transforms the vertically polarized photons into left-circularly polarized photons. The result is that no interference pattern is observed at the detection screen. Instead, after many photons, we will observe a distribution of photon detections that produces the famous bell-shaped curve. The pattern looks something like a mountain peak, with a maximum in the middle, where photons from each slit will hit. There is only one peak because the two slits are very close together. If the slits were well separated, two peaks would appear.
\par
What happened to the interference? The quarter wave plates have marked the polarization of the photons. All we have to do is measure the circular-polarization direction (left or right) of the photons at the screen and we will know through which slit the photons have passed. Since right- and left-circular polarizations oscillate in opposite directions, they are completely distinguishable from each other. Moreover, the quarter-wave plates do not alter the propagation direction of the photons. It is important to note that we don't actually have to measure the polarization direction in order to destroy the interference pattern. It is enough that the so-called which-path information is available to us. Playing dumb will not restore the interference fringes.
\par
One might note that this experiment could just as well have been performed using an intense classical light beam. We have chosen to use quantum interference - photons - because the question as to which slit the beam of light has passed through has no significance in classical optics, where a beam of light is always a wave, and thus the concept of position is meaningless.
\subsection{Interference is Ignorance}
\par
What happens if we instead measure polarization in the horizontal direction? If we limit our observation apparatus to only horizontally polarized photons, then we will again see interference fringes. But how can that be? The quarter-wave plates have marked the photons path. Simply ignoring the information does not bring back interference. Why do we observe interference if we measure horizontal polarization?
\par
Both right- and left-circular polarizations have a horizontal component and thus observation of a horizontally polarized photon tells us nothing about through which slit the photon has passed. The key here is that measuring horizontal polarization erases the which-path information (hence the name ``quantum erasure''). If we tried to measure right- or left-circular polarization again after the horizontal polarizer, we would gain nothing in the way of which-path information.
\par
Similarly, if we choose to measure vertical polarization, we again erase the which-path information and restore interference. However, in this case we observe interference in the form of \emph{antifringes} that are completely out of phase with those observed with horizontal polarization, meaning that where we had observed an interference maximum (a bright spot) we now observe a minimum (a dark spot), and vice versa. As it so happens, the sum of these interference patterns reproduces the ``mountain peak'' pattern that one would obtain had no polarization measurement been made. This is the essence of quantum erasure.
\par
Our choice of polarization measurement divides the experimental results into subsets. Some of these subsets give interference fringes, as in the case where we measure horizontal or vertical polarization, while other subsets give which-path information, as when we measure either right- or left-circular polarization. If we add together the measurement results for the cases which give interference, the sum reproduces the mountain peak, as though we had not made any polarization measurement. Similarly, if we add together the measurement results for the cases which give which-path information, we obtain the same result.
\par
We observe interference because the two possibilities corresponding to slit 1 and slit 2 are at least somewhat indistinguishable, that is, our choice of measurement cannot tell us with certainty through which slit a detected photon has passed. If the two possibilities are completely indistinguishable, as is the case when we measure horizontal or vertical polarization, we observe perfect high-contrast interference fringes. Likewise interference is completely destroyed when the two possibilities are distinguishable, meaning that our measurement apparatus is capable of telling us with certainty through which slit the photon has passed, as is the case when we measure circular polarization. There exist quantitative mathematical relationships governing the contrast of interference fringes and amount of which-path information we can observe simultaneously.
\par
What prevents us from observing interference and determining the photon's trajectory in the quantum eraser? Polarization and position are not complementary observables so there is no place for an explanation based on the uncertainty principle. Moreover, the fact that we can erase the which-path information and observe interference implies that there is no ``disturbance'' involved in the measurements. Yet the fact remains, we are still unable to obtain which-path knowledge and observe interference fringes simultaneously. It must be that the complementarity principle is enforced through some mechanism more fundamental than the uncertainty relation.
\par
If it is not the uncertainty relation, then what is responsible for complementarity? The answer is \emph{quantum entanglement}. When a photon passes through the double-slit apparatus (just before it passes through the quarter-wave plates), it is in a superposition of position states: slit 1 + slit 2. The quarter-wave plates then perform a conditional logic operation on the photon: if a photon passes through slit 1 then it emerges with right-circular polarization, and if a photon passes through slit 2 then it emerges with left-circular polarization. The photon's polarization has become entangled with its path. The result is a more complicated quantum superposition involving two degrees of freedom: the photon's path and its polarization.
\par
Entanglement is the name given to this type of quantum correlation, which is much stronger than any classical correlation. The reason for this is that entanglement correlates the probability amplitudes, while a classical correlation correlates only the probabilities. To see this, let's return to the NFL coin toss example, however, imagine now that we have two ``magical" coins, correlated such that when flipped they always give opposite results: one coin gives heads while the other gives tails. This is a type of classical correlation. Individually, each coin still lands heads 50\% of the time and tails the other 50\% of the time. If you flip both coins and then quickly hide one of them, you can always discover the result of the hidden coin simply by looking at the result of the exposed coin.
\par
The difference between this example of classical correlation and quantum entanglement is that the quantum correlation exists even when you look at superpositions of the individual states. For example, as we will discuss below, it is possible to create two photons that have entangled polarizations. That is, if one photon is horizontally polarized then the other is vertically polarized. If we test both photons individually, there is a 50\% chance that we will measure each photon to be either horizontal or vertical, but we will never find that they are polarized in the same direction simultaneously. One can test this experimentally using horizontal and vertical polarizers. Up to this point, this seems to be the same as the magical NFL coins. However, unlike the NFL coins, it is possible to rotate the polarizers $45^\circ$ so that they measure $45^\circ$ and $-45^\circ$ diagonal polarization. The photons will display the same correlation: each individual photon has a 50\% chance to be detected $45^\circ$ diagonally polarized and a 50\% chance to be detected $-45^\circ$ polarized, but they are never polarized in the same direction simultaneously. Moreover, this is true for any mutual rotation of the polarizers. This is impossible using the magical NFL coins or any other type of classical correlation! In this sense, quantum entanglement is much stronger than any classical correlation.
\par
As an aside, physicists have known about quantum entanglement since the renowned 1935 paper of Albert Einstein, Boris Podolsky and Nathan Rosen. Shortly thereafter, Austrian physicist Erwin Schr\"odinger coined the name entanglement. The fathers of quantum theory, including Einstein and Bohr, puzzled over the nature of entanglement just as they did over quantum superpositions. Since then, scientists have realized that quantum entanglement is a physical resource that can actually be used in the areas of information technology. In fact, quantum entanglement is the backbone of a new and rapidly flourishing multidisciplinary field called \emph{quantum information}.
\par
Nearly twenty years ago, several physicists toyed with the idea of using two-level quantum systems, such as the polarization of a photon, as ``quantum bits" in a computer. Since then the same idea has been applied to many problems in cryptography, communications and computer science, and produced some promising results. For example, the ``strange" laws of quantum physics provide the only form of cryptography that is proven to be secure, certainly interesting to governments sending top secret information or to anyone making a credit card purchase via the internet.
\par
Returning now to the quantum eraser, the quarter-wave plates have entangled the photon's path with its polarization. Since the two possible polarizations, right- and left-circular, are distinguishable (they oscillate in opposite senses), we can measure the polarization and determine the photon's path with certainty. Entanglement enforces the complementarity principle by coupling the photon's path to different polarizations which are completely distinguishable from each other. Physicists have now come to a roadblock similar to that of Einstein and Bohr. Is it possible to measure the path of the photon without entangling it? Entanglement is a fundamental player in the quantum theory of measurement. In a way, entanglement is the act of measurement: since it associates the photon's path (the slit) with its polarization (which we can measure). Most physicists would probably bet that the answer to this question is no.
\section{Twin Photons: an entangled story}
Recently, in the Quantum Optics laboratory at the Universidade Federal de Minas Gerais (UFMG), we took this experiment a step further. We created a pair of entangled photons using a non-linear optical process called spontaneous parametric down-conversion. In our experiment, we directed an ultraviolet argon laser beam onto a thin non-linear crystal, which creates two lower energy ``twin" photons. The two photons, which we will call $a$ and $b$, were generated in such a way that when photon $a$ is found to have horizontal polarization, then photon $b$ will necessarily be vertically polarized. Likewise, if $a$ is found to have vertical polarization, then $b$ has horizontal polarization. As discussed above, similar correlations exist for any type of polarization measurements made on the two photons, as long as the polarizers measure perpendicular polarizations (horizontal and vertical, left- and right-circular, etc). These photons are said to be polarization-entangled. Furthermore, now that the entangled systems are two independent photons, they can be separated any arbitrary distance. It has been shown experimentally that entangled photons can remain entangled over great distances---the current record, held by physicists at the University of Geneva, is between the cities of Bellevue and Bernex, a distance of about 11 kilometers!
\par
After creating the entangled photons, we maneuvered photon $a$ to the double-slit apparatus (double slit and quarter-wave plates) and then to a photodetector, while photon $b$ passes directly to a separate polarizer and detector. When the quarter-wave plates were removed, after many photon pairs we observed the usual interference pattern. However, since we were working with two photons, the photons pairs were detected in coincidence. Coincidence detection means that we are only interested in the cases where the two photons are registered at their respective detectors simultaneously. Experimentally, the photons are detected within a small window of time, usually on the order of $10^{-9}$ seconds.
\par
The biggest experimental hurdle we had to leap was figuring out a way to mount the quarter wave plates in front of the narrow double slit. To create an observable interference pattern, each slit of the double-slit was about 0.2 millimeters wide, and they were spaced 0.2 millimeters apart. The usual quarter wave plates that are commercially available are round in shape, about 1 centimeter in diameter and about 2 millimeters thick. Due to their shape and size, it was necessary to modify the wave plates so that they would each cover only one slit. Using high quality sandpaper, we sanded a straight edge into each wave plate at the required angle, so that they would each cover one slit and join in the narrow space between the slits.
\par
When we put the quarter-wave plates in place, the interference was destroyed, just like before. This time, however, the which-path information is available only through coincidence detection. One quarter wave plate transforms $a$'s vertical polarization to right-circular, while the other transforms to left-circular. However, now photon $a$ can be found to be either vertically or horizontally polarized. For horizontal polarization, the action of the wave plates is reversed. Thus, measuring only the polarization of photon $a$ will not provide enough information to determine through which slit $a$ has passed. Through coincidence detection, however, we \emph{are} provided sufficient information. The two-photon logic statements are:
(1) ``$a$ right-circular and $b$ horizontal" or ``$a$ left-circular and $b$ vertical" implies that $a$ passed through slit 1 while (2) ``$a$ left-circular and $b$ horizontal" or ``$a$ right-circular and $b$ vertical" implies that $a$ passed through slit 2. Interestingly enough, due to the entanglement between $a$ and $b$, we can choose to observe interference or obtain which-path information of photon $a$ based solely on the polarization direction we measure on photon $b$. Instead of measuring horizontal or vertical polarization of photon $b$, we can measure diagonal (or circular) polarizations, which are superpositions of horizontal and vertical polarizations. Detecting a diagonally polarized photon erases the which-path information, and consequently we observe interference fringes. A measurement in the positive diagonal direction ($45^\circ$) gives interference fringes, while a measurement in the negative diagonal direction ($-45^\circ$) gives interference antifringes, exactly out of phase with the fringes.
\section{Delayed Choice}
Curiously, with this quantum eraser we could actually choose to observe interference or determine photon $a$'s path after photon $a$ has been detected. Imagine that the detector registering photon $b$ is moved very far away, so that photon $b$ is detected some time after photon $a$. The experimenter could then wait until after photon $a$ is registered to decide which measurement to perform on photon $b$, and consequently observe interference or determine $a$'s path. Moreover, we could let photons $a$ and $b$ travel several light minutes away from each other, so that no signal could travel from $a$ to inform $b$ of its position in the time between the detections of $a$ and $b$. How can one choose to observe particle-like or wave-like behavior after the interfering particle has already passed through the double-slit?
When first discussed by American physicist John A. Wheeler in 1978, before the quantum eraser concept was introduced, this type of delayed choice experiment raised serious physical and metaphysical questions. It seems to imply that the observer could alter photon $a$'s past by choosing how to measure photon $b$. However, this is not the case. To explain why, we will tell you a story about the two most famous people in quantum information: Alice and Bob.
\par
Two quantum physicists, Alice and Bob, decide to perform an experiment testing the foundations of quantum mechanics. Alice sets up a double slit experiment with quarter wave plates, just like we described, in her laboratory on Earth. Her friend and colleague Bob, who lives on the Mars colony, sends her photons, one by one, across a quantum ``telephone line" that they have set up between their laboratories. Alice sends the photons, one by one through the double-slit-wave-plate apparatus. For every photon, she marks its position, writing something like ``Photon 567 landed at position $x = 4.3$" in her lab notebook. When Alice later plots her experimental results, she sees that large ``dull" mountain peak, and concludes that there was no interference present in the experiment. What Bob has not told Alice is that each of her photons is entangled with another photon which Bob has kept for himself. Bob performs a series of polarization measurements on the photons, about half the time measuring horizontal and vertical polarization and the other half measuring $+45^\circ$ and $-45^\circ$ diagonal polarization. He records all of his results in his lab book, with statements such as ``Photon 567 ($b$) was detected with horizontal polarization", but he does not inform Alice of his mischief.
\par
Bob loves magic and a good practical joke. When visiting Alice one day, she shows him her experimental results on the computer and says ``Look Bob, I performed that quantum eraser experiment and when I plotted my data, all I got was this dull mountain peak, there was no interference". Bob says ``Alice, are you sure?" and, after checking his own lab book, he tells her to plot only those photons for which he measured its entangled partner to be $+45^\circ$ diagonally polarized and ``Ta-Da!" an interference fringe pattern appears. ``Wait Bob, that wasn't there before! How did you make the photons interfere after I already detected them and recorded it all in my lab book?!", Alice exclaims. Bob, who loves to play for an audience, replies, ``You think that's impressive, well check this out", and he consults his lab book and plots Alice's photons that are paired with photons for which he measured horizontal polarization and ``Ta-Da!" there is no interference pattern, just the smaller (half height) mountain peak. Alice is perplexed. Bob, not knowing when to call it quits, does the same for Alice's photons paired with his $-45^\circ$ diagonal polarization measurements and ``Ta-Da!" interference is back, this time in the form of antifringes. ``Bob, that is amazing! You have control over the past! While you are at it, can you go back and change my lottery ticket from last week to 67-81-138?," Alice asks with a look of awe in her eyes. Bob is loving the moment, but he is not the greatest magician, and cannot keep his mouth shut about the secret to his tricks. ``No Alice, look, the photons I gave you were actually entangled with photons that I kept for myself. I did a series of polarization measurements, and recorded my results. My polarization measurements tell me how to divide up your experimental results so that we can see interference or not, but I cannot change the position at which any photon actually landed," Bob explains.
He shows her by plotting all of the results for which he measured horizontal OR vertical (orthogonal directions) and they observe the large mountain peak. He then does the same with all results of $+45^\circ$ and $-45^\circ$, and they observe the same mountain peak. Of course plotting all of the results together regardless of polarization also gives the mountain peak, as Alice had already observed. So Bob was not able to alter the past; it is just that he had more information than Alice.
\par
Presumably, Einstein would not be happy with this state of affairs. Quantum erasure seems to confirm that the complementarity principle is indeed a fundamental part of quantum theory. Quantum physics has in its realm some strange consequences if one insists on using concepts from classical physics. The founding fathers were certainly aware of this nearly a century ago. Nowadays, physicists have learned to accept the fact that the laws of classical physics do not necessarily apply to the quantum world. We have become much more comfortable with the ``quantum weirdness".
\par
The quantum eraser and other experiments have done much to illustrate the dual nature of quantum theory. However, physicists today are still unable to explain why wave-particle duality exists. In this respect, it seems that we have not come too far since the 1960's, when Richard Feynman stated, in \emph{The Feynman Lectures on Physics}: ``We cannot make the mystery go away by explaining how it works. We will just tell you how it works."
Yet great progress has been made. Understanding that it is not the uncertainty principle, but rather quantum entanglement responsible for complementarity is an enormous step, presumably in the right direction. Quantum entanglement is at the heart of the modern theory of quantum measurement. We have learned that it is the act of measurement itself, and not the ``quantum uncertainty" involved with the measurement that is responsible for the complementarity principle. This may seem like a subtle point, but it is one that has caused many physicists to sleep more soundly at night.
\begin{acknowledgments}
This research was performed in the Quantum Optics Laboratory at the Universidade Federal de Minas Gerais (UFMG) in Belo Horizonte, Minas Gerais, Brazil, with financial support from the Brazilian funding agencies CNPq and CAPES.
\end{acknowledgments}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Variational approach to the existence of solutions for non-instantaneous impulsive differential equations with perturbation}
\author[Yao]{Wangjin Yao}
\address[Yao]{School of Mathematics and Finance, Putian University, Putian, 351100, P.R. China}
\author[Dong]{Liping Dong}
\address[Dong]{College of Mathematics and Informatics, Fujian Normal University, Fuzhou, 350117, P.R. China}
\author[Zeng]{Jing Zeng\corref{cor}}
\address[Zeng]{College of Mathematics and Informatics, Fujian Key Laboratory of Mathematical Analysis and Applications (FJKLMAA), Fujian Normal University, Fuzhou, 350117, P.R. China}
\cortext[cor]{Corresponding author, email address: zengjing@fjnu.edu.cn. The author is supported by the National Science Foundation of China (Grant No. 11501110) and Fujian Natural Science Foundation (Grant No. 2018J01656).}
\begin{abstract}
In this paper, we study the existence of solutions for second-order non-instantaneous impulsive differential equations with a perturbation term. By a variational approach, we prove that the problem has at least one solution under the assumptions that the nonlinearities are super-quadratic at infinity, and sub-quadratic at the origin.
\end{abstract}
\begin{keyword}
Non-instantaneous impulsive differential equation \sep Mountain pass theorem \sep A perturbation term
\end{keyword}
\end{frontmatter}
\section{Introduction}
\label{}
In this paper, we consider the following problem:
\begin{equation}\label{eq1}
\left\{ {\begin{array}{l}
-u''(t)=D_{x}F_{i}(t,u(t)-u(t_{i+1}))+p(t),\quad t\in(s_{i},t_{i+1}],~i=0,1,2,...,N,\\
u'(t)=\alpha_{i},\qquad \qquad \qquad \qquad \qquad \qquad ~~\quad t\in(t_{i},s_{i}],~i=1,2,...,N,\\
u'(s_{i}^{+})=u'(s_{i}^{-}),\qquad \qquad \qquad \qquad \qquad~ \quad i=1,2,...,N,\\
u(0)=u(T)=0, u'(0)=\alpha_{0},
\end{array}} \right.
\end{equation}
where $0=s_{0}<t_{1}<s_{1}<t_{2}<s_{2}<...<t_{N}<s_{N}<t_{N+1}=T$. Since the impulses start abruptly at the points $t_{i}$ and keep the derivative constant on a finite time interval $(t_{i},s_{i}]$, we set $u'(s_{i}^{\pm})=\lim_{s\rightarrow s_{i}^{\pm}}u'(s)$. $\alpha_{i} \ (i=1,...,N)$ are constants, $p(t):(s_{i},t_{i+1}]\rightarrow \mathbb{R}$ belongs to $ L^{2}(s_{i},t_{i+1}] (i=1, ..., N)$.
The mathematical model of real world phenomena, in which discontinuous jump occurs, leads to the impulsive differential equations. The non-instantaneous impulsive differential equation is related to the hemodynamical equilibrium. Hence, it is important to study the non-instantaneous impulsive differential equations with a perturbation term, such as $p(t)$ in \eqref{eq1}.
As far as we know, the introduction of equation \eqref{eq1} was initiated by Hern\'{a}ndez and O'Regan in \cite{8}.
In \eqref{eq1}, the action starts abruptly at points $t_{i}$, and remains during a finite time interval
$(t_{i}, s_{i}]$. Obviously, it is a natural generalization of the following classical instantaneous impulsive differential equation:
\begin{equation}\label{eq55}
\left\{ {\begin{array}{l}
-u''(t)=f(t,u(t)),\quad t\in[0, T],\\
u'(t_{i}^{+})-u'(t_{i}^{-})=I_i(u(t_i)),\qquad i=1,2,...,N,\\
u(0)=u(T)=0.
\end{array}} \right.
\end{equation}
Many classical methods can be used to study the non-instantaneous impulsive differential equations, such as theory of Analytic Semigroup, Fixed-Point theory \cite{6,7,12,13} and so on. For some recent works on this type of equation, we refer the readers to \cite{1,4,5,10,11,15,16,17}.
To the best of our knowledge, the Variational Method can be used to study some impulsive differential equations.
Bai-Nieto \cite{2} studied the following linear problem, and obtained the existence and uniqueness of weak solutions.
\begin{equation*}\label{eq2}
\left\{ {\begin{array}{l}
-u''(t)=\sigma_{i}(t),\quad t\in(s_{i},t_{i+1}], i=0,1,2,...,N,\\
u'(t)=\alpha_{i},\quad t\in(t_{i},s_{i}], i=1,2,...,N,\\
u'(s_{i}^{+})=u'(s_{i}^{-}),\quad i=1,2,...,N,\\
u(0)=u(T)=0 , u'(0)=\alpha_{0},
\end{array}} \right.
\end{equation*}
where $\sigma_{i}\in L^{2}((s_{i},t_{i+1}),\mathbb{R})$, $\alpha_{i} \ (i=0,...,N)$ are constants. By Variational Method, Bai-Nieto-Wang \cite{3} obtained at least two distinct nontrivial weak solutions of problem:
\begin{equation*}\label{eq3}
\left\{ {\begin{array}{l}
-u''(t)=D_{x}F_{i}(t,u(t)-u(t_{i+1})),\quad t\in(s_{i},t_{i+1}], i=0,1,2,...,N,\\
u'(t)=\alpha_{i},\quad t\in(t_{i},s_{i}], i=1,2,...,N,\\
u'(s_{i}^{+})=u'(s_{i}^{-}),\quad i=1,2,...,N,\\
u(0)=u(T)=0 , u'(0)=\alpha_{0},
\end{array}} \right.
\end{equation*}
where $D_{x}F_{i}(t,x)$ are the derivatives of $F_{i}(t,x)$ with respect to $x$, $i=0,1,2,...,N.$ Zhang-Yuan \cite{18} considered the following equation with a perturbation term $p(t)$, and obtained infinitely many weak solutions.
\begin{equation*}\label{eq4}
\left\{ {\begin{array}{l}
-u''(t)+\lambda u(t)=f(t,u(t))+p(t), \quad a.e.~t\in[0,T],\\
\bigtriangleup u'(t_{i})=I_{i}(u(t_{i})),\quad i=1,...,N,\\
u(0)=u(T)=0,
\end{array}} \right.
\end{equation*}
where $f: [0, T]\times\mathbb{R}\rightarrow \mathbb{R}$ is continuous, the impulsive functions $I_{i}:\mathbb{R}\rightarrow \mathbb{R} (i=1, 2, . . . ,N)$ are continuous and $p(t):[0, T]\rightarrow \mathbb{R}$ belongs to $L^{2}[0, T]$.
Motivated by the work of \cite{2,3,18}, we
obtain the weak solution of the problem \eqref{eq1} by Variational Method. Our main result is a natural extension of \cite{3}.
We denote by $D_{x}F_{i}(t,x)$ the derivative of $F_{i}(t,x)$ with respect to $x$ $(i=0, 1, ..., N)$. $ F_{i}(t,x) $ is measurable in $t$ for every $x\in \mathbb{R}$ and continuously differentiable in $x$ for $a.e.\ t\in (s_{i},t_{i+1}]$.
We assume that $\lambda_{1}$ is the first eigenvalue of:
\begin{equation}\label{eq5}
\left\{ {\begin{array}{l}
\displaystyle -u''(t)=\lambda u(t), \quad t\in[0,T],\\
\displaystyle u(0)=u(T)=0.
\end{array}} \right.
\end{equation}
Our assumptions are:
\begin{description}
\item[$(H1)$] There exist $\alpha \in C(\mathbb{R}^{+},\mathbb{R}^{+})$ and $ b \in L^{1}(s_{i},t_{i+1};\mathbb{R}^{+})$ such that
$$|F_{i}(t,x)|\leq \alpha(|x|)b(t),~|D_{x}F_{i}(t,x)|\leq \alpha(|x|)b(t),$$ for all $x\in \mathbb{R}$,
where $F_{i}(t,0)=0$ for $a.e.\ t\in(s_{i},t_{i+1}) ~(i=0,1,2,...,N)$.
\item[$(H2)$] There exist constants $\mu_{i}>2$ such that $0<\mu_{i}F_{i}(t,x)\leq xD_{x}F_{i}(t,x)$
for $a.e.\ t\in (s_{i},t_{i+1}], ~x\in \mathbb{R}\backslash \{0\} (i=0,1,2,...,N).$
\item[$(H3)$] There exists a constant $M$ such that $\sum\limits_{i=0}^{N}\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}<M,$
where $M=\frac{1}{8\beta^{2}}-\frac{1}{2}\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|-\sum\limits _{i=0}^{N}\int_{s_{i}}^{t_{i+1}}M_{i}(t)dt,~\beta=(T\lambda_{1})^{-\frac{1}{2}}+T^{\frac{1}{2}},~M_{i}(t):=\max \limits_{|x|=1}F_{i}(t,x)~(i=0,1,2,...,N).$
\end{description}
\begin{remark}
$M$ in $(H3)$ originates from the proof of Theorem \ref{th1}.
\end{remark}
\begin{theorem} \label {th1}
Suppose that $(H1)$-$(H3)$ hold, then problem \eqref{eq1} has at least one weak solution.
\end{theorem}
The article is organized as follows: In Section 2, we present some basic knowledge and preliminary results. In Section 3, we prove Theorem \ref{th1}.
\section{Preliminaries}
In this section, we present some preliminary results which will be used in the proof of our result.
\begin{definition}\label{del}{\bf (\cite{9}, (PS) condition)}
Let $E$ be a real Banach space and $I\in C^{1}(E, \mathbb{R})$. $I$ is said to be satisfying the Palais-Smale condition on $E$ if
any sequence $\{u_{k}\}\in E$ for which $I(u_{k})$ is bounded and $I'(u_{k})\rightarrow0$ as $k\rightarrow\infty$ possesses
a convergent subsequence in $E$.
\end{definition}
\begin{theorem}\label{th2}{\bf(\cite{14}, Mountain Pass Theorem)}
Let $E$ be a real Banach space and $I\in C^{1}(E,\mathbb{R})$ satisfy the $(PS)$ condition with $I(0)=0$. If $I$ satisfies the following conditions:
\begin{description}
\item[$(1)$] there exist constants $\rho,\alpha >0$, such that $I|_{\partial B_{\rho}}\geq \alpha$;
\item[$(2)$] there exists an $e\in E\backslash B_{\rho}$, such that $I(e)\leq 0$,
\end{description}
then $I$ possesses a critical value $c\geq \alpha$. Moreover, $c$ is characterized as $$c=\inf \limits_{g\in \Gamma}\max \limits_{s\in [0,1]}I(g(s)),$$
where $$\Gamma=\{g\in C([0,T],E)|~g(0)=0,g(1)=e\}.$$
\end{theorem}
Next, we introduce the well-known Poincar\'{e} inequality $$\int_{0}^{T}|u|^{2}dt\leq\frac{1}{\lambda_{1}}\int_{0}^{T}|u'|^{2}dt, ~u\in H_{0}^{1}(0,T),$$
where $\lambda_{1}$ is given in \eqref{eq5}.
In the Sobolev space $H_{0}^{1}(0,T)$, we consider the inner product $(u,v)=\int_{0}^{T}u'(t)v'(t)dt,$
which induces the norm $\|u\|=\left(\int_{0}^{T}|u'(t)|^{2}\right)^{\frac{1}{2}}.$ In $L^{2}[0,T]$ and $C[0,T]$, we define the norms:
$$\|u\|_{L^{2}}=\left(\int_{0}^{T}|u(t)|^{2}dt\right)^{\frac{1}{2}},~~\|u\|_{\infty}= \max\limits_{t\in [0,T]}|u(t)|.$$
By the Mean Value Theorem and the H\"{o}lder inequality, for any $u\in H_{0}^{1}(0,T)$, we have
\begin{equation}\label{eq106}
\|u\|_{\infty}\leq\beta\|u\|,
\end{equation}
where $\beta=(T\lambda_{1})^{-\frac{1}{2}}+T^{\frac{1}{2}},$ $\lambda_{1}$ is given in \eqref{eq5}.
Take $v\in H_{0}^{1}(0,T)$, multiply \eqref{eq1} by $v$ and integrate from $0$ to $T$, we obtain
\begin{equation*}\label{eq6}
\begin{split}
\int_{0}^{T}u''vdt=&\int_{0}^{t_{1}}u''vdt+\sum\limits_{i=1}^{N}\int_{t_{i}}^{s_{i}}u''vdt+\sum\limits_{i=1}^{N-1}\int_{s_{i}}^{t_{i+1}}u''vdt+\int_{s_{N}}^{T}u''vdt\\
=&-\int_{0}^{T}u'v'dt+\sum\limits_{i=1}^{N}[u'(t_{i}^{-})-u'(t_{i}^{+})]v(t_{i})+\sum\limits_{i=1}^{N}[u'(s_{i}^{-})-u'(s_{i}^{+})]v(s_{i}).
\end{split}
\end{equation*}
By \eqref{eq1},
\begin{equation}\label{eq7}
\begin{split}
\int_{0}^{T}u''vdt=&-\int_{0}^{T}u'v'dt+\sum\limits_{i=1}^{N}[\alpha_{i-1}-\alpha_{i}]v(t_{i})\\
&-\sum\limits_{i=0}^{N-1}\Big(\int_{s_{i}}^{t_{i+1}}(D_{x}F_{i}(t,u(t)-u(t_{i+1}))+p(t))dt\Big)v(t_{i+1}).
\end{split}
\end{equation}
On the other hand,
\begin{equation}\label{eq8}
\begin{split}
\int_{0}^{T}u''vdt=&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}(D_{x}F_{i}(t,u(t)-u(t_{i+1}))+p(t))vdt+\sum\limits_{i=1}^{N}\int_{t_{i}}^{s_{i}}\frac{d}{dt}[\alpha_{i}]vdt\\
=&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}(D_{x}F_{i}(t,u(t)-u(t_{i+1}))+p(t))vdt.
\end{split}
\end{equation}
Thus, it follows $v(t_{N+1})=v(T)=0$, \eqref{eq7} and \eqref{eq8} that
\begin{equation}\label{eq9}
\begin{split}
-\int_{0}^{T}u'v'dt+\sum\limits_{i=1}^{N}[\alpha_{i-1}-\alpha_{i}]v(t_{i})=&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}(D_{x}F_{i}(t,u(t)-u(t_{i+1}))\\
&+p(t))(v(t)-v(t_{i+1}))dt.
\end{split}
\end{equation}
A weak solution to \eqref{eq1} is a function $u\in H_{0}^{1}(0,T)$ such that \eqref{eq9} holds for any $v\in H_{0}^{1}(0,T)$.
Consider the functional $I:~H_{0}^{1}(0,T)\rightarrow \mathbb{R},$
\begin{equation}\label{eq10}
\begin{split}
I(u)=&\displaystyle\frac{1}{2}\int_{0}^{T}|u'|^{2}dt-\sum\limits_{i=1}^{N}(\alpha_{i-1}-\alpha_{i})u(t_{i})\\
&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(u(t)-u(t_{i+1}))dt-\sum\limits_{i=0}^{N}\varphi_{i}(u),
\end{split}
\end{equation}
where $\varphi_{i}(u):=\displaystyle\int_{s_{i}}^{t_{i+1}}F_{i}(t,u(t)-u(t_{i+1}))dt.$
For $u$ and $v$ fixed in $H_{0}^{1}(0,T)$ and $\lambda\in[-1, 1]$, by \eqref{eq106}, we have
\begin{equation}\label{eq50}
|u(t)-u(t_{i+1})|\leq2\|u\|_{\infty}\leq2\beta\|u\|.
\end{equation}
Hence
$$|u(t)-u(t_{i+1})+\lambda\theta(v(t)-v(t_{i+1}))|\leq2\beta(\|u\|+\|v\|),~\text{for} ~\theta\in(0,1),$$
and for $a.e.$ $t\in (s_{i},t_{i+1}]$,
\begin{align*}
\begin{split}
&\lim\limits_{\lambda\rightarrow0}\frac{1}{\lambda}\left[F_{i}(t,u(t)-u(t_{i+1})+\lambda(v(t)-v(t_{i+1})))-F_{i}(t,u(t)-u(t_{i+1}))\right]\\
=&D_{x}F_{i}(t,u(t)-u(t_{i+1}))(v(t)-v(t_{i+1})).
\end{split}
\end{align*}
By $(H1)$, \eqref{eq50} and the Mean Value Theorem, we obtain
\begin{equation*}
\begin{split}
&\left|\frac{1}{\lambda}\left[F_{i}(t,u(t)-u(t_{i+1})+\lambda(v(t)-v(t_{i+1})))-F_{i}(t,u(t)-u(t_{i+1}))\right]\right|\\
=&\bigg|D_{x}F_{i}(t,u(t)-u(t_{i+1})+\lambda\theta(v(t)-v(t_{i+1}))(v(t)-v(t_{i+1}))\bigg|\\
\leq&\max \limits_{z\in[0,2\beta(\|u\|+\|v\|)]}a(z)2\beta\|v\|b(t)\in L^{1}(s_{i},t_{i+1};\mathbb{R}^{+}).
\end{split}
\end{equation*}
Lebesgue's Dominated Convergence Theorem shows that
\begin{equation*}\label{eq11}
(\varphi_{i}'(u),v)=\int_{s_{i}}^{t_{i+1}}D_{x}F_{i}(t,u(t)-u(t_{i+1}))(v(t)-v(t_{i+1}))dt.
\end{equation*}
Moreover, $\varphi_{i}'(u)$ is continuous. So $I\in C^{1}(H_{0}^{1}(0,T),\mathbb{R})$ and
\begin{equation}\label{eq12}
\begin{split}
I'(u)v=&\int_{0}^{T}u'v'dt+\sum\limits_{i=1}^{N}[\alpha_{i-1}-\alpha_{i}]v(t_{i})\\
&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}\left(D_{x}F_{i}(t,u(t)-u(t_{i+1}))+p(t)\right)(v(t)-v(t_{i+1}))dt.
\end{split}
\end{equation}
Then the corresponding critical points of $I$ are the weak solutions of the problem \eqref{eq1}.
\begin{lemma} \label{le2} {\bf(\cite{3})}
If assumption $(H2)$ holds, then for each $i=0,1,2,..,N$, there exist $M_{i},m_{i},b_{i}\in L^{1}(s_{i}, t_{i+1})$ which are
almost everywhere positive such that
$$F_{i}(t,x)\leq M_{i}(t)|x|^{\mu_{i}},~for ~a.e.~t\in(s_{i}, t_{i+1}],~and~|x|\leq1,$$
and
$$F_{i}(t,x)\geq m_{i}(t)|x|^{\mu_{i}}-b_{i}(t),~for ~a.e.~t\in(s_{i}, t_{i+1}],~and~x\in\mathbb{R},$$
where $m_{i}(t):=\min \limits_{|x|=1}F_{i}(t,x)$, $M_{i}(t):=\max \limits_{|x|=1}F_{i}(t,x),~a.e.~t\in(s_{i},t_{i+1}].$
\end{lemma}
\begin{remark}
Lemma \ref{le2} implies that $D_{x}F_{i}(t,x)\ (i=1, ..., N)$ are super-quadratic at infinity, and sub-quadratic at the origin.
\end{remark}
\begin{lemma}\label{le3}
Suppose that $(H1)$, $(H2)$ hold, then $I$ satisfies the (PS) condition.
\end{lemma}
\noindent{\bf Proof:} Let $\{u_{k}\}\subset H_{0}^{1}(0,T)$ such that $\{I (u_{k})\}$ be a bounded sequence and $\lim \limits_{k\rightarrow \infty}I'(u_{k})=0$.
By \eqref{eq106},
\begin{equation}\label{eq17}
|\sum\limits_{i=1}^{N}(\alpha_{i-1}-\alpha_{i})u(t_{i})|\leq\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\|u\|_{\infty}\leq\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\beta\|u\|. \end{equation}
There exists constant $C_{1}>0$ such that $$|I(u_{k})|\leq C_{1},~|I'(u_{k})|\leq C_{1}.$$
First, we prove that $\{u_{k}\}$ is bounded. Let $\mu:=\min\{\mu_{i}:i=0,1,2,...,N\}$, by \eqref{eq10}, \eqref{eq17} and $(H2)$, we obtain
\begin{equation*}
\begin{split}
\int_{0}^{T}|u_{k}'|^{2}dt=& 2I(u_{k})+2\sum\limits_{i=1}^{N}(\alpha_{i-1}-\alpha_{i})u_{k}(t_{i})\\
&+2\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(u_{k}(t)-u_{k}(t_{i+1}))dt\\
&+2\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}F_{i}(t,u_{k}(t)-u_{k}(t_{i+1}))dt,\\
\leq& 2C_{1}+2\beta\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\|u_{k}\|\\
&+2\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(u_{k}(t)-u_{k}(t_{i+1}))dt\\
&+\frac{2}{\mu}\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}D_{x}F_{i}(t,u_{k}(t)-u_{k}(t_{i+1}))(u_{k}(t)-u_{k}(t_{i+1}))dt,
\end{split}
\end{equation*}
which combining \eqref{eq12} yields that
\begin{align*}
(1-\frac{2}{\mu})\|u_{k}\|^{2}\leq&2C_{1}+(2+\frac{2}{\mu})\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\beta\|u_{k}\|-\frac{2}{\mu}I'(u_{k})u_{k}\\
&+(2-\frac{2}{\mu})\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(u_{k}(t)-u_{k}(t_{i+1}))dt,\\
\leq& 2C_{1}+(2+\frac{2}{\mu})\beta\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\|u_{k}\|+\frac{2}{\mu}C_{1}\beta\|u_{k}\|\\
&+2(2-\frac{2}{\mu})\beta\sum\limits_{i=0}^{N}\|u_{k}\|\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}.
\end{align*}
Since $\mu>2$, it follows that $\{u_{k}\}$ is bounded in $H_{0}^{1}(0,T)$.
Therefore, there exists a subsequence also denoted by $\{u_{k}\}\in H_{0}^{1}(0,T)$ such that
\begin{equation*}
\begin{split}
&u_{k}\rightharpoonup u, ~~ \text{in} ~H_{0}^{1}(0,T),\\
&u_{k} \rightarrow u, ~~\text{in} ~L^{2}(0,T),\\
&u_{k} \rightarrow u, ~~\text{uniformly in} ~[0,T],~~\text{as} ~k\rightarrow \infty.
\end{split}
\end{equation*}
Since
\begin{equation*}
\begin{split}
|u_{k}(t)-u_{k}(t_{i+1})-u(t)+u(t_{i+1})|\leq& |u_{k}(t)-u(t)|+|u(t_{i+1})-u_{k}(t_{i+1})|\\
\leq& 2\|u_{k}-u\|\rightarrow 0, \quad \text{as}~~ k\rightarrow \infty.
\end{split}
\end{equation*}
Hence
\begin{equation*}
\begin{split}
\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}&(D_{x}F_{i}(t,u_{k}(t)-u_{k}(t_{i+1}))-D_{x}F_{i}(t,u(t)-u(t_{i+1})))\\
\cdot&(u_{k}(t)-u_{k}(t_{i+1})-u(t)+u(t_{i+1}))dt\rightarrow 0,
\end{split}
\end{equation*}
$$|\langle I'(u_{k})-I'(u),u_{k}-u\rangle|\leq\|I'(u_{k})-I'(u)\|\|u_{k}-u\|\rightarrow 0.$$
Moreover, we obtain
\begin{equation*}
\begin{split}
&\langle I'(u_{k})-I'(u),u_{k}-u\rangle\\
=&\|u_{k}-u\|-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}(D_{x}F_{i}(t,u_{k}(t)-u_{k}(t_{i+1}))\\
&-D_{x}F_{i}(t,u(t)-u(t_{i+1})))(u_{k}(t)-u_{k}(t_{i+1})-u(t)+u(t_{i+1}))dt,
\end{split}
\end{equation*}
so $\|u_{k}-u\|\rightarrow 0$ as $k\rightarrow +\infty$. That is, $\{u_{k}\}$ converges strongly to $u$ in $H_{0}^{1}(0,T)$.
Thus, $I$ satisfies the (PS) condition. $\Box$
\section{Proof of theorem}
\noindent{\bf Proof of Theorem \ref{th1}} Note that $I(0)=0$ and $I\in C^{1}(H_{0}^{1}(0,T),\mathbb{R})$. By Lemma \ref{le3}, $I$ satisfies the (PS) condition. By Lemma
\ref{le2} and \eqref{eq50}, we have
\begin{equation*}
\begin{split}
\int_{s_{i}}^{t_{i+1}}F_{i}(t,u(t)-u(t_{i+1}))dt&\leq\int_{s_{i}}^{t_{i+1}}M_{i}(t)|u(t)-u(t_{i+1})|^{\mu_{i}}dt\\
&\leq\int_{s_{i}}^{t_{i+1}}M_{i}(t)|2\beta\|u\||^{\mu_{i}}dt,
\end{split}
\end{equation*}
and
$$\sum\limits_{i=1}^{N}(\alpha_{i-1}-\alpha_{i})u(t_{i})\leq\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\|u\|_{\infty}\leq\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\beta\|u\|,$$
$$\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(u(t)-u(t_{i+1}))\leq\sum\limits_{i=0}^{N}2\beta\|u\|\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}.$$
By \eqref{eq10},
\begin{equation}\label{eq19}
\begin{split}
I(u)\geq&\frac{1}{2}\|u\|^{2}-\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\beta\|u\|-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}M_{i}(t)|2\beta\|u\||^{\mu_{i}}dt\\
&-\sum\limits_{i=0}^{N}2\beta\|u\|\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}.
\end{split}
\end{equation}
Take $\|u\|=\frac{1}{2\beta}$, then $|u(t)-u(t_{i+1})|\leq 1$, so
\begin{equation*}
\begin{split}
&\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\beta\|u\|\leq\frac{1}{2}\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|,\\
&\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}M_{i}(t)|2\beta\|u\||^{\mu_{i}}dt\leq\sum\limits _{i=0}^{N}\int_{s_{i}}^{t_{i+1}}M_{i}(t)dt,\\
&\sum\limits_{i=0}^{N}2\beta\|u\|\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}\leq\sum\limits_{i=0}^{N}\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}.
\end{split}
\end{equation*}
Hence,
\begin{align*}
I(u)=&\frac{1}{2}\|u\|^{2}-\sum\limits_{i=1}^{N}(\alpha_{i-1}-\alpha_{i})u(t_{i})\\
&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(u(t)-u(t_{i+1}))dt-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}F_{i}(t,u(t)-u(t_{i+1}))dt,\\
\geq&\frac{1}{2}\|u\|^{2}-\frac{1}{2}\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|-\sum\limits _{i=0}^{N}\int_{s_{i}}^{t_{i+1}}M_{i}(t)dt-\sum\limits_{i=0}^{N}\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}.
\end{align*}
By $(H3)$, $I(u)>0$ for $\|u\|=\frac{1}{2\beta}$, so condition (1) in Theorem \ref{th2} is satisfied. Let $\xi>0$ and $w\in H_{0}^{1}(0,T)$ with $\|w\|=1$. We can see that $w(t)$ is not constant on $[s_{i}, t_{i+1}]$ for $a.e.~t$. By Lemma \ref{le2},
\begin{equation*}
\begin{split}
\int_{s_{i}}^{t_{i+1}}F_{i}(t,(w(t)-w(t_{i+1}))\xi)dt\geq & \left(\int_{s_{i}}^{t_{i+1}}m_{i}(t)|w(t)-w(t_{i+1})|^{\mu_{i}}dt\right)\xi^{\mu_{i}}\\&-\int_{s_{i}}^{t_{i+1}}b_{i}(t)dt.
\end{split}
\end{equation*}
Let $W_{i}:=\int_{s_{i}}^{t_{i+1}}m_{i}(t)|w(t)-w(t_{i+1})|^{\mu_{i}}dt$, then
$$0\leq W_{i}\leq(2\beta)^{\mu_{i}}\int_{s_{i}}^{t_{i+1}}m_{i}(t)dt,~W_{0}\geq0.$$
We can select the interval $[0, t_{1}]$ and prove that $w(t)$ is not constant for $a.e.~t\in[0, t_{1}]$. In fact, suppose that $\int_{0}^{t_{1}}m_{0}(t)|w(t)-w(t_{1})|^{\mu_{0}}dt=0$. Since $m_{0}(t)$ is positive, $w(t)=w(t_{1})$ for $a.e.~t\in[0,t_{1}]$, a contradiction with the assumption on $w$.
By \eqref{eq10}, we obtain
\begin{align*}
I(\xi w)=&\frac{1}{2}\xi^{2}\|w\|^{2}-\sum\limits_{i=1}^{N}(\alpha_{i-1}-\alpha_{i})w(t_{i})\xi-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}p(t)(w(t)-w(t_{i+1}))\xi dt\\
&-\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}F_{i}(t,(w(t)-w(t_{i+1}))\xi)dt,\\
\leq&\frac{1}{2}\xi^{2}+\sum\limits_{i=1}^{N}|\alpha_{i-1}-\alpha_{i}|\beta\xi+2\beta\xi\sum\limits_{i=0}^{N}\sqrt{t_{i+1}-s_{i}}\|p\|_{L^{2}}-\sum\limits_{i=0}^{N}W_{i}\xi^{\mu_{i}}\\
&+\sum\limits_{i=0}^{N}\int_{s_{i}}^{t_{i+1}}b_{i}(t)dt.
\end{align*}
Since $\mu_{i}>2$, the above inequality implies that $I(\xi w)\rightarrow -\infty$ as $\xi \rightarrow \infty$, that is, there exists a $\xi\in \mathbb{R}\backslash \{0\}$ such that $\|\xi w\|>\frac{1}{2\beta}$ and $I(\xi w)\leq0$. The proof of Theorem \ref{th1} is completed. $\Box$
\noindent\textbf{Acknowledgments}
Jing Zeng is supported by the National Science Foundation of China (Grant No. 11501110) and Fujian Natural Science Foundation (Grant No. 2018J01656).
\noindent\textbf{References}
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Equivalence regimes for geometric quantum discord and local quantum uncertainty}
\author{Oscar Cordero}
\affiliation{ICFO—Institut de Ciencies Fotoniques, the Barcelona Institute of Science and Technology, 08860 Castelldefels (Barcelona), Spain}
\author{Arturo Villegas}
\affiliation{ICFO—Institut de Ciencies Fotoniques, the Barcelona Institute of Science and Technology, 08860 Castelldefels (Barcelona), Spain}
\author{Juan-Rafael Alvarez}
\affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, United Kingdom}
\author{Roberto de J. Le\'on-Montiel}
\affiliation{Instituto de Ciencias Nucleares, Universidad Nacional Autónoma de M\'exico, Apartado Postal 70-543, 04510 Cd. Mx., M\'exico}
\author{M. H. M. Passos}
\affiliation{ICFO—Institut de Ciencies Fotoniques, the Barcelona Institute of Science and Technology, 08860 Castelldefels (Barcelona), Spain}
\email{marcello.passos@icfo.eu}
\author{Juan P. Torres}
\affiliation{ICFO—Institut de Ciencies Fotoniques, the Barcelona Institute of Science and Technology, 08860 Castelldefels (Barcelona), Spain}
\affiliation{Department of Signal Theory and Communications, Universitat Politecnica de Catalunya, 08034 Barcelona, Spain}
\date{\today}
\begin{abstract}
The concept of quantum discord aims at unveiling quantum correlations that go beyond those described by entanglement. Its original formulation [J. Phys. A \textbf{34}, 6899 (2001); Phys. Rev. Lett. \textbf{88}, 017901 (2002)] is difficult to compute even for the simplest case of two-qubit systems. Alternative formulations have been developed to address this drawback, such as the geometric measure of quantum discord [Phys. Rev. A \textbf{87}, 062303 (2013)] and the local quantum uncertainty [Phys. Rev. Lett. \textbf{110}, 240402 (2013)] that can be evaluated in closed form for some quantum systems, such as two-qubit systems. We show here that these two measures of quantum discord are equivalent for $2 \times D$ dimensional bipartite quantum systems. By considering the relevant example of N00N states for phase estimation in lossy environments, we also show that both metrics of quantum discord quantify the decrease of quantum Fisher information of the phase estimation protocol. Given their ease of computation in $2 \times D$ bipartite systems, the geometric measure of quantum discord and the local quantum uncertainty demonstrate their relevance as computable measures of quantum discord.
\end{abstract}
\keywords{Non-classical correlations, coherence, quantum discord, entanglement}
\maketitle
\section{\label{sec:Introduction}Introduction}
The quantum correlations embedded in entangled states are a resource that facilitates the design of new protocols for parameter estimation. Relative to coherent states, usually considered as benchmark states, entangled states can show enhanced resolution. One paradigmatic example of such states used for quantum-enhanced sensing are N00N states, which allow the estimation of an unknown phase with a resolution that scales as $1/N$, where $N$ is the average number of photons. This is an improvement with respect to the scaling provided by coherent states, that goes as $\sim 1/\sqrt{N}$.
Quantum correlations that go beyond those described by entanglement, e.g., quantum correlations in separable states, can also offer a quantum advantage by enhancing the resolution for estimating unknown parameters in a quantum system \cite{vedral2011}. Henderson and Vedral \cite{henderson2001}, and Ollivier and Zurek \cite{ollivier2002} introduced the concept of quantum discord to quantify those correlations. They noticed that while there are two equivalent expressions for the mutual information of two random variables that give the same result, their generalizations for measuring the correlations between two quantum systems may yield different results.
The original formulation of quantum discord is difficult to compute \cite{huang2014} even for the important but simplest case of two-qubit systems \cite{luo2008,ali2010,chen2011}. This has led to alternative formulations of the concept that still fulfill a set of conditions expected for a good measure of quantum correlations \cite{modi2012} while being more easily computable in certain scenarios of interest.
One of these alternatives is the geometric measure of quantum discord, or geometric quantum discord (GQD) for short. It is based on the assumption that a bipartite quantum state $\rho^{A B}$ has zero discord \cite{introgeometricdiscord,datta2011,acin2009,ferreira2018} if and only if there is a von Neumann measurement $\left\{\Pi_{k}^{A}\right\}$, with $\Pi_{k}^{A}=\left|u_{k}\right\rangle\left\langle u_{k}\right|$, on the subspace $A$ such that $\sum_{k}\left(\Pi_{k}^{A} \otimes I^{B}\right) \rho\left(\Pi_{k}^{A} \otimes I^{B}\right)=\rho .$ Here $I^{B}$ designates
the identity operator in the subspace $B$. \textcolor{black}{We restrict ourselves to von Neumann measurements \cite{Busch2009,luoandsun2017}, so all projectors $\Pi_{k}^{A}$ are one-dimensional. In this case we can write the projectors $\Pi_{k}^{A}$ in terms of a set of vectors $\left\{\left|u_{k}\right\rangle\right\}$ that is a basis in subspace $A$}.
This implies that zero-discord quantum states are of the form $\rho=\sum_{k} p_{k}\left|u_{k}\right\rangle\left\langle u_{k}\right| \otimes \rho_{k}^{B}$, where $\rho_{k}^{B}$ are density matrices in subspace $B$ and $p_{k}$ are positive real numbers with $\sum_{k} p_{k}=1 .$ These states are sometimes termed as \textit{classical-quantum} \cite{adesso2016}. From the definition of \textit{classical-quantum states}, it naturally follows that the geometric quantum discord is the minimum distance (square norm in the Hilbert-Schmidt space) between the quantum state $\rho$ and the closest \textit{classical-quantum state} $\sum_{k}\left(\Pi_{k}^{A} \otimes I^{B}\right) \rho\left(\Pi_{k}^{A} \otimes I^{B}\right)$.
Such a definition for GQD might show some drawbacks \cite{piani2012} since it can increase under local operations of the party $B$ that is not measured. This undesirable effect can be corrected \cite{ChangLuo2013} if one substitutes the density matrix $\rho$ by $\rho^{1/2}$, so that the GQD is now the minimum distance (square norm in the Hilbert-Schmidt space) between $\rho^{1/2}$ and $\sum_k \left( \Pi_k^A \otimes I^B \right)\, \rho^{1/2}\, \left( \Pi_k^A \otimes I^B \right)$. This is the version of geometric quantum discord that we use throughout this paper. One major advantage of this expression is that it can be calculated in closed form for quantum bipartite systems of dimension $2 \times D$ \cite{ChangLuo2013,LuoFu2012}.
Interestingly, the very same year that the previous correction of the geometric discord was reported, Girolami, Tufarelli and Adesso \cite{Girolami2013} introduced the local quantum uncertainty (LQU), a new formulation of quantum discord defined as follows: given a specific von Neumann measurement where each projector $\Pi_k^A$ is assigned an eigenvalue $\lambda_k$ (all $\lambda_k$ are different), the LQU is the minimum over all possible ensembles $\left\{ \Pi_k^A\right\}$ of the Wigner-Yanase Skew information, $I$ \cite{Wigner910}:
\begin{equation}
I=-\frac{1}{2} \text{Tr} \left\{ \left[ \rho^{1/2}, M\right]^2 \right\}.
\end{equation}
Here $M=\left( \sum_k \lambda_k \Pi_k^A\right)\,\otimes I^B$ and $I^B$ is the identity on subspace $B$. Again, as in the case of the geometric quantum discord discussed above, one important advantage of LQU is that it can be calculated in closed form for $2 \times D$ quantum bipartite systems.
For a given von Neumann measurement $\left\{ \Pi_k^A \otimes I^B \right\}$, one can define its quantum uncertainty as $Q=\sum_k I_k$, where
\begin{equation}
I_k=-\frac{1}{2} \text{Tr} \left\{ \left[ \rho^{1/2}, \Pi_k^A \otimes I^B\right]^2 \right\}.
\end{equation}
It turns out that the GQD is the \textcolor{black}{minimum} of the quantum uncertainty $Q$ over all possible von Neumann measurements. This introduces a revealing link between the LQU and the GQD formulations of the quantum discord through the use of similar expressions of the Wigner-Yanase Skew information \cite{luoandsun2017}. \textcolor{black}{In a given von Neumann measurement, characterized by a set of one-dimensional operators $\left\{ \Pi_k^A \right\}$, each one associated with a possible experimental outcome, the intrinsic statistical error associated with the measurement has a quantum contribution. The Skew information, a measure of the non-commutativity between the quantum state $\rho$ and the set $\left\{ \Pi_k^A \otimes I^B \right\}$, can be used to quantify this quantum uncertainty. In this context, the local quantum uncertainty and the geometric discord can be understood as the minimum quantum uncertainty that one can have among all possible von Neumann measurements. However, they differ in how they evaluate the quantum uncertainty. The geometric discord considers the sum of the quantum uncertainties associated with each outcome $\Pi_k^A \otimes I^B$, while the local quantum uncertainty considers the quantum uncertainty associated to an operator that describes the global measurement, $M=\left( \sum_k \lambda_k \Pi_k^A \right) \otimes I^B$, where $\lambda_k$ are eigenvalues associated with each possible outcome of the measurement.}
\textcolor{black}{The two quantum discord metrics considered above, namely the local quantum uncertainty and the geometric quantum discord, fulfil similar requirements that the original discord definition does, which make them good discord metrics \cite{modi2012,Girolami2013}. These discord quantifiers are non-negative, invariant under local unitary transformations, they yield zero only for quantum-classical states and the discord reduces to an entanglement monotone, characterized by the marginal entropy of subsystem $A$, for pure states.}
\textcolor{black}{As the geometric discord and the local quantum uncertainty can be both explained as the minimum quantum uncertainty that can be attained in a von Neumann measurement, one might wonder whether they are the same discord metric, at least for certain scenarios}. In this paper, we demonstrate that for bipartite quantum systems whose dimensionality is $2\times D$, the two aforementioned metrics of quantum discord are indeed the same, although this may not be true for systems with other dimensions. Moreover, we take advantage of the fact that both measures can be evaluated in closed form, in sharp contrast to other alternative formulations of quantum discord \cite{vedral2011}.
Finally, we show an example of the potential usefulness of GQD and LQU by evaluating the quantum Fisher information of N00N states for phase estimation in a lossy environment. \textcolor{black}{The use of quantum systems in sensing and imaging applications provides a unique tool to develop new parameter estimation schemes with enhanced resolution. However, quantum systems experiencing losses are fragile. This can lead to a worsening of the resolution achievable, thus reducing the quantum advantage observed for the lossless case. We can use several measures to characterize the effect of losses, e.g., negativity and quantum discord, but it is not clear in principle which is the most convenient or informative in each scenario.}
For one-parameter estimation, the Cram\'er-Rao bound given by the quantum Fisher information \cite{Helstrom1969} is attainable, so it is a good measure of the resolution enhancement provided by a protocol making use of a specific quantum state \cite{fujiwara2005,matsumoto2005}. Remarkably, we demonstrate that the decrease of quantum Fisher information under the presence of losses, with respect to the ideal case with no losses, is precisely the geometric quantum discord. \textcolor{black}{In this sense, the quantum discord is more informative than negativity concerning the spatial resolution achievable under the presence of loss, as given by the quantum Fisher information}.
\section{Equivalence between LQU and GQD for $2 \times D$ systems}
The quantum uncertainty $Q$ defined in \cite{luoandsun2017}, whose minimum yields the GQD, can be written as $Q=\sum_j I_j$ where
\begin{eqnarray}
& & I_j=-\frac{1}{2}\,\text{Tr} \left\{ \left[ \rho^{1/2},\Pi_j^A \otimes I^B \right]^2 \right\} \nonumber \\
& & =\text{Tr} \left[ \rho \left( \Pi_j^A \right)^2 \right]- \text{Tr} \left( \rho^{1/2} \Pi_j^A \, \rho^{1/2} \Pi_j^A \right) =\text{Tr}_B\, V_j,
\end{eqnarray}
and $V_j$ is defined as
\begin{equation}
V_j=\langle u_j |\rho|u_j \rangle- \langle u_j |\rho^{1/2}|u_j\rangle \langle u_j|\rho^{1/2}|u_j \rangle.
\end{equation}
If we make use of the resolution of the identity on subspace $A$, i.e., $\sum_i |u_i\rangle \langle u_i|=I^A$, we obtain that
\begin{equation}
Q=\sum_j \text{Tr}_B\, V_j= 2\sum_{j<k} \text{Tr}_B\, V_{jk}, \label{Q}
\end{equation}
where
\begin{equation}
V_{jk}=\langle u_j |\rho^{1/2}|u_k\rangle \langle u_k|\rho^{1/2}|u_j \rangle,
\end{equation}
and $V_{jk}=V_{kj}$.
\begin{figure*}
\caption{\label{figure1}}
\end{figure*}
In a similar vein, the quantum uncertainty $U$ defined in \cite{Girolami2013}, whose minimum yields the LQU, can be written as
\begin{eqnarray}
& & U=\text{Tr}_B \left\{ \sum_j \lambda_j^2 \langle u_j |\rho|u_j \rangle \right. \nonumber \\
& & \left. - \sum_{j,k}\lambda_j \lambda_k \langle u_j |\rho^{1/2}|u_k\rangle \langle u_k|\rho^{1/2}|u_j \rangle \right\} \nonumber \\
& & = \sum_j \lambda_j^2 \text{Tr}_B\,V_j-2 \sum_{jk} \text{Tr}_B\,\lambda_j \lambda_k V_{jk} \nonumber \\
& & = \sum_{j< k} (\lambda_j^2+\lambda_k^2)\, \text{Tr}_B V_{jk} -2\sum_{j<k} \lambda_j \lambda_k \text{Tr}_B\,V_{jk} \nonumber \\
& & =\sum_{j<k} (\lambda_j-\lambda_k)^2 \text{Tr}_B\, V_{jk}, \label{U}
\end{eqnarray}
where $\lambda_j$ corresponds to the eigenvalue of the $j$-th projector constituting a von Neumann measurement.
Equations (\ref{Q}) and (\ref{U}) are valid for arbitrary dimensions of the Hilbert spaces of the bipartite quantum states, and for any quantum state described by density matrix $\rho$. For a Hilbert space with dimension $2 \times D$ the key observation is that
\begin{eqnarray}
& & \langle u_1 |\rho|u_1 \rangle- \langle u_1 |\rho^{1/2}|u_1\rangle \langle u_1|\rho^{1/2}|u_1 \rangle \nonumber \\
& & = \langle u_1 |\rho^{1/2}|u_2\rangle \langle u_2|\rho^{1/2}|u_1 \rangle \nonumber \\
& & = \langle u_2 |\rho|u_2 \rangle- \langle u_2 |\rho^{1/2}|u_2\rangle \langle u_2|\rho^{1/2}|u_2 \rangle,
\end{eqnarray}
so that $V_1=V_2=V_{12}$. In this case,
\begin{equation}
U=(\lambda_1-\lambda_2)^2 \text{Tr}_B\, V_{12}=\frac{(\lambda_1-\lambda_2)^2}{2} Q.\label{proportionalQU}
\end{equation}
Equation (\ref{proportionalQU}) shows that the quantum uncertainties $Q$ and $U$ are proportional to each other, thus implying that the measures of quantum discord that derive from them are indeed equivalent for bipartite systems of dimension $2 \times D$.
\section{Non-equivalence between LQU and GQD in systems with arbitrary dimensions}
\textcolor{black}{In this section we want to demonstrate that in bipartite systems where the dimension of both subsystems is greater than $2$, the LQU and GQD are not proportional to each other. For the sake of simplicity, we restrict ourselves to comparing the values of $Q$ and $U$ for pure states in Hilbert spaces of dimensions $2\times D$ and $3\times D$}.
We start by noticing that any pure bipartite quantum state can be written as a Schmidt decomposition
\begin{equation}
|\Psi\rangle=\sum_m \sqrt{s_m} |\alpha_m \rangle |\beta_m \rangle,
\end{equation}
where $\left\{ \alpha_m \right\}$ is a basis in subspace $A$, $\left\{ \beta_m \right\}$ is a basis in subspace $B$ and $\left\{ s_j \right\}$ are the Schmidt coefficients, with the normalization condition $\sum_j s_j=1$. We can easily derive that
\begin{equation}
\text{Tr}_B\,V_{jk}=\big[ \sum_m s_m \big|\langle \alpha_m|u_j\rangle \big|^2 \big] \times \big[ \sum_n s_n \big|\langle \alpha_n|u_k\rangle \big|^2 \big].
\end{equation}
In Ref. \cite{ChangLuo2013} it was demonstrated that for pure states the von Neumann measurement that minimizes the quantum uncertainty $Q$ corresponds to choosing $|u_i \rangle \equiv |\alpha_i \rangle$. In this case $\text{Tr}_B\, V_{jk}=s_j s_k$ so the geometric quantum discord \textcolor{black}{for pure states} is $D_G=2\sum_{j<k} s_j s_k$. By making use of the normalization of the quantum state we obtain that $2\sum_{i<j} s_i s_j=1-\sum_i s_i^2$ so the quantum discord for pure states can also be written as $D_G=1-\sum_i s_i^2$, as reported in \cite{ChangLuo2013}.
The expression of the quantum uncertainty $U$ for pure states is
\begin{eqnarray}
& & U=\sum_{j<k} (\lambda_j-\lambda_k)^2 \big[ \sum_m s_m \big|\langle \alpha_m|u_j\rangle \big|^2 \big] \nonumber \\
& & \times \big[ \sum_n s_n \big|\langle \alpha_n|u_k\rangle \big|^2 \big].
\end{eqnarray}
We have performed extensive numerical simulations choosing many random von Neumann bases $\left\{ |u_i \rangle \right\}$ to calculate the range of possible values of the quantum uncertainties $Q$ and $U$. The von Neumann bases are obtained by choosing random unitary transformations $U$ of the bases $\left\{ |\alpha_i \rangle \right\}$ so that $\left\{ |u_i \rangle \right\}=U \left\{ |\alpha_i \rangle \right\}$. For $2 \times D$ and $3 \times D$ quantum systems, one can choose the most general unitary transformation as given in \cite{Rasin1997}.
Figure 1(a) shows all possible values of the quantum uncertainty $Q$ obtained numerically for a $2 \times D$ quantum system. The solid lines correspond to the minimum value of $Q$, that is $D_G=2s_1(1-s_1)$, and the maximum value, $D_G=1-1/2=0.5$ \cite{ChangLuo2013}. Fig. 1(b) shows all possible values of $U$ for a $2 \times D$ quantum system with $(\lambda_1-\lambda_2)^2/2=1$. \textcolor{black}{As expected from the results obtained in Section II, Figs. 1(a) and (b) show the same results.}
\textcolor{black}{Figs. 1(c) to 1(f) correspond to a $3 \times D$ system. The numerical simulations hereby presented show that the minimum of $U$ is attained for von Neumann measurements where the three orthogonal measurement projectors $\Pi_i^A$ ($i=1,2,3$) can be written as $\Pi_i^A=|\alpha_{p(i)} \rangle \langle \alpha_{p(i)}|$, where $p(i)$ designates the permutation $\left\{ 1,2,3 \right\} \longrightarrow \left\{ p(1), p(2), p(3) \right\}$ that yields the minimum value of $U$. We have six possibilities corresponding to the six different ways we can associate one vector of the set $|u_i \rangle$ with one vector of the set $|\alpha_i \rangle$.} The local quantum uncertainty is
\begin{equation}
LQU=\sum_{j<k} (\lambda_j-\lambda_k)^2 s_{p(j)} s_{p(k)}.
\end{equation}
The eigenvalue $\lambda_i$ that we associate to each von Neumann state $|\alpha_i \rangle$ now matters. \textcolor{black}{This is in contrast to the case of $Q$, where there is no eigenvalues associated to each outcome of a measurement and so all outcomes have the same weight.}
\textcolor{black}{Note that the maximum value of $Q$ for pure states is independent of the Schmidt coefficients $s_i$, and it is $1/2$ for $2 \times D$ systems and $2/3$ for $3 \times D$. On the other hand, Figs. 1(d) and (f) show that the maximum value of $U$ for $3 \times D$ systems may change for different values of the Schmidt coefficients. As a conclusion, such a value does not depend only on the dimensions of the subsystems, contrary to the case of the quantum uncertainty $Q$.}
\textcolor{black}{Figure 2 shows how, for two specific sets of values of the eigenvalues $\lambda_i$, the correspondence between vectors $|u_i \rangle$ and $|\alpha_i \rangle$ that gives the minimum of quantum uncertainty $U$ varies for different values of $s_1$ and $s_2$. Each color in the figures stands for a different value of the minimum of $U$. Fig. 2(a) shows that for the case with eigenvalues $\lambda_1=2$, $\lambda_2=4$ and $\lambda_3=1$, when comparing the minimum of $U$ obtained for each value of $s_1$ and $s_2$, up to six different results are obtained. These six minimum values of $U$ can be obtained making use of the six possible permutations in Eq. (13). In Fig. 2(b) we consider the case with eigenvalues $\lambda_1=4$, $\lambda_2 = 3$ and $\lambda_3 = 2$. Now one can obtain up to three different minima of $U$ when considering all possible Schmidt coefficients.}
\section{Geometric quantum discord of N00N states under non-symmetric losses }
To demonstrate the usefulness of the equivalence between GQD and LQU, we consider the relevant case of N00N states for phase estimation,
\begin{equation}
\ket{\Psi}_{AB}=\frac{1}{\sqrt{2}}\Big(\ket{N}_A\ket{0}_B+\exp(iN\varphi)\ket{0}_A\ket{N}_B \Big),
\end{equation}
where $\varphi$ is the phase per photon introduced in one of the modes (subsystems $A$ or $B$), and $N$ is the non-zero number of photons in either of the modes. N00N states can be used to estimate an unknown phase $\varphi$ with a precision that scales as $1/N$ \cite{Mitchell_2004}. Compared with protocols that make use of coherent states, that provide a precision that scales as $1/\sqrt{N}$, N00N states are an important example of quantum-enhanced phase estimation.
\begin{figure}
\caption{\label{6color}}
\end{figure}
We consider the case where there are losses only in subsystem B (non-symmetric losses). The reason for this is that in this scenario the quantum state is a $2 \times (N+1)$ system, which allows us to calculate the quantum discord in a straightforward way. As shown in Figure \ref{CartoonNOON}, we can model such losses by considering that photons travelling in subsystem $B$ traverse a fictitious beam splitter (BS) with reflection coefficient $r$ (photons moving from subsystem $B$ to subsystem $C$) and a transmission coefficient $t$ (photons that continue in subsystem $B$) \cite{walmsleyPRA}. The overall quantum state after the BS is
\begin{eqnarray}
& & \ket{\Psi}_{ABC}=\frac{1}{\sqrt{2}} \big[ \ket{N}_{A} \ket{0}_{B} \ket{0}_C \label{overall} \label{ABC_combinatory} \\
& & + \sum_{n=0}^N \sqrt{\binom{N}{n}}\,t^n r^{N-n} \exp (in\varphi) \ket{0}_{A} \ket{n}_{B} \ket{N-n}_C \big], \nonumber
\end{eqnarray}
with two accessible states for subsystem $A$ ($\{0,N\}$) and $N+1$ for subsystem $B$ ($\{0, ... , N\}$).
The density matrix that describes subsystem $AB$ is obtained calculating the partial trace of the state given by Eq. (\ref{overall}) with respect to subsystem $C$. In this way,
\begin{eqnarray}
& & \rho^{AB}=\frac{1}{2}\Big(\ket{N}_A \ket{0}_B+t^N \exp (iN\varphi)\ket{0}_A\ket{N}_B \Big) \nonumber \\
& & \times \Big( \bra{N}_A \bra{0}_B+t^{*N} \exp(-iN\varphi) \bra{0}_A\bra{N}_B \Big) \nonumber \\
& & +\frac{1}{2}\sum_{n=0}^{N-1} \binom{N}{n} |t|^{2n} |r|^{2(N-n)} \ket{0}_A \ket{n}_B\bra{0}_A\bra{n}_B.
\label{partialAB}
\end{eqnarray}
The fact that the dimension of the quantum state of subsystems $AB$ is $2 \times D$ with $D=N+1$ allows us to readily calculate the Local Quantum Uncertainty, or equivalently the geometric quantum discord.
\begin{figure}
\caption{\label{CartoonNOON}}
\end{figure}
\subsection{Calculation of the quantum Fisher information}
The quantum Fisher Information $F_Q$ associated with the quantum state given by Eq. (\ref{partialAB}) can be calculated by making use of the spectral decomposition of the state: $\rho^{AB}=\sum_i \lambda_i(\varphi) \ket{\lambda_i(\varphi)}_{AB} \bra{\lambda_i(\varphi)}_{AB}.$ Here $\lambda_i(\varphi)$ are the eigenvalues of the decomposition and $\ket{\lambda_i(\varphi)}_{AB}$ are the corresponding eigenvectors. It can be easily demonstrated that all eigenvalues show no dependence on the value of $\varphi$ and that there are two eigenvectors with a non-zero $\varphi$-dependence:
\begin{equation}
\left|\lambda_{1}\right\rangle ={\cal N}\left[|N\rangle_{A}|0\rangle_{B}+t^{N}\text{e}^{iN\varphi}|0\rangle_{A}|N\rangle_{B}\right],
\end{equation}
with $\lambda_{1}=\left(1+|t|^{2N}\right)/2$, and
\begin{equation}
\left|\lambda_{2}\right\rangle ={\cal N}\left[-t^{*N}|N\rangle_{A}|0\rangle_{B}+\text{e}^{iN\varphi}|0\rangle_{A}|N\rangle_{B}\right]
\end{equation}
with $\lambda_{2}=0$. The normalization constant is ${\cal N}=(1+|t|^{2N})^{-1/2}$. In this case \cite{walmsleyPRA,EntangledCoherentStates2013} the quantum Fisher information reads $F_Q=\lambda_1 F_1$ with
\begin{equation}
F_{1}=4\left[\bigg\langle\frac{\partial\lambda_1}{\partial\varphi}\bigg|\frac{\partial\lambda_1}{\partial\varphi}\bigg\rangle-\left|\bigg\langle\lambda_{1}\bigg|\frac{\partial\lambda_1}{\partial\varphi}\bigg\rangle\right|^{2}\right],
\end{equation}
which yields the simple expression
\begin{equation}
F_Q=N^2\frac{2|t|^{2N}}{1+|t|^{2N}}.
\end{equation}
Note that for the ideal lossless case, we obtain the well-known result $F_Q=N^2$.
\begin{figure}
\caption{\label{figure2}}
\end{figure}
\subsection{Calculation of LQU and GQD}
Given that LQU and geometric quantum discord are equivalent discord measures for $2 \times (N+1)$ quantum systems, in what follows we will refer to them as geometric quantum discord $D_G$ for the sake of simplicity. According to Ref. \cite{Girolami2013}, the LQU of $2 \times (N+1)$ bipartite quantum systems is $D_G=1-\lambda_{\mathrm{max}}$ where $\lambda_{\mathrm{max}}$ is the greatest eigenvalue of the $3 \times 3$ symmetric matrix $W_{AB}$,
\begin{equation}
(W_{AB})_{ij}=\mathrm{Tr}\Big(\rho^{1/2}(\sigma_i \otimes \openone)\rho^{1/2}(\sigma_j \otimes \openone) \Big).
\label{matrixW}
\end{equation}
Here $\sigma_i$ designates the three Pauli matrices. We obtain that the greater eigenvalue of the matrix $W$, considering the quantum state $\rho_{AB}$ described by Eq. (\ref{partialAB}), is $\lambda_{\mathrm{max}}=(1-|t|^{2N})/(1+|t|^{2N})$. Therefore the corresponding geometric quantum discord is
\begin{equation}
D_G=\frac{2|t|^{2N}}{1+|t|^{2N}}.
\end{equation}
We can thus write a very simple relationship between the quantum Fisher information with and without loss
\begin{equation}
F_Q^{\text{loss}}=D_G \times F_Q^{\text{lossless}}.
\end{equation}
where $F_Q^{\text{loss}}$ designates the quantum Fisher information of the N00N state in a lossy environment and $F_Q^{\text{lossless}}$ is the quantum Fisher information of the ideal (no losses) N00N state. Remarkably, we have found that the geometric quantum discord (and so the Local quantum uncertainty) quantifies the loss of quantum Fisher information due to losses. Fig. \ref{figure2}(a) shows the linear relationship between Fisher information and $D_G$ for a N00N state with $N=10$. It turns out that the geometric quantum discord is the decrease of quantum Fisher information of a N00N state due to non-symmetric losses.
The quantum state given by Eq. (\ref{partialAB}) is always entangled. This can be demonstrated by calculating the negativity, which is an entanglement monotone \cite{horodecki2009}. Fig. \ref{figure2}(b) shows the Quantum Fisher Information as a function of negativity. For a high degree of entanglement (low losses and thus negativity close to 1) the Fisher information is a quasi-linear function of the negativity of the quantum state. However, for low values of entanglement (high losses and low values of negativity) the relationship between quantum Fisher information and negativity is no longer linear, contrary to the case of the geometric quantum discord.
\section{Conclusions}
We have demonstrated that two measures of quantum discord, namely the geometric quantum discord introduced in \cite{ChangLuo2013} and the local quantum uncertainty \cite{Girolami2013}, are equivalent measures of discord for $2 \times D$ quantum bipartite systems. Contrary to other measures of discord \cite{ollivier2002,vedral2011} that are very difficult to compute, these measures can be computed in closed form for $2 \times D$ systems, which include important cases such as two-qubit systems.
As an example of the relevance of the geometric quantum discord (and local quantum uncertainty), we have considered N00N states in non-symmetric lossy environments, that are $2 \times (N+1)$ quantum bipartite systems. We have found that the geometric quantum discord faithfully quantifies the decrease of quantum Fisher information due to losses, a good indicator of the quantum enhancement provided by N00N states for phase estimation.
\begin{thebibliography}{28}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{https://doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Modi}\ \emph {et~al.}(2011)\citenamefont {Modi},
\citenamefont {Cable}, \citenamefont {Williamson},\ and\ \citenamefont
{Vedral}}]{vedral2011}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Modi}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Cable}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Williamson}},\ and\
\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Vedral}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review X}\ }\textbf
{\bibinfo {volume} {1}},\ \bibinfo {pages} {021022} (\bibinfo {year}
{2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Henderson}\ and\ \citenamefont
{Vedral}(2001)}]{henderson2001}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Henderson}}\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Vedral}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Journal of Physics A: mathematical and general}\ }\textbf {\bibinfo {volume}
{34}},\ \bibinfo {pages} {6899} (\bibinfo {year} {2001})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Ollivier}\ and\ \citenamefont
{Zurek}(2002)}]{ollivier2002}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Ollivier}}\ and\ \bibinfo {author} {\bibfnamefont {W.~H.}\ \bibnamefont
{Zurek}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {88}},\ \bibinfo
{pages} {017901} (\bibinfo {year} {2002})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Huang}(2014)}]{huang2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont
{Huang}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {New
Journal of Physics}\ }\textbf {\bibinfo {volume} {16}},\ \bibinfo {pages}
{033027} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Luo}(2008)}]{luo2008}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Luo}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Physical
Review A}\ }\textbf {\bibinfo {volume} {77}},\ \bibinfo {pages} {042303}
(\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ali}\ \emph {et~al.}(2010)\citenamefont {Ali},
\citenamefont {Rau},\ and\ \citenamefont {Alber}}]{ali2010}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Ali}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Rau}},\ and\
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Alber}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {81}},\ \bibinfo {pages} {042105} (\bibinfo {year}
{2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chen}\ \emph {et~al.}(2011)\citenamefont {Chen},
\citenamefont {Zhang}, \citenamefont {Yu}, \citenamefont {Yi},\ and\
\citenamefont {Oh}}]{chen2011}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Q.}~\bibnamefont
{Chen}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Zhang}},
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Yu}}, \bibinfo {author}
{\bibfnamefont {X.}~\bibnamefont {Yi}},\ and\ \bibinfo {author}
{\bibfnamefont {C.}~\bibnamefont {Oh}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Physical Review A}\ }\textbf {\bibinfo {volume} {84}},\
\bibinfo {pages} {042313} (\bibinfo {year} {2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Modi}\ \emph {et~al.}(2012)\citenamefont {Modi},
\citenamefont {Brodutch}, \citenamefont {Cable}, \citenamefont {Paterek},\
and\ \citenamefont {Vedral}}]{modi2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Modi}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Brodutch}},
\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Cable}}, \bibinfo
{author} {\bibfnamefont {T.}~\bibnamefont {Paterek}},\ and\ \bibinfo {author}
{\bibfnamefont {V.}~\bibnamefont {Vedral}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Reviews of Modern Physics}\ }\textbf
{\bibinfo {volume} {84}},\ \bibinfo {pages} {1655} (\bibinfo {year}
{2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Daki\ifmmode~\acute{c}\else \'{c}\fi{}}\ \emph
{et~al.}(2010)\citenamefont {Daki\ifmmode~\acute{c}\else \'{c}\fi{}},
\citenamefont {Vedral},\ and\ \citenamefont
{Brukner}}]{introgeometricdiscord}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Daki\ifmmode~\acute{c}\else \'{c}\fi{}}}, \bibinfo {author} {\bibfnamefont
{V.}~\bibnamefont {Vedral}},\ and\ \bibinfo {author} {\bibfnamefont
{C.}~\bibnamefont {Brukner}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo
{pages} {190502} (\bibinfo {year} {2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Datta}(2011)}]{datta2011}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Datta}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{arXiv:1003.5256v2 [quant-ph]}\ } (\bibinfo {year} {2011})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {A.~Ferraro}\ \emph {et~al.}(2010)\citenamefont
{A.~Ferraro}, \citenamefont {Aolita}, \citenamefont {Cavalcanti},
\citenamefont {Cucchietti}, \citenamefont {1},\ and\ \citenamefont
  {Ac\'{\i}n}}]{acin2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{A.~Ferraro}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Aolita}},
\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Cavalcanti}}, \bibinfo
{author} {\bibfnamefont {F.~M.}\ \bibnamefont {Cucchietti}}, \bibinfo
{author} {\bibnamefont {1}},\ and\ \bibinfo {author} {\bibfnamefont
  {A.}~\bibnamefont {Ac\'{\i}n}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Physical Review A}\ }\textbf {\bibinfo {volume} {81}},\ \bibinfo
{pages} {052318} (\bibinfo {year} {2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ferreira}\ \emph {et~al.}(2018)\citenamefont
{Ferreira}, \citenamefont {Filenga}, \citenamefont {Cornelio},\ and\
\citenamefont {Fanchini}}]{ferreira2018}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~S.~S.}\
\bibnamefont {Ferreira}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Filenga}}, \bibinfo {author} {\bibfnamefont {M.~F.}\ \bibnamefont
{Cornelio}},\ and\ \bibinfo {author} {\bibfnamefont {F.~F.}\ \bibnamefont
{Fanchini}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {97}},\ \bibinfo {pages}
{012328} (\bibinfo {year} {2018})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Busch}\ and\ \citenamefont
{Lahti}(2009)}]{Busch2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Busch}}\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Lahti}},\
}\href@noop {} {\emph {\bibinfo {title} {Compendium of Quantum Physics}}}\
(\bibinfo {publisher} {Springer Berlin Heidelberg},\ \bibinfo {year}
{2009})\ pp.\ \bibinfo {pages} {356--358}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Luo}\ and\ \citenamefont
{Sun}(2017)}]{luoandsun2017}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Luo}}\ and\ \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Sun}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Physical Review
A}\ }\textbf {\bibinfo {volume} {96}},\ \bibinfo {pages} {022130} (\bibinfo
{year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Adesso}\ \emph {et~al.}(2016)\citenamefont {Adesso},
\citenamefont {Cianciaruso},\ and\ \citenamefont {Bromley}}]{adesso2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Adesso}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Cianciaruso}},\ and\ \bibinfo {author} {\bibfnamefont {T.~R.}\ \bibnamefont
{Bromley}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{arXiv:1611.01959v1 [quant-ph]}\ }\textbf {\bibinfo {volume} {7 November
2016}} (\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Piani}(2012)}]{piani2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Piani}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo {pages}
{034101} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chang}\ and\ \citenamefont
{Luo}(2013)}]{ChangLuo2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Chang}}\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Luo}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Physical Review
A}\ }\textbf {\bibinfo {volume} {87}},\ \bibinfo {pages} {062303} (\bibinfo
{year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Luo}\ and\ \citenamefont {Fu}(2012)}]{LuoFu2012}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Luo}}\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Fu}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Theoretical and
Mathematical Physics}\ }\textbf {\bibinfo {volume} {171}},\ \bibinfo {pages}
{870} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Girolami}\ \emph {et~al.}(2013)\citenamefont
{Girolami}, \citenamefont {Tufarelli},\ and\ \citenamefont
{Adesso}}]{Girolami2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Girolami}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Tufarelli}},\ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Adesso}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {110}},\ \bibinfo
{pages} {240402} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Wigner}\ and\ \citenamefont
{Yanase}(1963)}]{Wigner910}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.~P.}\ \bibnamefont
{Wigner}}\ and\ \bibinfo {author} {\bibfnamefont {M.~M.}\ \bibnamefont
{Yanase}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Proceedings of the National Academy of Sciences}\ }\textbf {\bibinfo
{volume} {49}},\ \bibinfo {pages} {910} (\bibinfo {year} {1963})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Helstrom}(1969)}]{Helstrom1969}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~W.}\ \bibnamefont
{Helstrom}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Journal of Statistical Physics}\ }\textbf {\bibinfo {volume} {1}},\ \bibinfo
{pages} {231} (\bibinfo {year} {1969})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Fujiwara}(2005)}]{fujiwara2005}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Fujiwara}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Asymptotic Theory of Quantum Statistical Inference}\ ,\ \bibinfo {pages}
{229}} (\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Matsumoto}(2005)}]{matsumoto2005}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Matsumoto}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Asymptotic Theory of Quantum Statistical Inference}\ ,\ \bibinfo {pages}
{305}} (\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Rasin}(1997)}]{Rasin1997}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Rasin}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{arXiv:hep-ph/9708216}\ } (\bibinfo {year} {1997})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mitchell}\ \emph {et~al.}(2004)\citenamefont
{Mitchell}, \citenamefont {Lundeen},\ and\ \citenamefont
{Steinberg}}]{Mitchell_2004}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont
{Mitchell}}, \bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont
{Lundeen}},\ and\ \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont
{Steinberg}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
  {Nature}\ }\textbf {\bibinfo {volume} {429}},\ \bibinfo {pages} {161--164}
(\bibinfo {year} {2004})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Demkowicz-Dobrzanski}\ \emph
{et~al.}(2009)\citenamefont {Demkowicz-Dobrzanski}, \citenamefont {Dorner},
\citenamefont {Smith}, \citenamefont {Lundeen}, \citenamefont {Wasilewski},
\citenamefont {Banaszek},\ and\ \citenamefont {Walmsley}}]{walmsleyPRA}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Demkowicz-Dobrzanski}}, \bibinfo {author} {\bibfnamefont {U.}~\bibnamefont
{Dorner}}, \bibinfo {author} {\bibfnamefont {B.~J.}\ \bibnamefont {Smith}},
\bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Lundeen}}, \bibinfo
{author} {\bibfnamefont {W.}~\bibnamefont {Wasilewski}}, \bibinfo {author}
{\bibfnamefont {K.}~\bibnamefont {Banaszek}},\ and\ \bibinfo {author}
{\bibfnamefont {I.~A.}\ \bibnamefont {Walmsley}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Physical Review A}\ }\textbf {\bibinfo
{volume} {80}},\ \bibinfo {pages} {013825} (\bibinfo {year}
{2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {M.}\ \emph {et~al.}(2013)\citenamefont {M.},
\citenamefont {Li}, \citenamefont {Yang},\ and\ \citenamefont
{Jin}}]{EntangledCoherentStates2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Z.~Y.}\ \bibnamefont
{M.}}, \bibinfo {author} {\bibfnamefont {X.~W.}\ \bibnamefont {Li}}, \bibinfo
{author} {\bibfnamefont {W.}~\bibnamefont {Yang}},\ and\ \bibinfo {author}
{\bibfnamefont {G.~R.}\ \bibnamefont {Jin}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Physical Review A}\ }\textbf {\bibinfo
{volume} {88}},\ \bibinfo {pages} {043832} (\bibinfo {year}
{2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Horodecki}\ \emph {et~al.}(2009)\citenamefont
{Horodecki}, \citenamefont {Horodecki}, \citenamefont {Horodecki},\ and\
\citenamefont {Horodecki}}]{horodecki2009}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Horodecki}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Horodecki}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Horodecki}},\ and\ \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Horodecki}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Review of Modern Physics}\ }\textbf {\bibinfo {volume} {81}},\ \bibinfo
{pages} {865} (\bibinfo {year} {2009})}\BibitemShut {NoStop}
\end{thebibliography}
\end{document} |
\begin{document}
\title{Dynamics of trapped atoms around an optical nanofiber probed through polarimetry}
\author{Pablo Solano}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland and NIST, College Park, MD
20742, USA.}
\author{Fredrik K. Fatemi}
\affiliation{Army Research Laboratory, Adelphi, MD 20783, USA.}
\author{Luis A. Orozco}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland and NIST, College Park, MD
20742, USA.}
\author{S. L. Rolston}
\affiliation{Joint Quantum Institute, Department of Physics, University of Maryland and NIST, College Park, MD
20742, USA.}
\begin{abstract}
The evanescent field outside an optical nanofiber (ONF) can create optical traps for neutral atoms. We present a non-destructive method to characterize such trapping potentials. An off-resonance linearly polarized probe beam that propagates through the ONF experiences a slow axis of polarization produced by trapped atoms on opposite sides along the ONF. The transverse atomic motion is imprinted onto the probe polarization through the changing atomic index of refraction. By applying a transient impulse, we measure a time-dependent polarization rotation of the probe beam that provides both a rapid and non-destructive measurement of the optical trapping frequencies.
\end{abstract}
\maketitle
Nano-optical waveguides allow efficient ways to couple trapped atoms to propagating photons, a crucial element in the development of quantum technologies \cite{Thompson2013,Goban2014,Goban2015,Hood2016}. Optical nanofibers (ONF) \cite{Morrissey2013} have been shown to be a particularly versatile platform in this context by enabling quantum memories \cite{Gouraud2015,Sayrin2015,Jones2015,Kumar2015}, switches \cite{OShea2013,Shomroni2014}, diodes \cite{Sayrin2015a}, and reflectors \cite{Corzo2016,Sorensen2016}. These examples show integration of photonic and atomic systems.
An ONF consists of single-mode optical fiber heated and pulled to create a tapered profile. The tapers can adiabatically guide the propagating light in and out of a sub-wavelength diameter waist with less than 0.1\% loss~\cite{Hoffman2014a}.
Because the nanofiber radius is smaller than the wavelength of the propagating mode, most of the field is outside its dielectric body as an evanescent field \cite{LeKien2004}. This field allows coupling of atoms near the ONF surface to the guided mode. The tight confinement of the propagating mode enables significant atom-light coupling.
The large spatial gradient of the evanescent field enables an optical dipole trap for atoms with two different wavelengths of light, one detuned above atomic resonance (blue-detuned) to repel the atoms from the surface, and the other detuned below resonance (red-detuned) for confinement. Such traps are an effective tool to confine atoms close to the ONF waveguide for millisecond time-scales with low optical powers ($\approx$5 mW), creating a robust platform for coupling propagating photons to atoms \cite{Vetsch2010,Goban2012,Reitz2013,Beguin2014a,Kato2015}.
A typical ONF dipole trap, with retro-reflection of the red-detuned light, creates two one-dimensional arrays of atoms on each side of the ONF, sketched in Fig. \ref{fig:1} (a). Characterizing the atom number and trap characteristics is necessary for future applications of this platform. The number of trapped atoms can be measured on resonance \cite{Vetsch2010} or off resonance \cite{Beguin2014a,Qi2016}, \textit{i.e.} destructive and dispersive measurements, respectively. Parametric heating to find vibrational frequencies has also been applied to ONFs~\cite{Vetsch2010a}, but is destructive and is a serial measurement for finding the trap frequencies.
\begin{figure}
\caption{(a) Schematic of the experimental setup showing the two one-dimensional arrays of atoms. An off-resonance probe beam propagates through the sample with linear polarization rotated by 45$^{\circ}$.}
\label{fig:1}
\end{figure}
In this letter we present a method to non-destructively characterize the trapping potential of an ONF dipole trap. We propagate a weak, off-resonance probe beam through the ONF that is linearly polarized and tilted 45$^{\circ}$ relative to the azimuthal axis defined by the trapping potential. The probe experiences a modified refractive index with a fast axis and a slow axis due to the presence of trapped atoms. This effective birefringence rotates the polarization of the probe as a function of the position of the atoms. Turning on the probe beam imparts a momentum kick to the trapped atoms so that they oscillate at the radial and azimuthal trapping frequencies. Detecting the time-dependent polarization change of the probe gives us a direct and non-destructive measurement of the motion and transverse frequencies of the trapping potential. By probing the atomic motion directly, the spectrum of the system response can be analyzed in a single time-domain measurement up to the bandwidth of the detection.
Because the evanescent field decay constant is proportional to its wavelength, the red (blue) detuned light creates a longer (shorter) range attractive (repulsive) potential. Combining both red and blue detuned light, the atoms experience a potential energy minimum a fraction of a wavelength away from the ONF surface. This two-color dipole trap provides radial confinement for the atoms. Two counter-propagating red-detuned beams in a standing-wave configuration provide confinement along the optical nanofiber in a one-dimensional lattice. Azimuthal confinement is achieved by correctly choosing the polarization of the trapping beams. At the ONF waist, linearly-polarized light becomes quasi-linearly polarized, breaking the azimuthal symmetry of the intensity profile of the propagating field. Aligning the polarization axis of the red-detuned beam orthogonal to the blue detuned one provides azimuthal confinement for the atoms (See Fig. \ref{fig:1} (a) and (b)).
We create a dipole trap for $^{87}$Rb atoms with a 235-nm radius ONF waist by coupling two counter-propagating red-detuned beams (1064 nm) in a standing wave configuration and one blue-detuned beam (750 nm). The dominant resonances for Rb are at 780 nm (D2 line) and 795 nm (D1 line). We typically use 1 mW of power for each red-detuned beam, and 3 mW for the blue-detuned beam. Fig.~\ref{fig:1}(b) shows this configuration, which produces a trapping potential with a depth of about 500 $\mu$K. Here, and throughout the paper, we consider only the atomic scalar polarizability for the calculations of the trapping potentials.
We image the light scattered from the nanofiber to characterize the polarization of the laser beams at the ONF waist~\cite{Hoffman2015}. Because Rayleigh scattering preserves the polarization of the field, with the help of a linear polarizer in front of the camera we determine the polarization of the propagating field. The polarization can be controlled by wave plates at the input of the ONF. Each laser beam has to be characterized and controlled independently, since inherent stress in the ONF creates a birefringent medium that affects each wavelength differently.
A magneto-optical trap (MOT) loads cold $^{87}$Rb atoms into our ONF dipole trap in a vacuum chamber kept at lower than $10^{-9}$ Torr. We further cool the atoms by increasing the detuning of the MOT beams for 90 ms. We then turn off the magnetic field gradient to create optical molasses for 1 ms. The atoms are typically at 15 $\mu$K when we let them fall into the dipole trap. Because of the tight confinement of the trap, the atoms are expected to be in a collisional blockade regime. This leads to a binary loading with one or zero atoms per trapping site. We typically trap a few hundred atoms for trapping lifetimes of the order of 10 ms. The trapped atoms are in a statistical mixture of $m_F$ Zeeman sub-levels.
We send an off-resonant beam, detuned 200 MHz to the blue of the $F=2\rightarrow F'=3$ transition of the D2 line, through the ONF to probe the trapped atoms. We align its polarization to be 45$^{\circ}$ from the trapping beams when there are no atoms present. The projection of the transverse polarization component along the axis defined by the trapped atoms experiences a modified refractive index while the orthogonal component, which does not interact with the atoms, propagates unaltered. The motion of trapped atoms in the transverse plane of the nanofiber will change this birefringence as a function of time, producing a dynamical polarization rotation of the probe beam. Motion along the fiber axis (z-direction) is likely to be only weakly coupled to the probe and would not produce significant polarization rotation.
\begin{figure}
\caption{(a) Polarization rotation of the probe beam (in units of measured voltage) as a function of time. The sudden spike in the signal denotes the probe turning on. (b) Power spectrum from the Fourier transform of the oscillations in (a). The two distinct peaks (at 73 $\pm 3$ kHz and 197 $\pm 2$ kHz) correspond to the azimuthal and radial trapping frequencies respectively, marked with red dashed lines.}
\label{fig:2}
\end{figure}
Because of the significant atom-light coupling provided by the tight mode area, more than a few tens of nW of probe power will perturb the trap near resonance. We use 70 nW of probe power, enough to imprint a momentum kick in the atoms to start their motion, but too weak to excite the atoms out of the trap. Fig. \ref{fig:1} (c) shows the effect of the probe beam on the trapping potential.
The polarization rotation of such a low probe power is detected by heterodyne measurements by mixing the probe with a local oscillator (LO) with a 1 MHz relative frequency shift. We typically use 9 mW of power for the LO beam. After the probe goes through the ONF it is combined with the LO using a 50/50 beam splitter. We use one of the output paths for detection. Its polarization components are separated by a Wollaston prism and sent to a 4 MHz bandwidth balanced photodetector. The 1 MHz beat note between the probe and the LO is mixed down to DC. This allows us to use the LO as gain for the probe, and directly detect the probe polarization rotation as a function of time with a bandwidth higher than the expected trap frequencies.
Figure \ref{fig:2} (a) shows a typical signal of the polarization rotation of the probe. Although the signal is visible in single-shot, the data is averaged to improve the signal to noise ratio by a factor of 10. The original data was acquired with a 2-ns bin width, and the plot is a 400-ns moving average for visualization purposes. The detector polarizations are set such that when there are no trapped atoms the measured output voltage is zero. However the zero voltage at time $t=0$ in the plot is produced only by the LO (probe beam off). The probe field turns on at 2 $\mu$s. The signal can be decomposed in two time regimes: a short time regime where we observe oscillations due to the atoms moving back and forth in the trapping potential; and a long time regime where the oscillations vanish but the non-zero signal shows the presence of atoms in the trap. The sharp initial peak comes from atoms starting their motion closer to the ONF surface, where they interact more strongly with the probe beam, producing a larger signal. The decoherence of the oscillations comes from the large anharmonicity of the trapping potential and the thermal motion of the trapped atoms. The long timescale slope is the lifetime of the trap. In this case the characteristic decay time is $370\pm 3$ $\mu$s, where the error represents the standard error of the fit. The lifetime is degraded by more than an order of magnitude when the probe beam is kept on. A small fraction of the probe beam gets absorbed by the trapped atoms and results in losses as the trapping potential becomes shallower (see Figs. \ref{fig:1} (b) and (c) with the depth scale).
The temporal response and initial oscillations in Fig. \ref{fig:2} (a) encode information about transverse trapping frequencies. By taking a discrete Fourier transform of the data (after the probe turns on) we obtain the resonance frequencies of the oscillating atoms. Fig. \ref{fig:2} (b) shows the power spectrum of the signal. We observe two distinct peaks at $\nu_{\phi}=73\pm 3 $ kHz and $\nu_{r}=197\pm 2 $ kHz, corresponding to the azimuthal and radial frequencies of the trap. The uncertainties in the mean are calculated from the full width at half maximum of the peak over the signal to noise ratio \cite{Clairon1991}. The width of the spectral peaks and damping of the time-domain oscillations arise from the dephasing of the atoms due to the strong anharmonicity of the trap. As an approximation, we can model the problem as a damped harmonic oscillator. The fit to a Lorentzian line shape shows linewidths of $\gamma_{\phi}=64\pm 8$ kHz and $\gamma_{r}=47\pm 6$ kHz respectively, where the errors are the standard errors of the fit. This represents a decay time of the oscillations of around 20 $\mu$s, enough to measure trapping potentials of more than 50 kHz. The observation of oscillations from the azimuthal motion of the atoms depends on the alignment of the probe polarization to within a few degrees. On the other hand, the detection of oscillation from radial motion of the atoms is more robust under misalignments.
We can compare the measured frequencies in Fig. \ref{fig:2} (b) to a numerical calculation. Taking the second derivative of the trapping potential shown in Fig. \ref{fig:1} (c) and knowing the atomic mass $m$ we can calculate the expected trapping frequencies as $\nu_{i}=\frac{1}{2\pi}\sqrt{\frac{1}{m}\,\partial^2 U/\partial x_i^2}$, where the index $i$ denotes the radial or azimuthal direction in cylindrical coordinates. For the experimental parameters listed in this paper, which produce Fig. \ref{fig:1} (c), we find that $\nu_{\phi}=70\pm 4$ kHz and $\nu_{r}=195\pm 6$ kHz. The frequencies are extracted by fitting a harmonic potential to the bottom of the calculated potential and extracting the corresponding trapping frequency for each spatial direction. The errors represent the sensitivity of the simulation to a 5\% variation of the experimental parameters, these parameters being the powers of the four laser beams (two red-detuned, one blue-detuned, and the probe), and the four polarization angles (three relative angles). We assume that the polarizations are perfectly linearly-polarized, which is in general not true, but greatly reduces the number of free parameters in the simulation. The theoretical results are 2\% above and 7\% below the measured values for the azimuthal and radial frequencies respectively. The measured signal is in good agreement with the expected result within the experimental uncertainties.
\begin{figure}
\caption{Polarization rotation of the probe beam (in units of measured voltage) as a function of time, for a set of four 40 $\mu$s probe pulses. The repeatability of the process shows the non-destructive feature of the measurement technique. The inset shows a Monte Carlo simulation of the signal for radial oscillations only. The simulation considers an ensemble of atoms oscillating in the potential shown in Fig. \ref{fig:1}.}
\label{fig:3}
\end{figure}
The non-destructive feature of this method is further tested by probing the trapped atoms more than once while they still are in the trap. Fig. \ref{fig:3} shows the polarization rotation as a function of time for a probe beam that turns on and off four times. We see that the first pulse is enough to extract the oscillation frequency of the atoms before it decreases. Subsequently the probe turns off and on again, after 10 $\mu$s, reproducing the same oscillatory signal but with smaller amplitude. This process can be repeated as long as there are enough atoms in the trap to produce a detectable signal. The signal from the four pulses shown in Fig. \ref{fig:3} has an over-all slope corresponding to a trapping lifetime of $265\pm 1$ $\mu$s. This is an almost 30\% shorter lifetime compared to keeping the probe beam constantly on (as in Fig. \ref{fig:2} (a)), because the momentum kick of suddenly turning the probe beam on and off can induce atom loss. However, the dispersive measurement is non-destructive enough to test the characteristics of the trap while leaving a significant amount of atoms for further experimentation. The inset of Fig. \ref{fig:3} shows a numerical simulation of the detected signal for only radial oscillations (uncoupled motion). Using the simulated trapping potential (Fig. \ref{fig:1} (c)) we calculate the motion of a set of 500 atoms randomly positioned with a flat distribution of $\pm 75$ nm centered at $80$ nm towards the ONF from the potential minimum. The trajectories of the atoms, computed and averaged, give an effective trajectory. The signal is proportional to the dynamical change of the coupling into the ONF of an atom following such an effective trajectory. The displacement of the center of the distribution of the initial atomic positions takes into account the displacement of the center of the trap when the probe beam is turned on. The parameters for the simulation are empirically found within an experimentally realistic range.
This simple model captures the qualitative behavior of the detected signal.
Although the probe beam modifies the potential landscape felt by the atoms, the good agreement between the measurements and the simulations allows us to extract the trapping potential without the modification due to the probe beam. In our case we obtain $\nu_{\phi}=178.3$ kHz and $\nu_{r}=252.2$ kHz from the potential shown in Fig. \ref{fig:1} (b). Moreover, by optimizing the photodetection, a weaker probe beam could be used to minimally perturb the trapping potential. In this configuration another pulsed beam can rapidly imprint a momentum kick to the atoms, so they start oscillating in phase. Colder atoms might also help to establish a longer coherence time for the oscillations, since the trapping potential approximates a harmonic trap around its minimum. The measured signal increases linearly with the number of trapped atoms. A more efficient loading of the trap may increase the number of atoms and the amplitude of the signal.
We have shown how a polarimetric measurement of an off-resonance probe beam can be used for rapid and non-destructive characterization of the trapping potential of a two-color ONF-based dipole trap. This technique can be easily implemented in any ONF-based dipole trap experiment, allowing a shot-to-shot measurement of the trapping potential before performing further experiments in the same experimental sequence, an advantage over other configurations of optical dipole traps. The results are in good agreement with theoretical predictions, showing an understanding of the variables involved in the problem. This points to different strategies to improve the technique in the future. We expect that non-destructive and fast-readout characterization of local potential experienced by trapped atoms near dielectric surfaces to become standard tools in the growing field of interfacing nano-photonic platforms to cold atoms.
\section*{Acknowledgments}
This work has been supported by the National Science Foundation of the United States (NSF) (PHY-1307416); NSF Physics Frontier Center at the Joint Quantum Institute (PHY-1430094).
\end{document} |
\begin{document}
\title{The graphs with the max-Mader-flow-min-multiway-cut property}
\begin{abstract}
We are given a graph $G$, an independent set $\mathcal{S} \subset V(G)$ of \emph{terminals}, and a function $w:V(G) \to \mathbb{N}$. We want to know if the maximum $w$-packing of vertex-disjoint paths with extremities in $\mathcal{S}$ is equal to the minimum weight of a vertex-cut separating $\mathcal{S}$. We call \emph{Mader-Mengerian} the graphs with this property for each independent set $\mathcal{S}$ and each weight function $w$. We give a characterization of these graphs in terms of forbidden minors, as well as a recognition algorithm and a simple algorithm to find maximum packings of paths and minimum multicuts in those graphs.
\end{abstract}
\section{Introduction}
Given a graph $G=(V,E)$, a set $\mathcal{S} \subset V$ with $|\mathcal{S}| \geq 2$ and inducing a stable set is called a set of \emph{terminals}. An \emph{$\mathcal{S}$-path} is a path having distinct ends in $\mathcal{S}$, but inner nodes in $V \setminus \mathcal{S}$. A set ${\mathcal{P}}$ of $\mathcal{S}$-paths is a \emph{packing of vertex-disjoint $\mathcal{S}$-paths} (since there is no risk of confusion, we will use the shorter term \emph{packing of $\mathcal{S}$-paths} within this paper), if two paths in ${\mathcal{P}}$ do not have a vertex in common in $V \setminus \mathcal{S}$. We are looking for a maximum number $\nu(G,\mathcal{S})$ of $\mathcal{S}$-paths in a packing.
An \emph{${\mathcal{S}}$-cut} is a set of vertices in $V \setminus \mathcal{S}$ that disconnect all the pairs of vertices in $\mathcal{S}$ (that is a blocker of the $\mathcal{S}$-paths). We are looking for an ${\mathcal{S}}$-cut with a minimum number $\kappa(G,\mathcal{S})$ of vertices.
The following inequality holds for any graph $G$ and any ${\mathcal{S}}\subseteq V(G)$: $\nu(G,\mathcal{S})\leq \kappa(G,\mathcal{S})$, as any $\mathcal{S}$-path intersects any $\mathcal{S}$-cut. Note that if $|\mathcal{S}| = 2$ the equality always holds, this being Menger's vertex-disjoint undirected $(s,t)$-paths theorem. This paper deals with graphs for which $\nu(G,{\mathcal{S}})=\kappa(G,{\mathcal{S}})$, for any set $\mathcal{S}$ of terminals. Actually, we try to characterize a stronger property associated with a weighted version of these two optimization problems. Consider the following system with variables $x\in \mathbb{R}^{V\setminus \mathcal{S}}_+$:
\begin{equation}\label{eqn:blocking}
x(P)\geq 1 \mbox{ for every } {\mathcal{S}}\mbox{-path } P
\end{equation}
An integral vector $x$ minimizing $wx$ over~\eqref{eqn:blocking} is necessarily
a $\{0,1\}$-vector and is the characteristic vector of a minimum
$\mathcal{S}$-cut. Dually, an integral vector $y$ optimum for the dual of
minimizing $wx$ over~(\ref{eqn:blocking}) is necessarily a maximum $w$-packing
of $\mathcal{S}$-paths. Hence, if~\eqref{eqn:blocking} is a TDI system, we
have that the minimum $w$-capacity of an $\mathcal{S}$-vertex-cut is equal to
the maximum $w$-packing of ${\mathcal{S}}$-paths.
\begin{figure}
\caption{The net.}
\label{fig:net}
\end{figure}
As an example, consider the graph of Figure~\ref{fig:net}, called
\emph{net}. Let $\mathcal{S}$ be the square vertices. A maximum integral
packing of $\mathcal{S}$-paths ($w=1$) contains only one path, while any
$\mathcal{S}$-cut must contain at least two vertices. Precisely, there is a fractional
packing of $\mathcal{S}$-paths of value $\frac{3}{2}$ (by taking each
$\mathcal{S}$-path of length $3$ with value $\frac{1}{2}$), and a fractional
$\mathcal{S}$-cut with the same value (by taking $x(v) = \frac{1}{2}$ for all
$v \notin \mathcal{S}$).
Motivated by the following property, we call \emph{Mader-Mengerian} the
graphs for which the system \eqref{eqn:blocking} is TDI for every set
${\mathcal{S}}$ of terminals.
\begin{property}\label{lemma:perfect}
Given a graph $G$ and a set of terminals $\mathcal{S}$, the following conditions are equivalent:
\begin{enumerate}
\item The system~\eqref{eqn:blocking} is TDI,
\item The polyhedron defined by~\eqref{eqn:blocking} is integral,
\item The optimum value of minimizing $w^Tx$ subject to~\eqref{eqn:blocking} is integral (if finite) for all $w \in \{0,1,+\infty\}^{V}$.
\end{enumerate}
\end{property}
The proof of this property is postponed to section~\ref{sec:bipartite} where
the stronger Lemma~\ref{lemma:main} is proved. We already know that the long
claw is not Mader-Mengerian.
Our main result (Theorem~\ref{th:bad-graphs}) is a description of the Mader-Mengerian graphs in terms of forbidden minors. However we do not use the usual minor operations (edge deletion and edge contraction), but \emph{ad-hoc} operations on vertices. Our proof implies an algorithm (Lemma~\ref{lemma:main}) to find maximal $w$-packing of paths in Mader-Mengerian graphs and minimum vertex multicuts for a given set of terminals. We also give a characterization of the pairs $(G,\mathcal{S})$ for which the system~\eqref{eqn:blocking} is TDI (Theorem~\ref{th:signed}).
One of our most surprising results is that $G$ is Mader-Mengerian if and only if the system~\eqref{eqn:blocking} is TDI for every independent set $\mathcal{S}$ of cardinality $3$. This implies (with Lemma~\ref{lemma:main}) a polynomial algorithm to recognize Mader-Mengerian graphs.
Finding a minimum $\mathcal{S}$-cut is an NP-complete problem, even if $|\mathcal{S}|=3$~\cite{papaseym}. In fact,~\cite{papaseym} deals with edge-cuts (that is, sets of edges disconnecting $\mathcal{S}$), but one may observe that $\mathcal{S}$-edge-cut in a graph $G$ correspond to vertex-cut in the line-graph of the graph obtained from $G$ by adding one leaf to each vertex in $\mathcal{S}$.
Finding maximal packing of disjoint paths is a classical problem in graph theory, even if it was mainly studied for edge-disjoint (or arc-disjoint) paths. Menger~\cite{menger} gave the first significant result, stating that when $|\mathcal{S}|=2$, the maximum number of disjoint $\mathcal{S}$-paths is equal to the minimum cardinality of an $(s,t)$-cut, both in edge-disjoint and vertex-disjoint cases. This result was further developed by Ford and Fulkerson~\cite{fordfulkerson}, into what became the network flow theory. When there are more than two terminals, the results are however closer to matching theory than to network flows. Gallai~\cite{gallai} first proved a min-max theorem for packing of fully-disjoint $\mathcal{S}$-paths (that is even the ends of the paths must be disjoint), and his result was then strengthened by Mader~\cite{mader} for inner-disjoint paths with ends in different parts of a partition of the terminals. Mader's theorem implies the following:
\begin{theorem}[Mader, 1978]
Let $G$ be a graph and $\mathcal{S}$ an independent set of $G$. Then,
\[
\nu(G,\mathcal{S}) =
\min |U_0| + \sum_{i=1}^k \left\lfloor \frac{b_{U_0}(U_i)}{2} \right\rfloor
\]
where the minimum ranges over all the partitions $U_0,\ldots,U_k$ of $V \setminus \mathcal{S}$, such that each $\mathcal{S}$-path intersects either $U_0$ or $E(U_i)$ for some $1 \leq i \leq k$. Here, $b_{U_0}(X) := |\{v \in X~:~N(v) \setminus (X \cup U_0) \neq \emptyset\}|$.
\end{theorem}
In the light of Mader's theorem, we are looking for graphs that admit a much simpler characterization: $\nu(G,\mathcal{S}) = \min |U|$ where the minimum ranges over sets $U$ such that each $\mathcal{S}$-path intersects $U$. A practical reason for looking for these graphs is that Mader's theorem relies on matching theory, while our result will only use Menger's theorem, that is flow theory. As a consequence, algorithms for finding an optimal packing of $\mathcal{S}$-paths in Mader-Mengerian graphs are simpler and more efficient than those for general graphs.
Mader's theorem has been recently extended by Chudnovsky et al.~\cite{chudnovskyetal}, and by Gyula Pap~\cite{pap}.
Let us mention a similar result for edge-disjoint paths, that was proved by Cherkasky~\cite{cherkasky} and Lovász~\cite{lovasz}:
\begin{theorem}[Cherkasky, Lovász, 1977]
For any inner Eulerian graph $G$, the maximum number of edge-disjoint $\mathcal{S}$-paths is equal to $\frac{1}{2} \sum_{s \in \mathcal{S}} \lambda_s$, where $\lambda_s$ is the minimum cardinality of a cut between $s$ and $\mathcal{S} - s$.
\end{theorem}
This has been later extended by Karzanov and Lomonosov~\cite{karzanosov}, who proved the Locking Theorem.
These results explain when the maximum packing of edge-disjoint $\mathcal{S}$-paths has a characterization in terms of minimal cuts.
\section{Vertex minors and skew minors}
Given a graph $G=(V,E)$ and $v\in V$, \emph{deleting} $v$ in $G$ means considering the graph $G-v$ induced by $V-v$, that is:
$$G-v:=(V - v, E \setminus \delta_G(v))$$
\emph{Contracting} $v$ means considering the graph $G / v$ obtained by removing $v$ and replacing its neighborhood by a clique:
$$G/v:=(V - v, E \cup \{wx | w, x \in N_G(v)\} \setminus \delta_G(v))$$
For $e=xy \in E$ \emph{contracting} $e$ means considering the graph $G / e$ obtained by identifying
the end-nodes $x$ and $y$ of $e$.
$$G/e:=(V, E \cup \{xz | z \in N_G(y)\} \cup \{yz | z \in N_G(x)\} \setminus e)$$
A graph obtained from $G$ by any sequence of vertex deletions and vertex contractions is
a \emph{vertex-minor} of $G$. A graph obtained from $G$ by any sequence of vertex deletions, vertex contractions and edge contractions is a \emph{skew-minor} of $G$.
Vertex-minors can also be described in the following way:
\begin{proposition}
Let $G$ be a graph, and $G'$ be a vertex-minor of $G$. Let $D$ be the vertices deleted and $C$ be the vertices contracted to get $G'$ from $G$. Then, $u, v \in V(G')$ are adjacent in $G'$ if and only if there is a path with extremities $u$ and $v$ in $G$ and whose inner nodes are in $C$.\qed
\end{proposition}
This immediately implies:
\begin{lemma}\label{lemma:commutativity}
Vertex-deletions and vertex-contractions commute.\qed
\end{lemma}
By definition, for a class of graph, being closed under skew minors implies being closed under vertex minors, which in turn implies being closed under induced subgraphs.
Several important classes of graphs are closed under skew minors. Among them:
\begin{definition}$\quad$
\begin{itemize}
\item[-] The \emph{interval graphs} are the graphs of intersection of
intervals of the real line.
\item[-] The \emph{chordal graphs} are the graphs of intersection of subtrees
of a tree. Equivalently, a graph is chordal if each of its cycles of length
at least $4$ has a chord.
\item[-] The \emph{cocomparability graphs} are the graphs whose complement is
the underlying graph of a partially ordered set.
\item[-] The \emph{Asteroidal-Triple-free (AT-free) graphs} are the graphs without
asteroidal triple. A stable set $S$ of cardinality $3$ is an
\emph{asteroidal triple} of $G$ if there is no $x \in S$ such that $S-x$ is
contained in a connected component of $G - (\{x\} \cup N(x))$.
\item[-] The $P_k$-free graphs, for $k \in \mathbb{N}$, are the graphs with no
induced path of length at least $k$.
\end{itemize}
\end{definition}
The following proposition is left as an exercise:
\begin{proposition}\label{lemma:closeness}
Interval graphs, chordal graphs, co-comparability graphs, AT-free graphs,
$P_k$-free graphs are closed under skew minors.\qed
\end{proposition}
The following lemma explains why we are interested in the vertex-minor operations.
\begin{lemma}
Given a graph $G$ and a set of terminals $\mathcal{S}$, if the
system~\eqref{eqn:blocking} is TDI, then it is also TDI for any vertex-minor of $G$.
\end{lemma}
\begin{proof}
Deleting $v \in V \setminus {\mathcal{S}}$ corresponds to setting $w_v=0$.
Contracting $v \in V \setminus {\mathcal{S}}$ corresponds to setting $w_v=+\infty$.
\end{proof}
\section{Integrality of the blocker of S-paths}\label{sec:bipartite}
For a given graph $G$ and a set $\mathcal{S}$ of terminals, we construct an
auxiliary graph $G_{\mathcal{S}}$ as follows. First, note that if a
non-terminal vertex $v$ is adjacent to two terminals $s$ and $t$, we may
assume that the maximum packing for a weight function $w$ contains $w(v)$
times the $2$-length paths $sv,vt$, and the minimal $\mathcal{S}$-cut contains
$v$. Hence, we first delete every non-terminal vertex adjacent to two or more
terminals.
We may also assume that no $\mathcal{S}$-path of a maximum packing contains
two vertices of $N_G(s)$ for some terminal $s$ (by taking chordless
paths). Therefore if $G - N_G(s)$ contains a component disjoint from
$\mathcal{S}$, we can delete all its vertices.
From now on, we will always suppose that:
\begin{itemize}
\item[$(i)$] $G$ has no vertices adjacent to two distinct terminals.
\item[$(ii)$] for each $s \in \mathcal{S}$, every component of $G - N_G(s)$ intersects $\mathcal{S}$.
\item[$(iii)$] $G$ has no edge whose ends are both adjacent to the same terminal.
\end{itemize}
Then we consider the set $N = N_{G}(\mathcal{S})$ of vertices adjacent to
$\mathcal{S}$. $N$ is the vertex set of $G_{\mathcal{S}}$. We delete the
terminals, and contract the vertices in $V - (N \cup \mathcal{S})$. Then we
remove all the edges whose ends are adjacent to the same terminal in $G$ (the
contraction of a path of a maximum packing would not use these edges). This
gives $G_{\mathcal{S}}$. By construction, this graph is
$|\mathcal{S}|$-partite, each part being the neighborhood of one terminal.
Note that $a, b \in N$ are adjacent in $G_{\mathcal{S}}$ if $a$ and $b$ are not
adjacent to a common terminal, and there is an $(a,b)$-path in $G$ whose inner
vertices are outside $\mathcal{S} \cup N_G(\mathcal{S})$.
\begin{figure}
\caption{A graph $G$ and the auxiliary graph $G_{\mathcal{S}}$.}
\label{fig:auxiliary}
\end{figure}
\begin{lemma}\label{lemma:main}
Given a graph $G$ and a set of terminals $\mathcal{S}$,
the system~\eqref{eqn:blocking} is TDI
if and only if the auxiliary graph $G_{\mathcal{S}}$ is bipartite.
\end{lemma}
\begin{proof}
Assume that $G_{\mathcal{S}}$ is not bipartite. Let $C^\ast$ be an induced odd cycle of $G_{\mathcal{S}}$.
We define a weight vector $w \in \{0,1,+\infty\}^{V(G)}$ as follows:
\begin{equation}\label{eqn:auxiliary}
w_v:= \left\{\begin{array}{ll}
1 & \textrm{if } v \in C^* \\
0 & \textrm{if } v \in V(G_{\mathcal{S}}) \setminus C^* \\
+\infty & \textrm{otherwise}
\end{array}\right.
\end{equation}
To every edge $uv$ of $C^\ast$, we can associate an $\mathcal{S}$-path of $G$
intersecting $N$ exactly in $u$ and $v$. Then a maximum fractional $w$-packing
of ${\mathcal{S}}$-paths is given by taking $1/2$ for each of these paths and
a minimum fractional $\mathcal{S}$-cut of $G$ is given by $1/2$ on every node
of $C^\ast$, and $1$ on other vertices of $N$. The optimum value of the corresponding pair
of dual linear programs is then $|V(C^\ast)|/2$, hence the polyhedron
defined by~\eqref{eqn:blocking} is not integer.
Suppose now that $G_{\mathcal{S}}$ is bipartite, with bipartition $(A,B)$.
Let $H$ be the graph obtained by deleting $\mathcal{S}$ and add two new
non-adjacent vertices $s_a$ and $s_b$, adjacent to respectively $A$ and
$B$.
Let $P$ be a chordless $(s_a,s_b)$-path in $H$. Let $\{a,b\} := N \cap
V(P)$. We can associate a unique path $\hat{P}$ of $G$ to $P$, by replacing
its extremities by terminals of $G$ (because each vertex of $N$ is adjacent to
a unique terminal). We show that $\hat{P}$ cannot be a cycle. Let $Q =
V(\hat{P}) \setminus (\mathcal{S} \cup N)$. If $Q$ is empty, $\hat{P}$ is
clearly not a cycle because in $H$, the neighborhood of a terminal is a stable
set.
Else $Q$ is contained in a component $C$ of $G \setminus (N \cup
\mathcal{S})$. $C$ is adjacent to $N_G(s)$ and $N_G(t)$ for two distinct
terminals $s$ and $t$ by condition $(ii)$. We can suppose that $a \in
N_G(s)$. $\hat{P}$ is a cycle only if $b \in N_G(s)$. But if this was the
case, then for $c \in N_G(t)$ adjacent to $C$, $a,c,b$ would be a path in $H$,
hence $a$ and $b$ would be in the same part of the bipartition $(A,B)$,
contradiction. $\hat{P}$ is not a cycle, it is an $\mathcal{S}$-path.
By applying the vertex-disjoint version of Menger's theorem to $H$,
$\nu(G,w,\mathcal{S}) = \kappa(G,w,\mathcal{S})$ for any $w\in
\mathbb{Z}^{V\setminus{\mathcal{S}}}$.
\end{proof}
\section{A forbidden minor characterization}\label{sec:minor-charac}
In this section, we find a characterization of Mader-Mengerian graphs by excluded vertex-minors. We start from the proof of Lemma~\ref{lemma:main}, where we showed that if a graph is not Mader-Mengerian, its auxiliary graph has an odd cycle. In the auxiliary graph construction, we perform vertex-minor operations plus deletion of edges between two vertices adjacent to the same terminal. It follows that a graph that is not Mader-Mengerian contains a vertex-minor $G$ of the following form.
$G$ is a graph obtained by taking an odd cycle $C$ and the terminals adjacent to $C$. Each vertex of $C$ is adjacent to exactly one terminal, called the \emph{representant} of this vertex. We color the vertices depending on their representants: each representant gets a distinct color, each other vertex has the color of its representant. A color is thus a set of vertices adjacent to some terminal, plus this terminal. Two consecutive vertices of the odd cycle have distinct colors, while the extremities of each chord share the same color. Let $\mathcal{A}_n$ be the class of graphs obtained in this way with $n$ terminals.
One path of lemmas and proofs to obtain the following result is presented
in the Appendix.
\begin{theorem}\label{th:bad-graphs}
Let $G$ be a graph.
The system~\eqref{eqn:blocking} is TDI for every stable set $\mathcal{S}$ if and only if $G$ does not contain a vertex minor in $\mathcal{A}_3$.
\end{theorem}
\begin{proof}
Direct consequence of Lemmas~\ref{lemma:odd-colors},~\ref{lemma:7colors} and~\ref{lemma:5colors}.
\end{proof}
\begin{corollary}
System~\eqref{eqn:blocking} is TDI for every stable set $\mathcal{S}$ of $G$ if and only if it is TDI for every stable set $\mathcal{S}$ of cardinality $3$ of $G$.\qed
\end{corollary}
This gives a polynomial-time recognition algorithm for the related class of graphs, in combination with Lemma~\ref{lemma:main}: we only have to check for each independent subset of three vertices whether the associated auxiliary graph is bipartite. Another important consequence is that the class of graphs for which system~\eqref{eqn:blocking} is TDI for every stable set is large. Indeed, it contains at least the asteroidal-triple-free graphs:
\begin{corollary}
For every asteroidal-triple-free graph, the system~\eqref{eqn:blocking} is TDI.
\end{corollary}
\begin{proof}
Follows from Theorem~\ref{th:bad-graphs} and Proposition~\ref{lemma:closeness}, as every graph in $\mathcal{A}_3$ contains an asteroidal triple, namely the set of terminals.
\end{proof}
To conclude this section on vertex-minors, we prove that there is an infinite number of minimal graphs to exclude.
\begin{lemma}\label{lemma:infinite}
If $n=3$ and each color class induces a clique, then $G$ is a minimal excluded graph.
\end{lemma}
\begin{proof}
Let $U, V, W \subset V(G)$ be the three colors of $G$, and let $u$, $v$, $w$ be
the three terminals of a minimal excluded minor $G' = G - D / C$. The distance
between two terminals in $G'$ is at least $3$, in particular they cannot be
adjacent. If $x, y \in V(G')$ and $xy \in E(G)$ then $xy \in E(G')$, thus $u$,
$v$ and $w$ have distinct colors in $G$, say $u \in U$, $v \in V$, $w \in W$.
Let $U'$, $V'$ and $W'$ be the color classes of $u$, $v$ and $w$ respectively
in $G'$. Every vertex adjacent to $u$ in $G'$ must be in the same color class
$U'$ as $u$ in $G'$, proving that $U \setminus (D \cup C) \subset U'$. Because
color classes are a partition of the vertex set, we have equality, $U' =U
\setminus (C \cup D)$ and similarly for $V'$ and $W'$.
Suppose $C$ is not empty, let $x \in C$. We may assume $x \in U$. If $x$ is
the representant of $U$, then $G' = G - (D + x) / (C - x)$. Else, if $x$ is not
the representant of $U$, $x$ has exactly two neighbors $y$ and $z$ outside
$U$. Because $u$, $v$ and $w$ must be at distance $3$ of each other in $G'$,
$y, z$ must be in $D$. Then we also have that $G' = G - (D + x) / (C -
x)$. Hence $G' = G - (C \cup D)$. But then, as the set of edges between colors
of $G'$ must be a cycle, $G' = G$, proving that $G$ is minimal.
\end{proof}
\section{Minimal skew-minors exclusion}\label{sec:skewminor}
A skew-minor of a graph $G$ is any graph obtained from $G$ via the following operations:
vertex deletion, vertex contraction and edge contraction.
Note that Mader-Mengerian graphs are not closed under edge contraction since
by inflating one of the central vertices of the net we get a Mader-Mengerian
graph. However, we can get a simple sufficient condition for the integrality
of system~\eqref{eqn:blocking} based on skew-minors:
\begin{theorem}\label{coro:rocket}
Any graph $G$ is either Mader-Mengerian or contains a net or a rocket as a skew minor.
\end{theorem}
\begin{figure}
\caption{The rocket}
\label{fig:rocket}
\end{figure}
\section{When the set of terminals is fixed}
Our arguments apply when we want to find the pairs $(G,\mathcal{S})$,
$\mathcal{S} \subset V(G)$, for which the system~\eqref{eqn:blocking} is
TDI. Up to now, we have only looked at graphs $G$ for which we have TDIness
for every set of terminals. To deal with a fixed set of terminals, we define
another notion of vertex-minor, the \emph{signed vertex-minor}, defined on
pairs $(G,\mathcal{S})$. Signed vertex-minor are defined like vertex-minor,
except that the set of terminals of the minor must be a subset of the
terminals of the original graph. More precisely, $(H,\mathcal{S}')$ is a
signed vertex-minor of $(G,\mathcal{S})$ if $H$ is a vertex-minor of $G$ and
$\mathcal{S}' \subseteq \mathcal{S}$.
Recall that $\mathcal{A}_3$ is the class of graphs built from a three-colored
odd cycle, by adding a terminal for each color, and chords with extremities of
the same color. We define similarly the class $\overline{\mathcal{A}_3}$ of signed vertex-minor
$(G,\mathcal{S})$, where $G \in \mathcal{A}_3$, and $\mathcal{S}$ is the set
of the three terminals in the construction of $G$.
This setting does not affect Lemma~\ref{lemma:main}, and then the following
theorem, close to Theorem~\ref{th:bad-graphs}, can be deduced by the same
proof. Indeed, the proofs in Section~\ref{sec:minor-charac} never create new
terminals when considering vertex-minors, and hence are still valid for signed
vertex-minors.
\begin{theorem}\label{th:signed}
Let $G$ be a graph and $\mathcal{S}$ a set of terminals in $G$. The
system~\eqref{eqn:blocking} is TDI if and only if $(G,\mathcal{S})$ does not have a
signed vertex-minor in $\overline{\mathcal{A}_3}$.\qed
\end{theorem}
\begin{corollary}
The system~\eqref{eqn:blocking} is TDI for $(G,\mathcal{S})$ if and only if it is TDI for every $(G,\mathcal{S}')$, with $\mathcal{S}' \subseteq \mathcal{S}$, $|\mathcal{S}'| = 3$.\qed
\end{corollary}
Moreover, all the graphs of $\overline{\mathcal{A}_3}$ are minimal graphs by signed vertex-minors for which system~\eqref{eqn:blocking} is not TDI. Indeed, a potential minor would have the same set of terminals. Moreover, if we contract a vertex, then its two consecutive vertices in the odd cycle become adjacent to two terminals, hence must be deleted. Hence, the minor must be obtained without vertex contraction, and the minimality follows easily.
\section{Conclusion}
We studied the pairs $(G,\mathcal{S})$ of (graphs, subsets of terminals) for
which the cost of an $\mathcal{S}$-vertex-cut is equal to the maximum packing
of $\mathcal{S}$-paths. We proved that this property for a given $\mathcal{S}$
is polynomially checkable as it reduces to the bipartiteness of an auxiliary
graph. Moreover if this property is true, the minimal $\mathcal{S}$-cut and
maximum path-packing problems can be solved by finding a maximum vertex-capacitated
flow in a smaller graph.
We proved that if $(G,\mathcal{S})$ does not satisfy this property, then there
exists $\mathcal{S}' \subseteq \mathcal{S}$ with $|\mathcal{S}'|=3$ such that
$(G,\mathcal{S}')$ does not satisfy it either. Moreover, each signed
vertex-minor in $\overline{\mathcal{A}_3}$ is a minimal signed vertex-minor obstruction.
Concerning the graphs satisfying the min-max formula for any $\mathcal{S}$, we
proved that they can be recognized in polynomial time, that the list of
vertex-minor obstructions is infinite, but we were unable to provide an
explicit description of this list. We believe that this list is hard to
obtain, and somehow ugly. We also proved that this class of graphs is
interesting as it contains the asteroidal-triple-free graphs.
\begin{appendix}
\section*{Appendix to section~\ref{sec:minor-charac}}
The \emph{distance in $C$} between two vertices $u$ and $v$ is the minimum number of arcs in one of the two $(u,v)$-paths in $C$. We denote $d_C(u,v)$ this minimum. We say that $u$ and $v$ are \emph{consecutive} if $d_C(u,v) = 1$. We denote $\repr(u)$ the representant of a vertex $u$. We say that a vertex of $C$ is \emph{bicolored} if its two neighbors in $C$ have distinct colors. Two colors are \emph{adjacent} if there is an edge in $C$ whose ends have these two colors.
Note that the net is a forbidden minor of Mader-Mengerian graphs, and is minimal. We try to find other forbidden minors that do not have a net as vertex-minor. For a graph $H$, we say that $G$ is $H$-free if $H$ is not a vertex minor of $G$.
\begin{lemma}\label{lemma:local-bicolor}
Let $u$ be a bicolored vertex. Let $v$ be a vertex of the same color as $u$. Then, either $G$ contains a net, or every vertex consecutive to $v$ has the color of a vertex consecutive to $u$.
\end{lemma}
\begin{figure}
\caption{Illustration for Lemma~\ref{lemma:local-bicolor}.}
\label{fig:lemma1}
\end{figure}
\begin{figure}
\caption{Illustration for Lemma~\ref{lemma:bicolor}.}
\label{fig:bicolor}
\end{figure}
\begin{proof}
Let $u_1$ and $u_2$ be adjacent to $u$ in $C$, let $v'$ be adjacent to $v$, and suppose that $u_1$, $u_2$ and $v'$ have distinct colors. First, suppose that $d_C(u,v) \geq 3$. There are two cases.
If $d_C(v',u) \geq 3$ (Figure~\ref{fig:lemma1}, $a$), let $G'$ be the graph obtained by contracting $u$ and $\repr(u)$ and by deleting all the vertices except $\repr(u_1)$, $\repr(u_2)$, $u_1$, $u_2$ and $v$. $G'$ is a net (Figure~\ref{fig:lemma1}, $b$).
If $d_C(v',u) = 2$ (Figure~\ref{fig:lemma1}, $c$), we may assume $v'u_1 \in E(C)$. Let $G'$ be the graph obtained from $G$ by contracting $u$, $v$ and $\repr(u)$ and deleting every other vertex except $v'$, $\repr(v')$, $\repr(u_1)$, $\repr(u_2)$, $u_1$ and $u_2$. Then $G'$ is a net (Figure~\ref{fig:lemma1}, $d$).
Now suppose that $d_C(u,v) = 2$. We may assume that $u_1$ is adjacent to $v$. Then the graph obtained from $G$ by contracting $\repr(u)$ and deleting every vertex except $u$, $v$, $u_1$, $u_2$, $v'$ and $\repr(u_1)$, is a net.
\end{proof}
\begin{lemma}\label{lemma:bicolor}
Every color is adjacent to at most two other colors, or $G$ contains a net vertex-minor.
\end{lemma}
\begin{proof}
Let $R$ be any color. By applying iteratively Lemma~\ref{lemma:local-bicolor}, if there is a vertex of color $R$ whose two consecutive vertices have distinct colors, then either $G$ contains a net, or $R$ is adjacent to exactly two colors.
Otherwise, each vertex in $C$ is consecutive to two vertices of the same color. Suppose that there are three vertices $u_1$, $u_2$, $u_3$ in $C$ of color $R$, such that their neighbors have three different colors. Let $v_1$, $v_2$ and $v_3$ be the vertices following $u_1$, $u_2$, $u_3$ respectively in $C$ (Figure~\ref{fig:bicolor}, $a$). Then, by contracting $u_1$, $u_2$, $u_3$, $\repr(u_1)$ and deleting all the vertices except $v_1$, $v_2$, $v_3$ and their representants, we obtain a net (Figure~\ref{fig:bicolor}, $b$).
\end{proof}
From now on, we suppose that $G$ does not have a net minor. We define the \emph{graph of colors}, whose vertices are the colors, by the adjacency relation introduced above. By Lemma~\ref{lemma:bicolor}, the graph of colors has maximum degree two. By connectivity, it is either a cycle or a path. We index the colors from $1$ to $n$, following the order defined by the path or the cycle. Thus, each edge of $C$ has extremities of colors $i$ and $i+1$, or $1$ and $n$. We have the following immediate consequence.
\begin{lemma}\label{lemma:odd-colors}
Let $G$ be net-free. The number $n$ of colors is odd, and the graph of colors is a cycle.
\end{lemma}
\begin{proof}
Suppose not. Then $C$ has a proper $2$-coloring (following the parity of the colors), thus is even, contradicting the assumption.
\end{proof}
\begin{lemma}\label{lemma:bicolored-vertices}
Let $G$ be net-free. Every color contains a bicolored vertex.
\end{lemma}
\begin{figure}
\caption{Illustration for Lemma~\ref{lemma:bicolored-vertices}.}
\label{fig:bicolored-vertices}
\end{figure}
\begin{proof}
Without loss of generality, it is sufficient to prove that there is a bicolored vertex of color $1$. Let $U$ be the vertices of color $1$ adjacent to a vertex of color $n$ and $U'$ be the other vertices of color $1$. Let $W$ be the vertices of $C$ having an odd color minus $U'$, and $B$ its complement in $V(C)$ (see Figure~\ref{fig:bicolored-vertices}). $B$ does not contain two consecutive vertices of $C$, and $(W,B)$ cannot be a proper two-coloring of $C$. Thus there is an edge in $C$ with both extremities in $W$. But this can only be an edge between $U$ and color $n$, hence there is a bicolored vertex in $U$.
\end{proof}
\begin{lemma}\label{lemma:7colors}
If $G$ is net-free, the number $n$ of colors is at most $5$.
\end{lemma}
\begin{figure}
\caption{Illustration for Lemma~\ref{lemma:7colors}.}
\label{fig:7colors}
\end{figure}
\begin{proof}
By contradiction. Let $u$, $v$ and $w$ be bicolored vertices of colors $1$, $3$ and $5$ respectively (see Figure~\ref{fig:7colors}). Contract every vertex of other colors, and delete every remaining vertex except $u$, $v$, $w$ and their representants. If $n > 5$, the $6$-vertices graph obtained by this way is a net.
\end{proof}
\begin{lemma}\label{lemma:bicol-consec}
If $G$ is net-free and $n=5$, there are no two consecutive bicolored vertices in $C$.
\end{lemma}
\begin{proof}
By contradiction. Let $u$ be a bicolored vertex, of color $1$, and $v$ its bicolored neighbor of color $2$. Let $w$ be a bicolored vertex of color $4$. Then, the graph obtained by contracting vertices of colors $3$ and $5$, and deleting all the vertices of colors $1$, $2$ and $4$, except $u$, $v$, $w$ and their representants, is a net.
\end{proof}
\begin{lemma}\label{lemma:parity}
Suppose $G$ is net-free. The number of edges between any two color classes is zero or odd.
\end{lemma}
\begin{proof}
Choose two adjacent colors, and remove from $C$ every edge between these two colors. Every path thus obtained has both of its extremities in the same color class, or in the two chosen colors. So every path has an even length. As $C$ is odd, this proves that we removed an odd number of edges.
\end{proof}
\begin{lemma}\label{lemma:odd-sequence}
Suppose $G$ is net-free. If $n=5$, there is a maximal sequence of consecutive edges in $C$ between any two given adjacent colors of length $2k+1$, for some $k \geq 1$.
\end{lemma}
\begin{proof}
Consider the subpaths of $C$ obtained by keeping only the edges between colors $1$ and $2$. Either there is a net minor or each of these paths has length at least $2$ by Lemma~\ref{lemma:bicol-consec}. Then, by Lemma~\ref{lemma:parity}, there is a path of odd length, proving the lemma.
\end{proof}
\begin{lemma}\label{lemma:5colors}
If $n=5$, then $G$ is not a minimally excluded graph by vertex minor.
\end{lemma}
\begin{figure}
\caption{Illustration for Lemma~\ref{lemma:5colors}.}
\label{fig:5colors}
\end{figure}
\begin{proof}
If $G$ contains a net minor, it is clearly not minimal. Suppose it does not.
Let $u_1$, $v_1$, $u_2$, \ldots $u_k$, $v_k$ be a maximum subpath of $C$ of odd length between colors $1$ and $2$. By Lemma~\ref{lemma:odd-sequence}, we have $k \geq 2$. Let $w$ be the vertex of color $5$ adjacent to $u_1$, and $w'$ the vertex of color $3$ adjacent to $v_k$. Let $s$ be a bicolored vertex of color $4$ (see Figure~\ref{fig:5colors}, $a$).
Consider the graph obtained by contracting vertices of colors $3$ and $5$, and deleting all the other vertices except $u_1$, $v_1$, $u_2$, \ldots $u_k$, $v_k$, $s$, $\repr(u_1)$, $\repr(u_2)$ and $\repr(s)$. This graph (Figure~\ref{fig:5colors}, $b$) is composed of a cycle of length $2k+1$ plus three terminals $\repr(u_1)$, $\repr(u_2)$ and $\repr(s)$. It obviously checks the condition for being an excluded graph. Thus $G$ is not minimal.
\end{proof}
\end{appendix}
\end{document} |
\begin{document}
\sloppy
\title{Characterizing Entanglement Sources}
\author{Pavel Lougovski$^{1}$ and S.J. van Enk$^{1,2}$}
\address{
$^1$Department of Physics and Oregon Center for Optics, University of Oregon\\
Eugene, OR 97403\\
$^2$Institute for Quantum Information, California Institute of Technology, Pasadena, CA 91125}
\begin{abstract}
We discuss how to characterize entanglement sources with finite sets of measurements. The measurements do not have to be tomographically complete, and may consist of POVMs rather than von Neumann measurements. Our method yields a probability that the source generates an entangled state as well as estimates of any desired calculable entanglement measures, including their error bars. We apply two criteria, namely Akaike's information criterion and the Bayesian information criterion, to compare and assess different models (with different numbers of parameters) describing entanglement-generating devices. We discuss differences between standard entanglement-verification methods and our present method of characterizing an entanglement source.
\end{abstract}
\pacs{03.67.Mn, 03.65.Ud}
\maketitle
\section{Introduction}
Entanglement is useful, but hard to generate, and even harder to detect. The most measurement-intense approach to the problem of experimentally detecting the presence of entanglement is to perform complete quantum-state tomography \cite{tomo}. Even for just two qubits this implies a reconstruction of all 15 independent elements of the corresponding density matrix. Subsequently applying the positive partial transpose (PPT) criterion to the reconstructed matrix gives a conclusive answer about entanglement or separability of the state~\cite{Witness,Peres}.
From the practical point of view it is desirable to have an entanglement detection tool that is more economical than full state tomography but nevertheless is decisive. Already in the original work on PPT~\cite{Witness} it was noticed that one can always construct an observable $\mathcal{W}$ with non-negative expectation values for all separable states $\rho_{s}$ and a negative expectation value for at least one entangled state $\rho_e$. In this way an experimentally detected violation of the inequality $\langle\mathcal{W}\rangle \ge 0$ is a sufficient condition for entanglement. The observable $\mathcal{W}$ is called an entanglement witness (EW). There always exists an optimal choice of {\em local} orthogonal observables such that a given EW can be expressed as a sum of their direct products~\cite{Guehne}, so that a witness can always be measured locally. The advantage of using EWs for entanglement detection will be appreciated better for multi-partite systems with more than two qubits, because the number of tomographic measurements would grow exponentially with the number of qubits. On the other hand, a given witness does not detect all entangled states and therefore a variety of different EWs should be tested in order to rule out false negative results.
EWs assume the validity of quantum mechanics, and also assume one knows what measurements one is actually performing. A valuable alternative to EW can be sought in using a violation of Bell-CHSH inequalities \cite{bell,chsh} as a sufficient condition for entanglement (although a Bell-inequality test can be formulated as a witness, too \cite{Bellwitness}). Because Bell inequalities are derived from classical probability theory without any reference to quantum mechanics, no assumption about what is being measured is necessary. This method is, therefore, safe in the sense of avoiding many pitfalls arising from unwarranted (hidden) assumptions about one's experiment \cite{entmeasurement}.
Here we propose a different method for characterizing an entanglement source that automatically takes into account finite data as well as imperfect measurements.
Our method consist of two parts. The first part, ``Bayesian updating,'' produces an estimate of the relative probabilities that entangled and separable states are consistent with a given finite set of data. This estimate depends on what {\em a priori} probability distribution (the {\em prior}) one chooses over all possible states (the more data one has, the less it depends on the prior). That is, there is an {\em a priori} probability of entanglement, and each single measurement updates this probability to an {\em a posteriori} probability of entanglement. The latter then has to be compared to the former, in order to reach the conclusion that one is now either more certain or less certain about having produced an entangled state. In fact, every experiment can only make such probabilistic statements about entanglement, although this is almost never explicitly stated in these terms.
Thus our method differs from those in
Refs.~\cite{AudenaertWitness,JensWitness,GuehneWitness} which assume expectation values of EWs are known [corresponding effectively to an {\em infinite} data set] and try to find the {\em minimally}-entangled state consistent with those expectation values.
We use a numerical Bayesian updating method for a probability distribution over density matrices, which is similar to that recently discussed in Ref.~\cite{Blume-Kohout} in the context of quantum-state tomography. In particular, whereas
the reconstruction of a density matrix from experimental data is usually based on the maximum likelihood estimation (MLE), Ref.~\cite{Blume-Kohout} discusses its drawbacks and proposes Bayesian updating in its stead as a superior method. Our aim, though, is not to give an estimate of the density matrix, but of entanglement. In fact, any quantity that can be calculated from a density matrix, such as the purity of one's state, can be estimated this way.
The second part of our method introduces two information criteria \cite{book} to judge how different models of a given entanglement generation process can be compared to each other quantitatively. It is probably best to
explain this part by giving an example. For simplicity, we consider the case of two-qubit states.
Suppose an experimentalist has a model for her entanglement generating source that contains, say, two parameters describing two physically different sources of noise in the final two-qubit state produced.
She may try to fit her data to her two-parameter model, but obviously there are always states in the full 15-dimensional set of all physical states that will fit the data better. There are a number of criteria, standard in the literature on statistical models, that compare quantitatively how different models fit the data. Here we will use Akaike's Information Criterion (AIC) and the Bayesian Information Criterion (BIC) \cite{book}. These information criteria aim to find the most informative model, not the best-fitting model.
The idea is that a two-parameter model fitting the data almost as well as the full quantum-mechanical description would provide more physical insight and a more economical (think Occam's razor) and transparent description. Each of the two information criteria, AIC and BIC,
produces a number $\Omega$.
One term in $\Omega$ is the logarithm of the maximum likelihood possible within each model, and the second term subtracts a penalty for each parameter used in the model. The model with the larger value of $\Omega$ is then deemed to be the more informative.
We propose here to combine information criteria with the Bayesian updating methods for entanglement estimation. Namely, we propose to use the more informative model to generate a ``substitute prior.'' In the case that the simpler model is the more informative, the numerical efforts required for our Bayesian updating method are much smaller, and yet should lead to correct descriptions of the entanglement generated by one's source.
This paper is organized as follows. In Section \ref{Bayesup}
we give a general formulation of our method of Bayesian updating applicable to any quantum system. We also formulate precisely the two information criteria for model selection. In Section \ref{exmpl}
we discuss numerical examples, which illustrate the Bayesian methods and the information criteria. For concreteness we consider measurements of Bell-CHSH correlations (although any sort of measurements would do).
The examples show that our method detects entangled two-qubit states that escape detection by any of the Bell-CHSH inequalities and even by violations of the stronger version of these bounds, which we call Roy-Uffink-Seevinck bounds \cite{roy, UffinkSeevinck,quantph}. In the Discussion and Conclusions Section we discuss the essential difference between our method of characterizing an entanglement source and the standard methods of entanglement verification \cite{guehne,entmeasurement}.
\section{Quantifying Entanglement via Bayesian Updating}\label{Bayesup}
Here we present a numerical Bayesian updating method for one's probability distribution over density matrices. A related Bayesian method was recently advocated in Ref.~\cite{Blume-Kohout} in the context of quantum-state tomography and quantum-state estimation. We note our aim is not to give an estimate of the density matrix, but, more modestly, to give estimates of entanglement, purity, and in principle any quantity that can be efficiently calculated from the density matrix. We first discuss the method in general, and subsequently we propose a new method to choose a prior probability distribution over density matrices.
\subsection{Method}
The method itself can be formulated as a five-step procedure:
\begin{enumerate}
\item For a system of $M$ qubits we first choose a finite test set of density matrices. We calculate the amount of entanglement (in fact, the negativity) for each state in the set \footnote{We remark that in higher dimensions (or for more than two parties) the negativity does not necessarily pinpoint all classes of entangled states. In this case more than one entanglement monotone should be used to characterize entanglement.}. The {\em a priori} probability that our unknown experimentally generated state, which we denote by $\rho_{?}$, equals a state $\rho$ in the set is chosen as ${\rm p}_{{\rm prior}}(\rho)=1/N_{{\rm s}}$, where $N_{{\rm s}}$ is the number of states in the set.
\item We assume some set of POVMs with elements $\{\Pi_i\}$ is measured. These POVMs can describe any (noisy) set of measurements one performs on the qubits.
\item For the acquired measurement record $d = \{d_{1},\cdots,d_{i}\}$ consisting of the number of times outcome $i$ was obtained \footnote{We thus implicitly assume identical and independent copies of states of $M$ qubits.}, we calculate the quantum-mechanical probability ${\rm p} (d|\rho)$ that a given state $\rho$ from the test set generates the measurements outcome $d$ (which follows directly from Tr$\rho\Pi_i$). Having at hand probabilities ${\rm p} (d|\rho)$ for all states $\rho$ in the test set we are now able to calculate the {\em a priori} probability ${\rm p}(d)=\sum\limits_{\rho}{\rm p} (d|\rho)/N_{{\rm s}}$ for the measurement record $d$ to occur.
\item We calculate -- using Bayes' rule -- the probability ${\rm p}(\rho|d)$ of having the state $\rho$ given the measurement outcomes $d$: ${\rm p} (\rho|d) ={\rm p} (d|\rho)/[N_{{\rm s}}{\rm p} (d)]
$.
\item We obtain the posterior probability distribution over density matrices in our test set: ${\rm p}_{{\rm posterior}} (\rho):={\rm p} (\rho|d)$ for all states $\rho$.
\end{enumerate}
We can then repeat steps 2-5 for a new set of measurements $d$, if needed.
This procedure gives us, in step 5, a numerical estimate of the {\em a posteriori} probability that the unknown state $\rho_{?}$ equals the state $\rho$ from the test set. From p$(\rho|d)$ we can estimate the probability ${\rm p}_{e}$ for the state $\rho_{?}$ to be entangled. We just sum the probabilities ${\rm p} (\rho|d)$ for all entangled states $\rho_{ent}$ in the set i.e.
\begin{equation}
{\rm p_e} (\rho_{?}) = \sum\limits_{\rho=\rho_{ent}} {\rm p} (\rho|d).
\end{equation}
Furthermore, we can calculate probability distributions for any function of the density matrix, such as the negativity and purity. We thus infer expectation values such as
\begin{equation}
\overline{N} = \sum\limits_{\rho=\rho_{ent}} {\rm p} (\rho|d)
N(\rho),\end{equation}
and
\begin{equation}
\overline{P } = \sum\limits_{\rho} {\rm p} (\rho|d)
{\rm Tr}(\rho^2),
\end{equation}
as well as standard deviations $\sigma_N=\sqrt{\overline{N^2}-\bar{N}^2}$ etc.
The meaning of our final probability distribution p$(\rho|d)$ and of the above expectation values is as follows. If we were forced to give a {\em single} density matrix that best describes all data and that includes error bars, we would give the mixed state $\bar{\rho} = \int d\rho\, {\rm p}(\rho|d)\,\rho$, as explained in \cite{Blume-Kohout}. The purity and negativity of the state $\bar{\rho}$ are {\em not} equal to (in fact, smaller than) the estimates $\bar{N}$ and $\bar{P}$ that we use here. The difference is this: if one were to perform more measurements that are tomographically complete, $\bar{N}$ is the expected negativity of the final estimated density matrix. $N(\bar{\rho})$, on the other hand, would be the useful entanglement of a single copy available {\em without} performing more measurements. For most quantum information processing purposes (such as teleportation) one indeed needs more precise knowledge about the density matrix than just its entanglement. See Ref.~\cite{entmeasurement} for more discussions on this issue.
\subsection{Model testing and information criteria}
The only problem standing in the way of a straightforward application of the above Bayesian updating procedure is that a sufficiently dense test set (used in step 1) is in general too hard to handle numerically, since even for two-qubit density matrices the parameter space is 15-dimensional.
Although there are certainly ways out of this problem (in particular, sampling directly from the posterior probability distribution can be efficiently done with the Metropolis-Hastings algorithm, see e.g. \cite{MH}), here we stick to the idea of a set of test states by simplifying that set, as follows.
As an illustrative example (which we will again consider in great detail in the next Section), consider an experimentalist trying to produce a maximally entangled Bell state of 2 qubits, say, $(|00\rangle+|11\rangle)/\sqrt{2}$. She wants to test her entanglement-generating device by measuring some set of Bell correlations. In particular, suppose she measures $2<K<15$ independent observables.
From her previous experience with the same device, she models the generation process by assuming there is both Gaussian phase noise and white noise (mixing with the maximally mixed state $\propto \openone$). That is, she assumes her device generates states of the form
\begin{equation}\label{ps1}
\rho_{p,\sigma}=p\rho_\sigma+(1-p)\frac{\openone}{4},
\end{equation}
with $p\in [0,1]$ and
\begin{equation}\label{ps2}
\rho_\sigma=\frac{1}{2}\int_{-\pi}^{\pi} d\phi P(\phi)
(|00\rangle+\exp(i\phi)|11\rangle)
(\langle 00|+\exp(-i\phi)\langle 11|).
\end{equation}
Here $P(\phi)$ is a Gaussian phase distribution of the form
\begin{equation}\label{ps3}
P(\phi)=N_\sigma\exp(-\phi^2/\sigma^2)
\end{equation}
with the normalization factor $N_\sigma$ given by
\begin{equation}
N_\sigma=\frac{1}{\int_{-\pi}^\pi d\phi
\exp(-\phi^2/\sigma^2)}.
\end{equation}
So there are just two parameters the experimentalist has to determine from her measurement results, $p$ and $\sigma$.
As a measure to judge how well her data $d$ fit the model (\ref{ps1})--(\ref{ps3}), she considers the best likelihood for that model,
\begin{equation}
L_{p,\sigma}\equiv\max_{p,\sigma}P(d|\rho_{p,\sigma}).
\end{equation}
She would like to compare this to the maximum likelihood over {\em all} physical two-qubit states $\rho$,
\begin{equation}
L_a\equiv \max_{\rho}P(d|\rho).
\end{equation}
There are several ways to compare these two quantities \cite{book}. One criterion is called
Akaike's Information Criterion, and it defines the quantity
\begin{equation}
\Omega=\log(L)-k
\end{equation}
for each model, where $k$ is the number of parameters in the model, and $L$ is the maximum likelihood for the model. The quantity $\Omega$ rewards a high value of the best likelihood (indicating a good fit), but penalizes a large number of parameters (to guard against overfitting).
Now when measuring $2<K<15$ observables, the best complete model contains just $K$, not 15, independent parameters.
Thus the experimentalist would calculate two numbers
\begin{eqnarray}
\Omega_{p,\sigma}&=&\log (L_{p,\sigma})-2,\nonumber\\
\Omega_{a}&=&\log (L_a)-K.\label{Omega}
\end{eqnarray}
If $\Omega_{p,\sigma}>\Omega_a$ then
the Akaike Information Criterion judges the simple 2-parameter model to be more informative than the complete $K$-parameter model.
There is a Bayesian version of this criterion \cite{book}, and it is defined in terms of similar quantities
\begin{equation}
\Omega'=\log(L)-k\log(N_{{\rm m}})/2,
\end{equation}
where $L$ and $k$ have the same meaning as before, and $N_{{\rm m}}$ is the number of data taken.
Again, if $\Omega'_{p,\sigma}>\Omega'_a$, the 2-parameter model is considered more informative than the $K$-parameter description. For $N_{{\rm m}}>8$ the BIC puts a larger penalty on the number of parameters than does the AIC.
In the case that the simple model turns out to be more informative, according to at least one of the two criteria [this depends on the data], we propose that the experimentalist may well use the simple model to construct a test set of states.
For example, she could assume as prior probability distributions for $p$ and $\sigma$ that $p$ is uniformly distributed on the interval $[0,1]$, and that $\sigma$ is uniform on, say, the interval $[0,\pi]$ (this is somewhat arbitrary, of course, as every prior is). Then, the test set of
states could be sampled by simply choosing
$N_p$ uniformly spaced points in the
interval $[0,1]$ for $p$ and $N_\sigma$ uniformly spaced points in the interval $[0,\pi]$ for $\sigma$, thus creating a test set of $N_{{\rm s}}=N_p\cdot N_\sigma$ states.
The above model leads to states that are diagonal in the Bell basis,
\begin{eqnarray}
|\Phi_1\rangle&=&(|00\rangle+|11\rangle)/\sqrt{2}\nonumber\\
|\Phi_2\rangle&=&(|00\rangle-|11\rangle)/\sqrt{2}
\nonumber\\
|\Phi_3\rangle&=&(|01\rangle+|10\rangle)/\sqrt{2}
\nonumber\\
|\Phi_4\rangle&=&(|01\rangle-|10\rangle)/\sqrt{2}.\label{Bb}
\end{eqnarray}
In the next Section we thus consider not only
the above two-parameter model, but also its obvious extension to a three-parameter model by allowing all Bell-diagonal states.
\section{Examples}\label{exmpl}
\subsection{Orthogonal spin measurements}
In the following we consider, as an example, two-qubit states with spin measurements performed on each qubit (considered as a spin-1/2 system) in {\em two} arbitrary spatial directions that are orthogonal, and we denote the corresponding spin operators by $A_1$ and $A_2$ for the first qubit, and $B_1$ and $B_2$ for the second qubit. This constitutes a measurement of 8 independent quantities,
four single-qubit expectation values and four correlations.
We note that in this case we can construct four Bell-CHSH operators from the four measured correlations:
\begin{eqnarray}
\mathcal{B}_1&:=& A_{1}\otimes(B_{1} + B_{2}) +A_{2}\otimes(B_{1} - B_{2}), \nonumber\\
\mathcal{B}_2&=& A_{1}\otimes(B_{1} + B_{2}) -A_{2}\otimes(B_{1} - B_{2}), \nonumber\\
\mathcal{B}_3&=& A_{1}\otimes(B_{1} - B_{2}) + A_{2}\otimes(B_{1} + B_{2}), \nonumber\\
\mathcal{B}_4&=& A_{1}\otimes(-B_{1} + B_{2}) + A_{2}\otimes(B_{1} + B_{2}).\label{AllBell}
\end{eqnarray}
We then test two-qubit states that may be entangled but that do {\em not} violate any of the four Bell inequalities that can be constructed from these four operators.
In fact, we will not even optimize the choice of spatial directions, given an initial guess of what state should be produced, for violating a Bell inequality.
Finally, we will add one more correlation to be measured, namely that involving the third dimension: $A_3 B_3$. That is, whenever $A_3$ is measured on the first qubit, $B_3$ is measured on the second qubit. This addition makes the measurements on each qubit separately tomographically complete, but it does not lead to additional Bell-CHSH operators. The total number of independent observables measured in this case is 11 (four are missing).
Thus, the parameter $K$ to be used for evaluating $\Omega_a$ of Eq.~(\ref{Omega}) is $K=11$.
\subsection{Analytical results}
Determining the AIC and BIC criteria can be done analytically in most cases that we will consider here.
First of all, we can bound the maximum likelihood over all states, given the sort of measurements from the preceding subsection.
There are 20 observed frequencies, as follows:
for each of the five correlation measurements $A_i B_j$, where
$i,j$ take on the values $(i,j)=(1,1), (1,2), (2,1), (2,2), (3,3)$
there are four different outcomes, which we can denote
by $(+,+), (+,-), (-,+), (-,-)$. If we denote these frequencies by $f_{ijk}$, for $k=1\ldots 4$, then the (log of the) maximum likelihood is bounded by
\begin{equation}
\log (L_a)\leq
\sum_{k,(ij)} N_{ij}f_{ijk}\log(f_{ijk}),
\end{equation}
where $N_{ij}$ is the number of times
the $A_iB_j$ correlation was measured.
If we assume all five correlations are measured equally often, then we have
\begin{equation}
\log (L_a)\leq\frac{ N_{{\rm m}}}{5}
\sum_{k,(ij)}f_{ijk}\log(f_{ijk}).
\end{equation}
The bound is achieved when
there is a physical state predicting the frequencies exactly as they were observed.
Let us choose directions of our spin measurements as
$A_1=B_1=X$, $A_2=B_2=Y$ and $A_3=B_3=Z$.
Then, there are two obvious models an experimentalist could choose from: the first is the Bell-diagonal model, containing three parameters, in which states are of the form
\begin{equation}
\rho=\sum_{i=1}^4 p_{i} |\Phi_{i}\rangle\langle \Phi_{i}|.
\end{equation}
The observed frequencies $f_{ijk}$ for the five correlations
cannot be all predicted to arbitrary accuracy by Bell-diagonal states. In fact,
most frequencies predicted by this model are independent of the values of $\{p_i\}$, and are equal to 1/4.
The only predicted frequencies (which we denote by $\tilde{f}$ so as to distinguish them from the observed frequencies $f$) that actually depend on the values of $\{p_i\}$ are
\begin{eqnarray}
\tilde{f}_{111}&=&\tilde{f}_{114}=p_1/2+p_3/2,\nonumber\\
\tilde{f}_{221}&=&\tilde{f}_{224}=p_2/2+p_3/2,\nonumber\\
\tilde{f}_{331}&=&\tilde{f}_{334}=p_1/2+p_2/2,\nonumber\\
\tilde{f}_{112}&=&\tilde{f}_{113}=p_2/2+p_4/2,\nonumber\\
\tilde{f}_{222}&=&\tilde{f}_{223}=p_1/2+p_4/2,\nonumber\\
\tilde{f}_{332}&=&\tilde{f}_{333}=p_3/2+p_4/2.
\end{eqnarray}
The best-fitting Bell-diagonal state can only predict
the correct correlations between $XX$, $YY$, and $ZZ$ measurements.
For example, there is a Bell-diagonal state predicting the correct value for the sum $\tilde{f}_{111}+\tilde{f}_{114}$, but its prediction for the difference will always be zero. Thus, the Bell-diagonal state fitting the data best will have the following values for $\{p_i\}$:
\begin{eqnarray}
p_1&=&[f_{111}+f_{114}-f_{221}-f_{224}+f_{331}+f_{332}]/2,\nonumber\\
p_2&=&[-f_{111}-f_{114}+f_{221}+f_{224}+f_{331}+f_{332}]/2,\nonumber\\
p_3&=&[f_{111}+f_{114}+f_{221}+f_{224}-f_{331}-f_{332}]/2,
\end{eqnarray}
provided the observed frequencies are such that the $\{p_i\}$ including $p_4=1-p_1-p_2-p_3$ are all nonnegative. In that case, the (log of the) maximum likelihood over all Bell-diagonal states is
\begin{eqnarray}
\log L_{Bd}&=&\frac{ N_{{\rm m}}}{5}\left[\sum_i
(f_{ii1}+f_{ii4})\log([f_{ii1}+f_{ii4}]/2)\right.\nonumber\\
&&+\sum_i(f_{ii2}+f_{ii3})\log([f_{ii2}+f_{ii3}]/2)\nonumber\\
&&+\left.\sum_{k,i\neq j}f_{ijk}\log(1/4)\right]
\end{eqnarray}
For the Bell-diagonal model
we can construct a prior distribution over Bell-diagonal states by choosing the numbers $\{p_i\}$
uniformly over the simplex, as explained in \cite{NegativityOriginal}.
The two-parameter model is similar in its predictions to the Bell-diagonal model. The only difference is that
the parameters $p_3$ and $p_4$ are equal.
Thus this model can predict only two correlations correctly, namely $ZZ$ and $XX-YY$.
The maximum likelihood for this model, then, is given by
\begin{eqnarray}
\log L_{p,\sigma}&=&\frac{ N_{{\rm m}}}{5}[
(f_{331}+f_{334})\log([f_{331}+f_{334}]/2)\nonumber\\
&&+(f_{332}+f_{333})\log([f_{332}+f_{333}]/2)\nonumber\\
&&+(f_{111}+f_{114}+f_{222}+f_{223})\times\nonumber\\
&&\log(1/4+(f_{111}+f_{114}-f_{221}-f_{224})/2)\nonumber\\
&&+(f_{221}+f_{224}+f_{112}+f_{113})\times\nonumber\\
&&\log(1/4+(f_{221}+f_{224}-f_{111}-f_{114})/2)\nonumber\\
&&+\sum_{k,i\neq j}f_{ijk}\log(1/4)],
\end{eqnarray}
provided all inferred frequencies are nonnegative.
The three parameters to be used for selecting the most informative model are then, in the case of AIC:
\begin{eqnarray}
\Omega_a&=&\log L_a-11,\nonumber\\
\Omega_{p,\sigma}&=&\log L_{p,\sigma}-2\nonumber\\
\Omega_{Bd}&=&\log L_{Bd}-3,
\end{eqnarray}
and similar expressions for the BIC.
\subsection{Numerics}
Let us first discuss the two-parameter substitute prior with $p$ and $\sigma$ drawn
uniformly from $[0,1]$ and $[0,\pi]$, respectively. As our favorite entanglement monotone we use the negativity \cite{VidalWerner, NegativityOriginal}.
The prior probability distribution for negativity is displayed in Figure~\ref{negp} (the graph for concurrence is the same for this special case). The plot shows that states exist in the full range of separable to maximally entangled, with the prior probability of entanglement being $P_{{\rm ent}}=50.3\%$.
\begin{figure}
\caption{Prior probability distribution of the negativity, for the two-parameter states $\rho_{p,\sigma}$.}
\label{negp}
\end{figure}
Using this prior, we consider the measurement of five different Bell correlations.
Sample results
are displayed and discussed in Figures~\ref{0404}--\ref{03330333}.
Figure \ref{0404} shows measurement results generated from an entangled state $\rho_?=\rho_{0.4,0.4}$, as defined in Eq.~(\ref{ps1}). There is no need to test either AIC or BIC for this case, since the state is chosen from the two-parameter set of states, so the two-parameter model is trivially more informative.
The Bayesian posterior probability for entanglement distribution is consistent with the actual entanglement properties of $\rho_?$, as discussed in the Figure caption.
\begin{figure}
\caption{The state considered here is of the form $\rho_{p,\sigma}$.}
\label{0404}
\end{figure}
\begin{figure}
\caption{Same as Figure~\ref{0404}.}
\label{Pur0404}
\end{figure}
We then also test a state that is just separable, the state $\rho_{1/3,1/3}$.
The results can be summarized as ``inconclusive'' about the question whether the data inform the experimentalist that the underlying state is entangled or not. This is not surprising given how close the actual state is to the separable/entangled boundary. The plot for purity is not shown, as it is very similar to Figure \ref{Pur0404}
(the estimate of the purity is $\bar{P}=0.331 \pm 0.013$, perfectly consistent with the actual purity of 0.3303 of $\rho_{1/3,1/3}$).
\begin{figure}
\caption{Same as Figure~\ref{0404}.}
\label{03330333}
\end{figure}
Next we consider the following family of states
\begin{equation}\label{rhok}
\rho_k=0.5|\psi_k\rangle\langle \psi_k|+0.5\openone/4,
\end{equation}
with $k\leq 1$ and
\begin{equation}
|\psi_k\rangle=(|00\rangle+k|11\rangle)/\sqrt{1+k^2}.
\end{equation}
For $k=1$ this state is in the two-parameter set, but for $k<1$ it is not. Obviously, the smaller $k<1$ is, the less well it is approximated by a state $\rho_{p,\sigma}$.
We investigate how well the two-parameter model does by calculating
\begin{eqnarray}
\Delta\Omega&\equiv& \Omega_{p,\sigma}-\Omega_a\nonumber\\
\Delta\Omega'&\equiv& \Omega'_{p,\sigma}-\Omega'_a,
\label{DO}
\end{eqnarray} and tabulating the values for several values of $k<1$ in Table \ref{tab1}.
We moreover give the estimated negativities and purities, plus their error bars, as compared to the actual values of those quantities for the states $\rho_k$.
\begin{table}[htdp]
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|}
\hline
$k$&$N$&$\bar{N}\pm\sigma_N$&$\bar{P}\pm\sigma_P$&$\Delta\Omega$&$\Delta\Omega'$\\\hline
0.9&0.247&$0.246\pm 0.024$&$0.436\pm 0.012$&7.2&36\\
0.8&0.238&$0.237\pm 0.024$&$0.431\pm 0.012$&0.9&30\\
0.7&0.220&$0.219\pm 0.024$&$0.423\pm 0.012$&-11&18\\
0.6&0.191&$0.190\pm 0.024$&$0.410\pm 0.011$&-29&0.8\\
0.5&0.150&$0.149\pm 0.025$&$0.392\pm 0.011$&-53&-24\\
\hline
\end{tabular}
\caption{Comparison, through the Akaike and Bayesian information criteria [using (\ref{DO})], of the two-parameter model based on the family of states $\rho_{p,\sigma}$ [Eq.~(\ref{ps1})] and the
full 15-parameter description of all two-qubit states, with measurement data generated from the family of states $\rho_k$ [Eq.~(\ref{rhok})]. Here the number of measurements is $N_{{\rm m}}=5\times 1000$.
The purity of $\rho_k$ is equal to $P=0.4375$ for any value of $0<k<1$. For decreasing values of $k$, $\Delta\Omega$ and $\Delta \Omega'$ decrease, indicating that the two-parameter model becomes less and less informative. The estimate of purity becomes, likewise, less and less reliable.}
\label{tab1}
\end{center}
\end{table}
What the table shows is that the two-parameter model ceases to be more informative when $k<1$ decreases.
At that point, the estimate of negativity is still perfectly fine, but the estimate of the purity starts to fail.
In the last entry, for $k=0.5$, the two-parameter model's estimate of purity is definitely off by a large amount.
In order to consider the three-parameter Bell-diagonal model, we first display the prior distribution for negativity of that model in Fig.~\ref{negp3}.
\begin{figure}
\caption{Prior probability distribution of the negativity, for the three-parameter set of Bell-diagonal states. The point at zero negativity is left out for visual reason: separable states
occupy 50.0\% of the total volume. Here $10^7$ states were drawn from the prior distribution over states.}
\label{negp3}
\end{figure}
Next let us discuss a state that is not close to any state in the two-parameter set of states, but that is still reasonably well described by the three-parameter model,
\begin{equation}\label{rho1}
\rho_1=0.53 |\psi_1\rangle\langle \psi_1|+
0.47 |\phi_1\rangle\langle \phi_1|,
\end{equation}
with
\begin{eqnarray}
|\psi_1\rangle&=&(|00\rangle+0.9|11\rangle)/\sqrt{1.81},\nonumber\\
|\phi_1\rangle&=&(|01\rangle+0.9|10\rangle)/\sqrt{1.81}
\end{eqnarray}
We consider $N_{{\rm m}}=5\times 1000$ measurements, with each correlation being measured 1000 times.
(For the calculations with the three-parameter model, a test set of size $10^7$ was used. In contrast, for the two-parameter model, test sets of size $600\times600$ were sufficient in all cases. This illustrates that choosing a good physical model with as few parameters as possible pays large dividends.)
For this state we calculate the AIC and BIC and compare the two- and three-parameter (Bell-diagonal) models to the full-state model,
\begin{eqnarray}
\Omega_{Bd}-\Omega_a&=&2.4,\nonumber\\
\Delta \Omega=\Omega_{p,\sigma}-\Omega_a&=&-462,
\end{eqnarray}
for the AIC, and
\begin{eqnarray}
\Omega'_{Bd}-\Omega'_a&=&27,\nonumber\\
\Delta \Omega'=\Omega'_{p,\sigma}-\Omega'_a&=&-433,
\end{eqnarray}
for the BIC.
That is, the three-parameter model is considered more informative than the model containing all physical states. On the other hand, the two-parameter model is {\em much} less informative. The estimates for negativity and purity are, for the three-parameter model
\begin{eqnarray}
\bar{N}&\stackrel{Bd}{=}&0.059\pm 0.022\nonumber\\
\bar{P}&\stackrel{Bd}{=}&0.4977\pm 0.0025
\end{eqnarray}
where the actual values are
\begin{eqnarray}
N&=&0.059\nonumber\\
P&=&0.502.
\end{eqnarray}
Thus, both purity and negativity are estimated correctly within the three-parameter model; and this is what one would expect given the AIC and BIC criteria.
The posterior probability distribution for the negativity is plotted in Fig.~\ref{Post3}.
\begin{figure}
\caption{Posterior probability distribution of the negativity, using the three-parameter set of Bell-diagonal states as prior, for data generated from the state $\rho_1$ of Eq.~(\ref{rho1}).}
\label{Post3}
\end{figure}
For the two-parameter model, in contrast,
we get
\begin{eqnarray}
\bar{N}&\stackrel{p,\sigma}{=}&0.056\pm 0.025\nonumber\\
\bar{P}&\stackrel{p,\sigma}{=}&0.353\pm 0.009
\end{eqnarray}
so that again the purity estimated by the two-parameter model is way off, although the estimated negativity is still quite good.
Thus, when the AIC and/or BIC criteria tell one not to trust a certain model, it does not imply that {\em all} estimated quantities from that model are, in fact, incorrect.
Lest one starts to think that the two-parameter model in fact somehow always estimates the negativity correctly, even if the estimated purity is wrong, here is a counter example to that idea: when the $N_{{\rm m}}=5\times 1000$ data are generated by
the mixture
\begin{equation}
\rho_2=0.53 |\psi_2\rangle\langle \psi_2|+
0.47 |\phi_2\rangle\langle \phi_2|,
\end{equation}
with
\begin{eqnarray}
|\psi_2\rangle&=&(|00\rangle+0.5|11\rangle)/\sqrt{1.25},\nonumber\\
|\phi_2\rangle&=&(|01\rangle+0.5|10\rangle)/\sqrt{1.25},
\end{eqnarray}
whose negativity is $N=0.039$, the two-parameter
model concludes the state is separable with high probability, $P_{{\rm ent}}=3.1\%$,
and $\bar{N}=3.5\times 10^{-4}\pm 0.0025$ (and the estimated purity is incorrect as well: $\bar{P}=0.319\pm 0.008$ instead of the correct value $P=0.502$).
Here, $\Delta\Omega=-203$.
\section{Discussion and Conclusions}\label{conc}
We have demonstrated a method to characterize entanglement sources from finite sets of data, using Bayesian updating for the probability distribution over density matrices. One obtains a {\em posterior} probability distribution for any quantity that can be efficiently calculated from an arbitrary density matrix. For instance, one obtains a probability that one's state is entangled, as well as expectation values of any computable entanglement monotone, including estimates of statistical errors. These values should be compared to their {\em a priori} values to judge whether one's measurement results lead one to be more certain about entanglement or less.
For two qubits it is in principle sufficient for the purpose of detecting entanglement to measure spin on each qubit in just two orthogonal directions. On the other hand, empirically, we found that for accurately {\em quantifying} two-qubit entanglement, adding one more correlation measurement is very beneficial. Thus we concentrated on discussing measurements of five spin-spin correlation functions.
It is hard to say in general what sort of measurements, short of fully tomographic measurements, will be sufficient for estimating what sort of quantities.
An easy check, though, is to count by how many parameters a given quantity is determined. For instance, purity is determined by the eigenvalues of the density matrix. Thus for two qubits one needs only three parameters. Thus, reliably estimating the purity of one's output states ought to be easier than estimating entanglement. Our simulations confirm this suspicion, producing relatively smaller error bars for estimates of purity than for entanglement.
It is important to note that in the above we used the phrase ``characterizing entanglement sources,'' rather than ``verifying entanglement,'' because the latter method, in its standard interpretation, has a different meaning: in entanglement verification one tries to find a proof of entanglement convincing a skeptic outsider. But the Bayesian method rather describes one's own belief.
In particular, the difference is that one's prior belief of the entanglement-generating source is certainly to be included in a Bayesian description, but in entanglement verification methods such beliefs are not allowed. Nevertheless, Bayesian methods can be used for the stricter purpose of entanglement verification, as discussed in \cite{Robintbp}.
In order to characterize one's entanglement source, then, it is allowed to use a model describing one's source, based on, e.g., previous experiments and experiences with the same (or similar) device. We provided a criterion to judge whether a given model of one's source is more or less informative than other possible models. In particular, one can always parametrize the output states by using the full
quantum-mechanical description of an arbitrary state of correct Hilbert-space dimension. The latter model, though, while being complete, may have more parameters than wished for or needed.
Instead, one may be able to use a description of one's source in terms of a (small) number of physically relevant parameters. We proposed to use two criteria to judge the relative merits of such models, the Akaike Information Criterion (AIC), and the Bayesian Information Criterion (BIC) \cite{book}.
We then showed how the AIC and BIC can be used to choose a test set of states i.e., an {\em a priori} probability distribution over quantum states generated by one's source: a Bayesian method, of course, only produces probabilities of entanglement by first choosing a prior.
If a simple model described one's source very well, then one's test set can be based on that model. We applied the AIC and BIC criteria to several examples, all involving two qubits, and showed that indeed, such criteria indicate whether model's predictions about purity and entanglement of the output of the source (including a probability that one's output state is entangled, as well as an estimate of the amount of entanglement) can be expected to be reliable or not. We demonstrated this by showing that certain estimates produced from a simple model are wrong if the information criteria deem the model to be less informative than the full 15d description of two-qubit quantum states, whereas those estimates are right on the mark, when the criteria deem the simple model to be more informative.
\end{document}
\begin{document}
\begin{abstract}
A meromorphic quadratic differential on a punctured Riemann surface induces horizontal and vertical measured foliations with pole-singularities. In a neighborhood of a pole such a foliation comprises foliated strips and half-planes, and its leaf-space determines a metric graph.
We introduce the notion of an asymptotic direction at each pole, and show that for a punctured surface equipped with a choice of such asymptotic data, any compatible pair of measured foliations uniquely determines a complex structure and a meromorphic quadratic differential realizing that pair.
This proves the analogue of a theorem of Gardiner-Masur, for meromorphic quadratic differentials. We also prove an analogue of the Hubbard-Masur theorem, namely, for a fixed punctured Riemann surface there exists a meromorphic quadratic differential with any prescribed horizontal foliation, and such a differential is unique provided we prescribe the singular-flat geometry at the poles.
\end{abstract}
\maketitle
\section{Introduction}
A holomorphic quadratic differential on a Riemann surface has associated coordinate charts with transition maps that are half-translations ($z\mapsto \pm z+c$). This induces a \textit{singular-flat structure} on the surface, namely, a flat metric with conical singularities, together with a pair (horizontal and vertical) of \textit{measured foliations}. These structures have been useful in Teichm\"{u}ller theory, and the study of the mapping class group of a surface (see \cite{FLP}).
The correspondence between these analytical objects (the differentials) and their induced geometric structures is well-understood for a closed surface. In particular, the work of Hubbard-Masur in \cite{HubbMas} proved that for a fixed compact Riemann surface $X$ of genus $g\geq 2$, assigning the induced horizontal (or vertical) foliation to a holomorphic quadratic differential defines a homeomorphism between the space $Q(X)$, and the space of measured foliations $\mathcal{MF}_g$. Moreover, in \cite{GardMas} Gardiner-Masur proved that the pair of horizontal and vertical foliations uniquely determines the complex structure and holomorphic quadratic differential inducing those foliations (see Theorem 3.1 in that paper).
The main result in this article is the analogue of the Gardiner-Masur theorem for surfaces with punctures.
The analogue of the Hubbard-Masur theorem has been extended to the case of meromorphic quadratic differentials on a punctured surface $S$ of negative Euler characteristic, by the work in \cite{GuptaWolf2} (that deals with higher order poles at the punctures) and \cite{GuptaWolf0} (that deals with poles of order two).
It is well-known that poles of order one (i.e. simple poles) can be reduced to the classical theory (i.e. the holomorphic case) by taking a branched double cover; we therefore consider poles of order greater than one throughout. In the work of Gupta-Wolf, the behaviour of the measured foliations at a pole-singularity was analyzed in terms of the ``principal part" of the differential. This paper develops a more constructive approach in terms of various cut-and-paste operations; as an application we provide an alternative generalization of the Hubbard-Masur theorem in terms of the singular-flat geometry at the poles.
\subsection*{Sphere with $\leq 2$ punctures} We shall first focus on the case when $S$ is the sphere with at most two punctures, that is, $S= {\mathbb C}$ or ${\mathbb C}^\ast$. As a special case of the classical Three-Pole theorem (see \cite{Jenkins2}), the trajectory structure of a meromorphic quadratic differential on $S$ comprises foliated strips and half-planes. Thus, the induced measured foliations can be described in terms of their leaf-spaces, that are metric graphs on the punctured sphere. These metric graphs have $(n-2)$ infinite-length edges incident on any puncture of order $n\geq 3$, and a loop or infinite edge for each pole of order $2$ (see \S2.3 for details).
The case that $S={\mathbb C}$ was dealt with in \cite{AuWan2}; in this case the holomorphic quadratic differential necessarily has a pole of order $n\geq 4$ at $\infty$, and has the form $p(z) dz^2$ where $p$ is a polynomial of degree $n-4$. By a conformal change of coordinates, it can be arranged that the polynomial is \textit{monic}, namely, that the leading coefficient is $1$, and \textit{centered}, namely, that the zeroes of the polynomial have vanishing mean. The leaf-spaces of the induced measured foliations are then planar trees, and the result for this case can be summarized as follows (see \S3 for details):
\begin{jthm}[Au-Wan, \cite{AuWan2}]
The space $\mathcal{MF}_0(n)$ of the measured foliations on $\mathbb{C}\mathrm{P}^1$ with a single pole-singularity of order $n > 4$ at $\infty$ admits a bijective correspondence with the space $\mathcal{T}(n-2) $ of planar metric trees with $(n-2)$ labelled infinite rays incident at $\infty$, and $\mathcal{MF}_0(n) \cong \mathcal{T}(n-2) \cong \mathbb{R}^{n-5}$.
Moreover, let $Q_0(n)\cong \mathbb{C}^{n-5}$ be the space of monic and centered polynomial quadratic differentials of degree $n-4$. Then the map
\begin{equation*}\label{mapPhi}
\Phi_1: Q_0(n) \to \mathcal{MF}_0(n) \times \mathcal{MF}_0(n)
\end{equation*}
that assigns to a polynomial quadratic differential its associated horizontal and vertical foliations, is a homeomorphism.
\end{jthm}
\noindent \textit{Remark.} The space of measured foliations $\mathcal{MF}_0(n)$ decomposes into regions corresponding to the different combinatorial types of planar trees with labelled ends, and there is exactly a Catalan number of them. This is closely related to the classification of the trajectory-structure for polynomial vector fields on $\mathbb{C}$ (see \cite{BranDias}, \cite{Dias}, \cite{DES} and the references therein). One of the differences is that a foliation induced by a quadratic differential is typically not orientable. \\
For $S= {\mathbb C}^\ast$, let $n,m\geq 2$ denote the orders of poles at $0$ and $\infty$ respectively. A meromorphic quadratic differential on ${\mathbb C}^\ast$ with poles of these prescribed orders has the form
\begin{equation}
q= \frac{p(z)}{z^n} dz^2
\end{equation}
where $p$ is a polynomial of degree $n+m-4$ such that $p(0) \neq 0$.
As we shall see in \S2.1, the argument of the leading order coefficient at the poles determines the asymptotic directions of the induced foliations at the poles. At a pole of order two, this asymptotic direction is the ``slope" of the leaves when lifted to the universal cover ${\mathbb H}$ of a neighborhood of the pole. At each higher order pole, the asymptotic direction of a single horizontal leaf determines the complete set of asymptotic directions of horizontal as well as vertical leaves. If we prescribe this asymptotic data, the remaining coefficients of $p(z)$ and the modulus of the leading order term at each pole, parametrize the space of such quadratic differentials $Q_0(n,m) \cong \mathbb{R} \times \mathbb{C}^{n+m-5}\times \mathbb{R} $ provided $n+m >4$.
Our first result is:
\begin{thm}\label{thm1} Let $n,m\geq 2$ such that $n,m$ are not both equal to $2$. Let $\mathcal{MF}_0(n,m)$ be the space of measured foliations on $\mathbb{C}\mathrm{P}^1$ with a pole-singularity of order $n$ at $0$ and of order $m$ at $\infty$, with prescribed asymptotic data at the poles.
Let
\begin{equation*}
\Phi_2: Q_0(n,m) \to \mathcal{MF}_0(n,m) \times \mathcal{MF}_0(n,m)
\end{equation*}
be the map that assigns to a quadratic differential with prescribed asymptotic data, its induced horizontal and vertical measured foliations. Then $\Phi_2$ defines a homeomorphism to the subspace comprising pairs of foliations that
\begin{itemize}
\item do not both have transverse measure zero around the punctures, and
\item in case that either $n$ or $m$ equals two, and both foliations have positive transverse measures around the punctures, then the two transverse measures are compatible with the prescribed asymptotic direction at the pole of order two (see Definition \ref{compat}).
\end{itemize}
\end{thm}
The key part in the proof of Theorem \ref{thm1} is defining an inverse map to $\Phi_2$ (see \S4.2). This uses a decomposition of the measured foliations on ${\mathbb C}^\ast$ into ``model foliations" on neighborhoods of the two punctures, and the remaining annulus. The desired meromorphic quadratic differential is then constructed by assembling the singular-flat surfaces that realize the corresponding pairs of foliations on each of these subsurfaces. On a punctured disk, realizing such a pair of model foliations crucially uses the work of Au-Wan from \cite{AuWan2}. The special case when $n=m=2$ is discussed in \S4.4.
\subsection*{Surface of negative Euler characteristic} Now consider the case when $S$ is an oriented surface of genus $g$ and $k$ labelled punctures, such that the Euler characteristic $2-2g-k<0$. Let $\mathfrak{n} = (n_1, n_2, \ldots, n_k)$ be a $k$-tuple of integers, each greater than one. Let $\mathcal{MF}_g(\mathfrak{n})$ be the space of measured foliations with a pole-singularity of order $n_i$ at the $i$-th puncture, and with prescribed asymptotic directions at the poles.
Combining the results in \cite{GuptaWolf0} and \cite{GuptaWolf2}, we parametrize this space in \S2.2 (see Proposition \ref{mfgn-prop}). This work of Gupta-Wolf had also defined these spaces, but had done so relative to fixing a choice of a ``disk neighborhood" of the poles; the notion of asymptotic data of the foliations at the poles, introduced in this paper, provides a cleaner definition.
By the work in \cite{Bridgeland-Smith}, a \textit{generic} measured foliation in $\mathcal{MF}_g(\mathfrak{n})$ comprises foliated strips and half-planes, and thus has a leaf-space that can be represented as an embedded metric graph on the surface, exactly as in the Three-Pole case. There are, however, measured foliations with more complicated trajectory structure (e.g. dense leaves) whose corresponding leaf-space is described as a $\pi_1(S)$-invariant $\mathbb{R}$-tree in the universal cover of $S$, with an additional $\pi_1(S)$-invariant collection of infinite rays corresponding to the higher-order poles (see \S3.3. of \cite{GuptaWolf2}).
Let ${Q}_g(\mathfrak{n})$ be the space of meromorphic quadratic differentials on $S$, with a pole of order $n_i$ at the $i$-th puncture. Our main result is:
\begin{thm}\label{thm2}
Let $S$ be an oriented surface of genus $g$ and $k$ punctures such that $2-2g-k<0$. Let $\mathfrak{n} = (n_1, n_2, \ldots, n_k)$ be a $k$-tuple of positive integers, each greater than one, and fix a set $\mathfrak{a}$ of asymptotic data comprising a tangent direction at each pole.
Let $(\mathcal{H}, \mathcal{V}) \in \mathcal{MF}_g(\mathfrak{n}) \times \mathcal{MF}_g(\mathfrak{n})$ be a compatible pair of transverse measured foliations, that is,
\begin{itemize}
\item $\mathcal{H}$ has prescribed asymptotic directions given by $\mathfrak{a}$ at the poles, and $\mathcal{V}$ has the opposite set $\sqrt{-1}\cdot \mathfrak{a}$ of asymptotic directions (see Definition \ref{opp}),
\item $\mathcal{H}$ and $\mathcal{V}$ do not simultaneously have transverse measure zero around any puncture, and
\item if $n_i=2$, and both $\mathcal{H}$ and $\mathcal{V}$ have positive transverse measure around the $i$-th puncture, then the two transverse measures are compatible with the prescribed asymptotic direction at the order two pole (see Definition \ref{compat}).
\end{itemize}
Then there exists a unique meromorphic quadratic differential in $Q_g(\mathfrak{n})$ that induces a horizontal foliation equivalent to $\mathcal{H}$ and vertical foliation equivalent to $\mathcal{V}$.
\end{thm}
The proof of Theorem \ref{thm2} in \S5.1 uses a decomposition of the desired pair of measured foliations to model foliations around each puncture (as in the proof of Theorem \ref{thm1}) together with a pair of measured foliations on a surface with boundary. Realizing the latter pair can be reduced to the case of a closed surface by doubling across the boundary; however, the final assembly of singular-flat surfaces requires the angle at which either foliation intersects the boundary to be prescribed. This is achieved in an intermediate step that involves truncating cylindrical ends that are attached to each boundary component. \\
The space of meromorphic quadratic differentials ${Q}_g(\mathfrak{n})$ forms a vector bundle over the ``appended Teichm\"{u}ller space" $\widehat{\T}_{g,k}$ of conformal structures on $S$ up to isotopy fixing a framing of the tangent-space at the punctures (see Definition 3.3. of \cite{GuptaMj1}). Here, the space $\widehat{\T}_{g,k}$ records, in addition to the $6g-6+2k$ parameters of the Teichm\"{u}ller space of $S$, a real ``twist" parameter at each puncture. Let $\pi: {Q}_g(\mathfrak{n})\to \widehat{\T}_{g,k}$ be the projection map; any fiber $\pi^{-1}(X)$ comprises quadratic differentials that are meromorphic with respect to the Riemann surface structure on $X$ and induces foliations that have asymptotic directions and integer twist parameters around each pole determined by the corresponding twist parameter on $X$ (\textit{c.f.} \S3.1 of \cite{GuptaMj1}, and see \S5.2 for details.)
Our final result is a generalization of the Hubbard-Masur theorem to the case of meromorphic quadratic differentials on punctured surfaces. This generalization was first proved in \cite{GuptaWolf0} (for order-two poles) and in \cite{GuptaWolf2} (for higher-order poles) using the theory of harmonic maps, and their work uses the complex-analytic notion of a ``principal part" of a quadratic differential at each pole, with respect to a choice of a coordinate disk. The following alternative generalization instead uses the space of ``model foliations" $\mathcal{P}_n$ in the neighborhood of a pole of order $n$ (introduced in \S2.3).
\begin{thm}\label{thm3} Let $S, \mathfrak{n}$ be as in Theorem \ref{thm2}.
Let ${X} \in \widehat{\T}_{g,k}$, and fix a measured foliation $\mathcal{H} \in \mathcal{MF}_g(\mathfrak{n})$, and model foliations $F_i \in \mathcal{P}_{n_i}$ for each $1\leq i\leq k$.
Suppose the asymptotic directions $\mathfrak{a}$ and real twist parameters of $\mathcal{H}$ at the poles are those determined by the twist parameters of $X$, and $\mathcal{H}$ restricts to the model foliations $F_i^H \in \mathcal{P}_{n_i}$ in a disk $D_i \cong \mathbb{D}^\ast$ around the $i$-th pole, where each pair $(F_i^H, F_i)$ is compatible, exactly as in Theorem \ref{thm2}.
Then there is a unique meromorphic quadratic differential $q\in Q_g(\mathfrak{n})$ satisfying $\pi(q)=X$, such that the horizontal foliation of $q$ is equivalent to $\mathcal{H}$, and the vertical foliation of $q$ restricts to the model foliation $F_i$ at the $i$-th pole.
\end{thm}
\smallskip
A key step in the proofs of Theorems \ref{thm1} and \ref{thm2} was the fact that a pair of model foliations in $\mathcal{P}_n$ uniquely determines a singular-flat metric on a neighborhood of that pole (see Proposition \ref{prop2}). Thus in Theorem \ref{thm3}, since $\mathcal{H}$ determines the horizontal model foliation at each pole, prescribing the vertical model foliations is equivalent to prescribing the geometry of the singular-flat end corresponding to each pole. The strategy of the proof of Theorem \ref{thm3} in \S5.2 is to reduce to the case when all poles have order two (and all ends are cylindrical) and use the main result of \cite{GuptaWolf0}.
\textbf{Acknowledgements.} This article has been in the works for several years. SG and MT are grateful for the support by NSF grants DMS-1107452, 1107263, 1107367 ``RNMS: GEometric structures And Representation varieties" (the GEAR Network). SG is also grateful for the support by the Danish National Research Foundation centre of Excellence, Centre for Quantum Geometry of Moduli Spaces (QGM), the Department of Science and Technology (DST) MATRICS Grant no. MT/2017/000706, the Infosys Foundation, and the UGC. The authors are grateful to Fred Gardiner, as well as an anonymous referee, whose comments improved a previous version of the paper.
\section{Preliminaries}
\subsection{Quadratic differentials and their induced geometry}
A holomorphic quadratic differential $q$ on a Riemann surface $X$ is a holomorphic section of the symmetric square of canonical bundle $K_X^2$. Locally, such a holomorphic quadratic differential can be expressed as $q(z)dz^2$ where $q(z)$ is a holomorphic function. A holomorphic quadratic differential induces a singular-flat metric and horizontal and vertical foliations on the underlying Riemann surface that we now describe. For an account of what follows, see \cite{Streb} or \cite{Gard}. A key new notion introduced in this paper is that of an ``asymptotic direction" at a pole -- see Definition \ref{adata}.
\begin{defn}[Singular-flat metric] A holomorphic quadratic differential induces a conformal metric locally of the form $\lvert q(z)\rvert \lvert dz\rvert^2$, which is a flat Euclidean metric with cone-type singularities at the zeroes, where a zero of order $n$ has a cone-angle of $(n+2)\pi$.
\end{defn}
\begin{defn}[Horizontal and vertical foliations]\label{fols} A holomorphic quadratic differential on $X=\mathbb{C} \text{ or } \mathbb{C}^\ast$ determines a bilinear form $q: T_xX \otimes T_x X \to \mathbb{C}$ at any point $x\in X$ away from the poles. Away from the zeroes, there is a unique (un-oriented) \textit{horizontal direction} $v$ where $q(v,v)\in \mathbb{R}^{+}$. Integral curves of this line field on $X$ determine the \textit{horizontal foliation} on $X$. Similarly, away from the zeroes, there is a unique (un-oriented) \textit{vertical direction} $h$ where $q(h,h)\in i\mathbb{R}^{+}$. Integral curves of this line field on $X$ determine the \textit{vertical foliation} on $X$.
\end{defn}
\noindent \textit{Remarks.} 1. The terminology arises from the fact that for the quadratic differential $dz^2$ on any subset of $\mathbb{C}$ (equipped with the coordinate $z$), the horizontal and vertical foliations are exactly the foliations by horizontal and vertical lines.\\
2. Conversely, if we start with (possibly non-compact) domains on ${\mathbb C}$ whose boundaries comprise straight line intervals and identify pairs of such geodesic by half-translations to obtain an oriented surface, then the resulting surface acquires a Riemann surface structure as well as a holomorphic quadratic differential. The latter descends from the standard differential $dz^2$ on the domains, since $dz^2$ is invariant under half-translations. The condition that the boundary edges are identified by half-translations is equivalent to the requirement that the identification is by a (Euclidean) isometry and the horizontal foliation of the standard differential $dz^2$ intersects any pair of boundary edges being identified at the same angle.
\begin{defn}[Prong-singularities and natural coordinates]\label{pp} At the zero of order $k\geq 1$ of a quadratic differential $q$, the horizontal (and vertical) foliation has a \textit{$(k+2)$-prong singularity}. That is, in a neighborhood of the zero, the horizontal foliation is the pullback of the horizontal foliation on $\mathbb{C}$ by the map $z\mapsto \xi = z^{k/2 +1}$ (which is a branched cover, branched at the zero of the target $\xi$-plane). Here, $\xi$ is called the \textit{natural coordinate} for the quadratic differential, since $q= d\xi^2$ (up to a constant multiplicative factor).
\end{defn}
\begin{figure}
\caption{The horizontal foliation for $zdz^2$ has a $3$-prong singularity at the origin (left), and a pole-singularity of order $5$ at infinity (right). The red arrow shows a choice of an asymptotic direction at the pole. }
\label{singfig}
\end{figure}
\begin{defn}[Pole-singularities of higher order]\label{singh}
At a pole of order $n\geq 2$, the foliation induced by $q$ has a \textit{pole-singularity of order $n$}. For $n>2$, the induced singular-flat geometry comprises $(n-2)$ foliated Euclidean half-planes surrounding the pole in cyclic order; the horizontal leaves are asymptotic to $(n-2)$ directions at the pole, and the same for vertical leaves. See Figure 1, and \S6 of \cite{Streb} for details. Indeed, if the leading order term for $q$ is $ \frac{a^2}{z^n}$ in some local coordinate $z$ around the pole, for some $a \in {\mathbb C}^\ast$ with $\textit{Arg}(a) = \theta$, then the horizontal leaves are asymptotic to the directions at angles $\theta + j \cdot \frac{2\pi}{n-2}$ where $0\leq j <n-2$ and the vertical leaves are asymptotic to the directions $\theta + (j + \frac{1}{2}) \cdot \frac{2\pi}{n-2}$. \end{defn}
\begin{defn}[Pole-singularity of order $2$]\label{sing2} Around a pole of order two, the induced foliation looks either like a foliation by concentric circles, or leaves spiralling to the pole. That is, one can choose a local coordinate disk $U \cong \mathbb{D}^\ast$ around the pole such that $q = -\frac{a^2}{z^2} dz^2$ for some $\pm a\in {\mathbb C}^\ast$, called the \textit{residue} at the pole, which is in fact coordinate-independent. The case of concentric circles then arises for the horizontal foliation when $a^2 \in \mathbb{R}^+$, and for the vertical foliation when $a^2 \in \mathbb{R}^-$. In either case, in the singular-flat metric induced by $q$, a neighborhood of the pole is isometric to a semi-infinite Euclidean cylinder. (See Chapter III \S7.2 of \cite{Streb}, and \S2.2 of \cite{GuptaWolf0}.) \\
We also note that in the universal cover $p:\mathbb{H} \to \mathbb{D}^\ast$ given by $w\mapsto z=e^{2\pi i w}$, $q$ pulls back to the quadratic differential $\frac{a^2}{4\pi^2} dz^2$, and the induced foliation on $\mathbb{H}$ is by straight lines at an angle $\theta = - \text{Arg}(a)$. (See Figure 2.) This will be the definition of the asymptotic direction in this case (see Definition \ref{adata}).
\end{defn}
\begin{figure}
\caption{The angle $\theta$ of the leaves in $\mathbb{H}$, the universal cover of a punctured-disk neighborhood of a pole of order two.}
\label{singfig2}
\end{figure}
\noindent \textit{Remark.} In a neighborhood of a pole of order $1$, also called a \textit{simple} pole, this foliation looks like a ``fold", since it is the pullback of the horizontal foliation by the map $z\mapsto \xi = \sqrt{z}$. As alluded to in the Introduction, this implies that the pole-singularity becomes a regular point on the double cover branched at the simple pole.
\begin{defn}[Transverse measure]\label{tm} The horizontal (resp. vertical) foliation induced by a holomorphic quadratic differential is equipped with a transverse measure, that is, any arc transverse to the foliation acquires a measure that is invariant under transverse homotopy of the arc. Namely, the \textit{transverse measure} of such an arc $\gamma$ transverse to the horizontal foliation is
$$\tau_h(\gamma) = \Bigl\lvert \displaystyle\int\limits_\gamma\Im (\sqrt q) (z)\, dz \Bigr\rvert $$
assuming $\gamma$ is contained in a coordinate chart, and similarly the transverse measure $\tau_v(\gamma)$ of an arc $\gamma$ transverse to the \textit{vertical} foliation is given by the modulus of the integral of the real part $ \Re (\sqrt q) (z) $.
In general one adds such distances along a cover of the arc comprising of coordinate charts; this is well-defined as the above integrals are preserved (up to sign) under change of coordinates.
Given a simple closed curve $\gamma$ that is homotopically non-trivial, we define the transverse measure of the homotopy class $[\gamma]$ to be the infimum of the transverse measures of curves homotopic to $\gamma$.
\end{defn}
These foliations equipped with a transverse measure induced by a holomorphic quadratic differential are examples of a \textit{measured foliation} on a smooth surface, that is defined purely as a topological object as follows:
\begin{defn}[Measured foliations]\label{mfdef} A \textit{measured foliation} on a (possibly punctured) smooth surface $S$ is a $1$-dimensional foliation that is smooth except finitely many prong-singularities (see Definition \ref{pp}), equipped with a transverse measure. We shall define two such measured foliations to be \textit{equivalent} if they differ by an isotopy and Whitehead-moves. If the surface has punctures, then the isotopy is relative to the punctures, in the sense that a choice of framing of the tangent space at the pole (given by a tangent direction $v$ and an orthogonal vector $\sqrt{-1}\cdot v$) is kept fixed by the isotopy. Equivalently, if we consider a real oriented blow-up of each puncture to a boundary circle to obtain a surface-with-boundary, then the isotopy is required to fix each boundary component pointwise.
\end{defn}
\noindent \textit{Remarks.} 1. For a \textit{closed} surface, it can be shown that two measured foliations are equivalent if and only if the respective transverse measures of homotopy classes of all simple closed curves are equal. See \cite{FLP} for a comprehensive account of this. \\
2. As mentioned in the Introduction, for a closed Riemann surface $X$, \cite{HubbMas} showed that any such equivalence class of a measured foliation (on the underlying smooth surface) is in fact the horizontal (or vertical) foliation of a unique holomorphic quadratic differential. \\
The following fact about the global trajectory-structure is well-known (see \cite{Jenk} or \cite{Streb}):
\begin{prop}\label{fstruc} Let $F$ be a measured foliation on a compact surface $S$ with finitely many pole-singularities of order greater than one at the punctures. Then the surface can be decomposed into finitely many regions, such that the restriction of $F$ to any region yields one of the following:
\begin{enumerate}
\item a foliated half-plane, foliated by leaves parallel to the boundary.
\item a foliated strip, foliated by leaves parallel to the two boundary components, or
\item a foliated annulus, with leaves that are closed curves parallel to the two boundary components. (We shall continue to call this a ``ring-domain".)
\item a \textit{spiral domain} in which each leaf is dense.
\end{enumerate}
\end{prop}
At the pole-singularities of a measured foliation, we introduce a circle-valued parameter:
\begin{defn}[Asymptotic direction]\label{adata} The \textit{asymptotic direction} of a measured foliation $F$ at a pole-singularity of order $n>2$ is the asymptotic direction $\theta$ of a leaf at the pole, where $\theta$ can be thought of as a point on the unit tangent circle at the pole.
At a pole-singularity of order $n=2$, the asymptotic direction of $F$ is defined to be the angle $\theta \in [0,\pi)$ of the linear foliation on the universal cover $\mathbb{H}$ that descends to a foliation equivalent to $F$ in a punctured-disk neighborhood of the pole (\textit{c.f.} Definition \ref{sing2}). Note that in the case that $\theta = 0$ the foliation $F$ comprises closed leaves (concentric circles) around the pole; in this case the transverse measure around the pole is necessarily zero.
\end{defn}
\subsection{Compatible pairs}
The horizontal and vertical foliations induced by a meromorphic quadratic differential are, by construction, transverse to each other away from the prong and pole singularities. In this section we list some such ``compatibility" criteria that are necessary for a pair of measured foliations to be equivalent to the horizontal and vertical foliations of some meromorphic quadratic differential.
First, the ``transversality" of the two foliations implies the following:
\begin{lem}
\label{transverse0}
Let $\mathcal{H}$ and $\mathcal{V}$ be the horizontal and vertical foliations, respectively, of a meromorphic quadratic differential on some surface $S$. For any simple closed curve $\gamma$ on $S$ that is homotopically non-trivial, let $\tau_h,\tau_v$ be the transverse measures of the homotopy class of $\gamma$, for $\mathcal{H}$ and $\mathcal{V}$ respectively. Then $\tau_h, \tau_v$ cannot both be zero.
\end{lem}
\begin{proof}
It suffices to show that if the vertical foliation has a ring domain with core curve $\gamma$, then the horizontal foliation cannot have a ring domain with the same core curve.
Suppose both the horizontal and vertical foliations have ring domains, with core curves homotopic to $\gamma$.
Let $\gamma_v$ be a leaf in the vertical ring domain, and $\gamma_h$ be a leaf in the horizontal ring domain.
There are two cases:
Case 1: The leaves $\gamma_v$ and $\gamma_h$ are disjoint:
Since they are homotopic to each other, they bound an annulus $A$ between them. Consider the restriction $F$ of, say, the horizontal foliation on $A$. One boundary component of $A$ is a leaf of $F$, and the other boundary (which is a vertical leaf) is transverse to $F$. This implies that $F$ must have singularities in $A$; however any prong-singularity has negative index, and since the Euler characteristic of $A$ is zero, we again have a contradiction to the Poincar\'{e}-Hopf theorem.
Case 2: The leaves $\gamma_v$ and $\gamma_h$ intersect:
By an ``innermost disk" argument we can choose two sub-arcs of $\gamma_v$ and $\gamma_h$ respectively, that bound a topological disk $D$. The horizontal foliation is transverse to the part of the boundary $\partial D$ that is vertical; we can assume, after an isotopy, that the leaves intersect the boundary orthogonally. Then doubling across it, we obtain a foliated disk such that the boundary is a leaf, and the foliation has only prong-type singularities. This contradicts the Poincar\'{e}-Hopf theorem, exactly as in the proof of the Claim above.
This contradicts our assumption that both the horizontal and vertical foliations have ring domains with core curve $\gamma$; hence, one of the transverse measures $\tau_h, \tau_v$ of the homotopy class of $\gamma$ is positive. \end{proof}
We shall apply the above lemma, in particular, for asserting that the transverse measures of the horizontal and vertical foliations around any pole-singularity (considered as a puncture on the surface) cannot both be zero.
At these pole-singularities, the horizontal and vertical foliations satisfy some additional compatibility conditions, which we now define:
\begin{defn}[Opposite parameters]\label{opp}
Given an asymptotic direction $a \in S^1$ at a pole of order $n>2$ (See Definition \ref{adata}), a direction $a^\prime \in S^1$ is \textit{opposite} if it differs from $a$ by an odd multiple of $\pi/(n-2)$.
For an asymptotic direction $\theta$ at a pole of order two, the opposite is the asymptotic direction $\theta + \pi/2$ (modulo $\pi$).
Note that the horizontal and vertical foliations induced by a meromorphic quadratic differential $q$ have opposite asymptotic directions at each pole (see Definition \ref{singh}).
\end{defn}
\begin{defn}[Compatible transverse measures]\label{compat} Let $F,G$ be two measured foliations with a pole-singularity of order two and asymptotic direction $\theta \in (0,\pi)$, such that the transverse measures $\tau_F$ and $\tau_G$ around the pole are positive. Then these transverse measures are said to be \textit{compatible with the asymptotic direction} $\theta$, if the ratio $\tau_F/\tau_G = \lvert \tan{\theta} \rvert$.
\end{defn}
\noindent \textit{Remark.} As in the previous definition, the motivation for this definition is that this compatibility of transverse measures is necessary if $F,G$ are the horizontal and vertical foliations induced by $q = -\frac{a^2}{z^2} dz^2$. Indeed, from our definitions, in that case $\theta = -\text{Arg}(a)$ (modulo $\pi$), and $\tau_F = \lvert a \rvert \lvert \cos \theta \rvert$ and $\tau_G = \lvert a \rvert \sin \theta $. \\
Finally, we shall say:
\begin{defn}\label{compat2} Two measured foliations $\mathcal{H}$ and $\mathcal{V}$ on a punctured surface $S$ with pole-singularities at the punctures of identical orders, are said to be \textit{compatible} if
\begin{itemize}
\item[(i)] they are transverse to each other away from the prong and pole-singularities,
\item[(ii)] the transverse measures around any pole-singularity are not both zero,
\item[(iii)] the asymptotic directions at each pole-singularity are opposite, and
\item[(iv)] if both have positive transverse measures around a pole-singularity of order two, then the two transverse measures are compatible with the asymptotic direction, as in Definition \ref{compat}.
\end{itemize}
\end{defn}
\subsection{Space of measured foliations}
We can define the following space of measured foliations (already introduced in the Introduction):
\begin{defn}\label{mfgn-def} For an integer $k\geq 1$, and an integer $k$-tuple $\mathfrak{n}= (n_1,n_2,\ldots, n_k)$ such that each $n_i\geq 2$, we define $\mathcal{MF}_g(\mathfrak{n})$ to be the space of (equivalence classes of) measured foliations on an oriented surface $S$ of genus $g$ and $k$ labelled points, such that the $i$-th point is a pole-singularity of order $n_i$, and the asymptotic direction (see Definition \ref{adata}) at each point is prescribed.
\end{defn}
In this section we shall describe the parametrization of $\mathcal{MF}_g(\mathfrak{n})$, following the discussions in \cite{GuptaWolf0} and \cite{GuptaWolf2}. The topology on $\mathcal{MF}_g(\mathfrak{n})$ will also be described in the proof of Proposition \ref{mfgn-prop}. We shall start with:
\begin{defn}[Model foliations on $\mathbb{D}^\ast$]\label{model}
For any pole-singularity of order $n>2$, there is an (open) punctured disk neighborhood $U \cong \mathbb{D}^\ast $ that is (a) a ``sink-neighborhood'', that is, any leaf entering $U$ continues to the pole, after possibly passing through prong-singularities, and (b) satisfies the property that no leaf exits $U$ and then enters $U$ again. (See the discussion in Definition 12 of \cite{GuptaWolf2}.) The measured foliation $F\vert_U$ is then a ``model foliation'' for that order $n$ of a pole-singularity. Note that $F\vert_U$ comprises the foliated half-planes and (possibly) foliated strips (either infinite, from the puncture to itself, or semi-infinite, from the puncture to $\partial U$). As before, we consider such foliations up to an equivalence: two model foliations on $U$ are equivalent if they differ by Whitehead moves, or an isotopy that fixes a framing of the tangent-space at the puncture (but is allowed to move points on the boundary $\partial U$).\end{defn}
Recall that the leaf-space $G$ of a measured foliation $F$ on a surface is defined as $$G := X/\sim$$ where $x\sim y$ if $x,y$ lie on the same leaf of $F$, or on leaves that are incident on a common prong-singularity. The leaf-space of $F\vert_U$ is then a metric graph $G$ with finitely many vertices and edges, where the finite-length edges of $G$ are the leaf-spaces of the strips, and the $(n-2)$ infinite-length rays are the leaf-spaces of the half-planes. See Figure 3. Conversely, given such a metric graph $G$, it is easy to construct a model foliation on a punctured disk with leaf-space $G$; this is uniquely defined once the asymptotic direction at the puncture is fixed.
\begin{figure}
\caption{A model foliation in a neighborhood of a pole of order 3 (inside the circle shown on the left) has a leaf-space that is a metric graph (show on the right) with one finite length cycle and one infinite ray. }
\label{singfig}
\end{figure}
For $n>2$, let ${\mathcal{P}}_n$ be the space of model foliations on a punctured disk with a pole-singularity of order $n$ at the puncture, with a prescribed asymptotic direction at the puncture. We can equip this space with the topology on the space of their leaf-spaces: two metric graphs are close if they are combinatorially equivalent up to Whitehead moves on short edges, and the lengths of the finite-length edges are close. Following Proposition 17 of \cite{GuptaWolf2} we have:
\begin{prop}\label{prop-pn} Let $n>2$. The space ${\mathcal{P}}_n$ is homeomorphic to $\mathbb{R}^{n-3} \times \mathbb{R}_{\geq 0}$ where the first factor is parametrized by the edge-lengths of the leaf-spaces, and the second factor records the transverse measure of $\partial U$.
\end{prop}
\subsubsection*{Order two pole} For a pole of order $n=2$, there is a punctured disk neighborhood $U$ of the pole such that the leaf-space of $F\vert_U$ is either an infinite ray (in the case that the transverse measure is zero), or a circle of circumference equal to the transverse measure around $\partial U$. The foliation on $U$ is rotationally symmetric; however we can define the asymptotic direction of the leaves on the universal cover (see Definition \ref{sing2}). Once again, there is a space $\mathcal{P}_2$ of such model foliations on a punctured disk, where we note:
\begin{prop}\label{prop-p2} A model foliation on $\mathbb{D}^\ast$ with a pole singularity of order $2$ at the puncture is uniquely determined by the transverse measure, and the asymptotic direction at the pole. \end{prop}
\begin{proof}
Recall from Definition \ref{adata} that the asymptotic direction at the pole determines the angle $\theta \in [0,\pi)$ of the leaves of the straight-line foliation on the universal cover $\mathbb{H}$. Thus the lift of the model foliation to the universal cover is specified completely by the asymptotic direction. If the asymptotic direction is $0$, then the transverse measure is necessarily equal to zero, and the lifted foliation is by horizontal lines, which is the horizontal foliation of the quadratic differential $\tilde{q} = dz^2$ on ${\mathbb H}$.
Otherwise, the lifted foliation is the horizontal foliation of a constant quadratic differential $\tilde{q} = a^2dz^2$ on ${\mathbb H}$, where $a\in {\mathbb C}^\ast$ satisfies $\text{Arg}(a) = -\theta$. If the transverse measure is prescribed to be $\tau>0$, then from our definitions $\tau = \lvert a \rvert \lvert \cos \theta \rvert$, and hence $a$ is uniquely determined.
Thus, given $\theta$ and the transverse measure, the measured foliation in the quotient $\mathbb{D}^\ast = {\mathbb H}/\langle z \mapsto z+1\rangle$ is uniquely determined.
\end{proof}
\subsubsection*{Foliations on a surface with boundary}
In \S3.1 of \cite{GuptaWolf2} and \S3.4 of \cite{ALPS}, the space of measured foliations $\mathcal{MF}_{g,k}$ on a compact oriented surface of genus $g$ and $k\geq 1$ boundary components, and negative Euler characteristic, was parametrized. In their work the foliations were considered up to isotopy that allowed points on the boundary to move, that is, there was no ``twist" parameter associated with the boundary components. They proved (see, Proposition 3.9 of \cite{ALPS} or Proposition 11 of \cite{GuptaWolf2}) that:
\begin{prop}\label{mfb} The space of measured foliations $\mathcal{MF}_{g,k}$ is homeomorphic to $\mathbb{R}^{6g-6 + 3k}$.
\end{prop}
Here, the topology on $\mathcal{MF}_{g,k}$ is such that two measured foliations are close if the transverse measures of (homotopy classes) of a filling set of arcs or simple closed curves on $S$ are close.
Note that in their parametrization, the transverse measure around a boundary component determines a \textit{real}-valued parameter $\tau$ (and not a non-negative real parameter); $\tau<0$ is interpreted as the foliation having a ring domain adjacent to the boundary (i.e.\ a cylinder foliated by closed leaves parallel to boundary) with transverse measure $\lvert \tau\rvert$.
\subsubsection*{Parametrizing $\mathcal{MF}_g(\mathfrak{n})$}
The measured foliations with pole-singularities in $\mathcal{MF}_g(\mathfrak{n})$ have an additional real-valued twist parameter associated with each pole, since we consider foliations up to an isotopy that fixes a framing of the tangent space at each such point. Such a framing is determined by the asymptotic data at the pole and its opposite (see Definitions \ref{adata} and \ref{opp}).
Combining the cases of foliations on a surface-with-boundary, and on a punctured disk, as discussed above, we have:
\begin{prop}\label{mfgn-prop} Let $S$ be an oriented surface of negative Euler-characteristic, having genus $g$ and $k\geq 1$ punctures. Let $\mathfrak{n}$ be a $k$-tuple of integers greater than one, as in Definition \ref{mfgn-def}. Then the space of measured foliations $\mathcal{MF}_g(\mathfrak{n})$ is homeomorphic to $\mathbb{R}^\chi$ where $\chi = 6g-6 + \sum\limits_{i=1}^k (n_i +1)$.
\end{prop}
\begin{proof}
Let $F$ be a measured foliation in $\mathcal{MF}_g(\mathfrak{n})$.
Deleting the neighborhoods $U_1,U_2,\ldots, U_k$ where $F$ restricts to a model foliation, we obtain a measured foliation $F_0$ on the surface-with-boundary $S^\prime = S \setminus (U_1 \cup U_2 \cup \cdots \cup U_k)$ that by Proposition \ref{mfb} is parametrized by $6g-6+3k$ parameters.
On a punctured disk $U_i$ around the $i$-th puncture with a pole-singularity of order $n_i>2$, the model foliation $F\vert_{U_i}$ is specified by $n_i-3$ additional parameters by Proposition \ref{prop-pn} (since the transverse measure parameter of $\partial U_i$ has to coincide with that of $F_0$). We have an additional real twist parameter around each puncture, which measures the gluing of $U_i$ with the corresponding boundary component of $S^\prime$. This is relevant only when the transverse measure around $\partial U_i$ is positive, since otherwise $\partial U_i$ is a closed leaf of the foliation, and foliations differing by a twist around it are in fact isotopic.
The data of the twist parameter $\sigma_i$ can be thought of as measuring an additional twist associated with the boundary component $\partial U_i$ on $S^\prime$. As usual for Fenchel-Nielsen parameters, these twist parameters can be measured relative to a collection of reference arcs, each non-trivial in homotopy, between the boundary components of $S^\prime$.
Moreover, each twist parameter can be combined with the transverse measure of that boundary component: namely, following \cite{ALPS}, the two real parameters of the (non-negative) transverse measure $\tau$ around the boundary component and twist parameter $\sigma$ constitute the parameter space
\begin{equation}\label{pr2}
\mathbb{R}^{[2]} = \mathbb{R}_{\geq 0} \times \mathbb{R}/\sim\text{ where } (0, \sigma) \sim (0, - \sigma)
\end{equation}
that is homeomorphic to $\mathbb{R}^2$.
Here, when the transverse measure $\tau=0$, the absolute value of the $\sigma$ coordinate equals the transverse measure across the corresponding ring domain adjacent to the boundary. In particular, if the twist parameter $\sigma$ is kept fixed, and the transverse measure $\tau \to 0$, then the foliations converge to a ring domain of length $ \lvert \sigma \rvert$. This describes the phenomenon that foliations converge to one with a ring domain, as we twist more and more, and decrease the transverse measure at the appropriate rate so that the foliations converge. Note that one can converge to such a foliation both by positive or negative twists; this results in the identification of the positive and negative rays as described above.
On a punctured disk $U_i$ with pole order $n_i=2$, the model foliation $F\vert_{U_i}$ is uniquely determined from the prescribed asymptotic direction and the transverse measure of $\partial U_i$ (see Proposition \ref{prop-p2}). Hence $F_0$ admits a unique extension to $U_i$, and for such a punctured disk, there are no other parameters. Note that in the case that the asymptotic direction at the $i$-th puncture forces the transverse measure of $\partial U_i$ to be zero, then the (possibly degenerate) ring domain $R$ of $F_0$ adjacent to $\partial U_i$ extends to a ring domain on $U_i$, and we ignore the transverse measure across $R$.
Adding the parameters thus obtained, we have $\chi= 6g-6 + \sum\limits_{i=1}^k (n_i +1)$ real parameters that specify $F$ uniquely.
Conversely, any such set of $\chi$ real parameters can be realized: the parameters determine a unique measured foliation $F_0$ on $S^\prime$ by Proposition \ref{mfb}, and on each $U_i$ by Propositions \ref{prop-pn} (if $n_i>2$) and \ref{prop-p2} (if $n_i=2$). Thus, it only remains to glue the foliated disk $U_i$ by identifying the boundary $\partial U_i$ with the $i$-th boundary component of $S^\prime$; here the twist parameter $\sigma_i$ of $F_0$ associated with that boundary component plays a role. In the case that the transverse measure of the $i$-th boundary component $\tau_i >0$, this identification of the two circles is with a twist that, in the universal cover, corresponds to identifying the boundary lines after a translation by a (signed) distance $\sigma_i$, where the distance on the line is measured using the induced transverse measure.
In the case that the transverse measure of the $i$-th boundary component $\tau_i = 0$, the twist parameter $\sigma_i$ denotes the transverse measure across a ring domain $A_i$; here such a ring domain $A_i$ is inserted in between the boundary component of $S^\prime$ and the foliated disk $U_i$.
The topology on $\mathcal{MF}_g(\mathfrak{n})$ is defined to make this bijection a homeomorphism: namely, a pair of foliations (with the same set of asymptotic directions at the poles) are close if
\begin{itemize}
\item[(i)] their restriction on neighborhoods of the poles define a pair of model foliations that are close, in the corresponding space $\mathcal{P}_n$,
\item[(ii)] their restriction to the complement of these neighborhoods determines a pair of foliations that is close in $\mathcal{MF}_{g,k}$, and
\item[(iii)] the twist parameters that determines the gluing of each neighborhood in (i) with the complementary subsurface in (ii) are close.
\end{itemize} \end{proof}
\noindent \textit{Remark.} Proposition \ref{mfgn-prop} assumes that the underlying surface $S$ has negative Euler characteristic; the spaces of foliations on the complex plane ${\mathbb C}$ and the punctured plane $\mathbb{C}^\ast$ (relevant for Theorem \ref{thm1}) will be described in \S3 and \S4.1 respectively.
\section{The work of Au-Wan}
In this section we recall the work of Au-Wan in \cite{AuWan2} that solved the problem of prescribing horizontal and vertical foliations of a meromorphic quadratic differential on $\mathbb{C}\mathrm{P}^1$ with exactly one pole, necessarily of order $n\geq 4$. As mentioned in \S1, the space of such quadratic differentials is
\begin{center}
$ {Q}_0(n) = \{(z^{n-4} + a_1z^{n-6} + \cdots + a_{n-6} z + a_{n-5})dz^2 \mid a_i \in \mathbb{C} \text{ for } i=1,\ldots, n-5\}$
\end{center}
and is thus homeomorphic to ${\mathbb C}^{n-5}$. Note that we have normalized our polynomial to be, in particular, monic; this fixes the asymptotic data (see Definition \ref{adata}) at the pole at $\infty$.
{\Sigma}ubsection*{Measured foliations on ${\mathbb C}$} On the other hand, a measured foliation on $\mathbb{C}\mathrm{P}^1$ with a single pole-singularity of order $n> 4$ at $\infty$ has $(n-2)$ foliated half-planes around $\infty$ and (possibly) foliated infinite strips. In what follows we shall assume that the positive real direction is an asymptotic direction at the pole at infinity. The leaf-space of such a foliation is thus a planar metric tree; the $(n-2)$ infinite-length edges corresponding to the half-planes are labelled by $\{1,2,\ldots, n-2\}$ in anti-clockwise order, where the ray corresponding to the positive real direction is labelled $1$. Note that this metric tree can be \textit{embedded} in ${\mathbb C}$, transverse to the foliation, such that each infinite ray eventually lies in the foliated half-plane it represents.
Following the work of Mulase-Penkava in \cite{MulPenk}, any such metric tree is obtained by a \textit{metric expansion} of a $(n-2)$-pronged star $G_{n-2}$ (where the prongs are infinite-length rays that are labelled) that replaces the central vertex of $G_{n-2}$ by a tree (with each new vertex of degree greater than two) that connects with the rest of the graph.
They proved:
\begin{thm}[Theorem 3.3 of \cite{MulPenk}]\label{mex} The space of metric trees $\mathsf{T}(n-2)$ with $(n-2)$ infinite rays, labelled in cyclic order, and all vertices of valence at least $3$, is homeomorphic to $\mathbb{R}^{n-5}$.
\end{thm}
\noindent \textit{Remark.} It is easy to check that a generic tree in $\mathsf{T}(n-2)$ is trivalent at each vertex, and has exactly $n-5$ edges of finite length. These (non-negative) lengths form parameters that parametrize a subset of $\mathsf{T}(n-2)$ corresponding to a fixed combinatorial type; there are Catalan number of types that are obtained by Whitehead moves and the corresponding regions fit together to form $\mathbb{R}^{n-5}$ (see Figure \ref{combtypegraph}).\\
\begin{figure}
\caption{The different combinatorial types of metric trees in $\mathsf{T}(n-2)$.}
\label{combtypegraph}
\end{figure}
As a consequence, we have:
\begin{prop}\label{mex2} For $n> 4$ the space of foliations $\mathcal{MF}_0(n)$ on $\mathbb{C}\mathrm{P}^1$ with exactly one pole-singularity of order $n$, is homeomorphic to $\mathbb{R}^{n-5}$.
\end{prop}
\begin{proof}
Let $\Psi_0: \mathcal{MF}_0(n) \to \mathsf{T}(n-2)$ be the map that assigns to a foliation its leaf-space.
It is not difficult to construct an inverse map: given a planar metric tree in $\mathsf{T}(n-2)$, we arrange foliated half-planes and foliated infinite strips in the pattern prescribed by the tree, and identify their boundaries. Note that the strip widths are prescribed by the edge-lengths of the tree.
The proposition then follows from Theorem \ref{mex}.
\end{proof}
\subsection*{Prescribing horizontal and vertical trees}
To complete the proof of Au-Wan's theorem stated in \S1, it remains to show that the map
\begin{equation*}
\Phi_1: Q_0(n) \to \mathcal{MF}_0(n) \times \mathcal{MF}_0(n)
\end{equation*}
is a homeomorphism.
It suffices to define the inverse map, that is, given a pair of measured foliations (or equivalently, their metric trees), construct a holomorphic quadratic differential which has these as its vertical and horizontal foliations. Such a quadratic differential can be constructed by attaching Euclidean half-planes and bi-infinite strips to each other by isometries (half-translations) on their boundaries; the standard differential $dz^2$ on each piece then descends to a well-defined holomorphic quadratic differential on the resulting surface (\textit{c.f.} Remark (2) after Definition \ref{fols}).
This then becomes a combinatorial problem, which was solved by Au-Wan who gave a more general construction, that works for metric trees with countably many edges:
\begin{theorem}[Theorem 4.1 of \cite{AuWan2}]\label{aw} Given two properly embedded planar metric trees $H,V$ in $\mathbb{C}$ and a bijection $f$ between the infinite rays of $H$ and the complementary regions of $V$, there is a unique quadratic differential on $\mathbb{C}$ or $\mathbb{D}$ with induced horizontal and vertical foliations that have leaf-spaces $V$ and $H$ respectively. Moreover, the arrangement of their foliated half-planes induces the prescribed bijection $f$.
\end{theorem}
\noindent \textit{Remarks.} (i) In the case that $V$ and $H$ have finitely-many edges (as is the case for metric trees in $\mathsf{T}(n-2)$) they showed that the resulting quadratic differential is in fact defined on the complex plane ${\mathbb C}$ (see Theorem 4.5 of \cite{AuWan2}).
(ii) The uniqueness of the quadratic differential obtained is clarified in Theorem 4.2 of \cite{AuWan2}: they show that if there are homeomorphisms $F,G:\mathbb{C} \to \mathbb{C}$ that restrict to isometries of $V$ and $H$ respectively, then the quadratic differential that realizes $(V,H, f)$ is identical to the one that realizes $(V,H, G\circ f\circ F^{-1})$. \\
Thus, by Remark (ii) above, to define the inverse of $\Phi_1$, it suffices to prescribe the bijection $f$ uniquely. We can do this by assigning to each $i$ in the cyclically ordered set $\{1,2,\ldots, n-2\}$, the complementary region of $V$ that is enclosed by the infinite rays of $V$ labelled $i-1$ and $i$ (and possibly other edges of $V$).
\section{Proof of Theorem \ref{thm1}}
In \S4.1-4.3, we shall deal with the case when $S$ is the surface ${\mathbb C}^\ast$, and complete the proof of Theorem \ref{thm1}. In these sections $n,m\geq 2$ will be the orders of the poles at $0$ and $\infty$ respectively, such that at least one of $n,m$ is strictly greater than two, and we shall fix an asymptotic direction at each pole. The special case when $n=m=2$ is dealt with in \S4.4.
\subsection{Foliations on ${\mathbb C}^\ast$}
Following the notation introduced earlier, $\mathcal{MF}_0(n,m)$ is the space of measured foliations on ${\mathbb C}^\ast$ with pole-singularities of orders $n$ and $m$ at $0$ and $\infty$ that have the prescribed asymptotic data at the two poles.
Topologically, a punctured plane can be thought of as a bi-infinite Euclidean cylinder. Any measured foliation in $\mathcal{MF}_0(n,m)$ can be decomposed into a foliation without any prong-singularities on a finite-modulus annulus $A$ in the middle of the cylinder, and two model foliations in punctured-disk neighborhoods of the two ends, i.e.\ around $0$ and $\infty$, that lie in $\mathcal{P}_n$ and $\mathcal{P}_m$ respectively.
In what follows, the \textit{transverse measure} $\tau_F$ of a foliation $F \in \mathcal{MF}_0(n,m)$ shall refer to the transverse measure of {the homotopy class of} a loop around the puncture(s), unless otherwise specified. (See Definition \ref{tm}.)
The leaf-space of the restriction of $F$ to $A$ is either
\begin{itemize}
\item[(a)] if $\tau_F>0$, an embedded circle homotopic to the core curve of the bi-infinite cylinder with length equal to $\tau_F$, or
\item [(b)] if $\tau_F=0$, an embedded interval corresponding to the ring domain $A$ of length equal to the transverse measure across $A$.
\end{itemize}
The leaf-space of the entire foliation $F$ then comprises metric trees that are the leaf-spaces of the model foliations on $D_0$ and $D_\infty$ respectively, attached to the circle or interval corresponding to $A$ as above. See Figure 5. Although we shall not need this fact, we mention here that this metric graph recovers the measured foliation $F$, except when the transverse measure $\tau_F>0$, in which case one needs the additional data of the number of Dehn-twists across $A$. \\
\begin{figure}
\caption{Possible measured foliations in $\mathcal{MF}_0(n,m)$.}
\label{singfig}
\end{figure}
For the following parametrization of $\mathcal{MF}_0(n,m)$, we shall use the above decomposition of a measured foliation on ${\mathbb C}^\ast$, into model foliations on punctured disks and a foliated annulus $A$:
\begin{prop}\label{prop1} If $n,m\geq 3$, the space $\mathcal{MF}_0(n,m)$ is homeomorphic to $\mathbb{R}^{n+m-4}$. In the case that one of the poles has order $2$, say $n=2$, then $\mathcal{MF}_0(2,m)$ is homeomorphic to $\mathbb{R}^{m-3}$ if the asymptotic direction at the order two pole is $0$ (i.e.\ the transverse measure is zero), otherwise it is homeomorphic to $\mathbb{R}^{m-1}$.
\end{prop}
\begin{proof}
We first consider the case when $n,m>2$. Let $F\in \mathcal{MF}_0(n,m)$.
Let $D_0$ and $D_\infty$ be neighborhoods of $0$ and $\infty$ respectively, such that $F\vert_{D_0} \in \mathcal{P}_n$ and $F\vert_{D_\infty} \in \mathcal{P}_m$. Then $A: = \mathbb{C}^\ast {\Sigma}etminus (D_0 \cup D_\infty)$ is a ``central annulus" that we shall think of as a Euclidean cylinder of finite modulus.
In the case that the transverse measure $\tau_F>0$, we shall assume that all the ``twisting" of the leaves of the foliation across ${\mathbb C}^\ast$ happens in $A$. (For $\tau_F=0$ this twist parameter is absent, since $A$ is then a ring domain and foliations differing by a Dehn twist are isotopic to each other.)
The foliation $F\vert_A$ can be isotoped (relative to the boundary) to a foliation by straight lines of constant slope; this foliation is parametrized by two real parameters, which are the transverse measure $\tau$ around $A$, and a twist parameter $\sigma$ across $A$. These form a parameter space homeomorphic to $\mathbb{R}^2$, exactly as in equation \eqref{pr2} in the proof of Proposition \ref{mfgn-prop}.
By Proposition \ref{prop-pn} the model foliations on $D_0$ and $D_\infty$ are parametrized by $\mathbb{R}^{n-3}$ and $\mathbb{R}^{m-3}$ respectively, assuming we have fixed the transverse measure around the boundary to be equal to $\tau_F$, since they are glued with the boundary components of $A$. Adding the parameters, we see that the total parameter space is $\mathbb{R}^{n+m-4}$.
When one of the pole-singularities is of order two, say $n=2$, then recall that the prescribed asymptotic direction at $0$ determines the slope of the leaves on $D_0$. We consider two sub-cases:
\begin{itemize}
\item[(a)] If the transverse measure around the pole is zero, then so is the transverse measure around $A$. The foliations on $D_0$ and $A$ are both ring domains, and $A$ can be absorbed into $D_0$. The possible model foliations on $D_\infty$ are parametrized by $\mathbb{R}^{m-3}$ by Proposition \ref{prop-pn}.
\item[(b)] If the transverse measure is positive, then this agrees with the transverse measure of $A$. By Proposition \ref{prop-p2}, the model foliation on $D_0$ is then completely determined, since we have already fixed the asymptotic direction at $0$.
The parameters specifying the entire foliation then are the (positive) transverse measure around $A$, the twist parameter across $A$, and the $(m-3)$ parameters for the foliation on $D_\infty$ as before. Hence the parameter space is $\mathbb{R}^{m-1}$.
\end{itemize}
As in Proposition \ref{mfgn-prop}, the topology on the space $\mathcal{MF}_0(n,m)$ is defined to be the one for which this parametrization is a homeomorphism; namely, two foliations $F_1, F_2\in \mathcal{MF}_0(n,m)$ are close if their restrictions to $D_0$ and $D_\infty$ are close in the space of model foliations $\mathcal{P}_n$ and $\mathcal{P}_m$ respectively, and so are the pairs of transverse measures $\tau_1,\tau_2$ and twist parameters $\sigma_1,\sigma_2$.
\end{proof}
\subsection{Prescribing horizontal and vertical foliations}
Recall from \S1 that the space $Q_0(n,m)$ is the space of meromorphic quadratic differentials on ${\mathbb C}^\ast$ with a pole of order $n$ and $m$ at $0$ and $\infty$ respectively (we shall continue with our assumption that one of $n,m$ is greater than two), and with prescribed asymptotic directions at the poles, denoted by the set $\mathfrak{a}$.
\subsubsection*{Compatible pair} Throughout this section, $(\mathcal{H}, \mathcal{V}) \in \mathcal{MF}_0(n,m)\times \mathcal{MF}_0(n,m)$ will be a pair of foliations, where the space of measured foliations in the first factor has prescribed asymptotic directions given by $\mathfrak{a}$, and the second factor has opposite asymptotic directions given by $\sqrt{-1}\cdot \mathfrak{a}$ (see Definition \ref{opp}).
We shall further assume that these two foliations are compatible in the sense defined in \S2.2 -- first, they do not both have zero transverse measure (for the non-trivial loop in ${\mathbb C}^\ast$ around the punctures) and second, in the case that one of the poles has order two and their transverse measures $\tau_H$ and $\tau_V$ are positive, then they are compatible for the asymptotic direction at the order two pole (see Definition \ref{compat}).
\subsubsection*{Outline} Our goal in this section is to construct a meromorphic quadratic differential in $Q_0(n,m)$ whose horizontal and vertical foliations are $\mathcal{H}$ and $\mathcal{V}$, respectively. To do this, we consider the decomposition of each foliation into model foliations in the punctured-disk neighborhoods $D_0$ and $D_\infty$ of $0$ and $\infty$ respectively, and a foliation on a central annulus $A$, as in \S4.1.
Our strategy is to first construct
\begin{itemize}
\item[(a)] a flat annulus $A$ that realizes the prescribed pair of foliations $\mathcal{H}\vert_A$ and $\mathcal{V}\vert_A$,
\item[(b)] singular flat metrics on $D_0$ and $D_\infty$, induced by meromorphic quadratic differentials with poles of orders $n$ and $m$ respectively at the punctures, that realize the prescribed pairs of model foliations.
\end{itemize}
Finally, we shall glue these singular-flat pieces to get the desired meromorphic quadratic differential on ${\mathbb C}^\ast$.
\subsubsection*{Constructing singular flat surfaces}
We start with describing the construction in (a) and (b) of the outline above; (a) is handled by Lemmas \ref{lem1a} and \ref{lem1b}, and (b) is handled by Lemmas \ref{lem2a} and \ref{lem2b}.
\begin{figure}
\caption{In the proof of Lemma \ref{lem1a}.}
\label{singfig}
\end{figure}
\begin{lem}\label{lem1a}
Let $t\in \mathbb{R}$ and $\mathcal{H}, \mathcal{V}$ be two measured foliations on an annulus $A$ with positive transverse measures (around $A$) $\tau_H$ and $\tau_V$. Then, there is a unique flat metric (with geodesic boundary) on $A$ induced by a (constant) holomorphic quadratic differential $q$, such that the horizontal and vertical foliations of $q$ are equivalent to $\mathcal{H}, \mathcal{V}$ respectively and the difference of the twist parameters is $t$.
\end{lem}
\begin{proof}
Passing to the universal cover, it suffices to show that there is a unique choice of $c\in \mathbb{C}^\ast$ and $L>0$ such that the desired flat annulus $A$ is the quotient of the infinite strip $S(L) = \{ w \in {\mathbb C}\ \vert \ 0\leq \Im (w) \leq L \}$ equipped with the quadratic differential metric $\tilde{q} = c^2dz^2$, with the infinite cyclic group $\mathbb{Z} = \langle w \mapsto w+1\rangle$.
Let $\text{Arg}(c) = -\beta$; the transverse measures $\tau_V = \lvert c \rvert \lvert \sin \beta \rvert $ and $\tau_H = \lvert c \rvert \lvert \cos \beta \rvert $ since they are the absolute values of the imaginary and real parts of $\int_{[0,1]} \sqrt{\tilde{q}}$ (\textit{c.f.} the remark following Definition \ref{compat}). Thus the two transverse measures determine $\lvert c\rvert$, and an angle $\beta \in (0,\pi)$ up to an ambiguity of sign, i.e.\ either $\beta$ or $\pi - \beta$.
As we shall now see, the sign of the relative twist parameter $t$ fixes the ambiguity in $\beta$, and determines the remaining parameter $L$ uniquely.
Note that the horizontal foliation of $ \tilde{q}$ comprises straight lines at an angle $\beta$ or $\pi-\beta$, and the vertical foliation comprises straight lines at an angle $\pi/2+ \beta$ or $\pi/2 - \beta$. We shall assume that the twist parameter is measured relative to the two basepoints $b_0$ and $b_1$ on the top and bottom boundary components of $S(L)$ respectively, that lie on the same vertical line. Namely, the twist parameter $\sigma_H$ of the horizontal foliation is measured as follows: consider a lift $l \subset S(L)$ of a horizontal leaf that passes through $b_0$, and intersects the other boundary component at a point $b$; then $\sigma_H$ equals the (signed) transverse measure of $\mathcal{H}$ of the interval between $b_1$ and $b$ on the other boundary component. (See Figure 6, which shows the case when the horizontal leaves make an angle $\beta$.) We can calculate $\sigma_H$ by integrating $\sqrt{\tilde{q}}$ along that interval, which has length $L \lvert \cot \beta\rvert$, and taking the real part of the integral. This yields $\sigma_H = \pm L \lvert c\rvert \cot\beta \cos\beta $, where the sign depends on whether the angle of the horizontal foliation is $\beta$ or $\pi-\beta$. Similarly, the twist parameter $\sigma_V$ of the vertical foliation equals $\pm L \lvert c \rvert \cot\beta \sin\beta$, with the opposite dependence on the two possibilities for $\beta$. Since the difference $\sigma_H - \sigma_V = t$ is prescribed, the sign of $\beta$, and $L$ are uniquely determined.
\end{proof}
\begin{lem}\label{lem1b}
Let $\mathcal{H}, \mathcal{V}$ be two measured foliations on an annulus $A$ such that
\begin{itemize}
\item the transverse measure $\tau_H$ of $\mathcal{H}$ around $A$ is zero, i.e.\ $\mathcal{H}$ on $A$ is a ring domain, and
\item $\mathcal{V}$ has positive transverse measure $\tau_V>0$.
\end{itemize}
Then, there is a flat metric (with geodesic boundary) on $A$ induced by a (constant) holomorphic quadratic differential $q$, such that the horizontal and vertical foliations of $q$ are equivalent to $\mathcal{H}, \mathcal{V}$ respectively. Moreover, $q$ is unique if we also specify the transverse measure of $\mathcal{H}$ across the annulus $A$.
\end{lem}
\begin{proof}
As in the proof of the previous lemma, we pass to the universal cover, and consider the infinite strip $S(L) = \{ w \in {\mathbb C}\ \vert \ 0\leq \Im (w) \leq L \}$ equipped with the metric induced by the quadratic differential $\tilde{q} = a^2dz^2$ for some $a\in {\mathbb C}^\ast$, such that $A$ is the quotient of $S(L)$ by the infinite cyclic group $\mathbb{Z} = \langle w \mapsto w+1\rangle$.
In this case, since $\mathcal{H}$ is a ring domain, the horizontal foliation of $\tilde{q}$ is by horizontal lines in $S(L)$, and the vertical foliation is by vertical lines; consequently, $a = \tau_V \in \mathbb{R}^+$. The remaining parameter $L$ (the ``height" of the flat annulus $A$) equals the transverse measure of $\mathcal{H}$ across $A$, if the latter is specified.
\end{proof}
\noindent \textit{Remark.} In the case the transverse measure of $\mathcal{H}$ across $A$ is zero, the flat annulus $A$ is degenerate, i.e.\ it is a circle of length $\tau_V$; in this case we shall still refer to $A$ as a flat metric on an annulus. \\
The construction for (b) in the outline is easier in the case the model foliations on $\mathbb{D}^\ast$ have an order two pole:
\begin{lem}\label{lem2a} Let $\mathcal{H}, \mathcal{V} \in \mathcal{P}_2$ be two model foliations on a punctured disk that are compatible, that is,
\begin{itemize}
\item one of their transverse measures around the boundary of the disk is non-zero, and
\item if both transverse measures are positive, then they are compatible in the sense of Definition \ref{compat}.
\end{itemize}
Then there exists a meromorphic quadratic differential $q$ on $\mathbb{D}^\ast$ whose induced horizontal and vertical foliations are equivalent to $\mathcal{H}$ and $\mathcal{V}$ respectively, and the boundary circle $\partial \mathbb{D}$ is geodesic in the induced singular flat metric. Moreover, the quadratic differential $q$ is unique if we prescribe an asymptotic direction $\theta$ at the puncture, and in the case both transverse measures are positive, we require that the horizontal leaves are incident on the boundary at a prescribed angle $\beta\in (0,\pi)$.
\end{lem}
\begin{proof}
We start with the case when one of the transverse measures equals zero, say $\tau_H = 0$ and $\tau_V>0$, where $\tau_H, \tau_V$ are the transverse measures around $\partial \mathbb{D}$ of the model foliations $\mathcal{H}$ and $\mathcal{V}$ respectively. Passing to the universal cover ${\mathbb H}$, $\mathcal{H}$ and $\mathcal{V}$ lift to a foliation of ${\mathbb H}$ by horizontal lines and vertical lines, respectively. The quadratic differential $\tilde{q} = \tau_V^2 dw^2$ on ${\mathbb H}$ has these as its horizontal and vertical foliations; this is invariant under the translations $\langle w\mapsto w+1 \rangle$ and descends to the desired meromorphic quadratic differential $q$ on ${\mathbb D}^\ast$.
Note that in the case that $\tau_V=0$ and $\tau_H>0$, then we take $\tilde{q} = - \tau_H^2dw^2$ on ${\mathbb H}$; its horizontal and vertical foliations comprise the vertical and horizontal lines on ${\mathbb H}$ respectively, and once again, this quadratic differential descends to ${\mathbb D}^\ast$ to define the desired $q$.
In the case that both transverse measures $\tau_H,\tau_V>0$, then by their compatibility, we can write $\tau_H = \tau \lvert \cos \theta \rvert $ and $\tau_V = \tau \sin \theta$ for some $\tau>0$ and $\theta \in [0, \pi)$, where $\theta$ is the prescribed asymptotic direction of $\mathcal{H}$ at the order two pole. This time, in the universal cover $\mathbb{H}$ we consider the quadratic differential $\tilde{q} = a^2dw^2$ where $a = \tau e^{-i\theta}$. The horizontal and vertical foliations of $\tilde{q}$ are then foliations by straight lines of slopes $\theta$ and $\theta + \pi/2$ (considered modulo $\pi$) respectively. In the quotient ${\mathbb D}^\ast = {\mathbb H}/\langle w \mapsto w+1 \rangle$, the horizontal and vertical transverse measures of the boundary $\partial \mathbb{D}$ correspond to the horizontal and vertical transverse measures of the interval $[0,1] \subset \partial {\mathbb H}$ which are the absolute values of the imaginary and real parts of $a$, respectively. Thus, we have obtained our desired quadratic differential $q$.
Note that the induced metric on $\mathbb{D}^\ast$ determines a semi-infinite Euclidean cylinder $E$ with geodesic boundary, and the horizontal foliation of $q$ intersects the boundary circle $\partial \mathbb{D}$ at the angle $\theta$ (which is necessarily $0$ if $\tau_H=0$). To obtain the desired angle $\beta \in (0,\pi)$ (in the case that $\tau_H>0$), we consider the cylindrical end $\overline{E}$ embedded in $E$, which is bounded by a geodesic circle $C$ chosen such that the horizontal foliation intersects $C$ at angle $\beta$. (See Figure 7.) Since $\overline{E} \cong \mathbb{D}^\ast$, the restriction $q\vert_{\overline{E}}$ defines the desired quadratic differential.
To show the uniqueness statement, observe that since these model foliations do not have any prong-singularities, the metric induced by $q$ is in fact flat, without any singularities. If $\partial \mathbb{D}$ is geodesic, then passing to the universal cover, we obtain a Euclidean half-plane bounded by a bi-infinite straight line, that we can realize as the upper half-plane ${\mathbb H}$ equipped with a constant quadratic differential $q = a^2 dz^2$ for some $a \in {\mathbb C}^\ast$. The constant $a$ is uniquely determined by the prescribed asymptotic direction $\theta$ and the requirement that the transverse measures on the quotient ${\mathbb D}^\ast = {\mathbb H}/\langle w \mapsto w+1 \rangle$ are equal to the prescribed $\tau_H, \tau_V$, exactly as described above. Finally, (in the case that $\tau_H>0$) the sub-cylinder $\overline{E}$ bounded by the geodesic circle intersecting the horizontal foliation at angle $\beta$ is unique up to isometry.
\end{proof}
\begin{figure}
\caption{A cylindrical end corresponding to a pole of order two with positive transverse measure has an embedded sub-cylinder $\overline{E}$.}
\label{singfig2}
\end{figure}
For poles of higher order, the construction for step (b) shall use the work of Au-Wan in the planar case by extending the model foliation on $\mathbb{D}^\ast$ to $\mathbb{C}^\ast$ and passing to the universal cover ${\mathbb C}$:
\begin{lem}\label{lem2b} Let $r > 2$ and let $\mathcal{H}, \mathcal{V} \in \mathcal{P}_r$ be two model foliations on a punctured disk such that at least one of their transverse measures around the boundary of the disk is positive. Then there exists a unique meromorphic quadratic differential $q$ on $\mathbb{D}^\ast$ such that
\begin{itemize}
\item[(i)] the horizontal and vertical foliations of $q$ are equivalent to $\mathcal{H}$ and $\mathcal{V}$ respectively,
\item[(ii)] the boundary circle $\partial \mathbb{D}$ is geodesic in the induced singular-flat metric, and when both transverse measures are positive, the horizontal leaves are incident on the boundary at a prescribed angle $\beta\in (0,\pi)$,
\item[(iii)] the induced metric has at least one prong-singularity on the boundary circle, and
\item[(iv)] $q$ has a prescribed asymptotic direction at the pole.
\end{itemize}
\end{lem}
\begin{proof}
The idea of the proof is to reduce to the planar case as in Theorem \ref{aw}, by considering the lifts of the foliations to the universal cover $\mathbb{H}$, and then extending them to ${\mathbb C}$.
Here, the universal covering map is $\pi:\mathbb{H} \to \mathbb{D}^\ast$ defined by $\pi(w) = e^{2\pi i w}$. The group of deck-translations $\pi_1({\mathbb C}^\ast) = \mathbb{Z}$ acts on $\mathbb{H}$ by the group of translations generated by $w\mapsto w + 1$.
Let $\tau_H, \tau_V$ be the transverse measures of $\mathcal{H}$ and $\mathcal{V}$ respectively; we shall denote the lifts of the latter foliations by $\widetilde{\mathcal{H}}$ and $\widetilde{\mathcal{V}}$ respectively.
In the case the transverse measure $\tau_H>0$ (resp. $\tau_V>0$), one can isotope the leaves of $\widetilde{\mathcal{H}}$ (resp. $\widetilde{\mathcal{V}}$) so that they are orthogonal to $\mathbb{R}$, the boundary of the upper half-plane. Then we can extend $\widetilde{\mathcal{H}}$ (resp. $\widetilde{\mathcal{V}}$) to the entire complex plane ${\mathbb C}$ by appending the foliation by vertical lines on the lower half-plane.
On the other hand, in the case the transverse measure $\tau_H=0$ (resp. $\tau_V=0$), the entire boundary of the upper half-plane comprises leaf segments of the lifted foliation between prong-singularities. This lifted foliation can be extended to ${\mathbb C}$ by appending the foliation by \textit{horizontal} lines on the lower half-plane.
\begin{figure}
\caption{The $\mathbb{Z}$-action on the metric tree $H$.}
\label{singfig3}
\end{figure}
Let $H, V$ be the metric trees that are the leaf-spaces for these extensions of the lifts of $\mathcal{H}, \mathcal{V}$ respectively. Both are metric trees with an action of the infinite cyclic group $\mathbb{Z}$ on them. The structure of these metric trees depends on the transverse measures; we now describe this for $H$ (see Figure 8):
\begin{itemize}
\item[(a)] If $\tau_H =0$, then the foliation on the lower half-plane corresponds to an infinite ray $L$ of $H$; the leaf-space corresponding to the chain of critical leaves constituting $\mathbb{R}$ is a single vertex $v$ that is the root of $L$. The rest of $H$ then comprises a collection of pairwise-isometric metric trees $T_i$, where $i\in \mathbb{Z}$. Each $T_i$ is rooted at $v$, and descends, via the covering map $\pi:\mathbb{H} \to \mathbb{D}^\ast$, to the metric graph that is the leaf space of $\mathcal{H}$. The generator of the group of deck-translations $\pi_1({\mathbb C}^\ast) = \mathbb{Z}$ acts on $H$ by fixing $v$ (and the infinite ray $L$) and takes $T_i$ isometrically to $T_{i+1}$, for each $i\in \mathbb{Z}$.
\item[(b)] if $\tau_H>0$, then the boundary $\mathbb{R}$ of the upper half-plane determines a bi-infinite line $L^\prime$ in $H$; note that $L^\prime$ is also the leaf-space of the foliation on the entire lower half-plane. The rest of $H$ comprises metric trees rooted at vertices on $L^\prime$ invariant under the group $\mathbb{Z}$ of deck-translations that acts on $H$ by translations, such that the quotient is the metric graph that is the leaf-space of $\mathcal{H}$ on ${\mathbb D}^\ast$. Note that $L^\prime$ descends to a cycle on this metric graph of length $\tau_H$.
\end{itemize}
The same description holds for $V$, with $\tau_V$ replacing the role of $\tau_H$ in (a) and (b) above.
As in \S3, these trees $H$ and $V$ can be topologically embedded in ${\mathbb C}$, via equivariant embeddings $i_H: H \to {\mathbb C}$ and $i_V:V\to {\mathbb C}$ where $\mathbb{Z}$ acts on the domain tree as described in (a) or (b) above, and on ${\mathbb C}$ by translations generated by $w\mapsto w+1$.
Moreover, the infinite rays of $H$, and the complementary regions of $V$, acquire a labelling as follows:
Let $\{\alpha_1, \alpha_2,\ldots, \alpha_{r-2}\}$ denote the cyclically ordered foliated half-planes surrounding the pole-singularity of $\mathcal{H}$ at $0$ on ${\mathbb D}^\ast$, where $\alpha_1$ is the half-plane whose boundary is asymptotic to the prescribed asymptotic direction $\theta$ and $\theta + 2\pi/(r-2)$. These correspond to the complementary regions of the metric tree for $\mathcal{V}$. Lifting the labelling of the foliated half-planes to the universal cover and the extended foliation, this induces a labelling of the complementary regions of $V$ in ${\mathbb C}$, by the index set $\{\alpha^i_j \mid i\in \mathbb{Z}, 1\leq j\leq r-2\}$.
We can also label the infinite rays of the metric tree for $\mathcal{H}$ on $\mathbb{D}^\ast$ by $\{a_1, a_2,\ldots, a_{r-2}\}$ in cyclic order, such that the label $a_1$ corresponds to the leaf-space of the half-plane $\alpha_1$ (as defined above).
Passing to the universal cover, and its extension, we obtain a labelling of the infinite rays of $H$ by the index set $\{a^i_j \mid i\in \mathbb{Z}, 1\leq j\leq r-2\}$.
We can now prescribe a bijection $f$ between the complementary regions of $V$ and the infinite rays of $H$ by the corresponding map of labels $\alpha^i_j \mapsto a^i_j$. Thus, we have a pair of metric trees $V$ and $H$ on $\mathbb{C}$, and a bijection $f$ between the complementary regions of $V$ and the infinite rays of $H$. By Theorem \ref{aw}, there is a singular-flat surface $\mathsf{S}$ that is conformally either $\mathbb{C}$ or $\mathbb{D}$, with horizontal and vertical foliations having metric graphs $V, H$, which induces the prescribed bijection $f$.
By construction, the bijection $f$ is $\mathbb{Z}$-equivariant. Namely, let $t_V$ be the relabelling of complementary regions of $V$ and $t_H$ be the relabelling of infinite rays of $H$ induced by the self-homeomorphisms of ${\mathbb C}$ that extends the respective actions of $\mathbb{Z}$ on the trees. Then $t_H\circ f \circ t_V^{-1} = f$. (Note that $t_V$ relabels $\alpha^i_j$ by $\alpha^{i-1}_j$ and $t_H$ relabels $a^i_j$ by $a^{i-1}_j$ for each $i\in \mathbb{Z}$.)
By the $\mathbb{Z}$-equivariance of the embeddings $i_H,i_V$ of $H$ and $V$ into ${\mathbb C}$, together with the uniqueness part of Au-Wan's theorem (see Remark (ii) following Theorem \ref{aw}), the singular-flat surface $\mathsf{S}$ is induced by a holomorphic quadratic differential on ${\mathbb C}$ that is invariant under the action of $\mathbb{Z} = \langle w \mapsto w + 1 \rangle$. The singular-flat metric thus passes to the quotient annulus $\overline{\mathsf{S}}$, with horizontal and vertical foliations equal to the extensions of $\mathcal{H}$ and $\mathcal{V}$ respectively.
Next, we show that in fact the singular-flat annulus $\overline{\mathsf{S}}$ thus obtained is conformally the punctured plane $\mathbb{C}^\ast$. Since the metric trees (and their quotients) are complete, so are the singular-flat metrics on $\overline{\mathsf{S}}$ and $\mathsf{S}$. Moreover, it is easy to prove that the circumference of a disk of radius $R$ on $\mathsf{S}$ grows linearly with $R$: since there is a lower bound on any arc cutting across any fundamental domain of the $\mathbb{Z}$-action on $\mathsf{S}$, such a disk will intersect at most $O(R)$ copies of the fundamental domain. In particular, such a disk will contain $O(R)$ singularities of the singular-flat metric, and an application of the Gauss-Bonnet theorem then completes the argument.
We can then invoke the main result of \cite{Ahlfors-paper} (see also pg. 329 of \cite{Sario}) to conclude that the underlying Riemann surface is parabolic, that is, $\mathsf{S}$ is conformally equivalent to the complex plane $\mathbb{C}$. The quotient $\overline{\mathsf{S}}$ must then be conformally equivalent to $\mathbb{C}/\mathbb{Z}$, namely the punctured plane $\mathbb{C}^\ast$.
It remains to finally restrict to a suitable punctured disk ${\mathbb D}^\ast \subset {\mathbb C}^\ast$. For this, note that by construction, the singular flat metric on $\mathsf{S}$ has a half-plane $E$ where the horizontal and vertical foliations are transverse foliations without singularities, namely, the lower half-plane on ${\mathbb C}$. On $E$ the metric is induced by a constant quadratic differential invariant under a translation such that in the quotient we obtain a pole of order two at $\infty$ on ${\mathbb C}^\ast$ (\textit{c.f.} Definition \ref{sing2}) a neighborhood of which is isometric to a semi-infinite Euclidean cylinder. If both transverse measures $\tau_H,\tau_V$ are positive, consider the maximal (with respect to inclusion) isometrically embedded sub-cylinder $\overline{E}$ such that the horizontal foliation intersects the geodesic boundary $\partial \overline{E}$ at a constant angle $\beta \in (0,\pi)$. (See Figure 7.) Note that if one of the transverse measures $\tau_H$ or $\tau_V$ is zero, then the angle of intersection $\beta$ equals $0$ and $\pi/2$ respectively.
The maximality of $\overline{E}$ implies that the induced singular-flat metric necessarily has prong-singularities on the boundary (otherwise we could take a larger disk). Excising $\overline{E}$, we are left with a singular-flat metric on a conformal punctured disk ${\mathbb D}^\ast$. The corresponding meromorphic quadratic differential on ${\mathbb D}^\ast$ is the desired $q$, whose horizontal and vertical foliations are, by our construction, $\mathcal{H}$ and $\mathcal{V}$ respectively, and properties (i)-(iii) in the statement of the Lemma are satisfied.
This singular-flat metric on $\mathbb{D}^\ast$ satisfying properties (i)-(iii) is unique up to isometry: given any other such singular-flat punctured disk $D^\prime$ we can pass to the universal cover $\mathbb{H}$ and attach a Euclidean half-plane $E^\prime$ by an isometry on the boundary line (which is a straight line by property (ii)). Here, $E^\prime$ is equipped with the metric induced by a constant quadratic differential whose horizontal foliation intersects $\partial E^\prime$ at the prescribed angle $\beta \in (0, \pi)$ if both transverse measures are positive, and at an angle $0$ or $\pi/2$ otherwise. Thus, we obtain a singular-flat surface $\mathsf{S}^\prime$ realizing $H$ and $V$. By the uniqueness part of Theorem \ref{aw}, $\mathsf{S}^\prime$ is isometric to $\mathsf{S}$ via an equivariant isometry, and the quotient surface $\overline{\mathsf{S}^\prime}$ is isometric to $\overline{\mathsf{S}}$. This isometry takes the quotient $\overline{E^\prime}$ of the half-plane $E^\prime$, to the maximal semi-infinite Euclidean cylinder $\overline{E}$ we had defined on $\overline{\mathsf{S}}$. (Such a maximal semi-infinite cylinder $\overline{E}$ in a cylindrical end is unique when we fix the prescribed angle $\beta$.) Hence the isometry restricts to one between $\overline{\mathsf{S}^\prime} \setminus \overline{E^\prime}$ and $\overline{\mathsf{S}} \setminus \overline{E}$, that is, $D^\prime$ is isometric to the punctured disk with the singular flat metric induced by $q$. Since a conformal map between punctured disks is a rotation, the isometry equals the identity map if property (iv) is satisfied, that is, we prescribe the asymptotic direction of $q$ at the puncture. \end{proof}
We can now prove the main result of this section:
\begin{prop}\label{prop2} Let $\mathcal{H}$ and $\mathcal{V}$ be a compatible pair of measured foliations, as introduced in the beginning of \S4.2. Then there exists a unique meromorphic quadratic differential in $Q_0(n,m)$ that has horizontal foliation equivalent to $\mathcal{H}$ and vertical foliation equivalent to $\mathcal{V}$.
\end{prop}
\begin{proof}
From the proof of Proposition \ref{prop1}, the foliations $\mathcal{H}$ and $\mathcal{V}$ decompose into model foliations $H_0,V_0 \in \mathcal{P}_n$ respectively in an open punctured-disk neighborhood $D_0$ of $0$, into model foliations $H_\infty,V_\infty \in \mathcal{P}_m$ respectively in an open punctured-disk neighborhood $D_\infty$ of $\infty$, and a foliated annulus $A$ in between, such that ${\mathbb C}^\ast = D_0 \cup A\cup D_\infty$ for both.
Using Lemma \ref{lem1a} or \ref{lem1b}, there is a unique flat metric on $A$ determined by the parameters of the restrictions $\mathcal{H}\vert_A$ and $\mathcal{V}\vert_A$, such that they are the horizontal and vertical foliations, respectively, with the prescribed \textit{relative} twist parameter (i.e.\ the difference of the twist parameters, as in Lemma \ref{lem1a}), and the boundary components are geodesic circles. From the proofs of these lemmas, the angle at which the horizontal foliation on $A$ intersects the boundary components is uniquely determined; we call this angle $\beta$.
Using Lemma \ref{lem2a} or \ref{lem2b}, there are uniquely defined singular-flat metrics on $D_0$ and $D_\infty$ respectively, such that
\begin{itemize}
\item[(a)] they realize the pairs $(H_0,V_0)$ and $(H_\infty,V_\infty)$ respectively, as their horizontal and vertical foliations,
\item[(b)] the boundary circle is geodesic in both cases, and the horizontal foliation intersects them at the angle $\beta$ as determined above, and
\item[(c)] the horizontal foliations $H_0$ and $H_\infty$ have the prescribed asymptotic directions at the poles at $0$ and $\infty$ respectively.
\end{itemize}
Finally, we glue these singular flat surfaces together to obtain the desired singular-flat metric on ${\mathbb C}^\ast$, and the corresponding meromorphic quadratic differential $q$ in $Q_0(n,m)$. Since the asymptotic directions at the poles $D_0$ and $D_\infty$ are prescribed, the only freedom in this gluing is the number of Dehn-twists in the gluing of $\partial D_0$ with a boundary component of $A$, and in the gluing of $\partial D_\infty$ with the other boundary component of $A$. Since $A$ is an annulus, it is the difference of these two integers that matters to determine the final marked singular-flat structure on ${\mathbb C}^\ast$. (Note that this discussion is relevant only if $A$ is not a ring domain, since otherwise all markings are equivalent by sliding around a closed leaf.)
This integer parameter $d\in \mathbb{Z}$ can be measured in terms of the gluing in the universal cover as follows: choose an integer labelling of the fundamental domains of the $\mathbb{Z}$-action on the lifts $\widetilde{D}_0, \widetilde{A}$ and $\widetilde{D}_\infty$, the lifts of $D_0,A$ and $D_\infty$ respectively, and let $b_0$ and $b_1$ be a choice of basepoints on the boundary components of $\tilde{A}$ that are on the same vertical line (as in Figure 6). Then, if the gluing identifies $b_0$ with a point in the boundary of the $r$-th fundamental domain of $\widetilde{D}_0$, and $b_1$ with a point in the boundary of the $s$-th fundamental domain of $\widetilde{D}_\infty$, we define $d := r-s$.
Recall that the flat metric on the central annulus $A$ takes care of the relative twist parameter of the two foliations $\mathcal{H}\vert_A$ and $\mathcal{V}\vert_A$. The actual twist parameters of $\mathcal{H}$ and $\mathcal{V}$ are then realized by choosing the integer parameter $d$ appropriately. There is a unique such choice, and the marked singular-flat metric on ${\mathbb C}^\ast$, and therefore $q$, is determined uniquely.
\end{proof}
\subsection{Proof of Theorem \ref{thm1}}
We can now complete:
\begin{proof}[Proof of Theorem \ref{thm1}]
The image of the map $\Phi_2: Q_0(n,m) \to \mathcal{MF}_0(n,m) \times \mathcal{MF}_0(n,m)$ lies in the subspace $\mathcal{S}$ of pairs of foliations that are compatible in the sense defined in \S2.2. By Proposition \ref{prop2}, we obtain an inverse to the map $\Phi_2$ defined on $\mathcal{S}$. This implies that, in particular, $\Phi_2$ is an injective map that surjects onto $\mathcal{S}$. Note that the domain $Q_0(n,m) \cong \mathbb{R}^{2n+2m-8}$, and the target $\mathcal{S}$ is a subspace of $\mathcal{MF}_0(n,m) \times \mathcal{MF}_0(n,m)$, which is homeomorphic to $\mathbb{R}^N$ for some $N$. Here $N$ depends on $n,m$ and, in the case that one of them equals $2$, the asymptotic direction at that pole. (In particular, $N = 2n+2m-8$ if both $n,m>2$.)
The continuity of the map $\Phi_2$, or more generally, the map $\Phi$ from the space of quadratic differentials to the space of measured foliations on any surface that assigns the induced horizontal (or vertical) foliation to any quadratic differential, is a standard fact. (In the case of a closed surface, see for example the proof of Theorem 4.7 of \cite{Kerckhoff}, and the references therein.) Briefly, since the topology of the leaf-space, i.e.\ the combinatorial structure of the metric tree of a measured foliation, is locally constant,
the induced horizontal and vertical foliations are locally determined by the corresponding transverse measures. The latter, in turn, are determined by the real and imaginary parts of the periods $\int_\gamma \sqrt q$, where $\gamma$ varies over a collection of arcs between the prong-singularities and homotopically non-trivial simple closed curves on the underlying surface. The continuity of $\Phi$ then follows from the continuity of the relative period map, defined on the space of ``framed" quadratic differentials on the surface (see, for example, Theorem 4.12 of \cite{Bridgeland-Smith}).
Hence by the Invariance of Domain, the map $\Phi_2$ is a homeomorphism onto its image.
\end{proof}
\noindent \textit{Remark.} A consequence of this is that the subspace $\mathcal{S}$ of compatible pairs of foliations, is homeomorphic to $\mathbb{R}^{2n+2m-8}$; this can be verified independently, by analyzing the corresponding parameter space, as in \S4.1.
\subsection{The case when $n=m=2$}
In the special case where both poles have order two, the meromorphic quadratic differential $q$ on ${\mathbb C}^\ast$ is necessarily of the form $q = \frac{a^2}{z^2} dz^2$ where $a\in {\mathbb C}^\ast$. In this case the asymptotic directions at the two poles must be the same, and equal to $-\text{Arg}(a)$ (\textit{c.f.} Definition \ref{sing2}). Thus the space $Q_0(2,2)$ of such quadratic differentials with prescribed (and necessarily equal) asymptotic directions at the poles is homeomorphic to $\mathbb{R}^+$, which can be thought of as the remaining parameter $\lvert a \rvert$.
Let $\mathcal{MF}_0(2,2)$ be the space of measured foliations on ${\mathbb C}^\ast$ with pole-singularities of order $2$ at $0$ and $\infty$, and with prescribed (and equal) asymptotic directions at the poles. By Proposition \ref{prop-p2}, a measured foliation in $\mathcal{MF}_0(2,2)$ is determined by the transverse measure around ${\mathbb C}^\ast$. Note that this transverse measure is zero if the asymptotic directions are $0$, and the foliation lifts to a foliation on ${\mathbb C}$ by horizontal lines.
We then have:
\begin{lem} A pair $(\mathcal{H}, \mathcal{V}) \in \mathcal{MF}_0(2,2) \times \mathcal{MF}_0(2,2)$ (where the prescribed asymptotic directions in the first and second factor are opposite) is realizable as the horizontal and vertical foliations of some $q\in Q_0(2,2)$ if and only if either (a) exactly one of the transverse measures is zero, or (b) both transverse measures are positive, and compatible in the sense of Definition \ref{compat}.
\end{lem}
\begin{proof}
The necessity of either (a) or (b) being satisfied follows from the compatibility of the horizontal and vertical foliations (see \S2.2).
In the other direction, a meromorphic quadratic differential $q \in Q_0(2,2)$ is obtained in either case as follows:
Let $\tau_H, \tau_V$ be the transverse measures around ${\mathbb C}^\ast$ of $\mathcal{H}, \mathcal{V}$ respectively.
If $\tau_H=0$ and $\tau_V>0$, the quadratic differential $\tilde{q} = \tau_V^2 dw^2$ on ${\mathbb C}$, is invariant under the group of translations $\mathbb{Z} = \langle w \mapsto w+1 \rangle$ and defines the desired quadratic differential $q$ on the quotient ${\mathbb C}^\ast = {\mathbb C}/\mathbb{Z}$. If $\tau_V =0$ and $\tau_H>0$, then the quadratic differential $\tilde{q} = - \tau_H^2 dw^2$ on ${\mathbb C}$ descends to the required quadratic differential $q$ on ${\mathbb C}^\ast = {\mathbb C}/\mathbb{Z}$. This handles the case (a).
Finally, for (b), recall from the compatibility of transverse measures that $\tau_V = \tau \sin\theta$ and $\tau_H = \tau \lvert \cos \theta \rvert$ for some $\tau>0$, where $\theta \in (0, \pi)$ is the asymptotic direction at the poles. The quadratic differential $\tilde{q} = a^2 dz^2$ where $a = \tau e^{-i\theta} $ descends to the desired $q$ on ${\mathbb C}^\ast$.
\end{proof}
\section{Proofs of Theorems \ref{thm2} and \ref{thm3}}
In this section, let $S$ be a surface of genus $g$ and $k\geq 1$ labelled punctures, where $2-2g-k<0$, that is, $S$ has negative Euler characteristic. We fix a $k$-tuple $\mathfrak{n} = (n_1,n_2,\ldots, n_k)$ such that each $n_i\geq 2$.
Our proofs shall use some of the constructions of singular-flat metrics described in \S4.2.
\subsection{Proof of Theorem \ref{thm2}}
From the statement of Theorem \ref{thm2}, we are given a pair of measured foliations $(\mathcal{H}, \mathcal{V}) \in \mathcal{MF}_g(\mathfrak{n}) \times \mathcal{MF}_g(\mathfrak{n})$ where the set of asymptotic directions of the measured foliations in the first and second factors are $\mathfrak{a}$ and $\sqrt{-1}\cdot \mathfrak{a}$ respectively. We also know that the pair $\mathcal{H}, \mathcal{V}$ are compatible in the sense of Definition \ref{compat2}. Our task, then, is to construct a meromorphic quadratic differential $q \in Q_g(\mathfrak{n})$ whose horizontal and vertical foliations are (equivalent to) $\mathcal{H}$ and $\mathcal{V}$ respectively.
We shall do this following the same strategy as the construction in Proposition \ref{prop2} in \S4.2.
\begin{proof}[Proof of Theorem \ref{thm2}]
From the proof of Proposition \ref{mfgn-prop}, the surface $S$ can be decomposed into punctured-disk neighborhoods $\{U_i\}_{1\leq i\leq k}$ of each puncture, and a surface-with-boundary $S^\prime = S \setminus (U_1\cup U_2 \cup \cdots U_k)$. The measured foliations $\mathcal{H}$ and $\mathcal{V}$ restrict to measured foliations on $S^\prime$ that we denote by $H_0$ and $V_0$ respectively. Moreover, on each $U_i$ for $1\leq i\leq k$, the restrictions $\mathcal{H}\vert_{U_i}$ and $\mathcal{V}\vert_{U_i}$ are model foliations $H_i,V_i \in \mathcal{P}_{n_i}$. Furthermore, the restrictions $\mathcal{H}\vert_{A_i}$ and $\mathcal{V}\vert_{A_i}$ define foliations $H_i^0,V_i^0$ on an annulus $A_i$ that is a collar of the boundary circle $\partial U_i$.
As in the proof of Proposition \ref{prop2}, by Lemmas \ref{lem1a} and \ref{lem1b}, we can construct a flat metric on each $A_i$ with horizontal and vertical foliations $H_i^0,V_i^0$ respectively. Similarly, by Lemmas \ref{lem2a} and \ref{lem2b}, we can construct a singular-flat metric on $U_i \setminus A_i \cong \mathbb{D}^\ast$ (induced by a quadratic differential $q_i$ with a pole of order $n_i$ at the puncture) whose horizontal and vertical foliations are $H_i$ and $V_i$ respectively. For each $i$, we call this singular-flat punctured disk $D_i$. It follows from the proofs of these Lemmas that one can choose each $q_i$ such that the boundary component shared by $A_i$ and $D_i$ is a geodesic of the same length, such that the prescribed horizontal foliations intersect it at the same angle. We also impose that $q_i$ has an asymptotic direction at the pole-singularity at the $i$-th puncture given by the corresponding entries of $\mathfrak{a}$. By the uniqueness statements in these Lemmas, the set of singular-flat annuli and punctured-disks thus obtained is uniquely determined by $\mathcal{H}$ and $\mathcal{V}$.
On the surface-with-boundary $S^\prime$, we can construct a singular-flat metric realizing the pair $H_0$ and $V_0$, by reducing to the compact surface case by a doubling across the boundaries. Namely, consider the closed surface $\hat{S}$ obtained by taking two copies of $S^\prime$, and identifying the corresponding boundary components such that the closed surface obtained is orientable. This identification along the boundary components does not involve any further twist; if $\gamma_i$ is the simple closed curve arising from the $i$-th boundary component after identification, then there is a diffeomorphism $\phi: \hat{S}\to \hat{S}$ of order two, that fixes each $\gamma_i$ pointwise and is locally a reflection across them. We can assume, after an isotopy, that for each boundary component $\partial U_i$ of $S^\prime$, the foliations $H_0$ and ${V}_0$ are either orthogonal to $\partial U_i$ or parallel to it. Let $\widehat{H_0}$ and $\widehat{V_0}$ be the measured foliations on $\hat{S}$, invariant under $\phi$, obtained by doubling $H_0$ and $V_0$ respectively.
Note that since the original foliations $\mathcal{H}$ and $\mathcal{V}$ are compatible, the transverse measures of $H_0, V_0$ around $\partial U_i$ cannot both be zero; hence the measured foliations $\widehat{H_0}$ and $\widehat{V_0}$ we obtain on the closed surface $\hat{S}$ are transverse. Then there exists a unique holomorphic quadratic differential $\hat{q}$ (with respect to some complex structure) on the closed surface $\hat{S}$, whose horizontal and vertical foliations are equivalent to $\widehat{H_0}$ and $\widehat{V_0}$ respectively. (See, for example, the proof of Theorem 4.7 of \cite{Kerckhoff} and the references therein.) Since these prescribed foliations are invariant under $\phi$, it follows from the uniqueness that $\phi$ is an involutive isometry on the induced singular-flat surface. In particular, the quotient by $\phi$ yields a singular-flat metric on $S^\prime$ with geodesic boundary, whose horizontal and vertical foliations are equivalent to $H_0$ and $V_0$ respectively.
Note that in our preceding construction, the boundary components of singular-flat metric on $S^\prime$ are either completely horizontal (in the case the transverse measure of $H_0$ around it is zero) or completely vertical (in the case that transverse measure is positive). In what follows, we show how to further ensure that the horizontal foliation intersects the $i$-th boundary component at the same angle, as that of the horizontal foliation on the flat $A_i$ that was constructed earlier. Note that this modification is needed only if the transverse measures of both $H_0$ and $V_0$ around the $i$-th boundary component are positive; we call the desired angle $\beta_i\in (0,\pi)$.
For each $i$, take a semi-infinite Euclidean cylinder $R_i$, such that the boundary $\partial R_i$ is either completely horizontal or completely vertical (matching with the $i$-th boundary component $C_i$ on $S^\prime$), and identify $\partial R_i$ with $C_i$ with an isometry that does not introduce any further twists. We thus obtain a complete singular-flat surface $\hat{S}$ with cylindrical ends; note that the horizontal and vertical foliations extend to the whole surface. Now, for each $i$, consider the maximal (with respect to inclusion) open semi-infinite Euclidean cylinder $E_i$ that is isometrically embedded in the $i$-th end, such that the horizontal foliation intersects the geodesic boundary $\partial E_i$ at an angle $\beta_i$ (\textit{c.f.} Figure 7). Excising each $E_i$, we obtain the desired singular-flat surface $S^{\prime\prime}= \hat{S} \setminus (E_1 \cup E_2 \cup \cdots E_k)$ that realizes the horizontal and vertical foliations $H_0,V_0$, but now has the horizontal foliation intersecting each boundary component at a prescribed angle. Note that it is possible that $S^{\prime\prime}$ is topologically not a surface, but has degeneracies; this happens in the case that the closures of, say $E_i$ and $E_j$ intersect along a common boundary arc (\textit{c.f.} the remark following Lemma \ref{lem1b}). In that case, we shall continue to call $S^{\prime\prime}$ a singular-flat surface, despite such degeneracies.
\begin{figure}
\caption{The decomposition used in the proofs of Theorems \ref{thm2} and \ref{thm3}.}
\label{singfig}
\end{figure}
It remains to glue these singular-flat surfaces $\{D_i, A_i\}_{1\leq i\leq k}$ and $S^{\prime\prime}$ along their respective boundaries, as determined by the decomposition of $S$ into $\{U_i \setminus A_i, A_i\}_{1\leq i\leq k}$ and $S^\prime$, to obtain the singular-flat metric on $S$ with horizontal and vertical foliations $\mathcal{H}$ and $\mathcal{V}$. (See Figure 9.) As in the last part of the proof of Proposition \ref{prop2}, the only freedom in this gluing is an integer parameter $d_i \in \mathbb{Z}$ that measures the relative twist between $D_i$ and $S^{\prime\prime}$ across $A_i$, calculated as the difference in the number of Dehn-twists in the gluings of the corresponding boundary components. Recall that the flat metric on $A_i$ realizes the difference of the twist parameters of $\mathcal{H}$ and $\mathcal{V}$ at the $i$-th puncture; each $d_i$ is chosen such that the singular-flat metric on $S$ has the correct marking, and realizes the actual twist parameters for these foliations.
Since the singular-flat subsurfaces we obtained were unique, and so is the integer twist parameter $d_i$ for each $i$, such a singular-flat metric on $S$, and hence the corresponding quadratic differential $q \in Q_g(\mathfrak{n})$, is unique.
\end{proof}
\subsection{Proof of Theorem \ref{thm3}}
We now prove the analogue of the Hubbard-Masur Theorem (\cite{HubbMas}) for meromorphic quadratic differentials. As discussed in \S1, in contrast with the version proved in the work of Gupta-Wolf, our result dispenses with the need to choose a coordinate disk around each puncture. The proof below uses the constructions in \S4.2 to reduce to the case when all poles are of order two, where one can use the main result of \cite{GuptaWolf0}. Note that the latter result does not depend on such a choice of coordinate disk either, since the ``residue" at a pole of order two is coordinate-independent.
In what follows, we shall fix $X\in \widehat{\T}_{g,k}$; recall that $X$ represents a Riemann surface structure on the punctured surface $S$, that we shall denote by $\overline{X}$, together with the additional data of a real twist parameter at each puncture, that we record as a $k$-tuple $\mathfrak{S} = (s_1, s_2,\ldots, s_k)$.
Recall that throughout this paper, markings of $S$ are considered up to an isotopy that fixes a ``framing'' at each puncture, or alternatively, fixes (pointwise) the boundary circles obtained by a real blow-up at each puncture. The twist parameter $s_i$ then records the data of the framing and the marking at the punctures, as follows (\textit{c.f.} Definition 3.3 of \cite{GuptaMj1}):
\begin{itemize}
\item[(a)] The direction of a tangent vector $v_i$ at the $i$-th puncture, or alternatively, a point on the circle obtained as a real blowup of the $i$-th puncture, given by $\exp(i 2\pi s_i)$. This determines a framing, namely, the one given by $v_i$ and $\sqrt{-1} \cdot v_i$.
\item[(b)] The integer $\lfloor s_i \rfloor$ that denotes the number of Dehn twists about the $i$-th puncture.
\end{itemize}
Recall from \S1 that $\pi$ is the projection from $Q_g(\mathfrak{n})$ to $X\in \widehat{\T}_{g,k}$; in what follows $p: \widehat{\T}_{g,k} \to \T_{g,k}$ will be the further projection that forgets the data of the twist parameters. (In particular, note that $p(X) = \overline{X}$.)
As in the hypotheses of Theorem \ref{thm3}, we fix a measured foliation $\mathcal{H} \in \mathcal{MF}_g(\mathfrak{n})$, and a choice of model foliations $F_i \in \mathcal{P}_{n_i}$ for each $1\leq i\leq k$. The set of asymptotic directions $\mathfrak{a}$ of $\mathcal{H}$ is determined by $\mathfrak{S}$: namely, at a pole of order $n_i>2$ the asymptotic direction is exactly the tangent direction as in (a) above, and if $n_i=2$, the asymptotic direction is equal to the angle $2\pi s_i$ (modulo $\pi$). By compatibility, the set of asymptotic directions of $(F_1,F_2,\ldots, F_k)$ is the opposite set $\sqrt{-1}\cdot \mathfrak{a}$ (see Definition \ref{opp}).
Moreover, the integer parameters determined by $\mathfrak{S}$ as in (b) above, are also required to match with integer twist parameters of any measured foliation $\mathcal{F}$ on a punctured surface equipped with a marking, defined as follows:
\begin{defn}[Integer twist parameter]\label{itw} Recall that a measured foliation $\mathcal{F}$ on $S$ restricts to a model foliation on the punctured disk $D_i$ that is a neighborhood of the $i$-th pole. Let $S^\prime = S\setminus (D_1\cup D_2\cup \cdots \cup D_k)$ be the surface-with-boundary obtained by deleting these neighborhoods.
In case the transverse measure of $\mathcal{F}$ around the $i$-th puncture is positive, the integer parameter of $\mathcal{F}$ associated with that puncture on $X$ is the number of Dehn-twists required in the gluing of $D_i$ to the corresponding boundary component of $S^\prime$, in order to obtain the chosen marking. In the case that the transverse measure of $\mathcal{F}$ around the $i$-th puncture is zero, the integer twist parameter is ill-defined, so we ignore such punctures. This is because in that case, the $i$-th boundary component of $S^\prime$ is the boundary of a (possibly degenerate) ring-domain, and changing $\mathcal{F}$ by a Dehn-twist around that puncture yields an equivalent measured foliation.
\end{defn}
Thus, in our case, we shall assume the integer twist parameters of $\mathcal{H}$ on $X$ are equal to the integer parameters determined by $\mathfrak{S}$ as in (b) above. Note that $\mathfrak{S}$ also determines the integer twist parameters of the \textit{vertical} foliation $\mathcal{V}$ of the meromorphic quadratic differential $q\in Q_g(\mathfrak{n})$ that we are aiming to construct.
\begin{proof}[Proof of Theorem \ref{thm3}]
Let $\mathcal{V}$ be the vertical foliation of the desired meromorphic quadratic differential $q\in Q_g(\mathfrak{n})$; since the horizontal foliation of $q$ would be $\mathcal{H}$, note that by Theorem \ref{thm2}, $q$ is uniquely determined by the pair $(\mathcal{H},\mathcal{V})$. The model foliations for $\mathcal{V}$ would be the prescribed foliations $F_1,F_2,\ldots, F_k$ in punctured-disk neighborhoods $D_1,D_2,\ldots, D_k$, respectively, around the punctures. It thus remains to specify the foliation $V_0$ on the surface-with-boundary $S^\prime = S\setminus (D_1\cup D_2\cup \cdots \cup D_k)$.
Recall that we want $\pi(q) = X$, where $X \in \widehat{\T}_{g,k}$ is a punctured Riemann surface equipped with a framing at the punctures, and a marking that remembers the number of Dehn-twists around each puncture, the data of which is encoded by the set $\mathfrak{S}$. From the discussion above, $\mathfrak{S}$ determines the asymptotic directions of the model foliations (of either foliation) on $D_i$, and the number of Dehn-twists in the gluing of $D_i$ to the corresponding boundary component of $S^\prime$. These in turn determine the real twist parameters of $\mathcal{H}$ and $\mathcal{V}$ at the punctures, as described in the proof of Proposition \ref{mfgn-prop}; we denote them by $\hat{\sigma} = (\sigma_1,\sigma_2,\ldots, \sigma_k)$ and $\hat{\sigma}^\prime = (\sigma^\prime_1,\sigma^\prime_2,\ldots, \sigma^\prime_k)$ respectively.
By Lemmas \ref{lem1a} and \ref{lem1b}, for each $1\leq i\leq k$, there is a unique flat annulus $A_i$ with its metric induced by a constant quadratic differential, such that its horizontal and vertical foliations have transverse measures equal to those of $F_i^H$ and $F_i$ around $\partial D_i$, and the difference of their twist parameters equals $\sigma_i - \sigma_i^\prime$, where $\sigma_i$ and $\sigma_i^\prime$ are as defined above. Let $\beta_i \in [0,\pi)$ be the angle at which the horizontal foliation intersects the boundary components of $A_i$. Note that if one of the transverse measures around the $i$-th pole is zero, then $\beta_i$ is necessarily $0$ or $\pi/2$.
By Lemma \ref{lem2a} (if $n_i=2$) or \ref{lem2b} (if $n_i>2$), there is a unique singular-flat metric on each $D_i$, induced by a meromorphic quadratic differential $q_i$, such that
\begin{itemize}
\item[(a)] the horizontal and vertical foliations of $q_i$ are $F_i^H$ and $F_i$ respectively,
\item[(b)] the asymptotic directions of $q_i$ are those prescribed by $\mathfrak{a}$, and
\item[(c)] the horizontal foliation $F_i^H$ intersects the geodesic boundary $\partial D_i$ at an angle $\beta_i$, as defined above.
\end{itemize}
From the proofs of these Lemmas, the lengths of the boundaries of $A_i$ and $D_i$ are equal (they only depend on the transverse measures of $F_i^H$ and $F_i$ around $\partial D_i$, and the angle $\beta_i$). We identify each $\partial D_i$ with one of the boundary components of $A_i$ by an isometry to obtain a singular-flat disk that we denote by $U_i$. The only freedom is the number of Dehn-twists in this gluing; however this is determined by $\mathfrak{S}$, i.e.\ these are chosen such that the twist parameters of the foliations are precisely $\hat{\sigma}$ and $\hat{\sigma}^\prime$. Since the horizontal foliations intersect each boundary at the same angle by (c) above, the singular flat metric on $U_i$ is induced by a meromorphic quadratic differential $q_i$ on a punctured disk ${\mathbb D}^\ast$.
Let $\hat{\tau} = (\tau_1,\tau_2,\ldots, \tau_k)$ be the transverse measures of $F_1,F_2,\ldots, F_k$ respectively, around the boundaries of the corresponding punctured disks. Let $\mathcal{MF}_{g,k}(\hat{\tau})$ be the space of measured foliations on a compact surface of genus $g$ and $k$ labelled boundary components, such that the transverse measures around the boundary components are given by the $k$-tuple $\hat{\tau}$. By Proposition \ref{mfb}, since we are fixing the parameters corresponding to the transverse measures of the boundary, the space $\mathcal{MF}_{g,k}(\hat{\tau}) \cong \mathbb{R}^{6g-6 + 2k}$.
Let $H_0$ be the restriction of $\mathcal{H}$ to the surface-with-boundary $S^\prime$. Given $V_0 \in \mathcal{MF}_{g,k}(\hat{\tau})$, there is a unique singular-flat metric on $S^\prime$ realizing the pair $(H_0,V_0)$ as its horizontal and vertical foliations, obtained by a doubling across the boundaries to get a closed surface and applying the Hubbard-Masur theorem, exactly as in the proof of Theorem \ref{thm2}. Recall that the resulting horizontal and vertical foliations are either orthogonal or parallel to each boundary component, depending on whether its transverse measure around the boundary is zero or positive, respectively. However, we can make the horizontal foliation at the $i$-th boundary component intersect at the angle $\beta_i$ exactly as in the proof of Theorem \ref{thm2}, namely by appending a cylindrical end $R_i$, and truncating it along a suitable open sub-cylinder $E_i$ bounded by a geodesic circle that intersects the horizontal foliation at the desired angle $\beta_i$. Let $S^{\prime\prime}$ be the resulting singular-flat surface (with possible degeneracies when the closures of the sub-cylinders intersect). Recall that in Lemmas \ref{lem2a} and \ref{lem2b}, the angle $\beta_i$ was achieved by exactly the same truncation of a cylindrical end. Hence, the length of the resulting geodesic boundary component $\partial E_i$ of $S^{\prime\prime}$ matches the length of the boundary of $U_i$ obtained above.
Thus, we can identify these boundaries (i.e.\ ``cap off" the $i$-th boundary component in $S^{\prime\prime}$ by the singular-flat punctured-disk $U_i$) to obtain a singular-flat metric on $S$ induced by a meromorphic quadratic differential $q\in Q_g(\mathfrak{n})$. (See Figure 9.) Note that in this gluing we do not introduce any additional twist, since the twist parameter at the $i$-th puncture has already been taken care of by the gluing between $A_i$ and $D_i$.
Thus, by an appropriate gluing, the resulting marking on $S$ is the one on $X$, and the horizontal foliation of $q$ is exactly $\mathcal{H}$. From our construction, the vertical foliation of $q$ restricts to the desired model foliations $F_1,F_2,\ldots, F_k$ at the respective punctures.
Let $\Psi: \mathcal{MF}_{g,k}(\hat{\tau}) \to \T_{g,k}$ be the map defined by $\Psi(V_0) = p\circ \pi (q)$ where $q\in Q_g(\mathfrak{n})$ is the quadratic differential obtained from the construction we just described. It only remains to show that in this construction, there is a unique initial choice of foliation $V_0$ such that the Riemann surface underlying $q$ is the desired one, i.e.\ $\Psi(V_0) = \overline{X}$. This is immediate from the following:\\
\noindent \textit{Claim. The map $\Psi$ is a homeomorphism.} \\
\textit{Proof of claim.} The continuity of $\Psi$ follows from the fact that in the construction above, the singular-flat surface $S^{\prime\prime}$ depends continuously on $V_0$ from the continuity of the Hubbard-Masur map. Since both the target and domain are homeomorphic to $\mathbb{R}^{6g-6 + 2k}$, it suffices to show, by the invariance of domain, that $\Psi$ is a bijection.
Consider the map $\Psi_0: \mathcal{MF}_{g,k}(\hat{\tau}) \to \T_{g,k}$ that ``caps off'' the boundary components of $S^{\prime\prime}$ in a different way, by attaching cylindrical ends as we now describe. Namely, given $V_0$, construct the singular-flat surface $S^{\prime\prime}$ realizing the pair $(H_0,V_0)$ exactly as above. Then, attach the boundary of a semi-infinite Euclidean cylinder $C_i$ to the $i$-th boundary component of $S^\prime$ where $C_i$ is chosen to have a circumference equal to the length of that boundary component, and $C_i$ is equipped with a holomorphic quadratic differential whose horizontal foliation intersects $\partial C_i$ at an angle $\beta_i \in (0,\pi)$, or at zero angle, depending on whether the corresponding transverse measure is positive or zero respectively, or equivalently, whether $\tau_i=0$ or $\tau_i>0$ respectively. (This looks like gluing in the shaded cylinder in Figure 7.) This defines a singular-flat metric on $S$ induced by a meromorphic quadratic differential $q_0 \in Q_g(\mathfrak{n}_0)$ where $\mathfrak{n}_0 = \underbrace{(2,2,\ldots,2)}_{k~\text{times}}$. We then define $\Psi_0(V_0) = p \circ \pi_0 (q_0)$, where $\pi_0$ and $p$ are the forgetful projections $\pi_0: Q_g(\mathfrak{n}_0) \to \widehat{\T}_{g,k}$ and $p: \widehat{\T}_{g,k} \to \T_{g,k}$.
Note that $C_i$ and $U_i$ are defined by two different quadratic differentials on the punctured-disk $\mathbb{D}^\ast$ such that the induced metric on the boundary circle $\partial \mathbb{D}$ is identical; thus, the two different ``capping off" constructions in $\Psi_0$ and $\Psi$ involve exactly the same identification map on the boundary circles. Thus, the resulting surfaces are conformally the same, i.e. the punctured Riemann surfaces underlying $q_0$ and $q$ are identical, and we have $\Psi_0 = \Psi$.
Now let $\mathcal{H}^\prime \in \mathcal{MF}_g(\mathfrak{n}_0)$ be the measured foliation on $S$ obtained by extending $H_0$ on $S^\prime$ as follows: attach cylindrical ends to the boundary components of $S^{\prime\prime}$, as above, and extend $H_0$ on each cylindrical end by geodesic lines spiralling down the end (if the corresponding transverse measure of $H_0$ is positive) or meridional circles (if the corresponding transverse measure of $H_0$ is zero). Note that the asymptotic direction of $\mathcal{H}^\prime$ at the $i$-th puncture is $\beta_i$. In the above construction, the quadratic differential $q_0$ on the punctured Riemann surface $\overline{X} = \Psi_0(V_0)$ has (a) horizontal foliation $\mathcal{H}^\prime$, and (b) a residue at the $i$-th pole that is prescribed by $\tau_i$ and $\beta_i$. By Theorem 1.2 of \cite{GuptaWolf0}, there exists a unique meromorphic quadratic differential $q_0$ on $\overline{X}$ satisfying (a) and (b). The existence of such a $q_0$ implies that $\Psi_0$ is surjective: one can obtain a $V_0$ such that $\Psi_0(V_0) = \overline{X}$ by truncating the cylindrical ends of the $q_0$-metric on $S$, and defining $V_0$ to be the vertical foliation of the resulting surface-with-boundary.
The uniqueness of $q_0$ implies that $\Psi_0$ is injective: if $\Psi_0(V_0) = \Psi_0(V_0^\prime) = \overline{X}$, then the corresponding quadratic differentials $q_0$ and $q_0^\prime$ obtained in the construction are identical; then so are their vertical foliations, and consequently $V_0 =V_0^\prime$. This proves the bijectivity of $\Psi_0$, and consequently of $\Psi$, and concludes the proof.
$\qed$
\end{proof}
\end{document}
\begin{document}
\title{Controlled-Controlled-Phase Gates for Superconducting Qubits \\ Mediated by a Shared Tunable Coupler}
\author{Niklas J. Glaser}
\email{niklas.glaser@wmi.badw.de}
\affiliation{Physik-Department, Technische Universität München, 85748 Garching, Germany}
\affiliation{Walther-Mei{\ss}ner-Institut, Bayerische Akademie der Wissenschaften, 85748 Garching, Germany}
\author{Federico Roy}
\affiliation{Walther-Mei{\ss}ner-Institut, Bayerische Akademie der Wissenschaften, 85748 Garching, Germany}
\affiliation{Theoretical Physics, Saarland University, 66123 Saarbr\"ucken, Germany}
\author{Stefan Filipp}
\email{stefan.filipp@wmi.badw.de}
\affiliation{Physik-Department, Technische Universität München, 85748 Garching, Germany}
\affiliation{Walther-Mei{\ss}ner-Institut, Bayerische Akademie der Wissenschaften, 85748 Garching, Germany}
\affiliation{Munich Center for Quantum Science and Technology (MCQST), Schellingstra\ss e 4, 80799 München, Germany}
\begin{abstract}
Applications for noisy intermediate-scale quantum computing devices rely on the efficient entanglement of many qubits to reach a potential quantum advantage.
Although entanglement is typically generated using two-qubit gates, direct control of strong multi-qubit interactions can improve the efficiency of the process.
Here, we investigate a system of three superconducting transmon-type qubits coupled via a single flux-tunable coupler.
Tuning the frequency of the coupler by adiabatic flux pulses enables us to control the conditional energy shifts between the qubits and directly realize multi-qubit interactions.
To accurately adjust the resulting controlled relative phases, we describe a gate protocol involving refocusing pulses and adjustable interaction times.
This enables the implementation of the full family of pairwise controlled-phase (CPHASE) and controlled-controlled-phase (CCPHASE) gates.
Numerical simulations result in fidelities around \SI{99}{\percent} and gate times below \SI{300}{\nano\second} using currently achievable system parameters and decoherence rates.
\end{abstract}
\maketitle
\section{Introduction}\label{sec:introduction}
Superconducting quantum circuits with fast and high-fidelity single- and two-qubit operations are considered promising candidates for quantum applications~\cite{Kjaergaard2020}.
Recent experiments on quantum processors with dozens of superconducting qubits demonstrate the maturity of this platform~\cite{Arute2019, Jurcevic2021, Mooney2021, Gong2021} and move this technology well into the era of noisy intermediate-scale quantum (NISQ) devices~\cite{Preskill2018}.
Promising NISQ algorithms, such as the variational quantum eigensolver (VQE)~\cite{Kandala2017, Peruzzo2014, Moll2018, Googleaiquantum2020, Ganzhorn2019} for quantum chemistry simulation and the quantum approximate optimization algorithm (QAOA)~\cite{Lacroix2020, Hill2021, Farhi2014, Harrigan2021} for complex optimization tasks, provide useful applications that do not require error correction.
However, these rely on the efficient entanglement of many qubits to ensure that the coherence properties of the quantum system survive over the runtime of the algorithm.
The standard approach for generating multi-qubit entanglement is the digital decomposition of multi-qubit operations into a discrete set of native single- and two-qubit gates~\cite{Barenco1995, Vartiainen2004, Shende2009, Shi2002}.
This comes at the cost of a substantial overhead in runtime and qubit number, in particular if the connectivity between qubits is low.
In superconducting qubit architectures, this has led to the development of problem-specific continuous two-qubit gate sets to partially reduce the overhead~\cite{Ganzhorn2019, Lacroix2020, Foxen2020, Abrams2020}.
A complementary strategy is the direct use of multi-qubit entangling operations, which can significantly enhance the efficiency to create large-scale entanglement.
This strategy is employed in trapped ion systems with convincing demonstrations using common vibrational modes~\cite{Molmer1999, Kielpinski2002, Kranzl2022} and proposals using cavity-mediated interactions~\cite{Sorensen2003, Ramette2022}.
The challenge is, however, to design multi-qubit operations that are fast and accurate when compared to an equivalent two-qubit gate decomposition, to avoid decoherence and reach high fidelities.
For superconducting qubit architectures, several techniques for implementing multi-qubit operations have been investigated, e.g., utilizing non-computational qutrit states to implement more efficient digital decompositions~\cite{Mariantoni2011, Fedorov2012, Hill2021, Nikolaeva2022, Chu2021_QuAND}, applying simultaneous pairwise couplings to generate effective multi-qubit interactions~\mbox{\cite{Kim2022, Baker2021, Gu2021, Nagele2022, Zhang2022}} or introducing a shared coupling element acting as a multi-qubit coupler~\cite{Mezzacapo2014, Paik2016, Song2017, Lu2022, Song2019, Menke2022}.
Each approach comes with its specific advantages and challenges.
For instance, implementations based on qutrit decomposition are easy to calibrate since they use existing gate types but may suffer from decoherence due to long periods spent in higher excited states.
Simultaneous pairwise couplings can be implemented on existing architectures but may be limited by low effective interaction strengths and spurious qubit interactions.
Finally, shared coupling elements provide high connectivity, but exhibit unwanted interaction terms caused by frequency crowding.
In this work we investigate a system of three transmon-type qubits coupled via a shared transmon-type coupler, which is tunable in frequency via an external flux.
Previous work has shown that two-qubit CPHASE gates can be realized by controlling the frequency of the tunable coupler~\cite{Yan2018, Collodo2020, Xu2020, Chu2021, Stehlik2021}.
Here, we extend this scheme to a three-qubit system, and demonstrate that two- and three-body interaction terms can be utilized to implement the full family of controlled-controlled-phase (CCPHASE) and simultaneous pairwise controlled-phase (CPHASE) gates.
These effective interactions are activated by adiabatic flux pulses and originate from conditional energy shifts due to hybridizations of qubit and coupler states within the same excitation manifold.
To control the acquired two- and three-qubit phases we devise a flexible refocusing scheme.
Therefore, strong couplings between the qubits and the tunable coupler are utilized to realize fast entangling gates with a low population in higher excited states and a large on-off coupling ratio.
\section{Description of the system}\label{sec:the-system}
\begin{figure}
\caption{
Circuit representation of the fixed-frequency transmon-type qubit setup. The qubits Q$_1$, Q$_2$ and Q$_3$ with frequencies $\omega_i$ and anharmonicity $\alpha_i$ are capacitively coupled
to a flux-($\Phi_\text{ext}
\label{fig:3Q_architecture}
\end{figure}
\begin{figure}
\caption{
(a) Energy-level diagram for selected states of the three-qubit system. Other states are discussed in Appendix~\ref{app:full_energies}
\label{fig:minimal_eigenergies}
\end{figure}
We consider three fixed-frequency transmon-type~\cite{Koch2007} qubits (Q$_1$, Q$_2$, Q$_3$), coupled via a frequency-tunable coupler (TC) \cite{Mckay2016, Yan2018, Sung2021, Chen2014} [see Fig.~\ref{fig:3Q_architecture}] described by the Hamiltonian
\begin{align}
\begin{aligned}
H_\text{sys} & = H_0 + H_{\text{int}} + H_\text{drive} \\
H_0 & = \sum_{i=1, 2, 3} \omega_i \hat{a}_i^\dagger \hat{a}_i + \frac{\alpha_i}{2} \hat{a}_i^\dagger \hat{a}_i^\dagger \hat{a}_i \hat{a}_i
\\&+ \omega_c(\Phi_\text{ext}) \hat{a}_c^\dagger \hat{a}_c + \frac{\alpha_c}{2} \hat{a}_c^\dagger \hat{a}_c^\dagger \hat{a}_c \hat{a}_c \\
H_{\text{int}} & = \frac{1}{2}\sum_{i \in \{1,2,3,c\}}\sum_{j \neq i} g_{ij} (\hat{a}_i^\dag - \hat{a}_i)(\hat{a}_j^\dag - \hat{a}_j) \\
H_\text{drive} & = \sum_{i=1,2,3} \Omega_i(t) (\hat{a}_i^\dagger + \hat{a}_i),
\label{eq:3Q_sys_Hamiltonian}
\end{aligned}
\end{align}
where we set $\hbar = 1$.
$H_0$ is the Hamiltonian of the bare uncoupled system with creation (annihilation) operators $\hat{a}_i^\dagger$ ($\hat{a}_i$), frequencies $\omega_i$ and anharmonicities $\alpha_i$ of the qubits and the coupler ($i=1,2,3,c$).
The coupler frequency $\omega_c$ can be tuned by applying an external flux $\Phi_\text{ext}$.
In the following we assume the experimentally realizable values $\omega_i/2\pi =\{3.5,4,4.5,4.5\text{--}5.8\}~\text{GHz}$ and $\alpha_i/2\pi = \{-200,-230,-200,-300\}~\text{MHz}$.
We define the bare basis with the eigenstates $\ket{n_c, n_1 n_2 n_3}^0$ of $H_0$, where $n_i$ is the excitation number of element $i$.
The interaction Hamiltonian $H_\text{int}$ models the capacitive couplings between the elements, for which we choose $g_{ic}/2\pi = \{150,150,120\}~\text{MHz}$ between qubits Q$_i$ and the tunable coupler TC and stray direct qubit-qubit couplings $g_{i,i+1}/2\pi = \{13,14,10\}~\text{MHz}$, which are an order of magnitude smaller.
The term $H_\text{drive}$ models microwave drives on qubits Q$_i$ with respective Rabi rates $\Omega_i(t)$.
At idle times and for single-qubit operations the coupler frequency $\omega_{c,\text{idle}}/ 2\pi = \SI{5.8}{\giga\hertz}$ is chosen to lie above the qubit frequencies where the effective coupling between the qubits is minimized~\cite{sete_2021a}.
Tuning the coupler frequency close to the qubit frequencies leads to energy shifts and enhanced interaction strengths between the qubits~\cite{Yan2018}, as shown in the energy-level diagram in Fig.~\ref{fig:minimal_eigenergies}(a).
To describe the protocol based on adiabatically modifying the frequency of the tunable coupler, we use the notation $\ket{n_1 n_2 n_3}(\omega_{c})$ for an instantaneous eigenstate of the system within the $(n_1+n_2+n_3)$-excitation manifold which is adiabatically connected to the initial state $\ket{n_1 n_2 n_3}(\omega_{c,\text{idle}}) \approx \ket{0, n_1 n_2 n_3}^0$ with zero excitations in the coupler.
Since we are interested only in the dynamics in the qubit subspace, we diagonalize the Hamiltonian in Eq.~\eqref{eq:3Q_sys_Hamiltonian} and restrict ourselves to the computational qubit states with zero excitations in the coupler.
This results in the Hamiltonian
\begin{equation}
\begin{aligned}
\tilde{H}_\text{comp} & = \sum_{n_1, n_2, n_3\in\{0,1\}} \tilde{\omega}_{n_1 n_2 n_3} \ket{n_1 n_2 n_3}\bra{n_1 n_2 n_3},
\label{eq:comp_Hamiltonian}
\end{aligned}
\end{equation}
where $\tilde{\omega}_{n_1 n_2 n_3}$ are the instantaneous eigenfrequencies of the adiabatic states $\ket{n_1 n_2 n_3}$.
Tuning the coupler frequency close to the qubit frequencies results in avoided crossings and energy shifts of the adiabatic states.
The first relevant avoided crossings occur when the tunable coupler and the qubit with the highest transition frequency (Q$_3$) become resonant ($\omega_c = \omega_3$).
These crossings, denoted by stars ($\star$) in Fig.~\ref{fig:minimal_eigenergies}, occur for all adiabatic states with one excitation in Q$_3$, i.e., $\ket{001},\ket{011},\ket{101}\text{(not shown)}$ and $\ket{111}$.
Lowering the coupler frequency further leads to a hybridization and an energy shift $\chi_{011}$ on $\ket{011}$ when the second excited state of the coupler $\ket{2,000}^0$ and the state $\ket{1,010}^0$ become resonant ($\omega_c =\omega_2 - \alpha_c$), as denoted by the hexagon ($\hexagon$) in Fig.~\ref{fig:minimal_eigenergies}.
Similarly, the state $\ket{111}$ in the three-excitation manifold is shifted in energy by $\chi_{011}$ due to the $\ket{2,100}^0 \leftrightarrow \ket{1,110}^0$ interaction.
These additional energy shifts introduce a two-body interaction term between qubits Q$_2$ and Q$_3$.
Finally, the avoided crossing of the bare states $\ket{3,000}^0$ and $\ket{2,100}^0$, denoted by a square ($\square$) in Fig.~\ref{fig:minimal_eigenergies},
leads to a hybridization of the adiabatic state $\ket{111}$.
The resulting energy shift $\chi_\text{CCP}$ introduces the targeted three-body interaction for implementing controlled-controlled-phase gates.
We note that a negative coupler anharmonicity is required for the optimal succession of the avoided crossings for the generation of the discussed energy shifts.
The relevant two- and three-qubit energy shifts are determined from the diagonalized Hamiltonian as
\begin{align}
\begin{aligned}
\chi_{011} &= \tilde{\omega}_{011} - (\tilde{\omega}_{001} + \tilde{\omega}_{010}) \\
\chi_{101} &= \tilde{\omega}_{101} - (\tilde{\omega}_{001} + \tilde{\omega}_{100}) \\
\chi_{110} &= \tilde{\omega}_{110} - (\tilde{\omega}_{010} + \tilde{\omega}_{100}) \\
\chi_\text{CCP} &= \tilde{\omega}_{111} - (\tilde{\omega}_{001} + \tilde{\omega}_{010} + \tilde{\omega}_{100}) \\ &\qquad- (\chi_{011} + \chi_{101} + \chi_{110}).
\end{aligned}
\label{eq:frequency_shifts}
\end{align}
Tuning the coupler frequency close to the aforementioned avoided crossings results in large shifts of $\chi_\text{CCP}>\SI{150}{\mega\hertz}$ and $\chi_{011}>\SI{120}{\mega\hertz}$ [see Fig.~\ref{fig:minimal_eigenergies}(b)].
We choose the coupler frequency for gate operation at $\omega_c^{\text{op}}/ 2\pi = \SI{4.5}{\giga\hertz}$ to balance the magnitude of the energy shifts and the population losses due to non-adiabatic effects of the flux pulses; however, the optimal working points for idling and gate operation depend on the specific Hamiltonian parameters.
At the idling position $\omega_{c, \text{idle}}$ only minimal energy shifts, below $\SI{80}{\kilo\hertz}$, arise [see inset of Fig.~\ref{fig:minimal_eigenergies}(b)].
To describe the entangling action of the Hamiltonian $\tilde{H}$, we switch to a frame rotating at the individual qubit frequencies
\begin{equation}
\begin{aligned}
\tilde{H}= \chi_{011} H_{23} + \chi_{101} H_{13} + \chi_{110} H_{12} + \chi_\text{CCP} H_\text{CCP},
\label{eq:cphase_Hamiltonian}
\end{aligned}
\end{equation}
with the two-qubit CPHASE$_{kl}$ terms $H_{kl} = \ket{11}_{kl}\bra{11}_{kl}$,
acting on the qubit subspace Q$_k$ and Q$_l$, and a three-qubit CCPHASE term $H_\text{CCP} = \ket{111}\bra{111}$.
In experiments this frame rotation corresponds to applying single-qubit Z-phase gates~\cite{Mckay2017}.
In this frame, an evolution under $\tilde{H}$ corresponds to a combination of controlled-phase (CPHASE) and controlled-controlled-phase (CCPHASE) gates
\begin{align}
\begin{aligned}
U(\phi_{011},\phi_{101},\phi_{110},\phi_\text{CCP}) =
\exp(- i \int_0^{\tau} \tilde{H}(t) \diff t) \\
\quad\,\, = \text{CPHASE}_{12}(\phi_{110})\times \text{CPHASE}_{13}(\phi_{101})\times \\
\quad\quad\text{CPHASE}_{23}(\phi_{011})\times \text{CCPHASE}(\phi_\text{CCP}),
\end{aligned}
\end{align}
with the entangling phases
\begin{align}
\label{eq:cond_phases}
\phi_j = - \int_0^{\tau} \chi_j \diff t & & \text{for } j\in\{011,101,110,\text{\small CCP}\}.
\end{align}
\section{Multi-qubit gates by Hamiltonian refocusing}\label{sec:hamiltonian-refocusing}
To implement a pure CCPHASE$(\phi_\text{CCP})$ gate without two-qubit CPHASE contributions the conditions \mbox{$\phi_{011}=k\cdot2\pi,\ \phi_{101} = l\cdot2\pi,\ \phi_{110} = m\cdot2\pi$} for Eq.~\eqref{eq:cond_phases} need to be fulfilled with integers $k,l,m$.
However, using only the external flux applied on the coupler as a control parameter, the gate duration is bounded from below by the time needed to accumulate at least a phase of $2\pi$ on all states.
Additionally, matching all phases may require complex trajectories or a greater time overhead.
These problems can be resolved by utilizing a refocusing scheme~\cite{Meiboom1958, Haeberlen1968, Brinkmann2016}, where the evolution under $\tilde{H}$ is interleaved with single-qubit $\pi$-pulses to permute the states accumulating the entangling phases.
Effective two- and three-qubit gate Hamiltonians such as
\begin{align}
\label{eq:wanted_eff_ham}
H_\text{eff}^{kl}=\chi_\text{eff}H_{kl} & & \text{and} & & H_\text{eff}^\text{CCP}=\chi_\text{eff}H_\text{CCP}
\end{align}
can then be realized by choosing interaction times such that unwanted phase contributions cancel out, as discussed in the following.
\subsection{Controlled-phase gate}
\begin{figure}
\caption{
Numerical simulation of a CPHASE$_{13}
\label{fig:figure_CPHASE}
\end{figure}
\begin{table}[t]
\centering
\includegraphics[width=0.9\linewidth]{figures/Tab1_eff_ham}
\caption{
Sign of the coefficients of the effective Hamiltonians in Eq.~\eqref{eq:cphase_Hamiltonian} after applying leading and trailing $\pi$-pulses on the respective qubits denoted by an X ($\mathbb{1}$ denotes no pulse).
Each Hamiltonian term $H_{kl}, H_\text{CCP}$ has positive or negative coefficients from any of the conditional shifts $\chi_{011},\chi_{101},\chi_{110}, \chi_{\text{CCP}}$, empty cells denote vanishing coefficients.
The implementation of the proposed CCPHASE gate makes use of the first four permutations in the table (light grey).
}
\label{tab:permutated_shifted_freqs}
\end{table}
To generate a two-qubit CPHASE gate the non-interacting qubit needs to be dynamically decoupled~\mbox{\cite{Hahn1950,Carr1954}} from the rest of the system so that only interactions between the other two qubits remain.
As an example, we consider a CPHASE gate between Q$_1$ and Q$_3$, for which two interaction periods of equal duration $\tau$ are interleaved with two $\pi$-pulses on $Q_2$ ($X_2$) as shown in Fig.~\ref{fig:figure_CPHASE}(a).
The two $\pi$-pulses alter the action of the second interaction Hamiltonian as
\begin{align}
\label{eq:refocusing}
\begin{aligned}
\tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}} = & \,\, X_2\,\tilde{H}\,X_2\\
\cong & - \chi_{011} H_{23} + (\chi_{101}+\chi_\text{CCP}) H_{13} \\
& - \chi_{110} H_{12} - \chi_\text{CCP} H_\text{CCP},
\end{aligned}
\end{align}
where $\cong$ defines an equality that neglects global and single-qubit terms (see Appendix~\ref{app:refocusing-derivation} for further details).
In $\tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}}$ the sign of all components is inverted except for $H_{13}$, which increases by $\chi_\text{CCP}$.
The action of the full CPHASE sequence is then given by
\begin{align}
\begin{aligned}
&X_2 e^{-i\tau\tilde{H}} X_2 e^{-i\tau\tilde{H}}
=e^{-i\tau[\tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}} + \tilde{H}]}
\\&=e^{-i\tau[2\chi_{101}+\chi_\text{CCP}]H_{13}}
=\text{CPHASE}_{13}(\phi_{101}),
\end{aligned}
\end{align}
where we have used the equality $U e^{- i \tilde{H}} U^{-1} = e^{-i U\tilde{H}U^{-1}}$ and that the interaction Hamiltonians are diagonal and, hence, commute with each other.
This is equivalent to evolving for $2\tau$ under the effective Hamiltonian of Eq.~\eqref{eq:wanted_eff_ham} with $\chi_\text{eff}=\chi_{101}+\chi_\text{CCP}/2$ and $\phi_{101}=\tau[2\chi_{101}+\chi_\text{CCP}]$.
To verify the performance of the CPHASE gate we numerically simulate the CPHASE$_{13}(\pi)$ gate resulting in a fidelity of $99.77\%$ at a gate time of approximately \SI{100}{\nano\second} (see Appendix~\ref{app:simulation} for simulation details), assuming that decoherence times are much longer than the gate times.
To tune the coupler from its idle position~$\omega_c^\text{id}=\SI{5.8}{\giga\hertz}$ to the operation point $\omega_c^\text{op}=\SI{4.5}{\giga\hertz}$ we use rectangular pulses with Gaussian edges:
\begin{equation}
\label{eq:half_Gaussian_square_pulse}
\omega_c(t) = \omega_c^\text{id} + (\omega_c^\text{op}-\omega_c^\text{id}) \erf\left(\frac{t}{\tau_R}\right) \erf\left(\frac{\tau-t}{\tau_R}\right),
\end{equation}
where $\tau=\SI{27}{\nano\second}$ is the width of the pulse and $\erf$ is the Gaussian error function with a rise time characterized by $\tau_R=\SI{5}{\nano\second}$ [Fig.~\ref{fig:figure_CPHASE}(b)].
The $\pi$-pulses are driven by a microwave pulse, with a \SI{20}{\nano\second} DRAG-corrected Gaussian envelope~\cite{Motzoi2009}, with fidelities $> \SI{99.96}{\percent}$.
The numerically simulated evolution of the entangling phases is shown in Fig.~\ref{fig:figure_CPHASE}(c).
Initially all entangling phases evolve at a positive rate.
However, after the $\pi$-pulse on Q$_2$ all accumulation rates are inverted except for $\phi_{110}$, resulting in the desired entangling phases of $\phi_{110}=\phi_{011}=\phi_\text{CCP} = 0$ and $\phi_{101} = \pi$.
With this approach we can realize all pairwise two-qubit CPHASE$_{kl}$($\phi_{kl}$) gates by performing interleaved $\pi$-pulses on the qubit not participating in the gate.
The effective Hamiltonians for the different combinations of enclosing $\pi$-pulses are listed in Tab.~\ref{tab:permutated_shifted_freqs}.
\subsection{Controlled-controlled-phase gate}
To implement a CCPHASE gate the refocusing technique can be extended to four interaction periods interleaved with single-qubit $\pi$-pulses on different qubits, as shown in Fig.~\ref{fig:ccphase_gate}(a).
For the observed relation,
\onecolumngrid
$\chi_{101} > \chi_\text{CCP} > \chi_{110}$, we choose the permutations $\mathbb{1}\mathbb{1}\mathbb{1}$, $\mathbb{1}\mathrm{X}\mathbb{1}$, $\mathrm{X}\mathrm{X}\mathbb{1}$ and $\mathbb{1}\mathrm{X}\mathrm{X}$.
We skip repeated single-qubit $\pi$-rotations which appear between permutations and thus apply six single qubit gates in total.
To find the correct duration of each flux pulse we solve the system of equations
\begin{equation}
\label{eq:full_ccphase_lin_equation}
\begin{pmatrix}
\phi_{23} + k_{23} 2\pi \\
\phi_{13} + k_{13} 2\pi \\
\phi_{12} + k_{12} 2\pi \\
\phi_\text{CCP} + k 2\pi
\end{pmatrix}
=
\begin{pmatrix}
\chi_{011} & - \chi_{011} & - \chi_{011} - \chi_\text{CCP} & \chi_{011}
\\
\chi_{101} & \chi_{101} + \chi_\text{CCP} & - \chi_{101} - \chi_\text{CCP} & - \chi_{101} - \chi_\text{CCP}
\\
\chi_{110} & - \chi_{110} & \chi_{110} & - \chi_{110} - \chi_\text{CCP}
\\
\chi_\text{CCP} & - \chi_\text{CCP} & \chi_\text{CCP} & \chi_\text{CCP}
\end{pmatrix}
\begin{pmatrix}
\tau_{\mathbb{1}\mathbb{1}\mathbb{1}} \\ \tau_{\mathbb{1}\mathrm{X}\mathbb{1}} \\ \tau_{\mathrm{X}\mathrm{X}\mathbb{1}} \\ \tau_{\mathbb{1}\mathrm{X}\mathrm{X}}
\end{pmatrix}
\end{equation}
\twocolumngrid
\noindent derived from the Hamiltonian coefficients in Table~\ref{tab:permutated_shifted_freqs},
assuming a linear increase of entangling phases with the duration of the flux pulses (see Appendix~\ref{app:pulse_par_opt}).
As for the two-qubit CPHASE gate above, we simulate the coherent dynamics for a $\text{CCPHASE}(\pi)$ gate, with pulse durations (55,25,65,20)~\si{\nano\second} and plot the evolution of the entangling phases in Fig.~\ref{fig:ccphase_gate}(b).
With these timings, we obtain final entangling phases of $\phi_{110} = 0, \phi_{011} = \phi_{101} = -2\pi$ and $\phi_\text{CCP} = 3\pi$ and a simulated fidelity of \SI{99.58}{\percent} in \SI{245}{\nano\second} including the single qubit gates [see Fig.~\ref{fig:ccphase_gate}(c)]. Leakage caused by imperfectly adiabatic pulses is the main coherent error contribution, as discussed in Section~\ref{sec:error_contribution}.
\begin{figure}
\caption{
Numerical simulation of a CCPHASE$(\pi)$ gate. (a)
Four independent adiabatic pulses modulate the frequency $\omega_c$ of the coupler, with durations $\tau_{\mathbb{1}
\label{fig:ccphase_gate}
\end{figure}
\begin{figure}
\caption{
Continuous CCPHASE($\phi_\text{CCP}
\label{fig:gate_times_param_gate}
\end{figure}
\subsection{Generalized controlled-controlled-phase gate}
Applying the same refocusing scheme, Eq.~\eqref{eq:full_ccphase_lin_equation} can be used to determine pulse durations that result in arbitrary two- and three-qubit entangling phases.
This method therefore provides full control over all entangling phases and directly allows for the implementation of the generalized three-qubit controlled-phase gate
\begin{align}
\begin{aligned}
U_{3\text{Q}\phi} & (\phi_{12},\phi_{13},\phi_{23},\phi_\text{CCP}) = \\
= & \text{CPHASE}_{12}(\phi_{12})\times \text{CPHASE}_{13}(\phi_{13})\times \\
& \text{CPHASE}_{23}(\phi_{23})\times \text{CCPHASE}(\phi_\text{CCP}),
\end{aligned}
\end{align}
which corresponds to the simultaneous application of pairwise two- and three-qubit controlled-phase gates.
In particular, we can implement a CCPHASE gate with an arbitrary angle $\phi_\text{CCP}$.
We numerically evaluate the gate fidelities and pulse durations for $\phi_\text{CCP}$ continuously varying between $0$ and $2\pi$, as shown in Fig.~\ref{fig:gate_times_param_gate}.
We find that all phase combinations can be realized with total gate lengths between \SI{195}{\nano\second} and \SI{270}{\nano\second}.
Without including decoherence, the fidelities for all implementations lie between $99.1\%$ and $99.6\%$, with oscillations due to periodic leakage effects (see Appendix~\ref{app:pulse_par_opt} for more details).
Similarly, $U_{3\text{Q}\phi}$ gates with arbitrary settings of both the two- and three-qubit phases $\phi_{ij}$ and $\phi_\text{CCP}$ result in gate times below \SI{300}{\nano\second} and gate fidelities above $99\%$ (not shown).
\section{Error Contributions}\label{sec:error_contribution}
To assess the expected performance of the gate operations, we evaluate the error contributions from coherent errors and from decoherence on the CCPHASE($\pi$) gate.
The coherent errors of \SI{0.42}{\percent} are dominated by leakage, i.e., all population losses $\Lambda_s = \sum_{f \neq i} |\bra{f} U \ket{i}|^2$ from computational states $\ket{i}\in\mathcal{C}$ to states outside the computational subspace $\ket{f}\in s=\bar{\mathcal{C}}$ [blue bars in Fig.~\ref{fig:ccphase_gate}(d)], which are caused by imperfect adiabatic pulses.
Transitions to other computational states ${\ket{f}\in s=\mathcal{C}\setminus \{\ket{i}\}}$ [orange bars in Fig.~\ref{fig:ccphase_gate}(d)] caused by imperfections in single-qubit gates are another coherent error source leading to off-diagonal elements in the propagator [Fig.~\ref{fig:ccphase_gate}(c)].
For a single flux pulse most of the losses occur from the states
$\ket{111}$ and $\ket{011}$ (see Appendix~\ref{app:pulse_par_opt}).
However, permuting the states with the interleaved single-qubit pulses distributes the leakage over all computational states.
Note that the coherent errors scale with the number of flux pulses.
For the CPHASE gate, which uses two flux pulses, coherent errors amount to \SI{0.23}{\percent}, roughly a factor of two smaller than for the CCPHASE gate, which uses four.
In addition to leakage, decoherence of both qubits and the coupler will limit the achievable gate fidelities.
In particular, because of the transmon-type tunable coupler, charge noise may induce errors due to the hybridization in higher-excitation manifolds.
We therefore simulate the open-system dynamics of the system by solving the time-evolution under the Lindblad master equation
\begin{align}
\begin{aligned}
\dot{\hat{\rho}}(t)&=-i[H, \hat{\rho}(t)]+\sum_{k} \left(\hat{L}_{k} \hat{\rho}(t) \hat{L}_{k}^{\dagger}-\frac{1}{2}\left\{\hat{L}_{k}^{\dagger} \hat{L}_{k}, \hat{\rho}(t)\right\}\right) \label{eq:Lindblad_Master_equation}
\end{aligned}
\end{align}
with the Hamiltonian $H$, the density matrix $\rho$ and the collapse operators $\hat{L}_k$ accounting for relaxation and dephasing.
We assume relaxation rates that are linearly increasing with excitation number
$\Gamma_1^{(m+1,m)} = \frac{m+1}{T_1}$
where $\Gamma_1^{(j,m)}$ is the decay rate from state $\ket{j}$ to $\ket{m}$, and $T_1$ is the relaxation time of the first excited state~\cite{Koch2007, Peterer2015}.
The pure dephasing rate $\Gamma_{\phi}^{(m)} = \Gamma_{\phi,c}^{(m)} + \Gamma_{\phi,e}^{(m)}$ between states $\ket{m}$ and $\ket{m+1}$ is modelled as the sum of a constant term $\Gamma_{\phi,c}^{(m)} = \frac{1}{T_{\phi}}$
and an energy-level-dependent term $\Gamma_{\phi,e}^{(m)}$ incorporating charge noise~\cite{Koch2007, Burnett2019, Rol2019}.
We account for a charge noise dephasing rate $\Gamma_{\phi, e}^{(m)} = \pi A_n |\epsilon_m|$ given the charge noise strength $A_n$ and the approximated charge dispersion amplitude~\cite{Koch2007}
\begin{align}
\epsilon_{m} \simeq(-1)^{m} E_{C} \frac{2^{4 m+5}}{m !} \sqrt{\frac{2}{\pi}}\left(\frac{E_{J}}{2 E_{C}}\right)^{\frac{m}{2}+\frac{3}{4}} e^{-\sqrt{8 E_{J} / E_{C}}}. \label{eq:charge_dispersion}
\end{align}
We assume equal decoherence rates on all qubits and couplers. Although flux-tunable transmons tend to have higher dephasing rates due to their sensitivity to flux noise, this can be mitigated by introducing a coupler with an asymmetric SQUID \cite{Koch2007} and choosing idling and operating points to be at the flux sweet spots.
We determine the individual contributions of each error channel by comparing the simulations with the respective error channel turned on and off.
We find that the errors are independent of each other and can be linearly added.
Using typical values of $T_1 = \SI{84}{\micro\second}$, $T_\phi=\SI{124}{\micro\second}$ and $A_n=\SI{6e-5}{}\,e$ (see e.g.\ Peterer et al.~\cite{Peterer2015}), we obtain a total CCPHASE$(\pi)$ gate infidelity of \SI{1.30}{\percent}, as compared to only coherent errors of \SI{0.42}{\percent}, as shown in Fig~\ref{fig:CCPHASE_decoherences}(a).
Keeping the noise amplitude fixed we evaluate infidelities for varying values of $T_1$ and $T_\phi$, assuming that as the coherence times improve the ratio between them will remain approximately $T_1=T_\phi/2$.
For $T_1=\SI{50}{\micro\second}$ we find that decoherence is the dominant contribution, however, for state-of-the-art coherence times $T_1 > \SI{100}{\micro\second}$~\cite{Place2021,Wang2022} decoherence and coherent errors become comparable, with total errors of approximately $\SI{1}{\percent}$ and below.
The error caused by charge noise with a typical strength $A_n=\SI{6e-5}{}\,e$~\cite{Christensen2019, Peterer2015} is expected to be below $\SI{0.1}{\percent}$, with the error scaling roughly linearly with the charge noise amplitude, as shown in Fig.~\ref{fig:CCPHASE_decoherences}(b).
\begin{figure}
\caption{Effect of coherent errors and decoherence on CCPHASE gate fidelities.
(a) The infidelity of the numerically simulated CCPHASE gate for different relaxation times $T_1$ and pure dephasing times $T_\phi$.
The first bar shows the results of a model according to~\cite{Peterer2015}
\label{fig:CCPHASE_decoherences}
\end{figure}
\section{Discussion and Outlook}\label{sec:discussion-and-outlook}
In summary, we propose a system of three qubits coupled via a shared tunable coupler and develop a pulse scheme that implements generalized three-qubit controlled-phase gates.
Adiabatic flux pulses allow us to tune the two- and three-body interaction strengths.
The resulting entangling phases are controlled by interleaving interaction periods with single-qubit refocusing pulses.
With this method, we show in numerical simulations that three-qubit controlled-controlled-phase gates can be realized in less than \SI{300}{ns} with fidelities above \SI{99}{\percent} for all desired entangling phases when the effects of decoherence can be ignored.
Taking realistic values for qubit and coupler coherence times into account we expect an added gate error below \SI{1}{\percent}.
Recent implementations of three-qubit gates include cross-resonance-type iToffoli gates~\cite{Kim2022} with fidelities of \SI{98.3}{\percent} in \SI{353}{\nano\second}, simultaneous parametric drive gates~\cite{Warren2022} with fidelities of \SI{97.9}{\percent} in \SI{250}{\nano\second}, M{\o}lmer-S{\o}rensen-type gates~\cite{Lu2022} with fidelities of \SI{90.5}{\percent} in \SI{217}{\nano\second}, and decomposed CCPHASE gates~\cite{Hill2021} with fidelities of \SI{87.1}{\percent} in \SI{402}{\nano\second}.
By directly utilizing the strong qubit-coupler interactions and a flexible pulse scheme, the proposed CCPHASE gate has the potential to improve both speed and fidelity as compared to these recent realizations of three-qubit gates on superconducting qubits.
Moreover, the studied three-qubit coupler refocusing scheme allows for the implementation of pairwise controlled-phase gates with adjustable phases, thus providing greater connectivity and flexibility in comparison to two-qubit couplers.
The proposed protocol can be modified in a number of ways to account for different experimental conditions.
In the presence of strong charge noise the operating point of the coupler $\omega_c^\text{op}$ could be lowered, thus reducing charge noise sensitivity at the cost of longer gate times.
Furthermore, the refocusing scheme can be adapted to result in a net-zero total applied flux, a technique known to reduce sensitivity to long-term correlated flux noise if present~\cite{Rol2019}.
To reduce leakage errors, flux pulses could be individually optimized to harness destructive interference between multiple transitions~\cite{Shevchenko2010} and ensure local adiabaticity~\cite{Roland2002}.
In general, a refocusing scheme similar to that presented can be applied in other superconducting qubit architectures.
For example, in systems with two-qubit couplers and simultaneous interactions~\cite{Baker2021} it could provide greater control of all interaction terms and shorter gate durations.
Moreover, the number of qubits connected to the coupler could be increased further, allowing for strong and controllable many-body Hamiltonians with application in variational algorithms and Hamiltonian simulations.
Finally, in the context of quantum applications, the investigated architecture is a promising candidate for the implementation of variational algorithms designed to solve optimization problems.
In particular, the problem Hamiltonian for MAX-3-SAT problems can be directly implemented by the generalized controlled-phase gate, providing an improvement in speed and accuracy over a gate decomposition into single- and two-qubit gates.
\section{Acknowledgments}\label{sec:acknowledgments}
We thank Ivan Tsitsilin, Gerhard Huber and Franz Haslbeck for insightful discussions.
We acknowledge funding from the European Commission Marie Curie
ETN project QuSCo (Grant No. 765267), from the German Federal Ministry of Education and Research via the funding program “Quantum Technologies-From Basic Research to the Market” (project GeQCoS) under Contract No. 13N15680, and from the European FET OPEN project Quromorphic (Grant No. 828826).
We also acknowledge funding by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Project No. \mbox{FI2549/1-1}.
We further acknowledge support by the Leibniz Supercomputing Centre, providing computing time on its \mbox{Linux-Cluster}.
\begin{appendices}
\section{\texorpdfstring{\\*
}~Simulations}\label{app:simulation}
The density matrix dynamics are simulated with the q-optimize~\cite{Wittler2021} package, using time-ordered piecewise exponentiation of Hamiltonian $H_\text{sys}$ in Eq.~\eqref{eq:3Q_sys_Hamiltonian}.
The dynamics are sampled at a rate of \SI{30}{GS\per\second}, while the control signals are sampled at \SI{2.4}{GS\per\second} with additional Gaussian filtering according to the specifications of a typically used arbitrary waveform generator, such as the HDAWG from Zurich Instruments~\cite{Zurichinstrumentsag2022}.
We restrict the energy of the bare states to $E/(2\pi)$= $\omega_{th} / 2\pi=\SI{16}{\giga\hertz}$ and therefore take only the lowest-lying energy levels of the qubits (4,4,3 for Q$_1$,Q$_2$,Q$_3$) and the tunable coupler TC (5) into account.
We further truncate the Hilbert space to allow only for up to four excitations in the system.
\section{\texorpdfstring{\\*
}~Interacting energy levels}\label{app:full_energies}
\begin{figure*}
\caption{Energy level diagram of the three-qubit system.
Frequencies with respect to the ground state $\ket{0,000}
\label{fig:full_energies}
\end{figure*}
While in Section~\ref{sec:the-system} we discussed only the avoided crossings dominantly contributing to the energy shifts of states $\ket{011}$ and $\ket{111}$, here we extend the discussion to all crossings affecting the adiabatic computational states $\ket{n_1 n_2 n_3}, n_k \in \{0,1\}$, as shown in Fig.~\ref{fig:full_energies}.
The energy shift for the adiabatic state $\ket{111}$ occurs due to hybridization first with the $\ket{1,110}^0$ state, then with $\ket{2,100}^0$ and finally with $\ket{3,000}^0$ [star, circle, and square, respectively, in Fig.~\ref{fig:full_energies}(c)].
Likewise, the energy shift for the adiabatic state $\ket{011}$ occurs due to hybridization first with the $\ket{1,010}^0$ state and then with $\ket{2,000}^0$ [top star and circle, respectively, in Fig.~\ref{fig:full_energies}(b)].
In both situations, lowering the coupler frequency $\omega_c$ past the last transition should further increase the energy shifts.
However, this behaviour is inhibited by hybridization with other bare states, such as $\ket{0,101}^0$ for $\chi_{110}$ and $\ket{0,201}^0, \ket{0,120}^0$ for $\chi_\text{CCP}$.
At a higher coupler frequency, instead, two further avoided crossings affect the $\ket{111}$ and $\ket{011}$ states when tuning the coupler into the interaction area: $\ket{1,200}^0\leftrightarrow\ket{0,111}^0$ and $\ket{1,100}^0\leftrightarrow\ket{0,011}^0$, respectively.
These avoided crossings have a gap of only a few megahertz, as both are caused by four-photon transitions, and therefore need to be passed diabatically, i.e.\ with a fast passage.
For the choice of circuit parameters it is thus essential to ensure that these four-photon crossings are located at a tunable coupler frequency $\omega_c$ above the idling point and separated from the chosen interaction area, such that the requirements on adiabaticity as well as diabaticity can both be fulfilled.
Similarly to $\ket{011}$, the adiabatic state $\ket{101}$ would experience a conditional energy shift $\chi_{101}$ due to hybridization first with the $\ket{1,100}^0$ state [lower star in Fig.~\ref{fig:full_energies}(b)] and then with $\ket{2,000}^0$ (not shown), and the adiabatic state $\ket{110}$ would experience a conditional energy shift $\chi_{110}$ due to hybridization first with the $\ket{1,100}^0$ state [triangle in Fig.~\ref{fig:full_energies}(b)] and then with $\ket{2,000}^0$ (not shown).
However, the interaction with $\ket{2,000}^0$ occurs at a coupler frequency $\omega_c=\omega_1 - \alpha_c$ below all qubit frequencies, making it difficult to reach areas with large energy shifts without causing leakage in at least some of the computational states.
Even then, for the adiabatic state $\ket{101}$ a previous hybridization with the $\ket{0,020}^0$ state suppresses the energy shift $\chi_{101}$ and introduces the risk of leakage.
Therefore, large energy shifts are only achievable on states $\ket{011}$ and $\ket{111}$.
\section{\texorpdfstring{\\*
}~Method of refocusing}\label{app:refocusing-derivation}
By interleaving $\pi$-pulses in the conditional phase accumulation, unwanted phase terms can be cancelled to realize CPHASE and CCPHASE gates.
Here we consider the interaction Hamiltonian (see also Eq.~\eqref{eq:cphase_Hamiltonian} in the main text)
\begin{equation}
\tilde{H} = \chi_{011} H_{23} + \chi_{101} H_{13} + \chi_{110} H_{12} + \chi_\text{CCP} H_\text{CCP}.
\end{equation}
Applying a leading and a trailing $\pi$-X-pulse on Q$_2$ ($X_2$) results in $\tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}}$, which can be used in conjunction with $\tilde{H}$ to realize a controlled-phase gate between Q$_1$ and Q$_3$.
A straightforward extension of this scheme to other qubit combinations results in the effective Hamiltonians listed in Tab.~\ref{tab:permutated_shifted_freqs}.
The effective Hamiltonian $\tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}}$ is given by
\begin{align}
X_2\, e^{- i \tilde{H} \tau}\, X_2 = e^{-i X_2\,\tilde{H}\,X_2 \tau} = e^{-i \tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}} \tau},
\end{align}
where the first equality is given by $X_2 X_2 = \mathbb{1}$.
The transformed Hamiltonian terms are then given by
\begin{align}
\begin{aligned}
{H}
^{\mathbb{1}\mathrm{X}\mathbb{1}}_{13} = X_2 H_{13} X_2 &\cong H_{13}, \\
{H}^{\mathbb{1}\mathrm{X}\mathbb{1}}_{12} = X_2 H_{12} X_2 &\cong -H_{12}, \\
{H}^{\mathbb{1}\mathrm{X}\mathbb{1}}_{23} = X_2 H_{23} X_2 &\cong -H_{23}, \\
{H}^{\mathbb{1}\mathrm{X}\mathbb{1}}_\text{CCP} = X_2 H_\text{CCP} X_2 &\cong -H_\text{CCP}+H_{13}
\end{aligned}
\end{align}
using the relation $X_{j}Z_{j}X_{j} = -Z_j$ and the Hamiltonian terms
\begin{align}
\label{eq:ham_terms}
\begin{split}
H_{kl} &= \ket{11}_{kl}\bra{11}_{kl} \\
&= \left(\mathbb{1}_{k}\mathbb{1}_{l}-\mathbb{1}_{k}Z_{l}-Z_{k}\mathbb{1}_{l}+Z_{k}Z_{l}\right)/4 \\
&\cong Z_{k}Z_{l}/4 \\
H_\text{CCP}&=\ket{111}\bra{111}\\ &=\big(\mathbb{1}\mathbb{1}\mathbb{1}-\mathbb{1}\mathbb{1}Z-\mathbb{1}Z\mathbb{1}-Z\mathbb{1}\mathbb{1}\\&\hspace{0.8cm}+ZZ\mathbb{1}+Z\mathbb{1}Z+\mathbb{1}ZZ-ZZZ\big)/8 \\
&\cong \left(ZZ\mathbb{1}+Z\mathbb{1}Z+\mathbb{1}ZZ-ZZZ\right)/8,
\end{split}
\end{align}
written in terms of the identity operator $\mathbb{1}$ and the Pauli operator $Z$.
Here we neglect global and single-qubit terms, denoted by $\cong$ in Eqs.~\eqref{eq:ham_terms}.
The resulting effective Hamiltonian is then given by
\begin{align}
\begin{aligned}
\tilde{H}^{\mathbb{1}\mathrm{X}\mathbb{1}} \cong
&- \chi_{011} H_{23} + (\chi_{101}+\chi_\text{CCP}) H_{13} \\
&- \chi_{110} H_{12} - \chi_\text{CCP} H_\text{CCP}.
\end{aligned}
\end{align}
\begin{figure}
\caption{
\label{fig:phase_leak_amp_t_plot}
\end{figure}
\begin{figure}
\caption{
Frequency shifts and leakage as a function of coupling strength and anharmonicity $\alpha_c$. (a) Two-qubit energy shifts $\chi_{n_1 n_2 n_3}
\label{fig:simple_param_dep_Toff}
\end{figure}
\section{\texorpdfstring{\\*
}~Optimization of Pulse Parameters}
\label{app:pulse_par_opt}
To maximize the gate fidelity we investigate the effect of control parameters on the system dynamics, specifically, the accumulated phase during a flux pulse and the associated leakage.
The flat-top pulse tuning the coupler from the idling to the interaction region is described by Eq.~\eqref{eq:half_Gaussian_square_pulse} with a rise-fall parameter $\tau_R=\SI{5}{\nano\second}$, a pulse length $\tau$ and the operation point $\omega_c^\text{op}$.
Varying the pulse length $\tau$ linearly increases the accumulated phases $\phi_j$ [Eq.~\eqref{eq:cond_phases}], as shown in Fig.~\ref{fig:phase_leak_amp_t_plot}(a).
Fitting this phase with a linear function provides the effective energy shifts $\chi_j$ used in Eq.~\eqref{eq:full_ccphase_lin_equation}, plus an offset given by the leading and trailing ramps.
The population losses $\Lambda_{n_{1}n_{2}n_{3}}$ from the adiabatic states $\ket{n_{1}n_{2}n_{3}}$ remain approximately constant as a function of the pulse duration, with oscillations due to Landau-Zener-St\"{u}ckelberg interference~\cite{Shevchenko2010}, suggesting that leakage is introduced only during the ramps of the pulse and not while in the interaction region [see Fig.~\ref{fig:phase_leak_amp_t_plot}(b)].
With this pulse the slope of the ramp and thus its adiabaticity can be tuned by the rise-fall parameter $\tau_R$. Individual pulses could be optimized to significantly suppress leakage by enforcing local adiabaticity, as well as utilizing destructive Landau-Zener-St\"{u}ckelberg interference~\cite{Rol2019, Negirneac2021}.
Varying, instead, the operation point $\omega_c^\text{op}$ for a pulse with fixed duration $\tau = \SI{55}{\nano\second}$ will affect the magnitudes of the energy shifts so that lower $\omega_c^\text{op}$ leads to larger accumulated phases, as shown in Fig.~\ref{fig:phase_leak_amp_t_plot}(c).
When choosing $\omega_c^\text{op}$ close to the four-photon avoided crossings $\ket{1,200}^0\leftrightarrow\ket{0,111}^0$ and $\ket{1,100}^0\leftrightarrow\ket{0,011}^0$ the involved states experience strong population losses [see Fig.~\ref{fig:phase_leak_amp_t_plot}(d)].
Instead, an operation point detuned from these crossings results in an improved diabatic passage and thus reduced leakage.
Indeed, the results presented in the main text use an operation point near Q$_3$ ($\omega_c^\text{op} \approx \omega_3 = \SI{4.5}{\giga\hertz}$) where we find a trade-off between low leakage and significant amounts of accumulated phase.
Lowering the coupler frequency below this point leads to very strong hybridizations between states with excitations in the coupler and states with excitations in Q$_3$, which would experience more leakage.
\section{\texorpdfstring{\\*
}~Optimization of Design Parameters}
\label{app:des_par_opt}
In optimizing design parameters we aim to maximize the strength of the energy shifts, which determines the gate speed, and minimize the amount of flux pulse leakage, which represents the dominant contribution of coherent errors.
We limit our investigation to the coupling strengths $g_{ic}$ between qubits and coupler and the anharmonicity $\alpha_c$ of the tunable coupler, as these will determine the width and position of the avoided crossings relevant for the gate [see Appendix~\ref{app:full_energies}].
We find that the conditional energy shifts $\chi_j$, as determined by Hamiltonian diagonalization, increase with increasing coupling strengths $g_{ic}$ due to a greater energy gap of the avoided crossing [see Fig.~\ref{fig:simple_param_dep_Toff}(a)].
However, numerical simulation of the total final leakage $\Lambda=\sum_{n_1,n_2,n_3}\Lambda_{n_{1}n_{2}n_{3}}$ as a function of the coupling strengths $g_{ic}$ exhibits nonmonotonic behaviour, with oscillations due to Landau-Zener-St\"{u}ckelberg interference~\cite{Shevchenko2010}, and a stable minimum around $g_{ic} \sim \SI{120}{\mega\hertz}$ [see Fig.~\ref{fig:simple_param_dep_Toff}(b)].
For smaller coupling strengths $g_{ic}$ the avoided crossings with excited coupler states become too narrow to be passed adiabatically and for larger coupling strengths $g_{ic}$ the avoided crossings that should be passed diabatically become too wide to do so.
Increasing the magnitude of the anharmonicity of the coupler $\alpha_c$
brings the three main avoided crossings introducing energy shifts closer together.
Therefore, the energy shifts at an operation point, here $\omega_c^\text{op} \approx \SI{4.5}{\giga\hertz}$, increase as a function of $-\alpha_c$, as shown in Fig.~\ref{fig:simple_param_dep_Toff}(c).
The stronger energy shifts come at the cost of more simultaneous hybridizations and frequency crowding, which increase the amount of leakage [Fig.~\ref{fig:simple_param_dep_Toff}(d)].
Nonetheless, the introduced leakage is limited to approximately $10^{-3}$ and the choice of anharmonicity will be mostly guided by charge noise considerations for transmon qubits.
\end{appendices}
\end{document} |
\begin{document}
\title{Quantum bouncer with quadratic dissipation}
\author{Gabriel Gonz\'alez}\email{ggonzalez@physics.ucf.edu}
\address{NanoScience Technology Center, University of Central
Florida, Orlando, FL 32826, USA}
\address{Department of Physics, University of Central
Florida, Orlando, FL 32816-2385, USA}
\pacs{03.65.$-$w, 03.65.Sq}
\keywords{Quantum Bouncer, Dissipative Systems, Canonical Quantization}
\begin{abstract}
The energy loss due to a quadratic velocity dependent force on a quantum particle bouncing on a perfectly reflecting surface is obtained for a full cycle of motion. We approach this problem by means of a new effective phenomenological Hamiltonian which corresponds to the actual energy of the system, and we obtain the correction to the eigenvalues of the energy in first order quantum perturbation theory for the case of weak dissipation. \\ \\
{\it Resumen} : La p\'erdida de energ{\'i}a debido a una fuerza proporcional al cuadrado de la velocidad se obtiene para el movimiento de una part{\'i}cula en el campo gravitacional uniforme. Se propone un nuevo Hamiltoniano efectivo para obtener las correcciones a los eigenvalores utilizando la teor{\'i}a de perturbaciones para el caso de disipaci\'on d\'ebil.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:introduction}
Recently there has been a lot of attention to the experimental realization of gravitationally bound quantum systems \cite{obs,obs1}. Quantum states in the Earth's gravitational field have been observed with ultra-cold neutrons falling under gravity \cite{obs2}. The toy model known as the {\it quantum bouncer} which describes a quantum particle bouncing in a linear gravitational field has been used to compare the theoretical calculation with the experimental results. In all these experiments they have found energy losses due to different processes \cite{obs3}. One way of simulating this energy loss is by adding an additional external
velocity-dependent force acting on the conservative system
and transforming it into a non conservative system. The resulting classical dissipative
system thus contains this phenomenological
velocity-dependent force.
The classical Hamiltonian is used as a basis for the so-called canonical quantization by obtaining the corresponding Hamiltonian operator.
For dissipative systems, i.e. systems where mechanical energy is lost due to frictional forces, difficulties arise in defining a Hamiltonian function \cite{GG,GG1,GG2}. Although formal Lagrangian functions yielding the correct equations of motion can always be given for one dimensional non conservative systems, one can not always find the corresponding Hamiltonian, and even if the corresponding Hamiltonian exists we find problems in their physical interpretation. This becomes even more obvious when these Hamiltonians are quantized in the usual canonical way \cite{GG3}.\\
In this article we propose a new effective phenomenological Hamiltonian for the motion of a particle in a uniform gravitational field and under a frictional force which is proportional to the square of the velocity. This new Hamiltonian allows a physical interpretation in terms of the energy of the system. Using this Hamiltonian we obtain the energy loss for a full cycle of motion for the quantum bouncer with quadratic dissipation by means of canonical quantization and perturbation theory.
\section{Model Hamiltonian}
\label{sec:model}
Suppose we drop a particle of mass $m$ from a distance $d$ above the surface of the Earth and we consider that during its motion there is a frictional force which is proportional to the square of the particle's velocity. The equation of motion which describes the dynamics of the particle is given by
\begin{equation}
m\frac{dv}{dt}=-mg-\gamma v |v|=
\left\{ \begin{array}{ll}
-mg-\gamma v^2 & \mbox{if $v>0$}\\
-mg+\gamma v^2 & \mbox{if $v<0$},
\end{array} \right.
\label{eq1}
\end{equation}
where $\gamma>0$ is the dissipation parameter. One can verify that the Hamiltonian for Eq. (\ref{eq1}) is given by
\begin{equation}
H_{\downarrow}=\frac{p^2}{2m}e^{2\gamma x/m}-\frac{m^2 g}{2\gamma}[e^{-2\gamma x/m}-1],
\label{eq2a}
\end{equation}
\begin{equation}
H_{\uparrow}=\frac{p^2}{2m}e^{-2\gamma x/m}+\frac{m^2 g}{2\gamma}[e^{2\gamma x/m}-1],
\label{eq2b}
\end{equation}
where $H_{\downarrow}$ and $H_{\uparrow}$ represent the Hamiltonians for the cases $v<0$ and $v>0$, respectively.
The canonical quantization of $H_{\downarrow}$ and $H_{\uparrow}$ has been obtained and studied by several authors \cite{obs4, obs5, obs6, obs7}, but a number of well known difficulties arise when one tries to interpret the results using this type of Hamiltonian in the quantum regime \cite{obs8, obs9}, in particular we see that $H_{\downarrow}$ and $H_{\uparrow}$ can be obtained from each other by making the substitution $\gamma\rightarrow-\gamma$, due to this symmetry and for the case of weak dissipation, the correction to the eigenvalues for a full cycle cancels out for odd powers in the dissipation parameter and only even powers of the dissipation parameter remain by using quantum perturbation theory. \\
The main goal of this article is to find a new Hamiltonian that describes Eq. (\ref{eq1}) and calculate the change in energy for a full cycle for the case of weak dissipation using quantum perturbation theory. To do this we are going to use the fact that the dynamics of Eq. (\ref{eq1}) can be completely determined in the classical sense and allows us to express the square of the velocity in terms of the particle's position
\begin{equation}
v^{2}_{\downarrow}(x)=\frac{mg(1-e^{-2\gamma(d-x)/m})}{\gamma},
\label{eq3a}
\end{equation}
\begin{equation}
v^{2}_{\uparrow}(x)=\frac{mg(2e^{-2\gamma x/m}-e^{-2\gamma(d+x)/m}-1)}{\gamma},
\label{eq3b}
\end{equation}
where we have taken into account that $v(d)=0$ and that the particle undergoes a perfectly elastic collision when it bounces on the surface of the Earth. One can easily see that when the dissipation parameter goes to zero, i.e. $\gamma \rightarrow 0$, we obtain the usual kinematic expressions for the square of the velocity for a particle in a uniform gravitational field. \\ Plugging Eq. (\ref{eq3a}) and Eq. (\ref{eq3b}) into Eq. (\ref{eq1}) we have
\begin{equation}
m\frac{dv_{\uparrow}}{dt}=-mg(2e^{-2\gamma x/m}-e^{-2\gamma(d+x)/m}),
\label{eq4a}
\end{equation}
\begin{equation}
m\frac{dv_{\downarrow}}{dt}=-mge^{-2\gamma(d-x)/m}.
\label{eq4b}
\end{equation}
In this way we can construct the Hamiltonian for Eq. (\ref{eq4a}) and Eq. (\ref{eq4b}) in the usual way which are given by
\begin{equation}
{\cal H}_{\downarrow}=\frac{p^2}{2m}+\frac{m^2 g}{2\gamma}e^{-2\gamma d/m}\left(e^{2\gamma x/m}-1\right),
\label{eq5a}
\end{equation}
\begin{equation}
{\cal H}_{\uparrow}=\frac{p^2}{2m}+\frac{m^2 g}{2\gamma}\left(e^{-2\gamma d/m}-2\right)\left(e^{-2\gamma x/m}-1\right),
\label{eq5b}
\end{equation}
where we have taken into account that one must obtain the usual Hamiltonian when the dissipation parameter goes to zero. \\
Using Eq. (\ref{eq5a}) and Eq. (\ref{eq5b}) we can write an effective Hamiltonian in the following form
\begin{equation}
H_{eff}=\frac{{\cal H}_{\uparrow}+{\cal H}_{\downarrow}}{2}+\frac{p}{|p|}\left(\frac{{\cal H}_{\uparrow}-{\cal H}_{\downarrow}}{2}\right).
\label{EH}
\end{equation}
\section{Canonical Quantization}
\label{sec:CQ}
To see the effects of dissipation in the eigenvalues of the quantum bouncer we are going to consider the case when we have weak dissipation such that ${\cal H}_{\uparrow}\approx {\cal H}_{\downarrow}$, for this case we can neglect the second term of Eq. (\ref{EH}) since it cancels during a full cycle. Expanding Eq. (\ref{eq5a}) and Eq.~(\ref{eq5b}) in a Taylor series in the following way
\begin{equation}
{\cal H}_{\downarrow}=\frac{p^2}{2m}+A\left(x+\frac{2\gamma}{m}x^2+\cdots \right),
\label{eq6a}
\end{equation}
\begin{equation}
{\cal H}_{\uparrow}=\frac{p^2}{2m}-B\left(-x+ \frac{2\gamma}{m}x^2- \cdots\right),
\label{eq6b}
\end{equation}
where $A=mge^{-2\gamma d/m}$ and $B=mg(2-e^{-2\gamma d/m})$, and keeping only the first two terms we end up with the following effective Hamiltonian
\begin{equation}
H_{eff}=\frac{p^2}{2m}+\left(\frac{A+B}{2}\right)x+\left(\frac{A-B}{2}\right)\left(\frac{2\gamma x^2}{m}\right).
\label{EH1}
\end{equation}
Treating the last term in Eq. (\ref{EH1}) as a perturbation we can estimate the correction to
the energy due to dissipation by using quantum perturbation theory where the unperturbed Hamiltonian is given by \begin{equation}
\hat H_{0}=\frac{\hat{p}^2}{2m}+mg\hat{x}.
\label{eq7}
\end{equation}
It is very well known that the normalized eigenfunctions for Eq. (\ref{eq7}) are given by Airy functions and its first derivative evaluated at its $n$th zero \cite{obs10}
\begin{equation}
\psi_{n}^{(0)}(z)=\frac{Ai(z-z_{n})}{|Ai'(-z_{n})|},
\label{eq8}
\end{equation}
where $z=x/\ell_{g}$ and $z_{n}=E_{n}^{(0)}/(mg\ell_{g})$ are defined in terms of the {\it gravitational length} $\ell_{g}=(\hbar^2/2m^2 g)^{1/3}$. \\
Using the above results we can determine the correction to the eigenvalues of the energy as
\begin{equation}
\delta E_{n}^{(1)}=-2\gamma g\ell_{g}^2(1-e^{-2\gamma d/m})\langle \psi_{n}^{(0)}|z^2|\psi_{n}^{(0)}\rangle,
\label{eq9}
\end{equation}
and using the fact that \cite{obs10}
\begin{equation}
\langle \psi_{n}^{(0)}|z^2|\psi_{n}^{(0)}\rangle=\frac{8}{15}z_{n}^2,
\label{eq10}
\end{equation}
we have the following approximate expression for the energy levels of the quantum bouncer with quadratic dissipation
\begin{equation}
E_{n}=mg\ell_{g}z_{n}-\frac{16}{15}\gamma g\ell_{g}^2(1-e^{-2\gamma d/m})z_{n}^2,
\label{eq11}
\end{equation}
where $\delta E_{n}^{(1)}<0$ as one would expect from the dissipation in the system. In Fig.~\ref{Fig1} we show the graph for the energy loss for a neutron in a gravitational field. From the figure one can see that Eq. (\ref{eq11}) is only valid for the first quantum states.
\begin{figure}
\caption{The graph shows the energy loss of a neutron in a gravitational field where $\ell_{g}
\label{Fig1}
\end{figure}
\\
\section{Conclusions}
\label{sec:Con}
We have shown that for the particle in a uniform gravitational field and with a dissipative force proportional to $v^2$ we can construct an effective Hamiltonian which corresponds to the energy of the system. We found that it is possible to obtain the energy loss during a full cycle of motion for the quantum bouncer with quadratic dissipation by means of canonical quantization and quantum perturbation theory.
\end{document} |
\begin{document}
\begin{center}
{\large \bf A flexible forecasting model for production systems}\\
{R.\;Hosseini, K.\;Yang, A.\;Chen, S.\;Patra},\\
{LinkedIn Data Science Applied Research Team}\\
\end{center}
\begin{abstract}
This paper discusses desirable properties of forecasting models
in production systems. It then
develops a family of models
which are designed to satisfy these properties:
highly customizable to capture complex patterns;
accommodates a large variety of objectives;
has interpretable components;
produces robust results; has automatic changepoint detection for trend and seasonality;
and runs fast -- making it a good choice for reliable and scalable production systems.
The model allows for seasonality at various time scales, events/holidays,
and change points in trend and seasonality.
The volatility is fitted separately
to maintain flexibility and speed and
is allowed to be a function of specified features.
\end{abstract}
\section{Introduction}
\label{sect:introduction}
Forecasting business metrics and quantifying their volatility is of paramount importance
for many industries, including the technology industry.
Long-term forecasts can inform the company executives about expectations
about future growth (e.g.\;daily active users)
or about future resource requirements (e.g.\;server capacity needed).
Short-term forecasts with uncertainty intervals can be used
to detect anomalies in the system,
by comparing the forecasts with observed values.
The area of forecasting time series has a long history
with many models and techniques developed in the past decades.
Some important examples include:
Classical time series models such as ARIMA
(e.g.\;\cite{book-hyndman-2014}) and GARCH (\cite{book-tsay-2010});
Exponential Smoothing Based methods (see \cite{winters-1960});
State-space models (see \cite{kalman-1960}, \cite{book-durbin-2012}, \cite{book-west-1997});
Generalized Linear Models extensions to time series
(\cite{book-kedem-2002}, \cite{hosseini-takemura-2015});
Deep Learning based models such as LSTM (\cite{hochreiter-1997}).
Our framework utilizes the powerful aspects of these various models.
We provide more details below, after discussing the motivations for its development.
Here we discuss the desirable properties of a forecasting method in practice,
especially for production systems in the technology industry.
These properties are the motivation for our framework.
\begin{itemize}
\item Flexibility to accommodate complex patterns for the metric of interest:
Flexibility is needed to achieve high accuracy.
For example, in practice, often the growth can differ
for weekdays versus weekends for a daily metric
and this needs to be taken into account explicitly or implicitly to achieve accurate forecasts.
\item Flexibility in the objective: As an example, depending on the application, the user might
intend to capture the average values well (e.g.\;in Revenue forecasting)
or the peak values well (e.g.\;in capacity planning).
\item Interpretability: This can benefit
users to inspect and validate forecasting models with expert knowledge.
As an example, if the model is able to decompose the forecast
into various components (e.g.\;seasonal, long-term growth, holidays),
the users can inspect those components not only to validate the models
but also to get insights about the dynamics of the metric.
\item Robustness: It is important for the forecast values to be robust
and have low chance of returning values which are implausible.
This can indeed occur in the time series context for various reasons,
including the divergence of the simulated values (see e.g.\;\cite{hosseini-takemura-2015}).
\item Speed: In many applications it is important to quickly train the model and produce forecasts.
Speed can help with auto-tuning over parameter spaces and producing a
massive number of forecasts, even when there are many training data points.
\end{itemize}
In our model, we have decomposed the problem into two phases:
\begin{itemize}
\item Phase (1) the conditional mean model;
\item Phase (2) the volatility / error model.
\end{itemize}
In (1) a fitted model is utilized to predict the metric of interest
and in (2) a volatility model is fitted to the residuals.
This choice helps us with flexibility and speed as integrated models
are often more susceptible
to being poorly tractable (convergence issues for parameter estimates), and
demonstrate divergence issues when Monte Carlo Methods are used to generate future values,
which is the case for many of the aforementioned methods such as State-Space Models or GLM based models
(\cite{book-tong-1990}, \cite{hosseini-pcpn-2017}).
As an example, estimating a model with complex conditional mean and complex volatility
(i.e.\; with a large number of parameters)
can run into stability and speed issues (\cite{hosseini-bk-2020}).
Phase (1) can be broken down to these steps:
\begin{itemize}
\item[(1.a)] extract raw features from timestamps, events data and history of the series;
\item[(1.b)] transform the features to basis functions;
\item[(1.c)] apply a change-point detection algorithm to the data to discover changes
in the trend and seasonality over time;
\item[(1.d)] apply an appropriate machine learning algorithm to fit those features
(depending on objective).
\end{itemize}
Note that the purpose of Step (1.b) is to transform the features into a space which can be used
in ``additive'' models when interpretability is needed.
For Step (1.d), our recommended choices are explicit regularization based algorithms
such as Ridge.
Note that if the objective is to predict peaks,
quantile regression or its regularized versions are desirable choices.
In the next sections,
we provide more details on how these various features are built
to capture various properties of the series.
In Phase (2), a simple conditional variance model can be fitted to the residuals
which allows for the forecast volatility to be a function of specified factors (e.g.\;day of week,
if volatility depends on that).
The main contribution of this work is to develop a model which combines various techniques to
achieve a highly customizable model which can run fast; have interpretable options and support
variable objectives. Our events model is very flexible and allows for the deviation due to events
to be of arbitrary shape even within a day. Also the regularization-based
changepoint algorithm is a novel method which can capture changes in both trends and seasonality --
we demonstrate it works well in practice.
The paper is organized as follows.
Section \ref{sect:model} discusses the model details.
Section \ref{sect:change-points} provides the details
for the automatic change-point detection component.
Section \ref{sect:use_case} demonstrates how our models work using a particular use case.
In Section \ref{sect:assessment},
we discuss methodology for assessing performance of the
models in terms of accuracy and in cross-validation.
We also compare the performance of our models
with some widely used open source libraries.
Finally Section \ref{sect:discussion} concludes with a summary
and discussion of possible extensions.
\section{Conditional Mean and Volatility Models}
\label{sect:model}
This section discusses the details of our model.
We refer to our model as Silverkite for clarity in the following.
First, we introduce the mathematical notation.
Suppose $\{Y(t)\}, t=0,1,\cdots$ is a real-valued time series where $t$ denotes time.
We denote the available information up to time $t$ by $\mathcal{F}(t)$.
We assume this information is given in terms of a covariate process, denoted by
${\bf Z}(t)$, as discussed in \cite{book-kedem-2002}.
The covariate process is assumed to be a
multivariate real-valued process and encodes the features to be utilized in the models.
For example, we can consider the covariate process:
\[{\bf Z}(t-1) = (1, Y(t-1), Y(t-2)),\]
which means the features used in the model are simply a constant and two lags.
(This corresponds to a simple auto-regressive model of order 2, if we further
assume that the conditional mean is linear and the conditional distribution is Gaussian.)
Figure \ref{fig:silverkite-diagram} illustrates the model components diagram where
the (sky) blue square nodes show the compute nodes; the green parallelograms show the
(potential) user inputs; the dark cylinders show the (potential) input databases such as
country holidays; the yellow clouds show the inline comment / descriptions.
\begin{figure}
\caption{This diagram summarizes the steps involved in the Silverkite Algorithm.
The blue square nodes represent the computational nodes and include a number which indicates the step.
The nodes (1.a) to (1.d) correspond to Phase (1) which is the conditional mean phase.
The nodes (2.a) and (2.b) correspond to Phase (2) which is the volatility / error model.}
\label{fig:silverkite-diagram}
\end{figure}
As we discussed in the introduction, the algorithm has two main phases.
\begin{itemize}
\item Phase (1): The conditional mean model
\item Phase (2): The volatility / error model, fitted to residuals
\end{itemize}
As we discussed, this choice is made deliberately to increase the flexibility of
the model in capturing complex mean patterns / objectives in modeling the mean
and overall speed of the algorithm. The main reason is models which encapsulate
the mean parameters and volatility parameters into one model often require slow
Maximum Likelihood Estimation (e.g.\;in \cite{hosseini-takemura-2015})
or intensive Bayesian computation (e.g.\; \cite{book-west-1997})
due to parametric form of the volatility.
\subsection*{The conditional mean component}
\label{subsect:mean-component}
The very first step in building the model is generating the raw features.
Here are some examples of raw features:
\begin{itemize}
\item Time of day (TOD): This is a continuous measure of what time in the day $t$ is,
in hour units, and ranges from 0 to 24.
For example 12.5 is half past noon.
\item Day of week (DOW):
Categorical variable denoting day of week, from Sunday (0) to Saturday (7).
\item Time of week (TOW):
This is a continuous measure of when in the week $t$ is, in day units,
starting from Sunday.
For example, 1.5 denotes Monday at noon.
\item Time of year (TOY): This is a continuous measure of when in the year $t$ is, in
year units and ranges from 0 to 1. For example 1.5/365 denotes the second of January at noon
in a non-leap year.
\item Time of month (TOM) and time of quarter (TOQ) can be defined similarly and range from 0 to 1.
\item Event / holiday labels: assume a holiday or re-occurring events database is available.
For example, timestamps
on January 1st could be labeled with ``New Year''.
Custom events can also be treated in the same way.
If multiple event databases are relevant (e.g.\;Gregorian and Lunar calendar), multiple
features are to be generated per database.
\item Continuous Time (CT): This is a variable which measures the time elapsed from
a particular date in year units. Typically, the reference date is the beginning of the time series,
so that the first time point has value $0$.
For example if the series starts on Jan 1st, 2015,
then end of 2016 will have the value of $2$ since two years have elapsed.
\end{itemize}
\subsubsection*{Seasonality}
\label{subsubsect:seasonality}
The seasonality in a time series can appear in various time-scales.
For example for hourly data, one can expect a periodic daily pattern across the day, a periodic
weekly pattern across the week, and similarly periodic patterns across month, quarter and
year.
We use Fourier series as the primary tool for modeling seasonality in various scales.
For example for hourly data and within day variations, we use Fourier terms of the form:
\[s_k = \sin (k \omega_d d(t)),\;c_k = \cos (k \omega_d d(t)), \;\omega_d = 2 \pi / 24,\]
\[k=1,\cdots,K\]
where $d(t)$ denotes TOD (time of day), discussed above; and $K$ is the appropriate order of
Fourier series which can be determined in a Model Selection Phase.
Note that the frequency $\omega_d$ is set to be $2 \pi / 24$ since TOD changes from 0 to 24.
Similarly, we can define appropriate Fourier Series for weekly, monthly, quarterly and annual
periodicity.
\subsubsection*{Growth}
\label{subsubsect:growth}
In order to model growth, we introduce some basis functions and allow for piece-wise
continuous versions of those functions.
By slight abuse of notation let $t$ denote the
continuous time (CT) introduced in the raw features.
Then consider the following basis functions:
\[f(t) = t^p, p = 1/3, 1/2, 1, 2, 3.\]
We allow for the growth to change with time continuously at given change-points
$t_1, \cdots, t_k$.
Given the change-points and a basis function $f(t)$, we define
\[growth(t) = \alpha_0 f(t) + \sum_{i=1}^k \alpha_i 1_{t > t_i} (f(t) - f(t_i))\]
Note that $growth(t)$ is a continuous function of $t$, but allows the derivative of the
function to change at the change-points.
In some forecast applications, we might have external information about the change-points,
which can then be fed to the algorithm.
However, in other applications such information might be unavailable or the growth shape
might not conform to any such basis function.
Therefore Section \ref{sect:change-points} presents a method for automatic change-point detection
both for growth and seasonality.
\subsubsection*{Events and holidays}
\label{subsubsect:events}
Here we present a method for modeling reoccurring events.
This approach allows the period of the event to take any possible deviation
from general trends. It assumes the impact of the event does not vary over time.
A prime example of such events are national/religious holidays in various countries
and the days surrounding those holidays.
Suppose, the event occurs in the known periods $[t_i, t_i + l]$, where each $i$
corresponds to a single occurrence of the event.
Define the event time-coverage of the re-occurring event $e$ to be:\\
\begin{equation*}
TC(e) = \cup_i [t_i, t_i + l],\;\;\; i \in \{1, 2, \cdots\}
\end{equation*}
Then we define the basis function:
\begin{equation*}
s_k(t;\;e) =
\begin{cases}
0 & t \notin TC(e) \\
\sin(2\pi k(t - t_i)/l) & t \in TC(e)
\end{cases}
\end{equation*}
Similarly, we define $c_k(t; e)$ as the cosine counterpart.\\
In order to model the effects of event $e$, we add the basis functions
\begin{equation*}
\{s_k(t; e), c_k(t; e), k=1,\cdots,k_e\},
\end{equation*}
where $k_e$ is the appropriate Fourier Series order, to the set of basis functions. \\
Note that for a holiday $e$, the time coverage can be defined to include an
expanded window surrounding that holiday instead of solely the holiday.
Moreover, when $K = k_e$ (same Fourier order used for seasonality as for the holiday),
the basis functions can be expressed as interaction terms e.g.
\begin{eqnarray*}
s_k(t; e) = s_k(t) * \mathbbm{1}_{TC(e)}, \\
\end{eqnarray*}
where $\mathbbm{1}_{TC(e)}$ is the indicator function.
\subsubsection*{Remaining Temporal Dependence}
After taking into account trends, seasonality,
events, change-points (discussed in the next section),
and other important features, often the residuals still
show a temporal dependence
(albeit often orders of magnitudes smaller than the original series).
The remaining temporal correlation can be exploited
to improve the forecast especially for short-term forecast horizons.
We allow for an auto-regressive structure in the model e.g.\;by including lags in
the model: $Y(t-1), \cdots, Y(t-r)$ for some appropriate $r$.
While auto-regression can account for the remaining correlation in the series,
for many applications a large $r$
might be needed to capture long-term dependence in the chain.
To remedy this issue, \cite{hosseini-bin-pcpn} suggested a
technique to develop parsimonious models by aggregating the lags.
As an example, for a daily series, consider
the averaged lag series
\[AVG(Y(t); 1,2,\cdots,7) = \sum_{i=1}^7 Y(t-i) / 7.\]
This covariate then represents the average value of the series over the past week.
As another example, consider
\[AVG(Y(t); 7,7\times2,7\times3) = \sum_{i\in (1, 2, 3)} Y(t-7i) / 3.\]
This covariate represents the average value of the series on the same day of week in the past 3 weeks.
Similar series can be defined for other frequencies as well.
\subsubsection*{Lagged regressors and regressors}
Suppose $p$ other time series are provided with the same frequency as the
target time series to forecast:
${\bf X}(t) = X_1(t), \cdots, X_p(t)$.
If such other metrics are available while future values are unknown,
one can still use their lags in the models, similar to
previous sections.
In the case that the future values of the these variables are known,
or can be forecasted reliably based on other models, we can directly
use $X_1(t), \cdots, X_p(t)$ as regressors.
This also has applications in scenario-based forecasting where some underlying variables
are assumed to behave differently in the future.
\subsubsection*{Accommodating complex patterns}
One of the advantages of our model compared
to many commonly used models in practice is the ability
to easily accommodate complex patterns.
This is done by specifying interactions (between features).
To mitigate the risk of the model
becoming degenerate or over-fitting,
regularization can be used in the machine learning
fitting algorithm for the conditional model (e.g.\;Ridge).
In fact, regularization also helps in minimizing the risk of divergence
of the simulations of future series for the model which are
discussed in \cite{hosseini-bk-2020}.
Here we provide some examples of how complex patterns can be accommodated with interactions.
\begin{itemize}
\item Growth is different on weekdays and weekends.
This is possible and unobserved in many series of interest.
For example it could be the case that the usage of a particular app surges much more
rapidly on weekends as compared to weekdays.
Suppose the time frequency is daily
and the model includes the categorical term DOW to model the weekly patterns.
To accommodate this pattern we can interact the growth term e.g.\;$f(t) = t$ with
DOW:
\[f(t) * DOW\]
where $*$ denotes the interaction as commonly used
in standard software such as R (https://www.R-project.org/)
or Python's patsy package (https://patsy.readthedocs.io/).
If a Fourier series is used to model weekly patterns,
then one can interact the Fourier series terms or a subset of it with the
growth function.
\item Different months have different patterns for day of week.
It might be possible that during different times of the year,
the weekly patterns differ.
Then one can use this interaction:
\[f(t) * month\]
where $month$ is a categorical variable denoting the month of the year.
Similar interactions can be considered if Fourier series are used to model the annual trends.
\end{itemize}
\subsection{The volatility component}
\label{subsect:volatility}
As discussed in the introduction,
we elect to separately fit the conditional mean model and the volatility / error model.
Integrated models of course can be considered where the model includes all components
(e.g.\;\cite{hosseini-takemura-2015} or \cite{book-west-1997}).
In theory, the advantage of an integrated model is accounting for all uncertainty in one model.
However, we consider a two-component model
for the following reason:
(a) more flexibility in the mean components model in terms of features and algorithm;
(b) more flexibility in the
volatility model;
(c) considerable speed gain by avoiding computationally heavy Maximum Likelihood Estimation
(e.g.\;\cite{hosseini-takemura-2015})
or Monte Carlo Methods
(e.g.\;\cite{book-west-1997}).
These factors are very important in a production environment
where often fast and reliable forecasts are needed.
The increased reliability is due to (a) more stable / robust estimates of the
model parameters (b) less chance of the forecasted values into the future
to diverge to unreasonable values
(as reported by \cite{book-tong-1990} and \cite{hosseini-bk-2020}).
Here we discuss the details for the volatility model.
Suppose $Y(t)$ is the target series and $\widehat{Y}(t)$ is the forecasted series.
Then define the residual series as follows:
\[R(t) = Y(t) - \widehat{Y}(t)\]
Assume the volatility depends on given categorical features
$F_1, \cdots, F_p$ which are also known into the future.
As an example, these features could be the raw time features defined previously such as
``Day of week (DOW)'' or ``Is Holiday''
which determines if time $t$ is a holiday or not.
Then given any combination of features $F_1, \cdots, F_p$,
we consider the empirical distribution
$(R | F_1, \cdots, F_p)$ and fit a parametric or non-parametric distribution to the combination
as long as the sample size for that
combination, denoted by $n(F_1, \cdots, F_p)$ is sufficiently large
e.g.\; $n(F_1, \cdots, F_p) > N,\; N=20$.
Note that one can find an appropriate $N$ using data e.g.\;during cross-validation steps
by monitoring the distribution of the residuals.
Then from this distribution, we estimate the quantiles:
$Q(F_1, \cdots, F_p)$ to form the 95\% prediction interval:
\[\widehat{Y}(t) + Q(F_1, \cdots, F_p)(0.025), \widehat{Y}(t) + Q(F_1, \cdots, F_p)(0.975),\]
and similarly for other prediction intervals.
One choice for a parametric distribution is the Gaussian distribution
$\mathcal{N}(0, \sigma^2(F_1, \cdots, F_p))$
which can be appropriate for some use cases.
The reason that we have forced the mean to be zero is that we do
not want the volatility model to modify the forecasted value which
is the result of often much more complex mean model with many more features.
Regarding normality assumption, note that while the original series can be heavily skewed,
it could be the case that the residual series
is close to normal, especially after conditioning on $F_1, \cdots, F_p$.
For use cases where the normality of the conditional residuals
is not reasonable,
we can use non-parametric estimates of the quantiles using various approaches e.g.\;by simply
calculating sample quantiles for sufficiently large samples.
We also need to address the case where the sample size for a given combination is smaller than $N$.
In this case, we need to fall back to another reasonable way to come up with prediction interval.
To determine the fall back values: we calculate the Interquartile Range $IQR(c) = (Q(0.75) - Q(0.25))$
for each combination $c$; then order these IQR increasingly and for a large $p \in [0, 1]$, e.g.\;$p=0.9$,
we pick $c_0$ which attains that IQR. Then we use the quantiles of $c_0$ as the fall back value.
The idea behind this approach is to fall back to some quantiles which are coming from more variable combinations.
Note that in practice, the combinations should be chosen in a way that no fall-back is necessary and this mechanism is
only there to cover rare cases when a combination gets assigned smaller sample sizes. For future work, one can also consider
parametric models for volatility in residuals to be able to accommodate a large number of features in volatility.
The concrete steps for fitting the volatility model are described in Algorithm \ref{alg:volatility}.
\begin{algorithm}[H]
\label{alg:volatility}
\SetAlgoLined
\KwData{Apply the forecast model to generate predictions}
\SetKw{Pp}{Calculate residuals}
\Pp{\\}
{
\Indp
}
\SetKw{Rg}{Split data based on feature values}
\Rg{\\}
{
\Indp
}
\SetKw{Pop}{Estimate distribution for large combinations}
\Pop{\\}
{
\Indp
Consider combinations $C_{large}$ with sample size $\geq N$\;
\For{combination $c$ in $C_{large}$}{
Calculate $IQR(c)$\\
Calculate desired lower and upper bounds: $q_1(c),q_2(c)$
}
}
\SetKw{Pop}{Estimate fall-back volatility for small combinations}
\Pop{\\}
{
\Indp
Order the combinations $C_{large}$ with respect to $IQR(c)$.\\
Find the combination, $c_0$, which is percentile of $p$ in $C_{large}$.\\
Set fall back quantiles to $q_1(c_0), q_2(c_0)$.\\
Consider combinations $C_{small}$ with sample size $< N$\;
\For{combination $c$ in $C_{small}$}{
Set lower and upper bounds to the fall back quantiles
}
}
\KwResult{Return $(q_1(c), q_2(c))$ for all $c$; and fall back quantiles $q_1(c_0), q_2(c_0)$}
\caption{Volatility algorithm}
\end{algorithm}
\section{Changepoint Detection}
\label{sect:change-points}
Changepoints play an important role in forecasting problems.
By a changepoint, we refer to a time point in a time series,
with the pattern in the data segment after the time point exhibiting a change
from the data segment prior to it.
Capturing the changepoints can help the model adapt to the
most recent data patterns and learn the right behavior in forecasts.
This section discusses a changepoint detection algorithm
that focuses on trend changepoints.
The algorithm is based on the adaptive lasso \citep{zou2006adaptive}
to select significant changepoints from a large number of potential changepoints.
While fully automatic detection is possible, tuning parameters are provided to allow flexibility.
\subsection{Trend Changepoint Detection}
\label{subsect:trend-change-points}
The trend of a time series describes
the long-term growth that ignores seasonal effects or short-term fluctuations.
As discussed in Subsection \ref{subsubsect:growth},
in general the trend can be denoted as a function of time, i.e.,
$$\text{long-term trend}=g(t)$$
for a continuous function $g$.
This growth function can be approximated with basis functions such as
linear, quadratic, cubic and logistic.
Here, we focus on the case with linear basis functions
as it suffices to be a good starting point for most applications we have encountered.
In fact, according to the Stone-Weierstrass Theorem
(Chapter 7 of \cite{book-rudin-1976}), any continuous
function can be approximated by piece-wise linear functions.
(Note however, this does not imply that there are
no merits in considering other basis functions in other applications,
as other basis functions could
potentially capture some complex trends more parsimoniously,
but we do not discuss that here further for brevity.)
Given a list of changepoints $t_1,\cdots,t_k$,
we can approximate the continuous function $g$ with the piece-wise linear function
$$g(t)=a_0t + \sum_{i=1}^ka_i\mathbbm{1}_{\{t>t_i\}}(t-t_i).$$
The approximation error gets smaller when we have more changepoints,
while the risk of over-fitting also gets higher, thus making the forecast less reliable.
Therefore, the method discussed here intends to only select significant trend changepoints.
The proposed method here is a regularized regression based algorithm with
a few filters being applied for practical considerations,
and is able to identify significant trend changepoints.
The steps of this procedure are given in algorithm \ref{alg:trend}.
First, we apply an optional aggregation.
For example, aggregating daily data into weekly data.
The main purpose of applying this aggregation is to eliminate short-term fluctuations.
For example, a short holiday effect on daily data should not be picked up
as long-term trend changes. Note that this aggregation process
may not be necessary if data frequency is already sufficiently coarse e.g.\;for weekly data.
The next step is to place a fine regular grid of potential changepoints uniformly over
the span of time of the series.
For example for daily data with weekly aggregation,
placing potential changepoints every week or every two weeks can be considered.
However, it also would introduce false or pseudo changepoints.
Therefore, a balanced approach is needed, which we will consider shortly.
Large number of potential changepoints,
also guarantees the existence of
a potential changepoint, sufficiently close to
any ``true'' trend changepoint.
We allow for some other restrictions in the procedure to find the appropriate change points.
For example, we allow for the user to specify a period at the end of the series
where no change point is allowed.
The reasoning is that the position of last changepoint and its slope afterwards has a significant
impact on the forecasted values.
This way, we allow for expert knowledge of the nature of the series of interest to be
incorporated into the model.
The core step applies the Adaptive Lasso \citep{zou2006adaptive}
to the regression problem with the aggregated time series
as the response and the changepoints and yearly seasonality as regressors, i.e.,
\begin{equation}
\label{eq:adalasso}
y_{t, agg} = a_0t + \sum_{i=1}^ka_i\mathbbm{1}_{\{t>t_i\}}(t-t_i) + \sum_{j=1}^K\left(\beta_{cj}c_j+\beta_{sj}s_j\right),
\end{equation}
where $c_j$ and $s_j$ are the sin and cosine functions corresponding to the
yearly Fourier series,
as discussed in Subsection \ref{subsubsect:seasonality},
$$ s_k = \sin (k \omega_y y(t)),\;c_k = \cos (k \omega_y y(t)), \;\omega_y = 2 \pi, k=1,\cdots,K_1,$$
where $y(t)$ denotes TOY (time of year) defined in Subsection \ref{subsect:mean-component}.
The adaptive lasso adds weights to the $L_1$ norm penalization of the lasso \citep{tibshirani1996regression},
where the weights are chosen based on various rules,
for example, the reciprocal of some initial estimations of the coefficients.
The reason that we use the adaptive lasso over the lasso
is that the adaptive lasso can gain the desired sparsity level
without over-shrinking the significant coefficients,
with properly chosen weights,
as discussed in \cite{zou2006adaptive}.
If the time series has a long history,
yearly seasonality change may be falsely captured as trend change,
so it's better to refit yearly seasonality after some period, e.g., every 2 years.
In the regression formulation above,
this can be handled by introducing extra regressors that account for yearly seasonality change, e.g.,
$$s_{k1} = \mathbbm{1}_{\{t>t_1\}}\sin (k \omega_y y(t)),\;c_{k1} = \mathbbm{1}_{\{t>t_1\}}\cos (k \omega_y y(t)), \;\omega_y = 2 \pi, k=1,\cdots,K_1,$$
for some change period $t_1$.
Introducing these terms will fit different yearly seasonality before and after $t_1$.
Note that we should only penalize the changepoint parameters i.e.,
\begin{align}
\label{eq:partialreg}
&\hat{a}_0, \cdots, \hat{a}_k, \hat{\beta}_{c1}, \hat{\beta}_{s1}, \cdots, \hat{\beta}_{cK}, \hat{\beta}_{sK}\nonumber\\
=&\argmin\sum_{m=1}^n\left[y_{t,agg}-\left(a_0t + \sum_{i=1}^ka_i\mathbbm{1}_{\{t>t_i\}}(t-t_i) + \sum_{j=1}^K\left(\beta_{cj}c_j+\beta_{sj}s_j\right)\right)\right]^2+\lambda\sum_{i=1}^kw_i|a_i|,
\end{align}
for some weights $w_i$, $i=1,...,k$.
This partial penalized regression can be solved
with a fast coordinate descent algorithm \citep{tseng2001convergence}.
An alternative is to use projection to split the optimization problem into two steps.
The first step fixes the $L_1$-norm penalized coefficients and estimate
the other coefficients as a ridge regression problem.
The second step uses the estimated coefficients from the first step and reduce
the problem into a Lasso regression problem.
Both methods require some math and programming.
A derivation of the algorithm is given in appendix \ref{subsect:mix-regularization}.
It is worth noting that
we are not losing too much by directly optimizing (\ref{eq:adalasso})
over (\ref{eq:partialreg}).
The only difference is that penalizing the $a_0$ term
brings in a pseudo-changepoint at the very beginning of the time series to
``pick up'' the baseline trend.
This can be avoided by not allowing potential changepoints
to be placed near the very beginning of the time series.
Existing libraries such as \textit{sklearn} \citep{pedregosa2011scikit} can be utilized
to solve the problem efficiently.
\begin{algorithm}
\label{alg:trend}
\SetAlgoLined
\KwData{Time series data.}
\SetKw{Pp}{Pre-processing:}
\Pp{\\}
{
\Indp
Aggregation (optional)\;
Put a large number of potential changepoints uniformly\;
Eliminate the potential changepoints in forbidden region (e.g.\;the end)\;
}
\SetKw{Rg}{Regularization:}
\Rg{\\}
{
\Indp
Create trend features (including changepoints) and yearly seasonality features\;
Fit a regression model with the features (initial estimation)\;
Compute feature weights\;
Fit regression model with adaptive l1 norm penalty on the changepoint features\;
}
\SetKw{Pop}{Post-processing:}
\Pop{\\}
{
\Indp
Group selected changepoints with distance less than a threshold\;
\For{group in groups}{
\While{can add or remove changepoints}{
Remove the changepoint with smaller change if two or more are too close\;
Trace back to see if any changepoint can be added back\;
}
}
}
\KwResult{The detected trend changepoints.}
\caption{The automatic trend changepoint detection algorithm.}
\end{algorithm}
After the regularization,
we have selected significant trend changepoints.
However, there can be detected changepoints that are too close to each other.
These types of changes usually account for consecutive changes or gradual changes.
As for pure trend changepoint detection, the result does not need further handling.
If one wants to use these changepoints as input for a forecast model and prefers parsimony,
a rule-based post-filtering method can be applied to remove redundant trend changepoints.
Here we provide more details about filtering redundant change points. First, we group changepoints
that are close to at least one another with respect to some threshold.
For each group, we start with the first changepoint and look at the second one.
The one with the smaller change is dropped, and we go to the third one, and so on.
If we dropped the second changepoint,
and the distance between the first and the third changepoints is greater than the threshold,
we won't need to compare those two changepoints,
and will continue to compare the third and the fourth changepoints.
In a special case when $c_1<c_2<c_3$,
where $c_i$ is the magnitude of the $i^{th}$ changepoint,
after dropping the first and the second changepoints,
the first changepoint can be added back,
if the distance between the first and the third changepoints is large enough.
Each time we remove a changepoint,
we check back to see if any deleted changepoints can be added back.
This rule-based filtering method not only enforces minimal distance
to the detected trend changepoints,
but also retains as many changepoints as possible.
Through the whole algorithm, the following components can be customized to fit specific use cases
\begin{itemize}
\item The aggregation frequency.
\item The distance between potential changepoints or how many potential changepoints.
\item The time period(s) with no changepoints.
\item The yearly seasonality Fourier series order.
\item How often yearly seasonality changes.
\item The initial estimation method for adaptive Lasso.
\item The regularization strength.
\item The minimum distance between detected changepoints.
\item Any customized trend changepoints to be added to detected changepoints.
\item The minimum distance between a detected changepoint and a customized changepoint.
\end{itemize}
The above algorithm can stand alone as a trend changepoint detection algorithm.
It runs fast and gives significant trend changepoints to help capture
long-term events, product feature launches, changes in underlying dynamics, etc.
On the other hand,
the output can also be used to specify the trend changepoints as in Section \ref{sect:model}.
The advantage of detecting trend changepoints independently from performing it
while fitting the model is to allow more flexible algorithms in the fitting phase,
because it separates estimation for more interpretability,
and makes it easier to tune sub-modules.
\subsection{Seasonality Changepoint Detection}
\label{subsect:seasonality-change-points}
In some time series, seasonality effect can change over time similarly to trend.
Product features and news events can increase or decrease the volatility of the time series.
Gradual changes in volatility can be modeled with interactions,
however, seasonality changepoints are a more flexible and automatic way to capture this effect.
Seasonality effect is modeled with Fourier series terms in the model,
and this makes it easy to include seasonality changepoints.
We use truncated seasonality features to capture seasonality changes.
For every Fourier series term $s_k$ and $c_k$,
the truncated terms $s_k\mathbbm{1}_{\{t>t_{scp}\}}$ and $c_k\mathbbm{1}_{\{t>t_{scp}\}}$
models the change after a seasonality changepoint $t_{scp}$.
A regularized regression can be used to choose significant seasonality changepoints
from a large number of potential seasonality changepoints as we do in
automatic trend changepoint detection.
The main difference between trend changepoints and seasonality changepoints lies on two parts.
The first part is that the seasonality has multiple components,
for example,
yearly seasonality,
quarterly seasonality,
monthly seasonality,
weekly seasonality,
daily seasonality, etc.
The second part is that for each component,
there are multiple Fourier series bases that account for the same seasonality changepoint.
In our model, a group lasso type of regularization \citep{yuan2006model} is
used on the Fourier bases for each potential changepoint of each component.
This method drops all Fourier bases of one changepoint in one component entirely;
however, the resulting seasonality is not smooth because of these abrupt changes.
Using $L_1$-norm over all terms can still give the desired sparsity level
and also gives smoother seasonality changes.
Therefore, the same adaptive method discussed in trend changepoints
also works for seasonality changepoint detection in practice.
\section{Example application to bike-sharing data}
\label{sect:use_case}
This section demonstrates how various model components of our proposed model work
through an example. We consider a dataset with hourly counts of rented bikes in a bike-sharing system.
The bike-sharing data include the hourly counts of rented bikes in
Washington DC during 2011 to 2019 and were obtained from
\textit{www.capitalbikeshare.com}.
We have also joined this data with daily weather data
from a nearby station (BWI Marshall Airport).
The weather data was downloaded from
``Global Historical Climatology Network'' (\textit{https://www.ncdc.noaa.gov/}) and
contains the daily minimum and maximum temperature and precipitation total.
Figure \ref{fig:bikeshare_rawplot} shows the time series.
\begin{figure}
\caption{The raw time series (left) and
the trend by a moving average of 365 days
(right).}
\label{fig:bikeshare_rawplot}
\end{figure}
To build a Silverkite model, we will go over the components
and try to find out what are the
key components to be used in the model.
First, the Right Panel in Figure \ref{fig:bikeshare_rawplot}
shows the rolling mean of
the raw time series with a rolling window size of $24 \times 365$ (1 year).
We observe that the trend is roughly increasing before late 2017 and starts
to decrease after that.
There are some slight growth rate changes but overall the trend can be modeled
with a linear trend with changepoints.
\begin{figure}
\caption{Yearly seasonality of the bike-sharing data.}
\label{fig:bikeshare_yearly}
\end{figure}
\begin{figure}
\caption{Different weekly seasonality for cherry season (Mar 20-Apr 30) and non-cherry season.}
\label{fig:bikeshare_weekly_cherry}
\end{figure}
The seasonality components include different length of periods.
We mainly focus on yearly seasonality, weekly seasonality and daily seasonality,
because the other periods do not have apparent patterns or convincing seasonal reasons.
Figure \ref{fig:bikeshare_yearly}
shows the yearly seasonality which indicates that there are more rides during warm months.
From the ``mean'' line in Figure \ref{fig:bikeshare_weekly_cherry}, the weekly seasonality
is not very clear at first.
However, upon inspecting the weekly seasonality broken down by month,
we observe that the weekly seasonality in April differs significantly from other months.
The reason is that Washington DC has one of its most significant events
around April -- The Cherry Blossom Festival.
Figure \ref{fig:bikeshare_weekly_cherry} shows the contrast in
weekly seasonality pattern
between the Cherry Blossom Season (approximately March 20 -- April 30)
and the rest of the year.
This observation can be modeled using an interaction
between weekly seasonality and an indicator variable determining
if the observation is in Cherry Blossom Season or not.
\begin{figure}
\caption{Different daily seasonality for week-days and weekend.}
\label{fig:bikeshare_daily_weekend}
\end{figure}
The daily seasonality patterns are very significant but differ
on weekdays versus weekends. This is typical for time series that
involve human activity.
The daily seasonality effect interacting with weekend is shown in
Figure \ref{fig:bikeshare_daily_weekend}.
The figure suggests to differentiate between daily patterns
of weekdays and weekends.
This can be done in various ways, for example, by allowing for
an interaction between seasonal daily patterns
and a weekend indicator.
Alternatively we could consider a sufficiently
large number of Fourier series terms for
a weekly pattern (or a combination of both).
The holiday effect is hard to see from component plots,
but from Figure \ref{fig:bikeshare_yearly},
we can see there are some small dips that are due to holidays.
In summary, the models will include the following components
\begin{itemize}
\item CT (continuous time to model the long-term trends)
\item changepoints
\item yearly seasonality
\item weekly seasonality and its interaction with Cherry Blossom Season
\item daily seasonality : weekend interaction
\item holidays
\item weather regressors
\item autoregression
\end{itemize}
First we consider a forecast horizon of $24 \times 14$ (2 weeks).
Because the number of rides is always non-negative,
we clip negative values at zero.
A linear growth is used as long-term trend function.
For trend changepoints,
we create a grid of potential changepoints every 15 days but skip the last 30 days
(to avoid detecting artificial change points toward the end of the series).
A yearly seasonality of order 15 is used in the changepoint detection algorithm.
For the Lasso problem,
it's easy to verify by KKT conditions that
$\|\boldsymbol{X}^T\boldsymbol{y}\|_{\infty}/n$ is the minimal
tuning parameter that corresponds to no non-zero coefficients.
The regularization parameter, we use in detecting changepoints is $10^{-3}$ of that value,
which provides moderate trend changepoints.
An aggregation of 3 days is used for the changepoint detection aggregation process
(Algorithm \ref{alg:trend}).
For seasonality, we use order 15, 3 and 12 for yearly,
weekly and daily seasonality, respectively.
Because the volatility changes over time as well,
we also introduce seasonality changepoints,
which allows seasonality components to refit after changes, however,
seasonality changepoints are not allowed within the last 365 days of data,
to avoid a potentially poor fit of yearly seasonality.
A list of common holidays together with their plus/minus 1 day were created as separate indicators.
We also include the two interaction groups we discussed above:
weekly seasonality and Cherry-Blossom Season indicator;
daily seasonality and weekend indicator.
Figure \ref{fig:bikeshare_forecast} compares
the forecast with the actual data during the test period,
which is located at the end of the time series.
We can see that with our daily seasonality and weekend indicator interaction,
the model is able to capture the bimodal daily seasonality on weekdays
and unimodal daily seasonality on weekends.
Figure \ref{fig:bikeshare_trend_change}
shows the detected trend and seasonality changepoints and the estimated trend in the model.
It aligns with our expectations.
Figure \ref{fig:bikeshare_forecast_holiday}
shows the forecast versus the actual data around Thanksgiving 2018.
From the figure, we see the holiday effect is picked up by the model,
by observing that the number of rides is lower compared to regular
non-holiday number of rides on the same weekday.
Moreover, the interaction between whether
it is weekend and daily seasonality also captures the uni-modal shape
on these holidays (non-workdays).
A gap in prediction happens on Nov 15 in the same plot.
The reason is that Washington DC was hit with its biggest November snowfall in 29 years,
and people were not yet ready for commuting by bike in the snow.
We weren't explicitly capturing this effect so there is the gap,
however, any knowledge-based observations can be modeled with extra regressors.
Autoregressive components are useful in picking up remaining trends which are not
explained by seasonality, growth, events and change points.
The effect of autoregression cannot be easily observed from figures.
A cross-validation study is used to demonstrate its usefulness.
The cross-validation includes 20 folds.
The validation folds are taken from the end of the data set,
with a 2-week window between each fold.
The forecast horizon is set to 24 hours for short-term use case.
From the cross-validation study, we have an RMSE of 102.3
when including autoregressive components,
compared to 123.0 when not including autoregressive components.
Let $Y(t)$ be a time point in the forecast phase,
the autoregressive terms used include
$Y(t-24)$, $Y(t-25)$, $Y(t-26)$
as autoregressive terms.
They also include these three aggregated terms:
\[AVG(Y(t); 7 \times 24, 14 \times 24, 21 \times 24),\]
which is taking an average of observed values in the same hour of last three weeks.
\[AVG(Y(t); 24, 25, \cdots, 191),\]
which is the average of the last 7 days;
and
\[AVG(Y(t); 192, 193, \cdots, 359)\]
which is the average of the week prior to last.
These are the default choices (for hourly data with forecast horizon of 24)
in our model and are not optimized
for this use case in particular.
However, the user can use grid search to optimize these choices further.
Note that the minimum lag used in the model is 24 which is
the forecast horizon. This will assure that, at prediction time,
all the lags used in the model to forecast are observed and not simulated.
This sometimes can help with accuracy, and is also beneficial in terms of
speed as we do not need to perform simulations at the prediction phase
(to fill in the lags needed into the future).
It is worth mentioning however, that the optimality of such choices
depend on underlying use case and user should experiment with various
models.
\begin{figure}
\caption{Forecast versus actual for Silverkite on bike-sharing data
during test period.}
\label{fig:bikeshare_forecast}
\end{figure}
\begin{figure}
\caption{Detected trend and seasonality changepoints for bike-sharing data.}
\label{fig:bikeshare_trend_change}
\end{figure}
\begin{figure}
\caption{Forecast versus actual for Silverkite
on bike-sharing data during Thanksgiving.}
\label{fig:bikeshare_forecast_holiday}
\end{figure}
\section{Assessment and Benchmarking}
\label{sect:assessment}
This section presents details on appropriate methods to assess the performance of
forecasting algorithms in terms of accuracy.
To estimate how accurately a forecasting model (e.g.\; Silverkite) performs in practice,
we use cross-validation (CV).
Cross-validation is a technique for assessing how
the results of a predictive model generalizes to
new data. In the time series context the new data refers to values in the future. Below
we describe an assessment method which is appropriate for forecasting applications.
We benchmark the prediction accuracy of Silverkite against
that of popular state of the art algorithms such as
Auto-Arima and (Facebook) Prophet.
The prediction accuracy is measured by
Mean Absolute Percentage Error (MAPE) which is defined as
\begin{equation}
\text{MAPE}(Y) = \frac{1}{T}\sum_{t=1}^T \Bigg \lvert \frac{Y(t) - \hat{Y}(t)}{Y(t)} \Bigg \rvert
\end{equation}
\end{equation}
MAPE is more popular in applications as compared to RMSE (Root Mean Square Error), as it
provides a relative (scale-free) measure of error compared to the observation.
We use a rolling window CV for our benchmarking,
which closely resembles the well known $K$-fold CV method.
In $K$-fold CV, the original data is
randomly partitioned into $K$ equal sized subsamples.
A single subsample is held out as the validation data,
and the model is trained on the remaining $(K-1)$ subsamples (\cite{book-hastie-2009}).
The trained model is used to predict on the held-out validation set.
This process is repeated $K$ times so that each of the $K$ subsamples
is used exactly once as the validation set.
Average testing error across all the $K$ iterations provides
an unbiased estimate of the true testing error of the machine learning (ML) model on the data.
Due to the temporal dependency in time-series data the standard $K$-fold CV is not
appropriate.
Choosing a hold-out set randomly has two fundamental issues in time series context:
\begin{enumerate}
\item Future data is utilized to predict the past.
\item Some time series models can not be trained realistically with a random sample,
e.g. the autoregressive models due to missing lags.
\end{enumerate}
\begin{figure}
\caption{Fold structure for a Rolling Window Cross Validation.
Expanding window configuration shown,
where each training set has the same train start date.}
\label{fig:benchmark_cv}
\end{figure}
Rolling window CV addresses this by creating a series of $K$ test sets.
For each test set, the observations prior to the test set are used as
a training set.
Within each training set, a series of CV folds are created, each
containing a validation set.
For time consideration, we choose to create a single CV fold for
each training set.
Number of data points in every test and validation set equals forecast
horizon (CV horizon in Table \ref{tab:benchmark-cv}).
Observations that occur prior to that of the validation set
are used to train the models for the corresponding CV fold (Figure \ref{fig:benchmark_cv}).
Thus, no future observations can be used in constructing the forecast,
either in validation or testing phase. The parameters minimizing
average error on the validation sets are chosen.
This model is then retrained on the training data for the corresponding benchmark (BM) fold.
The average error across all test sets provides a robust estimate of the model performance
with this forecast horizon.
\input{tables/tables_benchmark_cv.tex}
All the models are run on the $2$ different forecast horizons (1 day and 7 day) for daily data sets (Table \ref{tab:benchmark-cv}).
These horizons roughly represent short-term, average-term forecasts
for the corresponding frequency.
We plan to publish more benchmarking results on more data sets, more frequencies (e.g.\; hourly, weekly)
and more time horizons in the future.
We require the datasets to have at least $2$ years worth of training data
so that the models can accurately estimate yearly seasonality patterns.
The number of periods between successive test sets and total number of
splits are chosen for each frequency to ensure the following:
\begin{enumerate}
\item The predictive performance of the models is measured over a year
to ensure that cumulatively the test sets represent real data across
time properties e.g.\;seasonality, holidays etc.
For daily data, $\text{periods between splits} (25)*\text{max splits}(16) = 400 > 365$,
hence the models are tested over a year.
\item The test sets are completely randomized in terms of time features.
For daily data, setting ``periods between splits'' to any multiple of $7$
results in the training and test set always ending on the same day of the week.
This lack of randomization would have produced a biased estimate
of the prediction performance.
Similarly setting it to a multiple of $30$ has the same problem for day of month.
A gap of $25$ days between test sets ensures that no such
confounding factors are present.
\item Minimize total computation time while maintaining the previous points.
For daily data, setting ``periods between splits'' to $1$ and maximum number of splits to
$365$ is a more thorough CV procedure.
But it increases the total computation time $25$ fold and hence is avoided.
We chose consistent benchmark settings suitable for all algorithms, including
the slower ones.
\end{enumerate}
We have used the out-of-the-box configurations for Auto-Arima, (Facebook) Prophet, and Silverkite.
Silverkite uses a ridge regression to fit the model and contains
linear growth,
appropriate seasonality (e.g.\;quarterly, monthly and yearly seasonality for daily data),
automatic changepoint detection,
holiday effects, autoregression, and daily and weekly seasonality
interaction terms with trend and changepoints.
The benchmarking was run on three datasets for every forecast
horizon.
\begin{itemize}
\item Peyton-Manning Dataset from fbprophet package (Facebook Prophet)
\item Daily Australia Temperature Dataset, Temperature column
\item Beijing PM2.5 Dataset
\end{itemize}
For consistency, no regressors were used for any dataset.
The entire process is executed on a system equipped
with 64 GB RAM and Intel Xeon Silver 4108 CPU @1.80 GHz.
The CPU has 8 cores, each with 2 threads.
The average
test MAPE and runtime across datasets are summarized in
Table \ref{tab:benchmark-summary}.
We can see that the default Silverkite MAPE is on par
with Auto-Arima in short-term forecasts, outperforms other algorithms in average-term forecasts.
However, Silverkite has a
clear speed advantage over Prophet. This makes prototyping quicker in Silverkite
and aids in building a customized more accurate model.
Note that the average test MAPE values are high due to values close to 0 in the Beijing PM2.5 dataset.
These are a few benchmarks on public datasets.
We plan to include more datasets, forecast horizons, and data frequencies that better match our industry applications and publish those results in the future.
\input{tables/tables_benchmark_summary.tex}
\section{Discussion}
\label{sect:discussion}
This paper introduced a flexible framework for forecasting
which is designed for scalable and reliable forecasting
in production environments (Section \ref{sect:assessment}).
We showed how this design helps in generating flexible and interpretable forecasts
as well as volatility estimates.
As a particular example, the ability to use regularization algorithms as the
training algorithm of the mean component, allows us to accommodate complex patterns in the
model via feature interactions.
Having a separate volatility model, allows us to ensure fast speeds in production environments
where updating the forecast for many series might be needed.
However, this separation also helps in avoiding issues such as divergence of the simulated
series which are a common issue when using integrated models
(see \cite{hosseini-takemura-2015} and \cite{hosseini-bk-2020}).
The speed in fitting the model is also key in variable selection as many models can be fit to
optimize the choice of the component and parameters e.g.\;the Fourier series order for various time-scales
or the auto-regressive component complexity.
\section{Appendix}
\label{sect:appendix}
\begin{comment}
This section contains additional details about the model.
\subsection{Assessment and Benchmarking}
\label{subsect:appendix-benchmarking}
Additional details about the cross-validation model
and Silverkite model configuration used for Benchmarking are provided here.
The default Silverkite configurations, referred to as templates,
vary based on data frequency and forecast horizon (Table \ref{tab:benchmark-silverkite-config}).
These templates can be a single template or a multi template.
A multi template is a group of 3 or 4 single templates.
The model components of a single template is encoded via a string.
For example, the template for daily datasets with forecast horizon $1$ is
``DAILY\_SEAS\_NMQM\_GR\_LINEAR\_CP\_NM\_HOL\_SP2\_FEASET\_AUTO\_ALGO\_
RIDGE\_AR\_AUTO\_DSI\_AUTO\_WSI\_AUTO''.
This string can be read as follows:
\begin{itemize}
\item DAILY: Data frequency is daily
\item SEAS\_NMQM: Seasonality terms with "NMQM" configuration. For daily data it contains yearly, quarterly, monthly and weekly seasonalities with "Normal" configuration
\item GR\_LINEAR: Growth term is linear
\item CP\_NM: Changepoint with "Normal" configuration
\item HOL\_SP2: Holidays with "SP2" configuration
\item FEASET\_AUTO: Feature set with "auto" configuration
\item ALGO\_RIDGE: Algorithm is Ridge
\item AR\_AUTO: Autoregression with "auto" configuration
\item DSI\_AUTO: Daily seasonality interaction with "auto" configuration
\item WSI\_AUTO: Weekly seasonality interaction with "auto" configuration.
\end{itemize}
For details on the configurations check the Silverkite open-source package
(link to simple silverkite template configs). Holiday component of the
default configurations is overwritten to include the most impactful holidays
and countries for general time series data. The holidays include New Year's Day,
Chinese New Year, Christmas Day, Independence Day, Thanksgiving, Labor Day,
Good Friday, Easter Monday, Memorial Day and Veterans Day. The country list
contains UnitedStates, UnitedKingdom, India, France and China.
\input{tables/tables_benchmark_silverkite_config.tex}
\end{comment}
\subsection{Solving the Mixed Regularization Problem}
\label{subsect:mix-regularization}
In this subsection,
we derive the two-step solution for the mixed penalty regression problem.
Without loss of generality,
we consider all weights equals 1, otherwise,
the same formulation can be obtained with a re-scale on the design matrix
and on the estimated coefficients.
Consider the regression problem
$$\hat{\boldsymbol{\beta}}_0,\hat{\boldsymbol{\beta}}_1,\hat{\boldsymbol{\beta}}_2=\argmin_{\boldsymbol{\beta}_0,\boldsymbol{\beta}_1,\boldsymbol{\beta}_2}\|\boldsymbol{y} - \boldsymbol{X}_0\boldsymbol{\beta}_0 - \boldsymbol{X}_1\boldsymbol{\beta}_1 - \boldsymbol{X}_2\boldsymbol{\beta}_2\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2$$
Let
$$\boldsymbol{X}_{02} = [\boldsymbol{X}_0,\boldsymbol{X}_2]$$
$$\boldsymbol{\beta}_{02}=[\boldsymbol{\beta}_0^T,\boldsymbol{\beta}_2^T]^T$$
$$H_{02} = \boldsymbol{X}_{02}\left(\boldsymbol{X}_{02}^T\boldsymbol{X}_{02}\right)^{-1}\boldsymbol{X}_{02}^T$$
$$H_{\lambda02} = \boldsymbol{X}_{02}\left(\boldsymbol{X}_{02}^T\boldsymbol{X}_{02}+\lambda_2\boldsymbol{D}\right)^{-1}\boldsymbol{X}_{02}^T$$
where $\boldsymbol{D}$ is the identity matrix with the first $m$ diagonal entries set equal to zero,
and $m$ is the number of columns in $\boldsymbol{X}_0$. It is easy to verify that
$$H_{02}\boldsymbol{X}_0=\boldsymbol{X}_0$$
$$H_{02}\boldsymbol{X}_2=\boldsymbol{X}_2$$
We have:
\begin{align*}
&\hat{\boldsymbol{\beta}}_0,\hat{\boldsymbol{\beta}}_1,\hat{\boldsymbol{\beta}}_2\\
=&\argmin_{\boldsymbol{\beta}_0,\boldsymbol{\beta}_1,\boldsymbol{\beta}_2}\|\boldsymbol{y} - \boldsymbol{X}_0\boldsymbol{\beta}_0 - \boldsymbol{X}_1\boldsymbol{\beta}_1 - \boldsymbol{X}_2\boldsymbol{\beta}_2\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2\\
=&\argmin_{\boldsymbol{\beta}_0,\boldsymbol{\beta}_1,\boldsymbol{\beta}_2}\|H_{02}(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1) - \boldsymbol{X}_0\boldsymbol{\beta}_0 - \boldsymbol{X}_2\boldsymbol{\beta}_2 + (\boldsymbol{I}-H_{02})(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2\\
=&\argmin_{\boldsymbol{\beta}_0,\boldsymbol{\beta}_1,\boldsymbol{\beta}_2}\|H_{02}(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1) - \boldsymbol{X}_{02}\boldsymbol{\beta}_{02}\|_2^2 + \|(\boldsymbol{I}-H_{02})(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2\\
=&\argmin_{\boldsymbol{\beta}_1}\argmin_{\boldsymbol{\beta}_{02}|\boldsymbol{\beta}_1}\|H_{02}(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1) - \boldsymbol{X}_{02}\boldsymbol{\beta}_{02}\|_2^2 + \|(\boldsymbol{I}-H_{02})(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2
\end{align*}
We have
\begin{align*}
\hat{\boldsymbol{\beta}}_{02}|\boldsymbol{\beta}_1=&\argmin_{\boldsymbol{\beta}_{02}|\boldsymbol{\beta}_1}\|H_{02}(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1) - \boldsymbol{X}_{02}\boldsymbol{\beta}_{02}\|_2^2 + \|(\boldsymbol{I}-H_{02})(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2\\
=&\argmin_{\boldsymbol{\beta}_{02}|\boldsymbol{\beta}_1}\|H_{02}(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1) - \boldsymbol{X}_{02}\boldsymbol{\beta}_{02}\|_2^2 + \lambda_2\|\boldsymbol{\beta}_2\|_2^2\\
=&\left(\boldsymbol{X}_{02}^T\boldsymbol{X}_{02}+\lambda_2\boldsymbol{D}\right)^{-1}\boldsymbol{X}_{02}^T(\boldsymbol{y}-\boldsymbol{X}_1\boldsymbol{\beta}_1)
\end{align*}
Plugging back into the original equation, we have
\begin{align*}
\hat{\boldsymbol{\beta}}_1&=\argmin_{\boldsymbol{\beta}_1}\|(H_{02}-H_{\lambda02})(\boldsymbol{y}-\boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2+\|(\boldsymbol{I}-H_{02})(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1+\lambda_2\|\boldsymbol{\beta}_2\|_2^2\\
&=\argmin_{\boldsymbol{\beta}_1}\|(\boldsymbol{I}-H_{\lambda02})(\boldsymbol{y} - \boldsymbol{X}_1\boldsymbol{\beta}_1)\|_2^2 + \lambda_1\|\boldsymbol{\beta}_1\|_1\\
&=\argmin_{\boldsymbol{\beta}_1}\|(\boldsymbol{I}-H_{\lambda02})\boldsymbol{y} - (\boldsymbol{I}-H_{\lambda02})\boldsymbol{X}_1\boldsymbol{\beta}_1\|_2^2+\lambda_1\|\boldsymbol{\beta}_1\|_1
\end{align*}
This can be solved with the conventional Lasso algorithm with:
$$X=(\boldsymbol{I}-H_{\lambda02})\boldsymbol{X}_1$$
$$y=(\boldsymbol{I}-H_{\lambda02})\boldsymbol{y}$$
\end{document} |
\begin{document}
\begin{frontmatter}
\author{Jian Wang}
\author{Yong Wang\corref{cor2}}
\ead{wangy581@nenu.edu.cn}
\cortext[cor2]{Corresponding author}
\address{School of Mathematics and Statistics, Northeast Normal University,
Changchun, 130024, P.R.China}
\title{On the Geometry of Tangent Bundles \\
with the Rescaled Metric}
\begin{abstract}
For a Riemannian manifold $M$, we determine some curvature properties of a tangent bundle equipped with the rescaled metric.
The main aim of this paper is to give explicit formulae for the rescaled metric on $TM$, and investigate the geodesics on the
tangent bundle with respect to the rescaled Sasaki metric.
\end{abstract}
\begin{keyword}
Tangent bundle; Rescaled Sasaki metric; Rescaled Cheeger-Gromoll metric; Geodesics.
\end{keyword}
\end{frontmatter}
\section{Introduction}
\label{1}
Tangent bundles of differentiable manifolds are of great importance in many areas of mathematics and physics.
Geometry of the tangent bundle $TM$ of a Riemannian manifold $(M,g)$ with the metric $\bar{g}$ defined by Sasaki in \cite{Sa} had
been studied by many authors. Its construction is based on a natural splitting of the tangent bundle $TTM$ of $TM$ into its
vertical and horizontal subbundles by means of the Levi-Civita connection $\nabla$ on $(M,g)$. The Levi-Civita connection
$\hat{\nabla}$ of the Sasaki metric on $TM$ and its Riemannian curvature tensor $\hat{R}$ were calculated by Kowalski in \cite{Ko}.
With this in hand, the authors derived interesting connections between the geometric properties of $(M,g)$ and $(TM,\hat{g})$ in \cite{Ko}
and \cite{MT}. In \cite{MT}, the authors proved that the Sasaki metric on $TM$ is rather rigid under the condition that the scalar curvature
of $(TM,\bar{g})$ is constant.
Another metric nicely fitted to the tangent bundle is the so-called Cheeger-Gromoll metric in \cite{CG}. This can be used to obtain a
natural metric $\tilde{g}$ on the tangent bundle $TM$ of a given Riemannian manifold $(M,g)$. It was expressed more explicitly by
Musso and Tricerri in \cite{MT}. In \cite{Se}, Sekizawa calculated the Levi-Civita connection $\tilde{\nabla}$ and the curvature tensor
$\tilde{R}$ of the tangent bundle $(TM,\tilde{g})$ equipped with the Cheeger-Gromoll metric. Gudmundsson and Kappos derived correct
relations between the geometric properties of $(M,g)$ and $(TM,\tilde{g})$ in \cite{GK1}. In \cite{GK2}, explicit formulae for the
Cheeger-Gromoll metric on $TM$ were given. The motivation of this paper is to study the geometry of tangent bundles with the
rescaled Sasaki and Cheeger-Gromoll metrics.
This paper is organized as follows: In Section 2, for a Riemannian manifold $(M, g)$, we introduce a natural class of rescaled metrics.
In Section 3, we calculate its Levi-Civita connection, its Riemann curvature tensor associated to the rescaled Sasaki metric.
In Section 4, we investigate geodesics on the tangent bundle with respect to the rescaled Sasaki metric. The main purpose of Section 5
is to obtain some interesting connections between the geometric properties of the manifold $(M, g)$ and its tangent bundle equipped
with the rescaled Cheeger-Gromoll metric.
\section{Natural Metrics}
In this section we introduce a natural class of rescaled metrics on the tangent bundle $TM$ of a given Riemannian manifold $(M, g)$. This class contains
both the rescaled Sasaki and rescaled Cheeger-Gromoll metrics studied later on.
Throughout this paper we shall assume that $M$ is a smooth $m-$dimensional manifold with maximal atlas
$\mathcal{A}=\{(U_{\alpha},x_{\alpha})|\alpha\in I\}$. For a point $p\in M$, let $T_{p}M$ denote the tangent space of $M$ at $p$.
For local coordinates $(U,x)$ on $M$ and $p\in U$ we define $(\frac{\partial}{\partial x_{k}})_{p}\in T_{p}M$ by
\begin{equation}
(\frac{\partial}{\partial x_{k}})_{p}:f \mapsto \frac{\partial f}{\partial x_{k}}(p)=\partial_{e_{k}}(f\circ x^{-1})(x(p))
\end{equation}
where $\{e_{k}|k=1,\ldots,m\}$ is the standard basis of $\mathbb{R}^{m}$. Then
$\{(\frac{\partial}{\partial x_{k}})_{p}|k=1,\ldots,m\}$ is a basis for $T_{p}M$. The set $TM=\{(p,u)|p\in M,u\in T_{p}M\}$ is called the
tangent bundle of $M$ and the bundle map $\pi:TM\rightarrow M$ is given by $\pi:(p,u)\mapsto p$.
As a direct consequence of Theorem 2.1 in \cite{GK2} we see that the bundle map $\pi:TM\rightarrow M$ is smooth. For each point $p\in M$
the fiber $\pi^{-1}(p)$ is the tangent space $T_{p}M$ of $M$ at $p$ and hence an $m$-dimensional vector space. For local coordinates
$(U,x)\in\mathcal{A}$ we define $\bar{x}:\pi^{-1}(U)\rightarrow U\times \mathbb{R}^{m}$ by
\begin{equation}
\bar{x}:(p,\sum_{k=1}^{m}u_{k}\frac{\partial}{\partial x_{k}}|_{p})\mapsto \big(p,(u_{1},\ldots,u_{m})\big).
\end{equation}
The restriction $\bar{x}_{p}=\bar{x}|_{T_{p}M}:T_{p}M\rightarrow \{p\}\times\mathbb{R}^{m}$ to the tangent space $T_{p}M$ is given by
\begin{equation}
\bar{x}_{p}:\sum_{k=1}^{m}u_{k}\frac{\partial}{\partial x_{k}}|_{p}\mapsto (u_{1},\ldots,u_{m})
\end{equation}
so it is obviously a vector space isomorphism. This implies that $\bar{x}:\pi^{-1}(U)\rightarrow U\times \mathbb{R}^{m}$ is
a bundle chart for $TM$. This implies that
\begin{equation}
\mathcal{B}=\{\big(\pi^{-1}(U),\bar{x}\big)|(U,x)\in\mathcal{A}\}
\end{equation}
is a bundle atlas transforming $(TM,M,\pi)$ into an $m$-dimensional topological vector bundle. Since the
manifold $(M,\mathcal{A})$ is smooth the vector bundle $(TM,M,\pi)$ together with the maximal bundle atlas
$\hat{\mathcal{B}}$ induced by $\mathcal{B}$ is a smooth vector bundle.
\begin{defn}
Let $(M, g)$ be a Riemannian manifold. Let $f>0$ and $f\in C^{\infty}(M)$; in particular, when $f=1$, $\bar{g}^{1}=\bar{g}$.
A Riemannian rescaled metric $\bar{g}^{f}$ on the
tangent bundle $TM$ is said to be natural with respect to $g$ on $M$ if
\begin{eqnarray}
i) \ \bar{g}^{f}_{(p,u)}(X^{h},Y^{h})&=&f(p) g_{p}(X,Y),\\
ii) \ \bar{g}^{f}_{(p,u)}(X^{h}, Y^{v})&=&0
\end{eqnarray}
for all vector fields $X, Y\in C^{\infty}(TM)$ and $(p, u)\in TM.$
\end{defn}
A rescaled natural metric $\bar{g}^{f}$ is constructed in such a way that the vertical and horizontal subbundles are orthogonal and the bundle map
$\pi: (TM, \bar{g}^{f})\rightarrow (M, f g)$ is a Riemannian submersion. The rescaled metric $\bar{g}^{f}$ induces a norm on each tangent space of $TM$
which we denote
by $\parallel \cdot \parallel$.
\begin{lem}\label{le:22}
Let $(M, g)$ be a Riemannian manifold and $TM$ be the tangent bundle of $M$. Let $f>0$ and $f\in C^{\infty}(M)$. If the rescaled Riemannian metric
$\bar{g}^{f}$ on $TM$ is natural with respect to $g$ on $M$ then the corresponding Levi-Civita connection $\overline{\nabla}^{f}$ satisfies
\begin{eqnarray}
i) \ \bar{g}(\overline{\nabla}^{f}_{X^{h}}Y^{h}, Z^{h})&=&\frac{1}{2f}\Big(X(f)g(Y, Z)+Y(f)g(Z, X)-Z(f)g(X, Y)\Big)+g(\nabla_{X}Y,Z),\\
ii) \ \bar{g}(\overline{\nabla}^{f}_{X^{h}}Y^{h}, Z^{v})&=&-\frac{1}{2}\bar{g}\Big((R(X, Y)u)^{v}, Z^{v}\Big) ,\\
iii) \ \bar{g}(\overline{\nabla}^{f}_{X^{h}}Y^{v}, Z^{h})&=&\frac{1}{2f}\bar{g}\Big((R(X, Z)u)^{v}, Y^{v}\Big),\\
iv) \ \bar{g}(\overline{\nabla}^{f}_{X^{h}}Y^{v}, Z^{v})&=&\frac{1}{2}\Big(X^{h}(\bar{g}(Y^{v}, Z^{v}))-\bar{g}(Y^{v}, (\nabla_{X}Z)^{v})
+\bar{g}(Z^{v}, (\nabla_{X}Y)^{v})\Big),
\end{eqnarray}
\begin{eqnarray}
v) \ \bar{g}(\overline{\nabla}^{f}_{X^{v}}Y^{h}, Z^{h})&=&\frac{1}{2f}\bar{g}\Big((R(Y, Z)u)^{v}, X^{v}\Big),\\
vi) \ \bar{g}(\overline{\nabla}^{f}_{X^{v}}Y^{h}, Z^{v})&=&\frac{1}{2}\Big(Y^{h}(\bar{g}(Z^{v}, X^{v}))-\bar{g}(X^{v}, (\nabla_{Y}Z)^{v})
-\bar{g}(Z^{v},(\nabla_{Y}X)^{v})\Big),\\
vii) \ \bar{g}(\overline{\nabla}^{f}_{X^{v}}Y^{v}, Z^{h})&=&\frac{1}{2f}\Big(-Z^{h}(\bar{g}(X^{v}, Y^{v}))+\bar{g}(Y^{v}, (\nabla_{Z}X)^{v})
+\bar{g}(X^{v},(\nabla_{Z}Y)^{v})\Big),\\
viii) \ \bar{g}(\overline{\nabla}^{f}_{X^{v}}Y^{v}, Z^{v})&=&\frac{1}{2}\Big(X^{v}(\bar{g}(Y^{v}, Z^{v}))+Y^{v}(\bar{g}(Z^{v}, X^{v}))
-Z^{v}(\bar{g}(X^{v},Y^{v}))\Big)
\end{eqnarray}
for all vector fields $X, Y, Z\in C^{\infty}(TM)$ and $(p, u)\in TM.$
\end{lem}
\begin{proof}
We shall repeatedly make use of the Koszul formula for the Levi-Civita connection $\overline{\nabla}^{f}$ stating that
\begin{eqnarray}
2\bar{g}^{f}(\overline{\nabla}^{f}_{X^{i}}Y^{j}, Z^{k})&=&X^{i}(\bar{g}^{f}(Y^{j},Z^{k}))+Y^{j}(\bar{g}^{f}(Z^{k}, X^{i}))
-Z^{k}(\bar{g}^{f}(X^{i},Y^{j})) \nonumber\\
&&-\bar{g}^{f}(X^{i},\ [Y^{j}, Z^{k}])+\bar{g}^{f}(Y^{j},[Z^{k},X^{i}])
+\bar{g}^{f}(Z^{k},\ [X^{i},Y^{j}])
\end{eqnarray}
for all vector fields $X, Y, Z\in\mathcal{C}^{\infty}(TM)$ and $i, j, k\in \{h, v\}.$
$i)$ The result is a direct consequence of the following calculations using Definition 2.1 and Proposition 5.1 in \cite{GK2},
\begin{eqnarray}
2\bar{g}^{f}(\overline{\nabla}^{f}_{X^{h}}Y^{h}, Z^{h})&=&X^{h}(\bar{g}^{f}(Y^{h}, Z^{h}))+Y^{h}(\bar{g}^{f}(Z^{h}, X^{h}))
-Z^{h}(\bar{g}^{f}(X^{h}, Y^{h}))\nonumber\\
&&-\bar{g}^{f}(X^{h}, [Y^{h}, Z^{h}])+\bar{g}^{f}(Y^{h}, [Z^{h}, X^{h}])
+\bar{g}^{f}(Z^{h}, [X^{h}, Y^{h}])\nonumber\\
&=&X^{h}(f g(Y,Z)\circ\pi)+Y^{h}(f g(Z, X)\circ\pi)
-Z^{h}(f g(X, Y)\circ\pi)\nonumber\\
&&-\bar{g}^{f}(X^{h}, [Y, Z]^{h})+\bar{g}^{f}(Y^{h}, [Z, X]^{h})
+\bar{g}^{f}(Z^{h},[X, Y]^{h})\nonumber\\
&=& X(f)g(Y, Z)+Y(f)g(Z, X)-Z(f)g(X, Y)+2f g(\nabla_{X}Y, Z).
\end{eqnarray}
$ii)$ The statement is obtained as follows.
\begin{eqnarray}
2\bar{g}^{f}(\overline{\nabla}^{f}_{X^{h}}Y^{v}, Z^{h})&=&X^{h}(\bar{g}^{f}(Y^{h},Z^{v}))+Y^{h}(\bar{g}^{f}(Z^{v}, X^{h}))
-Z^{v}(\bar{g}^{f}(X^{h}, Y^{h})) \nonumber\\
&&-\bar{g}^{f}(X^{h},[Y^{h}, Z^{v}])+\bar{g}^{f}(Y^{h}, [Z^{v}, X^{h}])
+\bar{g}^{f}(Z^{v}, [X^{h}, Y^{h}]) \nonumber\\
&=&-Z^{v}(f g(X, Y))+\bar{g}^{f}(Z^{v}, [X^{h},Y^{h}]) \nonumber\\
&=&-\bar{g}^{f}(Z^{v},(R(X, Y)u)^{v})
\end{eqnarray}
$iii)$ and $v)$ are analogous to $ii)$.
$iv)$ Again using Definition 2.1 and Proposition 5.1 in \cite{GK2} we get
\begin{eqnarray}
2\bar{g}^{f}(\overline{\nabla}^{f}_{X^{h}}Y^{v}, Z^{v})&=&X^{h}(\bar{g}^{f}(Y^{v}, Z^{v}))+Y^{v}(\bar{g}^{f}(Z^{v}, X^{h}))
-Z^{v}(\bar{g}^{f}(X^{h}, Y^{v})) \nonumber\\
&&-\bar{g}^{f}(X^{h}, [Y^{v}, Z^{v}])+\bar{g}^{f}(Y^{v}, [Z^{v}, X^{h}])
+\bar{g}^{f}(Z^{v}, [X^{h}, Y^{v}]) \nonumber\\
&=&X^{h}(\bar{g}(Y^{v}, Z^{v}))-\bar{g}(Y^{v},(\nabla_{X}Z)^{v})
+\bar{g}(Z^{v}, (\nabla_{X}Y)^{v})
\end{eqnarray}
$vi)$ and $vii)$ are analogous to $iv)$.
$viii)$ The statement is a direct consequence of the fact that the Lie bracket of two vertical vector fields vanishes.
\end{proof}
\begin{cor}\label{co:23}
Let $(M, g)$ be a Riemannian manifold and $\bar{g}^{f}$ be a natural rescaled metric on the tangent bundle $TM$ of $M$.
Then the Levi-Civita connection
$\overline{\nabla}^{f}$ satisfies
\begin{equation}
(\overline{\nabla}^{f}_{X^{h}}Y^{h})_{(p, u)}=(\nabla^{f}_{X}Y)^{h}_{(p, u)}-\frac{1}{2}\Big(R(X, Y)u\Big)^{v}+
\frac{1}{2f(p)}\Big(X(f)Y+Y(f)X-g(X, Y)\circ\pi(\texttt{d}(f\circ\pi))^{*}\Big)^{h}_{p}
\end{equation}
for all vector fields $X, Y\in C^{\infty}(TM)$ and $(p, u)\in TM.$
\end{cor}
\begin{proof}
By Proposition 3.5 in \cite{GK2}, each tangent vector $Z\in T_{(p,u)}TM$ can be decomposed as $Z=Z^{h}_{1}+Z^{v}_{2}$.
Using $i)$ and $ii)$ of Lemma 2.2, we have
\begin{eqnarray}
\bar{g}(\overline{\nabla}^{f}_{X^{h}}Y^{h}, Z^{h}_{1}+Z^{v}_{2})&=&-\frac{1}{2}\bar{g}\Big((R(X, Y)u)^{v}, Z^{h}_{1}+Z^{v}_{2}\Big)
+g\Big((\nabla_{X}Y)^{h},Z^{h}_{1}+Z^{v}_{2}\Big)\nonumber\\
&&+\frac{1}{2f}\Big(X(f)g(Y^{h}, Z^{h}_{1}+Z^{v}_{2})+g((Y f X)^{h}, Z^{h}_{1}+Z^{v}_{2})\nonumber\\
&& -\langle g(X^{h}, Y^{h})\texttt{d}(f\circ\pi), Z^{h}_{1}+Z^{v}_{2}\rangle\Big) \nonumber\\
&=&(\nabla^{f}_{X}Y)^{h}-\frac{1}{2}\Big(R(X, Y)u\Big)^{v}+\frac{1}{2f}\Big(X(f)Y+Y(f)X\nonumber\\
&&-g(X, Y)\circ\pi(\texttt{d}(f\circ\pi))^{*}\Big)^{h}.
\end{eqnarray}
\end{proof}
\begin{defn}
Let $(M, g)$ be a Riemannian manifold and $F:TM\rightarrow TM$ be a smooth bundle endomorphism of the tangent bundle $TM$. Then we define
the vertical and horizontal lifts $F^{v}:TM\rightarrow TTM$, $F^{h}:TM\rightarrow TTM$ of $F$ by
\begin{equation}
F^{v}(\eta)=\sum_{i=1}^{m}\eta_{i}F(\partial_{i})^{v} \quad \text{and} \quad F^{h}(\eta)=\sum_{i=1}^{m}\eta_{i}F(\partial_{i})^{h},
\end{equation}
where $\sum_{i=1}^{m}\eta_{i}\partial_{i}\in\pi^{-1}(V)$ is a local representation of $\eta\in C^{\infty}(TM)$.
\end{defn}
\begin{lem}\label{le:25}
Let $(M, g)$ be a Riemannian manifold and the tangent bundle $TM$ be equipped with a rescaled metric $\bar{g}^{f}$ which is natural with respect to $g$
on $M$. If $F:TM\rightarrow TM$ is a smooth bundle endomorphism of the tangent bundle, then
\begin{eqnarray}
i) \ (\overline{\nabla}^{f}_{X^{v}}F^{v})_{\xi}&=&F(X_{p})^{v}_{\xi}+\sum_{i=1}^{m}u(x_{i})(\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{v})_{\xi},\\
ii) \ (\overline{\nabla}^{f}_{X^{v}}F^{h})_{\xi}&=&F(X_{p})^{h}_{\xi}+\sum_{i=1}^{m}u(x_{i})(\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{h})_{\xi},\\
iii) \ (\overline{\nabla}^{f}_{X^{h}}F^{v})_{\xi}&=&(\overline{\nabla}^{f}_{X^{h}}F(u)^{v})_{\xi},\\
iv) \ (\overline{\nabla}^{f}_{X^{h}}F^{h})_{\xi}&=&(\overline{\nabla}^{f}_{X^{h}}F(u)^{h})_{\xi},
\end{eqnarray}
for any $X\in C^{\infty}(TM)$, $\xi=(p, u)\in TM$ and $\eta=\sum_{i=1}^{m}\eta_{i}\partial_{i}\in\pi^{-1}(V)$.
\end{lem}
\begin{proof}
Let $(x_{1},\cdots,x_{m})$ be local coordinates on $M$ in a neighborhood $V$ of $p$. Then, using the abbreviation $X_{i}$ for
$\frac{\partial}{\partial x_{i}}$, we have $X^{v}(\texttt{d}x_{i})=\texttt{d}x_{i}(X)=X(x_{i})$ and $\texttt{d}x_{i}(p,u)=\eta_{i}(p)$
for $i\in\{1,\cdots,m\}$. Hence
\begin{eqnarray}
(\overline{\nabla}^{f}_{X^{v}}F^{v})_{\xi}&=&\sum_{i=1}^{m}\overline{\nabla}^{f}_{X^{v}}(\eta_{i}F(\partial_{i})^{v})
=\sum_{i=1}^{m}X^{v}(\texttt{d}x_{i})F(\partial_{i})^{v}
+\eta_{i}\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{v} \nonumber\\
&=&\sum_{i=1}^{m}X(x_{i})F(\partial_{i})^{v}
+\eta_{i}\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{v}
=F(X_{p})^{v}_{\xi}+\sum_{i=1}^{m}u(x_{i})(\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{v})_{\xi}.
\end{eqnarray}
Similarly we have
\begin{eqnarray}
(\overline{\nabla}^{f}_{X^{v}}F^{h})_{\xi}&=&\sum_{i=1}^{m}\overline{\nabla}^{f}_{X^{v}}(\eta_{i}F(\partial_{i})^{h})
=\sum_{i=1}^{m}X^{v}(\texttt{d}x_{i})F(\partial_{i})^{h}
+\eta_{i}\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{h} \nonumber\\
&=&\sum_{i=1}^{m}X(x_{i})F(\partial_{i})^{h}
+\eta_{i}\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{h}
=F(X_{p})^{h}_{\xi}+\sum_{i=1}^{m}u(x_{i})(\overline{\nabla}^{f}_{X^{v}}F(\partial_{i})^{h})_{\xi}.
\end{eqnarray}
For the last two equations of the lemma we use a differentiable curve $\gamma: [0, 1]\rightarrow M$ such that $\gamma(0)=p$ and $\gamma'(0)=X_{p}$
to get a differentiable curve $U\circ\gamma: [0, 1]\rightarrow TM$ such that $U\circ\gamma(0)=\xi$ and $(U\circ\gamma)'(0)=X^{h}_{\xi}$. By the
definition of $F^{v}$ and $F^{h}$ we get
\begin{eqnarray}
F^{v}|_{U\circ\gamma(t)}&=&\sum_{i=1}^{m}\texttt{d}x_{i}F(\partial_{i})^{v}|_{U\circ\gamma(t)}
=\sum_{i=1}^{m}\texttt{d}x_{i}(U\circ\gamma(t))F(\partial_{i})^{v}|_{U\circ\gamma(t)} \nonumber\\
&=&F(\sum_{i=1}^{m}u(x_{i})_{p}e_{i})^{v}|_{U\circ\gamma(t)}=(F\circ U)^{v}|_{U\circ\gamma(t)}.
\end{eqnarray}
Similarly $F^{h}|_{U\circ\gamma}=(F\circ U)^{h}_{U\circ\gamma}$. This proves parts $iii)$ and $iv)$.
\end{proof}
\section{The Rescaled Sasaki Metric}
This section is devoted to the Sasaki metric $\hat{g}$ on the tangent bundle $TM$ introduced by Sasaki in the famous paper \cite{Sa}.
We calculate its Levi-Civita connection $\hat{\nabla}^{f}$, its Riemann curvature tensor and obtain some interesting connections between the
geometric properties of the manifold $(M, g)$ and its tangent bundle $(TM, \hat{g}^{f})$ equipped with the rescaled Sasaki metric.
\begin{defn}
Let $(M, g)$ be a Riemannian manifold. Let $f>0$ and $f\in C^{\infty}(M)$. Then the rescaled Sasaki metric $\hat{g}^{f}$ on the tangent
bundle $TM$ of $M$ is given by
\begin{eqnarray}
i) \ \hat{g}_{(p,u)}^{f}(X^{h}, Y^{h})&=&f(p)g_{p}(X, Y),\\
ii) \ \hat{g}_{(p,u)}^{f}(X^{v}, Y^{h})&=&0 ,\\
iii) \ \hat{g}_{(p,u)}^{f}(X^{v}, Y^{v})&=&g_{p}(X, Y)
\end{eqnarray}
for all vector fields $X, Y\in C^{\infty}(TM)$.
\end{defn}
The rescaled Sasaki metric is obviously contained in the class of rescaled $g$-natural metrics. It is constructed in such a manner that inner
products are respected not only by lifting vectors horizontally but vertically as well.
\begin{prop}\label{pr: 32}
Let $(M, g)$ be a Riemannian manifold and $\hat{\nabla}^{f}$ be the Levi-Civita connection of the tangent bundle $(TM, \hat{g}^{f})$ equipped with
the rescaled Sasaki metric. Then
\begin{eqnarray}
i) \ (\hat{\nabla}^{f}_{X^{h}}Y^{h})_{(p,u)}&=&(\nabla_{X}Y)^{h}_{(p,u)}+\frac{1}{2f(p)}
\Big((X(f)Y+Y(f)X)-g(X,Y)\circ\pi(\texttt{d}(f\circ\pi))^{*}\Big)^{h}_{p}\nonumber\\
&&-\frac{1}{2}\Big(R_{p}(X,Y)u\Big)^{v},\\
ii) \ (\hat{\nabla}^{f}_{X^{h}}Y^{v})_{(p,u)}&=&(\nabla_{X}Y)^{v}_{(p,u)}+\frac{1}{2f(p)}\Big(R_{p}(u,Y)X\Big)^{h},\\
iii) \ (\hat{\nabla}^{f}_{X^{v}}Y^{h})_{(p,u)}&=&\frac{1}{2f(p)}\Big(R_{p}(u,X)Y\Big)^{h},\\
iv) \ (\hat{\nabla}^{f}_{X^{v}}Y^{v})_{(p,u)}&=&0
\end{eqnarray}
for any $X,Y\in C^{\infty}(TM)$, $\xi=(p, u)\in TM$.
\end{prop}
\begin{proof}
$i)$ The statement is a direct consequence of Corollary 2.3.
$ii)$ By applying Lemma 2.2 we obtain the following for the horizontal part
\begin{eqnarray}
2\hat{g}^{f}(\hat{\nabla}^{f}_{X^{h}}Y^{v}, Z^{h})&=&-\hat{g}^{f}((R(Z,X)u)^{v},Y^{v})=-g(R(u,Y)Z,X)\nonumber\\
&=&g(R(u,Y)X,Z)=\frac{1}{f}\hat{g}^{f}((R(u,Y)X)^{h},Z^{h}).
\end{eqnarray}
As for the vertical part note that
\begin{eqnarray}
2\hat{g}^{f}(\hat{\nabla}^{f}_{X^{h}}Y^{v}, Z^{v})&=&X^{h}(\hat{g}^{f}(Y^{v},Z^{v}))+\hat{g}^{f}(Z^{v},(\nabla_{X}Y)^{v})
-\hat{g}^{f}(Y^{v},(\nabla_{X}Z)^{v}) \nonumber\\
&=&X(g(Y,Z))+g(Z,\nabla_{X}Y)-g(Y,\nabla_{X}Z) \nonumber\\
&=&2\hat{g}^{f}((\nabla_{X}Y)^{v},Z^{v}).
\end{eqnarray}
$iii)$ For the horizontal part we get calculations similar to those above
\begin{eqnarray}
2\hat{g}(\hat{\nabla}^{f}_{X^{v}}Y^{h}, Z^{h})&=&\frac{1}{f}\hat{g}(X^{v},(R(Y,Z)u)^{v})=\frac{1}{f}g(X,R(Y,Z)u)\nonumber\\
&=&\frac{1}{f}g(R(u,X)Y,Z).
\end{eqnarray}
The rest follows by
\begin{eqnarray}
2\hat{g}(\hat{\nabla}^{f}_{X^{v}}Y^{h}, Z^{v})&=&Y^{h}(\hat{g}(Z^{v},X^{v}))-\hat{g}(Z^{v},(\nabla_{Y}X)^{v})
-\hat{g}(X^{v},(\nabla_{Y}Z)^{v}) \nonumber\\
&=&Y(g(Z,X))-g(Z,\nabla_{Y}X)-g(X,\nabla_{Y}Z)=0.
\end{eqnarray}
$iv)$ Using Lemma 2.2 again we get
\begin{eqnarray}
2f\hat{g}(\hat{\nabla}^{f}_{X^{v}}Y^{v}, Z^{h})&=&-Z^{h}(\hat{g}(X^{v},Y^{v}))+\hat{g}(Y^{v},(\nabla_{Z}X)^{v})
+\hat{g}(X^{v},(\nabla_{Z}Y)^{v}) \nonumber\\
&=&-Z(g(X,Y))+g(Y,\nabla_{Z}X)+g(X,\nabla_{Z}Y)=0,
\end{eqnarray}
and
\begin{eqnarray}
2\hat{g}(\hat{\nabla}^{f}_{X^{v}}Y^{v}, Z^{v})&=&X^{v}(\hat{g}(Y^{v},Z^{v}))+Y^{v}(\hat{g}(Z^{v},X^{v}))
-Z^{v}(\hat{g}(X^{v},Y^{v})) \nonumber\\
&=&X^{v}(g(Y,Z))+Y^{v}(g(Z,X))-Z^{v}(g(X,Y))=0.
\end{eqnarray}
This completes the proof.
\end{proof}
We shall now turn our attention to the Riemann Curvature tensor $\hat{R}^{f}$ of the tangent bundle $TM$ equipped with the
rescaled Sasaki metric $\hat{g}^{f}$. For this we need the following useful Lemma.
\begin{lem}\label{le:33}
Let $(M, g)$ be a Riemannian manifold and $\hat{\nabla}^{f}$ be the Levi-Civita connection of the tangent bundle $(TM, \hat{g}^{f})$,
equipped with the rescaled Sasaki metric $\hat{g}^{f}$, and let $F:TM\rightarrow TM$ be a smooth bundle endomorphism of the tangent bundle. Then
\begin{equation}
(\hat{\nabla}^{f}_{X^{v}}F^{v})_{\xi}=F(X_{p})^{v}_{\xi},
\varepsilonnd{equation}
and
\begin{equation}
(\hat{\nabla}^{f}_{X^{v}}F^{h})_{\xi}=F(X_{p})^{h}_{\xi}+\frac{1}{2f(p)}\Big(R(u,X)F(u)\Big)^{h}_{\xi}
\varepsilonnd{equation}
for any $X\in C^{\infty}(TM)$ and $\xi=(p, u)\in TM$.
\varepsilonnd{lem}
\begin{proof}
By applying $i)$ of Lemma 2.5 and $iv)$ of Proposition 3.2 we obtain the following
\begin{equation}
(\hat{\nabla}^{f}_{X^{v}}F^{v})_{\xi}=F(X_{p})^{v}_{\xi}+\sum_{i=1}^{m}u(x_{i})(\overline{\nabla}^{f}_{X^{v}}F(\phiartial_{i})^{v})_{\xi}
=F(X_{p})^{v}_{\xi}.
\varepsilonnd{equation}
By applying $ii)$ of Lemma 2.5 and $iii)$ of Proposition 3.2, we get
\begin{equation}
(\hat{\nabla}^{f}_{X^{v}}F^{h})_{\xi}=F(X_{p})^{h}_{\xi}+(\overline{\nabla}^{f}_{X^{v}}F(u)^{h})_{\xi}
=F(X_{p})^{h}_{\xi}+\frac{1}{2f(p)}\Big(R(u,X)F(u)\Big)^{h}_{\xi}.
\varepsilonnd{equation}
\varepsilonnd{proof}
\begin{prop}\label{pr: 34}
Let $(M, g)$ be a Riemannian manifold and $\hat{R}^{f}$ be the Riemann curvature tensor of the tangent bundle $(TM, \hat{g}^{f})$ equipped with
the rescaled Sasaki metric. Then the following formulae hold
\begin{eqnarray}
i) \ \hat{R}^{f}_{(p,u)}(X^{v},Y^{v})Z^{v}&=&0,\\
ii)\ \hat{R}^{f}_{(p,u)}(X^{h},Y^{v})Z^{v}&=&\Big(-\frac{1}{2f(p)}R(Y,Z)X-\frac{1}{4f^{2}(p)}R(u,Y)(R(u,Z)X)\Big)^{h}_{p},
\varepsilonnd{eqnarray}
\begin{eqnarray}
iii)\ \hat{R}^{f}_{(p,u)}(X^{v},Y^{v})Z^{h}&=&\Big(-\frac{1}{2f(p)}R(Y,X)Z-\frac{1}{4f^{2}(p)}R(u,Y)(R(u,X)Z)\Big)^{h}_{p} \nonumber\\
&&+\Big(\frac{1}{2f(p)}R(X,Y)Z+\frac{1}{4f^{2}(p)}R(u,X)(R(u,Y)Z)\Big)^{h}_{p},
\varepsilonnd{eqnarray}
\begin{eqnarray}
iv) \ \hat{R}^{f}_{(p,u)}(X^{h},Y^{v})Z^{h}&=&\Big(\nabla_{X}(\frac{1}{2f(p)}R(u,Y)Z)\Big)^{h}_{p}+\frac{1}{4f(p)}R((R(u,Y)Z),X)u\nonumber\\
&&+A_{f}\Big(X,\frac{1}{2f(p)}(R(u,Y)Z)\Big) \nonumber\\
&&-\frac{1}{2f(p)}\Big(R(u,Y)(\nabla_{X}Z+A_{f}(X,Z))\Big)^{h}_{p} \nonumber\\
&&+\frac{1}{2}\Big(R(X,Z)u\Big)^{v}_{p}-\frac{1}{2f(p)}\Big(R(u,\nabla_{X}Y)Z\Big)^{h}_{p},
\varepsilonnd{eqnarray}
\begin{eqnarray}
v) \ \hat{R}^{f}_{(p,u)}(X^{h},Y^{h})Z^{v}&=&\Big(\nabla_{X}(\frac{1}{2f(p)}R(u,Z)Y)\Big)^{h}_{p}-\Big(\nabla_{Y}
(\frac{1}{2f(p)}R(u,Z)X)\Big)^{h}_{p}\nonumber\\
&& +\frac{1}{4f(p)}R(R(u,Z)Y,X)u-\frac{1}{4f(p)}R(R(u,Z)X,Y)u \nonumber\\
&&+\frac{1}{2f(p)}A_{f}(X,R(u,Z)Y)-\frac{1}{2f(p)}A_{f}(Y,R(u,Z)X) \nonumber\\
&&+\frac{1}{2f(p)}R(u,Z)[Y,X]+\Big(R(X,Y)u\Big)^{v}_{p}\nonumber\\
&&+\frac{1}{2f(p)}\Big(R(u,\nabla_{Y}Z)X\Big)^{h}_{p}-\frac{1}{2f(p)}\Big(R(u,\nabla_{X}Z)Y\Big)^{h}_{p},
\varepsilonnd{eqnarray}
\begin{eqnarray}
vi) \ \hat{R}^{f}_{(p,u)}(X^{h},Y^{h})Z^{h} &=&\hat{\nabla}^{f}_{X^{h}}\hat{\nabla}^{f}_{Y^{h}}Z^{h}-\hat{\nabla}^{f}_{Y^{h}}\hat{\nabla}^{f}_{X^{h}}Z^{h}
-\hat{\nabla}^{f}_{[X^{h},Y^{h}]}Z^{h}\nonumber\\
&=&\hat{\nabla}^{f}_{X^{h}}(F_{1}^{h})-\hat{\nabla}^{f}_{Y^{h}}\Big((\nabla_{X}Z)^{h}
+A_{f}(X,Z)^{h}+F_{2}^{h}\Big)-\hat{\nabla}^{f}_{(\nabla_{X}Y)^{h}}Z^{h} \nonumber\\
&=&\nabla_{X}\Big(\nabla_{Y}Z+A_{f}(Y,Z)\Big)^{h}+A_{f}\Big(X,\nabla_{Y}Z+A_{f}(Y,Z)\Big)^{h}\nonumber\\
&&-\frac{1}{2}\Big(R(X,\nabla_{Y}Z+A_{f}(Y,Z))u\Big)^{v}
-\nabla_{Y}\Big(\nabla_{X}Z+A_{f}(X,Z)\Big)^{h} \nonumber\\
&&-A_{f}\Big(Y,\nabla_{X}Z+A_{f}(X,Z)\Big)^{h}+\frac{1}{2}\Big(R(Y,\nabla_{X}Z+A_{f}(X,Z))u\Big)^{v} \nonumber\\
&&-\Big(\nabla_{[X,Y]}Z\Big)^{h}-A_{f}([X,Y],Z)^{h}-\frac{1}{2}\Big(R([X,Y],Z)u\Big)^{v}\nonumber\\
&&+\frac{1}{2f}\Big(R(u,R(X,Y)u)Z\Big)^{h}+\frac{1}{2}\Big(\nabla_{Y}(R(X,Z)u)\Big)^{v}\nonumber\\
&&+\frac{1}{4f}\Big(R(u,R(X,Z)u)Y\Big)^{h}-\frac{1}{2}\Big(\nabla_{X}(R(Y,Z)u)\Big)^{v}\nonumber\\
&&-\frac{1}{4f}\Big(R(u,R(Y,Z)u)X\Big)^{h}.
\varepsilonnd{eqnarray}
for any $X, Y, Z\in T_{p}M$.
\varepsilonnd{prop}
\begin{proof}
$i)$ The result follows immediately from Proposition 3.2.
$ii)$ Let $F: TM\rightarrow TM$ be the bundle endomorphism given by
\begin{equation}
F: u\mapsto \frac{1}{2f}R(u,Z)X.
\varepsilonnd{equation}
Applying Proposition 3.2 and Lemma 3.3 we have
\begin{equation}
\hat{\nabla}^{f}_{Y^{v}}F^{h}=F(Y)^{h}+\frac{1}{2f}\Big(R(u,Y)F(u)\Big)^{h}.
\varepsilonnd{equation}
This implies that
\begin{eqnarray}
\hat{R}^{f}(X^{h},Y^{v})Z^{v}&=&\hat{\nabla}^{f}_{X^{h}}\hat{\nabla}^{f}_{Y^{v}}Z^{v}-\hat{\nabla}^{f}_{Y^{v}}\hat{\nabla}^{f}_{X^{h}}Z^{v}
-\hat{\nabla}^{f}_{[X^{h},Y^{v}]}Z^{v}\nonumber\\
&=&-\hat{\nabla}^{f}_{Y^{v}}\hat{\nabla}^{f}_{X^{h}}Z^{v}
=-\hat{\nabla}^{f}_{Y^{v}}\Big((\nabla_{X}Z)^{v}+F^{h}\Big)\nonumber\\
&=&-\hat{\nabla}^{f}_{Y^{v}}F^{h}=-F(Y)^{h}-\frac{1}{2f}\Big(R(u,Y)F(u)\Big)^{h} \nonumber\\
&=&\Big(-\frac{1}{2f}R(Y,Z)X-\frac{1}{4f^{2}}R(u,Y)(R(u,Z)X)\Big)^{h}.
\varepsilonnd{eqnarray}
$iii)$ Using $ii)$ and the first Bianchi identity we get
\begin{equation}
\hat{R}^{f}(X^{v},Y^{v})Z^{h}=\hat{R}^{f}(Z^{h},Y^{v})X^{v}-\hat{R}^{f}(Z^{h},X^{v})Y^{v}
\varepsilonnd{equation}
which gives
\begin{eqnarray}
\hat{R}^{f}(X^{v},Y^{v})Z^{h}&=&\Big(-\frac{1}{2f}R(Y,X)Z-\frac{1}{4f^{2}}R(u,Y)(R(u,X)Z)\Big)^{h} \nonumber\\
&&+\Big(\frac{1}{2f}R(X,Y)Z+\frac{1}{4f^{2}}R(u,X)(R(u,Y)Z)\Big)^{h}.
\varepsilonnd{eqnarray}
$iv)$ Let $F_{1}, F_{2}: TM\rightarrow TM$ be the bundle endomorphisms given by
\begin{equation}
F_{1}: u\mapsto \frac{1}{2f}R(u,Y)Z \quad \text{and} \quad F_{2}: u\mapsto -\frac{1}{2f}R(X,Z)u.
\varepsilonnd{equation}
Then Proposition 3.2 implies that
\begin{eqnarray}
\hat{R}^{f}(X^{h},Y^{v})Z^{h}&=&\hat{\nabla}^{f}_{X^{h}}\hat{\nabla}^{f}_{Y^{v}}Z^{h}-\hat{\nabla}^{f}_{Y^{v}}\hat{\nabla}^{f}_{X^{h}}Z^{h}
-\hat{\nabla}^{f}_{[X^{h},Y^{v}]}Z^{h}\nonumber\\
&=&\hat{\nabla}^{f}_{X^{h}}(F_{1}^{h})-\hat{\nabla}^{f}_{Y^{v}}\Big((\nabla_{X}Z)^{h}
+A_{f}(X,Z)^{h}+F_{2}^{v}\Big)-\hat{\nabla}^{f}_{(\nabla_{X}Y)^{v}}Z^{h}\nonumber\\
&=&(\nabla_{X}F_{1}(u))^{h}-\frac{1}{2}\Big(R(X,F_{1}(u))u\Big)^{v}+A_{f}(X,F_{1}(u))^{h}\nonumber\\
&&-\frac{1}{2f}\Big(R(u,Y)(\nabla_{X}Z+A_{f}(X,Z))\Big)^{h}-F_{2}(Y)^{v}
-\frac{1}{2f}\Big(R(u,\nabla_{X}Y)Z\Big)^{h}\nonumber\\
&=&\Big(\nabla_{X}(\frac{1}{2f}R(u,Y)Z)\Big)^{h}+\frac{1}{4f}R(R(u,Y)Z,X)u\nonumber\\
&&+A_{f}\Big(X,\frac{1}{2f}(R(u,Y)Z)\Big)-\frac{1}{2f}\Big(R(u,Y)(\nabla_{X}Z+A_{f}(X,Z))\Big)^{h} \nonumber\\
&&+\frac{1}{2}\Big(R(X,Z)u\Big)^{v}-\frac{1}{2f}\Big(R(u,\nabla_{X}Y)Z\Big)^{h}.
\varepsilonnd{eqnarray}
$v)$ Applying part $iv)$ and the first Bianchi identity
\begin{equation}
\hat{R}^{f}(X^{h},Y^{h})Z^{v}=\hat{R}^{f}(X^{h},Z^{v})Y^{h}-\hat{R}^{f}(Y^{h},Z^{v})X^{h},
\varepsilonnd{equation}
we get
\begin{eqnarray}
\hat{R}^{f}(X^{h},Y^{h})Z^{v}&=&\Big(\nabla_{X}(\frac{1}{2f}R(u,Z)Y)\Big)^{h}+\frac{1}{4f}R(R(u,Z)Y,X)u+A_{f}\Big(X,\frac{1}{2f}(R(u,Z)Y)\Big)\nonumber\\
&&-\frac{1}{2f}\Big(R(u,Z)(\nabla_{X}Y+A_{f}(X,Y))\Big)^{h}+\frac{1}{2}(R(X,Y)u)^{v}
-\frac{1}{2f}\Big(R(u,\nabla_{X}Z)Y\Big)^{h}\nonumber\\
&&-\Big(\nabla_{Y}(\frac{1}{2f}R(u,Z)X)\Big)^{h}-\frac{1}{4f}R(R(u,Z)X,Y)u-A_{f}\Big(Y,\frac{1}{2f}R(u,Z)X\Big)\nonumber\\
&&+\frac{1}{2f}\Big(R(u,Z)(\nabla_{Y}X+A_{f}(Y,X))\Big)^{h}-\frac{1}{2}(R(Y,X)u)^{v}
+\frac{1}{2f}\Big(R(u,\nabla_{Y}Z)X\Big)^{h},\nonumber\\
\varepsilonnd{eqnarray}
from which the result follows.
$vi)$ By $i)$ of Proposition 3.2 and direct calculation we get
\begin{eqnarray}
\hat{R}^{f}(X^{h},Y^{h})Z^{h}&=&\hat{\nabla}^{f}_{X^{h}}\hat{\nabla}^{f}_{Y^{h}}Z^{h}-\hat{\nabla}^{f}_{Y^{h}}\hat{\nabla}^{f}_{X^{h}}Z^{h}
-\hat{\nabla}^{f}_{[X^{h},Y^{h}]}Z^{h}\nonumber\\
&=&\hat{\nabla}^{f}_{X^{h}}(F_{1}^{h})-\hat{\nabla}^{f}_{Y^{h}}\Big((\nabla_{X}Z)^{h}
+A_{f}(X,Z)^{h}+F_{2}^{h}\Big)-\hat{\nabla}^{f}_{(\nabla_{X}Y)^{h}}Z^{h} \nonumber\\
&=&\nabla_{X}\Big(\nabla_{Y}Z+A_{f}(Y,Z)\Big)^{h}+A_{f}\Big(X,\nabla_{Y}Z+A_{f}(Y,Z)\Big)^{h}\nonumber\\
&&-\frac{1}{2}\Big(R(X,\nabla_{Y}Z+A_{f}(Y,Z))u\Big)^{v}
-\nabla_{Y}\Big(\nabla_{X}Z+A_{f}(X,Z)\Big)^{h} \nonumber\\
&&-A_{f}\Big(Y,\nabla_{X}Z+A_{f}(X,Z)\Big)^{h}+\frac{1}{2}\Big(R(Y,\nabla_{X}Z+A_{f}(X,Z))u\Big)^{v} \nonumber\\
&&-\Big(\nabla_{[X,Y]}Z\Big)^{h}-A_{f}([X,Y],Z)^{h}-\frac{1}{2}\Big(R([X,Y],Z)u\Big)^{v}\nonumber\\
&&+\frac{1}{2f}\Big(R(u,R(X,Y)u)Z\Big)^{h}+\frac{1}{2}\Big(\nabla_{Y}(R(X,Z)u)\Big)^{v}\nonumber\\
&&+\frac{1}{4f}\Big(R(u,R(X,Z)u)Y\Big)^{h}-\frac{1}{2}\Big(\nabla_{X}(R(Y,Z)u)\Big)^{v} \nonumber\\
&&-\frac{1}{4f}\Big(R(u,R(Y,Z)u)X\Big)^{h}.
\varepsilonnd{eqnarray}
\varepsilonnd{proof}
We shall now compare the geometries of the manifold $(M,g)$ and its tangent bundle $TM$ equipped with the rescaled Sasaki metric $\hat{g}^{f}$.
\begin{thm}\label{th:35}
Let $(M, g)$ be a Riemannian manifold and $TM$ be its tangent bundle with the rescaled Sasaki metric $\hat{g}^{f}$. Then $TM$ is flat if and only if
$M$ is flat and $f=C(constant)$.
\varepsilonnd{thm}
\begin{proof}
Applying Proposition 3.4 and the identity
\begin{equation}
A_{f}(X,Y)=\frac{1}{2f}\Big(X(f)Y+Y(f)X-g(X,Y)(\texttt{d}f)^{*}\Big)^{h}.
\varepsilonnd{equation}
If $A_{f}=0$, we have
\begin{equation}
X(f)Y+Y(f)X-g(X,Y)(\texttt{d}f)^{*}=0,
\varepsilonnd{equation}
then $R\varepsilonquiv 0$ implies $\hat{R}^{f}\varepsilonquiv 0$. If we assume that $\hat{R}^{f}\varepsilonquiv 0$ and calculate the Riemann curvature tensor for three horizontal
vector fields at $(p, 0)$ we have
\begin{eqnarray}
\hat{R}^{f}(X^{h},Y^{h})Z^{h}&=&R(X,Y)Z+A_{f}(Y,Z)-A_{f}(X,Z)+A_{f}\Big(X,\nabla_{Y}Z+A_{f}(Y,Z)\Big)\nonumber\\
&&-A_{f}\Big(Y,\nabla_{X}Z+A_{f}(X,Z)\Big)-A_{f}([X,Y],Z)=0,
\varepsilonnd{eqnarray}
then $R=0$ and $f=C(constant)$.
\varepsilonnd{proof}
\begin{cor}\label{co:36}
Let $(M, g)$ be a Riemannian manifold and $TM$ be its tangent bundle with the rescaled Sasaki metric $\hat{g}^{f}$. If $f\neq C(constant)$, then
$(TM,\hat{g}^{f})$ is not flat.
\varepsilonnd{cor}
For the sectional curvatures of the tangent bundle we have the following.
\begin{prop}\label{pr: 37}
Let $(M, g)$ be a Riemannian manifold and equip the tangent bundle $(TM, \hat{g}^{f})$ with
the rescaled Sasaki metric $\hat{g}^{f}$. Let $(p,u)\in TM$ and $X,Y\in T_{p}M$ be two orthonormal tangent vectors at $p$. Let
$\hat{K}^{f}(X^{i},Y^{j})$ denote the sectional curvature of the plane spanned by $X^{i}$ and $Y^{j}$ with $i,j\in \{h,v\}$.
Then we have the following
\begin{eqnarray}
i) \ \hat{K}^{f}_{(p,u)}(X^{v},Y^{v})&=&0,\\
ii)\ \hat{K}^{f}_{(p,u)}(X^{h},Y^{v})&=&\frac{1}{4f^{2}(p)}|R(u,Y)X|^{2},\\
iii) \ \hat{K}^{f}_{(p,u)}(X^{h},Y^{h})&=&\frac{1}{f(p)}K(X,Y)-\frac{3}{4f^{2}(p)}|R(X,Y)u|^{2}+L_{f}(X,Y) ,
\varepsilonnd{eqnarray}
where
\begin{eqnarray*}
L_{f}(X,Y)&=&\frac{1}{f}\Big(g(\nabla_{X}A_{f}(Y,Y)-\nabla_{Y}A_{f}(X,Y),X)-g(A_{f}(X,\nabla_{Y}Y+A_{f}(Y,Y)),X)\nonumber\\
&&-g(A_{f}(Y,\nabla_{X}Y+A_{f}(X,Y)),X)-g(A_{f}([X,Y],Y),X)\Big) .
\varepsilonnd{eqnarray*}
\varepsilonnd{prop}
\begin{proof}
$i)$ It follows directly from Proposition 3.4 that the sectional curvature for a plane spanned by two vertical vectors vanishes.
$ii)$ Applying part $ii)$ of proposition 3.4 we get
\begin{eqnarray}
\hat{K}^{f}(X^{h},Y^{v})&=&\frac{\hat{g}^{f}(\hat{R}^{f}(X^{h},Y^{v})Y^{v},X^{h})}
{\hat{g}^{f}(X^{h},X^{h})\hat{g}^{f}(Y^{v},Y^{v})} \nonumber\\
&=&\frac{1}{f}\Big(-\frac{1}{2f}\hat{g}^{f}((R(Y,Y)X)^{h},X^{h})
-\frac{1}{4f^{2}}\hat{g}^{f}((R(u,Y)R(u,Y)X)^{h},X^{h})\Big) \nonumber\\
&=&-\frac{1}{4f^{2}}g(R(u,Y)R(u,Y)X,X)\nonumber\\
&=&\frac{1}{4f^{2}}g(R(u,Y)X, R(u,Y)X) =\frac{1}{4f^{2}}|R(u,Y)X|^{2}.
\varepsilonnd{eqnarray}
$iii)$ It follows immediately from proposition 3.4 that
\begin{eqnarray}
\hat{K}^{f}(X^{h},Y^{h})&=&\frac{1}{f^{2}}\hat{g}^{f}(\hat{R}^{f}(X^{h},Y^{h})Y^{h},X^{h}) \nonumber\\
&=&\frac{1}{f}g(R(X,Y)Y,X)+\frac{3}{4f^{2}}g(R(Y,X)u,R(X,Y)u)\nonumber\\
&=&\frac{1}{f}K(X,Y)-\frac{3}{4f^{2}}|R(X,Y)u|^{2}+\frac{1}{f}\Big(g(\nabla_{X}A_{f}(Y,Y)\nonumber\\
&&-\nabla_{Y}A_{f}(X,Y),X)-g(A_{f}(X,\nabla_{Y}Y+A_{f}(Y,Y)),X)\nonumber\\
&&-g(A_{f}(Y,\nabla_{X}Y+A_{f}(X,Y)),X)-g(A_{f}([X,Y],Y),X)\Big).
\varepsilonnd{eqnarray}
\varepsilonnd{proof}
\begin{thm}\label{th:38}
Let $(M, g)$ be a Riemannian manifold and equip the tangent bundle $(TM, \hat{g}^{f})$ with the rescaled Sasaki metric $\hat{g}^{f}$.
If the sectional curvature of $(TM,\hat{g}^{f})$ is upper bounded, then $(M,g)$ is flat; if $M$ is compact and the sectional curvature
of $(TM,\hat{g}^{f})$ is lower bounded, then $(M,g)$ is flat.
\varepsilonnd{thm}
\begin{proof}
The statement follows directly from Proposition 3.7.
\varepsilonnd{proof}
\begin{prop}\label{pr: 39}
Let $(M, g)$ be a Riemannian manifold and equip the tangent bundle $(TM, \hat{g}^{f})$ with
the rescaled Sasaki metric $\hat{g}^{f}$. Let $(p,u)\in TM$ and $X,Y\in T_{p}M$ be two orthonormal tangent vectors at $p$. Let
$S$ denote the scalar curvature of $g$ and $\hat{S}^{f}$ denote the scalar curvature of $\hat{g}^{f}$. Then the following equation
holds
\begin{equation}
\hat{S}^{f}=\frac{1}{f}S-\frac{1}{4f^{2}}\sum_{i,j=1}^{m}\mid R(X_{i},X_{j})u\mid^{2}+\sum_{i,j=1}^{m}L_{f}(X_{i},X_{j}),
\varepsilonnd{equation}
where $\{X_{1},\cdots,X_{m}\}$ is a local orthonormal frame for $TM$.
\varepsilonnd{prop}
\begin{proof}
For a local orthonormal frame $\{\frac{1}{\sqrt{f}}Y_{1},\cdots,\frac{1}{\sqrt{f}}Y_{m}, Y_{m+1},\cdots, Y_{2m}\}$ for $TTM$ with
$X^{h}_{i}=Y_{i}$ and $X^{v}_{i}=Y_{m+i}$ we get from proposition 3.7
\begin{eqnarray}
\hat{S}^{f}&=&\sum_{i,j=1}^{m}\hat{K}^{f}(\frac{1}{\sqrt{f}}X^{h}_{i},\frac{1}{\sqrt{f}}X^{h}_{j})
+2\sum_{i,j=1}^{m}\hat{K}^{f}(\frac{1}{\sqrt{f}}X^{h}_{i},X^{v}_{j})
+\sum_{i,j=1}^{m}\hat{K}^{f}(X^{v}_{i},X^{v}_{j}) \nonumber\\
&=&\sum_{i,j=1}^{m}[\hat{K}^{f}(X^{h}_{i},X^{h}_{j})+2\hat{K}^{f}(X^{h}_{i},X^{v}_{j})+\hat{K}^{f}(X^{v}_{i},X^{v}_{j})] \nonumber\\
&=&\sum_{i,j=1}^{m}[\frac{1}{f}K(X_{i},X_{j})-\frac{3}{4f^{2}}\mid R(X_{i},X_{j})u\mid^{2}+L_{f}(X_{i},X_{j})]
+2\sum_{i,j=1}^{m}\frac{1}{4f^{2}}|R(X_{j},u)X_{i}|^{2}.
\varepsilonnd{eqnarray}
In order to simplify this last expression we put $u=\sum_{i=1}^{m}u_{i}X_{i}$ and get
\begin{eqnarray}
\sum_{i,j=1}^{m}|R(X_{j},u)X_{i}|^{2}
&=&\sum_{i,j,k,l=1}^{m}u_{k}u_{l}g(R(X_{j},X_{k})X_{i},R(X_{j},X_{l})X_{i})\nonumber\\
&=&\sum_{i,j,k,l,s=1}^{m}u_{k}u_{l}g(R(X_{j},X_{k})X_{i},X_{s})g(R(X_{j},X_{l})X_{i},X_{s})\nonumber\\
&=&\sum_{i,j,k,l,s=1}^{m}u_{k}u_{l}g(R(X_{s},X_{i})X_{k},X_{j})g(R(X_{s},X_{i})X_{l},X_{j})\nonumber\\
&=&\sum_{i,j,k,l=1}^{m}u_{k}u_{l}g(R(X_{j},X_{i})X_{k},R(X_{j},X_{i})X_{l})\nonumber\\
&=&\sum_{i,j=1}^{m}|R(X_{j},X_{i})u|^{2}.
\varepsilonnd{eqnarray}
This completes the proof.
\varepsilonnd{proof}
\begin{cor}\label{co:40}
Let $(M, g)$ be a Riemannian manifold and $TM$ be its tangent bundle with the rescaled Sasaki metric $\hat{g}^{f}$.
Then $(TM,\hat{g}^{f})$ has constant scalar curvature if and only if $(M,g)$ is flat and $\sum_{i,j=1}^{m}L_{f}(X_{i},X_{j})=C(constant)$.
\varepsilonnd{cor}
\begin{proof}
The statement follows directly from Proposition 3.9.
\varepsilonnd{proof}
\section{Geodesics of The Rescaled Sasaki Metric}
Let $M$ be a Riemannian manifold with metric $g$. We denote by $\Im^{p}_{q}(M)$ the set of all tensor fields of type $(p,q)$ on
$M$. Manifolds, tensor fields and connections are always assumed to be differentiable and of class $C^{\infty}$. Let $T(M)$ be a
tangent bundle of $M$, and $\phii$ the projection $\phii:T(M)\rightarrow M$. Let the manifold $M$ be covered
by system of coordinate neighbourhoods $(U,x^{i})$, where $(x^{i}), i=1,\cdots, n$ is a local coordinate system defined in the
neighbourhood $U$. Let $y^{i}$ be the Cartesian coordinates in each tangent spaces $T_{p}(M)$ and $P\in M$ with respect to the
natural base $\frac{\phiartial}{\phiartial x^{i}}$, $P$ being an arbitrary point in $U$ whose coordinates are $x^{i}$. Then we can
introduce local coordinates $(x^{i},y^{i})$ in open set $\phii^{-1}(U)\subset T(M_{n})$. We call them coordinates induced in
$\phii^{-1}(U)$ from $(U,x^{i})$. The projection $\phii$ is represented by $(x^{i},y^{i})\rightarrow (x^{i})$. The indices $i,j,\cdots$
run from $1$ to $2n$.
Let $\hat{C}$ be a curve on $T(M_{n})$ and locally expressed by $x=x(\sigma),$ with respect to induced coordinates
$\frac{\phiartial}{\phiartial x_{i}}$ in $\phii^{-1}(U)\subset T(M_{n})$. The curve $\hat{C}$ is said to be a lift of
the curve $C$ and denoted by $C^{h}=(x(\sigma),x'(\sigma))$. The tangent vector field of $\hat{C}$ defined by
$T=(\frac{dx}{dt},\frac{dy}{dt})=x'^{h}+(\nabla_{x'}y)^{v}$. If the curve $\hat{C}$ is a geodesic, we get
$\nabla_{x'}x'=0$, then $y=x'$.
\begin{thm}\label{th:41}
Let $C$ be a geodesic on $M$. If $f\neq c(constant)$ along any geodesic on $M$, then the curve $C$
cannot be lifted to a geodesic of $\hat{g}^{f}$.
\varepsilonnd{thm}
\begin{proof}
By applying Proposition 3.2 we have
\begin{eqnarray}
\hat{\nabla}^{f}_{x'^{h}+(\nabla_{x'}y)^{v}}(x'^{h}+(\nabla_{x'}y)^{v})
&=&(\nabla_{x'}x')^{h}+A_{f}(x',x')^{h}-\frac{1}{2}(R(x',x')u)^{v} \nonumber\\
&&+(\nabla_{x'}\nabla_{x'}y)^{v}+\frac{1}{2f}[R_{p}(u,\nabla_{x'}y)x']^{h}
+\frac{1}{2f}[R_{p}(u,\nabla_{x'}y)x']^{h} \nonumber\\
&=&(\nabla_{x'}x')^{h}+\frac{1}{f}[R_{p}(u,\nabla_{x'}y)x']^{h}+A_{f}(x',x')^{h}+(\nabla_{x'}\nabla_{x'}y)^{v}.
\varepsilonnd{eqnarray}
Since the curve $C$ is a geodesic on $M_{n}$, with respect to the adapted frame and taking account of
$\hat{\nabla}^{f}_{T}T=0 $, then we get
\begin{eqnarray}
&& (a) \ \nabla_{x'}x'=-\frac{1}{f(x(t))}[R_{p}(y(t),\nabla_{x'}y(t))x'(t)]^{h}-A_{f}(x',x')^{h} , \nonumber\\
&& (b) \ \nabla_{x'}\nabla_{x'}y=0.
\varepsilonnd{eqnarray}
Applying part $iv)$ of proposition 3.4 and
\begin{equation}
A_{f}(x',x')=\frac{1}{2f}[2x'(f)x'-g(x',x')\texttt{grad} f],
\varepsilonnd{equation}
if $\langle A_{f}(x',x'), x'\rangle =0$, we get
\begin{equation}
2x'(f)g(x',x')-g(x',x')x'(f)=g(x',x')x'(f)=0.
\varepsilonnd{equation}
Then $x'(f)=0$, $\texttt{grad}(f)=0$ and
$\frac{\texttt{d}f(x(t))}{\texttt{d}t}=0$, so we get $f=c(constant)$ along any
geodesic on $M$.
\varepsilonnd{proof}
\begin{cor}\label{co:42}
If $(x(t),y(t))$ is geodesic and $|y(t)|=C$, then $\nabla_{x'}x'=-A_{f}(x',x')$.
\varepsilonnd{cor}
\begin{proof}
By applying $(a)$ of equation $(4.2)$ we have
\begin{equation}
0=\nabla_{x'}\langle y, y\rangle =\langle \nabla_{x'}y, y\rangle
+\langle y, \nabla_{x'}y\rangle,
\varepsilonnd{equation}
and
\begin{equation}
0=\nabla_{x'}\langle \nabla_{x'}y, y\rangle =\langle
\nabla_{x'}\nabla_{x'}y, y\rangle +\langle \nabla_{x'}y,
\nabla_{x'}y\rangle.
\varepsilonnd{equation}
Then we get $\langle \nabla_{x'}y, y\rangle=0$ and $\nabla_{x'}y=0$,
from which the result follows.
\varepsilonnd{proof}
\begin{thm}\label{th:43}
Let $C_{1}$ and $C_{2}$ be two geodesics on $M_{n}$ departing from the same arbitrary point,
and their initial tangent vectors are not parallel. If the lifts of two geodesics on $M$ are
geodesics on $T(M)$ with the metric $\hat{g}^{f}$, then
$f=c(constant)$.
\varepsilonnd{thm}
\begin{proof}
By applying $(a)$ of equation $(4.2)$ we have
\begin{equation}
2x'(f)x'-g(x',x')\texttt{grad}f=2\tilde{x}'(f)\tilde{x}'-g(\tilde{x}',\tilde{x}')\texttt{grad}f.
\varepsilonnd{equation}
Using $x'(0) \nparallel \tilde{x}'(0)$ we get
$\texttt{grad}f(x_{0})=0$, then we obtain $f=c(constant)$.
\varepsilonnd{proof}
The submersion geodesic $C$ is said to be the image under $\phii$ of the geodesic $\hat{C}$ on $TM$.
Let $C=\phii\circ \hat{C}$ be a submersion geodesic on $M$, then $\hat{\nabla}^{f}_{T}T=0 $.
Using this condition we have
\begin{thm}\label{th:44}
Let $M$ be a flat manifold. If the submersion geodesics are always geodesics on $M$, then $f=c(constant)$.
\varepsilonnd{thm}
\section{The Rescaled Cheeger-Gromoll Metric}
In \cite{CG}, Cheeger and Gromoll studied complete manifolds of nonnegative curvature and suggested a construction of Riemannian metrics useful
in that context. This can be used to obtain a natural metric $\tilde{g}^{f}$ on the tangent bundle $TM$ of a given Riemannian manifold $(M, g)$.
For a vector field $u\in C^{\infty}(TM)$ we shall by $U$ denote its canonical vertical vector field on $TM$ which in local coordinates
is given by
\begin{equation}
U=\sum_{i=1}^{m}v_{m+i}(\frac{\phiartial}{\phiartial v_{m+i}})_{(p,u)},
\varepsilonnd{equation}
where $u=(v_{m+1},\cdots,v_{2m})$. To simplify our notation we define the function $r: TM\rightarrow\mathbb{R}$
by $r(p,u)=|u|=\sqrt{g_{p}(u,u)}$
and $\alpha=1+r^{2}.$
\begin{defn}
Let $(M,g)$ be a Riemannian manifold. Let $f>0$ and $f\in C^{\infty}(M)$. Then the rescaled Cheeger-Gromoll metric $\tilde{g}^{f}$ on the tangent
bundle $TM$ of $M$ is given by
\begin{eqnarray}
i) \ \tilde{g}_{(p,u)}^{f}(X^{h}, Y^{h})&=&f(p)g_{p}(X, Y),\\
ii) \ \tilde{g}_{(p,u)}^{f}(X^{v}, Y^{h})&=&0 ,\\
iii) \ \tilde{g}_{(p,u)}^{f}(X^{v}, Y^{v})&=&\frac{1}{1+r^{2}}(g_{p}(X, Y)+g_{p}(X, u)g_{p}(Y, u))
\varepsilonnd{eqnarray}
for all vector fields $X, Y\in C^{\infty}(TM)$.
\varepsilonnd{defn}
It is obvious that the rescaled Cheeger-Gromoll metric $\tilde{g}^{f}$ is contained in the class of rescaled natural metrics introduced earlier.
\begin{prop}\label{pr: 42}
Let $(M, g)$ be a Riemannian manifold and $\tilde{\nabla}^{f}$ be Levi-Civita connection of the tangent bundle $(TM, \tilde{g}^{f})$ equipped
with the rescaled Cheeger-Gromoll metric. Then
\begin{eqnarray}
i) \ (\tilde{\nabla}^{f}_{X^{h}}Y^{h})_{(p,u)}&=&(\nabla_{X}Y)^{h}_{(p,u)}+\frac{1}{2f(p)}
\Big((X(f)Y+Y(f)X)-g(X,Y)(\texttt{d}f)^{*}\Big)^{h}_{p}\nonumber\\
&&-\frac{1}{2}\Big(R_{p}(X,Y)u\Big)^{v},\\
ii) \ (\tilde{\nabla}^{f}_{X^{h}}Y^{v})_{(p,u)}&=&(\nabla_{X}Y)^{v}_{(p,u)}+\frac{1}{2\alpha f(p)}\Big(R_{p}(u,Y)X\Big)^{h},\\
iii) \ (\tilde{\nabla}^{f}_{X^{v}}Y^{h})_{(p,u)}&=&\frac{1}{2\alpha f(p)}\Big(R_{p}(u,X)Y\Big)^{h},\\
iv) \ (\tilde{\nabla}^{f}_{X^{v}}Y^{v})_{(p,u)}&=&-\frac{1}{\alpha}\Big(\tilde{g}_{(p,u)}^{f}(X^{v}, U)Y^{v}
+\tilde{g}_{(p,u)}^{f}(Y^{v}, U)X^{v}\Big)\nonumber\\
&&+\frac{1+\alpha}{\alpha}\tilde{g}_{(p,u)}^{f}(X^{v},Y^{v})U-\frac{1}{\alpha}\tilde{g}_{(p,u)}^{f}(X^{v}, U)\tilde{g}_{(p,u)}^{f}(Y^{v}, U)U
\varepsilonnd{eqnarray}
for any $X,Y\in C^{\infty}(TM)$, $\xi=(p, u)\in TM$.
\varepsilonnd{prop}
\begin{proof}
$i)$ The statement is a direct consequence of Corollary 2.3.
$ii)$ By applying Lemma 2.2 and Definition 4.1 we get
\begin{eqnarray}
2\tilde{g}(\tilde{\nabla}^{f}_{X^{h}}Y^{v}, Z^{h})&=&-\frac{1}{f}\tilde{g}(Y^{v},(R(Z,X)u)^{v})\nonumber\\
&=&-\frac{1}{\alpha f}\Big(g(Y,R(Z,X)u)+g(Y,u)g(R(Z,X)u,u)\Big)\nonumber\\
&=&\frac{1}{\alpha f}g\Big(R(u,Y)X,Z\Big).
\varepsilonnd{eqnarray}
From Definition 3.7 and Lemma 4.1 in \cite{GK2} it follows that
\begin{equation}
X^{h}(\frac{1}{\alpha})=0 \quad \text{and} \quad X^{h}(g(Y,u))\circ\phii=g(\nabla_{X}Y,u)\circ\phii,
\varepsilonnd{equation}
so
\begin{equation}
X^{h}(\tilde{g}^{f}(Y^{v},Z^{v}))=\tilde{g}^{f}((\nabla_{X}Y)^{v},Z^{v})+\tilde{g}^{f}(Y^{v},(\nabla_{X}Z)^{v}).
\varepsilonnd{equation}
This means that
\begin{eqnarray}
2\tilde{g}^{f}(\tilde{\nabla}^{f}_{X^{h}}Y^{v}, Z^{v})&=&X^{h}(\tilde{g}^{f}(Y^{v},Z^{v}))+\tilde{g}^{f}(Z^{v},(\nabla_{X}Y)^{v})
-\tilde{g}^{f}(Y^{v},(\nabla_{X}Z)^{v}) \nonumber\\
&=&2\tilde{g}^{f}((\nabla_{X}Y)^{v},Z^{v}).
\varepsilonnd{eqnarray}
$iii)$ Calculations similar to those in $ii)$ give
\begin{eqnarray}
2\tilde{g}(\tilde{\nabla}^{f}_{X^{v}}Y^{h}, Z^{h})&=&\frac{1}{f}\tilde{g}(X^{v},(R(Y,Z)u)^{v})
=\frac{1}{\alpha f}\tilde{g}((R(u,X)Y)^{h},Z^{h}).
\varepsilonnd{eqnarray}
The rest follows by
\begin{eqnarray}
2\tilde{g}^{f}(\tilde{\nabla}^{f}_{X^{v}}Y^{h}, Z^{v})&=&Y^{h}(\tilde{g}^{f}(Z^{v},X^{v}))-\tilde{g}^{f}(Z^{v},(\nabla_{Y}X)^{v})
-\tilde{g}^{f}(X^{v},(\nabla_{Y}Z)^{v}) \nonumber\\
&=&\tilde{g}^{f}(Z^{v},(\nabla_{Y}X)^{v})+\tilde{g}^{f}(X^{v},(\nabla_{Y}Z)^{v})
-\tilde{g}^{f}(Z^{v},(\nabla_{Y}X)^{v})-\tilde{g}^{f}(X^{v},(\nabla_{Y}Z)^{v})\nonumber\\
&=&0.
\varepsilonnd{eqnarray}
$iv)$ Using Lemma 2.2 we obtain
\begin{eqnarray}
2f\tilde{g}^{f}(\tilde{\nabla}^{f}_{X^{v}}Y^{v}, Z^{h})&=&-Z^{h}(\tilde{g}^{f}(X^{v},Y^{v}))+\tilde{g}^{f}(Y^{v},(\nabla_{Z}X)^{v})
+\tilde{g}^{f}(X^{v},(\nabla_{Z}Y)^{v}) \nonumber\\
&=&-\tilde{g}^{f}(Y^{v},(\nabla_{Z}X)^{v})-\tilde{g}^{f}(X^{v},(\nabla_{Z}Y)^{v})
+\tilde{g}^{f}(Y^{v},(\nabla_{Z}X)^{v})+\tilde{g}^{f}(X^{v},(\nabla_{Z}Y)^{v}) \nonumber\\
&=&0.
\varepsilonnd{eqnarray}
Using $X^{v}(f(r^{2}))=2f'(r^{2})g(X,u)$ and $\alpha=1+r^{2}$ we get
\begin{eqnarray}
X^{v}\tilde{g}^{f}(Y^{v}, Z^{v})&=&-\frac{2}{\alpha^{2}}g(X,u)\Big(g(Y,Z)+g(Y,u)g(Z,u)\Big) \nonumber\\
&&+\frac{1}{\alpha}\Big(g(X,Y)g(Z,u)+g(X,Z)g(Y,u)\Big).
\varepsilonnd{eqnarray}
The definition of the rescaled Cheeger-Gromoll metric implies that
\begin{equation}
\tilde{g}^{f}(X^{v}, U)=\frac{1}{\alpha}\Big(g(X,u)+g(X,u)g(u,u)\Big)=g(X,u).
\varepsilonnd{equation}
This leads to the following
\begin{eqnarray}
\alpha^{2}\tilde{g}^{f}(\tilde{\nabla}^{f}_{X^{v}}Y^{v}, Z^{v})&=&\frac{\alpha^{2}}{2}\Big(X^{v}(\tilde{g}^{f}(Y^{v},Z^{v}))
+Y^{v}(\tilde{g}^{f}(Z^{v},X^{v}))-Z^{v}(\tilde{g}^{f}(X^{v},Y^{v}))\Big) \nonumber\\
&=&-g(X,u)\Big(g(Y,Z)+g(Y,u)g(Z,u)\Big) \nonumber\\
&&+\frac{\alpha}{2}\Big(g(X,Y)g(Z,u)+g(X,Z)g(Y,u)\Big)\nonumber\\
&&-g(Y,u)\Big(g(Z,X)+g(Z,u)g(X,u)\Big) \nonumber\\
&&+\frac{\alpha}{2}\Big(g(Y,Z)g(X,u)+g(Y,X)g(Z,u)\Big) \nonumber\\
&&+g(Z,u)\Big(g(X,Y)+g(X,u)g(Y,u)\Big) \nonumber\\
&&-\frac{\alpha}{2}\Big(g(Z,X)g(Y,u)+g(Z,Y)g(X,u)\Big) \nonumber\\
&=&g\Big(\big(g(X,Y)-g(X,u)g(Y,u)\big)u+\alpha g(X,Y)u \nonumber\\
&&-g(X,u)Y-g(Y,u)X,Z\Big).
\varepsilonnd{eqnarray}
By using the definition of the metric we see that this gives the statement to be proved.
\varepsilonnd{proof}
Having determined the Levi-Civita connection we are ready to calculate the Riemann curvature tensor of $TM$. But first we state the
following useful Lemma.
\begin{lem}\label{le:43}
Let $(M, g)$ be a Riemannian manifold and $\tilde{\nabla}^{f}$ be the Levi-Civita connection of the tangent bundle $(TM, \tilde{g}^{f})$,
equipped with the rescaled Cheeger-Gromoll metric $\tilde{g}^{f}$, and let $F:TM\rightarrow TM$ be a smooth bundle endomorphism of the
tangent bundle. Then
\begin{eqnarray}
(\tilde{\nabla}^{f}_{X^{v}}F^{v})_{\xi}&=&F(X)^{v}_{\xi}-\frac{1}{\alpha}\Big(\tilde{g}^{f}(X^{v},U)F^{v}+\tilde{g}^{f}(F^{v},U)X^{v} \nonumber\\
&&-(1+\alpha)\tilde{g}^{f}(F^{v},X^{v})U+\tilde{g}^{f}(X^{v},U)\tilde{g}^{f}(F^{v},U)U\Big)_{\xi}
\varepsilonnd{eqnarray}
and
\begin{equation}
(\tilde{\nabla}^{f}_{X^{v}}F^{h})_{\xi}=F(X)^{h}_{\xi}+\frac{1}{2\alpha f(p)}\Big(R(u,X)F(u)\Big)^{h}_{\xi}
\varepsilonnd{equation}
for any $X\in C^{\infty}(TM)$ and $\xi=(p, u)\in TM$.
\varepsilonnd{lem}
\begin{proof}
The statement is a direct consequence of Lemma 2.5 and Proposition 5.2.
\varepsilonnd{proof}
\begin{prop}\label{pr: 54}
Let $(M, g)$ be a Riemannian manifold and $\tilde{R}^{f}$ be the Riemann curvature tensor of the tangent bundle $(TM, \tilde{g}^{f})$
equipped with the rescaled Cheeger-Gromoll metric. Then the following formulae hold
\begin{eqnarray*}
i) \ \tilde{R}^{f}(X^{h},Y^{h})Z^{h}&=&\nabla_{X}(\nabla_{Y}Z+A_{f}(Y,Z))^{h}+A_{f}(X,\nabla_{Y}Z+A_{f}(Y,Z))^{h} \nonumber\\
&&-\frac{1}{2}[R(X,\nabla_{Y}Z+A_{f}(Y,Z))u]^{v}-\nabla_{Y}(\nabla_{X}Z+A_{f}(X,Z))^{h}\nonumber\\
&&-A_{f}(Y,\nabla_{X}Z+A_{f}(X,Z))^{h}+\frac{1}{2}\Big(R(Y,\nabla_{X}Z+A_{f}(X,Z))u\Big)^{v}\nonumber\\
&&-(\nabla_{[X,Y]}Z)^{h}-A_{f}([X,Y],Z)^{h}-\frac{1}{2}\Big(R([X,Y],Z)u\Big)^{v}\nonumber\\
&&+\frac{1}{2\alpha f}\Big(R(u,R(X,Y)u)Z\Big)^{h}\nonumber\\
\varepsilonnd{eqnarray*}
\begin{eqnarray}
&&+\frac{1}{2}[\nabla_{Y}(R(X,Z)u)]^{v}+\frac{1}{4\alpha f(p)}\Big(R(u,R(X,Z)u)Y\Big)^{h} \nonumber\\
&&-\frac{1}{2}[\nabla_{X}(R(Y,Z)u)]^{v}-\frac{1}{4\alpha f(p)}\Big(R(u,R(Y,Z)u)X\Big)^{h},
\varepsilonnd{eqnarray}
\begin{eqnarray}
ii) \ \tilde{R}^{f}(X^{h},Y^{h})Z^{v}&=&(R(X,Y)Z)^{v}+\frac{1}{2\alpha }\Big(\nabla_{X}(\frac{1}{f}R(u,Z)Y)
-\nabla_{Y}(\frac{1}{f}R(u,Z)X)\Big)^{h} \nonumber\\
&&-\frac{1}{4\alpha f(p)}\Big(R(X,R(u,Z)Y)u-R(Y,R(u,Z)X)u\Big)^{v} \nonumber\\
&&+\frac{1}{\alpha}\Big(A_{f}(X,\frac{1}{2f}R(u,Z)Y)-A_{f}(Y,\frac{1}{2f}R(u,Z)X)\Big)^{h} \nonumber\\
&&-\frac{1}{\alpha}\tilde{g}^{f}(Z^{v},U)(R(X,Y)u)^{v}+\frac{1+\alpha}{\alpha}\tilde{g}^{f}((R(X,Y)u)^{v},Z^{v})U.
\varepsilonnd{eqnarray}
\begin{eqnarray}
iii) \ \tilde{R}^{f}(X^{h},Y^{v})Z^{h} &=& \frac{1}{2\alpha }\tilde{\nabla}^{f}_{X^{h}}(\frac{1}{f}R(u,Y)Z)^{h}\nonumber\\
&&-\frac{1}{2\alpha f}(R(u,\nabla_{X}Y)Z)^{h}-\frac{1}{2\alpha f}(R(u,Y)\nabla_{X}Z)^{h}
+\frac{1}{2}(R(X,Z)Y)^{v} \nonumber\\
&&-\frac{1}{2\alpha}\tilde{g}^{f}(Y^{v},U)(R(X,Z)u)^{v}-\frac{1}{2\alpha}\tilde{g}^{f}((R(X,Z)u)^{v},U)Y^{v}\nonumber\\
&&+\frac{1+\alpha}{2\alpha}\tilde{g}^{f}((R(X,Z)u)^{v},Y^{v})U
-\frac{1}{2\alpha}\tilde{g}^{f}(Y^{v},U)\tilde{g}^{f}((R(X,Z)u)^{v},U)U \nonumber\\
&& -\frac{1}{2\alpha f}(R(u,Y)A_{f}(X,Z))^{h},
\varepsilonnd{eqnarray}
\begin{eqnarray}
iv)\ \tilde{R}^{f}(X^{h},Y^{v})Z^{v}&=&-\frac{1}{2\alpha f}\Big(R(Y,Z)X\Big)^{h}
-\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,Y)R(u,Z)X\Big)^{h}\nonumber\\
&&+\frac{1}{2\alpha^{2} f}[g(Y,u)(R(u,Z)X)^{h}-g(Z,u)(R(u,Y)X)^{h}],
\varepsilonnd{eqnarray}
\begin{eqnarray}
v)\ \tilde{R}^{f}_{(p,u)}(X^{v},Y^{v})Z^{h}&=&-\frac{1}{2\alpha f}\Big(R(X,Y)Z\Big)^{h}
-\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,X)R(u,Y)Z\Big)^{h}\nonumber\\
&&+\frac{1}{2\alpha f}\Big(R(Y,X)Z\Big)^{h}
+\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,Y)R(u,X)Z\Big)^{h},
\varepsilonnd{eqnarray}
\begin{eqnarray}
vi) \ \tilde{R}^{f}_{(p,u)}(X^{v},Y^{v})Z^{v}&=&\frac{1+\alpha+\alpha^{2}}{\alpha^{2}}
(\tilde{g}^{f}(Y^{v},Z^{v})X^{v}-\tilde{g}^{f}(X^{v},Z^{v})Y^{v}) \nonumber\\
&&+\frac{2+\alpha}{\alpha^{2}}
(\tilde{g}^{f}(X^{v},Z^{v})g(Y,u)U-\tilde{g}^{f}(Y^{v},Z^{v})g(X,u)U) \nonumber\\
&&+\frac{2+\alpha}{\alpha^{2}}
(g(X,u)g(Z,u)Y^{v}-g(Y,u)g(Z,u)X^{v}).
\varepsilonnd{eqnarray}
for any $X, Y, Z\in T_{p}M$.
\varepsilonnd{prop}
\begin{proof}
$i)$ By $i)$ of Proposition 4.2 and direct calculation we get
\begin{eqnarray}
\tilde{R}^{f}(X^{h},Y^{h})Z^{h}&=&\tilde{\nabla}^{f}_{X^{h}}\tilde{\nabla}^{f}_{Y^{h}}Z^{h}-\tilde{\nabla}^{f}_{Y^{h}}\tilde{\nabla}^{f}_{X^{h}}Z^{h}
-\tilde{\nabla}^{f}_{[X^{h},Y^{h}]}Z^{h}\nonumber\\
&=&\tilde{\nabla}^{f}_{X^{h}}((\nabla_{Y}Z)^{h}+A_{f}(Y,Z)^{h}-\frac{1}{2}(R(Y,Z)u)^{v}) \nonumber\\
&& -\tilde{\nabla}^{f}_{Y^{h}}((\nabla_{X}Z)^{h}+A_{f}(X,Z)^{h}-\frac{1}{2}(R(X,Z)u)^{v})\nonumber\\
&&-\tilde{\nabla}^{f}_{[X,Y]^{h}-(R(X,Y)u)^{v}}Z^{h}\nonumber\\
&=&\nabla_{X}(\nabla_{Y}Z+A_{f}(Y,Z))^{h}+A_{f}(X,\nabla_{Y}Z+A_{f}(Y,Z))^{h} \nonumber\\
&&-\frac{1}{2}\Big(R(X,\nabla_{Y}Z+A_{f}(Y,Z))u\Big)^{v}-\nabla_{Y}(\nabla_{X}Z+A_{f}(X,Z))^{h}\nonumber\\
&&-A_{f}(Y,\nabla_{X}Z+A_{f}(X,Z))^{h}+\frac{1}{2}\Big(R(Y,\nabla_{X}Z+A_{f}(X,Z))u\Big)^{v}\nonumber\\
&&-(\nabla_{[X,Y]}Z)^{h}-A_{f}([X,Y],Z)^{h}-\frac{1}{2}\Big(R([X,Y],Z)u\Big)^{v}\nonumber\\
&&+\frac{1}{2\alpha f}\Big(R(u,R(X,Y)u)Z\Big)^{h}\nonumber\\
&&+\frac{1}{2}\Big(\nabla_{Y}(R(X,Z)u)\Big)^{v}+\frac{1}{4\alpha f}\Big(R(u,R(X,Z)u)Y\Big)^{h} \nonumber\\
&&-\frac{1}{2}\Big(\nabla_{X}(R(Y,Z)u)\Big)^{v}-\frac{1}{4\alpha f}\Big(R(u,R(Y,Z)u)X\Big)^{h}.
\varepsilonnd{eqnarray}
$ii)$ Note that the equation $\tilde{g}^{f}_{(p,u)}(X^{v},U)=g_{p}(X,u)$ implies that
\begin{equation}
\tilde{g}^{f}_{(p,u)}((R(X,Y)u)^{v},U)=g_{p}(R(X,Y)u,u)=0.
\varepsilonnd{equation}
Hence
\begin{eqnarray*}
\alpha\tilde{R}^{f}(X^{h},Y^{h})Z^{v}&=&\alpha\tilde{\nabla}^{f}_{X^{h}}\tilde{\nabla}^{f}_{Y^{h}}Z^{v}
-\alpha\tilde{\nabla}^{f}_{Y^{h}}\tilde{\nabla}^{f}_{X^{h}}Z^{v}
-\alpha\tilde{\nabla}^{f}_{[X^{h},Y^{h}]}Z^{v}\nonumber\\
&=&\tilde{\nabla}^{f}_{X^{h}}(\alpha(\nabla_{Y}Z)^{v}+\frac{1}{2f}(R(u,Z)Y)^{h}) \nonumber\\
&& -\tilde{\nabla}^{f}_{Y^{h}}(\alpha(\nabla_{X}Z)^{v}+\frac{1}{2f}(R(u,Z)X)^{h})
-\alpha\tilde{\nabla}^{f}_{[X,Y]^{h}-(R(X,Y)u)^{v}}Z^{v} \nonumber\\
-\alpha\tilde{\nabla}^{f}_{[X,Y]^{h}-(R(X,Y)u)^{v}}Z^{v} \nonumber\\
&=&(\nabla_{X}(\frac{1}{2f}R(u,Z)Y))^{h}-\frac{1}{4f}(R(X,R(u,Z)Y)u)^{v}\nonumber\\
&&+\frac{1}{2f}(R(u,\nabla_{Y}Z)X)^{h}\nonumber\\
&& +A_{f}(X,\frac{1}{2f}R(u,Z)Y)^{h}+\alpha(\nabla_{X}\nabla_{Y}Z)^{v} \nonumber\\
&&-(\nabla_{Y}(\frac{1}{2f}R(u,Z)X))^{h}+\frac{1}{4f}(R(Y,R(u,Z)X)u)^{v}\nonumber\\
&&-\frac{1}{2f}(R(u,\nabla_{X}Z)Y)^{h}\nonumber\\
&&-A_{f}(Y,\frac{1}{2f}R(u,Z)X)^{h}-\alpha(\nabla_{Y}\nabla_{X}Z)^{v} \nonumber\\
&&-\frac{1}{2f}(R(u,Z)[X,Y])^{h}-\alpha(\nabla_{[X,Y]}Z)^{v} \nonumber\\
\end{eqnarray*}
\begin{eqnarray}
&&-[\tilde{g}^{f}((R(X,Y)u)^{v},U)Z^{v}+\tilde{g}^{f}(Z^{v},U)(R(X,Y)u)^{v}] \nonumber\\
&&+(1+\alpha)\tilde{g}^{f}((R(X,Y)u)^{v},Z^{v})U-\tilde{g}^{f}((R(X,Y)u)^{v},U)\tilde{g}^{f}(Z^{v},U)U \nonumber\\
&=&\alpha(R(X,Y)Z)^{v}+\frac{1}{2f}[\nabla_{X}(R(u,Z)Y)-\nabla_{Y}(R(u,Z)X)]^{h} \nonumber\\
&&-\frac{1}{4f}[R(X,R(u,Z)Y)u-R(Y,R(u,Z)X)u]^{v} \nonumber\\
&&+[A_{f}(X,\frac{1}{2f}R(u,Z)Y)-A_{f}(Y,\frac{1}{2f}R(u,Z)X)]^{h} \nonumber\\
&&-\tilde{g}^{f}(Z^{v},u)(R(X,Y)u)^{v}+(1+\alpha)\tilde{g}^{f}((R(X,Y)u)^{v},Z^{v})U.
\end{eqnarray}
$iii)$ Calculations similar to those above produce the third formula
\begin{eqnarray}
\tilde{R}^{f}(X^{h},Y^{v})Z^{h}&=&\tilde{\nabla}^{f}_{X^{h}}\tilde{\nabla}^{f}_{Y^{v}}Z^{h}
-\tilde{\nabla}^{f}_{Y^{v}}\tilde{\nabla}^{f}_{X^{h}}Z^{h}
-\tilde{\nabla}^{f}_{[X^{h},Y^{v}]}Z^{h}\nonumber\\
&=&\frac{1}{2\alpha }\tilde{\nabla}^{f}_{X^{h}}(\frac{1}{f}R(u,Y)Z)^{h}
-\tilde{\nabla}^{f}_{(\nabla_{X}Y)^{v}}Z^{h} \nonumber\\
&& -\tilde{\nabla}^{f}_{Y^{v}}[(\nabla_{X}Z)^{h}-\frac{1}{2}(R(X,Z)u)^{v}+A_{f}(X,Z)^{h}] \nonumber\\
&=& \frac{1}{2\alpha }\tilde{\nabla}^{f}_{X^{h}}(\frac{1}{f}R(u,Y)Z)^{h}\nonumber\\
&&-\frac{1}{2\alpha f}(R(u,\nabla_{X}Y)Z)^{h}-\frac{1}{2\alpha f}(R(u,Y)\nabla_{X}Z)^{h}
+\frac{1}{2}(R(X,Z)Y)^{v} \nonumber\\
&&-\frac{1}{2\alpha}\tilde{g}^{f}(Y^{v},U)(R(X,Z)u)^{v}-\frac{1}{2\alpha}\tilde{g}^{f}((R(X,Z)u)^{v},U)Y^{v}\nonumber\\
&&+\frac{1+\alpha}{2\alpha}\tilde{g}^{f}((R(X,Z)u)^{v},Y^{v})U
-\frac{1}{2\alpha}\tilde{g}^{f}(Y^{v},U)\tilde{g}^{f}((R(X,Z)u)^{v},U)U \nonumber\\
&& -\frac{1}{2\alpha f}(R(u,Y)A_{f}(X,Z))^{h}.
\end{eqnarray}
$iv)$ Since $X^{v}_{(p,u)}(f(r^{2}))=2f'(r^{2})g_{p}(X,u)$ and $(\tilde{\nabla}^{f}_{X^{h}}U)_{(p,u)}=0$ we get
\begin{eqnarray*}
2\alpha\tilde{R}^{f}(X^{h},Y^{v})Z^{v}&=&2\alpha[\tilde{\nabla}^{f}_{X^{h}}\tilde{\nabla}^{f}_{Y^{v}}Z^{v}
-\tilde{\nabla}^{f}_{Y^{v}}\tilde{\nabla}^{f}_{X^{h}}Z^{v}
-\tilde{\nabla}^{f}_{[X^{h},Y^{v}]}Z^{v}]\nonumber\\
&=&-2\tilde{\nabla}^{f}_{X^{h}}[\tilde{g}^{f}(Y^{v},U)Z^{v}
-(1+\alpha)\tilde{g}^{f}(Y^{v},Z^{v})U \nonumber\\
&& +\tilde{g}^{f}(Z^{v},U)Y^{v} +\tilde{g}^{f}(Y^{v},U)\tilde{g}^{f}(Z^{v},U)U ] \nonumber\\
&& -\alpha\tilde{\nabla}^{f}_{Y^{v}}(\frac{1}{\alpha f}R(u,Z)X)^{h}
-2\alpha[\tilde{\nabla}^{f}_{Y^{v}}(\nabla_{X}Z)^{v}+\tilde{\nabla}^{f}_{(\nabla_{X}Y)^{v}}Z^{v}] \nonumber\\
&=&-g(Y,u)[\frac{1}{\alpha f}(R(u,Z)X)^{h}+2(\nabla_{X}Z)^{v}]\nonumber\\
&&-g(Z,u)[\frac{1}{\alpha f}(R(u,Y)X)^{h}+2(\nabla_{X}Y)^{v}]\nonumber\\
&&+\frac{2}{\alpha f}g(Y,u)(R(u,Z)X)^{h}\nonumber\\
&&-\tilde{\nabla}^{f}_{Y^{v}}(\frac{1}{f}R(u,Z)X)^{h}\nonumber\\
&&+2[g(Y,u)(\nabla_{X}Z)^{v}+g(\nabla_{X}Z,u)Y^{v}]\nonumber\\
&&-(1+\alpha)\tilde{g}^{f}(Y^{v},(\nabla_{X}Z)^{v})U+g(Y,u)g(\nabla_{X}Z,u)U\nonumber\\
\end{eqnarray*}
\begin{eqnarray}
&&+g(\nabla_{X}Y,u)Z^{v}+g(Z,u)(\nabla_{X}Y)^{v}\nonumber\\
&&-(1+\alpha)\tilde{g}^{f}((\nabla_{X}Y)^{v},Z^{v})U+g(\nabla_{X}Y,u)g(Z,u)U\nonumber\\
&=&-\tilde{\nabla}^{f}_{Y^{v}}(\frac{1}{f}R(u,Z)X)^{h}\nonumber\\
&&+\frac{1}{\alpha f}[g(Y,u)(R(u,Z)X)^{h}-g(Z,u)(R(u,Y)X)^{h}]\nonumber\\
&=&-\frac{1}{f}\Big(R(Y,Z)X\Big)^{h}
-\frac{1}{2\alpha f^{2}}\Big(R(u,Y)R(u,Z)X\Big)^{h}\nonumber\\
&&+\frac{1}{\alpha f}[g(Y,u)(R(u,Z)X)^{h}-g(Z,u)(R(u,Y)X)^{h}]
\end{eqnarray}
For the last equation we have to show that all the terms not containing the Riemann curvature tensor $R$ vanish. But since
\begin{equation}
\tilde{g}^{f}(Y^{v},(\nabla_{X}Z)^{v})U=\frac{1}{\alpha}[g(Y,\nabla_{X}Z)+g(Y,u)g(\nabla_{X}Z,u)]U,
\end{equation}
the rest becomes
\begin{equation}
-\frac{2}{\alpha}[g(Y,\nabla_{X}Z)+g(Y,u)g(\nabla_{X}Z,u)+g(Z,\nabla_{X}Y)+g(Z,u)g(\nabla_{X}Y,u)]U,
\end{equation}
which vanishes, because
\begin{equation}
-\frac{2}{\alpha}X^{h}[\tilde{g}^{f}(Y^{v},Z^{v})+\tilde{g}^{f}(Y^{v},U)\tilde{g}^{f}(Z^{v},U)]U=0.
\end{equation}
$v)$ First we notice that
\begin{eqnarray}
\tilde{\nabla}^{f}_{X^{v}}\tilde{\nabla}^{f}_{Y^{v}}Z^{h}&=&\frac{1}{2\alpha}\tilde{\nabla}^{f}_{X^{v}}(\frac{1}{f}R(u,Y)Z)^{h}\nonumber\\
&=&-\frac{1}{2\alpha f}\Big(R(X,Y)Z\Big)^{h}
-\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,X)R(u,Y)Z\Big)^{h}.
\end{eqnarray}
By using the fact that $[X^{v},Y^{v}]=0$ we get
\begin{eqnarray}
\tilde{R}^{f}(X^{v},Y^{v})Z^{h}&=&\tilde{\nabla}^{f}_{X^{v}}\tilde{\nabla}^{f}_{Y^{v}}Z^{h}
-\tilde{\nabla}^{f}_{Y^{v}}\tilde{\nabla}^{f}_{X^{v}}Z^{h} \nonumber\\
&=&\frac{1}{2\alpha}\tilde{\nabla}^{f}_{X^{v}}(\frac{1}{f}R(u,Y)Z)^{h}
-\frac{1}{2\alpha}\tilde{\nabla}^{f}_{Y^{v}}(\frac{1}{f}R(u,X)Z)^{h}\nonumber\\
&=&-\frac{1}{2\alpha f}\Big(R(X,Y)Z\Big)^{h}
-\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,X)R(u,Y)Z\Big)^{h}\nonumber\\
&&+\frac{1}{2\alpha f}\Big(R(Y,X)Z\Big)^{h}
+\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,Y)R(u,X)Z\Big)^{h}.
\end{eqnarray}
$vi)$ The result is similar to Proposition 8.5 in \cite{GK2}.
\end{proof}
In the following let $\tilde{Q}^{f}(V,W)$ denote the square of the area of the parallelogram with sides $V$ and $W$
for $V,W\in C^{\infty}(TTM)$ given by
\begin{equation}
\tilde{Q}^{f}(V,W)=\|V\|^{2}\|W\|^{2}-\tilde{g}^{f}(V,W)^{2}.
\end{equation}
\begin{lem}\label{le:45}
Let $X,Y\in C^{\infty}(T_{p}M)$ be two orthonormal vectors in the tangent spaces $T_{p}M$ of $M$ at $p$. Then
\begin{eqnarray}
i) \ \tilde{Q}^{f}(X^{h},Y^{h})&=&f^{2}, \\
ii) \ \tilde{Q}^{f}(X^{h},Y^{v})&=&\frac{f}{\alpha}(1+g(Y,u)^{2}), \\
iii) \ \tilde{Q}^{f}(X^{v},Y^{v})&=&\frac{1}{\alpha^{2}}(1+g(Y,u)^{2}+g(X,u)^{2}).
\end{eqnarray}
\end{lem}
\begin{proof}
$i)$ The statement is a direct consequence of the definition of the rescaled Cheeger-Gromoll metric.
$ii)$ This is a direct consequence of
\begin{eqnarray}
\tilde{Q}^{f}(X^{h},Y^{v})&=&\tilde{g}^{f}(X^{h},X^{h})\tilde{g}^{f}(Y^{v},Y^{v})-\tilde{g}^{f}(X^{h},Y^{v})^{2}\nonumber\\
&=&\frac{f}{\alpha}(1+g(Y,u)^{2}).
\end{eqnarray}
$iii)$ This last part follows from
\begin{eqnarray}
\tilde{Q}^{f}(X^{v},Y^{v})&=&\tilde{g}^{f}(X^{v},X^{v})\tilde{g}^{f}(Y^{v},Y^{v})-\tilde{g}^{f}(X^{v},Y^{v})^{2}\nonumber\\
&=&\frac{1}{\alpha}(1+g(X,u)^{2})\frac{1}{\alpha}(1+g(Y,u)^{2})\nonumber\\
&&-[\frac{1}{\alpha^{2}}(g(X,Y)+g(X,u)g(Y,u))]^{2} \nonumber\\
&=&\frac{1}{\alpha^{2}}(1+g(Y,u)^{2}+g(X,u)^{2}).
\end{eqnarray}
\end{proof}
Let $\tilde{G}^{f}$ be the $(2,0)-$tensor on the tangent bundle $TM$ given by
\begin{equation}
\tilde{G}^{f}(V,W)\mapsto \tilde{g}^{f}(\tilde{R}^{f}(V,W)W,V)
\end{equation}
for $V,W\in C^{\infty}(TTM)$.
\begin{lem}\label{le:46}
Let $X,Y\in C^{\infty}(T_{p}M)$ be two orthonormal vectors in the tangent spaces $T_{p}M$ of $M$ at $p$. Then
\begin{eqnarray}
i) \ \tilde{G}^{f}(X^{h},Y^{h})&=&\frac{1}{f} K(X,Y)-\frac{3}{4\alpha^{2}f^{2}}|R(X,Y)u|^{2}+\tilde{L}^{f}(X,Y), \\
ii) \ \tilde{G}^{f}(X^{h},Y^{v})&=&\frac{1}{4\alpha^{2} f^{2}}|R(u,Y)X|^{2}, \\
iii) \ \tilde{G}^{f}(X^{v},Y^{v})&=&\frac{1+\alpha+\alpha^{2}}{\alpha^{2}}\tilde{Q}^{f}(X^{v},Y^{v})
-\frac{2+\alpha}{\alpha^{3}}(g(X,u)^{2}+g(Y,u)^{2}).
\end{eqnarray}
\end{lem}
\begin{proof}
$i)$ The statement follows by
\begin{eqnarray}
\alpha\tilde{G}^{f}(X^{h},Y^{h})&=&\alpha\tilde{g}^{f}(\tilde{R}^{f}(X^{h},Y^{h})Y^{h},X^{h}) \nonumber\\
&=& \tilde{g}^{f}\Big(\nabla_{X}(\nabla_{Y}Y+A_{f}(Y,Y))^{h},X^{h}\Big)\nonumber\\
&&+\tilde{g}^{f}\Big(A_{f}(X,\nabla_{Y}Y+A_{f}(Y,Y))^{h},X^{h}\Big)\nonumber\\
&& -\tilde{g}^{f}\Big(\nabla_{Y}(\nabla_{X}Y+A_{f}(X,Y))^{h},X^{h}\Big)\nonumber\\
&&-\tilde{g}^{f}\Big(A_{f}(Y,\nabla_{X}Y+A_{f}(X,Y))^{h},X^{h}\Big)\nonumber\\
&& -\tilde{g}^{f}\Big((\nabla_{[X,Y]}Y)^{h}+A_{f}([X,Y],Y)^{h},X^{h}\Big)\nonumber\\
&&+\tilde{g}^{f}\Big(\frac{1}{2\alpha f(p)}\Big(R(u,R(X,Y)u)Y\Big)^{h},X^{h}\Big)\nonumber\\
&& +\tilde{g}^{f}\Big(\frac{1}{4\alpha f(p)}\Big(R(u,R(X,Y)u)Y\Big)^{h},X^{h}\Big)\nonumber\\
&&-\tilde{g}^{f}\Big(\frac{1}{4\alpha f(p)}(R(u,R(Y,Y)u)X)^{h},X^{h}\Big)\nonumber\\
&=&\frac{1}{f} K(X,Y)-\frac{3}{4\alpha^{2}}|R(X,Y)u|^{2}+\tilde{L}^{f}(X,Y).
\end{eqnarray}
The properties of the Riemann curvature tensor give
\begin{equation}
g(R(u,R(X,Y)u)Y,X)=-|R(X,Y)u|^{2},
\end{equation}
from which the result follows.
$ii)$ The statement follows by
\begin{eqnarray}
\alpha^{2}\tilde{G}^{f}(X^{h},Y^{v})&=&\alpha^{2}\tilde{g}^{f}(\tilde{R}^{f}(X^{h},Y^{v})Y^{v},X^{h}) \nonumber\\
&=&-\alpha^{2}\tilde{g}^{f}\Big(-\frac{1}{2\alpha f}\Big(R(Y,Z)X\Big)^{h} ,X^{h}\Big) \nonumber\\
&&-\alpha^{2}\tilde{g}^{f}\Big(-\frac{1}{4\alpha^{2} f^{2}}\Big(R(u,Y)R(u,Z)X\Big)^{h} ,X^{h}\Big) \nonumber\\
&&+\alpha^{2}\tilde{g}^{f}\Big(\frac{1}{2\alpha^{2} f}g(Y,u)(R(u,Y)X)^{h},X^{h}\Big)\nonumber\\
&&-\alpha^{2}\tilde{g}^{f}\Big(\frac{1}{2\alpha^{2} f}g(Y,u)(R(u,Y)X)^{h},X^{h}\Big)\nonumber\\
&=&\frac{1}{4 f^{2}}|R(u,Y)X|^{2}.
\end{eqnarray}
$iii)$ In the last case we have
\begin{eqnarray}
\tilde{G}^{f}(X^{v},Y^{v})&=&\tilde{g}^{f}(\tilde{R}^{f}(X^{v},Y^{v})Y^{v},X^{v}) \nonumber\\
&&+\frac{\alpha+2}{\alpha^{2}}(\tilde{g}^{f}(X^{v},Y^{v})g(Y,u)g(X,u)-\tilde{g}^{f}(Y^{v},Y^{v})g(X,u)^{2}) \nonumber\\
&&+\frac{1+\alpha+\alpha^{2}}{\alpha^{2}}(\tilde{g}^{f}(Y^{v},Y^{v})\tilde{g}^{f}(X^{v},X^{v})
-\tilde{g}^{f}(X^{v},Y^{v})^{2}) \nonumber\\
&&+\frac{\alpha+2}{\alpha^{2}}(g(X,u)g(Y,u)\tilde{g}^{f}(X^{v},Y^{v})
-g(Y,u)^{2}\tilde{g}^{f}(X^{v},X^{v}) ) \nonumber\\
&=&\frac{1+\alpha+\alpha^{2}}{\alpha^{2}}\tilde{Q}^{f}(X^{v},Y^{v})
-\frac{2+\alpha}{\alpha^{3}}(g(X,u)^{2}+g(Y,u)^{2}).
\end{eqnarray}
\end{proof}
\begin{prop}\label{pr: 47}
Let $(M, g)$ be a Riemannian manifold and $TM$ be its tangent bundle equipped with the rescaled Cheeger-Gromoll
metric $\tilde{g}^{f}$. Then the sectional curvature $\tilde{K}^{f}$ of $(TM,\tilde{g}^{f})$ satisfies the following:
\begin{eqnarray}
i) \ \tilde{K}^{f}(X^{h},Y^{h})&=&\frac{1}{f^{3}} K(X,Y)-\frac{3}{4\alpha f^{4}}|R(X,Y)u|^{2}
+\frac{1}{f^{2}}\tilde{L}^{f}(X,Y), \\
ii) \ \tilde{K}^{f}(X^{h},Y^{v})&=&\frac{1}{4\alpha f^{3}}\frac{|R(u,Y)X|^{2}}{(1+g(Y,u)^{2})}, \\
iii) \ \tilde{K}^{f}(X^{v},Y^{v})&=&\frac{1-\alpha}{\alpha^{2}}+\frac{2+\alpha}{\alpha}\frac{1}{(1+g(Y,u)^{2}+g(X,u)^{2})}.
\end{eqnarray}
\end{prop}
\begin{proof}
The division of $\tilde{G}^{f}(X^{i},Y^{j})$ by $\tilde{Q}^{f}(X^{i},Y^{j})$ for $i,j\in\{h,v\}$ gives the result.
\end{proof}
\begin{prop}\label{pr: 48}
Let $(M, g)$ be a Riemannian manifold of constant sectional curvature $\kappa$. Let $TM$ be its tangent bundle equipped with
the rescaled Cheeger-Gromoll metric $\tilde{g}^{f}$. Then the sectional curvature $\tilde{K}^{f}$ of $(TM,\tilde{g}^{f})$
satisfies the following:
\begin{eqnarray}
i) \ \tilde{K}^{f}(X^{h},Y^{h})&=&\frac{1}{f^{3}}\kappa -\frac{3\kappa^{2}}{4\alpha f^{4}}(g(u,X)^{2}+g(u,Y)^{2})
+\frac{1}{f^{2}}\tilde{L}^{f}(X,Y), \\
ii) \ \tilde{K}^{f}(X^{h},Y^{v})&=&\frac{1}{4\alpha f^{3}}\frac{\kappa^{2}g(X,u)^{2}}{(1+g(Y,u)^{2})}, \\
iii) \ \tilde{K}^{f}(X^{v},Y^{v})&=&\frac{1-\alpha}{\alpha^{2}}+\frac{2+\alpha}{\alpha}\frac{1}{(1+g(Y,u)^{2}+g(X,u)^{2})}
\end{eqnarray}
for any orthonormal vectors $X,Y\in T_{p}M$.
\end{prop}
\begin{proof}
This is a simple calculation using the special form of the curvature tensor.
\end{proof}
For a given point $(p,u)\in TM$ with $u\neq0$, let $\{e_{1},\cdots,e_{m}\}$ be an orthonormal basis for the tangent space $T_{p}M$
of $M$ at $p$ such that $e_{1}=\frac{u}{|u|}$, where $|u|$ is the norm of $u$ with respect to the metric $g$ on $M$. Then for
$i\in\{1,\cdots,m\}$ and $k\in\{2,\cdots,m\}$ define the horizontal and vertical lifts by $t_{i}=e_{i}^{h}$, $t_{m+1}=e_{1}^{v}$
and $t_{m+k}=\sqrt{\alpha}e_{k}^{v}$. Then $\{t_{1},\cdots,t_{2m}\}$ is an orthonormal basis of the tangent space $T_{(p,u)}TM$
with respect to the rescaled Cheeger-Gromoll metric.
\begin{lem}\label{le:411}
Let $(p,u)$ be a point on $TM$ and $\{t_{1},\cdots,t_{2m}\}$ be an orthonormal basis of the tangent space $T_{(p,u)}TM$ as above.
Then the sectional curvature $\tilde{K}^{f}$ satisfies the following equations
\begin{eqnarray}
\tilde{K}^{f}(t_{i},t_{j})&=&\frac{1}{f^{3}}K(e_{i},e_{j}) -\frac{3}{4\alpha f^{4}}|R(e_{i},e_{j})u|^{2}
+\frac{1}{f^{2}}\tilde{L}^{f}(e_{i},e_{j}), \\
\tilde{K}^{f}(t_{i},t_{m+1})&=& 0, \\
\tilde{K}^{f}(t_{i},t_{m+k})&=& \frac{1}{4f^{3}}|R(u,e_{k})e_{i}|^{2}, \\
\tilde{K}^{f}(t_{m+1},t_{m+k})&=& \frac{3}{\alpha^{2}}, \\
\tilde{K}^{f}(t_{m+k},t_{m+l})&=& \frac{\alpha^{2}+\alpha+1}{\alpha^{2}}
\end{eqnarray}
for $i,j\in\{1,\cdots,m\}$ and $k,l\in\{2,\cdots,m\}$.
\end{lem}
\begin{prop}\label{pr: 412}
Let $(M, g)$ be a Riemannian manifold with scalar curvature $S$. Let $TM$ be its tangent bundle equipped with
the rescaled Cheeger-Gromoll metric $\tilde{g}^{f}$ and $(p,u)$ be a point on $TM$. Then the scalar curvature $\tilde{S}^{f}$
of $(TM,\tilde{g}^{f})$ satisfies the following:
\begin{eqnarray}
\tilde{S}^{f}_{(p,u)}&=& S_{p}+\frac{2\alpha-3}{4\alpha f^{4}}\sum_{i,j=1}^{m}|R(e_{i},e_{j})u|^{2}+\frac{1}{f^{2}}\sum_{i,j=1}^{m}\tilde{L}^{f}(e_{i},e_{j}) \nonumber\\
&&+\frac{m-1}{\alpha^{2}}[6+(m-2)(\alpha^{2}+\alpha+1)].
\end{eqnarray}
\end{prop}
\begin{proof}
Let $\{t_{1},\cdots,t_{2m}\}$ be an orthonormal basis of the tangent space $T_{(p,u)}TM$ as above. By the definition of the scalar
curvature we know that
\begin{eqnarray}
\tilde{S}^{f}&=&\sum_{i,j=1}^{m}\tilde{K}^{f}(t_{i},t_{j})\nonumber\\
&=&2\sum_{i,j=1,i<j}^{m}\tilde{K}^{f}(t_{i},t_{j})+2\sum_{i,j=1}^{m}\tilde{K}^{f}(t_{i},t_{m+j})
+2\sum_{i,j=1,i<j}^{m}\tilde{K}^{f}(t_{m+i},t_{m+j}) \nonumber\\
&=&\sum_{i\neq j}^{m}\tilde{K}^{f}(t_{i},t_{j})-\frac{-3}{4\alpha f^{2}}\sum_{i,j=1}^{m}\tilde{R}^{f}(t_{i},t_{j})\nonumber\\
&&+\frac{1}{2}\sum_{i,j=1}^{m}\tilde{R}^{f}(t_{i},t_{j})+2\sum_{i=2}^{m}\frac{3}{\alpha^{2}}
+\sum_{i,j=1,i\neq j}^{m}\frac{\alpha^{2}+\alpha+1}{\alpha^{2}} \nonumber\\
&=& S+\frac{2\alpha-3}{4\alpha f^{4}}\sum_{i,j=1}^{m}|R(e_{i},e_{j})u|^{2} \nonumber\\
&&+\frac{1}{f^{2}}\sum_{i,j=1}^{m}\tilde{L}^{f}(e_{i},e_{j})+\frac{m-1}{\alpha^{2}}[6+(m-2)(\alpha^{2}+\alpha+1)].
\end{eqnarray}
For the fact that
\begin{equation}
\sum_{i,j=1}^{m}|R(e_{i},e_{j})u|^{2}=\sum_{i,j=1}^{m}|R(u,e_{j})e_{i}|^{2}
\end{equation}
see the proof of Proposition 3.9.
\end{proof}
\section*{ Acknowledgements}
The second author was partially supported by National Science Foundation of China under Grant No.10801027, and Fok Ying Tong
Education Foundation under Grant No.121003.
\begin{thebibliography}{00}
\bibitem{Sa} S. Sasaki.: On the differential geometry of tangent bundles of Riemannian manifolds. Tohoku Math. J. 10, 338-354, (1958).
\bibitem{Ko} O. Kowalski.: Curvature of the induced Riemannian metric of the tangent bundle of a Riemannian manifold. J.Reine Angew.math.
250. 124-129, (1971).
\bibitem{MT} E. Musso and F. Tricerri.: Riemannian metrics on the tangent bundle, Ann. Math. Pura. Appl. 150(4), 1-19, (1988).
\bibitem{CG} J. Cheeger and D. Gromoll.: On the Structure of Complete Manifolds of Nonnegative Curvature. Ann. of Math. 96, 413-443, (1972).
\bibitem{Se} M. Sekizawa.: Curvatures of Tangent Bundles with Cheeger-Gromoll metric, Tokyo J. Math. 14, 407-417, (1991).
\bibitem{GK1} S. Gudmundsson and E. Kappos.: On the Geometry of the Tangent Bundle with the Cheeger-Gromoll metric,
Tokyo J. Math. 25, no.1, 75-83, (2002).
\bibitem{GK2} S. Gudmundsson and E. Kappos.: On the Geometry of the Tangent Bundle, Expo. Math. 20, no.1, 1-41, (2002).
\bibitem{Do} P. Dombrowski.: On the Geometry of the Tangent Bundle. J. Reine Angew Math. 210, 73-88, (1962).
\bibitem{Ne} B. O'Neill.: The fundamental Equations of a Submersion, Michigan Math. J. 13, 459-469, (1966).
\end{thebibliography}
\end{document}
\begin{document}
\title[Linear operators and the {L}iouville theorem]{The {L}iouville theorem and linear operators satisfying the maximum principle
}
\author[N.~Alibaud]{Natha\"el Alibaud}
\address[N.~Alibaud]{ENSMM\\
26 Chemin de l'Epitaphe\\ 25030 Besan\c{c}on cedex\\ Fran\-ce
and\\
LMB\\ UMR CNRS 6623\\ Universit\'e de Bourgogne Franche-Comt\'e (UBFC)\\ France}
\email{nathael.alibaud\@@{}ens2m.fr}
\urladdr{https://lmb.univ-fcomte.fr/Alibaud-Nathael}
\author[F.~del Teso]{F\'elix del Teso}
\address[F.~del Teso]{Departamento de An\'alisis Matem\'atico y Matem\'atica Aplicada\\
Universidad Complutense de Madrid (UCM)\\
28040 Madrid, Spain}
\email{fdelteso\@@{}ucm.es}
\urladdr{https://sites.google.com/view/felixdelteso}
\author[J. Endal]{J\o rgen Endal}
\address[J. Endal]{Department of Mathematical Sciences\\
Norwegian University of Science and Technology (NTNU)\\
N-7491 Trondheim, Norway}
\email{jorgen.endal\@@{}ntnu.no}
\urladdr{http://folk.ntnu.no/jorgeen}
\author[E.~R.~Jakobsen]{Espen R. Jakobsen}
\address[E.~R.~Jakobsen]{Department of Mathematical Sciences\\
Norwegian University of Science and Technology (NTNU)\\
N-7491 Trondheim, Norway}
\email{espen.jakobsen\@@{}ntnu.no}
\urladdr{http://folk.ntnu.no/erj}
\subjclass[2010]{
{35B10,
35B53,
35J70,
35R09,
60G51,
65R20}}
\keywords{
Nonlocal degenerate elliptic operators, Courr\`ege theorem, L\'evy-Khintchine formula, Liouville theorem, periodic solutions, propagation of maximum, subgroups of $\ensuremath{\mathbb{R}}^d$, Kronecker theorem}
\begin{abstract}
A result by Courr\`ege says that linear translation invariant operators satisfy the
maximum principle if and only if they are of the form
$\ensuremath{\mathcal{L}}=\ensuremath{\mathcal{L}}^{\sigma,b}+\ensuremath{\mathcal{L}}^\mu$ where
$$
\ensuremath{\mathcal{L}}^{\sigma,b}[u](x)=\textup{tr}(\sigma \sigma^{\texttt{T}} D^2u(x))+b\cdot
Du(x)
$$
and
$$
\ensuremath{\mathcal{L}}^\mu[u](x)=\int_{\ensuremath{\mathbb{R}}^d\setminus\{0\}} \big(u(x+z)-u(x)-z\cdot Du(x) \mathbf{1}_{|z| \leq
1}\big) \,\mathrm{d} \mu(z).$$
This class of operators coincides with the infinitesimal generators of L\'evy
processes in probability theory.
In this paper we give a complete characterization of the operators of this form that satisfy the Liouville theorem:
Bounded solutions $u$ of $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$ are constant. The
Liouville property is obtained as a consequence of a periodicity
result that completely characterizes bounded distributional solutions
of $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$.
The proofs combine arguments from PDEs and group theory. They are simple and short.
\end{abstract}
\maketitle
\section{Introduction and main results}
\label{sec:intro}
The classical Liouville theorem states that bounded
solutions of $\Delta u=0$ in $\ensuremath{\mathbb{R}}^d$ are constant. The Laplace
operator $\Delta$ is the most classical example of an operator $\ensuremath{\mathcal{L}}:C^\infty_\textup{c} (\ensuremath{\mathbb{R}}^d) \to C(\ensuremath{\mathbb{R}}^d)$
satisfying the maximum principle in the sense that
\begin{equation}\label{mp}
\mbox{$\ensuremath{\mathcal{L}} [u](x) \leq 0$ at any global maximum point $x$ of $u$.}
\end{equation}
In the class of linear
translation invariant\footnote{Translation invariance means that
$\ensuremath{\mathcal{L}}[u(\cdot+y)](x)=\ensuremath{\mathcal{L}}[u](x+y)$ for all $x,y$.} operators
(which includes
$\Delta$), a result by Courr\`ege \cite{Cou64}\footnote{If \eqref{mp}
holds at any {\it nonnegative} maximum point, then by definition the
{\it positive} maximum principle holds and by \cite{Cou64}
there is an extra term $c u(x)$ with $c \leq 0$ in
\eqref{def:localOp1}. For the purpose of this paper (Liouville and periodicity), the case $c<0$ is trivial since then $u=0$
is the unique bounded solution of $\mathcal{L}[u]=0$.}
says that the maximum
principle holds if and only if
\begin{equation}\label{eq:GenOp1}
\ensuremath{\mathcal{L}}=\ensuremath{\mathcal{L}}^{\sigma,b}+\ensuremath{\mathcal{L}}^\mu,
\end{equation}
where
\begin{align}\label{def:localOp1}
\ensuremath{\mathcal{L}}^{\sigma,b}[u](x)&=\textup{tr}(\sigma \sigma^{\texttt{T}} D^2u(x))+b \cdot Du(x),\\
\label{def:levy1}
\ensuremath{\mathcal{L}}^\mu[u](x)&=\int_{\ensuremath{\mathbb{R}}^d\setminus\{0\}} \big(u(x+z)-u(x)-z \cdot Du(x) \mathbf{1}_{|z| \leq 1}\big) \,\mathrm{d} \mu(z),
\end{align}
and
\begin{align}
& b\in\ensuremath{\mathbb{R}}^d, \ \ \text{and}
\ \ \text{$\sigma=(\sigma_1,\ldots,\sigma_P)\in\ensuremath{\mathbb{R}}^{d\times P}$ for
$P\in\ensuremath{\mathbb{N}}$, $\sigma_j \in\ensuremath{\mathbb{R}}^d$,}\label{as:sigmab}\tag{$\textup{A}_{\sigma,b}$}\\
&\mu\geq0 \ \text{is a Radon measure on $\ensuremath{\mathbb{R}}^d\setminus\{0\}$, $\int_{\ensuremath{\mathbb{R}}^d\setminus\{0\}} \min\{|z|^2,1\} \,\mathrm{d} \mu(z)<\infty$.}\label{as:mus}\tag{$\textup{A}_{\mu}$}
\end{align}
These
elliptic operators have a local part $\ensuremath{\mathcal{L}}^{\sigma,b}$ and a nonlocal part
$\ensuremath{\mathcal{L}}^\mu$, either of which could be zero.\footnote{The representation
\eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1} is
unique up to the choice of a cut-off function in
\eqref{def:levy1} and a square root $\sigma$ of
$a=\sigma\sigma^\texttt{T}$. In this paper we always use
$\mathbf{1}_{|z| \leq 1}$ as a cut-off function.}
Another point of view
of these operators
comes from probability and
stochastic processes: Every operator mentioned above is the generator
of a L\'evy process, and conversely, every generator of a L\'evy process
is of the form given above. L\'evy processes are Markov
processes with stationary independent increments and are the
prototypical models of noise in
science, engineering, and finance. Well-known examples are Brownian
motions, Poisson processes,
stable processes, and various other types of jump processes.
{\em The main contributions of this paper are the following:
\begin{enumerate}[ \bf 1.]
\item We give necessary and sufficient conditions for $\ensuremath{\mathcal{L}}$ to have
the Liouville property: Bounded solutions $u$ of $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$ are
constant.
\item For general $\ensuremath{\mathcal{L}}$, we show that all bounded solutions of $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$ are periodic and we identify the set of admissible periods.
\end{enumerate}}
Let us now state our results.
For a set $S\subseteq \ensuremath{\mathbb{R}}^d$, we let $G(S)$ denote the smallest additive subgroup of $\ensuremath{\mathbb{R}}^d$
containing $S$ and define the subspace $V_S\subseteq \overline{G(S)}$ by
\begin{equation*}
V_S:=\Big\{g \in \overline{G(S)} \ :\ t g \in \overline{G(S)} \mbox{ } \forall t \in \ensuremath{\mathbb{R}} \Big\}.
\end{equation*}
Then we take $\supp(\mu)$ to be the support of the measure $\mu$ and
define
\begin{align*}
G_\mu:=\overline{G(\supp(\mu))},\quad V_\mu:=V_{\supp (\mu)},\quad\text{and}\quad
c_\mu:=-\int_{\{|z|\leq1\}\setminus V_\mu} z \,\mathrm{d}\mu(z).
\end{align*}
Here $c_\mu$ is well-defined and uniquely determined by $\mu$, cf. Proposition \ref{def-prop}. We also need the subspace
$W_{\sigma,b+c_\mu}:=\textup{span}_{\ensuremath{\mathbb{R}}}\{\sigma_1,\ldots,\sigma_P,b+c_\mu\}$.
\begin{theorem}[General Liouville]\label{thm:Liouville}
Assume \eqref{as:sigmab} and \eqref{as:mus}. Let $\ensuremath{\mathcal{L}}$ be given by \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}. Then the
following statements are equivalent:
\begin{enumerate}[\rm(a)]
\item\label{a4}
If $u\in L^\infty(\ensuremath{\mathbb{R}}^d)$ satisfies
$\ensuremath{\mathcal{L}}[u]=0$ in $\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$, then $u$ is a.e. a constant.
\item\label{label:thmLiou:c}
$\overline{G_\mu+W_{\sigma,b+c_\mu}}=\ensuremath{\mathbb{R}}^d$.
\end{enumerate}
\end{theorem}
The above Liouville result is a consequence of a periodicity result
for bounded solutions of $\ensuremath{\mathcal{L}}[u]=0$
in $\ensuremath{\mathbb{R}}^d$. For a set $S\subseteq \ensuremath{\mathbb{R}}^d$, a function $u\in L^\infty(\ensuremath{\mathbb{R}}^d)$ is a.e.
\emph{$S$-periodic} if
$u(\cdot+s)=u(\cdot)$ in $\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$ $\forall s\in S$. Our
result is the following:
\begin{theorem}[General periodicity]\label{thm:PeriodGeneralOp}
Assume \eqref{as:sigmab}, \eqref{as:mus}, and $u\in
L^\infty(\ensuremath{\mathbb{R}}^d)$. Let $\ensuremath{\mathcal{L}}$ be given by \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}. Then the
following statements are equivalent:
\begin{enumerate}[\rm(a)]
\item
\label{a3}
$\ensuremath{\mathcal{L}}[u]=0$ in $\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$.
\item\label{b3} $u$ is a.e. $\overline{G_\mu+W_{\sigma,b+c_\mu}}$-periodic.
\end{enumerate}
\end{theorem}
This result characterizes the bounded solutions for all operators
$\ensuremath{\mathcal{L}}$ in our class, also those not satisfying the Liouville
property. Note that if $\overline{G_\mu+W_{\sigma,b+c_\mu}}=\ensuremath{\mathbb{R}}^d$, then $u$
is constant and the Liouville result follows. Both theorems are proved in Section \ref{sec:periodandliou}.
We give examples
in
Section \ref{sec:examples}. Examples \ref{ex1} and \ref{ex2} provide an overview of different possibilities,
and Examples \ref{ex:finitenumberofpoints} and \ref{ex:kro} are concerned with the case where $\textup{card} \left(\textup{supp} (\mu) \right)<\infty$. The Liouville property holds in the latter case if and only if $\textup{card} \left( \textup{supp} (\mu) \right) \geq d-\textup{dim} \left(W_{\sigma,b+c_\mu} \right)+1$ with additional algebraic conditions in relation with Diophantine approximation. The Kronecker theorem (Theorem
\ref{thm:CharKron}) is a key ingredient in this discussion and
a slight change
in the data may destroy the Liouville property.
The class of operators $\ensuremath{\mathcal{L}}$ given by \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1} is large and
diverse.
In addition to the processes
mentioned above,
it
includes also discrete random
walks, constant coefficient It\^{o}- and L\'evy-It\^{o} processes, and most
processes used as driving noise in finance.
Examples of nonlocal operators
are fractional
Laplacians \cite{Lan72}, convolution
operators \cite{Cov08,A-VMaRoT-M10,BrChQu12},
relativistic Schr\"odinger operators \cite{FaWe16},
and the
CGMY model in finance \cite{CoTa04}.
We mention that discrete finite difference operators can be written
in the form
\eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1},
cf. \cite{DTEnJa18b}. For more examples, see Section
\ref{sec:examples}.
There is a huge literature on the Liouville theorem.
In the local case, we simply refer to the survey \cite{Far07}. In the nonlocal case, the Liouville theorem is more or less understood
for fractional Laplacians or variants
\cite{Lan72,BoKuNo02,CaSi14, ChDALi15, Fal15},
certain L\'evy operators
\cite{BaBaGu00,PrZa04, ScWa12, R-OSe16a,DTEnJa17a},
relativistic Schr\"{o}dinger operators \cite{FaWe16}, or convolution
operators \cite{CD60,BrChQu12, BrCo18, BrCoHaVa19}. The techniques vary from Fourier analysis, potential theory, probabilistic methods, to classical PDE arguments.
To prove that solutions of $\ensuremath{\mathcal{L}}[u]=0$ are $G_\mu$-periodic, we rely on propagation of maximum points
\cite{CD60,Cov08,Cio12,DTEnJa17b,DTEnJa17a,HuDuWu18,BrCo18, BrCoHaVa19} and a
localization technique \`a la
\cite{CD60,BeHaRo07,Ros09,BrCoHaVa19}. As far as we know, Choquet and
Deny \cite{CD60} were the first to obtain such results. They were concerned with
the equation $u \ast \mu-u=0$
for some bounded measure $\mu$. This is a particular case of our equation since
$u \ast \mu-u=\ensuremath{\mathcal{L}}^\mu[u]+\int_{\ensuremath{\mathbb{R}}^d \setminus \{0\}} z \mathbf{1}_{|z| \leq 1} \,\mathrm{d} \mu(z) \cdot Du$.
For general $\mu$, the drift $\int_{\ensuremath{\mathbb{R}}^d \setminus \{0\}} z \mathbf{1}_{|z| \leq 1} \,\mathrm{d} \mu(z) \cdot Du$ may not make sense and
the identification of the full drift $b+c_\mu$ relies on a standard decomposition of closed subgroups of $\ensuremath{\mathbb{R}}^d$, see e.g. \cite{Mar03}. The idea is to establish $G_\mu$-periodicity of solutions of $\ensuremath{\mathcal{L}}[u]=0$ as in \cite{CD60}, and then use that $G_\mu=V_\mu \oplus \Lambda$ for the vector space $V_\mu$ previously defined and some discrete group $\Lambda$. This will roughly speaking remove the singularity $z=0 \in V_\mu$ in the computation of $c_\mu$ because $\int_{\ensuremath{\mathbb{R}}^d \setminus \{0\}}\mathbf{1}_{z \in V_\mu}z \mathbf{1}_{|z| \leq 1} \,\mathrm{d} \mu(z) \cdot Du=0$ for any $G_\mu$-periodic function. See Section \ref{sec:periodandliou} for details.
Our approach then combines PDEs and group arguments, extends the results of \cite{CD60} to Courr\`ege/L\'evy operators,
yields necessary and sufficient conditions for the Liouville property, and provides short and simple proofs.
\subsubsection*{Outline of the paper}
Our main results (Theorems \ref{thm:Liouville} and \ref{thm:PeriodGeneralOp}) were stated in Section \ref{sec:intro}. They are proved in Section \ref{sec:periodandliou} and examples are given in Section \ref{sec:examples}.
\subsubsection*{Notation and preliminaries}
The {\it support} of a measure
$\mu$
is defined as
\begin{equation}\label{def-support}
{\supp(\mu)} := \left\{z\in \ensuremath{\mathbb{R}}^d \setminus \{0\} \ : \ \mu(B_r(z))>0, \ \forall r>0\right\},
\end{equation}
where $B_r(z)$ is the ball of center $z$
and radius $r$.
To continue, we assume \eqref{as:sigmab}, \eqref{as:mus}, and $\ensuremath{\mathcal{L}}$ is given by \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}.
\begin{definition}
For any $u \in L^\infty( \ensuremath{\mathbb{R}}^d )$, $\mathcal{L}[u] \in \mathcal{D}'(\ensuremath{\mathbb{R}}^d)$ is defined by
\begin{equation*}
\langle \ensuremath{\mathcal{L}}[u],\psi \rangle:=\int_{\ensuremath{\mathbb{R}}^d }u(x)\ensuremath{\mathcal{L}}^*[\psi](x)\,\mathrm{d} x \quad \forall \psi\in C_\textup{c}^\infty( \ensuremath{\mathbb{R}}^d )
\end{equation*}
with $\ensuremath{\mathcal{L}}^*:=\ensuremath{\mathcal{L}}^{\sigma,-b}+\ensuremath{\mathcal{L}}^{\mu^*}$ and $\,\mathrm{d} \mu^*(z):= \,\mathrm{d} \mu(-z)$.
\end{definition}
The above distribution is well-defined since $\ensuremath{\mathcal{L}}^*:W^{2,1}(\ensuremath{\mathbb{R}}^d) \to L^1(\ensuremath{\mathbb{R}}^d)$ is bounded.
\begin{definition}
Let $S \subseteq \ensuremath{\mathbb{R}}^d$ and $u \in L^\infty(\ensuremath{\mathbb{R}}^d)$, then $u$
is a.e. \emph{$S$-periodic} if
\[
\int_{\ensuremath{\mathbb{R}}^d} \big(u(x+s)-u(x)\big) \psi(x) \,\mathrm{d} x=0 \quad \forall s\in S, \forall \psi \in C^\infty_\textup{c}(\ensuremath{\mathbb{R}}^d).
\]
\end{definition}
The following technical result will be needed to regularize
distributional solutions of $\ensuremath{\mathcal{L}}[u]=0$ and a.e. periodic
functions. Let the mollifier
$\rho_\varepsilon(x):=\frac1{\varepsilon^{d}}\rho(\frac x\varepsilon)$, $\varepsilon>0$, for
some $0\leq\rho\in
C_{\textup{c}}^\infty(\ensuremath{\mathbb{R}}^d)$ with $\int_{\ensuremath{\mathbb{R}}^d}\rho =1$.
\begin{lemma}\label{lem:smoothReduction}
Let $u\in
L^\infty(\ensuremath{\mathbb{R}}^d)$ and $u_\varepsilon:=\rho_\varepsilon*u$. Then:
\begin{enumerate}[{\rm (a)}]
\item\label{a1} $\ensuremath{\mathcal{L}}[u]=0$ in $\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$ if and only if
$\ensuremath{\mathcal{L}}[u_\varepsilon]=0$ in $\ensuremath{\mathbb{R}}^d$ for all $\varepsilon>0$.
\item\label{b1} $u$ is a.e. $S$-periodic if and only if $u_\varepsilon$ is $S$-periodic for all $\varepsilon>0$.
\end{enumerate}
\end{lemma}
\begin{proof}
The proof of \eqref{a1} is standard since $\ensuremath{\mathcal{L}}[u_\varepsilon]=\ensuremath{\mathcal{L}}[u] \ast \rho _\varepsilon$ in $\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$.
Moreover \eqref{b1} follows from \eqref{a1} since for any $s\in S$ we can
take $\ensuremath{\mathcal{L}}[\phi](x)= \phi(x+s)-\phi(x)$ by choosing $\sigma,b=0$ and
$\mu=\delta_s$ (the Dirac measure at $s$) in \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}.
\end{proof}
\section{Proofs}\label{sec:periodandliou}
This section is devoted to the proofs of Theorems \ref{thm:Liouville} and \ref{thm:PeriodGeneralOp}. We first reformulate the classical Liouville theorem for local operators in terms of periodicity, then study the influence of the nonlocal part.
\subsection{ $W_{\sigma,b}$-periodicity for local operators}
Let us recall the Liouville theorem for
operators of the form \eqref{def:localOp1}, see e.g. \cite{Nel61,Miy15}.
In
the result we use the set
\begin{equation*}
W_{\sigma,b}=\textup{span}_{\ensuremath{\mathbb{R}}}\{\sigma_1,\ldots,\sigma_P,b\}.
\end{equation*}
Note that $\textup{span}_{\ensuremath{\mathbb{R}}}\{\sigma_1,\ldots,\sigma_P\}$ equals the
span of the eigenvectors of $\sigma\sigma^\texttt{T}$ corresponding to nonzero eigenvalues.
\begin{theorem}[Liouville for $\ensuremath{\mathcal{L}}^{\sigma,b}$]\label{thm:LiouvilleLocal}
Assume \eqref{as:sigmab} and $\ensuremath{\mathcal{L}}^{\sigma,b}$ is given by
\eqref{def:localOp1}. Then the following statements are equivalent:
\begin{enumerate}[{\rm (a)}]
\item If $u\in L^\infty(\ensuremath{\mathbb{R}}^d)$ solves $\ensuremath{\mathcal{L}}^{\sigma,b}[u]=0$ in
$\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$, then $u$ is a.e. constant in~$\ensuremath{\mathbb{R}}^d$.
\item $W_{\sigma,b}=\ensuremath{\mathbb{R}}^d$.
\end{enumerate}
\end{theorem}
Let us now reformulate and prove this classical result as a consequence of a periodicity result, a type
of argument that will be crucial in the nonlocal case. We will consider $C^\infty_{\textup{b}}(\ensuremath{\mathbb{R}}^d)$ solutions, which will be enough later during the proofs of Theorems \ref{thm:Liouville} and \ref{thm:PeriodGeneralOp}, thanks to Lemma \ref{lem:smoothReduction}.
\begin{proposition}[Periodicity for $\ensuremath{\mathcal{L}}^{\sigma,b}$]\label{thm:PeriodLiouvilleLocal}
Assume \eqref{as:sigmab}, $\ensuremath{\mathcal{L}}^{\sigma,b}$ is given by
\eqref{def:localOp1}, and $u\in C^\infty_{\textup{b}}(\ensuremath{\mathbb{R}}^d)$. Then the following
statements are
equivalent:
\begin{enumerate}[{\rm (a)}]
\item\label{a2} $\ensuremath{\mathcal{L}}^{\sigma,b}[u]=0$ in $\ensuremath{\mathbb{R}}^d$.
\item\label{b2} $u$ is $W_{\sigma,b}$-periodic.
\end{enumerate}
\end{proposition}
Note that part \eqref{b2} implies that $u$ is constant in the
directions defined by the vectors $\sigma_1,\ldots,\sigma_P,b$. If
their span then covers all of $\ensuremath{\mathbb{R}}^d$, Theorem \ref{thm:LiouvilleLocal} follows trivially.
To prove Proposition \ref{thm:PeriodLiouvilleLocal}, we adapt the ideas of \cite{Miy15} to our setting.
\begin{proof}[Proof of Proposition \ref{thm:PeriodLiouvilleLocal}]\
\noindent\eqref{b2} $\Rightarrow$ \eqref{a2} \ We have $b \cdot Du(x)=\frac{\,\mathrm{d}}{\,\mathrm{d} t} u(x+t b)_{|_{t=0}}=0$ for any $x \in \ensuremath{\mathbb{R}}^d$ since the function $t \mapsto u(x+t b)$ is constant. Similarly $(\sigma_j \cdot D)^2 u(x):=\frac{\,\mathrm{d}^2}{\,\mathrm{d} t^2} u(x+t \sigma_j)_{|_{t=0}}=0$ for any $j=1,\dots,P$.
Using then that $\textup{tr}(\sigma\sigma^\texttt{T} D^2u)=
\sum_{j=1}^P
(\sigma_j \cdot
D)^2 u$, we conclude that $\ensuremath{\mathcal{L}}^{\sigma,b}[u]=0$ in $\ensuremath{\mathbb{R}}^d$.
\noindent\eqref{a2} $\Rightarrow$ \eqref{b2} \ Let
$v(x,y,t):=u(x+\sigma y-bt)$ for $x \in \ensuremath{\mathbb{R}}^d$, $y\in\ensuremath{\mathbb{R}}^P$, and $t \in \ensuremath{\mathbb{R}}$. Direct computations show that
$$
\Delta_y v(x,y,t)=\sum_{j=1}^P(\sigma_j\cdot D)^2u(x+\sigma y-bt)=\textup{tr}\big[\sigma \sigma^{\texttt{T}} D^2u(x+\sigma y-bt)\big]
$$
and $ \partial_tv(x,y,t)=-b \cdot Du(x+\sigma y-bt)$. Hence for all
$(x,y,t)\in \ensuremath{\mathbb{R}}^d \times \ensuremath{\mathbb{R}}^P \times \ensuremath{\mathbb{R}}$,
$$
\Delta_y
v(x,y,t)-\partial_tv(x,y,t)=\mathcal{L}^{\sigma,b}[u](x+\sigma y-bt)=0.
$$
Since $v(x,\cdot,\cdot)$ is
bounded, we conclude by uniqueness of the heat equation that for any
$s<t$,
\begin{equation}\label{eq:convFormulaHE}
v(x,y,t)=\int_{\ensuremath{\mathbb{R}}^P}v(x,z,s)K_P(y-z,t-s)\,\mathrm{d} z,
\end{equation}
where $K_P$ is the standard heat kernel in $\ensuremath{\mathbb{R}}^P$. But then
$$
\|\Delta_yv(x,\cdot,t)\|_\infty\leq \|v(x,\cdot,s)\|_\infty\|\Delta_y K_P(\cdot,t-s)\|_{L^1(\ensuremath{\mathbb{R}}^P)},
$$
and since $\|\Delta_y K_P(\cdot,t-s)\|_{L^1}\to0$ as $s\to-\infty$,
we deduce that
$\Delta_yv=0$ for all $x,y,t$.
By the classical
Liouville theorem (see e.g. \cite{Nel61}), $v$ is constant in
$y$. It is also constant in $t$ by \eqref{eq:convFormulaHE}
since $\int_{\ensuremath{\mathbb{R}}^P}K_P(z,t-s)\,\mathrm{d} z=1$. We conclude that $u$ is $W_{\sigma,b}$-periodic since
$$
u(x)=v(x,0,0)=v(x,y,t)=u(x+\sigma y-bt)
$$
and $W_{\sigma,b}=\{\sigma y-bt:y \in \ensuremath{\mathbb{R}}^P, t \in \ensuremath{\mathbb{R}}\}$.
\end{proof}
\subsection{ $G_\mu$-periodicity for general operators}
Proposition \ref{thm:PeriodLiouvilleLocal} might seem artificial in the
local case, but not so in the nonlocal case. In fact we will prove our general
Liouville result as a consequence of a periodicity result. A key step
in this direction is the lemma below.
\begin{lemma}\label{lem:L0ImpusuppmuPer}
Assume \eqref{as:sigmab}, \eqref{as:mus}, $\ensuremath{\mathcal{L}}$ is given by \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}, and $u\in C^\infty_{\textup{b}}(\ensuremath{\mathbb{R}}^d)$. If $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$, then $u$ is $\supp(\mu)$-periodic.
\end{lemma}
To prove this result, we use propagation of maximum
(see e.g. \cite{CD60,Cov08,Cio12}).
\begin{lemma}\label{max-prop} If $u\in C^\infty_{\textup{b}}(\ensuremath{\mathbb{R}}^d)$
achieves its global maximum at some $\bar x$ such that $\ensuremath{\mathcal{L}}[u](\bar x)\geq 0$, then $u(\bar x+z)=u(\bar x)$ for any $z \in \textup{supp} (\mu)$.
\end{lemma}
\begin{proof}
At $\bar x$, $u=\sup u$, $Du=0$ and $D^2u\leq 0$, and hence
$\ensuremath{\mathcal{L}}^{\sigma,b}[u](\bar x)\leq 0$ and
$$0 \leq\ensuremath{\mathcal{L}}[u](\bar x) \leq \ensuremath{\mathcal{L}}^{\mu}[u](\bar x) = \int_{\ensuremath{\mathbb{R}}^d \setminus \{0\}} \big(u(\bar
x+z) - \sup_{\ensuremath{\mathbb{R}}^d} u\big)\, d\mu(z).$$
Using that $\int_{\ensuremath{\mathbb{R}}^d\setminus\{0\}} f \,\mathrm{d} \mu \geq 0$ and $f \leq 0$ implies $f=0$ $\mu$-a.e., we deduce that $u(\bar x+z)-\sup_{\ensuremath{\mathbb{R}}^d} u=0$ for $\mu$-a.e. $z$. Since $u$ is continuous, this equality holds for all $z \in \textup{supp} (\mu)$.\footnote{ If not, we would find some $z_0$ and $r_0>0$ such that $f(z):=u(\bar x+z)-\sup u<0$ in $B_{r_0}(z_0)$ whereas $\mu(B_{r_0}(z_0))>0$ by \eqref{def-support}.}
\end{proof}
To exploit Lemma \ref{max-prop}, we need to have a maximum point.
For this sake, we use a localization technique \`a la \cite{CD60,BeHaRo07,Ros09,BrCoHaVa19}.
\begin{proof}[Proof of Lemma \ref{lem:L0ImpusuppmuPer}]
Fix an arbitrary $ \bar z \in \supp(\mu)$, define
\[
v(x):=u(x+ \bar z )-u(x),
\]
and let us show that $v(x)=0$ for all $x \in \ensuremath{\mathbb{R}}^d$.
We first
show that $v \leq 0$. Take $M$ and a sequence $\{x_n\}_{n}$ such that
\[
v(x_n)\stackrel{n\to \infty}{\longrightarrow} M:=\sup v,
\]
and define
$$u_n(x):=u(x+x_n)\quad\text{and}\quad v_n(x):=v(x+x_n).$$ Note that
$\ensuremath{\mathcal{L}}[v_n] = 0$ in $\ensuremath{\mathbb{R}}^d$. Now since $v \in C_\textup{b}^\infty(\ensuremath{\mathbb{R}}^d)$, the
Arzel\`a-Ascoli theorem implies that there exists $v_\infty$ such that
$v_n \to v_\infty$ locally uniformly (up to a subsequence). Taking
another subsequence if necessary, we can assume that the derivatives
up to second order converge and pass to the limit in the equation
$\ensuremath{\mathcal{L}}[v_n] = 0$ to deduce that $\ensuremath{\mathcal{L}}[v_\infty] = 0$ in $\ensuremath{\mathbb{R}}^d$.
Moreover, $v_\infty$ attains its maximum at $x=0$ since $v_\infty\leq
M$ and
\[
v_\infty(0)=\lim_{n\to\infty}v_n(0)=\lim_{n\to \infty}v(x_n)=M.
\]
A similar argument shows that there is a $u_\infty$ such that $u_n \to
u_\infty$ as $n \to \infty$ locally uniformly. Taking further subsequences
if necessary, we can assume that $u_n$ and $v_n$ converge along
the same sequence. Then by construction
$$
v_\infty(x)=u_\infty(x+ \bar z )-u_\infty(x).
$$
By Lemma \ref{max-prop} and an iteration, we find that
$M=v_\infty(m \bar z )=u_\infty((m+1) \bar z )-u_\infty(m \bar z )$ for any $m \in
\ensuremath{\mathbb{Z}}$. Then by another
iteration,
\[
u_\infty((m+1) \bar z )=u_\infty(m \bar z )+M=\ldots = u_\infty(0)+(m+1)M.
\]
But since $u_\infty$ is bounded, the only choice is $M=0$ and thus $v\leq M=0$. A similar argument shows that $v \geq 0$, and hence, $0=v(x)=u(x+ \bar z )-u(x)$ for any $ \bar z \in \supp(\mu)$ and all $x\in\ensuremath{\mathbb{R}}^d$.
\end{proof}
We can give a more general result than Lemma \ref{lem:L0ImpusuppmuPer} if we consider groups.
\begin{definition}
\begin{enumerate}[{\rm (a)}]
\item A set
$G \subseteq \ensuremath{\mathbb{R}}^d$ is an {\it additive subgroup} if $G \neq \emptyset$ and
$$
\forall g_1,g_2 \in G, \quad g_1+g_2\in G \quad \text{and}\quad-g_1 \in G.
$$
\item The \textit{subgroup generated} by a set $S \subseteq \ensuremath{\mathbb{R}}^d$, denoted $G(S)$, is the smallest additive group containing $S$.
\end{enumerate}
\end{definition}
Now we return to
a key set for our analysis:
\begin{equation}\label{def-Gmu}
G_\mu=\overline{G(\supp (\mu))}.
\end{equation}
This set appears naturally because of the elementary result below.
\begin{lemma}\label{suppmuPerGsuppmuPer}
Let $S \subseteq \ensuremath{\mathbb{R}}^d$. Then $w\in C(\ensuremath{\mathbb{R}}^d)$ is $S$-periodic if and only if $w$ is $\overline{G(S)}$-periodic.
\end{lemma}
\begin{proof}
It suffices to show that $G:=\{g \in \ensuremath{\mathbb{R}}^d:w(\cdot+g)=w(\cdot)\}$ is a closed subgroup of $\ensuremath{\mathbb{R}}^d$. It is obvious that it is closed by continuity of $w$. Moreover, for any $g_1,g_2 \in G$ and $x \in \ensuremath{\mathbb{R}}^d$,
\begin{equation*}
w(x+g_1-g_2)=w(x-g_2)=w(x-g_2+g_2)=w(x).\qedhere
\end{equation*}
\end{proof}
By Lemmas
\ref{lem:L0ImpusuppmuPer} and \ref{suppmuPerGsuppmuPer}, we have proved that:
\begin{proposition}[$G_\mu$-periodicity]\label{lem:L0ImpusuppmuPer-bis}
Assume \eqref{as:sigmab}, \eqref{as:mus}, $\ensuremath{\mathcal{L}}$ is given by \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}, and $G_\mu$ by \eqref{def-Gmu}. Then any solution $u\in C^\infty_{\textup{b}}(\ensuremath{\mathbb{R}}^d)$ of $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$ is $G_\mu$-periodic.
\end{proposition}
\subsection{The role of $c_\mu$}
Propositions \ref{thm:PeriodLiouvilleLocal} and
\ref{lem:L0ImpusuppmuPer-bis} combined may seem to imply that $\ensuremath{\mathcal{L}}[u]=0$ gives
$(G_\mu+W_{\sigma,b})$-periodicity of $u$, but this is not true in
general. The correct periodicity result depends on a new
drift $b+c_\mu$, where $c_\mu$ is defined in \eqref{cmu}
below. To give this definition, we need to decompose
$G_\mu$ into
a direct sum of a vector subspace and a relative lattice.
\begin{definition}
\begin{enumerate}[{\rm (a)}]
\item If two subgroups $G,\tilde{G} \subseteq \ensuremath{\mathbb{R}}^d$ satisfy $G \cap \tilde{G}=\{0\}$, their sum is said to be {\em direct} and we write $G+\tilde{G}=G \oplus \tilde{G}$.
\item A \textit{full lattice} is a subgroup $\Lambda \subseteq \ensuremath{\mathbb{R}}^d$ of the form
$
\Lambda=
\oplus_{n=1}^d a_n \ensuremath{\mathbb{Z}}
$
for some basis $\{a_1,\dots,a_d\}$ of $\ensuremath{\mathbb{R}}^d$.
A \textit{relative lattice} is a lattice of a vector subspace of $\ensuremath{\mathbb{R}}^d$.
\end{enumerate}
\end{definition}
\begin{theorem}[Theorem 1.1.2 in \cite{Mar03}]\label{decom:opt}
If $G$ is a closed subgroup of $\ensuremath{\mathbb{R}}^d$, then
$
G=V \oplus \Lambda
$
for some vector space $V \subseteq \ensuremath{\mathbb{R}}^d$ and some relative lattice $\Lambda \subseteq \ensuremath{\mathbb{R}}^d$ such that $V \cap \textup{span}_\ensuremath{\mathbb{R}} \Lambda=\{0\}$.
\end{theorem}
In this decomposition the space $V$ is unique and can be
represented by \eqref{def-V} below.
\begin{lemma}\label{dis}
Let $V$ be a vector subspace and $\Lambda$ a relative lattice of $\ensuremath{\mathbb{R}}^d$ such that $V \cap\textup{span}_\ensuremath{\mathbb{R}} \Lambda =\{0\}$. Then for any $\lambda \in \Lambda$, there is an open ball $B$ of $\ensuremath{\mathbb{R}}^d$ containing $\lambda$ such that $B \cap (V \oplus \Lambda)=B \cap (V+\lambda)$.
\end{lemma}
\begin{proof}
If the lemma does not hold, there exists $v_n+\lambda_n \to \lambda$
as $n \to \infty$ where $v_n \in V$, $\lambda_n \in \Lambda$,
$\lambda_n \neq \lambda$. Note that $v_n, \lambda_n,
\lambda\in V \oplus \textup{span}_\ensuremath{\mathbb{R}} \Lambda$, and that
$$
\lambda=\!\!\underset{\ \, \in V}0+\!\!\underset{\ \, \in \Lambda}{\lambda}.
$$
By continuity of the projection from $V \oplus \textup{span}_\ensuremath{\mathbb{R}}\Lambda$ onto $\textup{span}_\ensuremath{\mathbb{R}} \Lambda$, $\lambda_n \to \lambda$ and this contradicts the fact that each point of $\Lambda$ is isolated.
\end{proof}
\begin{lemma}\label{pro:def-V}
Let $G$, $V$ and $\Lambda$ be as in Theorem \ref{decom:opt}. Then
\begin{equation}
\label{def-V}
V=V_G:=\left\{g \in G \ :\ t g \in G \mbox{ } \forall t \in \ensuremath{\mathbb{R}} \right\}.
\end{equation}
\end{lemma}
\begin{proof}
It is clear that $V \subseteq V_G$. Now given $g \in V_G$, there is
$(v,\lambda) \in V \times \Lambda$ such that
$g=v+\lambda$. For any $t \in \ensuremath{\mathbb{R}}$, $t g=t v+t \lambda \in G$ and thus
$t \lambda \in G$ since $t v \in V \subseteq G$. Let $B$ be an open
ball containing $\lambda$ such that $B \cap G=B \cap
(V+\lambda)$. Choosing $ t$ such that $t \neq 1$ and $t \lambda \in
B$, we infer that $t \lambda={ \tilde{v} }+\lambda$ for some ${
\tilde{v} } \in V$. Hence $\lambda=(t-1)^{-1} { \tilde{v} } \in V$
and this implies that $\lambda=0$.
In other words $V_G \subseteq V$, and the proof is complete.
\end{proof}
\begin{remark}\label{rem:per}
Any $G$-periodic function $w \in C^1(\ensuremath{\mathbb{R}}^d)$ is such that $z \cdot Dw (x)=\lim_{t \to 0} \frac{w(x+t { z })-w(x)}{t}=0$ for any $x \in \ensuremath{\mathbb{R}}^d$ and $z \in V_G$.
\end{remark}
By Theorem \ref{decom:opt} and Lemma
\ref{pro:def-V}, we decompose the set $G_\mu$ in
\eqref{def-Gmu} into a lattice and the subspace
$V_\mu:=V_{G_\mu}$. The new drift can then be defined as
\begin{equation}\label{cmu}
c_\mu=-\int_{\{|z| \leq 1\} \setminus V_\mu} z\, \,\mathrm{d} \mu(z).
\end{equation}
\begin{proposition}\label{def-prop}
Assume \eqref{as:mus} and $c_\mu$ is given by
\eqref{cmu}. Then $c_\mu \in \ensuremath{\mathbb{R}}^d$ is well-defined
and uniquely determined by $\mu$.
\end{proposition}
\begin{proof}
Using that $\textup{supp}(\mu) \subset G_\mu=V_\mu\oplus \Lambda$,
\begin{equation*}
\begin{split}
\int_{\{|z| \leq 1\} \setminus V_\mu} |z| \,\mathrm{d} \mu(z) & = \int_{G_\mu \setminus (V_\mu+0)} |z| \mathbf{1}_{|z| \leq 1} \,\mathrm{d} \mu(z)\\
& \leq \int_{G_\mu \setminus B} |z| \mathbf{1}_{|z| \leq 1} \,\mathrm{d} \mu(z)
\end{split}
\end{equation*}
for some open ball $B$ containing $0$ given by Lemma \ref{dis}. This integral is finite by \eqref{as:mus} which completes the proof.
\end{proof}
\begin{proposition}\label{lem:drift}
Assume \eqref{as:mus} and $\ensuremath{\mathcal{L}}^\mu$, $G_\mu$, $c_\mu$ are given by
\eqref{def:levy1}, \eqref{def-Gmu}, \eqref{cmu}. If $w\in C^\infty_{\textup{b}}(\ensuremath{\mathbb{R}}^d)$
is
$G_\mu$-periodic,
then
$$\ensuremath{\mathcal{L}}^\mu [w] = c_\mu\cdot Dw \quad \text{in}\quad \ensuremath{\mathbb{R}}^d.$$
\end{proposition}
\begin{proof}
Using that $\int_{\ensuremath{\mathbb{R}}^d\setminus\{0\}} f \,\mathrm{d} \mu=\int_{\textup{supp} (\mu)} f \,\mathrm{d} \mu$, we have
\begin{align*}
\ensuremath{\mathcal{L}}^\mu[w](x)
= - \int_{\ensuremath{\mathbb{R}}^d \setminus \{0\}} z\cdot Dw(x)
\mathbf{1}_{|z| \leq 1}\,\mathrm{d}\mu(z)
\end{align*}
because $w(x+z)-w(x)=0$ for all $x \in \ensuremath{\mathbb{R}}^d$ and $z \in \textup{supp}(\mu) \subset G_\mu$. The result is thus immediate from Remark \ref{rem:per} and Proposition \ref{def-prop}.
\end{proof}
\subsection{ Proofs of Theorems \ref{thm:Liouville} and \ref{thm:PeriodGeneralOp}}
We are now in a position to prove our main results. We start with
Theorem \ref{thm:PeriodGeneralOp} which characterizes
all bounded solutions of $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}^d$ as periodic functions
and specifies the set of admissible periods.
\begin{proof}[Proof of Theorem \ref{thm:PeriodGeneralOp}]
By Lemma \ref{lem:smoothReduction} we can assume that $u \in
C_\textup{b}^\infty(\ensuremath{\mathbb{R}}^d)$.
\noindent\eqref{a3} $\Rightarrow$ \eqref{b3} \ Since $\ensuremath{\mathcal{L}}[u] =0$ in $\ensuremath{\mathbb{R}}^d$, $u$
is $G_\mu$-periodic by
Proposition \ref{lem:L0ImpusuppmuPer-bis}.
Proposition \ref{lem:drift} then implies that
$$
0=\ensuremath{\mathcal{L}}[u] = \ensuremath{\mathcal{L}}^{\sigma,b}[u]+c_\mu\cdot Du=
\ensuremath{\mathcal{L}}^{\sigma,b+c_\mu}[u]\quad \text{in}\quad \ensuremath{\mathbb{R}}^d,
$$
which by Proposition \ref{thm:PeriodLiouvilleLocal} shows that $u$ is also
$W_{\sigma,b+c_\mu}$-periodic. It is now easy to see that $u$ is $\overline{G_\mu+W_{\sigma,b+c_\mu}}$-periodic.
\noindent\eqref{b3} $\Rightarrow$ \eqref{a3} \ Since $u$ is both $G_\mu$ and
$W_{\sigma,b+c_\mu}$-periodic, by first applying Proposition \ref{lem:drift} and then
Proposition \ref{thm:PeriodLiouvilleLocal},
$\ensuremath{\mathcal{L}}[ u]
=\ensuremath{\mathcal{L}}^{\sigma,b+c_\mu}[u]=0$ in $\ensuremath{\mathbb{R}}^d$.
\end{proof}
We now prove Theorem \ref{thm:Liouville} on necessary and sufficient
conditions for $\ensuremath{\mathcal{L}}$ to satisfy the Liouville
property. We will use the following consequence of Theorem \ref{decom:opt}.
\begin{corollary}\label{pro:group-multid}
A subgroup $G$ of $\ensuremath{\mathbb{R}}^d$ is dense if and only if there are no $c \in
\ensuremath{\mathbb{R}}^d$ and codimension 1 subspace $H \subset \ensuremath{\mathbb{R}}^d$ such that $G \subseteq H+c
\ensuremath{\mathbb{Z}}$.
\end{corollary}
\begin{proof}
Let us argue by contraposition for both the ``only if'' and ``if'' parts.
\noindent ($\Rightarrow$) \ Assume $G \subseteq H+c \ensuremath{\mathbb{Z}}$ for some codimension 1 space $H$ and $c \in \ensuremath{\mathbb{R}}^d$. If $c \in H$, then $\overline{G} \subseteq \overline{H}=H \neq \ensuremath{\mathbb{R}}^d$. If $c \notin H$, then $
\ensuremath{\mathbb{R}}^d=H \oplus \textup{span}_{\ensuremath{\mathbb{R}}} \{c\}
$,
and each $x \in \ensuremath{\mathbb{R}}^d$ can be written as $x=x_H+\lambda_x c$ for a
unique $(x_H,\lambda_x) \in H \times \ensuremath{\mathbb{R}}$. Hence $H+c \ensuremath{\mathbb{Z}}=\{x:\lambda_x
\in \ensuremath{\mathbb{Z}}\}$ is closed by continuity of the projection $x \mapsto
\lambda_x$, and $\overline{G} \subseteq H+c \ensuremath{\mathbb{Z}} \neq \ensuremath{\mathbb{R}}^d$.
\noindent ($\Leftarrow$) \ Assume $\overline{G} \neq \ensuremath{\mathbb{R}}^d$. By Theorem
\ref{decom:opt}, $\overline{G}=V \oplus
\Lambda$ for a subspace $V$ and lattice
$\Lambda$ with $V \cap \textup{span}_\ensuremath{\mathbb{R}} \Lambda=\{0\}$. It
follows that the dimensions $n$ of $V$ and $m$ of the vector space
$\textup{span}_\ensuremath{\mathbb{R}} \Lambda$ satisfy $n<d$ and $n+m \leq d$. If $m=0$, $G \subseteq V \subseteq H$ for some
codimension 1 space $H$. If $m \geq 1$, then
$\Lambda=\oplus_{i=1}^m a_i \ensuremath{\mathbb{Z}}$ for some basis $\{a_1,\dots,a_m\}$ of
$\textup{span}_\ensuremath{\mathbb{R}} \Lambda$. Let $W:=V \oplus
\textup{span}_\ensuremath{\mathbb{R}} \{a_i:i\neq m\}$ for $m>1$ and $W:=V$ for $m=1$. Then
$W$ is of dimension $n+m-1 \leq d-1$ and contained in some codimension 1 space
$H$. Hence $G \subseteq H+c \ensuremath{\mathbb{Z}}$ with $c=a_m$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:Liouville}]\
\noindent\eqref{label:thmLiou:c} $\Rightarrow$ \eqref{a4} \ If $u\in
L^\infty(\ensuremath{\mathbb{R}}^d)$ satisfies
$\ensuremath{\mathcal{L}}[u]=0$ in $\mathcal{D}'(\ensuremath{\mathbb{R}}^d)$, then $u$ is
$\overline{G_\mu+W_{\sigma,b+c_\mu}}$-periodic by Theorem
\ref{thm:PeriodGeneralOp}. Hence $u$ is
constant
by \eqref{label:thmLiou:c}.
\noindent\eqref{a4} $\Rightarrow$ \eqref{label:thmLiou:c} \ Assume \eqref{label:thmLiou:c} does not hold and let us construct a nontrivial $\overline{G_\mu+W_{\sigma,b+c_\mu}}$-periodic $L^\infty$-function. By Corollary \ref{pro:group-multid},
\begin{equation}\label{cep2}
\overline{G_\mu+W_{\sigma,b+c_\mu}} \subseteq H+ c \ensuremath{\mathbb{Z}},
\end{equation}
for some $c \in
\ensuremath{\mathbb{R}}^d$ and codimension 1 subspace $H \subset \ensuremath{\mathbb{R}}^d$.
We can assume $c
\notin H$ since otherwise \eqref{cep2} will hold if we redefine $c$ to
be any element in $H^c$. As before, each $x \in \ensuremath{\mathbb{R}}^d$ can be written as $x=x_H+\lambda_x c$ for a unique pair $(x_H,\lambda_x) \in H \times \ensuremath{\mathbb{R}}$. Now let
$
U(x):=\cos (2\pi \lambda_x)
$
and note that for any $h \in H$ and ${ n } \in \ensuremath{\mathbb{Z}}$,
$$
x+h+{ n }c=\underbrace{(x_H+h)}_{\in H}+\underbrace{(\lambda_x+{ n })}_{\in \ensuremath{\mathbb{R}}} c,
$$
so that
$$
U(x+h+{ n }c) =\cos (2\pi (\lambda_x+{ n }))=\cos (2\pi \lambda_x)= U(x).
$$
This proves that $U$ is $(H+c \ensuremath{\mathbb{Z}})$-periodic and thus also
$\overline{G_\mu+W_{\sigma,b+c_\mu}}$-periodic. By Theorem
\ref{thm:PeriodGeneralOp}, $\ensuremath{\mathcal{L}}[U]=0$, and we have a nonconstant
counterexample of \eqref{a4}. Note indeed that $U \in L^\infty(\ensuremath{\mathbb{R}}^d)$ since it is everywhere bounded by construction and $C^\infty$ (thus measurable) because the projection $x \mapsto \lambda_x$ is linear. We therefore conclude that \eqref{a4}
implies \eqref{label:thmLiou:c} by contraposition.
\end{proof}
\section{Examples}\label{sec:examples}
Let us give examples for which the Liouville property holds or fails. We will use Theorem \ref{thm:Liouville} or the following reformulation:
\begin{corollary}\label{label:thmLiou:d}
Under the assumptions of Theorem \ref{thm:Liouville}, $\ensuremath{\mathcal{L}}$ does \textup{not} satisfy the Liouville property if and only if
\begin{equation}\label{failb}
\supp(\mu)+W_{\sigma,b+c_\mu} \subseteq H + c \ensuremath{\mathbb{Z}},
\end{equation}
for some codimension 1 subspace $H$ and vector $c$ of $\ensuremath{\mathbb{R}}^d$.
\end{corollary}
\begin{proof}
Just note that $\overline{G(\supp(\mu)+W_{\sigma,b+c_\mu})}=\overline{G_\mu+W_{\sigma,b+c_\mu}}$ and apply Theorem \ref{thm:Liouville} and Corollary \ref{pro:group-multid}.
\end{proof}
\begin{example}\label{ex1}
\begin{enumerate}[{\rm (a)}]
\item For nonlocal operators $\ensuremath{\mathcal{L}}=\ensuremath{\mathcal{L}}^\mu$ with $\mu$ symmetric, \eqref{failb} reduces to
\begin{equation}\label{fail}
\supp(\mu) \subseteq H+c \mathbb{Z},
\end{equation}
for some $H$ of codimension $1$ and $c$. This fails for fractional Laplacians, relativistic Schr\"odinger operators, convolution
operators, or most nonlocal operators appearing in finance whose L\'evy measures contain an open ball in their supports. In particular all these operators have the Liouville property.
\item Even if $\textup{supp} (\mu)$ has an empty interior,
\eqref{fail} may fail and Liouville still hold. This is e.g. the case for the mean value operator
\begin{equation}\label{mean-value}
\mathcal{M}[u](x)=\int_{|z|=1} \big(u(x+z)-u(x)\big) \,\mathrm{d} S(z),
\end{equation}
where $S$ denotes the $(d-1)$-dimensional surface measure.
\item We may have in fact the Liouville property with just a finite number of points in the support of $\mu$, see Example \ref{ex:finitenumberofpoints}.
\item The way we have defined the nonlocal operator, if $\ensuremath{\mathcal{L}}=\ensuremath{\mathcal{L}}^\mu$ with general $\mu$,
\eqref{failb} reduces to
\begin{equation}
\label{failc}
\supp(\mu) \subseteq H+c \mathbb{Z} \quad \mbox{and} \quad c_\mu \in H,
\end{equation}
for some $H$ of codimension 1 and $c\in \ensuremath{\mathbb{R}}^d$.
We can have \eqref{fail} without \eqref{failc} as e.g. for the 1--$d$ measure $\mu=\delta_{-1}+2\delta_{1}$. Indeed $\supp (\mu) \subset \ensuremath{\mathbb{Z}}$ but $c_\mu=1 \neq 0$. The associated operator $\ensuremath{\mathcal{L}}^\mu$ then has the Liouville property even though it would not for any symmetric measure with the same support.
\item
A general operator $\ensuremath{\mathcal{L}}=\ensuremath{\mathcal{L}}^{\sigma,b}+\ensuremath{\mathcal{L}}^\mu$ may satisfy the Liouville property even though each part $\ensuremath{\mathcal{L}}^{\sigma,b}$ and $\ensuremath{\mathcal{L}}^{\mu}$ does not.
A simple 3--$d$ example is given by $\ensuremath{\mathcal{L}}=\partial_{x_1}^2+\partial_{x_2}+(\partial_{x_3}^2)^{\alpha}$, $\alpha \in (0,1)$.
Indeed $\sigma=(1,0,0)^\texttt{T}$, $b=(0,1,0)$, $\,\mathrm{d} \mu(z)=\frac{c(\alpha) \,\mathrm{d} z_3}{|z_3|^{1+2 \alpha}}$ with $c(\alpha)>0$, thus $c_\mu=0$, $W_{\sigma,b}=\ensuremath{\mathbb{R}} \times \ensuremath{\mathbb{R}} \times \{0\}$, and $G_\mu=\{0\} \times \{0\}\times \ensuremath{\mathbb{R}}$, so the result follows from Theorem \ref{thm:Liouville}.
\item For other kinds of interactions between the local and nonlocal parts, see Example \ref{ex:kro}.
\end{enumerate}
\end{example}
\begin{remark}
The Liouville property for the nonlocal operator \eqref{mean-value}
implies the classical Liouville result for the Laplacian, since
$\mathcal{M}[u]=0$ for harmonic functions~$u$.
\end{remark}
In the 1--$d$ case, the general form of the operators which do not satisfy the Liouville property is very explicit.
\begin{corollary}
Assume $d=1$ and $\ensuremath{\mathcal{L}}:C^\infty_\textup{c} (\ensuremath{\mathbb{R}}) \to C(\ensuremath{\mathbb{R}})$ is a linear translation invariant operator satisfying the maximum principle \eqref{mp}. Then the following statements are
equivalent:
\begin{enumerate}[{\rm (a)}]
\item\label{1da} There are nonconstant $u\in L^\infty(\ensuremath{\mathbb{R}})$ satisfying $\ensuremath{\mathcal{L}}[u]=0$ in
$\mathcal{D}'(\ensuremath{\mathbb{R}})$.
\item\label{label:thmLiou:cb} There are $g> 0$ and a nonnegative $\{\omega_n\}_{n} \in l^1(\ensuremath{\mathbb{Z}})$ such that
\begin{equation*}
\ensuremath{\mathcal{L}}[u](x)=\sum_{n \in \mathbb{Z}}(u(x+n g)-u(x)) \omega_n .
\end{equation*}
\end{enumerate}
\end{corollary}
\begin{proof}
If \eqref{label:thmLiou:cb} holds, any $g$-periodic
function $u$ satisfies $\ensuremath{\mathcal{L}}[u]=0$ in $\ensuremath{\mathbb{R}}$, and nonconstant bounded such functions exist. Conversely, if \eqref{1da}
holds then $\ensuremath{\mathcal{L}}$ is of the form
\eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1} by
\cite{Cou64}. By Corollary \ref{label:thmLiou:d}, there is $g \geq0$
such that $\textup{supp} (\mu)+W_{\sigma,b+c_\mu} \subseteq g
\mathbb{Z}$. In particular $\sigma=b+c_\mu=0$ and $\mu$ is a sum of
Dirac measures: $\mu=\sum_{n \in \mathbb{Z}} \omega_n \delta_{n g}$.\footnote{If $g=0$ then $\mu=0$ and the rest of the proof is trivial.} By \eqref{as:mus}, each $\omega_n \geq 0$ and $\sum_{n \in \mathbb{Z}} \omega_n<\infty$. Injecting these facts into \eqref{eq:GenOp1}--\eqref{def:localOp1}--\eqref{def:levy1}, we can easily rewrite $\ensuremath{\mathcal{L}}$ as in \eqref{label:thmLiou:cb}.
\end{proof}
\begin{example}\label{ex2}
\begin{enumerate}[{\rm (a)}]
\item In 1--$d$, the Liouville property holds for any nontrivial operator with nondiscrete L\'evy measure.
\item For discrete L\'evy measures, we need $\sigma \neq 0$ or $b \neq
-c_\mu$ or $G_\mu=\ensuremath{\mathbb{R}}$ for Liouville to hold. The condition
$G_\mu=\ensuremath{\mathbb{R}}$ is typically satisfied if $\overline{\textup{supp} (\mu)}^{\ensuremath{\mathbb{R}}}$
has an accumulation point or if $\textup{supp} (\mu)$ contains two points $z_1,z_2$ with
irrational ratio $\frac{z_1}{z_2}$ (see Theorem
\ref{thm:CharKron}). Another example is when $\textup{supp} (\mu) =\{\frac{n^2+1}{n}\}_{n \geq 1}$, which neither has an accumulation point nor contains any pair with irrational ratio.
\end{enumerate}
\end{example}
Let us continue with interesting consequences of the Kronecker theorem
on Diophantine approximation (p. 507 in \cite{GoMo16}).
\begin{theorem}[Kronecker theorem]\label{thm:CharKron}
Let $c=(c_1,\dots,c_d)\in \ensuremath{\mathbb{R}}^d$. Then $\overline{c \ensuremath{\mathbb{Z}}+\ensuremath{\mathbb{Z}}^d}=\ensuremath{\mathbb{R}}^d$ if and only if
$\{1,c_1,\dots,c_d\}$ is linearly independent over $\mathbb{Q}$.
\end{theorem}
We can use this result to get the Liouville property with just a finite number of points in the support of the L\'evy measure.
\begin{example}\label{ex:finitenumberofpoints}
\begin{enumerate}[{\rm (a)}]
\item Consider the operator
\[
\ensuremath{\mathcal{L}}[u](x)=u(x+c) +\sum_{i=1}^d u(x+e_i) - (d+1) u(x)
\]
for some $c=(c_1,\ldots,c_d)\not=0$ where $\{e_1,\dots,e_d\}$ is the canonical basis. Liouville holds if and only if $\{1,c_1,\ldots,c_d\}$ is linearly independent over $\mathbb{Q}$. Indeed $G_\mu=\overline{c \ensuremath{\mathbb{Z}}+\ensuremath{\mathbb{Z}}^d}$, so the result follows from Theorems \ref{thm:Liouville} and \ref{thm:CharKron}.
\item For more general operators $\ensuremath{\mathcal{L}}[u](x)=\sum_{z \in S}
(u(x+z)-u(x))\omega(z)$, with $S$ finite and $\omega(\cdot)>0$, we
may have similar results by applying Theorem \ref{thm:CharKron} (or
variants) and changing coordinates.
\end{enumerate}
\end{example}
Let us end with an illustration of how the local part may interact
with such nonlocal operators. We give 2--$d$ examples of the form
$$
\ensuremath{\mathcal{L}}[u](x)=\tilde{b}_1u_{x_1}+\tilde{b}_2u_{x_2}+u(x+z_1)+u(x+z_2)-2 u(x)
$$
where $\tilde{b}$ represents the full drift $b+c_\mu$.
\begin{example}\label{ex:kro}
\begin{enumerate}[\rm (a)]
\item If $\tilde{b}, z_1, z_2$ are collinear, Liouville does not hold by Theorem~\ref{thm:Liouville}.
\item If $z_1$ and $z_2$ are collinear and linearly independent
of $\tilde{b}$ as in
\begin{equation*}
\ensuremath{\mathcal{L}}[u](x)=u_{x_1}(x)+u(x_1,x_2+\alpha)+u(x_1,x_2+\beta)-2 u(x),
\end{equation*}
then the Liouville property holds if and only if $\frac{\alpha}{\beta} \notin
\mathbb{Q}$.
Indeed, here we have $G_{\mu}=\{0\} \times \overline{\alpha \ensuremath{\mathbb{Z}}+\beta \ensuremath{\mathbb{Z}}}$ and
$\textup{span}_\ensuremath{\mathbb{R}} \{b+c_\mu=(1,0)\}=\ensuremath{\mathbb{R}} \times \{0\}$, so we conclude by Theorems \ref{thm:Liouville} and~\ref{thm:CharKron}.
\item If $\{z_1,z_2\}$ is a basis of $\ensuremath{\mathbb{R}}^2$ as in
\begin{equation*}
\begin{split}
\hspace{6mm} \ensuremath{\mathcal{L}}[u](x) =&\tilde{b}_1 u_{x_1}(x)+\tilde{b}_2 u_{x_2}(x)+u(x_1+1,x_2)+u(x_1,x_2+1)-2 u(x),
\end{split}
\end{equation*}
then Liouville holds if and only if $\tilde{b}_1\not=0$ and
$\frac{\tilde{b}_2}{\tilde{b}_1} \notin \mathbb{Q}$.
Indeed, let us define $G:=G_\mu+W_{\sigma,b+c_\mu}$ where
we note that $G_\mu=\ensuremath{\mathbb{Z}}^2$ and
$W_{\sigma,b+c_\mu}=\textup{span}_{\ensuremath{\mathbb{R}}}\{(\tilde{b}_1,\tilde{b}_2)\}$. If
$\tilde{b}_1=0$ or $\tilde{b}_2=0$, then $\overline{G}\subseteq \ensuremath{\mathbb{Z}} \times \ensuremath{\mathbb{R}}$ or $\ensuremath{\mathbb{R}}
\times \ensuremath{\mathbb{Z}}$ which is not $\ensuremath{\mathbb{R}}^2$. Assume now that
$\tilde{b}_1,\tilde{b}_2\not=0$ and $\frac{\tilde{b}_2}{\tilde{b}_1}
\in \mathbb{Q}$, i.e., $\frac{\tilde{b}_2}{\tilde{b}_1}=\frac{p}{q}$ with $p,q
\neq 0$. Then
$$G\subseteq T:=\Big(\frac{1}{p},0\Big)\ensuremath{\mathbb{Z}} +
\textup{span}_{\ensuremath{\mathbb{R}}}\Big\{\Big(1,\frac{\tilde{b}_2}{\tilde{b}_1}\Big)\Big\}=\Big\{\Big(\frac{k}{p}+r,
r\frac{p}{q}\Big): k \in \ensuremath{\mathbb{Z}}, \ r \in \ensuremath{\mathbb{R}}\Big\}$$
since
$\textup{span}_{\ensuremath{\mathbb{R}}}\{(\tilde{b}_1,\tilde{b}_2)\}=\textup{span}_{\ensuremath{\mathbb{R}}}\{(1,\frac{\tilde{b}_2}{\tilde{b}_1})\}\subset
T$ and $\ensuremath{\mathbb{Z}}^2\subset T$. The last statement follows since for any
$(m,n)\in \ensuremath{\mathbb{Z}}^2$, we can take $k=pm-qn\in \ensuremath{\mathbb{Z}}$ and $r=n \frac{q}{p}\in
\ensuremath{\mathbb{R}}$. Since $\overline{T}\neq \ensuremath{\mathbb{R}}^2$, Liouville does not hold by Theorem \ref{thm:Liouville} and Corollary
\ref{pro:group-multid}.
Conversely, assume $\tilde{b}_1,\tilde{b}_2\not=0$ and
$\frac{\tilde{b}_2}{\tilde{b}_1} \notin \mathbb{Q}$. Then $(0,\frac{\tilde{b}_2}{\tilde{b}_1})=(-1,0)+(1,\frac{\tilde{b}_2}{\tilde{b}_1})\in G$ and since $(0,1) \in G$, we get that $\{0\} \times (\ensuremath{\mathbb{Z}}+\frac{\tilde{b}_2}{\tilde{b}_1} \ensuremath{\mathbb{Z}}) \subset G$.
By Theorem \ref{thm:CharKron}, $\{0\} \times \ensuremath{\mathbb{R}} \subset
\overline{G}$. Arguing similarly with
$(\frac{\tilde{b}_1}{\tilde{b}_2},0)$, we find that $\ensuremath{\mathbb{R}} \times
\{0\}\subset \overline{G}$. Hence $\overline{G}=\ensuremath{\mathbb{R}}^2$ and
Liouville holds by Theorem \ref{thm:Liouville}.
\end{enumerate}
\end{example}
\let\oldaddcontentsline\addcontentsline
\renewcommand{\addcontentsline}[3]{}
\end{document} |
\begin{document}
\begin{abstract}
We construct a nearest-neighbour interacting particle system of exclusion type, which illustrates a transition from slow to fast diffusion. More precisely, the hydrodynamic limit of this microscopic system in the diffusive space-time scaling is the parabolic equation $\partial_t\rho=\nabla (D(\rho)\nabla \rho)$, with diffusion coefficient $ D(\rho)=m\rho^{m-1} $ where $ m\in(0,2] $, including therefore the fast diffusion regime in the range $ m\in(0,1) $, and the porous {medium} equation for $ m\in(1,2) $. The construction of the model is based on the generalized binomial theorem, and interpolates continuously in $ m $ the already known microscopic \textit{porous {medium} model} with parameter $ m=2 $, the \textit{symmetric simple exclusion process} {with} $ m=1 $, going down to a \textit{fast diffusion model} up to {any} $ m>0$. The derivation of the hydrodynamic limit for the local density of particles on the one-dimensional torus is achieved \textit{via} the entropy method -- with additional technical difficulties depending on the regime (slow or fast diffusion) {and where new properties of the \textit{porous medium model} {need} to be derived.}
\end{abstract}
\maketitle
\section{Introduction}
\subsection{Scientific context} A typical question in the field of statistical mechanics is related to the derivation of the macroscopic evolution equations from the stochastic dynamical interaction of microscopic particles. Over the last four decades, there has been a remarkable progress in the derivation of these equations, which are partial differential equations (PDEs), governing the space-time evolution of the conserved quantities of the microscopic system, \textit{i.e.~}the well-known \emph{hydrodynamic limit}, see for instance \cite[Chapter 3]{SPOHN:book} {for an introduction on the subject}. {In particular, \emph{stochastic lattice gases}, a specific type of models where particles interact on a lattice and evolve according to a Markovian dynamics, have been the subject of intense scrutiny \cite{KL:book} and
many results have been obtained by both physicists and mathematicians on their microscopic and macroscopic
behavior.}
The nature {of the hydrodynamic equations} depends on the dynamics at the particle level and it can be for instance: parabolic, hyperbolic, or even of a fractional form.
An equation {which} has received a lot of attention in the last years in the PDE community is the following equation, posed for every $(t,u) \in \mathbb{R}_+\times \mathbb{T}$ where $\mathbb{T}$ is the one-dimensional torus $[0,1)$ with $0\equiv 1$, and given for $ m\in\mathbb{R}$, $m\neq 0$, by
\begin{align}\label{PDE:formal}
\partial_t\rho=\partial_{{uu}}(\rho^m), \qquad (t,{u})\in \mathbb{R}_+\times\mathbb T.
\end{align}
This is a parabolic equation, with diffusion coefficient given by
\begin{align}\label{diffusion}
D(\rho)=m\rho^{m-1}.
\end{align}
For $ m>1 $, \eqref{PDE:formal} is the \emph{porous {medium} equation}, referred to as PME; for $ m=1 $ it is the standard heat equation (HE), while for $ m<1 $ it belongs to the class of fast diffusion equations, and in this case we will refer to it as FDE. The rigorous analysis of \eqref{PDE:formal} has attracted a lot of interest in the past decades, we refer the reader to \cite{vazquez} for a review on this subject.
From the particle systems' point of view, the rigorous derivation of \eqref{PDE:formal} has been successfully achieved for particular values of $m$, in several different ways.
The HE has been obtained as the hydrodynamic limit of the \emph{symmetric simple exclusion process} (SSEP) (see, for example, \cite[Chapter 4]{KL:book}). In this process, particles evolve on the discrete torus $ \mathbb{T}_N=\mathbb{Z}/N\mathbb{Z} $ and after an exponential clock of rate one, a particle jumps to one of its {two} nearest-neighbours chosen with equal probability, but the jump only occurs if the destination site is empty ({this is the \emph{exclusion rule})}, otherwise it is suppressed and all the clocks, which are independent of each other, restart. The configuration of particles in the system at time $t>0$ is denoted by $\eta_t=(\eta_t(x))_{x\in\mathbb{T}_N}$ and it is an element of $\Omega_N:=\{0,1\}^{\mathbb{T}_N}$, where $\eta(x)\in\{0,1\}$ denotes the number of particles at position $x$. {Moreover, the process $\{\eta_t\}_{t\geqslant 0}$ is a Markov process on $\Omega_N$, and the \emph{jump rate} from a site $x$ to site $x+1$ is given by $\eta(x)(1-\eta(x+1))$ while the jump rate from site $x+1$ to site $x$ is given by $(1-\eta(x))\eta(x+1)$.}
In \cite{GLT}, the authors derived the PME for any integer value of $m\geqslant 2$ by considering an exclusion process with \emph{degenerate} rates. More precisely, as above, particles evolve on the discrete torus $ \mathbb{T}_N$ according to the exclusion rule, but the jump rate depends on the number of particles in the vicinity of the edge where the jump occurs. To be concrete, if, for example, $m=2$, then the jump rate from a site $x$ to the site $x+1$ is given by $\eta(x)(1-\eta(x+1))(\eta(x-1)+\eta(x+2))$ and the rate from $x+1$ to $x$ is given by $\eta(x+1)(1-\eta(x))(\eta(x-1)+\eta(x+2))$. This means that {for a jump from $x$ to $x+1$ to happen, one imposes to have \textit{at least one} particle in the vicinity $\{x-1,x+2\}$} ({see Figure \ref{fig:PMM2} for an example of transition rates for this model}). {Besides, one can easily compute} the {microscopic} instantaneous current of the system, \emph{i.e.}~the difference between the jump rate from $x$ to $x+1$ and the jump rate from $x+1$ to $x$, {which} is then equal to $(\eta(x)-\eta(x+1))(\eta(x-1)+\eta(x+2))$. {Remarkably, this microscopic current} can be rewritten as a {discrete} gradient of some function $h({\eta}),$ {see Lemma \ref{lem:grad} below}. In {fact}, the choice for those specific rates {is made in order} to have the aforementioned gradient property of the instantaneous current, which turns the system into a \emph{gradient} one, and classical methods can be explored without too many complications, {see \cite[Chapters 5 and 6]{KL:book}}. Since particles only swap positions on the torus, the number of particles is conserved by the dynamics. The PME {\eqref{PDE:formal} with $m=2$} has {then} been obtained as the hydrodynamic limit {of} the {empirical} density of particles. This rationale was extended to any integer $ m\geqslant2 $, and the resulting microscopic system is now called the \textit{porous medium model}, denoted by PMM($ m-1 $) {with} hydrodynamic equation {\eqref{PDE:formal}}.
{Later} in \cite{BDGN}, {the same} PME for any integer $ m\geqslant2 $ {has been obtained} on the interval $[0,1]$, with different types of boundary conditions (Dirichlet, Robin and Neumann), again as the hydrodynamic limit of the same constrained exclusion process, but in contact with stochastic reservoirs, {which inject and destroy particles at the two extremities with some rate} which is regulated by a parameter, {giving} rise to the aforementioned boundary conditions.
{Another approach had previously been developed in} \cite{SU93,ES96, FIS97}. {First}, the porous medium equation when $ m=2$ was derived in \cite{SU93,ES96} from a model in which the occupation number is a continuous variable {(therefore belonging to another class of models)}. More precisely, the model consists of configurations of sticks or energies; the configurations evolve randomly through exchanges of stick portions between nearest-neighbours through a \emph{zero-range} pressure mechanism, and the conservation {law} is the total stick-length. Later in \cite{FIS97} the authors extended the derivation of the hydrodynamic limit from the previous model, {and obtained the PME for all range $m>1$}.
{Finally,} {concerning} the fast diffusion case, {few results are available in the literature.} In \cite{HJV20} the FDE with $ m=-1 $ {has been} derived {as the hydrodynamic limit of a} \textit{zero-range process} ({the number of particles per site can be any non-negative integer}) evolving on {the discrete torus}, with {a jump} rate function adjusted to observe frequently a large number of particles, with a specific ``weight'' associated to each particle. The formalization of the hydrodynamic limit was achieved by using Yau's relative entropy method \cite{yau} with some adaptations including spectral gap estimates. The derivation of the FDE for general $ m<1 $ was left there as an open problem.
\subsection{Construction of new models} {In this paper we address two questions}: first, how can we generalize the family of PMMs, {namely exclusion processes}, to $ m $ not being an integer? Second, due to the different nature of the interacting particle systems constructed to derive \eqref{PDE:formal} under the slow-diffusion regime and the fast-diffusion regime, is there a single family parametrized by $ m $ that interpolates between the slow and the fast diffusion?
{Here we} give some answers in the direction of the first question, and a positive answer regarding the second. We {construct} a family of {exclusion processes} parametrized by $ m\in{[0,2]} $ and evolving on the one-dimensional (discrete) torus $ \mathbb{T}_N $ {and we prove that their} hydrodynamic limit is given by \eqref{PDE:formal}. The motivation for the definition of our models comes from the analysis of the diffusion coefficient $D(\rho)=m\rho^{m-1} $ and the generalized binomial theorem (Proposition \ref{th:gen_bin} below). As a consequence, the resulting family of models interpolates continuously in $ m $ between the SSEP and the PMM(1), in a sense that we shall explain more precisely later on ({see \eqref{intro:interpol}} below). The point is that the generalized binomial theorem allows representing the diffusion coefficient $D(\rho)=m(1-(1-\rho))^{m-1}$ in terms of a series, \emph{i.e.}
\begin{align}\label{into:series}
D(\rho)=\sum_{k\geqslant 1} \binom{m}{k} (-1)^{k-1} k (1-\rho)^{k-1},
\end{align}
{which} can be properly truncated into a polynomial. Above $ \binom{m}{k} $ is the generalized binomial coefficient, see \eqref{eq:binom} {for the definition}. In the construction {of} the new interpolating model based {on} \eqref{into:series}, the family $\{\text{PMM}(k)\}_{k\geqslant 0}$ can be seen as a ``polynomial basis''. {Remember that} the porous medium models PMM($k$) considered in \cite{GLT} are of gradient type, and moreover {it can be easily seen that} the Bernoulli product measures with constant parameter are invariant for {each PMM($k$)}. {Remarkably}, the {interpolating} model {keeps both properties}, and {moreover it becomes} irreducible, {in the sense that every particle configuration can be changed into any other configuration with the same number of particles through successive jumps that happen with positive probability. {We note that} this \emph{irreducibility property} was not verified for the original PMM($k$), and in fact one of the technical difficulties of \cite{GLT} was to work with the so-called \emph{mobile clusters}, \textit{i.e.}~couple of particles at distance at most two, that allow the transport of blocked particles in the system, {but they are not needed here}.}
{Let us now} be more precise. {As before, $\{\eta_t\}_{t\geqslant 0}$ is a Markov process on $\Omega_N$, and it can be entirely defined through its \emph{infinitesimal generator}, denoted below by $\mathcal{L}_N^{(m-1)}$, which is an operator acting on functions defined on $\Omega_N$. In order to give a precise definition, we first need to introduce the infinitesimal generators related to the basis mentioned above: let $ \mathcal{L}_N^{\overline{\text{PMM}}({k})} $ } be the generator of a process {defined like} {the} PMM($ k $), but with the constraints acting on \textit{empty} sites, {instead of} particles (in other words, {for $ k=1 $}, the jump from $x$ to $x+1$ happens if there is at least one empty site in $\{x-1,x+2\}$, {see Figure \ref{fig:PMMholes}}). {We are now ready to introduce the infinitesimal generator of the interpolating model, which is a linear combination of the latter}, and is defined for any $m\in(0,2]$ by
\begin{align}\label{intro:gen}
\mathcal{L}_N^{(m-1)} = \sum_{k=1}^{\ell_N} \binom{m}{k}(-1)^{k-1} \mathcal{L}_N^{\overline{\text{PMM}}(k-1)}
,
\quad
\text{where} \quad 2\leqslant\ell_N \xrightarrow[N\to+\infty]{}+\infty.
\end{align}
The treatment of a linear combination of models with $ \ell_N\to+\infty $ as $N\to+\infty$ is one of the novelties of this work. It is also worth pointing out that although \eqref{PDE:formal} only has local interactions, we do \textit{not} require that $ \ell_N=o(N) $, and it can be of any order as long as $ {N}\geqslant \ell_N\to+\infty $. {In fact several} difficulties in this paper arise from maintaining $ \ell_N $ with no order restrictions. To achieve this, some new ideas and properties of the family $ \{\text{PMM}(k)\}_{k\geqslant 0} $ are explored. The interpolating property {invoked above} is a consequence of the definition of the generalized binomial coefficients. Concretely, denoting by $ r_N^{(m-1)}(\eta) $ the jump rate {appearing in $\mathcal{L}_N^{(m-1)}$ at the edge} $ \{0,1\} $ ({for a jump happening from $0$ to $1$ or $1$ to $0$}), for some fixed configuration $ \eta $ and fixed $ N $ it holds that
\begin{align}\label{intro:interpol}
\lim_{m \nearrow 1}r_N^{(m-1)}(\eta)
=\mathbf{r}_{0,1}^{\text{SSEP}}(\eta)
=\lim_{m\searrow 1}r_N^{(m-1)}(\eta)
\qquad \text{and}\qquad
\lim_{m\nearrow 2}r_N^{(m-1)}(\eta)
=\mathbf{r}_{0,1}^{\text{PMM}(1)}(\eta),
\end{align}
where $ \mathbf{r}_{0,1}^{\text{SSEP}}(\eta) $ and $ \mathbf{r}_{0,1}^{\text{PMM}(1)}(\eta) $ are the {jump} rates at the edge $ \{0,1\} $, for the SSEP and PMM($ 1 $), respectively. To better visualize how these rates can deform the SSEP into a slow or fast diffusion model we refer the reader to Figure \ref{fig:1} and to the discussion just before it.
We remark that the sign of the generalized binomial coefficients $ \binom{m}{k} $ changes {according to} the values of $ m $ and $ k $. This oscillating nature is {the reason why one may} find rates for which \eqref{intro:gen} is not well-defined for $ m>2 $ and {why} an extension of our models to $m>2$ is still out of reach. For $ m\in(0,2) $, the sign of these coefficients leads to an interpretation of the resulting models as the SSEP with either a \textit{penalization} or \textit{reinforcement} given by porous medium models (with constraints on the empty sites), as explained in \eqref{PMM_rewrite}, {and this also explains why the interpolating model becomes irreducible}.
This is presented in more details in Proposition \ref{prop:low_bound_r}.
\subsection{Main result and strategy}
Proving a \textit{hydrodynamic limit} is, in plain terms, a law of large numbers for the conserved quantity of the system, in our case the density of particles. Concretely, the empirical measure associated to the {particle} density {at time $t>0$} is defined {for any
$\eta \in \Omega_N$}, as follows
\begin{align*}
\pi_t^N(\eta,\mathrm{d}u)=\frac1N \sum_{x\in\mathbb{T}_N}\eta_t(x)\delta_{x/N}(\mathrm{d}u).
\end{align*}
In other words $\pi_t^N(\eta,\mathrm{d}u)$ is a {random} measure on the continuous {torus} $\mathbb{T}$ and performs the link between the microscopic and macroscopic space scales, {\emph{via}} $ x\mapsto N^{-1}x $. The main result of this paper states that starting from a \emph{local equilibrium} distribution ({see Definition \ref{def:ass}}), this {random empirical measure, {taken under the diffusive time-rescaling $ t\mapsto N^2t $}, converges in probability as $N\to+\infty$, to a deterministic measure $\rho_t(u)du$, where $\rho_t(u)$ is {the unique} weak solution of the \textit{hydrodynamic equation} \eqref{PDE:formal} for $ m\in(0,2) $.}
Our proof follows the entropy method introduced by \cite{GPV}, which highly relies on the fact that the microscopic model of particles is gradient and has the irreducibility property.
The overall strategy can be split into three steps: (i) {we prove} tightness of the sequence of measures induced by the density empirical measure; (ii) {we obtain an} \emph{energy estimate} {which gives information on the regularity of the density profile, and this information is crucial for the proof of uniqueness of weak solutions}; (iii) {we characterize} uniquely the limiting points. Different {technical} problems arise for both slow ($m>1$) and fast ($m<1$) regimes. Since we deal with systems whose jump rates are of polynomial form, we need to show that these polynomials are such that the equations for the empirical measures can be recovered. This is known in the literature as the \emph{replacement lemmas} which are one of the most difficult challenges in the derivation of hydrodynamic limits from microscopic systems. In particular, the replacement lemmas are specific to each regime (see Lemmas \ref{lem:rep_shift}, \ref{lem:rep_boxes} for the slow regime and Lemmas \ref{lem:rep_FDM-tight}, \ref{lem:rep_FDM} for the fast regime). Fundamental to the proof of those lemmas is the energy lower bound (Proposition \ref{prop:energy}) which compares the Dirichlet form of our process with the ``Carr\'e-du-Champ'' operator, and the results of Subsection \ref{sec:main_model}, {where} we derive some new properties of the family $\{\text{PMM}(k)\}_{k\geqslant 0} $, in particular we prove {several bounds on their rates} {{which also show} that our models are well-defined.}
In the fast regime, {it is surprising that} the tightness step {requires} the replacement Lemma \ref{lem:rep_FDM-tight}, due to the supremum of the rates being unbounded as $ N\to+\infty $. {Finally} the characterization of the limit points is the most technical part, and also uses several replacement lemmas. We note that the scheme which is implemented for the slow regime is a simplification of the scheme of \cite{BDGN}.
The application of those replacement lemmas involves some novelties due to the summation with binomial coefficients in the definition of $ \mathcal{L}^{(m-1)} $. {Roughly speaking}, the replacement lemmas link the microscopic and macroscopic scales by approximating the product of $ k $ occupation variables by $ k $ empirical averages over independent boxes -- first by \textit{microscopic} boxes (``one-block estimate''), then by approximating the microscopic boxes by \textit{mesoscopic} boxes (``two-blocks estimate''). {Here}, very importantly, the size of these boxes needs to be adjusted dynamically with $ k $ for the series of errors to vanish in the limit $ N\to+\infty $. However, {this dynamical argument alone would require to impose} stronger assumptions on the explosion of $ \ell_N $. To avoid this, it is fundamental to first slow down the explosion by replacing $ \ell_N $ by $ (\ell_N)^n $ with $ 0<n<1 $. This argument depends on the order of the tail of the series $ \sum_{k\geqslant1}\abs{\binom{m}{k}} $. Naturally, the treatment of this series also requires a sharp non-asymptotic estimate on the binomial coefficients, {see} Lemma \ref{lem:bin_bound}.
{Finally,} there were some technical issues regarding the \textit{energy estimate}, {precisely when} showing that the (weak) solution of \eqref{PDE:formal} (Definition \ref{def:weak}) belongs to the target Sobolev space. This is {crucial} because it allows us to argue that the solution to the PDE is H\"{o}lder continuous, which in turn is essential to show that it is well approximated locally by the empirical measure. {The weak differentiability of specific functions of $ \rho $ is also needed to prove uniqueness, giving us that the whole sequence of measures converges thanks to tightness. Specifically, if $ \rho^m $ belongs to the target Sobolev space (which is the case for $ m\in(1,2) $), uniqueness follows by simple energy arguments (see Lemma \ref{lem:uniq_PME}), while if $ \rho $ only belongs to the target Sobolev space (when $ m\in(0,1)$), then the proof is more involved (see Lemma \ref{lem:uniq_FDE}), and it is an adaptation of the argument for \textit{very weak} solutions in \cite{vazquez}.
}
\subsection{Extensions and future work}\label{sec:ext}
Now we comment a bit on possible extensions of our results. First we note that for $ m>2 $ there are configurations where the rates $ r_N^{(m-1)}(\eta) $ {are negative} and {therefore} the model is not well-defined. An example is $ m\in(2,3) $ with $ \eta(0)+\eta(1)=1 $ and $ \eta(-1)=0,\;\eta(x)=1\ $ for $ x\neq-1,0,1 $. The extension to $ m>2 $ requires a different approach and will be the subject of study on a forthcoming work.
We also highlight that the derivation of fractional equations from microscopic systems has attracted a lot of attention recently. In another forthcoming work we will use the mechanism based on the generalized binomial theorem to construct a well-defined Markov generator interpolating the long-range SSEP (introduced in \cite{LR:JARA08}) and the long-range PMM($ 1 $) (introduced in \cite{LR:CDG2022}), whose hydrodynamic limit follows $ \partial_t\rho=-(-\Delta)^{\frac{\gamma}{2}}\rho^m $ with $ m\in(0,2] $ and $ \gamma\in(1,2) $. This is work in progress.
As a final note, our main goal was to introduce a toy model in the simplest context. From the stochastic process point of view, it would be interesting to extend our results to higher (finite) dimensions. Moreover, the lower bound in Proposition \ref{prop:energy} could be used to extend our results to the open boundary setting, following similar arguments as in \cite{BDGN} and {using} our approach {for} the treatment of the sum up to $ \ell_N $, with some adaptations. Fixing the rate of creation/annihilation of particles to be proportional to $ N^{-\theta} $ for $ \theta\geqslant0 $, one could obtain {different} boundary regimes: Dirichlet ($\theta\in[0,1)$), non-linear Robin ($ \theta=1 $) and Neumann ($ \theta>1 $); with the specific expressions as in \cite{BDGN} but with $ m=2 $ there extended to $ m\in(0,2)\backslash\{1\} $.
All this is left for future work.
\subsection{Outline of the paper} The present work is organized as follows: Section \ref{sec:models} is devoted to introducing the family of porous medium models which will be the building blocks to construct our new models {and used to prove some of the important properties of the latter}; particularly, in Subsection \ref{sec:main_model} we construct the interpolating models, prove that they are well-defined, and in Subsection \ref{subsec:interp_prop} we study some of their monotonicity properties {and present our main result.}
Then we prove the {convergence towards the} hydrodynamic limit in Section \ref{sec:HL}. Section \ref{sec:replace} is devoted to the statement and proof of the so-called \textit{replacement Lemmas}, which are in the heart of the proof of the hydrodynamic limit. Finally, in Section \ref{sec:energy} we obtain the \textit{energy estimates}. In Appendix \ref{app:aux_res} we prove an auxiliary result regarding the generalized binomial coefficients and in Appendix \ref{app:PDE} uniqueness and regularity results regarding the weak solution of the hydrodynamic equations are derived.
\section{Microscopic models and Main Result}\label{sec:models}
Let $ \mathbb{N}_+ $ be the set of positive natural numbers and denote by $ N \in\mathbb{N}_+ $ a scaling parameter. Denote by $ \mathbb{T}_N $ the one dimensional discrete torus, that is, $ \mathbb{T}_N=\{1,\dots, N\} $ with the identification $ 0\equiv N $. For any $ x < y\in\mathbb{Z} $, that can be viewed as elements in $\mathbb{T}_N$ by considering their standard projections, we define $ \llbracket x,y\rrbracket $ as the discrete interval composed by all the discrete points between $ x,y $ (including $ x,y $) in $ \mathbb{T}_N $, where the order has been inherited from the one in $\mathbb{Z}$.
The microscopic dynamics at the core of this paper is a system of particles which {evolves according to a Markov process}, satisfying the exclusion rule and situated on the discrete torus $\mathbb{T}_N$. A particle configuration $\eta$ is an element of $\Omega_N=\{0,1\}^{\mathbb{T}_N}$, namely $ \eta(x)\in\{0,1\} $ for any $ x\in\mathbb{T}_N $.
Particles can jump to nearest-neighbour sites only, providing the latter are not already occupied. Before defining the generator of the dynamics, let us introduce the following operators:
\begin{Def}[Exchange of occupation variables]
For any $ x,y,z\in\mathbb{T}_N $ let us consider the exchange of occupation variables $ \eta\mapsto \eta^{x,y} $ given by
\begin{align*}
\eta^{x,y}(z)
=\mathbf{1}_{z\neq x,y}\;\eta(z)
+\mathbf{1}_{z=x}\;\eta(y)
+\mathbf{1}_{z=y}\;\eta(x).
\end{align*}
We define the operator $ \nabla_{x,y} $ associated to the occupation exchange, given on any $ f:\Omega_N\to\mathbb{R} $ by
\begin{align*}
\nabla_{x,y} f(\eta)
=f(\eta^{x,y})-f(\eta).
\end{align*}
Finally, for any $ x\in\mathbb{T}_N $, define the translation $ \tau_{x}\eta(y)=\eta(x+y) $ for $y\in\mathbb T_N$, and extend it to functions $ f:\Omega_N\to\mathbb{R} $ by $
\tau_xf(\eta)=f(\tau_x\eta).$
\end{Def}
The rest of the section is organized as follows: first of all, we recall the definition of the \emph{porous medium models} from \cite{GLT}, which correspond to a microscopic description of the PME for any integer $m\geqslant 2$. Then, we define its \emph{flipped} version, in the sense that the kinetic constraint is imposed on empty sites instead of particles. Finally, we define a new microscopic family of models parametrized by $ m\in(0,2)\backslash\{1\}, $ which we call \textit{non-integer fast diffusion model} when $ m\in(0,1) $, and \textit{non-integer porous medium model} when $ m\in(1,2) $.
\subsection{Porous media model with dynamical constraints on vacant sites}
Roughly speaking, our models can be seen as the SSEP either reinforced or penalized by a linear combination of \emph{kinetically constrained exclusion processes} (KCEP), which the family PMM($k$) belongs to. Let us first recall the definition of the known models which will come into play.
\begin{Def}[Symmetric Simple Exclusion Process]
We denote by SSEP on $ \mathbb{T}_N $ the Markov process with state space $\Omega_N$ generated by the following operator $ \mathcal{L}_N^{\text{SSEP}} $, which acts on $ f:\Omega_N\to\mathbb{R} $ as:
\begin{align*}
(\mathcal{L}_N^{\text{SSEP}}f)(\eta)=\sum_{x\in\mathbb{T}_N}
\mathbf{a}_{x,x+1}(\eta)
(\nabla_{x,x+1}f)(\eta)
\end{align*} for any $\eta\in\Omega_N$,
where
\begin{equation}\label{eq:s}
\mathbf{a}_{0,1}(\eta) = \eta(0)(1-\eta(1))+\eta(1)(1-\eta(0)), \qquad \mathbf{a}_{x,x+1}(\eta) =\mathbf{a}_{x+1,x}(\eta)=\tau_x \mathbf{a}_{0,1}(\eta).
\end{equation}
Note that the latter equals $ 1 $ if exactly one site among $\{x,x+1\}$ is occupied by a particle, and $ 0 $ otherwise. Due to the symmetry of the rates we will short-write $
\mathbf{a}:= \mathbf{a}_{0,1}=\mathbf{a}_{1,0}.$
\end{Def}
\begin{Def}[Porous Medium Model for any integer $ k\geqslant 1$, \cite{GLT}]
For any $ k\in\mathbb{N}_+ $ let us denote by PMM($ k $) the \emph{porous medium model} on $ \mathbb{T}_N $ with parameter $ k $, as the Markov process with state space $\Omega_N$ generated by the following operator $ \mathcal{L}_N^{\text{PMM}(k)} $, which acts on $ f:\Omega_N\to\mathbb{R} $ as:
\begin{align*}
(\mathcal{L}_N^{\text{PMM}(k)}f)(\eta)=\sum_{x\in\mathbb{T}_N}
\mathbf{c}^{(k)}_{x,x+1}(\eta)
\mathbf{a}_{x,x+1}(\eta)
(\nabla_{x,x+1}f)(\eta)
\end{align*} for any $\eta\in\Omega_N$,
where $\; \mathbf{c}^{(k)}_{x,x+1}(\eta) = \tau_x \mathbf{c}_{0,1}^{(k)}(\eta) $ with
\begin{align}\label{rate:pmm_int}
\mathbf{c}_{0,1}^{(k)}(\eta) =\sum_{j=1}^{k+1}
\mathbf{s}_j^{(k)}(\eta)
\quad\text{and}\quad
\mathbf{s}_j^{(k)}(\eta)=\prod_{\substack{i=-(k+1)+j\\i\neq0,1}}^j\eta(i).
\end{align}
\end{Def}
\begin{Not}
We write
\begin{equation}\label{eq:ck}
\mathbf{r}^{(k)}_{x,x+1}(\eta)
=\mathbf{c}^{(k)}_{x,x+1}(\eta)\;\mathbf{a}_{x,x+1}(\eta) = \mathbf{r}^{(k)}_{x+1,x}(\eta)
\end{equation}
for the rate at which the occupation variables $\eta(x)$ and $\eta(x+1)$ are exchanged in PMM($k$). The quantity $\mathbf{c}_{x,x+1}^{(k)}(\eta)$ is the constraint to be satisfied for the jump to happen.
Again due to the symmetry of the rate and constraint, we short-write
\begin{equation}\label{eq:defs}
\mathbf{c}^{(k)}(\eta)\equiv \mathbf{c}_{0,1}^{(k)}(\eta)
\quad\text{ and }\quad
\mathbf{r}^{(k)}(\eta)\equiv \mathbf{r}_{0,1}^{(k)}(\eta).
\end{equation}
\end{Not}
As it can be seen from \eqref{rate:pmm_int} and Figure \ref{fig:constraint}, a jump crossing the bond $\{x,x+1\}$ is allowed only if at least $k$ consecutive particles out of the edge $\{x,x+1\}$ are situated in the box $\llbracket x-k, x+(k+1) \rrbracket $.
\begin{figure}
\caption{PMM($ 2 $) valid constraints for which a particle swaps positions in the edge $ \{0,1\} $.}
\label{fig:constraint}
\end{figure}
An illustration of the dynamics for $k=1$ is also provided in Figure \ref{fig:PMM2}.
\begin{Rem}[$k=0$]\label{rem:SSEP=PMM(0)} Note that for $k=0$, $\mathbf{c}^{(0)}(\eta)\equiv 1$ and therefore $ \mathbf{r}^{(0)}(\eta)=\mathbf{a}(\eta)$, which corresponds to the exchange rate in SSEP. It will be useful to interpret $\text{PMM}(0)=\text{SSEP}$.
\end{Rem}
\begin{Def}[Flipped configuration]
For any $ \eta\in\Omega_N $, let $ \eta\mapsto\overline{\eta} $ be the map that flips holes with particles, namely: for any $x\in\mathbb{T}_N$, $ \overline{\eta}(x)=1-\eta(x).$
\end{Def}
We are now ready to introduce the flipped porous medium model.
\begin{Def}
For any $ k\in\mathbb{N}_+ $, let us denote by $\overline{\text{PMM}}(k)$ the \emph{flipped porous medium model} with parameter $k$ with dynamical constraints on the vacant sites, as the Markov process on $\Omega_N$ generated by the following operator $ \mathcal{L}_N^{\overline{\text{PMM}}(k)} $, which acts on functions $ f:\Omega_N\to\mathbb{R} $ as
\begin{equation}\label{eq:dualPM}
\big(\mathcal{L}_N^{\overline{\text{PMM}}(k)}f\big)(\eta)
=\sum_{x\in\mathbb{T}_N}\mathbf{c}^{(k)}_{x,x+1}(\overline{\eta})\mathbf{a}_{x,x+1}(\eta)
(\nabla_{x,x+1}f)(\eta)
\end{equation} for any $\eta\in\Omega_N$.
\end{Def}
Note that the process above can be interpreted as the \textit{empty sites} following the same constraint as in PMM($ k $): a jump crossing the bond $\{x,x+1\}$ is allowed only if at least $k$ ``consecutive" \emph{empty sites} out of the edge $\{x,x+1\}$ are situated in the box $\llbracket x-k, x+(k+1) \rrbracket
$. An illustration of the dynamics is provided in Figure \ref{fig:PMMholes}. We also highlight that the parameter $ k $ in the PMM($ k $) corresponds to the exponent of the diffusion coefficient, $ D(\rho)=(k+1)\rho^k $, hence to the equation \eqref{PDE:formal} with $ m=k+1 $.
\begin{figure}
\caption{PMM($ 1 $) transition rates.}
\label{fig:PMM2}
\caption{$\overline{\text{PMM}}(1)$ transition rates.}
\label{fig:PMMholes}
\end{figure}
\subsection{The interpolating model}\label{sec:main_model}
Recall Remark \ref{rem:SSEP=PMM(0)}, where we made the observation that SSEP$=$PMM($ 0 $). The construction of the interpolating model will be based on two main ingredients: the generalized binomial theorem and the fact that the family $\{\text{PMM}(k)\}_{k\geqslant0} $ can be seen as a "polynomial basis" for the diffusion coefficient $ D(\rho)=m\rho^{m-1} $.
\subsubsection{Construction}
We base our analysis in the next identity: for any $\rho \in (0,1)$
\begin{equation}\label{eq:expand} m\rho^{m-1} = m(1-(1-\rho))^{m-1} = m\sum_{k\geqslant 0} \binom{m-1}{k}(-1)^k (1-\rho)^k= \sum_{k\geqslant 1} \binom{m}{k} (-1)^{k-1} k (1-\rho)^{k-1} \end{equation} where the generalized binomial coefficient is given by the formula
\begin{equation}\binom{c}{k} = \frac{(c)_k}{k!} = \frac{c(c-1)\cdots(c-(k-1))}{k!},\qquad c\in\mathbb{R}\label{eq:binom}\end{equation} and therefore we have the identity $m\binom{m-1}{k} = (k+1) \binom{m}{k+1} $. This is a particular case of the generalized binomial expansion for real coefficients:
\begin{Prop}[Generalized Binomial Theorem]\label{th:gen_bin}
For any $ x,y,c\in\mathbb{R} $ such that $ \abs{x}>\abs{y} $ we have that
\begin{align*}
(x+y)^c&=\sum_{k=0}^\infty\binom{c}{k}x^{c-k}y^k,
\end{align*}
where $\binom{c}{k}$ has been defined in \eqref{eq:binom}.
\end{Prop}
\begin{proof}
The proof is standard and as such we only outline the main steps. Without loss of generality let $ x\neq0 $. Writing $ z=y/x $ we have $ (x+y)^c=x^c(1+z)^c $. Let $f(z)=(1+z)^c$ be defined for $|z|<1$. Then, by induction we see that $ \frac{d^kf}{dz^k}(z)=(c)_k(1+z)^{c-k} $ for any $ k\geqslant1 $ integer. To conclude we recall the Taylor expansion of $ f $ and apply Lemma \ref{lem:bin_bound} stated below, which guarantees the convergence.
\end{proof}
Proposition \ref{th:gen_bin} implies the convergence of the series appearing in \eqref{eq:expand} for any $ \rho\in(0,1) $.
For $ \rho\in\{0,1\} $ and $ m\in(1,2) $ or $ \rho=1 $ and $ m\in(0,1) $ one can also easily guarantee the convergence by replacing $ \rho $ by $ 1 $ or $ 0 $ in each term of the series as written in \eqref{eq:expand}. For $ m\in(0,1) $ and $ \rho=0 $ the series is divergent. This will not be a problem, since due to the gradient property of the model {we shall see that the main object of study will be $ \rho^m $ and not $ \rho^{m-1} $.}
\begin{Def}[Interpolating model] Let $m\in{[0,2]}$, $ N\in\mathbb{N}_+ $ and $ \ell_N\in\mathbb{N},$ with $\ell_N\geqslant 2 $. We define the generator
\begin{align}
\mathcal{L}_N^{(m-1)}: = \sum_{k=1}^{\ell_N} \binom{m}{k}(-1)^{k-1} \mathcal{L}_N^{\overline{\text{PMM}}(k-1)} \label{PMM:m1}
\end{align} where $\mathcal{L}_N^{\overline{\text{PMM}}(k)}$ has been defined in \eqref{eq:dualPM}.
More precisely, this generator acts on functions $ f:\Omega_N\to\mathbb{R} $ as
\begin{align*}
(\mathcal{L}_N^{(m-1)}f)(\eta)
=\sum_{x\in\mathbb{T}_N}c_N^{(m-1)}(\tau_x\eta)\mathbf{a}_{x,x+1}(\eta)(\nabla_{x,x+1}f)(\eta),
\end{align*}
where \begin{equation}\label{eq:transitionrates}
c_N^{(m-1)}(\eta)=\sum_{k=1}^{\ell_N} \binom{m}{k} (-1)^{k-1} \mathbf{c}^{(k-1)}(\overline\eta)
\end{equation}
and we shorten the rate $ r_N^{(m-1)}(\eta)=c_N^{(m-1)}(\eta)\;\mathbf{a}(\eta) $. We call \emph{non-integer porous medium model} (resp.~\emph{fast diffusion model}), and we denote it by PMM($ m-1 $) (resp.~by FDM($ m-1 $)), the Markov process whose infinitesimal generator is given by \eqref{PMM:m1} with $ m\in(1,2) $ (resp.~$ m\in(0,1) $).
\end{Def}
\begin{Rem}[About the restrictions on $ \ell_N $]
Although there is no particular assumption on the order at which $ \ell_N\to+\infty $, note that if $ \ell_N>N $ then for $ N\leqslant k \leqslant\ell_N $ we have that $ \mathbf{r}^{(k)}(\eta)\neq0 $ if, and only if, every site is occupied except one at the node $ \{0,1\} $. Due to the mass conservation, this would be achievable only by starting from a configuration with one empty site only, hence no macroscopic evolution of the local density. This is a particular technical consequence of working on the torus, therefore we assume throughout the paper that $ \ell_N\leqslant N $.
\end{Rem}
The goal now is to show that the model is well-defined. In other words, {we are going to prove} that the map $ \eta\mapsto c_N^{(m-1)}(\eta) $ is non-negative. The key argument is the following remark about the sign of $(-1)^{k-1} \binom{m}{k}$. By definition, \begin{itemize} \item if $m\in (0,1)$, then $
(-1)^{k-1} \binom{m}{k} >0$ for any $ k \geqslant 1$,
\item if $m\in(1,2)$, then \[
(-1)^{k-1} \binom{m}{k} >0 \quad \text{ if } k =1, \quad \text{ and } \quad (-1)^{k-1} \binom{m}{k} <0 \quad \text{ if } k \geqslant 2.\] \end{itemize} Therefore we can rewrite
\begin{align}\label{PMM_rewrite}
\mathcal{L}_N^{(m-1)}
=
m\mathcal{L}_N^{\text{SSEP}}
-\text{sign}(m-1)
\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}\mathcal{L}_N^{\overline{\text{PMM}}(k-1)}
,
\qquad m\in(0,2)\backslash\{1\}.
\end{align}
We also need non-asymptotic bounds for the generalized binomial coefficients: from Lemma \ref{lem:bin_bound} one can extract that for $ m\in\mathbb{R} $ and $ k\geqslant2 $
\begin{align}\label{eq:usefulbound}
\frac{1}{(k+1)^m}\lesssim \abs{\binom{m-1}{k}}\lesssim \frac{1}{k^m}.
\end{align}
{The notation $ f(k)\lesssim g(k) $ shortens that {there exists $ C>0 $}, {such that} for all $ k\in\mathbb{N}$, $ \abs{f(k)}\leqslant C\abs{g(k)} $.}
Now we state and prove the main technical result of this section, which contains two estimates: {the lower bounds show that the generators are well-defined and permit to prove an energy bound (given in Proposition \ref{prop:energy}), which is essential to the proof of the forthcoming \emph{replacement lemmas}; the upper bounds reflect the boundedness of the rates as $ N\to+\infty $.}
\begin{Prop}\label{prop:low_bound_r} If $ \ell_N\gg1 $, then for any $\eta\in\Omega_N$,
\begin{align*}
r_N^{(m-1)}(\eta)\geqslant
\begin{cases}
m \; \mathbf{r}^{(0)}(\eta), &m\in(0,1), \vphantom{\Big)}\\
m\delta_N \; \mathbf{r}^{(0)}(\eta)+\binom{m}{2}\;\mathbf{r}^{(1)}(\eta), &m\in(1,2),
\end{cases}
\quad\text{and}\quad
r_N^{(m-1)}(\eta)
\leqslant\begin{cases} \displaystyle
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}k, &m\in(0,1),\\
\displaystyle m \mathbf{r}^{(0)}(\eta), &m\in(1,2),\vphantom{\bigg(}
\end{cases}
\end{align*}
where $ (\ell_N+1)^{-(m-1)}\lesssim\delta_N=\sum_{k\geqslant \ell_N}\abs{\binom{m-1}{k}}\lesssim (\ell_N)^{-(m-1)} $. Moreover, when $m\in(0,1)$,
\begin{align*}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}k
=
\max_{\eta\in\Omega_N}r_N^{(m-1)}(\eta)\xrightarrow[N\to+\infty]{}+\infty.
\end{align*}
\end{Prop}
\begin{proof}
We start with the case $ m\in (1,2) $. From \eqref{PMM_rewrite}, we rewrite
\begin{align*}
r_N^{(m-1)}(\eta)
&=m-\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}k
+\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}\big(k-\mathbf{r}^{(k-1)}(\overline{\eta})\big)
\geqslant
m-\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}k
+\binom{m}{2}\big(2-\mathbf{r}^{(1)}(\overline{\eta})\big),
\end{align*}
where for the last inequality we used the fact that, by definition, $ \mathbf{c}^{(k-1)}(\overline{\eta})\leqslant k $, and we bounded from below all but the first term of the second summation in $ k $ by zero. Then, since the alternating sum of the binomial coefficients vanishes, we obtain, for any $ \ell_N\in\mathbb{N}_+ $, that
\begin{align}\label{ineq:sum_coeff}
m-\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}k
= m\bigg(1-\sum_{k=1}^{\ell_N-1}\abs{\binom{m-1}{k}}\bigg) > m \bigg(1-\sum_{k=1}^{+\infty}\abs{\binom{m-1}{k}}\bigg)=0
\end{align}
and therefore we get that $ r_N^{(m-1)}>0 $. To conclude, we note that $2-\mathbf{c}^{(1)}(\overline{\eta}) = \mathbf{c}^{(1)}(\eta)$ and we set
\begin{align*}
\delta_N:
=1-\sum_{k=1}^{\ell_N-1}\abs{\binom{m-1}{k}}
=\sum_{k\geqslant\ell_N}\abs{\binom{m-1}{k}} >0.
\end{align*}
Recalling \eqref{eq:usefulbound}, we are reduced to estimate the tail of the $ m-$series:
\begin{align}\label{eq:m-series}
\frac{c}{(m-1)(\ell_N+1)^{m-1}}
\leqslant
\sum_{k\geqslant \ell_N+1}\frac{1}{k^m}\leqslant \frac{C}{(m-1)(\ell_N)^{m-1}}
\end{align}
with $ c,C>0 $ being constants independent of $N $. Putting the inequalities together, the proof of the lower bound follows. To prove the upper bound, we only keep the first term in the definition \eqref{eq:transitionrates} of $ r_N^{(m-1)} $, since the other ones are negative.
The case $ m\in (0,1) $ is straightforward from \eqref{PMM_rewrite}.
To conclude, we see that the maximum is obtained when $ \mathbf{r}^{(k-1)}(\overline{\eta})=k $, that is, when the window $ \llbracket -\ell_N+1,\ell_N\rrbracket\backslash \{0,1\} $ is completely empty and $ \eta(0)+\eta(1)=1 $. The lower bound for the binomial coefficients in \eqref{eq:usefulbound} then shows that this maximum tends to infinity as $ N\to+\infty $.
\end{proof}
\begin{Rem}[On the sharpness of the bounds in Proposition \ref{prop:low_bound_r}]\label{rem:bound} The estimates of Proposition \ref{prop:low_bound_r} are not sharp. Instead, the goal of the lower bound for $ m\in(1,2) $ is to relate our process with the simpler process induced by the generator
\begin{align*}
m\delta_N\mathcal{L}_N^{\text{PMM}(0)}+\tfrac{m(m-1)}{2}\mathcal{L}_N^{\text{PMM}(1)},
\end{align*}
which is very close to the one studied in \cite{GLT}, where the porous medium model is perturbed by a "small" SSEP dynamics.
The lower bound for $ m\in (0,1) $ is here to emphasize that the transition rates will always be \textit{greater} than those of the SSEP (modulo a multiplicative constant), as expected, since under this regime the macroscopic diffusion is faster than the one of the heat equation ($ m=1 $). This will be useful, in particular, for the proof of the replacement Lemma \ref{lem:rep_FDM}.
Finally, let us highlight that the divergence $ \max_{\eta\in\Omega_N}r_N^{(m-1)}(\eta)\to +\infty $ as $N \to+\infty$ gives us an extra difficulty in the proof of tightness (see in particular \eqref{h:treat_FDM}) {and makes it impossible to argue, as for $ m\in(1,2) $, that $ \rho^m $ is weak differentiable (see the last step in the proof of Proposition \ref{prop:energy_est_PME})}.
\end{Rem}
\subsection{Characterization of the interpolating family}\label{subsec:interp_prop}
In this subsection we present further properties of the interpolating model. We start by explaining how this model interpolates between the SSEP and the PMM($ 1 $).
\begin{Prop}[Interpolation property] \label{prop:interp}
For $ m\in(1,2) $, $ N\in\mathbb{N} $ and $ \ell_N\geqslant2 $ fixed, the process $\mathcal{L}_N^{(m-1)}$ interpolates between $\mathcal{L}_N^{\mathrm{PMM}(0)}$ and $\mathcal{L}_N^{\mathrm{PMM}(1)}$ in the following sense: for all $ \eta\in\Omega_N $,
\begin{align}\label{interp_1}
\lim_{m \nearrow 1}r_N^{(m-1)}(\eta)
=\mathbf{r}^{\mathrm{PMM}(0)}(\eta)
=\lim_{m\searrow 1}r_N^{(m-1)}(\eta)
\qquad \text{and}\qquad
\lim_{m\nearrow 2}r_N^{(m-1)}(\eta)
=\mathbf{r}^{\mathrm{PMM}(1)}(\eta).
\end{align}
\end{Prop}
\begin{proof}
The limit to SSEP as $ m\to1 $ from either above or below is a direct consequence of the interpolation property of the binomial coefficients, while the limit to PMM($ 1 $) is both consequence of this, but also of {some rearrangement in the summation which defines the rates, and which implies $2-\mathbf{c}^{(1)}(\overline{\eta}) = \mathbf{c}^{(1)}(\eta)$,} see also \eqref{eq:summation} below.
\end{proof}
{From \cite{GLT},} the \emph{grand-canonical invariant measures} for the PMM($k$) (and therefore for the $\overline{\text{PMM}}(k)$) are the Bernoulli product measures $\nu_\rho^N$ of parameter $\rho\in[0,1]$, namely, their marginal is given on $x\in\mathbb{T}_N$ by
\begin{equation} \label{eq:bernoulli} \nu_\rho^N\big(\eta\in\Omega_N:~\eta(x)=1\big)=\rho.\end{equation}
The next lemma gives information on the invariant measures of our models.
\begin{Lemma}[Invariant measures and irreducibility]\label{lem:first}
{Let $m\in(0,2)$.} For any $\rho\in[0,1]$, the Bernoulli product measure $\nu_\rho^N$ defined in \eqref{eq:bernoulli} is invariant for the Markov process generated by $\mathcal{L}_N^{(m-1)}$. Moreover, for any $k\in\{0,\dots,N\}$, the hyperplane
\[ \mathscr{H}_{N,k}=\Big\{\eta\in\Omega_N \; : \; \sum_{x\in\mathbb{T}_N}\eta(x)=k\Big\}
\] is irreducible under the Markov process generated by $\mathcal{L}_N^{({m-1})}$.
\end{Lemma}
\begin{proof}
The irreducibility of the process on the above hyperplanes is consequence of the fact that $ c_N^{({m-1})}(\eta)>0 $ for any $ \eta\in\Omega_N $, as shown in Proposition \ref{prop:low_bound_r}, and so the exclusion rule is the only constraint. We already know from \cite{GLT} that the product measure $ \nu_\rho^N $ is invariant for $\overline{\text{PMM}}$($ k $), for any $ k\in\mathbb{N}_+ $. In particular, it is also invariant for linear combinations of such models.
\end{proof}
For a good understanding of the interpolating model it is important to describe some properties of the integer family $ \{\text{PMM}(k)\}_{k\in\mathbb{N}} $. {In the following,} Lemma \ref{lem:up_speed} and Proposition \ref{prop:m_mono} describe new properties of the aforementioned family which will be important later on. Moreover, {thanks to some preliminary computations}, it can be seen that the macroscopic density of particles evolves \emph{diffusively}, with a diffusion coefficient that can be computed explicitly. More precisely, let us introduce the following operator:
\begin{Def}[Translation operators] \label{def:translation}
Let $ \mathbf{1} $ be the identity function on $ \Omega_N $, and consider the operators $ \nabla^\pm $ associated to the translation operator given by $ \nabla^+=\tau_1-\mathbf{1} $ and $ \nabla^-=\mathbf{1}-\tau_{-1} $, that is, for any function $ f:\Omega_N\to\mathbb{R} $, we define $ (\nabla^+f)(\eta)=f(\tau_{1}\eta)-f(\eta) , (\nabla^-f)(\eta)=f(\eta)-f(\tau_{-1}\eta) $, and for any $ x\in\mathbb{T}_N $ consider
$
(\nabla_x^\pm f)(\eta)=(\nabla^\pm f)(\tau_x\eta).
$
\end{Def}
{As noted in \cite{GLT}}, it is straightforward to check
that, for any $x\in\mathbb{T}_N$,
\begin{equation}\label{eq:gen-current} \mathcal{L}_N^{{\text{PMM}}(k)}\big(\eta(x)\big)
=\nabla^- \left(
\mathbf{c}^{(k)}(\tau_x\eta)\nabla^+\eta(x)
\right).
\end{equation}
Therefore, the \textit{microscopic density current} for PMM($k$) between sites $x$ and $x+1$, is equal to
\begin{align*}
{-\mathbf{c}^{(k)}(\tau_x\eta)\nabla^+\eta(x)
}=:\mathbf{j}_{\{x,x+1\}}^{(k)}(\eta).
\end{align*}
It turns out, see \cite{GLT}, that this quantity is itself a discrete gradient, namely
\begin{align*}
\mathbf{j}_{\{x,x+1\}}^{(k)}(\eta)=\nabla^+\mathbf{h}^{(k)}(\eta),
\end{align*} where $\mathbf{h}^{(k)}$ is given in Lemma \ref{lem:grad}.
We highlight that although this gradient property {was already} known (see \cite{GNP21} for instance), the expression \eqref{expr:h2} for $\mathbf{h}^{(k)}$ is new ({we give the original expression of $\mathbf{h}^{(k)}$ in the appendix, see \eqref{expr:h1}}). {Then, note that} the expectation of $\mathbf{c}^{(k)}(\tau_x\eta)$ under the invariant measure $\nu_\rho^N$ is
\begin{align}\label{diff:nu}
\int \mathbf{c}^{(k)}(\tau_x\eta) \mathrm{d}\nu_\rho^N(\eta)= (k+1)\rho^{k}=D(\rho)
\end{align}
which is the diffusion coefficient of the PME($ k $) \eqref{PDE:formal}, \textit{i.e.}, for $m=k+1 \in \mathbb{N}_+$. Similarly, since $ \eta(1)-\eta(0)=-(\overline{\eta}(1)-\overline{\eta}(0)) $, the gradient property is also true for $\overline{\text{PMM}}$($k$). One can readily check that the expected diffusion equation associated to the microscopic dynamics of $\overline{\text{PMM}}(k)$ has diffusion coefficient $ \overline{D}(\rho)=(k+1)(1-\rho)^{k}. $
{Let us now state more precisely the aforementioned gradient property, which} will be proved in Appendix \ref{app:aux_res}. We recall the definition of $ \mathbf{s}_j^{(k)} $ in \eqref{rate:pmm_int}.
\begin{Lemma}[Gradient property]\label{lem:grad}
For any $ k\in\mathbb{N} $, \emph{PMM}($ k $) is a gradient model. Precisely, for any $\eta\in\Omega_N$ we have that
$
\mathbf{c}^{(k)}(\eta)\nabla^+\eta(0)
=
\nabla^+\mathbf{h}^{(k)}(\eta),
$
where
\begin{align}
\mathbf{h}^{(k)}(\eta)
&=\prod_{i=0}^{k}\eta(i)
+\sum_{i=0}^{k-1}
(\eta(i)-\eta(i+1))\sum_{j=1}^{k-i}\mathbf{s}_j^{(k)}(\tau_i\eta).
\label{expr:h2}
\end{align}
\end{Lemma}
Now, for the interpolating model {generated by $\mathcal{L}_N^{(m-1)}$}, similarly to \eqref{eq:gen-current}, a straightforward computation gives for all $ x\in\mathbb{T}_N $
\begin{align}\label{curr}
\mathcal{L}_N^{(m-1)}\big(\eta(x)\big)=\nabla^{-}\left( c_N^{(m-1)}(\tau_x\eta)\nabla^{+}\eta(x)\right),
\end{align}
and we can {easily deduce from the previous lemma that}
\begin{align}\label{grad:non_int}
c_N^{(m-1)}(\eta)\nabla^{+}\eta(0)
=
\nabla^+h_N^{(m-1)}(\eta), \quad\text{where}\quad
h_{N}^{(m-1)}(\eta)
=\sum_{k=1}^{\ell_N}\binom{m}{k}(-1)^k
\mathbf{h}^{(k-1)}(\overline{\eta}).
\end{align}
\subsection{{Properties on the rates}}
We {start} by stating and proving two important properties {of the basis family $\{\text{PMM}(k)\}_{k\in\mathbb N}$}. The first one {(Lemma \ref{lem:up_speed})} {will be used later} in Propositions \ref{prop:tight}, \ref{prop:energy} and Lemma \ref{lem:rep_FDM-tight}, while the second {one (Proposition \ref{prop:r_seq}) will provide some interesting monotonicity property of the rates for both} the integer and non-integer families, see Propositions \ref{prop:m_mono} and \ref{prop:m_mono2} {at the end of this section}. Recall the definition of $ \mathbf{r}^{(k)} $ from \eqref{eq:ck}.
\begin{Lemma}[Bound on the rates]\label{lem:up_speed}
For all $ \ell,k\in\mathbb{N}_+ $ such that $ \ell\geqslant k $ and any $ \eta\in\Omega_N $ we have that
\begin{align*}
\sum_{n=1}^\ell\mathbf{r}^{(k)}(\tau_n\eta)
\leqslant 2(\ell+k).
\end{align*}
\end{Lemma}
\begin{proof}
Note that
\begin{align*}
\sum_{n=1}^\ell\mathbf{r}^{(k)}(\tau_n\eta)
=\sum_{n=1}^\ell
\mathbf{a}(\tau_n\eta)
\sum_{j=1}^{k+1}
\mathbf{s}_j^{(k)}(\tau_n\eta)
=\sum_{p=2}^{\ell+k+1}
\sum_{n=1}^{\ell}
\sum_{j=1}^{k+1}
\mathbf{a}(\tau_n\eta)\mathbf{s}_j^{(k)}(\tau_n\eta)
\mathbf{1}_{\{j+n=p\}}
\leqslant
\sum_{p=2}^{\ell+k+1}2=2(\ell+k).
\end{align*}
The inequality can be justified as follows. For fixed $ p $, the quantity $ \mathbf{s}_j^{(k)}(\tau_n\eta) $ depends on the occupation of the sites
\begin{align*}
\llbracket -(k+1)+j+n,j+n\rrbracket\backslash\{n,n+1\}
=\llbracket -(k+1)+p,p\rrbracket\backslash\{n,n+1\}.
\end{align*}
Since $ 1\leqslant j\leqslant k+1 $, we always have $ \{n,n+1\}\subset \llbracket -(k+1)+p,p\rrbracket $. There may be several pairs $ (j,n) $ such that $ j+n=p $, but for all of those pairs the box $ \llbracket -(k+1)+p,p\rrbracket$ is the same. Thus, for each fixed $ p $, there are at most two pairs $ (n,j),(n',j') $ such that $ p=n+j=n'+j' $ and $ \mathbf{a}(\tau_n\eta)\mathbf{s}_j^{(k)}(\tau_n\eta)=\mathbf{a}(\tau_{n'}\eta)\mathbf{s}_{j'}^{(k)}(\tau_{n'}\eta)=1 $. Specifically, if $ (n,j) $ is such a pair, then $ (n',j')=(n+1,j-1) $ or $ (n',j')=(n-1,j+1) $.
\begin{figure}
\caption{Configuration with $ \mathbf{a}(\tau_n\eta)\mathbf{s}_j^{(k)}(\tau_n\eta)=1 $ for two pairs $ (n,j) $ with $ n+j=p $.}
\end{figure}
\end{proof}
Now we state a monotonicity property. {The following proposition} is used {right after} in Proposition \ref{prop:m_mono2} to prove an analogous property for the {interpolating model}.
\begin{Prop}\label{prop:r_seq}
For any $ \eta\in\Omega_N $, the sequence $ \big\{\frac1k \mathbf{c}^{(k-1)}(\eta)\big\}_{k\geqslant 1} $ is non-increasing.
\end{Prop}
\begin{proof} {In order to prove the result, it is enough to show that
\[u_k(\eta):=\frac{k+1}{k} \mathbf{c}^{(k-1)}(\eta) - \mathbf{c}^{(k)}(\eta) \geqslant 0,\] for any $\eta\in\Omega_N$.
It turns out that this expression can be rewritten in terms of the products $\mathbf{s}_j^{(k)}$ defined in \eqref{rate:pmm_int}, after flipping some of the configuration values $\eta(x)$. Let us be more precise. }
To simplify the presentation let us introduce some notation: for any $ A\subseteq\mathbb{T}_N $ define the flip $ \eta\mapsto\overline{\eta}^A $ as
$
\overline{\eta}^A(x)
=\overline{\eta}(x)\mathbf{1}_{x\in A}
+\eta(x)\mathbf{1}_{x\notin A}
$.
{Straightforward computations show that
\begin{equation}\label{eq:summation}u_k(\eta)= \sum_{j=1}^{k+1} \bigg\{\frac{k-(j-1)}{k}
\mathbf{s}_j^{(k)}
\big(\overline{\eta}^{\{-(k+1)+j\}}\big)
+
\frac{j-1}{k}
\mathbf{s}_j^{(k)}
\big(\overline{\eta}^{\{j\}}\big)\bigg\}. \end{equation}
Indeed, this is a consequence of the fact that}
{\begin{itemize}\item for any $ j\in\llbracket 1,k\rrbracket $ it holds
\begin{align*}
\mathbf{s}_j^{(k)}\big(\overline{\eta}^{\{-(k+1)+j\}}\big)
&=\overline{\eta}(-(k+1)+j)\mathbf{s}_j^{(k-1)}(\eta)
=\mathbf{s}_j^{(k-1)}(\eta)-\mathbf{s}_j^{(k)}(\eta)
\end{align*}
\item and for any $ j\in\llbracket 2,k+1\rrbracket $ we have
\begin{align*}
\mathbf{s}_j^{(k)}\big(\overline{\eta}^{\{j\}}\big)
&=
\mathbf{s}_{j-1}^{(k-1)}(\eta)\overline{\eta}(j)
=\mathbf{s}_{j-1}^{(k-1)}(\eta)-\mathbf{s}_j^{(k)}(\eta).
\end{align*}\end{itemize}}
{Two changes of variables in the two terms of the summation in \eqref{eq:summation} then lead to the desired result.}
\end{proof}
Due to the analytical nature of the generalized binomial coefficients, a combinatorial interpretation of the whole model is not appropriate, as opposed to the integer case. Additionally, the problem of quantifying how, fixed some configuration, the rates change by varying $ m $ is not easy since the rates {depend in a complex manner on} $ m $ and the behaviour of the rate (with respect to $ m $) is different for distinct configurations. Instead of doing an extensive study of the form of the rates, we gather information about some simple monotonicity aspects of the model. We show that the reinforcement/penalization of the SSEP {given} in \eqref{PMM_rewrite} {is} non-increasing in $ k $; {then we derive} a property of the interpolating family analogous to Proposition \ref{prop:r_seq}; and {finally} we plot on Figure \ref{fig:1} the rates in some equivalence classes of configurations which cover the values of $ c_N^{(m-1)}(\eta) $. This is, to our mind, a satisfying solution to observe the continuous deformation of the SSEP into a slow or fast diffusion model.
\begin{Prop}\label{prop:m_mono}
For any fixed $ \eta\in\Omega_N $ and $ m\in [0,2] $, the sequence $ \{\abs{\binom{m}{k}}\mathbf{c}^{(k-1)}(\overline{\eta})\}_{k\geqslant 2} $
is decreasing up to the smallest $ k $ such that $ \mathbf{c}^{(k-1)}(\overline{\eta})=0 $.
\end{Prop}
\begin{proof}
Recall that we proved in Proposition \ref{prop:r_seq} that for any $ \eta\in\Omega_N $ the sequence $ \big\{\frac1k \mathbf{c}^{(k-1)}(\eta)\big\}_{k\geqslant 1} $ is non-increasing. From the definition of the binomial coefficients, for $m\in(0,2)$ the sequence $\big\{k\abs{\binom{m}{k}}\big\}_{k\geqslant 2}$ is decreasing, since
\[ (k+1)\abs{\binom{m}{k+1}} = k \abs{\binom{m}{k}} \; \frac{|m-k|}{k},\]
and whenever $k\geqslant 2$ and $m\in(0,2)$ we have $|m-k|=k-m<k$.
\end{proof}
Before stating the monotonicity property, note that we have the following limit
\begin{align*}
\lim_{m \searrow 0}\frac{1}{m}c_N^{(m-1)}(\eta)
=\sum_{k=0}^{\ell_N-1}\frac{\mathbf{c}^{(k)}(\overline{\eta})}{k+1}.
\end{align*}
\begin{Prop} \label{prop:m_mono2}
For any $ \eta\in\Omega_N $ the sequence $ \{\frac1mc_N^{(m-1)}(\eta)\}_{m\in[0,2]} $ is non-increasing.
\end{Prop}
\begin{proof}
From Proposition \ref{prop:low_bound_r} we can extract that $ \frac1mc_N^{(m-1)}\geqslant \mathbf{c}^{(0)} $ for $ m\in(0,1) $, and $ \mathbf{c}^{(0)}\geqslant \frac1mc_N^{(m-1)} $ for $ m\in(1,2) $. It remains to see the monotonicity of the sequence in the statement {according to the values of} $ m\in[0,2]\backslash\{1\} $. Assuming that the aforementioned sequence is non-increasing, since the binomial coefficients are continuous functions of $ m $ the interpolation property allows us to take the limit $ m\to 2 $ and as such we only need to focus on $ m\in[0,2)\backslash\{1\} $. Rewrite
\begin{align*}
\frac1m c_N^{(m-1)}(\eta)
&=
\mathbf{1}_{\{m\in(0,1)\}}\sum_{k=0}^{\ell_N-1} \abs{\binom{m-1}{k}}\frac{\mathbf{c}^{(k)}(\overline{\eta})}{k+1}
+
\mathbf{1}_{\{m\in(1,2)\}}
\left(
1-\sum_{k=1}^{\ell_N-1} \abs{\binom{m-1}{k}}\frac{\mathbf{c}^{(k)}(\overline{\eta})}{k+1}
\right).
\end{align*}
For any $ k\geqslant 2 $ we compute
\begin{align*}
\frac{\mathrm{d}}{\mathrm{d} m}\abs{(m-1)_k}
&=-\abs{(m-1)_k}f_k(m)
\quad{\text{where}}\quad f_k(m):=\sum_{j=1}^k\frac{1}{j-m}.
\end{align*}
This means that
\begin{align}\label{mono_0}
\frac{\mathrm{d} }{\mathrm{d} m}\bigg(\frac1m c_N^{(m-1)}(\eta)\bigg)
&=-\frac{1}{2}\mathbf{c}^{(1)}(\overline{\eta})
+\text{sign}(m-1)\sum_{k=2}^{\ell_N-1} \abs{\binom{m-1}{k}}
f_k(m)
\frac{\mathbf{c}^{(k)}(\overline{\eta})}{k+1}.
\end{align}
If $ m\in[0,1) $ then $ f_k(m)>0 $ which concludes the proof. For $ m\in(1,2) $ we need some extra work. We claim that differentiating with respect to $ m $ both sides of
\begin{align}\label{mono_1}
0=1-\sum_{k=1}^{+\infty} \abs{\binom{m-1}{k}}
\quad\text{one obtains that}\quad
1=\sum_{k=2}^{+\infty} \abs{\binom{m-1}{k}}f_k(m).
\end{align}
For $ m\in[\frac32,2) $ we have $ f_k(m)>0 $ for all $ k\geqslant2 $ since $ f_2(m)>0 $ and $ f_k(m) $ is increasing in $ k $. If $ m\in(1,\frac32) $ then for each $ m $ there must be some $ k_0>2 $ such that $ f_k>0 $ for all $ k\geqslant k_0 $ so that the second summation on the previous display is equal to one. Let $ \ell_N $ be large enough so that $ k_0<\ell_N $ (otherwise the {result is obvious}). Then we can bound \eqref{mono_0} from above by taking the limit $ \ell_N\to+\infty $. Since the sequence of maps $ \{\frac{1}{k+1}\mathbf{c}^{(k)}\}_{k\geqslant0} $ is non-increasing, for any $ j\leqslant k_0\leqslant i$ we have
\begin{align*}
\frac{1}{i+1}\mathbf{c}^{(i)}
\leqslant
\frac{1}{k_0+1}\mathbf{c}^{(k_0)}
\leqslant
\frac{1}{j+1}\mathbf{c}^{(j)}.
\end{align*}
Then we can bound
\begin{align}\label{mono_2}
\begin{split}
\frac{\mathrm{d} }{\mathrm{d} m}\bigg(\frac1m c_N^{(m-1)}(\eta)\bigg)
&\leqslant
-\frac{1}{2}\mathbf{c}^{(1)}(\overline{\eta})
+\sum_{k=2}^{k_0-1} \abs{\binom{m-1}{k}}
f_k(m)
\frac{\mathbf{c}^{(k_0)}(\overline{\eta})}{k_0+1}
+\sum_{k\geqslant k_0}\abs{\binom{m-1}{k}}
f_k(m)
\frac{\mathbf{c}^{(k_0)}(\overline{\eta})}{k_0+1}
\\
&=
-\frac{\mathbf{c}^{(1)}(\overline{\eta})}{2}+\frac{\mathbf{c}^{(k_0)}(\overline{\eta})}{k_0+1}\leqslant 0.
\end{split}
\end{align}
To conclude the proof, it is enough to show that the sequence $ (a_n)_{n\geqslant2} $ given by $ 0<a_n:=\sum_{k=2}^{n} \abs{\binom{m-1}{k}}f_k(m) $ is uniformly bounded. Since $ f_1(m)<0 $, we first bound $ f_k(m) $ by the {corresponding} integral for $ k\geqslant2 $:
\begin{align*}
f_k(m)\lesssim\log(k-m)-\log(2-m).
\end{align*}
Recall the inequality $ \log x\leqslant \frac1s x^s $ for any $ x,s\in\mathbb{R}_+ $. From this and \eqref{eq:usefulbound} it holds
\begin{align*}
\sum_{k=2}^{n} \abs{\binom{m-1}{k}}f_k(m)
\lesssim
\sum_{k=2}^{n}\frac{1}{(k-m)^{m-s}}
-
\log(2-m)
\sum_{k=2}^{n}\abs{\binom{m-1}{k}}.
\end{align*}
Setting $ 0<s $ such that $ m-s>1 $, observing that the quantity on the right-hand side of the previous display is increasing in $ n $ and taking $ n\to+\infty $ we end the proof.
\end{proof}
We now plot the evolution of $ c_N^{(m-1)}(\eta)$ with respect to $m$ (for a fixed configuration $\eta$). To that aim, let us start with the following remark: for any $ k\geqslant {1} $ the value of $ \mathbf{c}^{(k)}(\overline\eta) $ is uniquely determined by the positions of the first particle to the left of $ 0 $ and the first particle to the right of $ 1 $. More precisely, for any $ x_0,x_1\in\mathbb{T}_N $ consider the set $ \Omega_N^{x_0,x_1}=\big\{\eta\in\Omega_N\; :\; \eta(-x_0)=\eta(x_1)=1,\; \eta(x)=0, \;\text{ for all }\: x\in\llbracket -x_0+1,x_1-1\rrbracket\backslash \{0,1\} \big\}. $
\begin{figure}
\caption{Configuration belonging to $\Omega_N^{2,4}$.}
\end{figure}
It is simple to see that if $ \eta_0,\eta_1\in\Omega_N^{x_0,x_1}$ then $ \mathbf{c}^{(k)}(\overline\eta_0)=\mathbf{c}^{(k)}(\overline\eta_1) $ for all $ k \geqslant {1}$. Therefore we obtain $ c_N^{(m-1)}(\eta_0)=c_N^{(m-1)}(\eta_1) $, and for every $ \eta\in\Omega_N^{x_0,x_1} $ one can plot $ c_N^{(m)}(\eta) $ as a function of $m$, as in Figure \ref{fig:1}. To that end, for each $ m,\ell_N,x_0 $ and $ x_1 $ fixed and $ \xi\in\Omega_N^{x_0,x_1} $ we introduce $ \tilde{c}_N(x_0,x_1,m)\equiv c_N^{(m-1)}(\xi) $ .
\begin{figure}\label{fig:1}
\end{figure}
We stress that the previous figure presents the value of the constraint $ c_N^{(m-1)}(\eta) $ (equivalently, the rate $ r_N^{(m-1)}(\eta) $ when $ \eta(0)+\eta(1)=1 $) for fixed $ x_0,x_1 $ and a representative $ \eta\in\Omega_N^{x_0,x_1} $, that is, a configuration with the first particle to the \textit{left} of the site $ 0 $ located at the site $ -x_0 $, and the first particle to the \textit{right} of the site $ 1 $ located at the site $ x_1 $. Note the symmetry of the plots with respect to $ x_0=x_1 $, which is a consequence of the symmetry of the jumps. For fixed $ m $ and $ \ell_N $, varying $ x_0 $ and $ x_1 $ allows us to see all the possible values of the constraints. For example, for $ m=1 $ the rate is equal to $ 1 $ independently of $ x_0,x_1 $, hence the sub-figure, in this case, has the same colour for all $ x_0,x_1\leqslant 40 $. For $ m=2 $ the rate is non-zero if and only if there is at least one particle located at the site $ -1 $ or at the site $ 2 $. In other words, $ \mathbf{c}^{(1)}(\eta)=\eta(-1)+\eta(2) $. Therefore, we obtain in the respective sub-figure the horizontal and vertical orange lines, where $ \mathbf{c}^{(1)}(\eta)=1 $ for any $ \eta\in \Omega_N^{1,x_1}\cup\Omega_N^{x_0,2} $ with $ x_0\geqslant2 $ and $ x_1\geqslant3 $ and $ \mathbf{c}^{(1)}=0 $ otherwise; while at $ x_0=1 $ and $ x_1=2 $ the constraint attains its largest value, \textit{i.e.,} $ \mathbf{c}^{(1)}(\eta)=2 $ for all $ \eta\in\Omega_N^{1,2} $. In the \textit{fast-diffusion} regime, we see a ``continuous'' increase of the rates as $ x_0,x_1 $ increase, while the opposite in the \textit{slow-diffusion} regime. This is a clear consequence of the penalization/reinforcement terms, as seen in \eqref{PMM_rewrite}.
\subsection{Main result}
To expose our main result about the hydrodynamic limit of the interpolating model we first introduce some definitions. Let us fix a finite time horizon $[0,T]$, let $\mu_N$ be an initial probability measure on $\Omega_N$, and let $\{\eta_{N^2t}\}_{t\geqslant 0}$ be the Markov process generated by $N^2\mathcal{L}_N^{(m-1)}$ for $ m\in(0,2)\backslash\{1\} $, given in \eqref{PMM:m1}.
\begin{Def}[Empirical measure]
For any $ \eta\in\Omega_N $ define the empirical measure $ \pi^N(\eta,\mathrm{d}u) $ on the continuous torus $\mathbb{T}$ by
\begin{align*}
\pi^N(\eta,\mathrm{d}u)=\frac1N \sum_{x\in\mathbb{T}_N}\eta(x)\delta_{x/N}(\mathrm{d}u)
\end{align*}
where $ \delta_{x/N} $ is the Dirac measure at the macroscopic point $ x/N $. Moreover, we define its time evolution {in the diffusive time scale} by $ \pi^N_t(\eta,\mathrm{d} u)=\pi^N(\eta_{N^2t},\mathrm{d} u) $. For any function $ G:\mathbb{T}\to\mathbb{R} $, we define the integral of $ G $ with respect to the empirical measure as
\begin{align}\label{int:emp}
\inner{\pi_t^N,G}
=\int_{\mathbb{T}}G(u)\pi_t^N(\eta,\mathrm{d} u)
=\frac{1}{N}\sum_{x\in\mathbb{T}_N}G(\tfrac{x}{N})\eta_{N^2t}(x).
\end{align}
\end{Def}
Let $ \mathcal{M}_+ $ be the space of positive measures on $ [0,1] $ with total mass no larger than $ 1 $ and endowed with the weak topology.
Let $ \mathcal{D}([0,T],\Omega_N) $ be the Skorokhod space of trajectories induced by $ \{\eta_{N^2t}\}_{t\in[0,T]} $ with initial measure $ \mu_N $. Denote by $ \mathbb{P}_{\mu_N} $ the induced probability measure on the space of trajectories $ \mathcal{D}([0,T],\Omega_N) $ and by $ \mathbb{Q}_N=\mathbb{P}_{\mu_N}\circ(\pi^N)^{-1} $ the probability measure on $ \mathcal{D}([0,T],\mathcal{M}_+) $ induced by $ \{\pi^N_t\}_{t\in[0,T]} $ and $ \mu_N $.
Now we introduce the notion of weak solutions to equation \eqref{PDE:formal} for $ m\in(0,2)$. For that purpose, for $ n\in\mathbb{N}_+\cup\{\infty\} $ let $ C^n(\mathbb{T}) $ be the set of $ n $ times continuously differentiable, real-valued functions defined on $ \mathbb{T} $; and let $ C^{n,p}([0,T]\times \mathbb{T}) $ be the set of all real-valued functions defined on $ [0,T]\times \mathbb{T} $ that are $ n $ times differentiable on the first variable and $ p $ times differentiable on the second variable and with continuous derivatives. Finally, for two functions $f,g\in L^2(\mathbb{T})$, $\langle f,g\rangle$ denotes their standard Euclidean product in $L^2(\mathbb{T})$ and $\|\cdot\|_{L^2(\mathbb{T})}$ is the associated norm. We remark that we use the notation $ \inner{\cdot,\cdot} $ {twice}, for the inner-product just introduced, and {also} in \eqref{int:emp}, although their difference will be clear from the context.
\begin{Def}[Sobolev space] \label{def:sob}
The semi inner-product $ \inner{\cdot,\cdot}_1 $ on the set $ C^\infty(\mathbb{T}) $ is given on $ G,H\in C^\infty(\mathbb{T}) $ by $ \inner{G,H}_1=\inner{\partial_uG,\partial_u H}=\int_\mathbb{T}\partial_uG(u)\partial_u H(u) \mathrm{d} u, $ and the associated semi-norm is denoted by $ \norm{\cdot}_1 $. Let $ \mathcal{H}^1(\mathbb{T}) $ be the Sobolev space on $ \mathbb{T} $, defined as the completion of $ C^\infty(\mathbb{T}) $ for the norm $ \norm{\cdot}_{\mathcal{H}^1(\mathbb{T})}^2=\norm{\cdot}^2_{L^2}+\norm{\cdot}_{1}^2 $, and let $ L^2([0,T];\mathcal{H}^1(\mathbb{T})) $ be the set of measurable functions $ f:[0,T]\to\mathcal{H}^1(\mathbb{T}) $ such that $ \int_0^T\norm{f_s}^2_{\mathcal{H}^1(\mathbb{T})}\mathrm{d} s<\infty $.
\end{Def}
\begin{Def}[Weak solutions to \eqref{PDE:formal}]\label{def:weak}
Let $ \rho^{\rm ini}:\mathbb{T}\to[0,1] $ be a measurable function. We say that $ \rho:[0,T]\times \mathbb{T}\to [0,1] $ is a weak solution of the FDE (resp. PME) with $ m\in(0,1) $ (resp. $ m\in(1,2) $) if
\begin{enumerate}
\item
\begin{enumerate}
\item For $ m\in(0,1) $ it holds $ \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $,
\item For $ m\in(1,2) $ it holds $ \rho^m\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $.
\end{enumerate}
\item For any $ t\in[0,T] $ and $ G\in C^{1,2}([0,T]\times \mathbb{T}) $ it holds that
\begin{equation}\label{weak}
F(\rho^{\rm ini},\rho,G,t)
:=
\inner{\rho_t,G_t}-\inner{\rho^{\rm ini},G_0}
-\int_0^t
\big\{
\inner{\rho_s,\partial_sG_s}
+\inner{(\rho_s)^m,\partial_{uu} G_s}
\big\}
\mathrm{d} s \equiv 0.
\end{equation}
\end{enumerate}
\end{Def}
In the appendix, Lemmas \ref{lem:uniq_FDE} and \ref{lem:uniq_PME}, we will show that the weak solution given {by} the last definition is unique, for $ m\in(0,1) $ and $ m\in(1,2) $, respectively.
\begin{Def}[Local equilibrium distribution]\label{def:ass}
Let $ \{\mu_N\}_{N\geqslant 1} $ be a sequence of probability measures on $ \Omega_N $, and let $f:\mathbb{T}\to[0,1] $ be a measurable function. If for any continuous function $ G:\mathbb{T}\to\mathbb{R} $ and every $ \delta>0 $ it holds
\begin{align*}
\lim_{N\to+\infty}\mu_N
\left(
\eta\in\Omega_N
:\abs{\inner{\pi^N,G}-\inner{f,G}}>\delta
\right)
=0,
\end{align*}
we say that the sequence $ \{\mu_N\}_{N\geqslant 1} $ is a \emph{local equilibrium measure} associated to the profile $ f $.
\end{Def}
\begin{Ex} An example of a measure satisfying Definition \ref{def:ass} is the product Bernoulli measure, given on $x\in\mathbb{T}_N$ by
\[ \nu_{\rho^{\rm ini}(\cdot)}^N(\eta\in\Omega_N:\;\eta(x)=1) = \rho^{\rm ini}\big(\tfrac x N\big), \] where $ \rho^{\rm ini}:\mathbb{T}\to[0,1] $ is a measurable Lipschitz profile. Then $\nu^N_{\rho^{\rm ini}(\cdot)}$ is a local equilibrium measure associated to $\rho^{\rm ini}$. For more details see, for instance, the proof of \cite[Theorem 2.2]{BMNS}.
\end{Ex}
{We are now ready to state the main result of this paper:}
\begin{Th}[Hydrodynamic limit] \label{theo:hydro}
Let $ \rho^{\rm ini}:\mathbb{T}\to[0,1] $ be a measurable function and let $ \{\mu_N\}_{N\geqslant 1} $ be a local equilibrium measure associated to it.
Then, for any $ t\in [0,T] $, $ \delta>0 $, any continuous function $ G:\mathbb{T}\to\mathbb{R} $, and $\mathbb{N}\ni\ell_N \to \infty$ such that $\ell_N\leqslant N $, it holds
\begin{align*}
\lim_{N\to+\infty}
\mathbb{P}_{\mu_N}
\left(
\abs{\inner{\pi_t^N,G}-\inner{\rho_t,G}}>\delta
\right)=0,
\end{align*}
where $ \rho $ is the unique weak solution of \eqref{PDE:formal} in the sense of Definition \ref{def:weak}, with initial condition $\rho^{\rm ini}$.
\end{Th}
\section{Proof of Theorem \ref{theo:hydro}}
\label{sec:HL}
We first outline the proof. As previously mentioned, to prove the hydrodynamic limit we use the classical entropy method introduced in \cite{GPV}. The general scheme is the following: we prove that the sequence of empirical measures is tight (as proved in Subsection \ref{subsec:tight}), which implies the existence of weakly convergent subsequences; and then we prove that the limiting measure is concentrated on paths of absolutely continuous measures with respect to the Lebesgue measure, whose density is a weak solution to the hydrodynamic equation \eqref{PDE:formal} (proved in Section \ref{subsec:char}). To do so we shall need an energy estimate (Section \ref{sec:energy}), which gives us some regularity of the solution to the PDE, and replacement lemmas (Section \ref{sec:replace}) whose role is to close the equations for {the limiting profile} at the microscopic level. Proving uniqueness of weak solutions (see Appendix \ref{app:PDE}), we see that the limit of the sequence of measures is then unique and we can conclude that the whole sequence converges to that limit.
We introduce some discrete operators that will be important in what follows. Let us extend Definition \ref{def:translation} to functions defined on $\mathbb{T}_N$ (instead of $\Omega_N)$.
Without loss of generality, we adopt the same notation. Namely, if $f:\mathbb{T}_N\to\mathbb{R}$ then its gradients are $ {\nabla}^+f=(\tau_1-\mathbf{1})f $ and $ {\nabla}^-f=(\mathbf{1}-\tau_{-1})f $, where $ \mathbf{1} $ is now the identity function defined on $ \mathbb{T}_N $.
Finally, for any $ N \in \mathbb{N}_+ $, we also define the \emph{rescaled gradients} on $ \mathbb{T}_N $ as
$ {\nabla}^{\pm,N}=N{\nabla}^\pm,
$
and the \emph{rescaled Laplacian} as
$ \Delta^N={\nabla}^{+,N}\circ {\nabla}^{-,N}
={\nabla}^{-,N}\circ {\nabla}^{+,N}.$
\subsection{Tightness}\label{subsec:tight}
Let us start by exploiting the gradient property of our model. Recall that we consider the evolution in the diffusive time scale $ tN^2$, that is, given by the generator
$
\mathcal{L}:=N^2\mathcal{L}_N^{(m-1)}.
$
From Dynkin's formula \cite[Appendix 1, Lemma 5.1]{KL:book}, we know that for any $ G\in C^{1,2}([0,T]\times\mathbb{T}) $
\begin{align}\label{dynk_0}
M_t^N(G)
:=\inner{\pi_t^N,G_t}-\inner{\pi_0^N,G_0}
&-\int_0^t
(\partial_s+\mathcal L) \inner{\pi_s^N, G_s}\mathrm{d} s
\end{align}
is a martingale with respect to the natural filtration of the process.
Observe that Lemma \ref{lem:grad} and a summation by parts imply that
\begin{align}\label{gen-inner}
\mathcal{L}\inner{\pi^N_s,G_s}
&=\frac{1}{N}\sum_{x\in\mathbb{T}_N}\Delta^NG_s(\tfrac{x}{N})\sum_{k=1}^{\ell_N}\binom{m}{k}(-1)^k\mathbf{h}^{(k-1)}_s(\tau_x\overline{\eta}),
\end{align}
where we defined for any $ k\in\mathbb{N} $ and any $ s\in[0,t] $ the time evolution $ \mathbf{h}_s^{(k-1)}(\eta)=\mathbf{h}^{(k-1)}(\eta_{N^2s}) $. We highlight the flip $ \eta\mapsto \overline{\eta} $ which comes from the definition of the rates in \eqref{eq:transitionrates}. Therefore the martingale rewrites as
\begin{align}\label{dynk}
\begin{split}
M_t^N(G)
=\inner{\pi_t^N,G_t}-\inner{\pi_0^N,G_0}
&-\int_0^t
\inner{\pi_s^N,\partial_s G_s}\mathrm{d} s
\\&-\int_0^t
\frac{1}{N}\sum_{x\in\mathbb{T}_N}\Delta^NG_s(\tfrac{x}{N})\sum_{k=1}^{\ell_N}\binom{m}{k}(-1)^k\mathbf{h}_s^{(k-1)}(\tau_x\overline{\eta})
\mathrm{d} s.
\end{split}
\end{align}
\begin{Prop}[Tightness]\label{prop:tight}
The sequence of probability measures $ (\mathbb{Q}_N)_{N\in\mathbb{N}} $ is tight with respect to the Skorokhod topology of $ \mathcal{D}\left([0,T],\mathcal{M}_+\right) $.
\end{Prop}
\begin{proof}
To prove tightness we resort to Aldous' conditions (see, for instance, \cite[proof of Proposition 4.1]{GMO22} or, equivalently, \cite[Proposition 3.3]{BDGN} for more details). Since the occupation variable is bounded {by 1}, it is enough to show that for all $ \epsilon>0 $
\begin{align}
\limsup_{\gamma\to0}\limsup_{N\to+\infty}
\mathbb{P}_{\mu_N}
\left(
\sup_{\abs{t-s}\leqslant\gamma}
\abs{
\inner{\pi^N_t,G}-\inner{\pi^N_s,G}
}>\epsilon
\right)=0,
\end{align}
where $ G $ is a time-independent function belonging to a dense subset of $ C([0,1]) $ with respect to the uniform topology. From the fact that $ M_t^N(G) $ is a martingale (with respect to the natural filtration of the process), the previous condition can be reduced to the study of the quadratic variation of \eqref{dynk_0} and the boundedness of the generator, \textit{i.e.,} it is enough to prove that
\begin{align}\label{aldous0}
\lim_{\gamma\to0}\limsup_{N\to+\infty}
\left\{\mathbb{P}_{\mu_N}\left(
\sup_{\abs{t-s}\leqslant\gamma}
\abs{
M^N_{t}(G)-M^N_{s}(G)
}
>\frac{\epsilon}{2}\right)
+
\mathbb{P}_{\mu_N}\left(
\sup_{\abs{t-s}\leqslant\gamma}\abs{
\int_s^t
{\mathcal{L}}\inner{\pi_s^N,G}\mathrm{d} s
}
>\frac{\epsilon}{2}
\right)
\right\}=0.
\end{align}
We apply the triangular, Jensen and Doob's inequalities in the first term above, and Proposition \ref{prop:pedro} in the second term, reducing to the treatment of
\begin{align}\label{aldous}
\limsup_{N\to+\infty}
\mathbb{E}_{\mu_N}\left[
\left(
M^N_{T}(G)
\right)^2
\right]^\frac12 =0
\quad\text{and}\quad
\lim_{\gamma\to0}\limsup_{N\to+\infty}
\mathbb{E}_{\mu_N}\left[
\abs{
\int_s^t
{\mathcal{L}}\inner{\pi_s^N,G}\mathrm{d} s
}
\right]=0.
\end{align}
Recalling from \cite[Appendix 1, Lemma 5.1]{KL:book} the expression for the quadratic variation of the martingale, we have that the first expectation in \eqref{aldous} equals
\begin{align*}
\mathbb{E}_{\mu_N}\left[
\int_0^{T}F_s^N(G)\mathrm{d} s
\right],
\quad\text{where}\quad
F_s^N(G)=N^2\left(
\mathcal{L}_N^{(m-1)}\inner{\pi_s^N,G}^2
-2\inner{\pi_s^N,G}\mathcal{L}_N^{(m-1)}\inner{\pi_s^N,G}
\right).
\end{align*}
Since our transition rates are symmetric, we get
\begin{align*}
F_s^N(G)
&=\frac{1}{N^2}\sum_{x\in\mathbb{T}_N}c_N^{(m-1)}(\tau_x\eta_{N^2s})
\left(\eta_{N^2s}(x+1)-\eta_{N^2s}(x)\right)^2
\left(\nabla^{+,N}G(\tfrac{x}{N})\right)^2
\\
&
{ \lesssim
\frac{1}{N^2}\norm{\partial_uG}_{L^\infty(\mathbb{T})}^{2}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}\sum_{x\in\mathbb{T}_N}\mathbf{r}^{(k-1)}(\tau_x\eta_{N^2s})
}
{ \lesssim\frac{1}{N}\norm{\partial_uG}_{L^\infty(\mathbb{T})}^{2},
}
\end{align*}
{where we used Lemma \ref{lem:up_speed} for the last inequality.} This concludes the proof of the first condition in \eqref{aldous}. For the second, we split the proof in two cases $m\in(0,1)$ and $m\in(1,2)$.
Assume first that $m \in (1,2)$. From {\eqref{expr:h2}} (or {more obviously} \eqref{expr:h1}) we have that $ |\mathbf{h}^{(k-1)}(\eta)|\leqslant k $. Therefore, using the inequality \eqref{ineq:sum_coeff}, the quantity \eqref{gen-inner} can be bounded from above by
\begin{align*}
\frac{1}{N}\sum_{x\in\mathbb{T}_N}\abs{\Delta^NG_s(\tfrac{x}{N})}\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}k
\lesssim
\norm{\partial_{uu} G}_{L^1(\mathbb{T})}+\frac1N,
\end{align*}
which implies the second requirement in \eqref{aldous}. This finishes the proof in the case $m\in(1,2)$.
For $ m\in(0,1) $ we need some extra work. Recalling that in the fast diffusion case the generator can be rewritten as in \eqref{PMM_rewrite}, we see that the second expectation in \eqref{aldous} equals
\begin{align}\label{tight:expect_fast}
\mathbb{E}_{\mu_N}\bigg[
\bigg|
\int_s^t
\frac1N\sum_{x\in\mathbb{T}_N}\Delta^N G(\tfrac{x}{N})\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}\tau_x\mathbf{h}^{(k-1)}(\overline{\eta}_{N^2s})
\mathrm{d} s
\bigg|
\bigg].
\end{align}
It will be fundamental to identify $ \mathbf{h}^{(k-1)} $ as a function of the constraints $ \mathbf{c}^{(k-1)} $, as in \eqref{expr:h2}. From the triangular inequality we bound the expectation \eqref{tight:expect_fast} from above by
\begin{multline}\label{h:treat_FDM}
\mathbb{E}_{\mu_N}\bigg[
\bigg|
\int_s^t
\frac1N\sum_{x\in\mathbb{T}_N}\Delta^N G(\tfrac{x}{N})\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\prod_{j=0}^{k-1}\overline{\eta}_{N^2s}(x+j)
\mathrm{d} s
\bigg|
\bigg]
\\
+
\frac1N\sum_{x\in\mathbb{T}_N}\abs{\Delta^N G(\tfrac{x}{N})}
\mathbb{E}_{\mu_N}\bigg[
\bigg|
\int_s^t
\sum_{k=1}^{\ell_N}
\abs{\binom{m}{k}}
\tau_x
\left\{
\sum_{i=0}^{k-2}
(\overline{\eta}_{N^2s}(i)-\overline{\eta}_{N^2s}(i+1))\sum_{j=1}^{k-1-i}\mathbf{s}_j^{(k-1)}(\tau_i\overline{\eta}_{N^2s})
\right\}
\mathrm{d} s
\bigg|
\bigg]
\end{multline}
where, by convention, $ \sum_{\emptyset}\equiv 0 $. Since $ m\in(0,1) $ and the process is of exclusion type, we have
\begin{align*}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\prod_{j=0}^{k-1}\overline{\eta}_{N^2s}(x+j)
\leqslant
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}< 1
\end{align*}
and due to the regularity of the test function the first expectation in \eqref{h:treat_FDM} can be bounded as:
\begin{align*}
\mathbb{E}_{\mu_N}\bigg[
\bigg|
\int_s^t
\frac1N\sum_{x\in\mathbb{T}_N}\Delta^N G(\tfrac{x}{N})\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\prod_{j=0}^{k-1}\overline{\eta}_{N^2s}(x+j)
\mathrm{d} s
\bigg|
\bigg]
\lesssim
\abs{t-s}\left(\norm{\partial_{uu}G}_{L^1(\mathbb{T})}+\frac1N\right).
\end{align*}
{The treatment of the second expectation in \eqref{h:treat_FDM} is more demanding. Concretely, since $ m\in(0,1) $ the tail of the series $ \sum_{k\geqslant1}\abs{\binom{m}{k}} $ is too heavy to either argue directly via Lemma \ref{lem:rep_FDM} or slow down the speed of explosion of $ \ell_N $ (as we shall do in a different context shortly), while maintaining $ \ell_N $ with no particular order of explosion. One then needs to invoke {the forthcoming replacement} Lemma \ref{lem:rep_FDM-tight} instead, by taking advantage of the particular expression of $ \mathbf{h}^{(k-1)} $ in \eqref{expr:h2} and bounding from above as
\begin{align*}
\sum_{i=1}^{k-2}
(\overline{\eta}(i)-\overline{\eta}(i+1))\sum_{j=1}^{k-1-i}\mathbf{s}_j^{(k-1)}(\tau_i\overline{\eta})
\leqslant
\sum_{i=1}^{k}
\abs{\overline{\eta}(i)-\overline{\eta}(i+1)}\mathbf{c}^{(k-1)}(\tau_i{\overline{\eta}})
=\sum_{i=1}^{k}\mathbf{r}^{(k-1)}(\tau_i{\overline{\eta}}).
\end{align*}
{One can now} use Lemma \ref{lem:rep_FDM-tight} {for each term of the summation over $ x\in\mathbb{T}_N $, with $ \varphi_i^{(k)}(\eta)=\sum_{j=1}^{k-i}\mathbf{s}_j^{(k)}(\tau_i\eta) $,} and obtain the {final} upper bound
\begin{align*}
\frac1B+\sigma B\frac{(\ell_N)^{1-m}}{N}.
\end{align*}
Recalling that $ \ell_N\leqslant N $ and $ 1-m\in(0,1) $, taking the limit $ N\to+\infty $ and then $ B\to+\infty $ we finish the proof.
}
\end{proof}
\subsection{Characterization of limit points}\label{subsec:char}
The goal of this subsection is to show that the limiting points of $ (\mathbb{Q}_N)_{N\in\mathbb{N}} $, which we know to {exist} as a consequence of the results of the previous section, are concentrated on trajectories of absolutely continuous measures with respect to the Lebesgue measure, whose density is a weak solution to either the FDE or the PME, depending on the value of $ m $. Showing the aforementioned {absolute} continuity is simple since we deal with an exclusion process, and its proof can be found (modulo small adaptations) for instance in \cite[page 57]{KL:book}. From this and the previous proposition, we know (without loss of generality) that for any $ t\in[0,T] $, the sequence $ (\pi_t^{N}(\eta,\mathrm{d} u))_{N\in\mathbb{N}} $ converges weakly with respect to $ \mathbb{Q}_N $ to an absolutely continuous measure $ \pi_{\cdot}(\mathrm{d} u)=\rho_\cdot(u)\mathrm{d} u $. In the next result we obtain information about the profile $ \rho $.
\begin{Prop}\label{prop:char}
For any limit point $ \mathbb{Q} $ of $ (\mathbb{Q}_N)_{N\in\mathbb{N}} $ it holds \[ \mathbb{Q}
\bigg(\pi\in\mathcal{D}([0,T],\mathcal{M}_+)\; : \; \begin{cases}
\text{for any } t\in[0,T],\; \pi_t(du)=\rho_t(u)du,\; \text{where } \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) \\ \text{for any } t \in [0,T] \text{ and any } G\in C^{1,2}([0,T]\times\mathbb T), \; F(\rho^{\rm ini},\rho,G,t) =0 \end{cases} \bigg)
=1, \]
where $F(\rho^{\rm ini},\rho,G,t)$ is given in \eqref{weak}.
\end{Prop}
Before showing Proposition \ref{prop:char}, we introduce some definitions and technical results.
\begin{Def}
For any $ x\in\mathbb{T}_N $ and $ \ell\in\mathbb{N} $ consider the following microscopic box of size $\ell$, and the empirical average over it, given by
\begin{align*}
\Lambda_x^\ell=\llbracket x,x+\ell-1\rrbracket,
\quad\text{and}\quad
\eta^\ell(x)=\frac1\ell \sum_{y\in\Lambda_x^\ell}\eta(y).
\end{align*}
Moreover, for $ \epsilon>0 $ and $ u,v\in\mathbb{T} $, let $ {\iota}_\epsilon^u(v)=\frac{1}{\epsilon}\mathbf{1}_{v\in[u,u+\epsilon)}$.
\end{Def}
\begin{Lemma}\label{lem:rho_emp-approx}
Let $ m\in(0,2)\backslash\{1\} $ be fixed. For any $ \epsilon>0 $, for a.e.~$ u\in\mathbb{T} $ and $ s\in[0,T] $ it holds that
\begin{align*}
\abs{\rho_s(u)-\inner{\pi_s,{\iota}^u_\epsilon}}\lesssim \epsilon^{\alpha},
\quad \text{ where }
\alpha\equiv \alpha(m):=\frac12 \mathbf{1}_{\{m\in(0,1)\}}+\frac14 \mathbf{1}_{\{m\in(1,2)\}}.
\end{align*}
\end{Lemma}
\begin{proof}
This is a direct consequence of the fact that $ \pi_t(\mathrm{d} u)=\rho_t(u)\mathrm{d} u $, plus two facts: first, we have that $\rho \text{ (resp.~} \rho^m\text{)}$ {belongs to} $L^2([0,T];\mathcal{H}^1(\mathbb{T}))$ for $ m\in(0,1) $ (resp.~$ m\in(1,2) $), and this will be proved in Section \ref{sec:energy}; and second, we have the H\"{o}lder continuity of $ \rho $ (see Proposition \ref{prop:continuity_FDE} {in the case $m \in (0,1)$} and Corollary \ref{cor:cont} {in the case $m\in (1,2)$}).
\end{proof}
\begin{Lemma}\label{lem:eps_seq}
Let $ m\in(0,2)\backslash\{1\} $ be fixed and take $ \alpha $ as in the previous lemma. For any $ \epsilon>0 $, consider the sequence $ (\epsilon_k)_{k\geqslant1} $ defined by
\[
\epsilon_k=k^{-\beta}\epsilon, \quad \text{ for some } \beta>\frac{2-m}{\alpha}>0.
\]
Then, for a.e.~$ u\in\mathbb{T} $ and $s\in[0,T]$, it holds that
\begin{align*}
\bigg|\sum_{k\geqslant 2}\binom{m}{k}(-1)^k(1-\rho_s(u))^{k}
-
\sum_{k\geqslant 2}\binom{m}{k}(-1)^k\prod_{j=0}^{k-1}
\left(1-\inner{\pi_s,{\iota}_{\epsilon_{k}}^{u+j\epsilon_{k}}}\right)
\bigg|\lesssim\epsilon^\alpha.
\end{align*}
\end{Lemma}
\begin{proof}
We first observe that for any $ a_0, b_0, a_1, b_1 $ we can rewrite $ a_0a_1=a_0(a_1-b_1)+(a_0-b_0)b_1+b_0b_1 $. With this rationale, summing and subtracting appropriate terms we can rewrite
\begin{align*}
(1-\rho_s(u))^k
=
\prod_{j=0}^{k-1}
\left(1-\inner{\pi_s,{\iota}_{\epsilon_{k}}^{u+j\epsilon_{k}}}\right)
+\delta_{k,s}(u),
\quad\text{with}\quad
\delta_{k,s}(u)
\leqslant
\sum_{i=0}^{k-1}\abs{\rho_s(u)-\inner{\pi_s,{\iota}_{\epsilon_{k}}^{u+i\epsilon_{k}}}}
\end{align*}
since for any $ u\in\mathbb{T} $ and $s\in[0,T]$ it holds that $ \rho_s(u)\leqslant 1 ,$ and $\inner{\pi_s,{\iota}_{\epsilon_{k}}^{u+i\epsilon_{k}}}\leqslant 1 $.
From Lemma \ref{lem:rho_emp-approx} and the H\"{o}lder continuity of $ \rho $ ({Proposition \ref{prop:continuity_FDE} and Corollary \ref{cor:cont}}), for any $ i\geqslant 1 $ we can estimate
\begin{align}\label{eps_seq:eq0}
\abs{\rho_s(u)-\inner{\pi_s,{\iota}_{\epsilon_{k}}^{u+i\epsilon_{k}}}}
\leqslant
\abs{
\rho_s(u+i\epsilon_{k})
-\inner{\pi,{\iota}_{\epsilon_{k}}^{u+i\epsilon_{k}}}
}
+\abs{
\rho_s(u+i\epsilon_{k})
-\rho_s(u)
}
\lesssim i\epsilon_k^\alpha.
\end{align}
For $ i=0 $ we resort directly to Lemma \ref{lem:rho_emp-approx}. In this way, and from the upper bound {of the binomial coefficient given} in Lemma \ref{lem:bin_bound} we have
\begin{align*}
\delta_{k,s}(u)
\lesssim
\epsilon_{k}^\alpha
\left(1
+
\sum_{i=1}^{k-1}
i
\right)
\lesssim
k^{2}\epsilon_{k}^\alpha
\quad \text{ which implies } \quad
\sum_{k\geqslant 2}\abs{\binom{m}{k}}\delta_{k,s}(u)
\lesssim
\epsilon^\alpha\sum_{k\geqslant 2}
\frac{1}{k^{m-1+\alpha\beta}}.
\end{align*}
The condition on $ \beta $ given in the statement of the lemma guarantees the convergence of the series above.
\end{proof}
The largest issue now is how to handle the products of occupation variables in the martingale decomposition \eqref{dynk}. The final goal is to close the equation, relating the correlation terms with the power terms in the weak formulation \eqref{weak}. The idea behind the forthcoming approach is to replace a product of $ \rho's $ by a product of empirical averages with respect to different, non-intersecting boxes. This last requirement avoids the correlations between the occupation variables on these microscopic boxes. For the macroscopic replacements to be justified, we need information on the regularity of the weak solution.
In order to prove the Proposition \ref{prop:char} we will make use of several replacement lemmas, whose statements and proofs will be given in Section \ref{sec:replace}. The fact that the limiting measure $\mathbb{Q}$ concentrates on absolute continuous trajectories of measures that have a density in the right Sobolev space is also provided by Proposition \ref{prop:power_in_sob}, proved in Section \ref{sec:energy}.
\begin{proof}[Proof of Proposition \ref{prop:char}.]
From Proposition \ref{prop:power_in_sob} we know that $ \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $. If $ \mathbb{Q} $ is a limit point of $ (\mathbb{Q}_N)_{N\in\mathbb{N}} $ then \[ \mathbb{Q}\big( \text{for any } t\in[0,T],\; \pi_t(du)=\rho_t(u)du,\; \text{where } \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T}))\big)=1 .\] In the weak formulation \eqref{weak}, let us replace $ \rho^m $ by its binomial expansion as in \eqref{eq:expand}. Since we are on the torus we have $ \inner{\partial_{uu}G,1}=0 $, and therefore the binomial series starts from the second term. {Otherwise, this would lead to boundary conditions.} In this way, it is enough to show that for any $ \delta>0 $ it holds
\begin{align}\label{q:prob_equiv}
\mathbb{Q}\left(
\sup_{t\in[0,T]}
\bigg|
\inner{G_t,\rho_t}-\inner{G_0,\rho^{\rm ini}}-\int_0^t\inner{\rho_s,\partial_sG_s}\mathrm{d} s
-\int_0^t
\Big\langle
\partial_{uu} G_s,
\sum_{k\geqslant 1}\binom{m}{k}(-1)^k
(1-\rho_s)^{k}
\Big\rangle
\mathrm{d} s\bigg|
>\delta
\right)=0.
\end{align}
Last probability is bounded from above by
\begin{align} \notag
\mathbb{Q}\Bigg(
\sup_{t\in[0,T]}
&\bigg|
\inner{G_t,\rho_t} -\inner{G_0,\rho_0}-\int_0^t\inner{\rho_s,\partial_sG_s}\mathrm{d} s
+m
\int_0^t
\inner{\partial_{uu}G_s,1-\rho_s}
\mathrm{d} s
\\
& \qquad\qquad \qquad \qquad
-\int_0^t
\sum_{k\geqslant 2}\binom{m}{k}(-1)^k
\Big\langle
\partial_{uu} G_s,
\prod_{j=0}^{k-1}
\big\langle
1-\pi_s,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}
\big\rangle
\Big\rangle \mathrm{d} s\bigg|>\frac{\delta}{2^2}
\Bigg)\label{prob_eq0:1}
\\\label{prob_eq0:2}
&+\mathbb{Q}\left(
\sup_{t\in[0,T]}
\bigg|
\int_0^t
\sum_{k\geqslant 2}\binom{m}{k}(-1)^k
\Big\langle
\partial_{uu} G_s,
(1-\rho_s)^{k}
-\prod_{j=0}^{k-1}
\big\langle
1-\pi_s,{\iota}_{\epsilon_{k}}^{\cdot\;+j\epsilon_{k}}
\big\rangle
\Big\rangle \mathrm{d} s
\bigg|>\frac{\delta}{2}\right)
\\
&+\mathbb{Q}
\left(
\big|
\inner{G_0,\rho_0-\rho^{\rm ini}}
\big|
>\frac{\delta}{2^2}
\right),\label{prob_eq0:3}
\end{align}
with $ (\epsilon_k)_{k\geqslant0} $ as in Lemma \ref{lem:eps_seq}, with $ \beta $ {there} still to be fixed. Observe that the third probability \eqref{prob_eq0:3} is equal to zero since the initial probability measure $ \mu_N $ is a local equilibrium measure associated to the profile $ \rho^{\rm ini} $. From Markov's inequality and Lemma \ref{lem:eps_seq}, the second probability \eqref{prob_eq0:2} is no larger than $ 2\epsilon^\alpha/\delta $, reducing us to treat the first probability \eqref{prob_eq0:1}.
\par
We now want to apply Portmanteau's Theorem, and relate the micro and macro scales. For that purpose we need to argue that the whole function of our trajectories is continuous with respect to the Skorokhod topology, thus preserving the open sets. Although this is not the case due to the cutoff functions $\iota_\varepsilon$, one can perform approximations of these functions by continuous functions, as in \cite{FGN} and \cite{BDGN}. Moreover, one has to be careful with the martingale \eqref{dynk} which involves a \textit{finite} sum. We first treat the truncation problem, then the continuity. Let us fix $ 1<\ell_{1/\epsilon}\xrightarrow[\epsilon\to0]{}+\infty $.
Note that for any bounded sequence $ (a_k)_{k\geqslant1} $ we have
\begin{align*}
\bigg| \sum_{k\geqslant 2}
\binom{m}{k}(-1)^ka_k
-
\sum_{k=2}^{\ell_{1/\epsilon}}
\binom{m}{k}(-1)^ka_k\bigg|
\lesssim
\frac{1}{(\ell_{1/\epsilon})^m}.
\end{align*}
In this way, we truncate the sum in some $ \ell_{1/\epsilon} $ step, approximate the necessary functions by continuous functions, apply Portmanteau's Theorem and then replace back the approximated functions with a vanishing error, as $\varepsilon \to 0$. As for the continuity problem, since $ G $ is continuous, one needs only to mollify the terms involving the cut-off functions $ {\iota_\varepsilon} $. Since these terms are bounded in $ L^\infty(\mathbb{T}) $ this approximation by smooth functions converges a.e.~to the original functions (see for instance the proof of Lemma \ref{lem:uniq_FDE}). However, it will be important to perform this approximation with some care and take advantage of $ (1-\rho)^k $ being decreasing in $ k $ to mollify each product of $ k $ terms in a small neighbourhood depending on $ k $. More precisely, fix $ \epsilon>0 $ and $ t\in(0,T], $ and for any $ 0\leqslant s\leqslant t $ consider the map
\begin{align*}
\pi
\mapsto
\Phi_\pi^\epsilon(s,\cdot)
=\inner{\pi_s,{\iota}^{\cdot}_\epsilon}
,\qquad \pi\in\mathcal{D}([0,T],\mathcal{M}_+).
\end{align*}
Note that $ \Phi_\pi^\epsilon(s,\cdot) $ can be discontinuous. Let $ \varphi $ be some mollifier and for each $\tilde{\epsilon}>0$, define $ \varphi_{\tilde{\epsilon}}(u)={\tilde{\epsilon}}^{-1}\varphi({\tilde{\epsilon}}^{-1}u) $. One can argue that the convolution function $ \Phi_\pi^\epsilon\star\varphi_{\tilde{\epsilon}} $ is a continuous approximation of $ \Phi_\pi^\epsilon $ from the fact that $ \rho $ is $\alpha$-H\"{o}lder continuous, with $ \alpha $ as in Lemma \ref{lem:rho_emp-approx} and the convergence is uniform:
\begin{align*}
\left(\Phi_\pi^\epsilon(s,\cdot)\star\varphi_{\tilde{\epsilon}}\right)(u)
-\Phi_\pi^\epsilon(s,u)
&=\int_{\mathbb{T}}
\varphi_{\tilde{\epsilon}}(z)
\left(
\Phi_\pi^\epsilon(s,u-z)-\Phi_\pi^\epsilon(s,u)
\right)\mathrm{d} z,
\end{align*}
hence by continuity of $ \rho $
\begin{align*}
\abs{\Phi_\pi^\epsilon(s,u-z)-\Phi_\pi^\epsilon(s,u)}
=\frac{1}{\epsilon}\abs{\int_u^{u+\epsilon}\big(\rho_s(w-z)-\rho_s(w)\big)\mathrm{d} w}
\lesssim
\frac{1}{\epsilon}\int_u^{u+\epsilon}\abs{z}^\alpha \mathrm{d} w
=\abs{z}^\alpha.
\end{align*}
In this way, since $ \varphi_{\tilde{\epsilon}} $ is normalized we have that $ \abs{\left(\Phi_\pi^\epsilon(s,\cdot)\star\varphi_{\tilde{\epsilon}}\right)(u)-\Phi_\pi^\epsilon(s,u)}\lesssim \tilde{\epsilon}^\alpha $,
and we conclude that
\begin{align}\label{conv-to-moll}
\sup_{u\in\mathbb{T}}
\abs{
\left(\Phi_\pi^\epsilon(s,\cdot)\star\varphi_{\tilde{\epsilon}}\right)(u)
-\Phi_\pi^\epsilon(s,u)
}
\lesssim
\tilde{\epsilon}^\alpha.
\end{align}
In particular, consider the sequence $ (\tilde{\epsilon}_k)_{k\geqslant1} $ with $ \tilde{\epsilon}_k=k^{-\beta}\tilde{\epsilon} $, similarly to the sequence $ (\epsilon_k)_{k\geqslant1} $ in Lemma \ref{lem:eps_seq},
\begin{align*}
\abs{
\sum_{k=2}^{\ell_{1/\epsilon}}\binom{m}{k}(-1)^k
\left[
\prod_{j=0}^{k-1}
\big(
1-\Phi_\pi^{\epsilon_{k}}(s,u+j\epsilon_{k})
\big)
-\prod_{j=0}^{k-1}
\Big(
\big(1-(\Phi_\pi^{\epsilon_{k}}(s,\cdot+j\epsilon_{k})\star \varphi_{{\tilde{\epsilon_{k}}}})(u)\big)
\Big)
\right]
}
\\
\leqslant
\sum_{k=2}^{\ell_{1/\epsilon}}\abs{\binom{m}{k}}
\sum_{j=0}^{k-1}\big|
\Phi_\pi^{\epsilon_{k}}(s,u+j\epsilon_{k})
-(\Phi_\pi^{\epsilon_{k}}(s,\cdot+j\epsilon_{k})\star \varphi_{{\tilde{\epsilon_{k}}}})(u)
\big|
\end{align*}
where from \eqref{conv-to-moll} we see that the right-hand side of last display can be bounded from above by some positive constant times
\begin{align*}
\sum_{k=2}^{\ell_{1/\epsilon}}
\frac{\tilde{\epsilon}_k^\alpha}{k^m}
=\tilde{\epsilon}^\alpha
\sum_{k=2}^{\ell_{1/\epsilon}}
\frac{1}{k^{m+\alpha\beta}}
\lesssim \tilde{\epsilon}^\alpha,
\end{align*}
{and in the last estimate above} we used the fact that $ m+\alpha\beta>1 $, since by hypothesis we have $ m+\alpha\beta>2 $. At this point, it remains to show that the map
\begin{multline*}
\pi\mapsto
\sup_{t\in[0,T]}
\bigg|
\inner{\pi_t,G_t}-\inner{G_0,\rho_0}-\int_0^t\inner{\pi_s,\partial_sG_s}\mathrm{d} s
+m\int_0^t\inner{\partial_u^2G_s,1-\pi_s}\mathrm{d} s
\\
-\int_0^t
\sum_{k=2}^{\ell_{1/\epsilon}}\binom{m}{k}(-1)^k
\Big\langle
\partial_{uu} G_s,
\prod_{j=0}^{k-1}
\left(\inner{1-\pi_s,{\iota}_{\epsilon_{k}}^{\cdot\;+j\epsilon_{k}}}
\star \varphi_{{\tilde{\epsilon_k}}}\right)(\cdot)
\Big\rangle\mathrm{d} s\bigg|
\end{multline*}
is continuous with respect to the Skorokhod weak topology. From \cite[Proposition A.3]{FGN} it is enough to show the continuity of the map
\begin{align*}
\pi\mapsto
\sup_{t\in[0,T]}
\bigg|
\int_0^t
\sum_{k=2}^{\ell_{1/\epsilon}}\binom{m}{k}(-1)^k
\Big\langle
\partial_u^2 G_s,
\prod_{j=0}^{k-1}
\left(\inner{1-\pi_s,{\iota}_{\epsilon_{k}}^{\cdot\; +j\epsilon_{k}}}
\star \varphi_{{\tilde{\epsilon_k}}}\right)(\cdot)
\Big\rangle \mathrm{d} s\bigg|,
\end{align*}
which can be done using the definition of the Skorokhod metric and is also consequence of our definition of the sequences $ (\tilde{\epsilon}_k)_{k\geqslant1} $ and $ (\epsilon_k)_{k\geqslant1} $. Applying Portmanteau's Theorem, we are reduced to treat
\begin{align}\label{on_Q_N}
\begin{split}
\liminf_{N\to+\infty}\mathbb{Q}_N
\bigg(
\sup_{t\in[0,T]}
\bigg|
\inner{\pi_t^N,G_t}
&-\inner{G_0,\rho_0}
-\int_0^t\inner{\pi_s^N,\partial_sG_s}\mathrm{d} s
+m\int_0^t\inner{\partial_{uu}G_s,1-\pi_s^N}\mathrm{d} s
\\
&
-\int_0^t
\sum_{k=2}^{\ell_{1/\epsilon}}\binom{m}{k}(-1)^k
\Big\langle
\partial_{uu} G_s,
\prod_{j=0}^{k-1}
\left(
\inner{1-\pi_s^N,{\iota}_{\epsilon_{k}}^{\cdot\; +j\epsilon_{k}}}
\star \varphi_{{\tilde{\epsilon_k}}}
\right)(\cdot)
\Big\rangle \mathrm{d} s
\bigg|>\frac{\delta}{2^4}
\bigg).
\end{split}
\end{align}
We stress that, although for small $ \epsilon>0 $ we can have $ \ell_{1/\epsilon}>N $, for $ N $ fixed, the sum
\begin{align*}
\sum_{k=2}^{\ell_{1/\epsilon}}\binom{m}{k}(-1)^k
\prod_{j=0}^{k-1}
\left(\inner{1-\pi_s^N,{\iota}_{\epsilon_{k}}^{\cdot\; +j\epsilon_{k}}}
\star \varphi_{{\tilde{\epsilon_k}}}\right)(u)
\end{align*}
is indeed well-defined for any $ u\in\mathbb{T} $ and one obtains, for $ k $ large enough, repeated terms in the product above. Now we can replace back $ \big(\inner{\pi_s^N,{\iota}_{\epsilon_{k}}^{\cdot\;+j\epsilon_{k}}}\star \varphi_{{\tilde{\epsilon_k}}}\big)(\cdot) $ by $ \inner{\pi_s^N,{\iota}_{\epsilon_{k}}^{\cdot\;+j\epsilon_{k}}} $ with the previous rationale. For fixed $ N $, since the martingale \eqref{dynk} involves a sum up to $ \ell_N $, we compare again
\begin{align}\label{truncate2}
\abs{
\sum_{k= 2}^{\ell_N+1}\binom{m}{k}(-1)^ka_k
-\sum_{k=2}^{\ell_{1/\epsilon}}\binom{m}{k}(-1)^ka_k
}
\lesssim
\abs{(\ell_{1/\epsilon})^{-m}-(\ell_N)^{-m}}.
\end{align}
Summing and subtracting the appropriate terms, and recalling \eqref{dynk}, the first probability \eqref{prob_eq0:1}, after the aforementioned replacements, is no larger than the sum of terms of order $ (\ell_{1/\epsilon})^{-m},\epsilon^\alpha,\tilde{\epsilon}^\alpha $ and also of order $ \abs{(\ell_{1/\epsilon})^{-m}-(\ell_N)^{-m}} $ plus
\begin{align}
\notag&\liminf_{N\to+\infty}\mathbb{Q}_N\left(
\sup_{t\in[0,T]}
\left|
M_t^N(G)+\sum_{k=2}^{\ell_N}\binom{m}{k}(-1)^k\int_0^t
\frac{1}{N}\sum_{x\in\mathbb{T}_N}\Delta^NG_s(\tfrac{x}{N})\mathbf{h}^{(k-1)}_s(\tau_x\overline{\eta})
\mathrm{d} s
\right.\right.\\
\notag&\qquad\qquad\qquad\qquad\qquad\;\left.\left.
+\sum_{k=2}^{\ell_N}\binom{m}{k}(-1)^k\int_0^t
\Big\langle
\partial_{uu}G_s,
\prod_{j=0}^{k-1}
\inner{1-\pi_s^N,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}
\Big\rangle \mathrm{d} s\right|>\frac{\delta}{2^6}
\right)\\
\notag&\leqslant
\mathbb{P}_{\mu_N}
\left(
\sup_{t\in[0,T]}
\abs{
\sum_{k=2}^{\ell_N}\binom{m}{k}(-1)^k
\int_0^t
\Big\langle
\partial_{uu}G_s-\Delta^NG_s,
\prod_{j=0}^{k-1}
\inner{1-\pi_s^N,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}
\Big\rangle \mathrm{d} s
}>\frac{\delta}{3\times2^6}
\right)
\\
\notag&\quad+
\mathbb{P}_{\mu_N}
\left(
\sup_{t\in[0,T]}
\left|
\sum_{k=2}^{\ell_N}\binom{m}{k}(-1)^k
\int_0^t
\frac1N \sum_{x\in\mathbb{T}_N}
\Delta^N G_s(\tfrac{x}{N})
\bigg[
\prod_{j=0}^{k-1}
\inner{1-\pi_s^N,{\iota}_{\epsilon_{k}}^{\frac{x}{N}+j\epsilon_{k}}}
-\mathbf{h}_s^{(k-1)}(\tau_x\overline{\eta})
\bigg]\mathrm{d} s
\right|>\frac{\delta}{3\times2^6}
\right)
\\
&
\quad+\mathbb{P}_{\mu_N}
\left(
\sup_{t\in[0,T]}
\abs{
M_t^N(G)
}>\frac{\delta}{3\times2^6}
\right)
.
\end{align}
Note that the linear term $ \inner{\partial_{uu}G_s,1-\pi_s^N} $ in \eqref{on_Q_N} was absorbed into the martingale $ M_t^N(G) $, and so the challenge is to treat the non-linear terms. The first probability on the right-hand side above vanishes as $ N\to+\infty $ since $ G_s\in C^2(\mathbb{T}) $ for all $ s\in[0,t] $; the second probability is treated using the replacement lemmas with a scheme that we present shortly; the third with Doob's inequality and the proof of the first condition in \eqref{aldous}. Let us give more details for the second one. {Recall the second expression of $ \mathbf{h}^{(k)} $ from Lemma \ref{lem:grad}. We split the second probability on the right-hand side of last display into
\begin{align}\label{eq:h_prob}
&\mathbb{P}_{\mu_N}
\left(
\sup_{t\in[0,T]}
\left|
\sum_{k=2}^{\ell_N}\binom{m}{k}(-1)^k
\int_0^t
\frac1N \sum_{x\in\mathbb{T}_N}
\Delta^N G_s(\tfrac{x}{N})
\times\right.\right.
\\\notag&
\qquad\qquad\qquad\left.\left.
\times\tau_x
\left\{
\sum_{i=0}^{k-2}
(\eta_{N^2s}(i)-\eta_{N^2s}(i+1))\sum_{j=1}^{k-1-i}\mathbf{s}_j^{(k-1)}(\tau_i\overline{\eta}_{N^2s})
\right\}
\mathrm{d} s
\right|>\frac{\delta}{3\times2^7}
\right)
\\
\notag&+\mathbb{P}_{\mu_N}
\left(
\sup_{t\in[0,T]}
\left|
\sum_{k=2}^{\ell_N}\binom{m}{k}(-1)^k
\int_0^t
\frac1N \sum_{x\in\mathbb{T}_N}
\Delta^N G_s(\tfrac{x}{N})
\bigg[
\prod_{j=0}^{k-1}
\inner{1-\pi_s^N,{\iota}_{\epsilon_{k}}^{\frac{x}{N}+j\epsilon_{k}}}
-\prod_{i=0}^{k-1}\overline{\eta}_{N^2s}(x+i)
\bigg]\mathrm{d} s
\right|>\frac{\delta}{3\times2^7}
\right).
\end{align}
Focus on the first probability in the previous display. We apply Proposition \ref{prop:pedro} and triangle's inequality and then pass the summation over $ x $ to outside the expectation. For $ m\in(0,1) $, since the summation starts at $ k=2 $, the resulting quantity is treated using both Lemma \ref{lem:rep_FDM-tight} and Lemma \ref{lem:rep_FDM} for each term of the summation over $ x $ with, for each $ x $ fixed and $ i\in\{0,\dots,k-2\} $,
\begin{align*}
\varphi_i^{(k-1)}(s,\eta)=\Delta^N G_s(\tfrac{x}{N})\sum_{j=1}^{k-1-i}\mathbf{s}_j^{(k-1)}(\tau_{i+x}\overline{\eta})
\leqslant
\norm{\Delta^N G}_{L^\infty([0,T]\times\mathbb{T}_N)}
\mathbf{c}^{(k-1)}(\tau_{i+x}\overline{\eta}_{N^2s}),
\end{align*}
estimating it by
\begin{align*}
\frac1B+TB\frac{(\ell_N)^{1-m}}{N},
\end{align*}
for any $ B>0 $, which will be taken to infinity after $ N\to+\infty $.
For $ m\in(1,2) $ we could either prove an analogue of Lemma \ref{lem:rep_FDM-tight} for the slow regime, or take advantage of the tail of the sum of the binomial coefficients being just light enough, in this regime, to slow down the explosion of $ \ell_N $, avoiding further restrictions. We present the second alternative. {Let
\begin{align}\label{PMM:n0}
0<n<\frac{2-m}{5-m}.
\end{align}}
Since
\begin{align*}
\sum_{i=1}^{k-2}
(\eta_{N^2s}(i)-\eta_{N^2s}(i+1))\sum_{j=1}^{k-1-i}\mathbf{s}_j^{(k-1)}(\tau_i\overline{\eta}_{N^2s})
\leqslant k
\end{align*}
we can estimate
\begin{align*}
\sum_{k=(\ell_N)^n+1}^{\ell_N}&\binom{m}{k}(-1)^k
\frac1N \sum_{x\in\mathbb{T}_N}
\Delta^N G_s(\tfrac{x}{N})
\sum_{i=1}^{k-2}
\sum_{j=1}^{k-1-i}
\tau_x
\left\{
(\eta_{N^2s}(i)-\eta_{N^2s}(i+1))\mathbf{s}_j^{(k-1)}(\tau_i\overline{\eta}_{N^2s})
\right\}
\\
& \leqslant
\frac1N \sum_{x\in\mathbb{T}_N}
\abs{\Delta^N G_s(\tfrac{x}{N})}
\sum_{k=(\ell_N)^n}^{\ell_N}\abs{\binom{m}{k}}
k
\\
& \lesssim
\frac1N \sum_{x\in\mathbb{T}_N}
\abs{\Delta^N G_s(\tfrac{x}{N})}
\left(\frac{1}{(\ell_N)^{n(m-1)}}-\frac{1}{(\ell_N)^{m-1}}\right),
\end{align*}
which vanishes by taking the limit $ N\to+\infty $. This means that we can replace the summation up to $ \ell_N $ by a summation up to $ (\ell_N)^n $. In this way,
from Proposition \ref{prop:pedro}, the previous truncation at $ (\ell_N)^n $ and triangle's inequalities, we are then reduced to treating
\begin{align*}
\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}\sum_{i=1}^{k-2}
\sum_{j=1}^{k-1-i}
\frac1N \sum_{x\in\mathbb{T}_N}
\mathbb{E}_{\mu_N}
\left[
\abs{
\int_0^t\Delta^N G_s(\tfrac{x}{N})
\tau_x
\left\{
(\eta_{N^2s}(i)-\eta_{N^2s}(i+1))\mathbf{s}_j^{(k-1)}(\tau_i\overline{\eta}_{N^2s})
\right\}
\mathrm{d} s
}
\right].
\end{align*}
Applying the replacement Lemma \ref{lem:rep_shift} to each term of the sum over $ j $ with $ \varphi(s,\eta)=\Delta^N G_s(\tfrac{x}{N})\mathbf{s}_j^{(k-1)}(\tau_{i+x}\overline{\eta}) $
we obtain an upper bound of the order of
\begin{align*}
\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}k^2
\left(
\frac{1}{B_k}
+
TB_k\frac{(\ell_N)^{m-1}}{N}
\right).
\end{align*}
{
Let $ B_k=kB>0 $. Then last display is bounded from above by some constant times
\begin{align*}
\frac1B \sum_{k=2}^{(\ell_N)^n}
\frac{1}{k^{m}}
+
TB\frac{(\ell_N)^{m-1}}{N}
\sum_{k=2}^{(\ell_N)^n}\frac{1}{k^{m-2}}
\lesssim \frac1B
+
TB\left(\frac{(\ell_N)^{m-1+n(3-m)}}{N}
+\frac{(\ell_N)^{m-1}}{N}\right),
\end{align*}
and the right-hand side converges to zero as $ N\to+\infty $ and $ B\to+\infty $ since by the definition of $ n $ in \eqref{PMM:n0} we have $ m-1+n(3-m)<1 $.
}
Now the main goal is to estimate {for $ m\in(0,2)\backslash\{1\} $} the quantity
\begin{align}\label{rep1:eq1}
&\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}\;
\mathbb{E}_{\mu_N}
\left[
\bigg|
\int_0^t
\frac1N \sum_{x\in\mathbb{T}_N}
\Delta^N G_s(\tfrac{x}{N})
\bigg(\prod_{j=0}^{k-1}
\left(1-\inner{\pi_s^N,{\iota}_{\epsilon_{k}}^{\frac{x}{N}+j\epsilon_{k}}}\right)
-
\prod_{i=0}^{k-1}\overline{\eta}_{N^2s}(x+i)\bigg)
\mathrm{d} s \bigg|
\right]
\end{align}
where, again, we applied Proposition \ref{prop:pedro}. It will be important to slow down the explosion $ \ell_N\to+\infty $ for $ m\in(0,1) $ too before applying repeatedly the replacement lemmas. Consider the sequence $ (a_k)_{k\geqslant1} $ with $ a_k\equiv a_k(t,G,\eta) $ defined by
\begin{align*}
a_k
=\bigg|
\int_0^t
\frac1N \sum_{x\in\mathbb{T}_N}
\Delta^N G_s(\tfrac{x}{N})
\bigg(\prod_{j=0}^{k-1}
\left(1-\inner{\pi_s^N,{\iota}_{\epsilon_{k}}^{\frac{x}{N}+j\epsilon_{k}}}\right)
-
\prod_{i=0}^{k-1}\overline{\eta}_{N^2s}(x+i)\bigg)
\mathrm{d} s \bigg|.
\end{align*}
From the triangle inequality and the fact that $ G_s\in C^2(\mathbb{T}) $ it is simple to see that the sequence $ (a_k)_k $ is uniformly bounded by $ \int_0^t N^{-1}\sum_{x\in\mathbb{T}_N}\abs{\Delta^NG_s(\tfrac{x}{N})}\mathrm{d} s\xrightarrow[N\to+\infty]{}\norm{\partial_{uu}G}_{L^1([0,T]\times\mathbb{T})}<\infty $. In particular,
\begin{align*}
\abs{
\sum_{k=2}^{\ell_N}\abs{\binom{m}{k}}a_k
-\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}a_k
}
=\sum_{k=(\ell_N)^n+1}^{\ell_N}\abs{\binom{m}{k}}a_k
\lesssim (\ell_N)^{-nm}-(\ell_N)^{-m}\xrightarrow[N\to+\infty]{}0.
\end{align*}
In this way, the treatment of \eqref{rep1:eq1} gives place to the treatment of
\begin{align}\label{rep1:eq0}
&\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}
\frac1N \sum_{x\in\mathbb{T}_N}\;
\mathbb{E}_{\mu_N}
\left[
\bigg|
\int_0^t
\Delta^N G_s(\tfrac{x}{N})
\bigg(\prod_{j=0}^{k-1}
\left(1-\inner{\pi_s^N,{\iota}_{\epsilon_{k}}^{\frac{x}{N}+j\epsilon_{k}}}\right)
-
\prod_{i=0}^{k-1}\overline{\eta}_{N^2s}(x+i)\bigg)
\mathrm{d} s \bigg|
\right].
\end{align}}
To treat \eqref{rep1:eq0} we now split into the slow and fast diffusion cases. In what follows, we fix $ \beta=4 $ in Lemma \ref{lem:eps_seq}, considering thus the sequence $ (\epsilon_k)_{k\geqslant1} $ with $ \epsilon_k=k^{-4}\epsilon $.
\textsc{$\bullet$ Slow-diffusion,} $ m\in(1,2) $: We can follow a slightly simplified version of the scheme in \cite{BDGN}. Consider a {non-increasing} sequence $ (L_k)_{k\geqslant 1}\subseteq \mathbb{N} $ having in mind that for each $ k,N\in\mathbb{N} $ we have $ L_k\equiv L_k(N) $. We will fix this sequence shortly. In what follows, we define $ \prod_{\emptyset}=1 $. The forthcoming lemmas will be applied to each term of the summation over $ x\in\mathbb{T}_N $.
\begin{enumerate}\addtocounter{enumi}{0}
\item \textit{Rearrangements}: rewrite
\end{enumerate}
\begin{align}\label{step1}
\prod_{j=0}^{k-1}\overline{\eta}(jL_{k})
-\prod_{j=0}^{k-1}\overline{\eta}(j)
=\left(
\eta(iL_{k})-\eta(i)
\right)
\tilde{\varphi}_i^{(1)}(\eta)
\end{align}
where for every $ i\in\{1,\dots,k-1\} $ we defined $ \tilde{\varphi}_i^{(1)}(\eta)=\prod_{j=0}^{i-1}\overline{\eta}(j)\prod_{j=i+1}^{k-1}\overline{\eta}(jL_{k}) $. The random variable $\varphi_i^{(1)}(s,\eta)\equiv \Delta^NG_s(\tfrac{x}{N})\tilde{\varphi}_i^{(1)}(\tau_x\eta) $ is independent of the occupation variables at sites $ \llbracket i,iL_{k}\rrbracket $ and, fixed $ x $ and applying the triangle inequality we treat each term of the summation over $ i $ in \eqref{step1} with Lemma \ref{lem:rep_shift}. With the choice $ B_k=Bk^{-b_1} $ for $ k\geqslant 2 $, with $ B>0 $ {and $ 0<b_1<m-1 $}, we obtain an upper bound of the order of
\begin{equation}\label{step1:est}
\begin{split}
\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}\sum_{i=1}^{k-1}
\left\{
\frac{1}{B_k}+TB_kiL_{k}\frac{(\ell_N)^{m-1}}{N}
\right\}
&\lesssim
\sum_{k=2}^{(\ell_N)^n}\frac{1}{k^m}
\left\{
\frac{1}{B_k}+\frac{(\ell_N)^{m-1}}{N}TB_kL_kk
\right\}
\\
&=\frac1B\sum_{k=2}^{(\ell_N)^n}\frac{1}{k^{m-b_1}}
+
\frac{(\ell_N)^{m-1}}{N}TBL
\sum_{k=2}^{(\ell_N)^n}\frac{1}{k^{m+b_1+3}}.
\end{split}
\end{equation}
{Note that both summations converge when taking the limit $ N\to+\infty $ since $ m-b_1,m+b_1+3>1 $.} At this point, we can set $ L=(\ell_N)^{2-m} $, then recall that $ \ell_N\leqslant N $ and take the limits accordingly;
\begin{enumerate}\addtocounter{enumi}{1}
\item \textit{One-block estimates}: rewrite
\end{enumerate}
\begin{equation}\label{step2}
\prod_{j=0}^{k-1}\overline{\eta}^{L_{k}}(jL_{k})
-\prod_{j=0}^{k-1}\overline{\eta}(jL_{k})
=
\sum_{i=0}^{k-1}
\left(
\eta^{L_{k}}(iL_{k})-\eta(iL_{k})
\right)
\tilde{\varphi}_i^{(2)}(\eta)
\end{equation}
where for every $ i\in\{1,\dots,k-1\} $ we defined $ \tilde{\varphi}_i^{(2)}(\eta)=\prod_{j=0}^{i-1}\overline{\eta}^{L_{k}}(jL_{k})\prod_{j=i+1}^{k-1}\overline{\eta}(jL_{k}) $. The random variable $ \varphi_i^{(2)}(s,\eta)\equiv \Delta^NG_s(\tfrac{x}{N})\tilde{\varphi}_i^{(2)}(\tau_x\eta) $ is independent of the occupation variables at sites $ \llbracket iL_{k},(i+1)L_{k}-1 \rrbracket $ and, fixed $ x $ and applying the triangle inequality we treat each term of the summation over $ i $ in \eqref{step2} with Corollary \ref{cor:rep_ell_box}. We obtain an upper bound of the order of
\begin{align*}
\sum_{k=2}^{(\ell_N)^n}
\abs{\binom{m}{k}}
\sum_{i=0}^{k-1}
\left\{
\frac{1}{B_k}
+TB_kL_{k}\frac{(\ell_N)^{m-1}}{N}
\right\}.
\end{align*}
This quantity is no larger than the quantity on the left-hand side of \eqref{step1:est}, therefore the same rationale used there guarantees that these errors vanish by taking the limits;
\begin{enumerate}\addtocounter{enumi}{2}
\item \textit{Two-block estimates}: rewrite
\end{enumerate}
\begin{align}\label{step3}
\prod_{j=0}^{k-1}\overline{\eta}^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})
-\prod_{j=0}^{k-1}\overline{\eta}^{L_{k}}(jL_{k})
=\sum_{i=0}^{k-1}
\left(
\eta^{\floor{N\epsilon_{k}}}(i\floor{N\epsilon_{k}})
-\eta^{L_{k}}(iL_{k})
\right)
\tilde{\varphi}_i^{(3)}(\eta)
\end{align}
where for every $ i\in\{1,\dots,k-1\} $ we defined $ \tilde{\varphi}_i^{(3)}(\eta)=\prod_{j=0}^{i-1}\overline{\eta}^{L_{k}}(jL_{k})\prod_{j=i+1}^{k-1}\overline{\eta}^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}}) $. The random variable $ \varphi_i^{(3)}(s,\eta)\equiv \Delta^NG_s(\tfrac{x}{N})\tilde{\varphi}_i^{(3)}(\tau_x\eta) $ is independent of the occupation variables at sites contained in \[ \llbracket iL_{k},(i+1)\floor{N\epsilon_{k}}-1 \rrbracket\cup \llbracket iL_{k},iL_{k}+\floor{N\epsilon_{k}}-1\rrbracket \] provided $ \floor{N\epsilon_{k}}\geqslant L_{k} $, that is, $ \floor{N\epsilon}\geqslant L. $ Fixed $ x $ and applying the triangle inequality we treat each term of the summation over $ i $ in \eqref{step3} with Lemma \ref{lem:rep_boxes}, leading to an upper bound of the order of
\begin{multline*}
\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}
\sum_{i=0}^{k-1}
\left\{\frac{1}{B_k}
+T
\left[
\frac{1}{L_{k}}
+B_k
\left(
\frac{L_{k}(\ell_N)^{m-1}}{N}
+\frac{iL_{k}}{N}
+\epsilon_{k}(i+1)
\right)
\right]\right\}
\\
\lesssim
\sum_{k=2}^{(\ell_N)^n}\frac{1}{B_kk^{m}}
+T\sum_{k=2}^{(\ell_N)^n}\frac{1}{L_kk^{m}}
+\frac{T(\ell_N)^{m-1}}{N}\sum_{k=2}^{(\ell_N)^n}\frac{B_kL_k}{k^{m}}
+T\sum_{k=2}^{(\ell_N)^n}\frac{B_k}{k^{m-1}}\left(\frac{L_k}{N}+\epsilon_k\right).
\end{multline*}
{Fix $ B_k=Bk^{-b_3}>0 $ with $ B>0 $ and $ 0<b_3<m-1 $.} We analyse each term above. From $ m-1>b_3 $ and $ m-4<1 $ we have that
\begin{align*}
\sum_{k=2}^{(\ell_N)^n}\frac{1}{B_kk^{m}}
\lesssim \frac1B
\quad\text{and}\quad
\sum_{k=2}^{(\ell_N)^n}\frac{1}{L_kk^{m}}
\lesssim
\frac{1}{L}
\ell_N^{n(5-m)}
=(\ell_N)^{m-2+n(5-m)},
\end{align*}
respectively. From \eqref{PMM:n0} it holds $ m-2+n(5-m)<0 $. Note that this is, indeed, the constraint \eqref{PMM:n0}, and defines the largest interval $ n $ can belong to. Similarly, since $ b_3>0 $ we have
\begin{align*}
\frac{(\ell_N)^{m-1}}{N}\sum_{k=2}^{(\ell_N)^n}\frac{B_kL_k}{k^{m}}
\lesssim
B\frac{\ell_N}{N}
\quad\text{and}\quad
\sum_{k=2}^{(\ell_N)^n}\frac{B_k}{k^{m-1}}\left(\frac{L_k}{N}+\epsilon_k\right)
\lesssim
B\left(\frac{(\ell_N)^{2-m}}{N}+\epsilon\right),
\end{align*}
respectively;
\begin{enumerate}\addtocounter{enumi}{3}
\item \textit{Conclusion}: rewrite
\end{enumerate}
\begin{multline}\label{step4:1}
\prod_{j=0}^{k-1}
\left(1-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{j\epsilon_{k}}}\right)
-\prod_{j=0}^{k-1}\left(1-\eta^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})\right)
\\
=\sum_{i=0}^{k-1}
\left\{
\bigg[
\prod_{j=0}^{i-1}\big(1-{\eta}^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})\big)
\bigg]
\left(
{\eta}^{\floor{N\epsilon_{k}}}(i\floor{N\epsilon_{k}})-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{i\epsilon_{k}}}
\right)
\bigg[
\prod_{j=i+1}^{k-1}\big(1-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{j\epsilon_{k}}}\big)
\bigg]
\right\},
\end{multline}
and since $ \abs{\inner{\pi_s^N,{\iota}_{\epsilon}^{\frac{x}{N}}}-\eta_{N^2s}^{\floor{N\epsilon}}(x)}\leqslant\floor{N\epsilon}^{-1} $, the previous display is no larger than $ k\floor{N\epsilon}^{-1} $. This way, we need to bound from above
\begin{align*}
\sum_{k=2}^{(\ell_N)^n}\abs{\binom{m}{k}}\frac{k}{\floor{N\epsilon_{k}}}
\lesssim
\frac{1}{\floor{N\epsilon}}\sum_{k=2}^{(\ell_N)^n}\frac{1}{k^{m-4}}
\lesssim\frac{\ell_N^{n(5-m)}}{\floor{N\epsilon}}.
\end{align*}
{Note that since $ 2-m<1 $, by the definition of $ n $ in \eqref{PMM:n0} we have $ n(5-m)<1 $.}
To conclude the proof it is enough to recall that $ \ell_N\leqslant N $, and then take the limit $ N\to+\infty $ and $ \epsilon\to0 $ and then $ B\to+\infty $.
\textsc{$\bullet$ Fast-diffusion,} $ m\in(0,1) $: Recall that the goal is to treat \eqref{rep1:eq0}. The strategy now is similar but simpler than for the slow diffusion case. The specific maps $ \varphi:\Omega_N\to\mathbb{R} $ in the statement of the replacement lemmas in Subsection \ref{lem:rep_FDM-tight} can be introduced analogously to the slow-diffusion case, therefore we omit their definition.
\begin{enumerate}\addtocounter{enumi}{0}
\item \textit{Rearrangements}: rewrite
\end{enumerate}
\begin{align}\label{FDMstep1}
\prod_{j=0}^{k-1}\overline{\eta}(j\floor{N\epsilon_{k}})
-\prod_{j=0}^{k-1}\overline{\eta}(j)
=\sum_{i=1}^{k-1}\bigg[\prod_{j=0}^{i-1}\overline{\eta}(j)\bigg]
\left(
\eta(i\floor{N\epsilon_{k}})-\eta(i)
\right)
\bigg[\prod_{j=i+1}^{k-1}\overline{\eta}(j\floor{N\epsilon_{k}})\bigg]
\end{align}
and apply Lemma \ref{lem:rep_FDM} to each term of the summation in $ i $, with the total cost of the order of
\begin{align*}
\sum_{k=1}^{(\ell_N)^n}\abs{\binom{m}{k}}
\sum_{i=1}^{k-1}
\left\{
\frac{1}{B_k}+TB_k\frac{i(\floor{N\epsilon_{k}}-1)}{N}
\right\}
\lesssim
\sum_{k=1}^{(\ell_N)^n}
\left\{
\frac{1}{B_kk^{m}}
+\epsilon T\frac{B_k}{k^{3+m}}
\right\}
,
\end{align*}
for any $ B_k>0.$ {The choice $ B_k=Bk^{b_1}>0 $ with $ B>0 $ and $ 1-m<b_1<m+2 $ guarantees the convergence of the series as $ N\to+\infty $.}
\begin{enumerate}\addtocounter{enumi}{1}
\item \textit{One-block and two-blocks estimates}: rewrite
\end{enumerate}
\begin{multline}\label{FDMstep2}
\prod_{j=0}^{k-1}\overline{\eta}^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})
-\prod_{j=0}^{k-1}\overline{\eta}(j\floor{N\epsilon_{k}})\\
=
\sum_{i=0}^{k-1}
\bigg[\prod_{j=0}^{i-1}\overline{\eta}^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})\bigg]
\left(
\eta^{\floor{N\epsilon_{k}}}(i\floor{N\epsilon_{k}})-\eta(i\floor{N\epsilon_{k}})
\right)
\bigg[ \prod_{j=i+1}^{k-1}\overline{\eta}(j\floor{N\epsilon_{k}}) \bigg]
\end{multline}
and apply Lemma \ref{lem:rep_FDM} to each term of the summation in $ i $, leading to an upper bound of the order of
\begin{align*}
\sum_{k=1}^{(\ell_N)^n}\abs{\binom{m}{k}}
\sum_{i=0}^{k-1}
\bigg\{
\frac{1}{B_k}
+TB_k\frac{1}{\floor{N\epsilon_{k}}}
\sum_{y\in \Lambda^{\floor{N\epsilon_{k}}}_{i\floor{N\epsilon_{k}}}}
\frac{\abs{i\floor{N\epsilon_{k}}-y}}{N}
\bigg\}
&\lesssim
\sum_{k=1}^{(\ell_N)^n}
\frac{1}{B_kk^{m}}
+
\epsilon T\sum_{k=1}^{(\ell_N)^n}
\frac{B_k}{k^{4+m}}.
\end{align*}
The choice $ B_k=Bk^{b_2}>0 $ with $ B>0 $ and $ 1-m<b_2<3+m $ guarantees the convergence as $ N\to+\infty $.
\begin{enumerate}\addtocounter{enumi}{2}
\item \textit{Conclusion}: rewrite
\end{enumerate}
\begin{align}
\prod_{j=0}^{k-1}
\left(1-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{j\epsilon_{k}}}\right)
&-\prod_{j=0}^{k-1}\left(1-\eta^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})\right)
\notag \\
&=\sum_{i=0}^{k-1}
\bigg[\prod_{j=0}^{i-1}\big(1-\eta^{\floor{N\epsilon_{k}}}(j\floor{N\epsilon_{k}})\big)\bigg]
\left(
\eta^{\floor{N\epsilon_{k}}}(i\floor{N\epsilon_{k}})-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{i\epsilon_{k}}}
\right)
\bigg[\prod_{j=i+1}^{k-1}\big(1-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{j\epsilon_{k}}}\big)\bigg],\label{FDMstep3}
\end{align}
and proceed as in \eqref{step4:1}, leading to an upper bound of the order of
\begin{align*}
\sum_{k=1}^{(\ell_N)^n}\abs{\binom{m}{k}}
\frac{k}{\floor{N\epsilon_{k}}}
\lesssim
\frac{1}{\floor{N\epsilon}}
\sum_{k=1}^{(\ell_N)^n}
\frac{1}{k^{m-4}}
\lesssim
\frac{1}{\floor{N\epsilon}}(\ell_N)^{n(5-m)}.
\end{align*}
It is enough to fix $ n>0 $ such that $ n(5-m)<1 $.
To conclude one takes the corresponding limits as previously.
\end{proof}
\section{Replacement Lemmas}\label{sec:replace}
\subsection{Dirichlet forms}
We start with some definitions and notation.
\begin{Def}[Dirichlet Form and Carré du Champ]
For a probability measure $ \mu $ on $ \Omega_N $ and $ f:\Omega_N\to\mathbb{R} $ density with respect to $ \mu $, we define the Dirichlet form for any $ m\in(0,2] $ as
\begin{align*}
\mathcal{E}_N^{(m-1)}(f,\mu)
=\inner{f,\big(-\mathcal{L}_N^{(m-1)}\big)f}_\mu
=\int_{\Omega_N} f(\eta)\sum_{x\in\mathbb{T}_N}\big(-r_N^{(m-1)}(\tau_x\eta)\big)(\nabla_{x,x+1}f)(\eta) \mu(\mathrm{d}\eta)
\end{align*}
and the non-negative quadratic form
\begin{align*}
\Gamma_N^{(m-1)}(f,\mu)
=\int_{\Omega_N}\sum_{x\in\mathbb{T}_N}r_N^{(m-1)}(\tau_x\eta)\left[\left(\nabla_{x,x+1}f\right)(\eta)\right]^2\mu(\mathrm{d}\eta).
\end{align*}
\end{Def}
We remark that rewriting $- a(b-a)=(a-b)^2/2+(a^2-b^2)/2 $, one obtains the identity
\begin{align}\label{id:dir-car}
\frac12\Gamma_N^{(m-1)}(\sqrt{f},\mu)
=\mathcal{E}_N^{(m-1)}(\sqrt{f},\mu)
+\frac{1}{2}\big\langle \mathcal{L}_N^{(m-1)}f\big\rangle_\mu.
\end{align}
The key observation in order to proceed similarly to \cite{BDGN} is the following proposition.
\begin{Prop}[Energy lower bound]\label{prop:energy}
Let $ \nu_\gamma^N $ be the Bernoulli product measure on $ \Omega_N $ where $ \gamma:[0,1]\to(0,1) $ {is either Lipschitz non-constant or constant,} and let $ f $ be a density with respect to $ \nu_\gamma^N $. For any $ m\in(0,2)\backslash \{1\} $ and any $ N\in\mathbb{N}_+ $ such that $ \ell_N\geqslant 2 $ it holds
\begin{align}\label{dir:bound}
\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\geqslant
& \; \mathbf{1}_{\{m\in(1,2)\}}\frac{m}{4}
\left(
\delta_N\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)
+\frac{m-1}{2}\Gamma_N^{(1)}(\sqrt{f},\nu_\gamma^N)
\right)
+\mathbf{1}_{\{m\in(0,1)\}}\frac14\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)
-\frac{\mathbf{c}_\gamma}{4N} \vphantom{\Bigg(}
\end{align}
where $ \mathbf{c}_\gamma>0 $ if $ \gamma $ is Lipschitz {non-constant}, or $ \mathbf{c}_\gamma=0 $ if $ \gamma $ is constant.
\end{Prop}
\begin{Rem}
We highlight that, throughout the rest of the article, we will fix $ \nu_{\gamma}^N $, with $ \gamma(\cdot)\equiv \gamma\in(0,1) $ a constant function as the reference measure. We chose to present the previous result with $ \gamma(\cdot) $ also not constant since it is a fundamental step in order to extend the present model to the open boundary setting.
\end{Rem}
\begin{proof}[Proof of Proposition \ref{prop:energy}]
Recalling the identity \eqref{id:dir-car}, let us focus on the rightmost term there. Note that $ b-a=\sqrt{a}(\sqrt{b}-\sqrt{a})+\sqrt{b}(\sqrt{b}-\sqrt{a}) $, thus
\begin{align*}
\big\langle \mathcal{L}_N^{(m-1)}f\big\rangle_{\nu_\gamma^N}
&=\sum_{x\in\mathbb{T}_N}\int_{\eta\in\Omega_N}
r_N^{(m-1)}(\tau_x\eta)\sqrt{f}(\eta)\left(\nabla_{x,x+1}\sqrt{f}\right)(\eta)\nu^N_\gamma(\mathrm{d}\eta)\\
&\quad +\sum_{x\in\mathbb{T}_N}\int_{\eta\in\Omega_N}
r_N^{(m-1)}(\tau_x\eta)\sqrt{f}(\eta^{x,x+1})\left(\nabla_{x,x+1}\sqrt{f}\right)(\eta)\nu^N_\gamma(\mathrm{d}\eta).
\end{align*}
Performing the change of variables $ \eta\mapsto\eta^{x,x+1} $ on the second term above and using the symmetry of the rates, $ r_N^{(m-1)}(\tau_x\eta^{x,x+1})=r_N^{(m-1)}(\tau_x\eta) $, we obtain
\begin{align*}
\big\langle \mathcal{L}_N^{(m-1)}f\big\rangle_{\nu_\gamma^N}
=\sum_{x\in\mathbb{T}_N}\sum_{\eta\in\Omega_N}
r_N^{(m-1)}(\tau_x\eta)\sqrt{f}(\eta)\left(\nabla_{x,x+1}\sqrt{f}\right)(\eta)
\bigg(1-\frac{\nu_\gamma^N(\eta^{x,x+1})}{\nu_\gamma^N(\eta)}\bigg)
\nu_\gamma^N(\eta).
\end{align*}
Note that the previous quantity equals zero if $ \gamma(\cdot) $ is constant. Otherwise, applying Young's inequality with $ A>0 $,
\begin{equation*}
\sqrt{f}(\eta)\left(\nabla_{x,x+1}\sqrt{f}\right)(\eta)
\bigg(1-\frac{\nu_\gamma^N(\eta^{x,x+1})}{\nu_\gamma^N(\eta)}\bigg)
\leqslant
\frac{1}{2A}\abs{\left(\nabla_{x,x+1}\sqrt{f}\right)(\eta)}^2
+\frac{A}{2}
f(\eta)\bigg|1-\frac{\nu_\gamma^N(\eta^{x,x+1})}{\nu_\gamma^N(\eta)}\bigg|^2
\end{equation*}
and therefore
\begin{align*}
\big\langle \mathcal{L}_N^{(m-1)}f\big\rangle_{\nu_\gamma^N}
\leqslant
\frac{1}{2A}\Gamma_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
+
\frac{A}{2}\sum_{x\in\mathbb{T}_N}\sum_{\eta\in\Omega_N}
r_N^{(m-1)}(\tau_x\eta)\bigg|1-\frac{\nu_\gamma^N(\eta^{x,x+1})}{\nu_\gamma^N(\eta)}\bigg|^2f(\eta)\nu_\gamma^N(\eta),
\end{align*}
where $ \abs{1-\nu_\gamma^N(\eta^{x,x+1})/\nu_\gamma^N(\eta)}^2\leqslant \mathbf{c}_\gamma N^{-2} $ with $ \mathbf{c}_\gamma>0 $ for $ \gamma(\cdot) $ a Lipschitz function. From Lemma \ref{lem:up_speed} we can bound
\begin{align*}
\sum_{x\in\mathbb{T}_N}
r_N^{(m-1)}(\tau_x\eta)
\leqslant
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\sum_{x\in\mathbb{T}_N}
\mathbf{r}^{(k-1)}(\tau_x\eta)
\leqslant
2\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}(N+k-1)\lesssim N.
\end{align*}
In this way, recalling that $ f $ is a density with respect to $ \nu_\gamma^N $, we obtain the upper bound
\begin{align*}
\big\langle \mathcal{L}_N^{(m-1)}f\big\rangle_{\nu_\gamma^N}
&\leqslant
\frac{1}{2A}\Gamma_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
+
\frac{A}{2}\frac{\mathbf{c}_\gamma}{N}.
\end{align*}
Plugging this upper bound into identity \eqref{id:dir-car} with the choice $ A=1 $ we obtain
\begin{align}\label{energyup:eq0}
\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\geqslant
\frac14\Gamma_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
-\frac{\mathbf{c}_\gamma}{4N}.
\end{align}
To finish the proof, we see that the lower bounds in Proposition \ref{prop:low_bound_r} imply that
\begin{align*}
\Gamma_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\geqslant
\mathbf{1}_{m\in(1,2)}
m\left[
\delta_N\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)+\frac{m-1}{2}\Gamma_N^{(1)}(\sqrt{f},\nu_\gamma^N)
\right]
+\mathbf{1}_{m\in(0,1)}\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N).
\end{align*}
\end{proof}
The next two technical results are standard but will be invoked in the proof of the replacement lemmas and in their applications. As such, we present them here for future reference.
\begin{Lemma}\label{lem:change}
Consider $ x,y\in\mathbb{T} $ and let $ \varphi:[0,T]\times\Omega_N\to\mathbb{R} $ be invariant for the map $ \eta\mapsto\eta^{x,y} $. Moreover, consider the measure $ \nu_{\gamma}^N $ with $ \gamma(\cdot)\in(0,1) $ a constant function and let $ f:\Omega_N\to\mathbb{R} $. For all $ s\in[0,T] $ it holds that
\begin{align*}
\int_{\Omega_N}\varphi(s,\eta)(\eta(x)-\eta(y))f(\eta)\nu_{\gamma}^N(\mathrm{d}\eta)
=\frac12\int_{\Omega_N}\varphi(s,\eta)(\eta(y)-\eta(x))(f(\eta^{x,y})-f(\eta))\nu_{\gamma}^N(\mathrm{d}\eta)
\end{align*}
\end{Lemma}
\begin{proof}
Summing and subtracting the appropriate term we have
\begin{align*}
\int_{\Omega_N}\varphi(s,\eta)(\eta(x)-\eta(y))f(\eta)\nu_{\gamma}^N(\mathrm{d}\eta)
&=\frac12 \int_{\Omega_N}\varphi(s,\eta)(\eta(x)-\eta(y))(f(\eta)-f(\eta^{x,y}))\nu_{\gamma}^N(\mathrm{d}\eta)
\\
&+\frac12 \int_{\Omega_N}\varphi(s,\eta)(\eta(x)-\eta(y))(f(\eta)+f(\eta^{x,y}))\nu_{\gamma}^N(\mathrm{d}\eta).
\end{align*}
To see that the second term in the right-hand side equals zero, simply note that performing the change of variables $ \eta\mapsto\eta^{x,y} $ and using that $ \varphi(s,\eta^{x,y})=\varphi(s,\eta) $ and $ \nu_\gamma^N(\eta^{x,y})=\nu_\gamma^N(\eta) $ we obtain
\begin{align*}
\int_{\Omega_N}\varphi(s,\eta)(\eta(x)-\eta(y))f(\eta^{x,y})\nu_{\gamma}^N(\mathrm{d}\eta)
&=-\int_{\Omega_N}\varphi(s,\eta)(\eta(x)-\eta(y))f(\eta)\nu_{\gamma}^N(\mathrm{d}\eta).
\end{align*}
\end{proof}
The next proposition is applied in the second term in \eqref{aldous0} and in \eqref{eq:h_prob}
\begin{Prop}\cite[Lemma 4.3.2]{phd:pedro}.\label{prop:pedro}
Assume there exists a family $ \mathcal{F} $ of functions $ F_{N,\epsilon}:[0,T]\times \mathcal{D}([0,T],\Omega)\to\mathbb{R} $ satisfying
\begin{align*}
\sup_{\substack{\epsilon\in(0,1),N\geqslant1\\s\in[0,T],\eta\in\mathcal{D}([0,T],\Omega)}}
\abs{F_{N,\epsilon}(s,\eta)}\leqslant M<\infty.
\end{align*}
Above, the interval $ (0,1) $ for $ \epsilon $ is arbitrary. We also assume that for all $ t\in[0,T] $,
\begin{align*}
\limsup_{\epsilon\to0^+}\limsup_{N\to+\infty}
\mathbb{E}_{\mu_N}
\left[
\abs{
\int_0^tF_{N,\epsilon}(s,\eta_s)\mathrm{d} s
}
\right]=0.
\end{align*}
Then we have for all $ \delta>0 $,
\begin{align*}
\limsup_{\epsilon\to0^+}\limsup_{N\to+\infty}\mathbb{P}_{\mu_N}
\left(
\sup_{t\in[0,T]}
\abs{
\int_0^t F_{N,\epsilon}(s,\eta_s)\mathrm{d} s
}
>\delta
\right)=0.
\end{align*}
\end{Prop}
\subsection{Replacement Lemmas for \texorpdfstring{$m\in(1,2)$}{m in (1,2)}}
\begin{Lemma}\label{lem:rep_shift}
Consider $ x,y\in\mathbb{T}_N $. Let $ \varphi:[0,T]\times\Omega_N\to\mathbb{R} $ be such that $ \norm{\varphi}_{L^\infty([0,T]\times\Omega_N)}\leqslant c_\varphi<\infty $ and invariant for the map $ \eta\mapsto\eta^{z,z+1} $ with $ z\in\llbracket x,y-1\rrbracket $. Then for all $ B>0 $ and for all $ t\in[0,T] $
\begin{align*}
\mathbb{E}_{\mu_N}
\left[\abs{
\int_0^t
\varphi(s,\eta_{N^2s})
(
\eta_{N^2s}(x)-\eta_{N^2s}(y)
)
\mathrm{d} s} \right]
\lesssim
\frac{1}{B}
+
TB\abs{y-x}\frac{(\ell_N)^{m-1}}{N}
.
\end{align*}
\end{Lemma}
\begin{proof}
From the entropy inequality (see \cite[Appendix 1, Chapter 8]{KL:book}) with $ \nu_\gamma^N $ as reference measure and the Feynman--Kac formula (see \cite[page 14]{BMNS} for instance), we bound the previous expectation from above by
\begin{align}\label{rep0:var0}
\frac{c_\gamma}{B}
+\int_0^T
\sup_{f
}
\left\{
\abs{
\inner{
\varphi(s,\eta)
(
\eta(x)-\eta(y)
)
,f}_{\nu_\gamma^N}
}
-\frac{N}{B}\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}
\mathrm{d} s,
\end{align}
where the supremum is over densities with respect to $ \nu_\gamma^N $. Rewriting $ \eta(x)-\eta(y)=\sum_{z=x}^{y-1}\eta(z)-\eta(z+1) $, from Lemma \ref{lem:change} the first term inside the supremum in \eqref{rep0:var0} can be rewritten as
\begin{align}\label{rep0:eq0}
\frac12\int_{\Omega_N}\sum_{z=x}^{y-1}
\varphi(s,\eta)
(\eta(z)-\eta(z+1))
(f(\eta)-f(\eta^{z,z+1}))
\nu_\gamma^N(\mathrm{d}\eta).
\end{align}
From Young's inequality we bound this from above by $ c_\varphi $ times
\begin{align*}
\frac{1}{4 A}\int_{\Omega_N} & \sum_{z=x}^{y-1}
\left(\sqrt{f}(\eta^{z,z+1})+\sqrt{f}(\eta)\right)^2
\nu_\gamma^N(\mathrm{d}\eta)
+
\frac{A}{4}\int_{\Omega_N}\sum_{z=x}^{y-1}
\left(\nabla_{z,z+1}\sqrt{f}(\eta)\right)^2
\nu_\gamma^N(\mathrm{d}\eta)
\\
& \leqslant
\frac{\abs{y-x}}{2A}
+
\frac{A}{2}\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N),
\end{align*}
where we performed a change of variables on the first term. Summarizing, applying Proposition \ref{prop:energy} on \eqref{rep0:var0} we bound \eqref{rep0:var0} from above by
\begin{align*}
\frac{c_\gamma}{B}
+T
\left(
\frac12 \Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)
\left(\frac{c_\varphi}{4}A
-\frac{N}{B}\delta_N
\right)
+c_\varphi
\frac{\abs{y-x}}{2A}
\right).
\end{align*}
Fixing $ A=4N\delta_N/c_\varphi B $ and recalling from Proposition \ref{prop:low_bound_r} that $ 0<\delta_N=\sum_{k\geqslant\ell_N}\abs{\binom{m-1}{k}}\lesssim (\ell_N)^{-(m-1)} $, the proof is concluded.
\end{proof}
\begin{Cor}\label{cor:rep_ell_box}
For fixed $ N $, for any $ i\in\mathbb{N}_+ $ and $ L\in\mathbb{N}_+ $ such that $ L<N $, let $ \varphi:[0,T]\times\Omega_N\to\mathbb{R} $ be such that $ \norm{\varphi}_{L^\infty([0,T]\times\Omega_N)}\leqslant c_\varphi<\infty $ and invariant for the map $ \eta\mapsto\eta^{z,z+1} $ with $ z\in\llbracket iL,(i+1)L-2\rrbracket $.
Then, for all $ B>0 $ and for all $ t\in[0,T] $ it holds
\begin{align*}
\mathbb{E}_{\mu_N}
\left[
\abs{
\int_0^t
\varphi(s,\eta_{N^2s})
\left(\eta_{N^2s}(i L)-\eta_{N^2s}^L(iL)\right)
\mathrm{d} s }
\right]
\lesssim
\frac1B
+TB\frac{(L+1)(\ell_N)^{m-1}}{N}
.
\end{align*}
\end{Cor}
\begin{proof}
Observing that $ \eta(0)-\eta^L(0)=\frac{1}{L}\sum_{y\in \Lambda_0^L}\left(\eta(0)-\eta(y)\right) $, from Lemma \ref{lem:rep_shift} we can bound from above the expectation in the statement of the corollary by a constant times
\begin{align*}
\frac{1}{L}\sum_{y\in \Lambda_0^L}
\frac{1}{B}
+TBy\frac{(\ell_N)^{m-1}}{N}
\lesssim
\frac1B
+TB\frac{(L+1)(\ell_N)^{m-1}}{N}.
\end{align*}
\end{proof}
Let us now state the two-blocks estimate:
\begin{Lemma}\label{lem:rep_boxes}
Fix $ \epsilon>0 $ and $ N\in\mathbb{N} $. For $ i\in\mathbb{N} $ and $ L<\epsilon N $ fixed, let $ \varphi:[0,T]\times\Omega_N\to\mathbb{R}_+ $ be such that $ \norm{\varphi}_{L^\infty([0,T]\times\Omega_N)}\leqslant c_\varphi<\infty $ and invariant for the map $ \eta\mapsto\eta^{z,z+1} $ with $ z\in\llbracket iL,iL+\floor{N\epsilon}-1\rrbracket $. Then for all $ B>0 $ and for all $ t\in[0,T] $ it holds that
\begin{align*}
\mathbb{E}_{\mu_N}
\left[
\abs{
\int_0^t
\varphi(s,\eta_{N^2s})
\left(
\eta_{N^2s}^{\floor{N\epsilon}}(i\floor{N\epsilon})
-\eta_{N^2s}^{L}(iL)
\right)
\mathrm{d} s }
\right]
\lesssim
\frac{1}{B}
+T
\left[
\frac{1}{L}
+B
\bigg(\frac{L(\ell_N)^{m-1}}{N}
+i
\frac{L}{N}
+
\epsilon(i+1)
\bigg)
\right].
\end{align*}
\end{Lemma}
Before proving this lemma, let us comment on the proof: we will follow closely the path argument in \cite{BDGN}, although with some warm up before its application and some minor adjustments. Although for $ m\in(1,2) $ the state-space is irreducible, the exclusion rates are not fast enough to travel along $ \floor{N\epsilon} $--distances for every configuration, which would avoid the use of the path argument below (as it is the case for $ m\in(0,1) $). A simple way to see this quantitatively is to take $ \abs{y-x}=\epsilon N $ in Lemma \ref{lem:rep_shift}. The main reason for the resulting blow up is that the rate decreases as $ \inf\{k\in\llbracket1,\ell_N\rrbracket : \mathbf{c}^{(k)}(\overline{\eta})=0\} $ increases, and so for certain configurations the jumping rate can be as small as $ \delta_N\lesssim (\ell_N)^{-(m-1)} $ (see Proposition \ref{prop:low_bound_r}).
In order not to use the path argument we would need to replace the lower bound \eqref{dir:bound} by $ \kappa\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N) $, for some constant $ \kappa>0 $ independent of $ N $, which cannot be done because there is no such constant satisfying $ \inf_{\eta\in\Omega_N} c_N^{(m-1)}(\eta)\geqslant \kappa $.
Then again, we cannot relate the function inside the expectation in the statement of the two-blocks estimate solely with $ \Gamma_N^{(1)} $, since this would require an initial shuffling of the configuration in order to move the particles with the PMM(1), hence there is the need to compare it with a SSEP term as well. In this way, we are restricted to finding some useful lower bound, such as \eqref{dir:bound}. This introduces a second issue: in \cite{BDGN}, the replacement scheme relies on the treatment of
\begin{align*}
\mathbb{E}_{\mu_N}
\left[\abs{\int_0^t
\eta_{N^2s}^L(-L)
\left(
\eta_{N^2s}^{\floor{N\epsilon}}(0)-\eta_{N^2s}^{L}(0)
\right)\mathrm{d} s}\right],
\end{align*}
analogously to \eqref{step3}. There, the authors start by conditioning on the number of particles in $ \eta_{N^2s}^L(-L) $, which allows them to introduce the PMM($ 1 $) rates via Young's inequality. In our case however, we have $ \overline{\eta}_{N^2s}^L(0) $, meaning that we must condition on the number of \textit{holes} instead. Controlling the holes does not allow us to introduce the PMM($ 1 $) rates, but the $ \overline{\text{PMM}}( 1 ) $ rates instead, which are incompatible with the lower bound \eqref{dir:bound}. To avoid this, one could distribute the products of empirical averages in \eqref{step3}, but doing so would necessarily lead to restrictions on the explosion rate of $ \ell_N $. The simple workaround is to replace \textit{directly} $ \eta_{N^2s}^{\floor{N\epsilon}}(0) $ by $ \eta_{N^2s}^{L}(0) $ with the conditioning happening inside either the $ \floor{N\epsilon}$ or $ L $--boxes and not outside, and at the final step of the proof invoke Proposition \ref{prop:energy}.
Let us now go into the proof.
\begin{proof}[Proof of Lemma \ref{lem:rep_boxes}]
Analogously to the previous replacement lemmas, the expectation in the statement of the lemma can be estimated by some constant times
\begin{align}\label{rep_boxes_eq0}
\frac{c_\gamma}{B}
+\int_0^t
\sup_{f
}
\left\{
\abs{
\big\langle
\varphi(s,\eta)
\left(
\eta^{\floor{N\epsilon}}(i\floor{N\epsilon})
-\eta^{L}(iL)
\right)
,f\big\rangle_{\nu_\gamma^N}
}
-\frac{N}{B}\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}
\mathrm{d} s,
\end{align}
where the supremum is over densities with respect to $ \nu_\gamma^N $ and $ \gamma(\cdot)\in(0,1) $ is a constant function. Now we break the box $ \Lambda^{\floor{N\epsilon}}_0 $ into $ K $ smaller $ L-$sized boxes:
\begin{align*}
\llbracket 0,\floor{N\epsilon}-1\rrbracket
=
\llbracket 0,KL-1\rrbracket
=\bigcup_{j=1}^{K}\llbracket (j-1)L,jL-1\rrbracket
,\qquad K=\frac{\floor{N\epsilon}}{L},
\end{align*}
leading to
\begin{align*}
\eta^{\floor{N\epsilon}}(i\floor{N\epsilon})
-\eta^{L}(iL)
=\frac{1}{K}\sum_{j=1}^{K}
\left(
\eta^{L}(i\floor{N\epsilon}+(j-1)L)
-\eta^{L}(iL)
\right).
\end{align*}
Note that we can do this only if $ \floor{N\epsilon}>L $, which is the case given that $ L/N<\epsilon $. Moreover, $ K $ might not be an integer. Nevertheless, since for any bounded function $ \psi:\Omega_N\to\mathbb{R} $ we have
\begin{align}
\frac{1}{\ceil{K}}\sum_{j=1}^{\ceil{K}}\psi(\tau_j\eta)
-
\frac{1}{\floor{K}}\sum_{j=1}^{\floor{K}}\psi(\tau_j\eta)
\lesssim
\frac{1}{\ceil{K}}+\left(1-\frac{\floor{K}}{\ceil{K}}\right)\xrightarrow{K\to+\infty}0,
\end{align}
we proceed as if $ K\in\mathbb{N}_+ $.
For each $ j\in\{1,\dots,K\} $ consider the event
\begin{align*}
X_j=
\left\{
\eta\in\Omega_N :
\eta^L (iL)\geqslant \frac{3}{L}
\right\}
\bigcup
\left\{
\eta\in\Omega_N :
\eta^L (i\floor{N\epsilon}+(j-1)L)\geqslant \frac{3}{L}
\right\},
\end{align*}
meaning that there are at least $ 3 $ particles in at least one of the boxes
\begin{align*}
\Lambda_{iL}^L=\llbracket iL,(i+1)L-1\rrbracket
\quad\text{or}\quad
\Lambda_{i\floor{N\epsilon}+(j-1)L}^L=\llbracket i\floor{N\epsilon}+(j-1)L,i\floor{N\epsilon}+jL-1\rrbracket.
\end{align*}
The integral, over $ (X_j)^c $, of the first term in the variational formula \eqref{rep_boxes_eq0} is of order $ L^{-1} $, therefore we can bound from above the first term in the aforementioned variational formula by a term of order $ L^{-1} $ plus
\begin{align}\label{rep_boxes_eq1}
\frac{1}{2KL}
\sum_{j=1}^K
\sum_{z\in \Lambda^L}
\left|
\int_{X_j}
\varphi(s,\eta)
\left(\eta(z+i\floor{N\epsilon}+(j-1)L)-\eta(z+iL)\right)
\left(f(\eta)-f(\eta^{z+i\floor{N\epsilon}+(j-1)L,z+iL})\right)
\nu_\gamma^N(\mathrm{d}\eta)
\right|,
\end{align}
where we used Lemma \ref{lem:change}. To estimate the quantity in the previous display, we use a path argument in the same spirit as in \cite[Lemma 5.8]{BDGN}, we claim that we can decompose
\begin{align}\label{path}
f(\eta)-f(\eta^{z+i\floor{N\epsilon}+(j-1)L,z+iL})
=\sum_{n\in J^{\text{PMM}(0)}}\left(f(\eta^{(n-1)})-f(\eta^{(n)})\right)
+\sum_{n\in J^{\text{PMM}(1)}}\left(f(\eta^{(n-1)})-f(\eta^{(n)})\right)
\end{align}
where
\begin{itemize}
\item $ \eta^{(0)}=\eta,\; \eta^{(n+1)}=(\eta^{(n)})^{x(n),x(n)+1} $;
\item $ \left(x(n)\right)_{n=0,\dots,N(x_1)} $ is a sequence of moves (following the procedure to be described shortly) taking values in the set $ \{x_1,\dots,z+i\floor{N\epsilon}+(j-1)L\} $, with $ N(x_1) $ the number of nodes we have to exchange;
\item $ J^{\text{PMM}(0)},J^{\text{PMM}(1)} $ are the sets of indexes that count the nodes used with the PMM($ 0 $) and PMM($ 1 $) dynamics, respectively, and are such that
\begin{align*}
\big|J^{\text{PMM}(0)}\big|\leqslant J_0 L
\quad\text{and}\quad
\big|J^{\text{PMM}(1)}\big|\leqslant J_1( iL+jL+ i\floor{N\epsilon})
\end{align*}
for some finite constants $ J_0,J_1>0 $;
\item for each $ n\in J^{\text{PMM}(1)} $ we have $ \mathbf{c}^{(1)}(\tau_{x(n-1)}\eta^{(n-1)})>0 $.
\end{itemize}
Assuming all this, for $ i\in\{0,1\} $ and $ j\in\{1,\dots,K\} $ we have that
\begin{align}
\begin{split}\label{path1}
\sum_{n\in J^{\text{PMM}(i)}}\int_{X_j}&\abs{f(\eta^{(n-1)})-f(\eta^{(n)})}\nu_\gamma^N(\mathrm{d}\eta)
\\
&=\sum_{n\in J^{\text{PMM}(i)}}\int_{X_j}
\abs{\sqrt{f}(\eta^{(n-1)})-\sqrt{f}(\eta^{(n)})}
\abs{\sqrt{f}(\eta^{(n-1)})+\sqrt{f}(\eta^{(n)})}
\nu_\gamma^N(\mathrm{d}\eta)
\\
&\leqslant
\frac{A_i}{2}\sum_{n\in J^{\text{PMM}(i)}}\int_{X_j}
\mathbf{r}^{(i)}(\tau_{x(n-1)}\eta^{(n-1)})\abs{\sqrt{f}(\eta^{(n-1)})-\sqrt{f}(\eta^{(n)})}^2
\nu_\gamma^N(\mathrm{d}\eta)
\\
&\quad +\frac{1}{2A_i}\sum_{n\in J^{\text{PMM}(i)}}\int_{X_j}
\frac{1}{\mathbf{r}^{(i)}(\tau_{x(n-1)}\eta^{(n-1)})}\abs{\sqrt{f}(\eta^{(n-1)})+\sqrt{f}(\eta^{(n)})}^2
\nu_\gamma^N(\mathrm{d}\eta)
\end{split}
\end{align}
for any $ A_i>0 $. The inequality requires some justification. Fix some $ n\in J^{\text{PMM}(0)}\cup J^{\text{PMM}(1)} $ and let us write $ \xi=\eta^{(n-1)} $. Then
\begin{align*}
f(\eta^{(n-1)})-f(\eta^{(n)})
&=\mathbf{1}_{\{\eta\in\Omega_N:\;\eta_{x(n-1)}+\eta_{x(n-1)+1}=1\}}(\xi)
\left(
f(\xi)-f(\xi^{x(n-1),x(n-1)+1})
\right)
\\
&=\mathbf{r}^{(0)}(\tau_{x(n-1)}\xi)
\left(
f(\xi)-f(\xi^{x(n-1),x(n-1)+1})
\right).
\end{align*}
If $ n\in J^{\text{PMM}(0)} $ we are done. Otherwise, since $ n\in J^{\text{PMM}(1)} $, we have $ \xi_{x(n-1)-1}+\xi_{x(n-1)+2}>0 $. Consider the set $ \Omega_x^{(1)}=\{\eta\in\Omega_N:\;\mathbf{c}^{(1)}(\tau_x\eta)>0\} $. Then $ f(\xi)=f(\xi)\mathbf{1}_{\Omega_{x(n-1)}^{(1)}}(\xi) $, and since the constraints are independent of the occupation at the sites $ x(n-1),x(n-1)+1 $ we also have that $ \xi^{x(n-1),x(n-1)+1}\in\Omega_{x(n-1)}^{(1)} $. As such,
\begin{align*}
f(\eta^{(n-1)})-f(\eta^{(n)})
=
\mathbf{r}^{(0)}(\tau_{x(n-1)}\xi)
\left(
f(\xi)\mathbf{1}_{\Omega_{x(n-1)}^{(1)}}(\xi)
-f(\xi^{x(n-1),x(n-1)+1})
\mathbf{1}_{\Omega_{x(n-1)}^{(1)}}(\xi^{x(n-1),x(n-1)+1})\right).
\end{align*}
And since the change of variables $ \xi\mapsto \xi^{x(n-1),x(n-1)+1}\in\Omega_{x(n-1)}^{(1)}$ is a bijection of $\Omega_{x(n-1)}^{(1)} $, we conclude that
\begin{align*}
f(\eta^{(n-1)})-f(\eta^{(n)})
&=
\mathbf{1}_{\Omega_{x(n-1)}^{(1)}}(\xi)\mathbf{r}^{(0)}(\tau_{x(n-1)}\xi)
\left(
f(\xi)
-f(\xi^{x(n-1),x(n-1)+1})
\right)
\end{align*}
and the rates for the PMM($ 1 $) can be introduced by using Young's inequality.
We treat the integral on the first term on the right-hand side of \eqref{path1}. Recall that $ \xi=\eta^{(n-1)} $. For $ i\in\{0,1\} $ and $ j\in\{1,\dots,K\} $,
\begin{multline*}
\sum_{\eta\in X_j}
\mathbf{r}^{(i)}(\tau_{x(n-1)}\xi)\abs{\sqrt{f}(\xi)-\sqrt{f}(\xi^{x(n-1),x(n-1)+1})}^2
\nu_\gamma^N(\eta)
\\
\leqslant
\sum_{\eta\in \Omega_N}
\mathbf{r}^{(i)}(\tau_{x(n-1)}\xi)\abs{\sqrt{f}(\xi)-\sqrt{f}(\xi^{x(n-1),x(n-1)+1})}^2
\nu_\gamma^N(\xi)
.
\end{multline*}
Since $ \eta\in\Omega_N\Leftrightarrow\xi=\eta^{(n-1)}\in\Omega_N $, rearranging the first summation in the previous display and relabelling the terms yields
\begin{align*}
\sum_{\eta\in \Omega_N}
\mathbf{r}^{(i)}(\tau_{x(n-1)}\eta)\abs{\sqrt{f}(\eta)-\sqrt{f}(\eta^{x(n-1),x(n-1)+1})}^2
\nu_\gamma^N(\eta).
\end{align*}
Consequently, the first term on the right-hand side of \eqref{path1} can be bounded from above by $ A_i\Gamma_N^{(i)}(\sqrt{f},\nu_\gamma^N) $, while the second can be bounded from above by
\begin{align*}
\frac{1}{A_i}\sum_{n\in J^{\text{PMM}(i)}}\int_{\Omega_N}
\left(f(\eta^{(n-1)})+f(\eta^{(n)})\right)
\nu_\gamma^N(\mathrm{d}\eta)
=\frac{2}{A_i}\abs{J^{\text{PMM}(i)}}.
\end{align*}
In this way, \eqref{rep_boxes_eq1} is no larger than
\begin{align*}
\frac12 A_0\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)
+\frac12 A_1\Gamma_N^{(1)}(\sqrt{f},\nu_\gamma^N)
+
J_0\frac{L}{A_0}
+J_1
\left(\frac{iL+i\floor{N\epsilon}}{A_1}
+\frac{KL}{A_1}
\right).
\end{align*}
Recalling Proposition \ref{prop:energy}, the quantity \eqref{rep_boxes_eq0} is overestimated by
\begin{multline*}
\frac{c_\gamma}{B}+T
\sup_f
\left\{
3\frac{c_\varphi}{L}
+J_0\frac{L}{A_0}
+J_1
\frac{iL+(i+1)\floor{N\epsilon}}{A_1}
\right.\\\left.
+
\frac12\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)
\left(
A_0-\frac{N}{B}\frac{m}{2}\delta_N
\right)
+\frac12\Gamma_N^{(1)}(\sqrt{f},\nu_\gamma^N)
\left(
A_1-\frac{N}{B}\frac{m-1}{4}
\right)
\right\},
\end{multline*}
where we recall that $ KL=\floor{N\epsilon} $. Setting
\begin{align*}
A_0=\delta_N\frac{N}{B}\frac{m}{2}
\quad\text{and}\quad
A_1=\frac{N}{B}\frac{m-1}{4}
\end{align*}
we obtain an upper bound of the order of
\begin{align*}
\frac{1}{B}
+T
\left[
\frac{1}{L}
+B
\bigg(\frac{L(\ell_N)^{m-1}}{N}
+i
\frac{L}{N}
+
\epsilon(i+1)
\bigg)
\right].
\end{align*}
Now we prove our claim with the path argument.
The goal is to exchange the occupation variables of the sites
\begin{align*}
z_{i,\epsilon,L}:=z+i\floor{N\epsilon}+(j-1)L \quad\text{and}\quad z_{i,L}:=z+iL,
\qquad
z\in \llbracket 0\;, L-1 \rrbracket.
\end{align*}
Recall that there are at least three particles either in $ \Lambda^{L}_{iL} $ or in $ \Lambda_{i\floor{N\epsilon}+(j-1)L}^L $. We outline the argument only for the case of at least three particles in $ \Lambda^{L}_{iL} $ since the other one is analogous and leads to an equivalent estimate. It is sufficient to consider configurations in \eqref{rep_boxes_eq1} such that $ \eta(z_{i,\epsilon,L})+\eta(z_{i,L})=1 $. The decomposition \eqref{path} illustrates a path on the state-space starting from the configuration $ \eta $ and ending at $ \eta^{z_{i,\epsilon,L},z_{i,L}} $. Note that we can consider without loss of generality that $ \eta(z_{i,L})=1 $, since if $ \eta(z_{i,L})=0 $ then we construct an analogous path starting from $ \eta^{z_{i,\epsilon,L},z_{i,L}} $ and ending at $ \eta $.
Recall that a \textit{mobile cluster} with respect to the PMM($ 1 $) is a local configuration which can be translated on the lattice by a sequence of jumps dictated by the PMM($ 1 $). For example, the smallest mobile cluster for the PMM($ 1 $) corresponds to a local configuration where $ \eta(x)+\eta(x+1)+\eta(x+2)=2 $, for some $ x\in\mathbb{T}_N $.
Since $ \eta(z+iL)=1 $, there are at least two other particles in $ \Lambda_{iL}^L $. Pick the two closest to the site $ z+iL $ and label them as $ P_1 $ and $ P_2 $. Let us also denote the particle at site $ z_{i,L} $ by $ P_{z_{i,L}} $. We use the SSEP dynamics to move $ P_1 $ and $ P_2 $ to the vicinity of $ P_{z_{i,L}} $, forming a ``mobile cluster''. This can be done with a number of steps of order $ L $. We arrive at one of the following three local configurations.
Note that we still need an empty site in the vicinity of these three particles to construct a mobile cluster. Nevertheless, if this is not the case we can assume that they are part of a larger mobile cluster. Moreover, we can relabel the particles and use the SSEP dynamics to have the local configuration (for example) as in the first case of the previous figure. Now we move this mobile cluster to the left of the (empty) site $ z_{i,\epsilon,L} $ with the PMM($ 1 $) dynamics.
The number of steps can be crudely bounded above by a term of order $ L+(i\floor{N\epsilon}+(j-1)L) $.
By hypothesis, $ \eta(z_{i,\epsilon,L})=0 $ and so we leave $ P_{z_{i,L}} $ at site $ z_{i,\epsilon,L} $ using either the SSEP or the PMM($ 1 $) dynamics, and transport the hole to the site $ z_{i,L} $ with the PMM($ 1 $) dynamics.
If the site to the left of $ P_1 $ is either empty or occupied, we can perform the following transport with either the PMM($ 1 $) or a relabelling in the last step.
If the aforementioned site was occupied, we can exchange the hole and the particle at site $ z+i\floor{N\epsilon}+(j-1)L-4 $ with the PMM($ 1 $) dynamics, otherwise there is nothing to do and we relabel the hole, obtaining
This procedure is repeated at most an order of $ L+(i\floor{N\epsilon}+(j-1)L) $ steps, moving the mobile cluster to the vicinity of the site $ z_{i,L} $. The SSEP dynamics is then used to shuffle the configuration restricted to the box $ \Lambda_{iL}^L $, moving $ P_1 $ and $ P_2 $ to their original sites with a cost of at most an order of $ L $ steps.
\end{proof}
\subsection{Replacement Lemmas for \texorpdfstring{$m\in(0,1)$}{m in (0,1)}}
{\begin{Lemma}\label{lem:rep_FDM-tight}
For each $ n,k\in\mathbb{N}_+ $ such that $ n\leqslant k $, let $ \varphi_n^{(k)}:[0,T]\times\Omega_N\to\mathbb{R} $ be invariant for the map $ \eta\mapsto\eta^{n,n+1} $ and such that for $ \eta\in\Omega_N $ and every $ t\in [0,T] $
\begin{align*}
\varphi_n^{(k)}(t,\eta)\leqslant M(t)\mathbf{c}^{(k)}(\tau_n\eta)
\end{align*}
where $ M:[0,T]\to\mathbb{R}_+$ is uniformly bounded by some constant $ M>0 $. Then for all $ B>0 $ and $ \mathcal{T}\subseteq[0,T] $ it holds that
\begin{align*}
\mathbb{E}_{\mu_N}
\left[
\abs{
\int_{\mathcal{T}}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\sum_{n=0}^{k}
(\overline{\eta}_{N^2s}(n)-\overline{\eta}_{N^2s}(n+1))
\varphi_n^{(k-1)}(s,\overline{\eta}_{N^2s})
\mathrm{d} s
} \right]
\lesssim
\frac{1}{B}
+
\abs{\mathcal{T}}
B\frac{(\ell_N)^{1-m}}{N}
.
\end{align*}
\end{Lemma}
}\begin{proof}
Proceeding as previously, we have to estimate
\begin{align}\label{rep:FDM_var}
\frac{c_\gamma}{B}
+\int_{\mathcal{T}}
\sup_{f
}
\left\{
\abs{
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}\sum_{n=0}^{k}\inner{\varphi_n^{(k-1)}(s,\overline{\eta})
(\overline{\eta}(n)-\overline{\eta}(n+1))
,f}_{\nu_\gamma^N}
}
-\frac{N}{B}\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}
\mathrm{d} s.
\end{align}
By the hypothesis on $ \varphi_n^{(k-1)} $ and Young's inequality we can bound from above the summation over $ n $ by:
\begin{multline*}
\frac{M}{4A}\int_{\Omega_N}
\sum_{n=0}^{k}
\abs{\eta(n)-\eta(n+1)}\mathbf{c}^{(k-1)}(\tau_n\overline{\eta})
\left(\sqrt{f}(\eta^{n,n+1})+\sqrt{f}(\eta)\right)^2
\nu_\gamma^N(\mathrm{d}\eta)
\\
+
\frac{AM}{4}\int_{\Omega_N}
\sum_{n=0}^{k}
\abs{\eta(n)-\eta(n+1)}\mathbf{c}^{(k-1)}(\tau_n\overline{\eta})
\left(\nabla_{n,n+1}\sqrt{f}(\eta)\right)^2
\nu_\gamma^N(\mathrm{d}\eta)
.
\end{multline*}
Taking the binomial coefficients into consideration, and since $ \abs{\eta(n)-\eta(n+1)}=\mathbf{a}(\tau_n\eta), $ which is the exclusion constraint, we bound from above
\begin{align*}
\int_{\Omega_N}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\sum_{n=0}^{k}
\abs{\eta(n)-\eta(n+1)}\mathbf{c}^{(k-1)}(\tau_n\overline{\eta})
\left(\nabla_{n,n+1}\sqrt{f}(\eta)\right)^2
\nu_\gamma^N(\mathrm{d}\eta)
\leqslant
\Gamma_N^{(m-1)}(\sqrt{f},\nu_{\gamma}^N).
\end{align*}
We have the following upper bounds
\begin{align*}
\int_{\Omega_N}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
& \sum_{n=0}^{k}
\abs{\eta(n)-\eta(n+1)}\mathbf{c}^{(k-1)}(\tau_n\overline{\eta})
\left(\sqrt{f}(\eta^{n,n+1})+\sqrt{f}(\eta)\right)^2
\nu_\gamma^N(\mathrm{d}\eta)
\\
& \leqslant
2\int_{\Omega_N}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\sum_{n=0}^{k}
\abs{\eta(n)-\eta(n+1)}\mathbf{c}^{(k-1)}(\tau_n\overline{\eta})
\left(f(\eta^{n,n+1})+f(\eta)\right)
\nu_\gamma^N(\mathrm{d}\eta)
\\
& \leqslant
4\int_{\Omega_N}
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}
\left(
\sum_{n=0}^{k}
\abs{\eta(n)-\eta(n+1)}\mathbf{c}^{(k-1)}(\tau_n\overline{\eta})
\right)
f(\eta)
\nu_\gamma^N(\mathrm{d}\eta)
\\
& \leqslant
8M
\sum_{k=1}^{\ell_N}\abs{\binom{m}{k}}k
\int_{\Omega_N}
f(\eta)
\nu_\gamma^N(\mathrm{d}\eta)
\leqslant
8M (\ell_N)^{1-m}.
\end{align*}
The previous inequalities follow, respectively, from Young's inequality, Lemma \ref{lem:change}, Lemma \ref{lem:up_speed}, the fact of $ f $ being a density and then Lemma \ref{lem:bin_bound} and an integral comparison. With all this, we obtain the following estimate for \eqref{rep:FDM_var}
\begin{align*}
\frac{c_\gamma}{B}
+\abs{\mathcal{T}}
\left(
2M\frac{(\ell_N)^{1-m}}{A}
+\Gamma^{(m-1)}_N(\sqrt{f},\nu_{\gamma}^N)
\left(
\frac14AM-\frac{N}{B}
\right)
\right).
\end{align*}
Fixing $ A=4N/BM $ concludes the proof.
\end{proof}
\begin{Lemma}\label{lem:rep_FDM}
Consider $ x,y\in\mathbb{T}_N $. Let $ \varphi:[0,T]\times\Omega_N\to\mathbb{R} $ such that $ \norm{\varphi}_{L^\infty([0,T]\times\Omega_N)}<\infty $ and invariant for the map $ \eta\mapsto\eta^{z,z+1} $ with $ z\in\llbracket x,y-1\rrbracket $. Then, for all $ B>0 $ and for all $ t\in[0,T] $ it holds
\begin{align*}
\mathbb{E}_{\mu_N}
\left[\abs{
\int_0^t
\varphi(s,\eta_{N^2s})
(
\eta_{N^2s}(x)-\eta_{N^2s}(y)
)
\mathrm{d} s} \right]
\lesssim
\frac{1}{B}
+
T
B\frac{\abs{y-x}}{N}
.
\end{align*}
\end{Lemma}
\begin{proof}
Repeating the computations in the proof of Lemma \ref{lem:rep_shift}, there exist constants $ c_0,c_1,c_2>0 $ such that we can overestimate the expectation by
\begin{align*}
\frac{c_0}{B}
+T\sup_{f}
\left\{
c_1A\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N) + c_2\frac{\abs{y-x}}{A}
-\frac{N}{B}\mathcal{E}^{(m-1)}_N(\sqrt{f},\nu_\gamma^N)
\right\}.
\end{align*}
Recalling the lower bound for the Dirichlet form in Proposition \ref{prop:energy} we can choose $ A=mN/c_1B $.
\end{proof}
\section{Energy Estimate}\label{sec:energy}
We recall some classical results that we will invoke throughout this section.
Let $ \mathcal{H} $ be a Hilbert space with corresponding norm $ \norm{\cdot}_{\mathcal{H}} $ and $ f:\mathcal{H}\to\mathbb{R} $ a linear functional. The (dual) norm of the linear functional $ f $ is defined as
\begin{align*}
\nnorm{f}=\sup_{\norm{x}_{\mathcal{H}}\leqslant1,x\in\mathcal{H}}\abs{f(x)}.
\end{align*}
We know that (see for instance \cite[Proposition A.1.1.]{phd:adriana}) if there exists $ K_0>0 $ and
a positive real number $ \kappa $ such that
$
\sup_{x\in\mathcal{H}}
\left\{
f(x)-\kappa \norm{x}_{\mathcal{H}}^2
\right\}
\leqslant K_0,
$
then $ f $ is bounded.
Let us now introduce:
\begin{Def}
Let $ L^2([0,T]\times \mathbb{T}) $ be the (Hilbert) space of measurable functions $ G:[0,T]\times \mathbb{T}\to\mathbb{R} $ such that
\begin{align*}
\int_0^T\norm{G_s}_{L^2(\mathbb{T})}^2\mathrm{d} s<\infty,
\end{align*}
endowed with the scalar product $ \llangle G,H\rrangle $ defined by
\begin{align*}
\llangle G,H\rrangle=\int_0^T\inner{G_s,H_s}\mathrm{d} s.
\end{align*}
For any $ r\in\mathbb{R}_+ $ fixed, define the linear functional $ \ell^{(r)} $ on $ C^{0,1}\left([0,T]\times\mathbb{T}\right) $ by $ \ell_\rho^{(r)}(G)=\llangle\partial_u G,\rho^r\rrangle $.
\end{Def}
An important result is the following:\begin{Lemma}\cite[Lemma A.1.9]{phd:adriana}.\label{lem:reg_xi}
If $ \xi\in L^2([0,T]\times \mathbb{T}) $ is such that there exists a function $ \partial\xi\in L^2([0,T]\times \mathbb{T}) $ satisfying for all $ G\in C^{0,1}([0,T]\times\mathbb{T}) $ the identity
\begin{align*}
\llangle\partial_uG,\xi\rrangle=-\llangle G,\partial\xi\rrangle,
\end{align*}
then $ \xi\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $.
\end{Lemma}
\begin{Def}
For $ G\in C^{0,1}([0,T]\times\mathbb{T}),\; r,\kappa\in\mathbb{R}_+ $ define $ \mathscr{E}_{G,\kappa}^{(r)}:\mathcal{D}([0,T],\mathcal{M}_+)\to\mathbb{R}\cup\{\infty\} $ by
\begin{align*}
\mathscr{E}_{G,\kappa}^{(r)}(\pi)
=
\begin{cases}
\ell^{(r)}(G)-\kappa\norm{G}^2_2, & \text{ if }
\pi\in\mathcal{D}([0,T],\mathcal{M}_+),\\
+\infty, & \text{ otherwise},
\end{cases}
\end{align*}
and the energy functional $ \mathscr{E}^{(r)}_\kappa:\mathcal{D}([0,T],\mathcal{M}_+)\to\mathbb{R}\cup\{\infty\} $ by
\begin{align*}
\mathscr{E}^{(r)}_\kappa(\pi)=\sup_{G\in C^{0,1}([0,T]\times\mathbb{T})}\mathscr{E}_{G,\kappa}^{(r)}(\pi).
\end{align*}
\end{Def}
\begin{Rem}
Note that $ \mathscr{E}^{(r)}_\kappa(\pi)\geqslant0 $. To see this it is enough to take $ G=0 $.
\end{Rem}
Recall that the measure $ \mathbb{Q} $ is the weak limit of a subsequence of $ \mathbb{Q}_N $ as $ N\to+\infty $, where $ \mathbb{Q}_N $ is the measure induced by the empirical measure in the Skorokhod space of trajectories $ \mathcal{D}([0,T],\Omega_N) $. Recall also the definition of the target Sobolev space (Definition \ref{def:sob}). The main goal of this section is to prove the next proposition.
\begin{Prop}\label{prop:power_in_sob}
The measure $ \mathbb{Q} $ is concentrated on trajectories of absolutely continuous measures with respect to the Lebesgue measure, $ \pi_\cdot(\mathrm{d}u)=\rho_\cdot(u)\mathrm{d}u $, such that $ \rho^m\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $, for $ m\in(1,2) $, and $ \rho\in L^2([0,T],\mathcal{H}^1(\mathbb{T})) $, for $ m\in(0,1) $.
\end{Prop}
This will be shown to be a consequence of the existence of positive real numbers $ \kappa_0,\kappa_1,K_0 $ and $ K_1 $ such that for $ m\in(0,1) $ it holds that $ E_\mathbb{Q}\left[
\mathscr{E}_{\kappa_0}^{(1)}(\pi)
\right]\leqslant K_0 $, and for $ m\in(1,2) $ holds $ E_\mathbb{Q}\left[
\mathscr{E}_{\kappa_1}^{(m)}(\pi)
\right]\leqslant K_1 $,
{where $E_\mathbb{Q}$ denotes the expectation with respect to $\mathbb{Q}$.} This will be proved in Propositions \ref{prop:energy_est_FDE} and \ref{prop:energy_est_PME}, respectively. Recall \eqref{grad:non_int}. For the slow diffusion case, the argument is analogous to \cite[Section $ 6 $]{BDGN} but we make evident that this argument works due to the fact that the rates are uniformly bounded by a constant independent of $ N $ and the fact that the model is gradient. In particular, the argument is suited to show that the ``macroscopic'' quantity
\begin{align*}
\rho^m=\lim_{N\to+\infty} \int
h_N^{(m-1)}(\eta) \nu_\rho^N(\mathrm{d} \eta)
\end{align*}
lives in the target Sobolev space, where $ \rho(\cdot)\in(0,1) $ is a constant function. As in \cite{BDGN}, the argument does not allow us to show that $ \rho $ has a weak derivative, the reason being that $ \rho^m\leqslant \rho $.
For $ m\in(0,1) $ we have the opposite problem. Without imposing any restriction on the initial profile, we cannot show that $ \rho^m\in L^2([0,T],\mathcal{H}^1(\mathbb{T})) $, the reason being that (see Remark \ref{rem:bound})
\begin{align*}
\lim_{N\to+\infty}\sup_{\eta\in\Omega_N}r_N^{(m-1)}(\eta)=+\infty.
\end{align*}
This is the discrete analogue of $ \rho^{m-1}\to +\infty $ as $\rho\to0$. Yet, we can show that $ \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $ because the transition rates, in this case, are larger than the ones for the SSEP (analogous to $ \rho\leqslant \rho^{m} $ in this case), which is again a gradient model. \begin{proof}[Proof of Proposition \ref{prop:power_in_sob}]
Recall that up to this point we have proved that the measure $ \mathbb{Q} $ is a \textit{Dirac measure}, namely $ \mathbb{Q}=\delta_{\pi} $ with $ \pi_\cdot $ the trajectory of absolutely continuous measures $ \pi_\cdot(\mathrm{d} u)=\rho_\cdot(u)\mathrm{d} u $, where $ \rho $ satisfies the weak formulation \eqref{weak}. For $ m\in(1,2) $, from Proposition \ref{prop:energy_est_PME} the functional $ \ell^{(m)} $ is bounded $ \mathbb{Q}-$a.s. Since $ C^{0,1}([0,T]\times\mathbb{T}) $ is dense in $ L^2([0,T]\times\mathbb{T}) $, we can extend $ \ell^{(m)} $ to a $ \mathbb{Q}-$a.s.~bounded functional in $ L^2([0,T]\times\mathbb{T}) $. One can thus invoke Riesz's representation Theorem and conclude that for any $ m\in(1,2) $ there exists a function $ \partial\rho^{m}\in L^2([0,T]\times\mathbb{T}) $ such that
\begin{align*}
\ell_\rho^{(m)}(G)=-\llangle G,\partial\rho^{m}\rrangle.
\end{align*}
To finish the proof, since $ \rho^m\in L^2([0,T]\times\mathbb{T}) $, one invokes Lemma \ref{lem:reg_xi}.\par
For $ m\in(0,1) $ the same argument leads to $ \rho\in L^2([0,T],\mathcal{H}^1(\mathbb{T})) $ but now one should invoke instead Proposition \ref{prop:energy_est_FDE} which states that the functional $ \ell^{(1)} $ is bounded.
\end{proof}
\begin{Prop}\label{prop:energy_est_PME}
For any $ m\in(1,2) $ there are finite constants $ \kappa,K>0 $ such that
\begin{align*}
E_{\mathbb{Q}}
\left[
\mathscr{E}^{(m)}_{\kappa}(\pi)
\right]
\leqslant K.
\end{align*}
\end{Prop}
\begin{proof}
Recall that from the binomial theorem we can expand
\begin{align*}
\rho^m=\sum_{k\geqslant 0}\binom{m}{k}(-1)^k(1-\rho)^k,
\end{align*}
and since we are on the torus we can treat
\begin{align*}
E_{\mathbb{Q}}
\left[
\sup_{G\in C^{0,1}\left([0,T]\times\mathbb{T}_N\right)}
\bigg\{
\sum_{k\geqslant 1}\binom{m}{k}(-1)^{k}
\llangle
(1-\rho)^{k}
,\partial_u G
\rrangle-\kappa_1\norm{G}_2^2
\bigg\}
\right]
.
\end{align*}
Recalling that $ C^{0,1}\left([0,T]\times \mathbb{T}\right) $ is separable with respect to the norm $ \norm{\cdot}_{\mathcal{H}^1(\mathbb{T})} $, consider a countable dense subset, $ \{G_p\}_{p\in\mathbb{N}} $, in $ C^{0,1}\left([0,T]\times \mathbb{T}\right) $. An application of the monotone convergence theorem then reduces the problem to that of treating
\begin{align*}
\lim_{\ell\to+\infty}
E_{\mathbb{Q}}
\bigg[\max_{\substack{G_p\\p\leqslant \ell}}
\left\{\mathcal{E}_{G_p}(\pi)\right\}\bigg].
\end{align*}
For fixed $ G_p $, Lemma \ref{lem:eps_seq} allows us to replace $ (1-\rho)^{k}
$ by $ \prod_{j=0}^{k-1}
\big(1-\inner{\pi,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\big) $, with the sequence $ (\epsilon_k)_{k\geqslant0} $ depending on the regime of $ m $ and with a cost of $ \mathcal{O}(\epsilon^{\frac14}) $, leaving us with
\begin{align*}
E_{\mathbb{Q}}
\bigg[\max_{\substack{G_p\\p\leqslant \ell}}
\left\{\mathcal{E}_{G_p}(\pi)\right\}\bigg]
\leqslant
E_{\mathbb{Q}}
\bigg[
\max_{\substack{G_p\\p\leqslant \ell}}
\left\{
\sum_{k\geqslant 1}\binom{m}{k}(-1)^k
\Big\llangle{
\prod_{j=0}^{k-1}
\left(1-\inner{\pi,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\right)
,\partial_u G_p
}\Big\rrangle
-\kappa_1\norm{G_p}_2^2
\right\}
\bigg]
+\mathcal{O}(\epsilon^\frac14).
\end{align*}
Recalling that the $ \limsup $ is monotone, we can take $ \limsup_{\epsilon\to0} $ on both sides of the inequality above. Note that we need to take the $ \limsup_{\epsilon\to0} $ outside of the expectation, since otherwise we get from the reverse of Fatou's lemma that $ E_\mathbb{Q}\limsup\geqslant \limsup E_\mathbb{Q} $. And so, we further reduce the problem to the study of
\begin{align*}
\lim_{\ell\to+\infty}\limsup_{\epsilon\to 0}
E_{\mathbb{Q}}
\bigg[
\max_{\substack{G_p\\p\leqslant \ell}}
\left\{
\sum_{k\geqslant 1}\binom{m}{k}(-1)^k
\Big\llangle{
\prod_{j=0}^{k-1}
\left(1-\inner{\pi,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\right)
,\partial_u G_p
}\Big\rrangle
-\kappa_1\norm{G_p}_2^2
\right\}
\bigg].
\end{align*}
To make the link between the microscopic system and the macroscopic PDE we want to express $ \mathbb{Q} $ as the limit of a subsequence of $ (\mathbb{Q}_N)_{N\geqslant 0} $, thus replacing $ \pi $ by $ \pi^N $ and then recovering the occupation variables from the application of replacement lemmas. To do this, as previously, one wants to argue that the map
\begin{align*}
\pi\mapsto\Psi(\pi)
=\max_{\substack{G_p\\p\leqslant \ell}}
\left\{
\sum_{k\geqslant 1}\binom{m}{k}(-1)^k
\Big\llangle{
\prod_{j=0}^{k-1}
\left(1-\inner{\pi,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\right)
,\partial_u G_p
}\Big\rrangle
-\kappa_1\norm{G_p}_2^2
\right\}
\end{align*}
is continuous with respect to the Skorokhod topology, hence lower semicontinuous and therefore $ \Psi(\pi)\leqslant \liminf_{N\to+\infty}\Psi(\pi^N). $ Although this is not the case, one can first truncate the series at an $ \ell_{1/\epsilon} $ step, then replace $ \inner{\pi,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}} $ by $ \inner{\pi,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\star \varphi_{\tilde{\epsilon}_k} $ as in \eqref{conv-to-moll}, and then argue by lower semicontinuity. Next, we replace the sum up to $ \ell_{1/\epsilon} $ by a sum up to $ \ell_N $, as in \eqref{truncate2}, and finally we replace back $ \inner{\pi^N,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\star \varphi_{\tilde{\epsilon}_k} $ by $ \inner{\pi^N,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}} $. In this way, we have to treat
\begin{align*}
\lim_{\ell\to+\infty}\limsup_{\epsilon\to 0}
E_{\mathbb{Q}}\bigg[
\liminf_{N\to+\infty}
\max_{\substack{G_p\\p\leqslant \ell}}
\left\{
\sum_{k= 1}^{\ell_N}\binom{m}{k}(-1)^k
\Big\llangle{
\prod_{j=0}^{k-1}
\left(1-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\right)
,\partial_u G_p
}\Big\rrangle
-\kappa_1\norm{G_p}_2^2
\right\}
\bigg],
\end{align*}
where we recall again that $ \mathbb{Q} $ is a Dirac measure. Now we apply Fatou's lemma to exchange the expectation and the $ \liminf $. Hence, it is enough to show that there exists some constant $ K_1 $ independent of $ \{G_p\}_{p\leqslant\ell} $ for any $ \ell\in\mathbb{N} $ such that
\begin{align*}
\mathbb{E}_{\mu_N}\bigg[
\max_{\substack{G_p\\p\leqslant \ell}}
\left\{
\sum_{k= 1}^{\ell_N}\binom{m}{k}(-1)^k
\Big\llangle{
\prod_{j=0}^{k-1}
\left(1-\inner{\pi^N,{\iota}_{\epsilon_{k}}^{\cdot+j\epsilon_{k}}}\right)
,\partial_u G_p
}\Big\rrangle
-\kappa_1\norm{G_p}_2^2
\right\}
\bigg]
\leqslant K_1.
\end{align*}
Because $ \partial_uG_p $ is bounded in $ L^1 $ and the products involving the empirical measure are bounded by $ 1 $, we can replace $ \ell_N $ by $ (\ell_N)^n $ with $ n $ as previously (see \eqref{PMM:n0} and the computations that follow it). Now we are able to proceed backwards in the replacement lemmas' scheme (from \eqref{step1} to \eqref{step4:1}), approximating the space integral by the Riemann sum along the way. At this point we have to estimate
\begin{align*}
\mathbb{E}_{\mu_N}
\bigg[
\max_{p\leqslant \ell}
\left\{
\int_0^T
\bigg(
\frac{1}{N}\sum_{x\in\mathbb{T}_N}\sum_{k=1}^{(\ell_N)^n}\binom{m}{k}(-1)^k
\prod_{j=0}^{k-1}\overline{\eta}_{N^2s}(x+j)
\partial_u G_p(s,\tfrac{x}{N})
-\kappa_1\norm{G_p(s,\cdot)}_2^2
\bigg)
\mathrm{d} s
\right\}
\bigg],
\end{align*}
where we recall that $ (\ell_N)^n $ can be replaced back by $ \ell_N $ since the terms involving $ \eta $ are bounded and $ \partial_uG_p $ is bounded in $ L^1 $. We are able to introduce
\begin{align*}
\tau_x
\left\{
\sum_{i=1}^{k-2}
(\eta(i)-\eta(i+1))\sum_{j=1}^{k-1-i}\mathbf{s}_j^{(k-1)}(\tau_i\eta)
\right\}
\end{align*}
inside the summations over $ x $ and $ k $ (see the treatment of the first probability in \eqref{eq:h_prob}). This is important because now we have
\begin{align*}
\mathbb{E}_{\mu_N}
\bigg[
\max_{p\leqslant \ell}
\left\{
\int_0^T
\bigg(
\frac{1}{N}\sum_{x\in\mathbb{T}_N}
h_{N}^{(m-1)}(\tau_x\eta_{N^2s})
\partial_u G_p(s,\tfrac{x}{N})
-\kappa_1\norm{G_p(s,\cdot)}_2^2
\bigg)
\mathrm{d} s
\right\}
\bigg]
\end{align*}
which will be used to exploit the gradient property of the model. Analogously to the replacement lemmas, we obtain the upper bound
\begin{align}\label{energy:var}
c_\gamma+\int_0^T\sup_{f}
\left\{
\bigg\langle{\frac{1}{N}\sum_{x\in\mathbb{T}_N}
h_{N}^{(m-1)}(\tau_x\overline{\eta})
\partial_u G_p(s,\tfrac{x}{N})
,\;f}\bigg\rangle_{\nu^N_\gamma}
-\kappa_1\norm{G_p(s,\cdot)}_2^2
-N\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}\mathrm{d} s
\end{align}
where $ c_\gamma>0 $ is a constant.
Let us now focus on the inner product above, specifically on
\begin{align*}
\frac{1}{N}\sum_{x\in\mathbb{T}_N}
\partial_u G_p(s,\tfrac{x}{N})
\sum_{\eta\in \Omega_N}
h_{N}^{(m-1)}(\tau_x\overline{\eta})
f(\eta)\nu_{\gamma}^N(\eta).
\end{align*}
One can replace the space derivative by its discrete version with a cost
\begin{align*}
\frac{1}{N}\sum_{x\in\mathbb{T}_N}
\abs{\left(\partial_u -N\nabla^+\right)G_p(s,\tfrac{x}{N})}
\sum_{\eta\in \Omega_N}
h_{N}^{(m-1)}(\tau_x\overline{\eta})
f(\eta)\nu_{\gamma}^N(\eta)
\leqslant
\norm{\left(\partial_u -N\nabla^+\right)G_p(s,\cdot)}_{\infty}
\sup_{\eta\in\Omega_N}h_{N}^{(m-1)}(\eta).
\end{align*}
This vanishes in the limit $ N\to+\infty $ since $ \norm{\left(\partial_u -N\nabla^+\right)G_p(s,\cdot)}_{\infty}\lesssim \frac1N $ and, since $ \mathbf{h}^{(k)}\leqslant k $, we have that $h_{N}^{(m-1)}(\eta)\lesssim 1 $.
At this point the discrete derivative can be passed to $ h^{(m-1)}_{N} $ by performing a summation by parts, which puts us in place to use the gradient property of the model:
\begin{align*}
\sum_{x\in\mathbb{T}_N}
\nabla^{+}G_p(s,\tfrac{x}{N})
& \sum_{\eta\in \Omega_N}
h_{N}^{(m-1)}(\tau_x\overline{\eta})
f(\eta)\nu_{\gamma}^N(\eta)
=
-\sum_{x\in\mathbb{T}_N}
G_p(s,\tfrac{x+1}{N})
\sum_{\eta\in \Omega_N}
c_N^{(m-1)}(\tau_x\overline{\eta})
\nabla^{+}\eta(x)
f(\eta)\nu_{\gamma}^N(\eta).
\end{align*}
From Lemma \ref{lem:change},
\begin{align*}
\sum_{\eta\in \Omega_N}
c_N^{(m-1)}(\tau_x\overline{\eta})\nabla^{+}\eta(x)
f(\eta)\nu_{\gamma}^N(\eta)
&=
-\frac12
\sum_{\eta\in \Omega_N}
c_N^{(m-1)}(\tau_x\overline{\eta})\nabla^{+}\eta(x)
\nabla_{x,x+1}f(\eta)\nu_{\gamma}^N(\eta),
\end{align*}
and we are left with
\begin{align*}
\frac12&\sum_{x\in\mathbb{T}_N}
G_p(s,\tfrac{x+1}{N})\sum_{\eta\in \Omega_N}
c_N^{(m-1)}(\tau_x\overline{\eta})\nabla^{+}\eta(x)
\nabla_{x,x+1}f(\eta)\nu_{\gamma}^N(\eta)
\\
& \leqslant
\frac{1}{4A}\sum_{x\in\mathbb{T}_N}\sum_{\eta\in \Omega_N}
c_N^{(m-1)}(\tau_x\overline{\eta})
\left(G_p(s,\tfrac{x+1}{N})\right)^2
\left(\sqrt{f}(\eta)+\sqrt{f}(\eta^{x,x+1})\right)^2
\nu_{\gamma}^N(\eta)
\\
&\quad +\frac{A}{4}\sum_{x\in\mathbb{T}_N}\sum_{\eta\in \Omega_N}
c_N^{(m-1)}(\tau_x\overline{\eta})\left(\nabla^{+}\eta(x)\right)^2
\left(
(\nabla_{x,x+1}\sqrt{f})(\eta)
\right)^2
\nu_{\gamma}^N(\eta)
\\
&\leqslant
\frac{1}{A}\sup_{\eta\in\Omega_N}\{c_N^{(m-1)}(\eta)\}\sum_{x\in\mathbb{T}_N}\left(G_p(s,\tfrac{x+1}{N})\right)^2
+\frac{A}{4}\Gamma_N^{(m-1)}(\sqrt{f},\nu_\gamma^N).
\end{align*}
Recalling that $ \sup_{\eta\in\Omega_N}\{c_N^{(m-1)}(\eta)\}\leqslant m $, fixing $ A=N $ and replacing all this into \eqref{energy:var}, then taking the corresponding limits finishes the proof.
\end{proof}
\begin{Prop}\label{prop:energy_est_FDE}
For any $ m\in(0,1) $ there are finite constants $ \kappa,K>0 $ such that
\begin{align*}
E_{\mathbb{Q}}
\left[
\mathscr{E}^{(1)}_{\kappa}(\pi)
\right]
\leqslant K.
\end{align*}
\end{Prop}
\begin{proof}
The proof is almost identical to the one for the SSEP (see for example \cite[Proposition B.1]{EGN1}). Besides the fact that there the authors have a boundary term, the differences lie in that we apply the replacement lemmas in the present text, and that in the final step we need to invoke Proposition \ref{prop:energy}. We outline the main steps. The treatment of the expectation in the statement can be reduced to the treatment of
\begin{align}\label{energy:var_FDE}
\sup_{f}
\left\{
\bigg\langle{\frac{1}{N}\sum_{x\in\mathbb{T}_N}
\eta^{\epsilon N}(x)
\partial_u G_p(s,\tfrac{x}{N})
-\kappa_0\norm{G_p(s,\cdot)}_2^2,f}\bigg\rangle_{\nu^N_\gamma}
-N\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}
.
\end{align}
Above, the $ \sup $ is taken over the set of densities with respect to $ \nu_\gamma^N $, and $ \{G_p\}_{p\in\mathbb{N}} $ is a countable dense subset in $ C^{0,1}([0,T]\times\mathbb{T}) $. Exchanging the continuous derivative by a discrete one, then performing a summation by parts we end up having to treat
\begin{align*}
\sup_{f}
\left\{
\bigg\langle{-\sum_{x\in\mathbb{T}_N}
\nabla^+\eta^{\epsilon N}(x)
G_p(s,\tfrac{x}{N})
-\kappa_0\norm{G_p(s,\cdot)}_2^2,f}\bigg\rangle_{\nu^N_\gamma}
-N\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}.
\end{align*}
Then again, from Lemma \ref{lem:change} we have that
\begin{align*}
\int_{\Omega_N}
\nabla^+\eta^{\epsilon N}(x)
f(\eta)\nu_\gamma^N(\mathrm{d}\eta)
=\frac{1}{2\epsilon N}\sum_{i\in\Lambda_N^{\epsilon N}}
\int_{\Omega_N}
\left(\eta(x+i+1)-\eta(x+i)\right)
\left(f(\eta)-f(\eta^{x+i,x+i+1})\right)\nu_\gamma^N(\mathrm{d}\eta).
\end{align*}
Taking our function $ G_p $ back into consideration and recalling that the process is of exclusion type we have that
\begin{align*}
\sum_{x\in\mathbb{T}_N}
G_p(s,\tfrac{x}{N})&\frac{1}{2\epsilon N}\sum_{i\in\Lambda_N^{\epsilon N}}
\int_{\Omega_N}
\left(\eta(x+i+1)-\eta(x+i)\right)
\left(f(\eta)-f(\eta^{x+i,x+i+1})\right)\nu_\gamma^N(\mathrm{d}\eta)
\\
&\leqslant
\frac{1}{4A}
\sum_{x\in\mathbb{T}_N}
\left(G_p(s,\tfrac{x}{N})\right)^2
\int_{\Omega_N}
\left(\sqrt{f}(\eta)+\sqrt{f}(\eta^{x+i,x+i+1})\right)^2\nu_\gamma^N(\mathrm{d}\eta)
+\frac{A}{4}\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N).
\end{align*}
Since $ f $ is a density, last display is no larger than
\begin{align*}
\frac{1}{2A}
\sum_{x\in\mathbb{T}_N}
\left(G_p(s,\tfrac{x}{N})\right)^2
+\frac{A}{4}\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N).
\end{align*}
Plugging this into \eqref{energy:var_FDE}, we obtain
\begin{align*}
\sup_{f}
\left\{
\frac{1}{2A}
\sum_{x\in\mathbb{T}_N}
\left(G_p(s,\tfrac{x}{N})\right)^2
-\kappa\norm{G_p(s,\cdot)}_2^2
+\frac{A}{4}\Gamma_N^{(0)}(\sqrt{f},\nu_\gamma^N)
-N\mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N)
\right\}.
\end{align*}
Applying the lower bound for $ \mathcal{E}_N^{(m-1)}(\sqrt{f},\nu_\gamma^N) $ from Proposition \ref{prop:energy},
and then setting $ A=4N $ and $ \kappa=1/8 $, to conclude we just need to perform the necessary limits.
\end{proof}
\appendix
\section{Auxiliary results}\label{app:aux_res}
\begin{Lemma}\label{lem:bin_bound}
For any $ m\in\mathbb{R}_+ $ and any $ k\in\mathbb{N}_+ $ such that $ k\geqslant 2 $, it holds
\begin{align*}
\frac{\Gamma(m)\abs{\sin(\pi(k-m))}}{\pi(k+1)^m}
<
\abs{\binom{m-1}{k}}
<
\frac{\Gamma(m)\abs{\sin(\pi(k-m))}}{\pi(k-m)^m}
\lesssim \frac{1}{k^m},
\end{align*}
where the $ \Gamma-$function is defined, for any $ z\in\mathbb{C} $ such that $ \mathrm{Re}(z)>0 $, as
\begin{align*}
\Gamma(z)=\int_0^{+\infty} u^{z-1}e^{-u}\mathrm{d} u.
\end{align*}
\end{Lemma}
\begin{proof}
The binomial coefficients have the following classical representation in terms of the $ \Gamma-$function
\begin{align*}
\binom{m-1}{k}
=\frac{\Gamma(m)}{\Gamma(k+1)\Gamma(m-k)}.
\end{align*}
From the reflection formula
\begin{align*}
\Gamma(m-k)\Gamma(k+1-m)=\frac{\pi}{\sin(\pi(m-k))},
\end{align*}
we can rewrite
\begin{align*}
\binom{m-1}{k}
=\frac{\sin(\pi(m-k))}{\pi}\frac{\Gamma(m)\Gamma(k+1-m)}{\Gamma(k+1)}.
\end{align*}
Recall now the $ \mathrm{B}-$function, defined on $ z,w\in\mathbb{C}:\; \mathrm{Re}(z),\mathrm{Re}(w)>0 $, as
\begin{align*}
\mathrm{B}(z,w)=\int_0^1v^{z-1}(1-v)^{w-1}\mathrm{d} v
=
\int_0^{+\infty}\frac{s^{z-1}}{(s+1)^{w+z}}\mathrm{d} s,
\end{align*}
where the equality of the representations above can be checked by performing the change of variables $ v=s/(s+1) $ on the first integral.
From the definition of $ \Gamma $, one can show the following classical relationship between the $ \Gamma $ and $ \mathrm{B} $ functions, for all $ z,w\in\mathbb{C}:\; \mathrm{Re}(z),\mathrm{Re}(w)>0 $:
\begin{align*}
\mathrm{B}(z,w)=\frac{\Gamma(z)\Gamma(w)}{\Gamma(z+w)}.
\end{align*}
In this way, we can rewrite
\begin{align}\label{eq:bin-beta}
\binom{m-1}{k}
=\frac{\sin(\pi(m-k))}{\pi}\mathrm{B}(m,k+1-m).
\end{align}
Recall that for $ k\geqslant 2 $ it holds that $ (m-1)_k=(-1)^{k-\floor{m}}\abs{(m-1)_k} $. Noticing that $ \mathrm{B}(m,k+1-m)>0 $, we then have that $ \sin(\pi(m-k))=(-1)^{k-\floor{m}}\abs{\sin(\pi(m-k))} $ and we need only to find an upper and lower bound for the $ \mathrm{B}-$function. From the inequality $ e^x\geqslant 1+x $, valid for $ x\in\mathbb{R} $, the rescaling $ v=u/(w-1) $ with $ w>1 $ on
\begin{align*}
\Gamma(z)
=\int_0^{+\infty}u^{z-1}e^{-u}du
=(w-1)^z\int_0^{+\infty}v^{z-1}e^{-(w-1)v}dv
>
(w-1)^z\mathrm{B}(z,w),
\end{align*}
and from the rescaling $ v=u/(z+w) $,
\begin{align*}
\Gamma(z)
=\int_0^{+\infty}u^{z-1}e^{-u}du
=(z+w)^z\int_0^{+\infty}v^{z-1}e^{-(z+w)v}dv
<
(z+w)^z\mathrm{B}(z,w).
\end{align*}
We conclude that
\begin{align*}
\frac{\Gamma(m)}{(k+1)^m}<\mathrm{B}(m,k+1-m)<\frac{\Gamma(m)}{(k-m)^m}.
\end{align*}
\end{proof}
We now prove Lemma \ref{lem:grad}.
\begin{proof}
From \cite{GNP21} we have the following expression
\begin{align}\label{expr:h1}
\mathbf{h}^{(k)}(\eta)=\sum_{j=1}^{k+1}\prod_{i=j-(k+1)}^{j-1}\eta(i)
-\sum_{j=1}^{k}\prod_{\substack{i=-(k+1)+j\\i\neq0}}^j\eta(i).
\end{align}
Expression \eqref{expr:h2}
is a consequence of a rearrangement which turns out to be fundamental for maintaining $ \ell_N $ with no restrictions. Indeed, we can rewrite
\begin{align*}
\sum_{j=1}^{k+1}\prod_{i=j-(k+1)}^{j-1}\eta(i)
-\sum_{j=1}^{k}\prod_{\substack{i=-(k+1)+j\\i\neq0}}^j\eta(i)
=\prod_{i=0}^{k}\eta(i)
+\sum_{j=1}^{k}\left(\eta(0)-\eta(j)\right)\prod_{\substack{i=-(k+1)+j\\i\neq 0}}^{j-1}\eta(i).
\end{align*}
Note that
\begin{align*}
\left(\eta(0)-\eta(j)\right)
\prod_{\substack{i=-(k+1)+j\\i\neq 0}}^{j-1}\eta(i)
=\prod_{i=-(k+1)+j}^{j-1}\eta(i)
-\prod_{\substack{i=-(k+1)+j\\i\neq 0}}^{j}\eta(i).
\end{align*}
Now we reorganize the products on the second term above. For $ n\in\llbracket -(k+1)+j,j-1\rrbracket $ we have
\begin{align*}
\prod_{\substack{i=-(k+1)+j\\i\neq n+1}}^{j}\eta(i)
=
\left(\eta(n)-\eta(n+1)\right)
\prod_{\substack{i=-(k+1)+j\\i\neq n,n+1}}^{j}\eta(i)
+\prod_{\substack{i=-(k+1)+j\\i\neq n}}^{j}\eta(i).
\end{align*}
Observing that a change of variables yields
\begin{align*}
\prod_{\substack{i=-(k+1)+j\\i\neq n,n+1}}^{j}\eta(i)
=\prod_{\substack{i=-(k+1)+j-n\\i\neq 0,1}}^{j-n}\eta(i+n)
=\mathbf{s}_{j-n}^{(k)}(\tau_n\eta),
\end{align*}
by iteration we see that
\begin{align*}
\prod_{\substack{i=-(k+1)+j\\i\neq 0}}^{j}\eta(i)
=
\prod_{\substack{i=-(k+1)+j\\i\neq j}}^{j}\eta(i)
-\sum_{i=0}^{j-1}
(\eta(i)-\eta(i+1))\mathbf{s}_{j-i}^{(k)}(\tau_i\eta).
\end{align*}
Exchanging the summations and performing a change of variables,
\begin{align*}
\sum_{j=1}^{k}
\sum_{i=0}^{j-1}
(\eta(i)-\eta(i+1))\mathbf{s}_{j-i}^{(k)}(\tau_i\eta)
=\sum_{i=0}^{k-1}
(\eta(i)-\eta(i+1))
\sum_{j=1}^{k-i}
\mathbf{s}_{j}^{(k)}(\tau_i\eta),
\end{align*}
which ends the proof.
\end{proof}
\section{PDE results}\label{app:PDE}
\subsection{Slow diffusion}
The following result extends \cite[Lemma $ 6.2 $]{BDGN} to the case $ m\in(1,2) $.
\begin{Prop}\label{prop:power_diff}
Let $ f,g\in[0,1] $ with $ f\neq g $. If $ m\in(1,2) $ then, for all $ A>0 $ we have
\begin{align*}
\abs{f-g}\leqslant \frac{\abs{(f)^m-(g)^m}}{V^{(m)}(f,g)+A}+A\frac{2}{m(m-1)},
\end{align*}
where
\begin{align*}
0<V^{(m)}(f,g)
=\sum_{k\geqslant1}\binom{m}{k}(-1)^{k+1}v_k(1-f,1-g)<\infty
\end{align*}
and
\begin{align*}
v_k(f,g)
=\mathbf{1}_{k=1}
+\mathbf{1}_{k=2}(f+g)
+\mathbf{1}_{k\geqslant 3}\left(f^{k-1}+g^{k-1}+\sum_{i=1}^{k-2}g^if^{k-1-i}\right).
\end{align*}
\end{Prop}
\begin{proof}
We start with $ f,g\in(0,1) $.
\begin{align*}
(f)^m-(g)^m
&=\sum_{k\geqslant 1}\binom{m}{k}(-1)^k\left((1-f)^k-(1-g)^k\right).
\end{align*}
We now recall that one can rewrite, for any $ k\in\mathbb{N}_+ $,
\begin{align}\label{diff_powers}
a^k-b^k
=(a-b)
\left[
\mathbf{1}_{k=1}
+\mathbf{1}_{k=2}(a+b)
+\mathbf{1}_{k\geqslant 3}\left(a^{k-1}+b^{k-1}+\sum_{i=1}^{k-2}b^ia^{k-1-i}\right)
\right]
=(a-b)v_k(a,b).
\end{align}
In this way,
\begin{align*}
(f)^m-(g)^m
=(f-g)\sum_{k\geqslant1}\binom{m}{k}(-1)^{k+1}v_k(1-f,1-g)
=(f-g)V^{(m)}(f,g).
\end{align*}
We show that $ V^{(m)}(f,g)>0 $. Assume $ f,g\in(0,1) $ with $ f>g $. Then, $ f^m-g^m>0 $ implies $ V^{(m)}(f,g)>0 $. Similarly, if $ f<g $ then $ f^m-g^m<0\implies V^{(m)}(f,g)>0 $. With this in mind, we can rewrite
\begin{align*}
(f)^m-(g)^m=(f-g)\left(V^{(m)}(f,g)\pm A\right)
\Leftrightarrow
f-g=\frac{(f)^m-(g)^m}{V^{(m)}(f,g)+A}+A\frac{f-g}{V^{(m)}(f,g)+A}, \quad \text{for any }A>0.
\end{align*}
Now we will treat the second term on the right-hand side of last display. Note that
\begin{align*}
V^{(m)}(f,g)
=m\sum_{k\geqslant0}\binom{m-1}{k}(-1)^{k}\frac{v_{k+1}(1-f,1-g)}{k+1}.
\end{align*}
Since $ m\in(1,2) $ and $ v_{1}(1-f,1-g)=1 $, then
\begin{align*}
V^{(m)}(f,g)
&=m\left(1-\sum_{k\geqslant1}\abs{\binom{m-1}{k}}\frac{v_{k+1}(1-f,1-g)}{k+1}\right)
=m\sum_{k\geqslant1}\abs{\binom{m-1}{k}}\left(1-\frac{v_{k+1}(1-f,1-g)}{k+1}\right),
\end{align*}
where we note that
\begin{align*}
1-\sum_{k\geqslant1}\abs{\binom{m-1}{k}}=0.
\end{align*}
Since $ f,g\in(0,1) $ we also have $ 0<\frac{v_{k+1}(1-f,1-g)}{k+1}<1 $, and so let us introduce
\begin{align*}
W^{(m)}(f,g)=m\sum_{k\geqslant2}\abs{\binom{m-1}{k}}\left(1-\frac{v_{k+1}(1-f,1-g)}{k+1}\right)>0.
\end{align*}
In this way, we can write
\begin{align*}
V^{(m)}(f,g)
&=m(m-1)\left(1-\frac{v_2(1-f,1-g)}{2}\right)+W^{(m)}(f,g)
=m\frac{m-1}{2}(f+g)+W^{(m)}(f,g).
\end{align*}
Now back to our main problem,
\begin{align*}
A\frac{f-g}{V^{(m)}(f,g)+A}
=A\frac{2}{m(m-1)}\frac{m\frac{m-1}{2}(f+g)+W^{(m)}(f,g)+A-\left(m(m-1)g+W^{(m)}(f,g)+A\right)}{m\frac{m-1}{2}(f+g)+W^{(m)}(f,g)+A},
\end{align*}
hence,
\begin{align*}
A\frac{f-g}{V^{(m)}(f,g)+A}\leqslant A\frac{2}{m(m-1)}.
\end{align*}
If $ f=1 $ we can write $ 1-(g)^m=(1-g)V^{(m)}(1,g), $
while if $ f=0 $, we use instead that $ 0= \sum_{k\geqslant 0}\binom{m}{k}(-1)^k. $
In either case $ f\in\{0,1\} $, the rest of the proof is analogous.
To check that $ V^{(m)} $ is bounded, it is enough to bound from above $ v_k\leqslant k $ and use the estimate for the binomial coefficients from Lemma \ref{lem:bin_bound}.
\end{proof}
\begin{Cor}[$ \frac14-$H\"{o}lder continuity]\label{cor:cont}
If $ \rho^m\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $, with $ m\in(1,2) $, then {for any} $t\in[0,T] $
\begin{align*}
\abs{\rho_t(u)-\rho_t(v)}\leqslant
\abs{v-u}^{\frac14}\left(\frac{2}{m(m-1)}
+\norm{\partial_u(\rho_t^m)}_{L^2(\mathbb{T})}\right)\quad a.e.\;u,v\;{ \in\mathbb T.}
\end{align*}
\end{Cor}
\begin{proof}
Since $ \rho^m $ is in the target Sobolev space, we have a weak derivative of $ \rho $ and can write a.e., from the previous proposition
\begin{align*}
\abs{\rho_t(u)-\rho_t(v)}
\leqslant
\frac{\int_u^v\partial_w(\rho_t^m)\mathrm{d} w}{V^{(m)}(\rho_t(u),\rho_t(v))+A}+\frac{2A}{m(m-1)}
\leqslant
\frac{1}{A}\int_u^v\partial_w(\rho_t^m)\mathrm{d} w+\frac{2A}{m(m-1)}.
\end{align*}
We now apply Cauchy-Schwarz's inequality and set $ A=\abs{v-u}^{\frac14} $.
\end{proof}
\begin{Lemma}[Uniqueness of weak solutions]\label{lem:uniq_PME}
For $ \rho^{\rm ini}:\mathbb{T}\to[0,1] $ a measurable initial profile the weak solution of \eqref{PDE:formal}, in the sense of Definition \ref{def:weak}, is unique.
\end{Lemma}
\begin{proof}
The proof relies on the same choice of test function as in \cite[Lemma 6.3]{HJV20}, there for solutions of the FDE with $ m=-1 $. Note that for $ m\in(1,2) $ holds $ \rho^m\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $. A solution $ \rho $ of \eqref{PDE:formal} satisfies then the formulation \eqref{weak} or, equivalently,
\begin{align*}
0=\inner{\rho_t,G_t}-\inner{\rho^{\rm ini},G_0}
-\int_0^t
\inner{\rho_s,\partial_sG_s} \mathrm{d} s
+\int_0^t
\inner{\partial_u(\rho_s)^m,\partial_u G_s}
\mathrm{d} s
\end{align*}
for any $ G\in C^{1,2}([0,T]\times\mathbb{T}) $. In particular, one can consider the alternative formulation where the regularity of $ G $ above is reduced to $ G\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $ and $ \partial_tG\in L^2([0,T];L^2[0,1]) $ (satisfying the equality on the previous display), and then show the equivalence of formulations by approximating $ G $ by a sequence of functions in $ C^{1,2}([0,T]\times\mathbb{T}) $.
Assume that $ \rho^{(1)},\rho^{(2)} $ are two solutions starting from the same profile $ \rho^{\rm ini} $ and write $ w=\rho^{(1)}-\rho^{(2)} $.
Then $ w $ satisfies the equality
\begin{align*}
\inner{w_t,G_t}
=\int_0^t\inner{w_s(u),\partial_sG_s}\mathrm{d} s
-\int_0^t\inner{\partial_u\left((\rho_s^{(1)})^m-(\rho_s^{(2)})^m\right),\partial_u G_s}\mathrm{d} s.
\end{align*}
With the choice of test function
\begin{align}\label{uniq:test}
G_s(u)=\int_s^t
(\rho_r^{(1)}(u))^m-(\rho_r^{(2)}(u))^m
\mathrm{d} r,
\end{align}
we obtain
\begin{align*}
\inner{w_t,G_t}
=0
=-\int_0^t
\big\langle{
w_s,
\left((\rho_s^{(1)})^m-(\rho_s^{(2)})^m\right)
}\big\rangle
\mathrm{d} s
-\frac12 \norm{\int_0^t\partial_u
\left(
(\rho_r^{(1)})^m-(\rho_r^{(2)})^m
\right)\mathrm{d} r}_2^2
.
\end{align*}
It is simple to see that
$
w_s(u)
\left((\rho_s^{(1)}(u))^m-(\rho_s^{(2)}(u))^m\right)
\geqslant 0
$
for a.e. $ u\in\mathbb{T} $, implying $ w=0 $ almost everywhere.
\end{proof}
\subsection{Fast diffusion}
\begin{Prop}[$ \frac12-$H\"{o}lder continuity]\label{prop:continuity_FDE}
If $ \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $ then {for any} $ t\in[0,T] $ it holds that
\begin{align*}
\abs{\rho_t(u)-\rho_t(v)}\leqslant \abs{u-v}^\frac12\norm{\partial\rho_t}_{L^2(\mathbb{T})}\quad a.e.\;u,v { \in \mathbb{T}}
\end{align*}
\end{Prop}
\begin{proof}
{This is a simple consequence of Cauchy-Schwarz's inequality. }
\end{proof}
\begin{Lemma}[Uniqueness of weak solutions]\label{lem:uniq_FDE}
For $ \rho^{\rm ini}:\mathbb{T}\to[0,1] $ a measurable initial profile the weak solution of \eqref{PDE:formal} in the sense of Definition \ref{def:weak} is unique.
\end{Lemma}
\begin{proof}
For $ m\in(0,1) $ our weak formulation can be shown to be equivalent to
\begin{equation}
\inner{\rho_t,G_t}-\inner{\rho^{\rm ini},G_0}
=\int_0^t
\big\{
\inner{\rho_s,\partial_sG_s}
+\inner{(\rho_s)^m,\partial_{uu} G_s}
\big\}
\mathrm{d} s, \qquad\forall t\in(0,T],
\end{equation}
where $ G\in C^{1,2}([0,T]\times\mathbb{T})$. Recall also that we already showed, in Proposition \ref{prop:power_in_sob}, that there exists a solution $ \rho\in L^2([0,T];\mathcal{H}^1(\mathbb{T})) $. Let $ \rho^{(1)},\rho^{(2)} $ be two solutions starting from the same initial data and write $ w=\rho^{(1)}-\rho^{(2)} $. Then we have the following equation
\begin{align*}
\inner{w_t,G_t}
=\int_0^t
\big\{
\inner{w_s,\partial_sG_s}
+\inner{(\rho^{(1)}_s)^m-(\rho^{(2)}_s)^m,\partial_{uu} G_s}
\big\}
\mathrm{d} s=0.
\end{align*}
We will write $ (\rho^{(1)})^m-(\rho^{(2)})^m $ as a function of $ w $. To do so, we consider the binomial expansion of these powers. Since $ m\in(0,1) $ we have
\begin{align*}
(\rho^{(1)})^m-(\rho^{(2)})^m
=\sum_{k\geqslant1}\abs{\binom{m}{k}}\left((1-\rho^{(2)})^k-(1-\rho^{(1)})^k\right)
.
\end{align*}
It is important to truncate now the series at some step $ \ell $ which will be taken to infinity later on. Let $ \ell\in\mathbb{N}_+ $. Then
\begin{align*}
\sum_{k\geqslant\ell+1}\abs{\binom{m}{k}}\left((1-\rho^{(2)})^k-(1-\rho^{(1)})^k\right)
\leqslant
\sum_{k\geqslant\ell+1}\abs{\binom{m}{k}}=\mathcal{O}\left(\ell^{-m}\right).
\end{align*}
As such, from \eqref{diff_powers}
\begin{align*}
\sum_{k=1}^\ell\abs{\binom{m}{k}}\left((1-\rho^{(2)})^k-(1-\rho^{(1)})^k\right)
=w\sum_{k=1}^\ell\abs{\binom{m}{k}}v_k(1-\rho^{(2)},1-\rho^{(1)})=:wV^\ell
\end{align*}
where we shorten $ V_s^\ell(u)\equiv V^\ell(\rho_s^{(1)}(u),\rho_s^{(2)}(u)) $ and $ v_k(s,u)\equiv v_k(1-\rho_s^{(1)}(u),1-\rho_s^{(2)}(u)) $.
Note that for each $ \ell $ fixed we have the crude upper bound
\begin{align}\label{V_l:upper}
V_s^\ell(u)\leqslant
\sum_{k=1}^\ell\abs{\binom{m}{k}}k=\mathcal{O}\left(\ell^{1-m}\right).
\end{align}
This truncation allows us to obtain
\begin{align*}
\int_0^t
\inner{(\rho_s^{(1)})^m-(\rho_s^{(2)})^m,\partial_{uu} G_s}
\mathrm{d} s
\lesssim
\int_0^t
\inner{w_sV_s^\ell,\partial_{uu} G_s}
\mathrm{d} s
+
\frac{1}{\ell^m}\int_0^t
\int_{\mathbb{T}}
\abs{\partial_{uu} G_s(u)}
\mathrm{d} u \mathrm{d} s.
\end{align*}
Because for each fixed $ \ell $ we have $ V^\ell\in L^p([0,t]\times\mathbb{T}) $, for any $ 1\leqslant p\leqslant \infty $, one can approximate $ V^\ell $ by a sequence of functions in $ C^\infty([0,t];L^\infty(\mathbb{T})) $, with $ t\in[0,T] $, and with respect to the $ L^p([0,t]\times\mathbb{T}) $ norm. Let $ \varphi $ be some positive mollifier and define $ \varphi_{{\epsilon}}={\epsilon}^{-1}\varphi({\epsilon}^{-1}\;\cdot) $ for $ \epsilon>0 $. Define
\begin{align*}
V_\cdot^{\ell,\epsilon}(u)=V_{\cdot}^\ell(u)\star\varphi_{{\epsilon}}.
\end{align*}
Note that $ V^{\ell,\epsilon}\in L^p([0,T]\times\mathbb{T}) $ for any $ 1\leqslant p\leqslant\infty $ because $ V^\ell $ is uniformly bounded in both time and space. Denote by $ \hat{f} $ the Fourier transform of a function $ f $ defined on $ [0,t] $. From Parseval-Plancherel's identity we have the isometry
\begin{align*}
\norm{V_{\cdot}^{\ell,\epsilon}(u)
-
V_{\cdot}^\ell(u)}_{L^2([0,t])}
&=
\norm{
\widehat{V_{\cdot}^{\ell,\epsilon}(u)}
-
\widehat{V_{\cdot}^\ell(u)}
}_{L^2([0,t])}
=
\left[\int_0^t
\abs{
\widehat{V_{\cdot}^\ell(u)}(\xi)
}^2
\abs{
1-\widehat{\varphi_{{\epsilon}}}(\xi)
}^2
\mathrm{d} \xi\right]^\frac12.
\end{align*}
Because the mollifier is normalized and positive,
\begin{align*}
\abs{
1-\widehat{\varphi_{{\epsilon}}}(\xi)
}
\leqslant
\int_{B_{\epsilon}(0)}
\varphi_{\epsilon}(v) \abs{(1-e^{-iv\xi})} \mathrm{d} v,
\end{align*}
where $ B_\epsilon(0) $ is the open ball in $ \mathbb{T} $ centred in zero and with radius $ \epsilon>0 $. Since $ e^{-x}\geqslant 1-x $ we can see that
\begin{align*}
\sup_{v\in B_{\epsilon}(0)}\abs{(1-e^{-iv\xi})}
\leqslant
\sup_{v\in B_{\epsilon}(0)}\abs{iv\xi}
\leqslant
{\epsilon}\abs{\xi}.
\end{align*}
With this we obtain the estimate
\begin{align*}
\norm{V_{\cdot}^{\ell,\epsilon}(u)
-
V_{\cdot}^\ell(u)}_{L^2([0,t])}
\leqslant
\epsilon
\left[\int_{0}^t
\abs{
\widehat{V_{\cdot}^\ell(u)}(\xi)
}^2
\abs{\xi}^2
\mathrm{d} \xi\right]^\frac12
\leqslant
t\epsilon
\left[\int_{0}^t
\abs{
\widehat{V_{\cdot}^\ell(u)}(\xi)
}^2
\mathrm{d} \xi\right]^\frac12
=t\epsilon
\norm{
V_{\cdot}^\ell(u)
}_{L^2([0,t])}
\end{align*}
and the right-hand side of the previous display is no larger than a constant times $ t^\frac{3}{2}\epsilon\ell^{1-m} $.
In particular, from Cauchy-Schwarz's inequality
\begin{align*}
\int_0^t
\inner{w_sV_s^\ell,\partial_{uu} G_s}
\mathrm{d} s
&\leqslant
\int_0^t
\inner{w_sV_s^{\ell,\epsilon},\partial_{uu} G_s}\mathrm{d} s
\\
&\quad +
\int_{\mathbb{T}}\left[\int_0^t\abs{V_s^\ell(u)-V_s^{\ell,\epsilon}(u)}^2\mathrm{d} s\right]^\frac12\left[\int_0^t\abs{\partial_{uu}G_s(u)}^2 \mathrm{d} s\right]^\frac12\mathrm{d} u.
\end{align*}
From the previous computations and again from the Cauchy-Schwarz's inequality, the second line in last display is bounded above by $ t^{\frac32}\epsilon\ell^{1-m}\norm{\partial_{uu}G}_{L^2([0,t]\times\mathbb{T})} $.
We just showed that
\begin{align*}
\inner{w_t,G_t}
\lesssim
\int_0^t
\int_{\mathbb{T}}
w_s(u)
\big\{
\partial_sG_s(u)
+V_s^{\ell,\epsilon}(u)\partial_{uu} G_s(u)
\big\}
\mathrm{d} u\mathrm{d} s
+
t^{\frac12}\ell^{-m}
\left(
1
+ \epsilon t\ell
\right)
\norm{\partial_{uu}G}_{L^2([0,t]\times\mathbb{T})}
.
\end{align*}
We want to fix $ G $ as a solution to the backwards problem
\begin{align}\label{dual:prob}
\begin{cases}
\partial_sf+\lambda\partial_{uu}f=0, & (s,u)\in[0,t)\times\mathbb{T},\\
f(t,u)=\phi(u), &u\in\mathbb{T},
\end{cases}
\end{align}
with $ \phi $ to be chosen suitably later on. This is a well-posed problem and has a solution $ f\in C^{1,2}([0,t]\times\mathbb{T}) $ given some conditions on $ \phi $ and $ \lambda $: under the new time $ \tau=t-s $ a solution to this problem is equivalently a solution to
\begin{align*}
\begin{cases}
\partial_\tau g=\lambda\partial_{uu}g, & (\tau,u)\in(0,t]\times\mathbb{T},\\
g(0,u)=\phi(u), &u\in\mathbb{T}.
\end{cases}
\end{align*}
According to \cite[Thm.~4.5, Ch.~6, Sec.~4]{sde:friedman}, for $ \lambda $ positive and bounded uniformly in $ [0,t]\times\mathbb{T} $, continuous with respect to time (uniformly in $ \mathbb{T} $) and $ \alpha-$H\"{o}lder continuous with respect to the space variable; and $ \phi $ a continuous function, there exists a solution to this Cauchy problem in $ C^{1,2}([0,t]\times\mathbb{T}) $. Note that we have already checked that $ V^{\ell,\epsilon} $ satisfies all the requirements for $ \lambda $ above (for $ \ell $ fixed) except the H\"{o}lder continuity condition. Since $ \rho^{(1)},\rho^{(2)} $ are $ \frac12-$H\"{o}lder continuous, so is $ V^{\ell,\epsilon} $. To see this we sum and subtract appropriate terms and use the triangle inequality to estimate
\begin{align*}
\abs{
v_k(s,x)-v_k(s,y)
}
&\leqslant
\abs{
\rho_s^{(1)}(y)-\rho_s^{(1)}(x)
}
\sum_{i=0}^{k-1}
v_i(1-\rho_s^{(1)}(x),1-\rho_s^{(1)}(y))
(1-\rho_s^{(2)}(x))^{k-1-i}
\\
&\quad +
\abs{
\rho_s^{(2)}(y)-\rho_s^{(2)}(x)
}
\sum_{i=0}^{k-1}
v_{k-1-i}(1-\rho_s^{(2)}(x),1-\rho_s^{(2)}(y))(1-\rho_s^{(1)}(x))^i
\lesssim
k^2\abs{x-y}^\frac12.
\end{align*}
In this way,
\begin{align*}
\abs{
(v_k(\cdot,x)-v_k(\cdot,y))\star\varphi_{\epsilon}(s)
}
\leqslant\int_{0}^t\varphi_{\epsilon}(s-r)\abs{v_k(r,x)-v_k(r,y)}\mathrm{d} r
\leqslant
k^2\abs{x-y}^{\frac12}\int_{0}^t\varphi_{\epsilon}(s-r)\mathrm{d} r.
\end{align*}
Recalling that the integral on the right-hand side equals one, we see that
\begin{align*}
\abs{
V_s^{\ell,\epsilon}(x)-V_s^{\ell,\epsilon}(y)
}
\leqslant
\sum_{k=1}^\ell\abs{\binom{m}{k}}
\abs{
(v_k(\cdot,x)-v_k(\cdot,y))
\star\varphi_{\epsilon}(s)
}
\lesssim
\abs{x-y}^\frac12
\ell^{2-m}.
\end{align*}
In this way, fixing our test function as $ G=f $ with $ \lambda=V^{\ell,\epsilon} $ we see that
\begin{align*}
\inner{w_t,\phi}
\lesssim
t^{\frac12}\ell^{-m}(1+\epsilon t\ell)
\norm{\partial_{uu}G}_{L^2([0,t]\times\mathbb{T})}
\end{align*}
and we need to estimate the integral on the right-hand side above.
Let us multiply both sides of \eqref{dual:prob} by $ \partial_{uu}G $ and integrate once in space and time, obtaining
\begin{align*}
0
=\int_0^t\int_{\mathbb{T}}\partial_sG\partial_{uu}G\mathrm{d} u\mathrm{d} s
+\int_0^t\int_{\mathbb{T}}V^{\ell,\epsilon}\abs{\partial_{uu}G}^2\mathrm{d} u\mathrm{d} s.
\end{align*}
An integration by parts on the first integral on the right-hand side above yields
\begin{align*}
-
\int_0^t\int_{\mathbb{T}}\partial_{u}(\partial_sG)\partial_{u}G\mathrm{d} u\mathrm{d} s
=&-\frac12
\int_0^t\int_{\mathbb{T}}
\partial_s\left(\partial_uG\right)^2
\mathrm{d} u\mathrm{d} s
\\
=&
-\frac12\int_{\mathbb{T}}
\left\{
\left(\partial_uG_t(u)\right)^2
-\left(\partial_uG_0(u)\right)^2
\right\}
\mathrm{d} u
.
\end{align*}
Using the terminal condition and bounding from below $ (\partial_uG_0(u))^2\geqslant 0 $ and $ V^{\ell,\epsilon}>m $ we conclude that
\begin{align*}
\int_0^t\int_{\mathbb{T}}\abs{\partial_{uu}G}^2\mathrm{d} u\mathrm{d} s
\leqslant
\frac{1}{2m}\norm{\phi'}_{L^2(\mathbb{T})}^2.
\end{align*}
With this, and fixing $ \epsilon=1/\ell $ we obtain
\begin{align}\label{almost}
\inner{w_t,\phi}
\lesssim
t^{\frac12}\ell^{-m}
(
1+t
)
\norm{\phi'}_{L^2(\mathbb{T})}.
\end{align}
Denoting by $ w^\pm $ the positive/negative part of $ w $, we want to fix $ \phi(\cdot)=\mathbf{1}_{\{u\in\mathbb{T}:\; w_t(u)\geqslant0\}}(t,\cdot) $, obtaining that $ \rho^{(1)}\leqslant \rho^{(2)} $ a.e., and analogously take $ \phi(\cdot)=\mathbf{1}_{\{u\in\mathbb{T}:\; w_t(u)\leqslant0\}}(t,\cdot) $, obtaining instead $ \rho^{(1)}\geqslant \rho^{(2)} $ and leading to $ \rho^{(1)}=\rho^{(2)} $ a.e. To do so we need to consider in \eqref{almost} a sequence $ (\phi_n)_n\subset C(\mathbb{T}) $ converging to $ \phi $ at least in $ L^2 $ and such that $ \norm{\phi_n'}_{L^2(\mathbb{T})}<\infty $ for all $ n>0 $. Regarding the convergence, since $ \phi\in L^2(\mathbb{T}) $ and $ C(\mathbb{T}) $ is dense in $ L^p(\mathbb{T}) $ for all $ 1\leqslant p <\infty $, there is a sequence of continuous functions $ (\phi_n)_n $ approximating $ \phi $ in $ L^2(\mathbb{T}) $. This sequence of continuous functions can be approximated (in $ L^2 $) by a sequence of smooth functions $ (\phi_{n,k})_k $ \emph{via} mollification. We fix one of these smooth representatives as the terminal condition on the problem \eqref{dual:prob}. Taking the limit $ \ell\to+\infty $ in \eqref{almost} and then the limits on $ n $ and $ k $, and recalling that $ t\in[0,T] $ is arbitrary concludes the proof.
\end{proof}
\end{document}
\begin{document}
\title{Limits of the trivial bundle on a curve}
\begin{prelims}
\defAbstract{Abstract}
\abstract{We attempt to describe the vector bundles on a curve $C$ which are specializations of $\mathcal{O}_C^2\,$. We get a complete classification when $C$ is Brill-Noether-Petri general, or when it is hyperelliptic; in both cases all limit vector bundles are decomposable. We give examples of indecomposable limit bundles for some special curves.}
\keywords{Vector bundles; limits; Brill-Noether theory; hyperelliptic curves}
\MSCclass{14H60}
\languagesection{Fran\c{c}ais}{
\textbf{Titre. Limites du fibr\'e trivial sur une courbe} \commentskip \textbf{R\'esum\'e.} Nous essayons de d\'ecrire les fibr\'es vectoriels qui sont des sp\'ecialisations de $\mathcal{O}_C^{2}$. Nous obtenons une classification compl\`ete lorsque $C$ est g\'en\'erale au sens de Brill-Noether-Petri, ou lorsque $C$ est hyperelliptique; les fibr\'es limites sont d\'ecomposables dans chacune des deux situations. Nous donnons \'egalement des exemples de fibr\'es limites ind\'ecomposables sur certaines courbes sp\'eciales.}
\end{prelims}
\setcounter{tocdepth}{1} \tableofcontents
\section{Introduction}
Let $ C $ be a smooth complex projective curve, and $ E $ a vector bundle on $ C $, of rank $ r $. We will say that $ E $ \emph{is a limit of} $\mathcal{ O}_C^r$ if there exists an algebraic family $ (E_b)_{b\in B} $ of vector bundles on $ C $, parametrized by an algebraic curve $ B $, and a point $ \mathrm{o}\in B $, such that $ E_\mathrm{o}=E $ and $ E_b\cong \mathcal{ O}_C^r $ for $ b\neq \mathrm{o} $. Can we classify all these vector bundles? If $E$ is a limit of $\mathcal{ O}_C^2$ clearly $E\oplus \mathcal{ O}_C^{r-2}$ is a limit of $\mathcal{ O}_C^r$, so it seems reasonable to start in rank 2.
We get a complete classification in two extreme cases: when $C$ is generic (in the sense of Brill-Noether theory), and when it is hyperelliptic. In both cases the limit vector bundles are of the form $L\oplus L^{-1}$, with some precise conditions on $L$. However for large families of curves, for instance for plane curves, some limits of $\mathcal{ O}_C^2$ are indecomposable, and those seem hard to classify.
\section{Generic curves}
Throughout the paper we denote by $C$ a smooth connected projective curve of genus $g$ over $\mathbb{ C}$.
\begin{prop}\label{ex}
Let $L$ be a line bundle on $C$ which is a limit of globally generated line bundles \emph{(}in particular, any line bundle of degree $\geq g+1)$. Then $L\oplus L^{-1}$ is a limit of $\mathcal{ O}_C^2\,$.
\end{prop}
\begin{proof}
By hypothesis there exist a curve $B$, a point $\mathrm{o}\in B$ and a line bundle $\mathcal{L}$ on $C\times B$ such that $\mathcal{L}_{|C\times \{\mathrm{o}\} }\cong L$ and $\mathcal{L}_{|C\times \{\mathrm{b}\} }$ is globally generated for $b\neq \mathrm{o}$. We may assume that $B$ is affine and that $\mathrm{o}$ is defined by $f=0$ for a global function $f$ on $B$; we put $B^*:=B\smallsetminus\{\mathrm{o}\} $.
We choose two general sections $s,t$ of $\mathcal{L}$ on $C\times B^*$; reducing $B^*$ if necessary, we may assume that they generate $\mathcal{L}$. Thus we have an exact sequence on $C\times B^*$
\[0\rightarrow \mathcal{L}^{-1}\xrightarrow{\ (t,-s)\ } \mathcal{ O}_{C\times B^*}^2\xrightarrow{\ (s,t)\ } \mathcal{L}\rightarrow 0\]which corresponds to an extension class $e\in H^1(C\times B^*,\mathcal{L}^{-2})$. For $n$ large enough, $f^ne$ comes from a class in $H^1(C\times B,\mathcal{L}^{-2})$ which vanishes along $C\times \{\mathrm{o}\} $; this class gives rise to an extension
\[0\rightarrow \mathcal{L}^{-1}\longrightarrow \mathcal{E}\longrightarrow\mathcal{L}\rightarrow 0\]with $\mathcal{E}_{|C\times \{b\} }\cong \mathcal{ O}_C^2$ for $b\neq \mathrm{o}$, and $\mathcal{E}_{|C\times \{\mathrm{o}\} }\cong L\oplus L^{-1}$.\qed
\end{proof}
\begin{rem}\label{prop}{\rm
Let $E$ be a vector bundle limit of $\mathcal{ O}_C^2\,$. We have $\det E=\mathcal{ O}_C\,$, and $h^0(E)\geq 2$ by semi-continuity. If $E$ is semi-stable this implies $E\cong\mathcal{ O}_C^2\,$; otherwise $E$ is unstable. Let $L$ be the maximal destabilizing sub-line bundle of $E$; we have an extension $0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0$, with $h^0(L)\geq 2$. Note that this extension is trivial (so that $E=L\oplus L^{-1}$) if $H^1(L^2)=0$, in particular if $\deg(L)\geq g$.} \end{rem}
\begin{prop}
Assume that $ C $ is Brill-Noether-Petri general. The following conditions are equivalent:
\begin{itemize}
\item[\rm (i)] $ E $ is a limit of $ \mathcal{ O}_C^2 \,;$
\item[\rm (ii)] $ h^0(E)\geq 2 $ and $ \det E=\mathcal{ O}_C \, ;$
\item[\rm (iii)] $ E=L\oplus L^{-1} $ for some line bundle $ L $ on $ C $ with $ h^0(L)\geq 2 $ or $ L=\mathcal{ O}_C \,.$
\end{itemize}
\end{prop}
\begin{proof} We have seen that (i) implies (ii) (Remark \ref{prop}). Assume (ii) holds, with $E\not\cong \mathcal{ O}_C^2\,$. Then $E$ is unstable, and
we have an extension $0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0$ with $h^0(L)\geq 2$.
Since $ C $ is Brill-Noether-Petri general we have $ H^0(C,K_C\otimes L^{-2})=0 $ \cite[Ch.\ 21, Proposition 6.7]{ACG}, hence $ H^1(C,L^2)=0 $. Therefore the above extension is trivial, and we get (iii).
Assume that (iii) holds.
Brill-Noether theory implies that
any line bundle $ L $ with $ h^0(L)\geq 2 $ is a limit of globally generated ones
\footnote{Indeed, the subvariety $W^r_d$ of $\mathrm{Pic}^d(C)$ parametrizing line bundles $L$ with $h^0(L)\geq r+1$ is equidimensional, of dimension $g-(r+1)(r+g-d)$; the line bundles which are not globally generated belong to the subvariety $W^r_{d-1}+C$, which has codimension~$r$.}.
So (i) follows from Proposition \ref{ex}.\qed
\end{proof}
\section{Hyperelliptic curves}
\begin{prop}
Assume that $C$ is hyperelliptic, and let $H$ be the line bundle on $C$ with $h^0(H)=\deg(H)=2$. The limits of $\mathcal{ O}_C^2$ are the decomposable bundles $L\oplus L^{-1}$, with $\deg(L)\geq g+1$ or $L=H^k$ for $k\geq 0$.
\end{prop}
\begin{proof} Let $\pi :C\rightarrow \mathbb{ P}^1$ be the two-sheeted covering defined by $\lvert H\rvert$. Let us say that an effective divisor $D$ on $C$ is \emph{simple} if it does not contain a divisor of the form $\pi ^*p$ for $p\in\mathbb{ P}^1$.
We will need the following well-known lemma:
\begin{lem}
\label{lemma1}
Let $L$ be a line bundle on $C$.
\begin{itemize}
\item[\rm 1)] If $L=H^k(D)$ with $D$ simple and $\deg(D)+k\leq g$, we have $h^0(L)=h^0(H^k)=k+1$.
\item[\rm 2)] If $\deg(L)\leq g$, $L$ can be written in a unique way $H^k(D)$ with $D$ simple. If $L$ is globally generated, it is a power of $H$.
\end{itemize}
\end{lem}
\begin{proof}[Proof of Lemma \ref{lemma1}] 1) Put $\ell:=g-1-k$ and $d:=\deg(D)$. Recall that $K_C\cong H^{g-1}$. Thus by Riemann-Roch, the first assertion is equivalent to $h^0(H^{\ell }(-D))=h^0(H^{\ell })-d$.
We have $H^0(C,H^{\ell })=\allowbreak \pi ^*H^0(\mathbb{ P}^1,\mathcal{ O}_{\mathbb{ P}^1}(\ell))$; since $D$ is simple of degree $\leq \ell+1$, it imposes $d$
independent conditions on $H^0(C,H^{\ell })$, hence our claim.
2) Let $k$ be the greatest integer such that $h^0(L\otimes H^{-k})>0$; then $L=H^k(D)$ for some effective divisor $D$, which is simple since $k$ is maximal. By 1) $D$ is the fixed part of $\abs{L}$, hence is uniquely determined, and so is $k$.
In particular the only globally generated line bundles on $C$ of degree $\leq g$ are the powers of $H$.
\qed
\end{proof}
\noindent\emph{Proof of the Proposition} : Let $E$ be a vector bundle on $C$ limit of $\mathcal{ O}_C^2\,$. Consider the exact sequence
\begin{equation}\label{hyp}
0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0\, ,
\end{equation}
where
we can assume $\deg(L)\leq g$ (Remark \ref{prop}). By Lemma \ref{lemma1} we have $L=H^k(D)$ with $D$ simple of degree $\leq g-2k$. After tensor product with $H^k$, the corresponding cohomology exact sequence reads
\[0\rightarrow H^0(C,H^{2k}(D))\rightarrow H^{0}(C,E\otimes H^k)\rightarrow H^0(C,\mathcal{ O}_C(-D))\xrightarrow{\ \partial \ } H^1(C,H^{2k}(D))\]
which implies
$h^0(E\otimes H^k)= h^0(H^{2k}(D))+\dim\Ker \partial =2k+1+\dim\Ker \partial \ $ by Lemma~\ref{lemma1}.
By semi-continuity we have $h^0(E\otimes H^k)\geq 2h^0(H^k)=2k+2$; the only possibility is $D=0$ and $\partial =0$.
But $\partial (1)$ is the class of the extension (\ref{hyp}), which must therefore be trivial; hence $E=H^k\oplus H^{-k}$.\qed
\end{proof}
\section{Examples of indecomposable limits}
To prove that some limits of $\mathcal{ O}_C^2$ are indecomposable we will need the following easy lemma:
\begin{lem}\label{split}
Let $L$ be a line bundle of positive degree on $C$, and let \begin{equation}\label{2}
0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0
\end{equation} be an exact sequence. The following conditions are equivalent:
\begin{itemize}
\item[\rm (i)] $E$ is indecomposable;
\item[\rm (ii)] The extension \emph{(\ref{2})} is nontrivial;
\item[\rm (iii)] $h^0(E\otimes L)=h^0(L^2)$.
\end{itemize}
\end{lem}
\begin{proof} The implication (i)$\ \Rightarrow\ $(ii) is clear.
(ii)$\ \Rightarrow\ $(iii) : After tensor product with $L$, the cohomology exact sequence associated to (\ref{2}) gives
\[0\rightarrow H^0(L^2)\xrightarrow{\ i\ } H^0(E\otimes L)\longrightarrow H^0(\mathcal{ O}_C)\xrightarrow{\ \partial \ }H^1(L^2)\, , \]where $\partial $ maps $1\in H^0(\mathcal{ O}_C)$ to the extension class of (\ref{2}). Thus (ii) implies that $i$ is an isomorphism, hence (iii).
(iii)$\ \Rightarrow\ $(i): If $E$ is decomposable, it must be equal to $L\oplus L^{-1}$ by unicity of the destabilizing bundle. But this implies $h^0(E\otimes L)=h^0(L^2)+1. \qed
\end{proof}
The following construction was suggested by N. Mohan Kumar:
\begin{prop}
\label{proposition4}
Let $C\subset \mathbb{ P}^2$ be a smooth plane curve, of degree $d$. For $0<k<\dfrac{d}{4} $, there exist extensions
\[0\rightarrow \mathcal{ O}_{C}(k)\rightarrow E \rightarrow \mathcal{ O}_C(-k)\rightarrow 0\] such that $E$ is indecomposable and is a limit of $\mathcal{ O}_C^2\,$.
\end{prop}
\begin{proof}
Let $Z$ be a finite subset of $\mathbb{ P}^2$ which is the complete intersection of two curves of degree $k$, and such that $C\cap Z=\varnothing$. By \cite[Remark 4.6]{S}, for a general extension
\begin{equation}
\label{3}
0\rightarrow \mathcal{ O}_{\mathbb{ P}^2}(k)\rightarrow E\rightarrow \mathcal{I}_Z(-k)\rightarrow 0\, ,
\end{equation}
the vector bundle $E$ is a limit of $\mathcal{ O}_{\mathbb{ P}^2}^2$; therefore $E_{|C}$ is a limit of $\mathcal{ O}_C^2\,$.
The extension (\ref{3}) restricts to an exact sequence
\[0\rightarrow \mathcal{ O}_C(k)\rightarrow E_{|C}\rightarrow \mathcal{ O}_C(-k)\rightarrow 0\, .\]
To prove that $E_{|C}$ is indecomposable, it suffices by Lemma \ref{split} to prove that
$ h^0(E_{|C}(k))=h^0(\mathcal{ O}_C(2k))$. Since $2k<d$ we have $h^0(\mathcal{ O}_C(2k))=h^0(\mathcal{ O}_{\mathbb{ P}^2}(2k))=h^0(E(k))$, so in view of the exact sequence
\[0\rightarrow E(k-d)\longrightarrow E(k)\longrightarrow E_{|C}(k)\rightarrow 0\]it suffices to prove $H^1(E(k-d))=0$, or by Serre duality $H^1(E(d-k-3))=0$.
The exact sequence (\ref{3}) gives an injective map $H^1(E(d-k-3))\hookrightarrow H^1(\mathcal{I}_Z(d-2k-3))$. Now since $Z$ is a complete intersection we have an exact sequence
\[0\rightarrow \mathcal{ O}_{\mathbb{ P}^2}(-2k)\rightarrow \mathcal{ O}_{\mathbb{ P}^2}(-k)^2\rightarrow \mathcal{I}_Z\rightarrow 0\, ;\]
since $4k<d$ we have $H^2(\mathcal{ O}_{\mathbb{ P}^2}(d-4k-3))=0$, hence $H^1(\mathcal{I}_Z(d-2k-3))=0$, and finally $H^1(E(d-k-3))=0$ as asserted.\qed
\end{proof}
We can also perform the Str\o mme construction directly on the curve $C$, as follows.
Let $L$ be a base point free line bundle on $C$. We choose sections $s,t\in H^0(L)$ with no common zero. This gives rise to a Koszul extension
\begin{equation}\label{K}
0\rightarrow L^{-1}\xrightarrow{\ i\ } \mathcal{ O}_C^2 \xrightarrow{\ p\ } L\rightarrow 0\quad\mbox{with }\ i=(-t,s)\,,\ p=(s,t)\,.\end{equation}
We fix a nonzero section $u\in H^0(L^2)$. Let $\mathcal{L}$ be the pull-back of $L$ on $C\times \mathbb{ A}^1$. We consider the complex (``monad")
\[\mathcal{L}^{-1}\xrightarrow{\ \alpha \ }\mathcal{L}^{-1}\oplus \mathcal{ O}^2\oplus \mathcal{L} \xrightarrow{\ \beta \ }\mathcal{L}\ ,\qquad \alpha =(\lambda ,i,u)\,,\ \beta =(u,p,-\lambda ), \]where $\lambda $ is the coordinate on $\mathbb{ A}^1$.
Let $\mathcal{E}:=\Ker \beta /\operatorname{Im} \alpha$, and let $E:=\mathcal{E}_{|C\times \{0\} }$.
\begin{lem}
$E$ is a rank 2 vector bundle, limit of $\mathcal{ O}_C^2\,$. There is an exact sequence \allowbreak$0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0$; the corresponding extension class in $H^1(L^2)$ is the product by $u^2\in H^0(L^4)$ of the class $e\in H^1(L^{-2})$ of the Koszul extension $(\ref{K})$.
\end{lem}
\begin{proof} The proof is essentially the same as in \cite{S}; we give the details for completeness.
For $\lambda \neq 0$, we get easily $\mathcal{E}_{|C\times \{\lambda \} }\cong\mathcal{ O}_C^2\,$; we will show that $E$ is a rank 2 vector bundle. This implies that $\mathcal{E}$ is a vector bundle on $C\times \mathbb{ A}^1$,
and therefore that $E$ is a limit of $\mathcal{ O}_C^2\,$.
Let us denote by $\alpha _0,\beta _0$ the restrictions of $\alpha $ and $\beta $ to $C\times \{0\} $.
We have $\Ker \beta_0 =\allowbreak L\oplus N$, where $N$ is the kernel of $(u,p):L^{-1}\oplus \mathcal{ O}_C^2\rightarrow L$. Applying the snake lemma to the commutative diagram
\[\xymatrix@M=5pt{0\ar[r] &L^{-1}\ar[r]^{i}\ar[d]&\mathcal{ O}^2\ar[r]^{p}\ar@{^{(}->}[d]&L \ar[r]\ar@{=}[d]&0\\
0\ar[r] &N\ar[r]& L^{-1}\oplus \mathcal{ O}^2 \ar[r]& L \ar[r]&0\\
}\]we get an exact sequence
\begin{equation}\label{N}
0\rightarrow L^{-1}\rightarrow N\rightarrow L^{-1}\rightarrow 0\,,
\end{equation} which fits into a commutative diagram
\[\xymatrix{0\ar[r] &L^{-1}\ar[r]\ar@{=}[d]&N\ar[r]\ar[d]&L^{-1} \ar[r]\ar[d]^{\times u}&0\,\hphantom{.}\\
0\ar[r] &L^{-1}\ar[r]& \mathcal{ O}^2 \ar[r]& L \ar[r]&0\, ;
}\]this means that the extension (\ref{N}) is the pull-back by $\times u:L^{-1}\rightarrow L$ of the Koszul extension (\ref{K}).
Now since $E$ is the cokernel of the map $L^{-1}\rightarrow L\oplus N$ induced by $\alpha _0$, we have a commutative diagram
\[\xymatrix{0\ar[r] &L^{-1}\ar[r]\ar[d]^{\times u}&N\ar[r]\ar[d]&L^{-1} \ar[r]\ar@{=}[d]&0\\
0\ar[r] &L\ar[r]& E \ar[r]& L^{-1} \ar[r]&0
}\]so that the extension $L\rightarrow E\rightarrow L^{-1}$ is the push-forward by $\times u$ of (\ref{N}). This implies the Lemma.\qed
\end{proof}
Unfortunately it seems difficult in general to decide whether the extension $L\rightarrow E\rightarrow L^{-1}$ is nontrivial. Here is a case where we can conclude:
\begin{prop}\label{theta}
Assume that $C$ is non-hyperelliptic. Let $L$ be a globally generated line bundle on $C$ such that $L^2\cong K_C$. Let $0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0$ be the unique nontrivial extension of $L^{-1}$ by $L$. Then $E$ is indecomposable, and is a limit of $\mathcal{ O}_C^2\,$.
\end{prop}
\begin{proof} We choose $s,t$ in $H^0(L)$ without common zero, and use the previous construction. It suffices to prove that we can choose $u\in H^0(K_C)$ so that $u^2e\neq 0$: since $H^1(K_C)\cong\mathbb{ C}$, the vector bundle $E$ will be the unique nontrivial extension of $L^{-1}$ by $L$, and indecomposable by Lemma \ref{split}.
Suppose that $u^2e=0$ for all $u$ in $H^0(K_C)$; by bilinearity this implies $uve=0$ for all $u,v$ in $H^0(K_C)$. Since $C$ is not hyperelliptic, the multiplication map
$\mathsf{S}^2H^0(K_C)\rightarrow H^0(K_C^2)$ is surjective, so we have $we=0$ for all $w\in H^0(K_C^2)$. But the pairing
\[H^1(K_C^{-1})\otimes H^0(K_C^2)\rightarrow H^1(K_C)\cong\mathbb{ C}\]is perfect by Serre duality, hence our hypothesis implies $e=0$, a contradiction. \qed
\end{proof}
\begin{rem}{\rm
In the moduli space $\mathcal{M}_g$ of curves of genus $g\geq 3$, the curves $C$ admitting a line bundle $L$ with $L^2\cong K_C$ and $h^0(L)$ even $\geq 2$ form an irreducible divisor \cite{T2}; for a general curve $C$ in this divisor, the line bundle $L$ is unique, globally generated, and satisfies $h^0(L)=2$ \cite{T1}.
Thus Proposition \ref{theta} provides for $g\geq 4$ a codimension 1 family of curves in $\mathcal{M}_g$ admitting an indecomposable vector bundle limit of $\mathcal{ O}_C^2\,$.}
\end{rem}
\begin{rem}{\rm
Let $\pi :C\rightarrow B$ be a finite morphism of smooth projective curves. If $E$ is a vector bundle limit of $\mathcal{ O}_B^2\,$, then clearly $\pi ^*E$ is a limit of $\mathcal{ O}_C^2\,$. Now if $E$ is indecomposable, $\pi ^*E$ is also indecomposable. Consider indeed the nontrivial extension $0\rightarrow L\rightarrow E\rightarrow L^{-1}\rightarrow 0$ (Remark \ref{prop}); by Lemma \ref{split} it suffices to show that the class $e\in H^1(B,L^2)$ of this extension remains nonzero in $H^1(C,\pi ^*L^2)$. But the
pull-back homomorphism $\pi ^*:H^1(B,L^2) \rightarrow H^1(C,\pi ^*L^2)$ can be identified with the homomorphism $H^1(B,L^2) \rightarrow H^1(B,\pi _*\pi ^*L^2)$ deduced from the linear map $L^2\rightarrow \pi _*\pi ^*L^2$, and the latter is an isomorphism onto a direct factor; hence $\pi ^*$ is injective and $\pi ^*e\neq 0$, so $E$ is indecomposable.
Thus any curve dominating one of the curves considered in Propositions \ref{proposition4} and \ref{theta} carries an indecomposable vector bundle which is a limit of $\mathcal{ O}_C^2\,$.}
\end{rem}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\bibliographymark{References}
\def$'${$'$}
\end{document}
\begin{document}
\title{Exact solutions to a carsharing pricing and relocation problem under uncertainty}
\author{Giovanni Pantuso \\
Department of Mathematical Sciences, University of Copenhagen,\\
Universitetsparken 5, 2100, Copenhagen, Denmark\\
gp@math.ku.dk.}
\maketitle
\begin{abstract}
In this article we study the problem of jointly deciding carsharing prices and vehicle relocations. We consider carsharing services operating in the context of multi-modal urban transportation systems.
Pricing decisions take into account the availability of alternative transport modes, and customer preferences with respect to these. In order to account for the inherent uncertainty in customer preferences, the problem is formulated as a mixed-integer two-stage stochastic program with integer decision variables at both stages. We propose an exact solution method for the problem based on the integer L-Shaped method which exploits an efficient exact algorithm for the solution of the subproblems. Tests on artificial instances based on the city of Milan illustrate that the method can solve, or find good solutions to, moderately sized instances for which a commercial solver fails. Furthermore, our results suggest that, by adjusting prices between different zones of the city, the operator can attract significantly more demand than with a fixed pricing scheme and that such a pricing scheme, coupled with a sufficiently large fleet, significantly reduces the relevance of staff-based relocations. A number of issues, that remain to be addressed in future research, are pointed out in our conclusions.
\end{abstract}
\section{Introduction}\label{sec:intro}
High dependency on private vehicles and low occupancy rates
increase car usage and congestion in many cities of the world, contributing to pollution and poor urban air quality \cite{ChoYAB18}.
Improvements of public transport \cite{PruKLF12} and road pricing measures
\cite{CarG11,Dud13} have, to a large extent, failed to provide sustainable solutions \cite{ChoYAB18,BarFDS12,MayN96}.
In this context, shared mobility, and particularly carsharing, has emerged as a viable alternative, linked to, e.g.,
a decrease in congestion \cite{CraEHN12}, pollution \cite{MarS16}, land used \cite{ShaC13} and transport costs \cite{Dun11,MouPC19}.
In reaction to the high flexibility demanded by users, modern carsharing services are commonly designed for \textit{on-demand}, \textit{short-term}, \textit{one-way} usage \cite{IllH19}.
That is, users are allowed to rent a car without reservation and return it as soon as, and wherever (within the operating area), their journey is completed.
The drop-off location/station is thus typically different from the pick-up location/station.
Such configuration poses new planning challenges to \textit{carsharing operators} (CSOs).
On-demand rentals make the CSO unaware of when, where and for how long new rentals will occur.
One-way rentals create frequent imbalances in the distribution of vehicles, that is an accumulation of vehicles in low-demand zones, and vehicle shortage in high-demand zones \cite{BarTX04,BoyZG15} with levels of service dropping accordingly. A central task for a CSO is to provide a distribution of vehicles in the business area compatible with demand tides and oscillations \cite{WasJB13,WeiB15}.
As a prime form of response to these challenges, CSOs initiate staff-based vehicle relocations between stations/zones of the city before shortages occur and customer satisfaction levels drop \cite{JorC13,IllH19}. That is, CSO's staff reach designated cars and drives them to different places.
This gives rise to the so called \textit{Vehicle Relocation Problem} (VReP), which consists of determining the relocations to perform in order to prepare for future demand. The research literature covers several problem settings, different levels of detail and granularity of decisions, as well as different mathematical approaches, see e.g., \cite{BoyZG17,WeiB15,BoyZG15,BruCL14,JorCB14,KekCMF09,KekCC06,MukW05,BarTX04,BarT99,FolHFAP20,HelJMAFP21}. These studies are thoroughly reviewed and classified in a recent survey, see \cite{IllH19}.
The survey also reports that staff-based rebalancing could be complemented by manipulating demand through dynamic pricing.
In fact, users of urban mobility services typically choose among different transport modes (e.g., metro, carsharing, bikesharing) that vary in a number of key attributes including price,
see e.g., \cite{ZoeK16,HanP18, Pan20}. However, the authors of the survey comment that, at present times, this issue is solely ``an important part of future research'' \cite{IllH19}.
Compared with relocation decisions, carsharing pricing strategies have received limited attention in the research literature though the number of available studies is growing.
We can distinguish two main categories of pricing strategies, which we refer to as \textit{individual} and \textit{collective}, according to their end recipient.
Individual pricing strategies are targeted to individual users. They require an interaction between the CSO and the individual user by means of which the final trip price, pick-up or drop-off location are agreed upon. As an example, the operator sends the individual user offers in the form of discounts or bonuses in exchange to a trip configuration which the operator deems beneficial for the entire system. Collective pricing strategies are, instead, targeted to the entire user base.
They have the scope of influencing the cumulative rental demand by, e.g., decreasing the price of rentals to/from selected zones,
but do not require an interaction with the individual user, nor their reply as to whether the price is accepted or not. The approach proposed in this article belongs to the latter category.
Several pricing strategies can be classified as \textit{individual}.
In \cite{WagWBN15} a method is developed that identifies vehicles placed in low demand zones using idle time as a proxy.
The method then offers the user a drop-off location with low expected idle time in exchange for a discount (e.g., free minutes of usage).
The method is evaluated in a simulation framework based on the city of Vancouver. The authors report that the average vehicle idle time is decreased by up to $16$ percent.
In \cite{WasJ16} it is assumed that each customer is interested in a trip between a specific origin and destination and is sensitive to price.
The operator then offers a price for the given trip in order to, ideally, incentivize/prevent favorable/unfavorable car movements.
They model the carsharing system as a continuous-time Markov chain where a pricing policy is input to the model.
In \cite{DifSS18} a user-based relocation method is presented in which the users are offered to leave the car
in a location different from the one planned in exchange for a fare discount.
The authors formulate the decision problem as mixed-integer nonlinear programming problem,
and model customers preferences with respect to the offer of alternative drop-off locations expressing the corresponding utility as a functions of the distance between the desired and offered drop-off locations.
In \cite{StoG21} a predictive, user-based, relocation strategy is introduced for station-based carsharing services.
They assume that the CSO can offer each returning customer an incentive to relocate.
That is, upon the arrival of a customer, the CSO determines whether to offer an incentive, what the incentive should be and where the vehicle should be relocated.
Therefore, an optimization problem is solved upon the arrival of every customer. Estimated customer preferences are used to model their reaction to incentive offers.
Station-based services are considered also in \cite{LiuXC21} where users send real-time trip requests to the CSO, specifying their origin and destination.
Upon receiving requests, the CSO assigns vehicles to users, plans staff-based relocations and determines incentives for customers, to whom a car has already been assigned, in exchange
for a change of destination.
The focus of \cite{WuLSP21} is instead on free-floating services.
The system they consider is organized as follows. Each user sends a request for a vehicle either on-demand or as a reservation for a future rental.
With the request they are required to specify intended pick-up and drop-off locations and departure time.
The system elaborates the available requests and responds to users with proposed service options.
Each service option includes a pick-up and drop-off location, pick-up time, and price.
Finally, in \cite{WanJL21} a pricing scheme to induce user-based relocations is introduced, with focus on station-based one-way services.
The pricing scheme consists of adding or deducting a fixed expense to the
original expense to adjust the users' preferential pick-up and drop-off location.
To study the user behavior a questionnaire is carried out. The pricing problem is finally
formulated as an optimization problem.
A number of articles have also focused on \textit{collective} pricing strategies.
A mixed-integer nonlinear programming model is provided in \cite{XuML18} for the joint problem of deciding fleet size,
trip pricing and staff-based relocations for an electric station-based carsharing service.
The authors consider demand elasticity with respect to prices using a logit-based function. The authors test the method on a case study based in Singapore.
In \cite{WanM19} a pricing scheme is developed with the scope of influencing user demand and keeping the distribution of vehicle at a given balance level.
After modeling the relationship between carsharing price and demand, the authors develop a nonlinear optimization model to define pricing schemes which minimize the deviation from inventory upper and lower bounds at each charging station.
A station-based electric one-way service is considered in \cite{XieWWDM19}.
The CSO is to decide charging schedules and service prices.
The authors assume that rental demand is influenced by prices and adopt a linear elasticity function to express demand as a function of price.
Electric carsharing services are considered also in \cite{RenLLHL19} where a dynamic pricing scheme is proposed with the scope of solving imbalances in the
distribution of vehicles, as well as facilitating vehicle-grid-integration. For each origin and destination station the operator can
influence demand using two price adjustment levels. Rental demand is connected to prices via a price elasticity to a reference demand for
a default price. Pricing decisions are made using a mixed-integer nonlinear program.
Finally, in \cite{LuCZLL21} a bilevel nonlinear mathematical programming model is proposed to determine carsharing prices and staff-based relocations.
In the upper level, the carsharing operator determines vehicles relocations and prices.
In the lower level, travelers choose travel modes from a cost-minimization perspective and demand is computed using a logit model.
The authors assume a one-way station-based carsharing system in competition with private cars.
The method presented in the present paper can be classified as a collective pricing strategy and extends the available literature in a number of ways.
First, the available methods do not take into account the impact of alternative transport modes on transport choices.
Carsharing services live in multi-modal transport systems and failure to model this heterogeneity may result into myopic models of customers behavior.
As an example, compared to \cite{LuCZLL21}, in the present paper customers choices are not limited to private or shared cars.
Rather, we assume that customers may choose among any number of available transport services (e.g., bicycle and bus).
This, in turn, can help the CSO set prices from/to a given zone also as a function of the alternatives available in the zone.
Second, the articles that include demand elasticity, typically limit their attention to elasticity with respect to prices, see e.g., \cite{XuML18,LuCZLL21} and \cite{StoG21}.
In this article we allow the operator to model customers preferences with respect to any number of both exogenous and endogenous characteristics of the service such as,
but not limited to, travel time and waiting time. Such elasticity is modeled using utility functions which yield a linear optimization problem as long as
the function is linear in the endogenous characteristics of the service e.g., price.
Finally, extending all available methods, we explicitly account for uncertainty with respect to customer preferences.
That is, we consider that a portion of the preferences of each customer is unknown to the operator and is, as such, handled by means of a stochastic program.
The contributions of this article can be stated as follows.
\begin{enumerate}
\item We propose a two-stage integer stochastic programming model for the joint pricing and relocation problem.
The central idea is to influence demand by acting on prices, and performing preventive relocations accordingly,
in order to maximize expected profits.
To model the interplay between pricing decisions and customers choices we follow the recipe first provided by \cite{BieS16}
for integrating demand models within mixed integer linear optimization models.
This framework consists of modeling user preferences, and the uncertainty therein, by means of utility functions.
A discretization of the unknown portion of the utility, and the adoption of utility functions which are linear in the decision variables of the model,
ensure that the resulting optimization model is linear.
This framework has also been used by e.g., \cite{PanABG17} in the context of parking services and \cite{HanP18} in the context of carsharing,
and a more general description is provided in \cite{PanBGA21}.
\item To solve the resulting stochastic program with integer variables at both stages,
we propose an exact L-Shaped method that exploits a compact reformulation and efficient exact algorithm for the integer subproblems.
\item We provide empirical evidence on the performance of the algorithm and on the solutions obtainable,
based on artificial instances built on data from the city of Milan. An instance generator is made available online.
\end{enumerate}
The remainder of this article is organized as follows.
In \Cref{sec:prob} we define the problem and clarify modeling assumptions.
In \Cref{sec:model} we provide an extensive formulation of the problem which has the scope of explicitly defining the relationship between pricing decisions and customer choices.
We introduce the model, particularly the second-stage, in an extensive and discursive manner in order to make the interplay between pricing decisions and customers demand explicit.
In \Cref{sec:ls} we describe an integer L-Shaped method to find exact solutions to the problem.
This is enabled by a compact and more tractable reformulation of the second-stage problem where customers choices are pre-processed,
and by an exact greedy algorithm for solving the second-stage problem, both described in \Cref{sec:ls}.
In \Cref{sec:instances} we present a set of artificial instances based on the carsharing services offered in the Italian city of Milan. The same instances as well as an instance generator are made available online.
In \Cref{sec:results} we present the results of a computational study. We shed light on the efficiency of the algorithm and comment on the solutions obtainable by means of the model introduced.
In \Cref{sec:conclusions} we draw final conclusions, point out existing limitations of this work and discuss possible avenues of future research.
\section{Problem definition and assumptions} \label{sec:prob}
A CSO offers one-way, reservation-free, carsharing services and is faced with the problem of jointly deciding the prices to charge and relocations to perform in order to comply with demand.
The characteristics of the service, and the perimeter of the corresponding decision problem, are clarified by the following assumptions.
\begin{figure}
\caption{Target periods and decision timing. In this example target periods have a length of one hour and pricing and relocation decisions are made and implemented before the beginning of each target period.}
\label{fig:target_period}
\end{figure}
\begin{description}
\item[A0-Target periods] The operating hours are partitioned into a number of distinct \textit{target periods},
that is, portions of the operating hours in which the CSO may, in general, apply different prices and distributions of the fleet, see \Cref{fig:target_period}.
Before each target period, the CSO must decide i) the prices to apply during the target period and ii) the relocations to perform in anticipation of the uncertain rental demand during the target period.
The CSO plans for each target period independently based on updated system information (e.g., fleet distribution and individual vehicles' status) and demand outlook within the target period.
In the example in \Cref{fig:target_period}, before 12.00 the CSO must decide the prices to apply in target period $1$ (12.00 -- 13.00) and the relocations to perform before that.
Thus, in this case the target period lasts for one hour. Similarly, one may consider longer target periods,
e.g., morning hours and afternoon hours, as well as shorter ones, e.g., $30$ minutes target periods, depending on how often it is sensible to adjust prices in the specific context.
\item[A1-Business area] The operating area is made of a finite set of locations, henceforth \textit{zones}, see \Cref{fig:zones}.
If the carsharing service is station-based a zone naturally represents a station. If the service is free-floating we assume that the business area is suitably partitioned into a number of zones, and each zone is represented by a suitable geographical location.
\item[A2-Pricing scheme] The price is made of a \textit{per-minute fee} and a \textit{drop-off fee}. The per-minute fee is valid throughout the day (i.e., in all target periods) and is independent of the origin and destination of the trip.
Instead, the drop-off fee can be different in each target period and for each origin and destination. \Cref{fig:zones} provides an example where the per-minute fee is Euro $0.2$, independently of the origin and destination, while for each pair of zones a different drop-off fee is set.
In the example, a drop-off fee of Euro $1.5$ is charged if the car is picked up in zone $z_1$ and returned in zone $z_3$, while a drop-off fee of Euro $-1$ is charged if the car is picked up in zone $z_3$ and returned in zone $z_2$.
Thus, we assume that the drop-off fee may also be negative to encourage desired movements of cars and increase demand.
This setup generalizes the pricing schemes adopted in a number of carsharing services which typically charge a positive drop-off fee only if the customer returns the car in specific, unfavorable, zones of the city, or provide incentives, such as free driving minutes, to pick up a car in specific, unfavorable, zones.
In order to keep the pricing scheme easy to communicate to customers, we assume the CSO must choose among only a finite set of possible drop-off fees. In the example of \Cref{fig:zones} this set is Euro $\{-1,0,1,1.5,2\}$.
\item[A3-Alternative transport services] The business area offers a number of alternative transport services (e.g., public transport and bicycles) outside the control of the CSO.
The alternative services may be different for each pair of zones. Each alternative service has unlimited capacity (i.e., each customer can choose any alternative service without decreasing their availability). In the example in \Cref{fig:zones}, carsharing is offered on all origin-destination pairs,
while busses are not an available alternative for moving from zone $z_3$ to zone $z_1$, and riding a bicycle is not an option between $z_2$ and $z_3$ due to e.g., the absence of suitable bicycle lanes.
\item[A4-Customers are informed] The CSO is able to inform customers about the current price from their location to every other zone, prior to rentals.
In the example of \Cref{fig:zones}, the CSO is able to inform a user in $z_1$ (e.g., on the mobile application used to locate the car) that, if the car is returned in $z_2$, there will be a drop-off fee of Euro $1.5$, in addition to the per-minute fee.
Customers are also aware about the availability of alternative transport services. Considering the example in \Cref{fig:zones}, a customer moving between $z_1$ and $z_2$ knows that they may use bus, bicycle, and carsharing.
For all possible transport modes (including carsharing) the user knows the respective prices and characteristics (e.g., waiting time and travel time).
\item[A5-Closed market] A customer chooses exactly one transport service among the available ones. This corresponds to saying that a customer does not give up their trip. In the example of \Cref{fig:zones}, a customer moving from $z_1$ to $z_2$ will eventually choose to travel either by bicycle, carsharing or bus, and complete their journey.
\item[A6-Customers preferences] The CSO is able to describe a portion of customers' travel preferences as a function of different observable characteristics of the available transport services (e.g., travel time, price and waiting time). Nevertheless, the choice of each customer depends also on a number of additional elements not observable by the CSO. Therefore, customers' preferences are partially unknown to the CSO. The unknown part of customers' preferences is fully described by a probability distribution.
\item[A7-Direct rentals] Customers traveling with shared cars drive directly from their origin to their destination zone. This assumption is made for simplicity and is without loss of generality.
Different travel patterns can be included simply by modeling customer-specific travel times in \eqref{eq:utility}.
\item[A8-Homogeneous fleet] All shared vehicles are identical. This assumption is made for the sake of simplicity in the exposition of the reformulation of the second-stage problem and is without loss of generality.
Throughout the text we will comment on the necessary modifications in case of a heterogeneous fleet.
\item[A9-Profit maximization] The CSO maximizes profits. While other objectives may be considered, such as maximizing demand served, or minimizing zonal deficit of cars, profits are the central objective of private carsharing operators.
\item[A10-One-way trips] For the sake of simplicity, we assume one-way trips. That is, customers move from their origin zone to a different zone.
The model presented in \Cref{sec:model} can however accommodate also round trips, provided a suitable specification of the parameters of the trip (e.g., duration).
\end{description}
\begin{figure}
\caption{Zones, fees and alternative transport services. This example counts three zones, and three alternative transport services. Not all transport services are available between each pair of zones. Prices are expressed in Euro.}
\label{fig:zones}
\end{figure}
Based on these assumptions, the problem can be briefly stated as follows.
Given (a) a target period, (b) the cumulative mobility demand between each pair of zones in the target period, (c) usage and relocation costs, (d) the current distribution of cars, (e) a model of customers preferences including a probability distribution describing customer preferences unknown to the CSO, the CSO is to decide
i) the drop-off fees to apply during the target period and ii) the relocations to perform in anticipation of the uncertain rental demand during the target period in order to maximize expected profits.
\section{Mathematical model}\label{sec:model}
Consider an urban area represented by a finite set $\mathcal{I}$ of zones (e.g., charging stations or a suitable partition of the business area)
and a CSO offering a finite set of shared vehicles $\mathcal{V}$.
Before the beginning of the target period, the CSO is to decide the drop-off fee between each pair of zones and the relocations to perform to better serve demand in the target period.
At the time of planning, the fleet is geographically dispersed in the urban area as the result of previous rentals.
Let decision variable $z_{vi}$ be equal to $1$ if vehicle $v$ is made available for rental in (possibly relocated to) zone $i$ in the target period, $0$ otherwise.
Let $C^R_{vi}$ be the relocation cost borne by the CSO to make vehicle $v$ available in zone $i$. This cost is zero if the vehicle is initially in zone $i$ and positive otherwise.
Let $\mathcal{L}$ be a finite set of drop-off fees the CSO may apply. Let decision variable $\lambda_{ijl}$ be equal to $1$ if fee $l$ is applied between zone $i$ and zone $j$, $0$ otherwise.
Finally, let $z:=(z_{vi})_{i\in\mathcal{I},v\in\mathcal{V}}$ and $\lambda:=(\lambda_{ijl})_{i,j\in\mathcal{I},l\in\mathcal{L}}$.
The carsharing pricing and relocation problem is thus
\begin{subequations}
\label{eq:1S}
\begin{align}
\label{eq:1S:obj}&\max-\sum_{v\in\mathcal{V}}\sum_{i\in\mathcal{I}}C^R_{vi}z_{vi}+Q(z,\lambda)\\
\label{eq:1S:c1}&\sum_{i\in\mathcal{I}}z_{vi} = 1 & v\in\mathcal{V}\\
\label{eq:1S:c2}&\sum_{l\in\mathcal{L}}\lambda_{ijl}=1& i,j\in\mathcal{I}\\
&z_{vi}\in\{0,1\} & i\in\mathcal{I},v\in\mathcal{V}\\
&\lambda_{ijl}\in\{0,1\} & i,j\in\mathcal{I},l\in\mathcal{L}.
\end{align}
\end{subequations}
Constraints \eqref{eq:1S:c1} ensure that each vehicle is made available in exactly one zone.
Constraints \eqref{eq:1S:c2} state that exactly one drop-off fee can be selected between each origin $i$ and destination $j$.
The objective function \eqref{eq:1S:obj} represents the expected profit obtained in the target period.
The first term consists of the total relocation cost while the second term $Q(z,\lambda)$ represents the expected revenue from rentals as a result of pricing and relocation activities.
The meaning of $Q(z,\lambda)$ will be made explicit by the end of this section as a result of the definition of the second-stage problem which we are now introducing.
Once relocation ($z$) and pricing ($\lambda$) decisions have been made, the CSO observes the consequent customers rentals.
The business area offers a set $\mathcal{A}$ of alternative transport services, outside the control of the CSO, such as metro, busses and private bicycles.
Each service has, in general, a different price and different characteristics.
Let decision variable $p_{vij}$ be the price of service $v\in\mathcal{V}\cup\mathcal{A}$ between zones $i$ and $j$.
The price of a carsharing ride between zones $i$ and $j$ is
\begin{equation}\label{eq:pV}
p_{vij} = P^VT^{CS}_{ij} + \sum_{l\in\mathcal{L}}L_{l}\lambda_{ijl}\qquad \forall v\in\mathcal{V}, i,j\in\mathcal{I}
\end{equation}
where parameter $P^V$ is the carsharing per-minute fee,
$T^{CS}_{ij}$ the driving time between zones $i$ and $j$ and $L_{l}$ the value of drop-off fee at level $l\in\mathcal{L}$ in some currency.
Note that, in case of a heterogeneous fleet, it is simply necessary to make the per-minute fee and the driving time vehicle-dependent.
Instead, the price of alternative services is entirely exogenous, that is
\begin{equation}\label{eq:pA}
p_{vij} = P_{vij}\qquad \forall v\in\mathcal{A}, i,j\in\mathcal{I}
\end{equation}
where parameter $P_{vij}$ is the price of alternative service $v\in\mathcal{A}$ between $i$ and $j\in\mathcal{I}$.
Let $\mathcal{K}$ be the set of customers, with $\mathcal{K}_{i}\subseteq\mathcal{K}$ being the set of customers traveling from zone $i\in\mathcal{I}$ and $\mathcal{K}_{ij}\subseteq\mathcal{K}_i$ the set of customers traveling from $i\in\mathcal{I}$ to $j\in\mathcal{I}$ in the target period.
Consider an individual customer $k$. The customer is faced with a choice among a finite number of alternative transport services that can bring them to their destination.
Using the fairly standard assumption that customers maximize their utility, we can state that each service will provide the customer a different utility, and that the customer will choose the transport
service that provides them the highest utility. This utility is known to the customer but not to the CSO.
Consider now the CSO. As we said, the CSO is not aware of the utility provided by the different services to each customer.
Rather, the CSO is aware of a number of characteristics of the different services, primarily the price $p_{vij}$, and some additional characteristics, say $\pi^1_{vij},\ldots,\pi^N_{vij}$, for service $v$ between $i$ and $j$ (e.g., travel time and waiting time),
as well as possibly some characteristics of the decision maker.
Based on this, the CSO can specify a function that relates these, known, characteristics to the utility obtained by the customer.
We denote this function as
$$F_k(p_{vij},\pi_{vij}^1,\ldots,\pi_{vij}^N)$$
However, there are additional elements that influence the utility that the CSO does not or cannot observe.
For this reason, the utility is better represented by
$$F_k(p_{vij},\pi_{vij}^1,\ldots,\pi_{vij}^N)+\tilde{\xi}_{kv}$$
where $\tilde{\xi}_{kv}$ is a random variable that captures the difference between the utility that the CSO is able to model and the true utility observed by the customer.
Different distributions for $\tilde{\xi}_{kv}$ will lead to different choice models. As an example, the popular Logit model is obtained when each $\tilde{\xi}_{kv}$ follows, independently, an identical extreme value distribution (Gumbel type I), and the Probit model is obtained when it follows a multivariate normal distribution.
These, and additional choice models, as well as a discussion of their fundamental assumptions and limitations are discussed in, e.g., \cite{Tra09}. See also \cite{BenB99} for an exposition related to transport choices.
Let now $u_{ijkv}$ be a decision variable which captures the utility obtained by customer $k\in\mathcal{K}$ when moving from $i$ to $j\in \mathcal{I}$ using service $v\in \mathcal{V}\cup\mathcal{A}$.
Given a realization $\xi_{kv}$ of the random term $\tilde{\xi}_{kv}$ the utility is determined by
\begin{equation}\label{eq:utility}
u_{ijkv} = F_k(p_{vij},\pi^1_{vij},\ldots,\pi^N_{vij})+\xi_{kv}\qquad \forall i,j\in \mathcal{I},k\in\mathcal{K}_{ij},v\in \mathcal{V}\cup\mathcal{A}
\end{equation}
Note that, since the observed characteristics of the different transport services, $\pi^1_{vij},\ldots,\pi^N_{vij}$, are given,
constraints \eqref{eq:utility} are linear if $F_k(\cdot)$ is linear in $p_{vij}$, which is instead a decision variable.
Based on the utility provided by the different transport services, customers will make their choices.
Let decision variable $w_{ijkv}$ be equal to $1$ if customer $k\in\mathcal{K}_{ij}$ chooses service $v\in\mathcal{V}\cup\mathcal{A}$, $0$ otherwise.
A customer will choose exactly one service (see Assumption A5 in \Cref{sec:prob})
\begin{equation}\label{eq:onlyOne}
\sum_{v\in \mathcal{V}\cup\mathcal{A}}w_{ijkv} = 1 \qquad \forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij}
\end{equation}
In order for a customer to choose a service, the service must be available.
Let thus binary variable $y_{ikv}$ be equal to $1$ if service $v\in\mathcal{V}\cup\mathcal{A}$ is offered to customer $k\in\mathcal{K}_{i}$, $0$ otherwise.
Alternative services $v\in\mathcal{A}$ are always offered to customers whenever they are available at all, that is
\begin{equation}\label{eq:availA}
y_{ikv} = Y_{vi}\qquad\forall i\in \mathcal{I},k\in \mathcal{K}_{i},v\in\mathcal{A}
\end{equation}
where parameter $Y_{vi}$ is equal to $1$ if alternative service $v$ is available in zone $i$, $0$ otherwise.
Conversely, a shared car $v\in\mathcal{V}$ may be offered to customers in zone $i$ only if it is physically available at $i$, that is
\begin{equation}\label{eq:availV}
y_{ikv} \leq z_{vi}\qquad\forall i\in \mathcal{I},k\in \mathcal{K}_{i},v\in\mathcal{V}
\end{equation}
In addition, each car $v\in \mathcal{V}$ can be rented by only one customer. If more than one customer wishes to use car $v$,
the car is taken by the first customer arriving at the car. We assume that customers are indexed according to their arrival time at the car,
i.e., customer $k$ arrives before $q$ if $k<q$. We impose that a vehicle is offered to a customer only if it is offered also to the customer arriving before them
(who perhaps did not take it), that is:
\begin{equation}\label{eq:availBefore}
y_{ikv}\leq y_{i(k-1)v} \qquad \forall i\in \mathcal{I},k\in \mathcal{K}_{i},v\in\mathcal{V}
\end{equation}
A vehicle becomes unavailable for a customer if any customer has arrived before them and rented the car, that is:
\begin{equation}\label{eq:taken}
z_{vi}-y_{ikv}= \sum_{j\in\mathcal{I}}\sum_{q\in\mathcal{K}_{ij}:q < k}w_{ijqv}\qquad \forall i\in \mathcal{I},k\in\mathcal{K}_{i},v\in\mathcal{V}
\end{equation}
that is, if car $v$ is in zone $i$ ($z_{vi}=1$), but it is not offered to customer $k$ ($y_{ikv}=0$)
we obtain
$$1 = \sum_{j\in\mathcal{I}}\sum_{q\in\mathcal{K}_{ij}:q < k}w_{ijqv}$$
meaning that one customer has arrived before $k$ and rented the car. On the other hand,
if the car is offered to customer $k$ ($y_{ikv}=1$), then it must be in zone $i$ ($z_{vi}=1$ -- see \eqref{eq:availV}),
and we obtain
$$0 = \sum_{j\in\mathcal{I}}\sum_{q\in\mathcal{K}_{ij}:q < k}w_{ijqv}$$
meaning that no customer arriving before $k$ has taken the car.
The same equality holds if the vehicle is not available at all ($z_{vi}=0$ and $y_{ikv}=0$).
Now that we have clarified how the availability of rental cars is regulated, we can state that a service can be chosen only if it is offered to the customer
\begin{equation}
\label{eq:chooseIfAvailable}
w_{ijkv}\leq y_{ikv}\qquad\forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij},v\in\mathcal{V}\cup\mathcal{A}
\end{equation}
Among the available services, the customer will choose the one yielding the highest utility.
Therefore, for a given zone $i\in\mathcal{I}$, let decision variable $\nu_{ivwk}$ be equal to $1$ if both services $v$ and $w$ in $\mathcal{V}\cup\mathcal{A}$ are available to customer
$k\in\mathcal{K}_{i}$, $0$ otherwise, and decision variable $\mu_{ijvwk}$ be equal to one if service $v\in\mathcal{V}\cup\mathcal{A}$
yields a greater utility than service $w\in\mathcal{V}\cup\mathcal{A}$ to customer $k\in\mathcal{K}_{ij}$ moving from $i$ to $j$, $0$ otherwise.
The following constraints state that $\nu_{ivwk}$ is equal to one when both services $v$ and $w$ are available
\begin{align}
\label{eq:nu1} &y_{ikv}+y_{ikw}\leq 1 + \nu_{ivwk}&\forall i\in\mathcal{I},k\in \mathcal{K}_{i},v,w\in\mathcal{V}\cup\mathcal{A},\\
\label{eq:nu2} &\nu_{ivwk}\leq y_{ikv}&\forall i\in\mathcal{I},k\in \mathcal{K}_{i},v,w\in\mathcal{V}\cup\mathcal{A},\\
\label{eq:nu3} &\nu_{ivwk}\leq y_{ikw}&\forall i\in\mathcal{I},k\in \mathcal{K}_{i},v,w\in\mathcal{V}\cup\mathcal{A}.
\end{align}
A service is chosen only if it yields the highest utility
\begin{equation}\label{eq:chooseHighest}
w_{ijkv}\leq \mu_{ijvwk}\qquad\forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij},v,w\in\mathcal{V}\cup\mathcal{A}
\end{equation}
that is, as soon as $\mu_{ijvwk}$ is set to $0$ for some index $w$, $w_{ijkv}$ is forced to take value $0$ and service $v$ is not chosen by customer $k$ on $i$-$j$.
The following constraints ensure that decision variable $\mu_{ijvwk}$ takes the correct value according to the utility
\begin{align}\label{eq:choice1}
M_{ijk}\nu_{ivwk}-2M_{ijk}\leq u_{ijkv}-&u_{ijkw} - M_{ijk}\mu_{ijvwk}\\
&\nonumber\forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij},v,w\in\mathcal{V}\cup\mathcal{A}
\end{align}
and
\begin{align}\label{eq:choice2}
u_{ijkv}-u_{ijkw} - M_{ijk}\mu_{ijvwk}\leq &(1 -\nu_{ivwk})M_{ijk}\\
&\nonumber\forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij},v,w\in\mathcal{V}\cup\mathcal{A}
\end{align}
where constant $M_{ijk}$ represents the greatest difference in utility between two services on $i-j$ for customer $k\in\mathcal{K}_{ij}$,
that is $M_{ijk}\geq |u_{ijkv}-u_{ijkw}|,\forall v,w\in\mathcal{V}\cup\mathcal{A}$.
Constraints \eqref{eq:choice1}-\eqref{eq:choice2} work as follows.
When both services $v$ and $w$ are available ($\nu_{ivwk}=1$) and $u_{ijkv}>u_{ijkw}$, \eqref{eq:choice2} forces $\mu_{ijvwk}$ to take value $1$,
while \eqref{eq:choice1} reduces to $0\leq u_{ijkv}-u_{ijkw}$. When both service $v$ and $w$ are available and $u_{ijkv}<u_{ijkw}$,
\eqref{eq:choice1} forces $\mu_{ijvwk}$ to take value $0$, while \eqref{eq:choice2} reduces to $0\geq u_{ijkv}-u_{ijkw}$.
When one of the two services is not available ($\nu_{ivwk}=0$), constraints \eqref{eq:choice1}-\eqref{eq:choice2} are satisfied irrespective of the value of $\mu_{ijvwk}$.
In case of ties ($u_{ijkv}=u_{ijkw}$) we impose
\begin{equation}
\label{eq:onlyOneMu} \mu_{ijvwk}+\mu_{ijwvk}\leq 1\qquad\forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij},v,w\in\mathcal{V}\cup\mathcal{A}
\end{equation}
A service can be preferred only if offered
\begin{equation}
\label{eq:muIfAvail}\mu_{ijvwk}\leq y_{ikv} \qquad\forall i,j\in\mathcal{I},k\in \mathcal{K}_{ij},v,w\in\mathcal{V}\cup\mathcal{A}
\end{equation}
Let decision variable $\alpha_{ijkvl}$ be equal to $1$ if fare $l$ is applied between $i$ and $j$ and customer $k$ chooses shared car $v\in\mathcal{V}$, $0$ otherwise.
The following constraints ensure the relationship between $\lambda_{ijl}$ and $w_{ijkv}$ and $\alpha_{ijkvl}$
\begin{align}
\label{eq:lin1}&\lambda_{ijl}+w_{ijkv}\leq 1 + \alpha_{ijkvl}&\forall v\in\mathcal{V}, i,j\in\mathcal{I},k\in\mathcal{K}_{ij},l\in\mathcal{L}\\
\label{eq:lin2}&\alpha_{ijkvl}\leq\lambda_{ijl}&\forall v\in\mathcal{V}, i,j\in\mathcal{I},k\in\mathcal{K}_{ij},l\in\mathcal{L} \\
\label{eq:lin3}&\alpha_{ijkvl}\leq w_{ijkv}&\forall v\in\mathcal{V}, i,j\in\mathcal{I},k\in\mathcal{K}_{ij},l\in\mathcal{L}
\end{align}
That is, $\alpha_{ijkvl}$ is forced to take value $1$ as soon as both $\lambda_{ijl}$ and $w_{ijkv}$ take value one, and value $0$ as soon as either $\lambda_{ijl}$ or $w_{ijkv}$ take value $0$.
Finally, for a given realization $\xi:=(\xi_{kv})_{k\in\mathcal{K},v\in\mathcal{V}\cup\mathcal{A}}$ of the random utility term $\tilde{\xi}:=(\tilde{\xi}_{kv})_{k\in\mathcal{K},v\in\mathcal{V}\cup\mathcal{A}}$ the second-stage profit can be formally expressed as
\begin{subequations}
\label{eq:2SF1}
\begin{align}
\label{eq:obj:1}Q(z,\lambda,\xi) = \max~ & \sum_{v\in\mathcal{V}}\sum_{(i,j)\in\mathcal{I}\times\mathcal{I}}\left(P^VT^{CS}_{ij}-C^U_{ij}\right)\sum_{k\in\mathcal{K}_{ij}}w_{ijkv}\\
\label{eq:obj:2} & +\sum_{v\in\mathcal{V}}\sum_{(i,j)\in\mathcal{I}\times\mathcal{I}}\sum_{k\in\mathcal{K}_{ij}}\sum_{l\in\mathcal{L}}L_{l}\alpha_{ijkvl}\\
\text{s.t. }~&\eqref{eq:pV},\eqref{eq:pA},\eqref{eq:utility},\eqref{eq:onlyOne},\eqref{eq:availA},\eqref{eq:availV},\eqref{eq:availBefore},\eqref{eq:taken},\nonumber\\
&\eqref{eq:chooseIfAvailable},\eqref{eq:nu1},\eqref{eq:nu2},\eqref{eq:nu3},\eqref{eq:chooseHighest},\eqref{eq:choice1},\eqref{eq:choice2},\eqref{eq:onlyOneMu},\eqref{eq:muIfAvail},\eqref{eq:lin1},\eqref{eq:lin2},\eqref{eq:lin3} \nonumber
\end{align}
\end{subequations}
where $C^U_{ij}$ is the cost borne by the CSO when a vehicle is rented between $i$ and $j$, \eqref{eq:obj:1} represents the net revenue generated by the per-minute fee,
and \eqref{eq:obj:2} represents the income generated by the drop-off fee.
Thus, we can formally express the expected profit (i.e., the recourse function) as
$$Q(z,\lambda):= \mathbb{E}_{\tilde{\xi}}\bigg[Q(z,\lambda,\tilde{\xi})\bigg]$$
Problem \eqref{eq:1S} is a two-stage mixed-integer stochastic program with integer decision variables at both stages.
\section{L-Shaped Method}\label{sec:ls}
We propose a multi-cut Integer L-Shaped method to find exact solutions to problem \eqref{eq:1S}.
The original, single-cut, version of the method was introduced by \cite{LapL93}.
Assuming a set $\mathcal{S}=\{1,\ldots,S\}$ of scenarios (e.g., an iid sample) of $\tilde{\xi}$, each with probability $\pi_s$,
the Master Problem (MP) can be formulated as
\begin{subequations}
\label{eq:F2MP}
\begin{align}
\label{eq:ref:obj}&\max-\sum_{v\in\mathcal{V}}\sum_{i\in\mathcal{I}}C^R_{vi}z_{vi}+\sum_{s\in\mathcal{S}}\pi_s\phi_s\\
\label{eq:ref:c3}&\sum_{i\in\mathcal{I}}z_{vi} = 1 & v\in\mathcal{V}\\
\label{eq:ref:c6}&\sum_{l\in\mathcal{L}}\lambda_{ijl}=1& i,j\in\mathcal{I}\\
&z_{vi}\in\{0,1\} & i\in\mathcal{I},v\in\mathcal{V}\\
&\lambda_{ijl}\in\{0,1\} & i\in\mathcal{I},j\in\mathcal{I},l\in\mathcal{L}\\
&\phi_s \text{~free~}& s\in\mathcal{S}.
\end{align}
\end{subequations}
Let $\phi:=(\phi_s)_{s\in\mathcal{S}}$. For each $s\in \mathcal{S}$, the second-stage problem $Q(z,\lambda,\xi_s)$ is solved as a subproblem.
The L-Shaped method consists of solving MP in a Branch \& Cut framework where optimality cuts are added at (integer) nodes of the tree.
Observe that problem \eqref{eq:1S} has relatively complete recourse, that is, the second-stage problem $Q(z,\lambda,\xi_s)$ is feasible for every solution that satisfies the first-stage constraints.
Consequently, the method requires only the definition of optimality cuts.
The practical viability of the method is enabled by a compact reformulation of $Q(z,\lambda,\xi_s)$, provided in \Cref{sec:ls:f2}, and an efficient exact algorithm for its solution, introduced in \Cref{sec:ls:subproblems}.
We then provide the expression of optimality cuts and relaxation cuts in \Cref{sec:ls:oc} and \Cref{sec:ls:relaxation}, respectively. The former are necessary to cut off solutions for which $\phi_s<Q(z,\lambda,\xi_s)$, the latter provide non-trivial lower bounds to $Q(z,\lambda,\xi_s)$ and are
crucial for the efficiency of the algorithm. Final efficiency measures are provided in \Cref{sec:ls:efficiency}.
\subsection{Compact formulation of the second-stage problem}\label{sec:ls:f2}
For a given realization $\xi$ of $\tilde{\xi}$, and first-stage decision $(z,\lambda)$, the second-stage problem can be reformulated by preprocessing customer choices.
The preprocessing phase ensures that the reformulation is linear regardless of whether the utility function adopted is linear in the price.
Thus, any choice model can be used, without any restriction to utility functions that are linear in the price.
We introduce the concept of a request. A request represents a customer who wishes to use carsharing for moving from their origin to their destination.
For a given realization $\xi$, let the set $\mathcal{R}(\xi)$ be the set of requests. The set $\mathcal{R}(\xi)$ contains a request for each customer $k\in\mathcal{K}$ for which there exists at least one drop-off level $l\in\mathcal{L}$
such that the customer would prefer carsharing to alternative transport services, that is, for which $u_{ijkv}>u_{ijkw}$ with $v\in\mathcal{V}$ and $w\in\mathcal{A}$ for some choice of $l\in\mathcal{L}$ (note that all shared cars yield the same utility).
Let $i(r)$, $j(r)$ and $k(r)$ be the origin, destination and customer of request $r$, respectively, and $l(r)$ the highest drop-off fee at which customer $k(r)$ would prefer carsharing to other services.
Note that customer $k(r)$ would still prefer carsharing at any drop-off fee lower than $l(r)$ (under the reasonable assumption that the customer is sensitive to price).
For each realization $\xi$ of $\tilde{\xi}$ the set of requests can be populated in $\mathcal{O}\big(|\mathcal{K}|\times|\mathcal{L}|\times |\mathcal{A}|\big)$ operations as described in \Cref{alg:Rs}.
\begin{algorithm}[h]
\caption{Computation of $\mathcal{R}(\xi)$.}
\label{alg:Rs}
\begin{algorithmic}[1]
\STATE Input: $\xi$
\STATE $\mathcal{R}(\xi)\gets \emptyset$
\FOR{customer $k\in\mathcal{K}$}
\STATE $i\gets i(k)$, $j\gets j(k)$ \COMMENT{{\footnotesize $i(k)$ and $j(k)$ are the origin and destination of customer $k$.}}
\STATE $l^{MAX}\gets -\infty$ \COMMENT{{\footnotesize The highest drop-off fee at which customer $k$ will choose carsharing.}}
\STATE $L_{l^{MAX}}\gets -\infty$
\FOR{drop-off level $l\in\mathcal{L}$}
\STATE $p^{CS} \gets P^VT^{CS}_{ij} + L_{l}$ \COMMENT{{\footnotesize Calculate the price of a carsharing ride.}}
\STATE $U^{CS} \gets F_k(p^{CS},\pi^1_{vij},\ldots,\pi^N_{vij})+\xi_{kv}$ for some $v\in\mathcal{V}$\COMMENT{{\footnotesize Calculate the utility of carsharing.}}
\FOR{service $v\in\mathcal{A}:Y_{vi}=1$}
\STATE $p^A_v \gets P_{vij}$ \COMMENT{{\footnotesize Calculate the price of a ride with alternative $v$.}}
\STATE $U^A_v \gets F_k(p^{A}_v,\pi^1_{vij},\ldots,\pi^N_{vij})+\xi_{kv}$ \COMMENT{{\footnotesize Calculate the utility of alternative $v$.}}
\ENDFOR
\IF{$U^{CS} > \max_{v\in\mathcal{A}}\{U^A_v\}$ and $L_l > L_{l^{MAX}}$ }
\STATE $l^{MAX}\gets l$
\STATE $L_{l^{MAX}}\gets L_l$
\ENDIF
\ENDFOR
\IF{$l^{MAX}> -\infty$}
\STATE $r\gets |\mathcal{R}(\xi)|+1$ \COMMENT{{\footnotesize In this case there exists a drop-off fee at which $k$ prefers carsharing. Thus we create a request.}}
\STATE $\mathcal{R}(\xi)\gets \mathcal{R}(\xi)\cup \{r\}$
\STATE $i(r)\gets i$
\STATE $j(r)\gets j$
\STATE $k(r)\gets k$
\STATE $l(r)\gets l^{MAX}$
\ENDIF
\ENDFOR
\RETURN $\mathcal{R}(\xi)$
\end{algorithmic}
\end{algorithm}
Observe that, in case of a heterogeneous fleet, \Cref{alg:Rs} should be edited at lines 8--9 and 14 to account for the fact that each vehicle gives, in general, a different utility.
Let then $R_{rl} = P^VT^{CS}_{i(r),j(r)}-C^U_{i(r),j(r)}+L_l$, for $l\leq l(r)$, be the net revenue generated if request $r$ is satisfied at drop-off fee level $l$.
Let $\mathcal{R}_r(\xi)=\{\rho \in\mathcal{R}(\xi): i(\rho) = i(r), k(\rho) < k(r)\}$ be the set of requests which have a precedence over $r$.
Let $\mathcal{R}_{ij}(\xi)=\{r\in\mathcal{R}(\xi):i(r) = i, j(r)=j\}$.
Let $\mathcal{L}_r(\xi)=\{l\in\mathcal{L}:L_l \leq L_{l(r)}\}$.
Finally, let decision variable $y_{vrl}$ be equal to $1$ if request $r$ is satisfied by vehicle $v$ at level $l$, $0$ otherwise.
We can now reformulate the second-stage problem as follows.
\begin{subequations}
\label{eq:2SF2}
\begin{align}
\label{eq:2SF2:obj}&Q(z,\lambda,\xi)=\max\sum_{r\in\mathcal{R}(\xi)}\sum_{v\in\mathcal{V}}\sum_{l\in\mathcal{L}_r(\xi)}R_{vrl}y_{vrl}\\
\label{eq:2SF2:c1}&\sum_{v\in\mathcal{V}}\sum_{l\in\mathcal{L}_r(\xi)}y_{vrl}\leq 1 & r\in\mathcal{R}(\xi)\\
\label{eq:2SF2:c2}&\sum_{r\in\mathcal{R}(\xi)}\sum_{l\in\mathcal{L}_r(\xi)}y_{vrl}\leq 1 & v\in\mathcal{V}\\
\label{eq:2SF2:c4}&\sum_{l\in\mathcal{L}_{r_1}(\xi)}y_{v,r_1,l} + \sum_{r_2\in\mathcal{R}_{r_1}(\xi)}\sum_{l\in\mathcal{L}_{r_2}(\xi)}y_{v,r_2,l}\leq z_{v,i(r_1)} & r_1\in\mathcal{R}(\xi),v\in\mathcal{V}\\[5pt]
\nonumber&y_{v,r_1,l_1}+ \sum_{r_2\in\mathcal{R}_{r_1}(\xi)}\sum_{l_2\in\mathcal{L}_{r_2}(\xi)}y_{v,r_2,l_2}+ \sum_{v_1\in\mathcal{V}:v_1\neq v}y_{v_1,r_1,l_1} &\\
\label{eq:2SF2:c5} &\geq \lambda_{i(r_1),j(r_1),l_1} + z_{v,i(r_1)}-1 & r_1\in\mathcal{R}(\xi),v\in\mathcal{V}, l_1\in\mathcal{L}_{r_1}(\xi)\\
\label{eq:2SF2:c7}&\sum_{v\in\mathcal{V}}y_{vrl}\leq\lambda_{i(r),j(r),l}& r\in\mathcal{R}(\xi),l\in\mathcal{L}_r(\xi)\\
&y_{vrl}\in\{0,1\} & r\in\mathcal{R}(\xi),v\in\mathcal{V},l\in\mathcal{L}_r(\xi)
\end{align}
\end{subequations}
The objective function \eqref{eq:2SF2:obj} represents the net revenue obtained by the satisfaction of customer requests.
Constraints \eqref{eq:2SF2:c1} ensure that each request is satisfied at most once.
Constraints \eqref{eq:2SF2:c2} ensure that each vehicle satisfies at most one request.
Constraints \eqref{eq:2SF2:c4} state that a request can be satisfied by vehicle $v$ only if the vehicle is in zone $i(r_1)$
and the vehicle has not been assigned to a customer with a lower index (that is, arriving at the vehicle before $k(r_1)$).
Constraints \eqref{eq:2SF2:c5} state that a request $r_1$ at a certain level $l_1$ must be satisfied by vehicle $v$
if level $l_1$ has been chosen ($\lambda_{i(r_1),j(r_1),l_1}=1)$ and the vehicle is available at $i(r_1)$ ($z_{v,i(r_1)}=1)$, unless the car has been used to satisfy the request of a customer with a higher priority (second term on the left-hand-side), or $r_1$ has been satisfied by another vehicle (third term on the left-hand-side).
Constraints \eqref{eq:2SF2:c7} state that a request can be satisfied at level $l$ only if level $l$ is applied to all customers traveling between $i$ and $j$.
Note that $z$ and $\lambda$ are input data in problem \eqref{eq:2SF2}.
\subsection{Solution of the second-stage problem}\label{sec:ls:subproblems}
Given a solution $(z,\lambda)$ to MP and a scenario $\xi_s$, the optimal second stage profit $Q(z,\lambda,\xi_s)$ and solution can be computed by the greedy procedure sketched in \Cref{alg:Q}.
\begin{algorithm}[h]
\caption{Greedy algorithm for computing $Q(z,\lambda,\xi_s)$ and its optimal solution.}
\label{alg:Q}
\begin{algorithmic}[1]
\STATE INPUT: $z$, $\lambda$, $\mathcal{R}(\xi_s)$.
\STATE $\mathcal{V}^A_s\gets \mathcal{V}$\COMMENT{{\footnotesize $\mathcal{V}^A_s$ is the set of available vehicles.}}
\STATE $Y_{vrl} \gets 0$ $\forall v\in\mathcal{V}, r\in\mathcal{R}(\xi_s),l\in\mathcal{L}_r(\xi_s)$.
\STATE $Q(z,\lambda,\xi_s)\gets 0$
\STATE Sort requests $\mathcal{R}(\xi_s)$ in non-decreasing order of the customer index $k(r)$\COMMENT{{\footnotesize Remember that customers with a lower index have the precedence over customers with a higher index, see \eqref{eq:taken}}.}
\FOR{request $r\in \mathcal{R}(\xi_s)$}
\STATE $L_{i(r),j(r)} = \sum_{l\in\mathcal{L}}l\lambda_{i(r),j(r),l}$\COMMENT{{\footnotesize Identify the fee applied between $i(r)$ and $j(r)$.}}
\IF{$L_{i(r),j(r)}\leq l(r)$}
\FOR{$v\in\mathcal{V}^A_s$}
\IF{$z_{v,{i(r)}}=1$}
\STATE $Y_{v,r,L_{i(r),j(r)}}\gets 1$
\STATE $\mathcal{V}^A_s\gets \mathcal{V}^A_s\setminus \{v\}$\COMMENT{{\footnotesize Vehicle $v$ becomes unavailable}}
\STATE $Q(z,\lambda,\xi_s)\gets Q(z,\lambda,\xi_s) + R_{r,L_{i(r),j(r)}}$
\ENDIF
\ENDFOR
\ENDIF
\ENDFOR
\RETURN $Q(z,\lambda,\xi_s)$ and $Y_{vrl}$ $\forall v\in\mathcal{V}, r\in\mathcal{R}(\xi_s),l\in\mathcal{L}_r(\xi_s)$.
\end{algorithmic}
\end{algorithm}
\Cref{alg:Q} proceeds as follows. Given a solution $(z,\lambda)$ to MP and the requests available in scenario $\xi_s$, the algorithm first initializes the solution $Y_{vrl}$, the objective value $Q(z,\lambda,\xi_s)$ and the set of available vehicles.
Then it sorts the requests in non-decreasing order of the customer index $k(r)$. This is necessary to enforce that customers with a lower index have their request satisfied before customers with a higher index.
The algorithm then iterates over the ordered requests. For each request it first checks whether the fee applied on its origin-destination pair, $L_{i(r),j(r)}$, is lower than the highest drop-off fee acceptable to the customer, $l(r)$.
If this is the case, the algorithm looks for vehicles available at the origin of request $r$, $i(r)$. If one such vehicle $v$ is found, the request is assigned to the vehicle at the current drop-off fee, i.e., $Y_{v,r,L_{i(r),j(r)}}$ is set to $1$,
vehicle $v$ is made unavailable, and the revenue is increased by the revenue of request $r$, that is $R_{r,L_{i(r),j(r)}}$.
The algorithm performs $\mathcal{O}\big(|\mathcal{R}(\xi_s)|\times |\mathcal{V}|\big)$ operations.
\subsection{Optimality cuts}\label{sec:ls:oc}
We are now concerned with finding a \textit{valid set of optimality cuts}, that is, a finite number of optimality cuts which enforce $\phi_s\leq Q(z,\lambda,\xi_s)$ for all $s\in\mathcal{S}$.
Assume an upper bound $U_s$ on $\max_{z,\lambda}Q(z,\lambda,\xi_s)$ exists for all $s\in\mathcal{S}$.
In the case of a homogeneous fleet a valid upper bound $U_s$ on $\max_{z,\lambda}Q(z,\lambda,\xi_s)$ is
$$U_s=\sum_{r\in\mathcal{R}(\xi_s)}\max\{R_{r,l(r)} , 0 \}$$
That is, the upper bound assumes that all, and only, the requests which generate a positive revenue are satisfied, and that these are satisfied at the highest drop-off fee $l(r)$.
If the fleet is not homogeneous, we would have a vehicle-specific revenue $R_{vrl}$ for satisfying request $r$ at level $l$. In this case,
a valid upper bound can be obtained by assuming all requests are satisfied by the vehicle which yields the highest non-negative revenue, i.e.,
$$U_s=\sum_{r\in\mathcal{R}(\xi_s)}\max\bigg\{ \max_{v\in\mathcal{V}}\{R_{v,r,l(r)}\} , 0 \bigg\}$$
Given a solution $(z^t,\lambda^t)$ to MP (e.g., at a given node $t$ of the Branch \& Cut procedure),
let $\mathcal{Z}^+_t\subseteq \mathcal{V}\times\mathcal{I}$ and $\mathcal{Z}^-_t\subseteq \mathcal{V}\times\mathcal{I}$ be the set of tuples $(v,i)$ for which $z_{vi}^t=1$ and $z_{vi}^t=0$, respectively.
Similarly, let $\Lambda^+_t\subseteq\mathcal{I}\times\mathcal{I}\times\mathcal{L}$ and $\Lambda^-_t\subseteq\mathcal{I}\times\mathcal{I}\times\mathcal{L}$ be the set of tuples $(i,j,l)$ for which $\lambda^t_{ijl}=1$ and $\lambda^t_{ijl}=0$, respectively.
\Cref{prop:ocs} defines a valid set of optimality cuts.
\begin{proposition}
\label{prop:ocs}
Let $(z^t,\lambda^t)$ be the $t$-th feasible solution to MP, and $Q(z,\lambda,\xi_s)$ its second-stage value for scenario $s$. The set of cuts
\begin{align}
\label{eq:oc}
\phi_s \leq &\bigg(Q(z,\lambda,\xi_s) - U_s\bigg)\bigg(\sum_{(v,i)\in\mathcal{Z}^+_t}z_{vi}-\sum_{(v,i)\in\mathcal{Z}^-_t}z_{vi}+\sum_{(i,j,l)\in\Lambda^+_t}\lambda_{ijl}-\sum_{(i,j,l)\in\Lambda^-_t}\lambda_{ijl}\bigg)\\
\nonumber &+ U_s - \bigg(Q(z,\lambda,\xi_s) - U_s\bigg)\bigg(|\mathcal{Z}^+_t|+|\Lambda^+_t|-1 \bigg)
\end{align}
defined for all $(z^t,\lambda^t)$ feasible to MP is a valid set of optimality cuts.
\begin{proof}
It is sufficient to observe that, for $(z,\lambda)=(z^t,\lambda^t)$, we have
$$\bigg(\sum_{(v,i)\in\mathcal{Z}^+_t}z_{vi}-\sum_{(v,i)\in\mathcal{Z}^-_t}z_{vi}+\sum_{(i,j,l)\in\Lambda^+_t}\lambda_{ijl}-\sum_{(i,j,l)\in\Lambda^-_t}\lambda_{ijl}\bigg)=|\mathcal{Z}^+_t|+|\Lambda^+_t|$$
and optimality cut \eqref{eq:oc} reduces to
$$\phi_s\leq Q(z,\lambda,\xi_s), \qquad \forall s\in\mathcal{S}$$
On the other hand, if $(z,\lambda)\neq(z^t,\lambda^t)$ we get
$$\bigg(\sum_{(v,i)\in\mathcal{Z}^+_t}z_{vi}-\sum_{(v,i)\in\mathcal{Z}^-_t}z_{vi}+\sum_{(i,j,l)\in\Lambda^+_t}\lambda_{ijl}-\sum_{(i,j,l)\in\Lambda^-_t}\lambda_{ijl}\bigg)\leq|\mathcal{Z}^+_t|+|\Lambda^+_t|-1$$
and, observing that $Q(z,\lambda,\xi_s) - U_s\leq 0$, the right-hand-side of the cut becomes greater than or equal to $U_s$.
Thus, since MP is a maximization problem, the set of cuts enforces $\phi_s\leq Q(z,\lambda,\xi_s)$ when $(z,\lambda)=(z^t,\lambda^t)$ and yields a valid upper bound on the remaining solutions.
\end{proof}
\end{proposition}
\subsection{Relaxation cuts}\label{sec:ls:relaxation}
Ordinary Benders decomposition cuts can be derived by solving the LP relaxation of the subproblems $Q(z,\lambda,\xi_s)$. Relaxation cuts are not to be confused with optimality cuts as they are, in general, not tight at the point $(z^t,\lambda^t)$ at which they are generated.
However, relaxation cuts provide a, possibly, non-trivial upper bound on $Q(z,\lambda,\xi_s)$, that is an upper bound which might be lower than $U_s$ for several $(z,\lambda)$ solutions.
A relaxation cut is obtained as follows
\begin{subequations}
\label{eq:rc}
\begin{align}
\phi_s \leq& \sum_{r\in\mathcal{R}(\xi_s)}\pi^A_r+\sum_{v\in\mathcal{V}}\pi^B_{v}+\sum_{r\in\mathcal{R}(\xi_s)} \sum_{v\in\mathcal{V}} \pi^C_{rv}z_{v,i(r)}\\
&+\sum_{r\in\mathcal{R}(\xi_s)}\sum_{v\in\mathcal{V}}\sum_{l\in\mathcal{L}_{r}(\xi_s)}\pi^D_{rvl}\bigg(\lambda_{i(r),j(r),l} + z_{v,i(r)}-1\bigg)+ \sum_{r\in\mathcal{R}(\xi_s)}\sum_{l\in\mathcal{L}_r(\xi_s)}\pi^E_{rl}\lambda_{i(r),j(r),l}
\end{align}
\end{subequations}
where $\pi^A_r$, $\pi^B_v$, $\pi^C_{rv}$, $\pi^D_{rvl}$, $\pi^E_{rl}$ are the values of the dual solution to $Q(z,\lambda,\xi_s)$ corresponding to constraints \eqref{eq:2SF2:c1}, \eqref{eq:2SF2:c2}, \eqref{eq:2SF2:c4}, \eqref{eq:2SF2:c5} and \eqref{eq:2SF2:c7}, respectively.
\subsection{Other efficiency measures}\label{sec:ls:efficiency}
MP exhibits symmetric solutions. If no vehicle is made available in a given zone $i$, all configurations of the drop-off fees between zone $i$ and the remaining zones $j$ are equivalent.
In fact, no customer will be served on those $(i,j)$ pairs. This problem can be solved by means of the following constraints
\begin{align}\label{eq:VI}
&\sum_{v\in\mathcal{V}}z_{vi}+\lambda_{ij1}\geq 1& \forall i,j\in\mathcal{I}
\end{align}
Constraints \eqref{eq:VI} enforce that, when no vehicle is available in zone $i$ (i.e., $\sum_{v\in\mathcal{V}}z_{vi}=0$) we arbitrarily choose drop-off fee number $1$ (i.e., $\lambda_{ij1}=1$). On the other hand, if $\sum_{v\in\mathcal{V}}z_{vi}\geq 1$ constraints \eqref{eq:VI} are satisfied regardless of the choice of a drop-off fee.
\section{Test instances}\label{sec:instances}
In this section we present the test instances we used to run a computational study whose results are presented in \Cref{sec:results}.
The test instances mimic carsharing services in the Italian city of Milan. The city hosts a number of carsharing companies and, according to the municipality of Milan \cite{Mil20},
in $2018$ there were a total of $3 108$ free-floating shared vehicles, with an average of $16 851$ daily rentals, and $149$ station-based shared vehicles with an average of $108$ daily rentals.
We start by describing how the instances were constructed and finally we clarify the specific choices of control parameters for our tests.
For the sake of replicability, an instance generator is made publicly available at the address \url{https://github.com/GioPan/instancesPricingAndRepositioningProblem}.
\subsection{Zones and alternative transport services}
We build upon, and expand, the instances used by \cite{HanP18}.
The authors consider ten key locations in the business area of the city of Milan which we use as representatives of as many zones, thus setting $\mathcal{I}=\{1,\ldots,10\}$, see \Cref{fig:mi}.
The authors consider as alternative transport services \textit{public transport} (PT -- consisting of a combination of busses, metro and superficial trains) and \textit{bicycles} (B).
Therefore we set $\mathcal{A}=\{PT,B\}$.
For each pair of zones, the authors provide all the information necessary to calculate the utility as further explained in \Cref{sec:instances:utility}.
\begin{figure}
\caption{Municipality of Milan. Pins identify the ten locations of the city of Milan in the instances of \cite{HanP18}.}
\label{fig:mi}
\end{figure}
\subsection{Customers and utility functions}\label{sec:instances:utility}
According to \eqref{eq:utility} each customer $k$ is characterized by a known utility function and a random variable $\tilde{\xi}_{kv}$ which represents the portion of the preferences of the customer with respect to service $v$ that the CSO cannot explain. We start by introducing the portion of the utility estimated by the CSO.
We adopt the utility function described in \cite{ModS98}.
This function provides an estimate of the utility of each transport service as a function of price, travel time, walking time (e.g., to reach the service and from the service to destination) and waiting time. These represent the characteristics observable by the CSO.
For each customer $k\in\mathcal{K}$ traveling between $i$ and $j$ with transportation service $v$ the utility function is
\begin{align}\label{eq:cs:utility}
\nonumber F_k(p_{vij},T^{CS}_{vij},T^{PT}_{vij},&T^B_{vij},T^{Walk}_{vkij}, T^{Wait}_{vij})= \beta_k^{P} p_{vij} + \beta_k^{CS} T_{vij}^{CS}+ \beta_k^{PT} T_{vij}^{PT}\\
&+\tau(T_{vij}^{B})\beta_k^{B} T_{vij}^{B}+\tau(T_{vij}^{Walk})\beta_k^{Walk} T_{vij}^{Walk}+
\beta_k^{Wait} T_{vij}^{Wait}
\end{align}
The meaning of each parameter and coefficient of function \eqref{eq:cs:utility} is clarified in \Cref{tab:instances:utilityparams} and function $\tau:\mathbb{R}\rightarrow\mathbb{R}$, defined as $\tau(t)=\lceil \frac{t}{10}\rceil$, allows us to model the utility of cycling and walking as a piece-wise linear function: the utility of walking and cycling decreases faster as the walking and cycling time increases, see \cite{ModS98}.
\begin{table}
\centering
\caption{Parameters and coefficients of the utility function.}\label{tab:instances:utilityparams}
\begin{tabular}{p{0.1\linewidth}|p{0.8\linewidth}}
\toprule
Parameter & Meaning\\
\midrule
$T^{CS}_{vij}$ & Time spent riding a shared car between $i$ and $j$ when using service $v$. This quantity is strictly positive only when $v$ is a carsharing service, otherwise it is $0$.\\
$T^{PT}_{vij}$ & Time spent in public transportation between $i$ and $j$ when using service $v$. This quantity is strictly positive only when $v$ is PT, otherwise it is $0$.\\
$T^{B}_{vij}$ & Time spent riding a bicycle between $i$ and $j$ when using service $v$. This quantity is strictly positive only when $v$ is B, otherwise it is $0$.\\
$T^{Walk}_{vij}$& Walking time necessary when moving with transportation service $v$ between $i$ and $j$. This includes the walking time to the nearest service (e.g., shared car or bus stop),
between connecting means (e.g., when switching between bus and metro to reach the final destination), and from the service to the final destination.\\
$T^{Wait}_{vij}$& Waiting time when using service $v$ between $i$ and $j$, and includes the waiting time for the service (e.g., bus or metro) as well as for connection.\\
$\beta_k^{P}$ & Price sensitivity of customer $k$.\\
$\beta_k^{CS}$ & Time sensitivity of customer $k$ when using a shared vehicle.\\
$\beta_k^{PT}$ &Time sensitivity of customer $k$ when using public transport.\\
$\beta_k^{B}$ & Time sensitivity of customer $k$ when riding a bicycle.\\
$\beta_k^{Walk}$ & Time sensitivity of customer $k$ when walking.\\
$\beta_k^{Wait}$ & Time sensitivity of customer $k$ when waiting.\\
\bottomrule
\end{tabular}
\end{table}
We use the $\beta$ coefficients of the original utility function provided by \cite{ModS98} and marginally adapted to the carsharing context by \cite{HanP18} (e.g., price sensitivity has been adapted from Italian Lira to Euro). The values of the coefficients are the following: $\beta^{CS} = -1$, $\beta^{PT} =-2$, $\beta^{B}=-2.5$, $\beta^{Walk}=-3$ and $\beta^{Wait}=-6$.
For the price sensitivity $\beta^P$ the authors create two customer segments. They assign $\beta^P = - 188.33$ if a customer belongs to the \textit{lower-middle class} or $\beta^P = - 70.63$ if a customer belongs to the \textit{upper-middle class}. We randomly assigned customers (with equal probability) to either the upper-middle class ($\beta^P_k=-70.63$) or the lower-middle class ($\beta^P_k=-188.33$).
In more general cases, the parameters of utility functions can be estimated, provided the availability of data records on actual customers choices. The estimation procedure itself depends on several elements and underlying assumptions. As an example, a classical procedure to estimate the parameters of a Logit model is to maximize the log-likelihood function. Alternative methods include maximum simulated likelihood, simulated moments as well as Bayesian estimation. This topic is treated in detail in, e.g., \cite{Tra09}.
For the time parameters ($T$-parameters) in \Cref{tab:instances:utilityparams} we use the values estimated by \cite{HanP18} on the actual transport services in the city of Milan in $2017$.
These values can also be found in the files accompanying the instance generator we make available online at \url{https://github.com/GioPan/instancesPricingAndRepositioningProblem}.
It should be noted that, in more general applications, the $T$-parameters for the different services might change significantly during the day as a result of issues such as different traffic patterns, road congestion or time-varying public transport schedules.
Therefore, the $T$-parameters should be understood as specific for the target period under consideration.
The price parameters are set as follows. The price of a bicycle ride is set to $P_{Bij}=0$ for all $(i,j)$ pairs, the price for public transport services is $P_{PT,ij}=2$ (in Euro) for all $(i,j)$ corresponding to the current price of an ordinary ticket valid for $90$ minutes between each origin and destination within the municipality of Milan (price valid on November 2020).
The per-minute fee of carsharing is set to $P^V_v=0.265$ Euros per minute (the average of current per-minute fees offered by the CSOs in the city of Milan).
The drop-off fees considered are $L_1=-2$, $L_2=-1$, $L_3=0$, $L_4=1$, $L_5=2$ Euro in the base case ($\mathcal{L}=\{1,2,3,4,5\}$). Further analysis on the drop-off fee will be described in \Cref{sec:results}.
\subsection{Individual customers profiles}\label{sec:instances:individual}
The utility function in \Cref{sec:instances:utility} entails that all customers within a given class (upper- or lower-middle class) are characterized by identical preferences with respect to travel time, price, waiting time and walking time. Customers are told apart by their preferences with respect to unobserved features of the services, captured by $\tilde{\xi}_{kv}$.
However, the availability of large amounts of customer data may allow the CSO to profile customers at the individual level, i.e., to assign each customer an individual utility function.
We are not aware of publicly available utility functions which are able to distinguish between individual customers.
Therefore, in order to assess the effect of individual customer profiles, at least on the performance of the algorithm,
we use an additional configuration in which an individual utility function for each customer is obtained by applying a random perturbation to the coefficients provided by \cite{HanP18}.
Particularly, for each customer $k$, $\beta^P_k$ will be uniformly drawn in $[-188.33,-70.63]$, where $-188.33$ is the $\beta^P_k$ coefficient for lower-middle class customers and $-70.63$ is the $\beta^P_k$ coefficient of upper-middle class customers in the general case, see \Cref{sec:instances:utility}.
This allows us to obtain customers which can be anywhere between the upper- and lower-middle class.
The remaining $\beta$ coefficients will be uniformly drawn in $[0.8\beta,1.2\beta]$, where $\beta$ is the value provided by \cite{HanP18}.
As an example, for each $k$ we will draw $\beta^{PT}_k$ in $[-2.4,-1.6]$. The lower $\beta^{PT}_k$, the less utility the customer will obtain for each minute spent in public transportation.
\subsection{Uncertainty}\label{sec:uncertainty}
The random term of the utility $\tilde{\xi}_{kv}$ is modeled as a Gumbel (Extreme Value type I) distribution with mean $0$ and standard deviation $\sigma$.
This corresponds to using a Logit choice model (see \cite{Tra09,BenB99}).
The value of $\sigma$ is set as the empirical standard deviation of $U_{ijkv}= F_k(p_{vij},T^{CS}_{vij},T^{PT}_{vij},T^B_{vij},T^{Walk}_{vij}, T^{Wait}_{vij})$ for all $i,j\in\mathcal{I},v\in\mathcal{V}\cup\mathcal{A},k\in\mathcal{K}_{ij}$.
This entails that the expectation term in the objective function of \eqref{eq:1S} (i.e., $Q(z,\lambda)$) is a multidimensional integral that makes the solution of the problem prohibitive.
For this reason we approximate $\tilde{\xi}_{kv}$ by iid samples drawn from the underlying Gumbel distribution. The resulting discrete stochastic program goes under the name of \textit{Sample Average Approximation} (SAA), see \cite{KleSH02}. Its optimal objective value provides an unbiased estimator of the true objective value. The full model of the SAA is provided in \Cref{sec:saa}.
\subsection{Position of customers and vehicles}
We partition customers into sets $\mathcal{K}_i$ and then further into sets $\mathcal{K}_{ij}$ in such a way as to test different configurations of demand, e.g., center to outskirt and vice-versa.
Each one of the ten zones in our instances is characterized by a degree of ``centrality''. We use the walking distance from the \textit{Dome of Milan} as a proxy of centrality, see \Cref{fig:mi}.
Let $d_i$ be the walking distance from zone $i\in\mathcal{I}$ to the Dome. Customers $\mathcal{K}$ are first randomly partitioned
into disjoint subsets $\mathcal{K}_{i}$ with a probability $\pi_i$ which depends on the centrality of the zone as follows
\begin{equation}
\label{eq:probability}\pi_i = \frac{\gamma_id_i}{\sum_{i \in\mathcal{I}}\gamma_id_i}
\end{equation}
where $\gamma_i=e^{-\alpha^{FROM}\Delta_i}$ with $\alpha^{FROM}\in[0,1]$ and $\Delta_i=d_i-\sum_{i \in\mathcal{I}}d_i/|\mathcal{I}|$ is the deviation from the mean distance.
In words, as $\alpha^{FROM}$ increases, the zones closer to the center (negative $\Delta_i$) will receive a higher probability and the zones far from the center a lower probability, resulting in a higher concentration of customers in the central zones.
Further, all customers assigned to a given zone $i$ will be randomly assigned a destination zone $j$, and thus inserted into subset $\mathcal{K}_{ij}$, with a probability \eqref{eq:probability}.
This time $\gamma_j=e^{-\alpha^{TO}\Delta_i}$ with $\alpha^{TO}\in[0,1]$. Again, as $\alpha^{TO}$ increases more customers will be directed to central zones.
As an example, setting a low value of $\alpha^{FROM}$ and a high value of $\alpha^{TO}$ will create instances with higher demand from the outskirt to the center.
The partitioning of customers is sketched in \Cref{alg:partitionK}.
\begin{algorithm}[h]
\caption{Algorithm for the partition of customers into subsets $\mathcal{K}_i$ and $\mathcal{K}_{ij}$, $(i,j)\in\mathcal{I}\times\mathcal{I}$.}
\label{alg:partitionK}
\begin{algorithmic}[1]
\STATE Input: $\mathcal{K}$, $\mathcal{I}$, $d_i$ for $i\in \mathcal{I}$, $\alpha^{FROM}\in[0,1]$, $\alpha^{TO}\in[0,1]$
\STATE $\mathcal{K}_i=\mathcal{K}_{ij}\gets\emptyset$ for $(i,j)\in\mathcal{I}\times\mathcal{I}$
\FOR{ zone $i\in \mathcal{I}$}
\STATE Calculate $\Delta_i=d_i-\sum_{i \in\mathcal{I}}d_i/|\mathcal{I}|$
\STATE Calculate $\gamma_i^{FROM}=e^{-\alpha^{FROM}\Delta_i}$ and $\gamma_i^{TO}=e^{-\alpha^{TO}\Delta_i}$
\ENDFOR
\FOR{ zone $i\in \mathcal{I}$}
\STATE Calculate $\pi_i^{FROM} = \frac{\gamma_i^{FROM}d_i}{\sum_{i \in\mathcal{I}}\gamma_i^{FROM}d_i}$
\STATE Calculate $\pi_i^{TO} = \frac{\gamma_i^{TO}d_i}{\sum_{i \in\mathcal{I}}\gamma_i^{TO}d_i}$
\ENDFOR
\FOR{Customer $k\in \mathcal{K}$}
\STATE Draw an origin zone $i$ from $\mathcal{I}$ according to the probability distribution $(\pi_i^{FROM})_{i\in\mathcal{I}}$
\STATE $\mathcal{K}_{i}\gets\mathcal{K}_{i}\cup\{k\}$
\STATE Draw a destination zone $j$ from $\mathcal{I}$ according to the probability distribution $(\pi_i^{TO})_{i\in\mathcal{I}}$
\STATE $\mathcal{K}_{ij}\gets\mathcal{K}_{ij}\cup\{k\}$
\ENDFOR
\RETURN $\mathcal{K}_i$ and $\mathcal{K}_{ij}$ for $(i,j)\in\mathcal{I}\times\mathcal{I}$
\end{algorithmic}
\end{algorithm}
We assume the decision maker is a CSO with a fleet of $|\mathcal{V}|$ homogeneous vehicles. Each vehicle $v$ is randomly assigned to an initial zone $i$ according to probability \eqref{eq:probability},
where $\gamma_i=e^{-\alpha^V\Delta_i}$ with $\alpha^V\in[0,1]$. Also in this case, as $\alpha^V$ increases more cars will be initially located in central zones.
\subsection{Costs}
We assume a fleet of Fiat 500 cars with classical combustion engine. The relocation cost $C^R_{ij}$, equal for all vehicles, is set as the cost of the fuel necessary for a ride between $i$ and $j$, plus the per-minute salary of the driver multiplied by the driving time.
The per-minute salary of the driver is set to $0.20$ Euro/minute. It is calculated from the Italian national collective contract for logistics services valid at October 1st 2019 (available at \url{https://www.lavoro-economia.it/ccnl/ccnl.aspx?c=328}) as follows:
the average per minute salary of the five lowest salary levels is increased by $30\%$ to account for e.g., night shifts and holidays, yielding approximately $0.20$ Euro/minute.
Finally, the cost $C^U_{ij}$ is set equal to the fuel necessary for a ride between $i$ and $j$. The fuel consumption is calculated based on the specifics of a Fiat 500 petrol car and assuming an average speed of $50$km/h and a fuel price of $1.60$ Euro/liter.
All the data necessary to generate the instances described, as well as an instance generator implemented in Java, are made available at \url{https://github.com/GioPan/instancesPricingAndRepositioningProblem}.
\subsection{Control parameters}
The control parameters for the instances used in the tests are summarized in \Cref{tab:instances:controlParams}.
For each control parameter we report the different values used in the tests. The control parameters were chosen in order to test different
vehicles-to-customers ratios, ranging from $1/2$ to $1/12$, and different absolute values for the number of customers and vehicles.
In addition, the different configurations of the parameters $\alpha^{FROM}$, $\alpha^{TO}$ and $\alpha^{V}$ yield different configurations of demand (e.g., center to outskirt and outskirt to center) and of the carsharing system (e.g., vehicles located in the center and in the outskirt).
The number of scenarios $|\mathcal{S}|$ is arbitrarily set equal to $10$ on all instances.
In \Cref{sec:results:ls} we provide insights on how the L-Shaped method scales with the number of scenarios.
As explained in \Cref{sec:uncertainty} scenarios represent i.i.d. samples of the underlying Gumbel distribution.
\begin{table}
\caption{Control parameters used to generate the test instances.}
\label{tab:instances:controlParams}
\begin{tabular}{p{0.25\linewidth}|p{0.55\linewidth}|p{0.15\linewidth}}
\toprule
Parameter& Meaning& Values\\
\midrule
$|\mathcal{V}|$& the number of vehicles & $50$, $100$, $200$\\
$|\mathcal{K}|$& the number of customers& $200$,$400$,$600$\\
$\alpha^{FROM}$& initial location of the customers&$0.2$, $0.8$\\
$\alpha^{TO}$& destination of the customers&$0.2$, $0.8$\\
$\alpha^{V}$& initial location of the vehicles&$0.2$, $0.8$\\
Individual profiles &Whether each customer is profiled individually & Yes, No \\
\bottomrule
\end{tabular}
\end{table}
\section{Results}\label{sec:results}
This section is divided into three parts. First, we provide implementation details and setup of the experiments in \Cref{sec:results:setup}.
Following, in \Cref{sec:results:ls} we report on the performance of L-Shaped method especially in comparison with a commercial solver.
Finally, in \Cref{sec:results:managerial} we provide an analysis of the solutions and comment on their managerial implications.
\subsection{Experiments setup}\label{sec:results:setup}
The L-Shaped method and the extensive form of the SAAs (see \Cref{sec:saa}) were implemented in Java using the CPLEX $12.10$ libraries.
Particularly, the L-Shaped method was implemented by solving the master problem in a Branch \& Cut framework and adding optimality cuts as lazy constraints at integer nodes.
Unless otherwise specified, we used CPLEX's default parameters both when solving the extensive SAA and when using the L-Shaped method.
This entails, e.g., a target relative optimality gap of $0.01\%$. The only exception, unless otherwise specified, is a time limit of $1800$ seconds.
In the L-Shaped method, relaxation cuts and optimality cuts were applied only at integer nodes throughout the entire tree.
Particularly, relaxation cuts were crucial to the implementation. The performance of the algorithm without relaxation cuts was extremely poor.
Tests were run on machines with $2\times 2.4$ GHz \texttt{AMD Opteron 2431} 6 core CPU and $24$Gb RAM.
We remind the reader that, unless otherwise specified, the SAAs are solved with ten iid samples (scenarios) of random variable $\tilde{\xi}$, see \Cref{sec:uncertainty}.
We stress that the number of scenarios does not represent the number of instances, as in a scenario-analysis procedure.
Rather, by definition, the stochastic program takes into account all scenarios simultaneously. The impact of the number of scenarios on the computational complexity of the problems solved is assessed in \Cref{sec:results:ls}.
\subsection{Analysis of the L-Shaped Method}\label{sec:results:ls}
In the first part of the tests we compared the performance of the L-Shaped method to that of CPLEX for solving the SAA on all configurations of the control parameters in \Cref{tab:instances:controlParams} for which $|\mathcal{K}|>|\mathcal{V}|$. The scope of our experiments is thus to obtain empirical evidences as to whether, and to what extent, the L-Shaped method scales better than using CPLEX without any decomposition strategy.
The tables in this section report the optimality gap (\texttt{gap}) and elapsed time (\texttt{t} in seconds) for both CPLEX and the L-Shaped method.
For the L-Shaped method they also report the optimality gap at the root node (\texttt{gapR}) and after $50\%$ of the time limit -- i.e., $50\%$ of $1800$ seconds -- (\texttt{gap50}).
All gaps are expressed as percentages and are calculated as $100 * |\texttt{bestbound}-\texttt{bestinteger}|/|\texttt{bestinteger}|$.
The size of the SAA problem without decomposition is reported in \Cref{sec:app:size}.
\begin{longtable}{rrrr|rr|rrrr}
\caption{Comparison of CPLEX and L-Shaped method on the instances with $\alpha^V=0.2$.}\label{tab:resultsIC02}\\
\toprule
& & & & \multicolumn{2}{c}{CPLEX}&\multicolumn{4}{c}{L-Shaped}\\
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{t} & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.0088 & 452.47 & 0.6967 & 18.6752 & 0.7466 & 1801.00 \\
50 & 400 & 0.2 & 0.2 & 4.7820 & 1801.68 & 14.4808 & 74.9725 & 17.2825 & 1800.66 \\
50 & 600 & 0.2 & 0.2 & - & - & 34.9493 & 89.2833 & 39.1895 & 1802.00 \\
100 & 200 & 0.2 & 0.2 & 0.0084 & 553.92 & 0.0975 & 9.9177 & 0.0975 & 1801.38 \\
100 & 400 & 0.2 & 0.2 & - & - & 2.5186 & 23.4470 & 5.3168 & 1801.07 \\
100 & 600 & 0.2 & 0.2 & - & - & 14.1692 & 35.2426 & 14.8637 & 1821.75 \\
200 & 400 & 0.2 & 0.2 & - & - & 0.0000 & 6.3509 & - & 156.78 \\
200 & 600 & 0.2 & 0.2 & - & - & 0.0785 & 10.6491 & 0.7863 & 1806.39 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.0000 & 134.00 & 0.3381 & 17.4624 & 0.3381 & 1800.03 \\
50 & 400 & 0.2 & 0.8 & 1.8164 & 1803.60 & 11.2732 & 64.1181 & 13.2796 & 1800.05 \\
50 & 600 & 0.2 & 0.8 & 137.5269 & 1801.60 & 16.2842 & 91.0324 & 25.1233 & 1800.16 \\
100 & 200 & 0.2 & 0.8 & 0.0000 & 346.80 & 0.2815 & 7.2972 & 0.2815 & 1800.55 \\
100 & 400 & 0.2 & 0.8 & - & - & 0.6280 & 19.5696 & 0.8692 & 1800.42 \\
100 & 600 & 0.2 & 0.8 & - & - & 11.1184 & 34.2304 & 12.2836 & 1804.76 \\
200 & 400 & 0.2 & 0.8 & - & - & 0.0000 & 41.7344 & - & 119.53 \\
200 & 600 & 0.2 & 0.8 & - & - & 0.3384 & 11.2541 & 0.3625 & 1820.99 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.1916 & 1800.45 & 1.9646 & 23.9553 & 2.2150 & 1800.28 \\
50 & 400 & 0.8 & 0.2 & 10.3621 & 1806.04 & 19.1742 & 91.0136 & 25.5109 & 1800.03 \\
50 & 600 & 0.8 & 0.2 & - & - & 50.0477 & 109.7915 & 52.8586 & 1800.02 \\
100 & 200 & 0.8 & 0.2 & 0.0863 & 1803.15 & 0.5169 & 15.6382 & 0.5336 & 1801.16 \\
100 & 400 & 0.8 & 0.2 & - & - & 6.5731 & 27.8935 & 9.8736 & 1801.15 \\
100 & 600 & 0.8 & 0.2 & - & - & 24.8446 & 62.7364 & 24.9827 & 1812.75 \\
200 & 400 & 0.8 & 0.2 & - & - & 0.2654 & 13.1386 & 0.4498 & 1810.83 \\
200 & 600 & 0.8 & 0.2 & - & - & 5.1871 & 20.1720 & 5.7967 & 1821.70 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.1403 & 1800.78 & 0.7991 & 22.5586 & 0.8374 & 1800.33 \\
50 & 400 & 0.8 & 0.8 & 3.1983 & 1803.68 & 15.2703 & 57.2480 & 20.6710 & 1802.52 \\
50 & 600 & 0.8 & 0.8 & - & - & 46.6289 & 106.1663 & 48.8839 & 1804.99 \\
100 & 200 & 0.8 & 0.8 & 0.2490 & 1801.28 & 0.7255 & 12.8107 & 0.7930 & 1801.43 \\
100 & 400 & 0.8 & 0.8 & - & - & 3.4139 & 27.6540 & 5.4211 & 1803.38 \\
100 & 600 & 0.8 & 0.8 & - & - & 24.8277 & 52.2813 & 26.1444 & 1800.02 \\
200 & 400 & 0.8 & 0.8 & - & - & 0.0963 & 7.2607 & 0.0963 & 1801.18 \\
200 & 600 & 0.8 & 0.8 & - & - & 6.9456 & 17.7706 & 7.3125 & 1846.31 \\
\midrule
& & & & 12.1823 & 885.47 & 9.8292 & 38.2289 & 12.1067 & 1701.43 \\
\bottomrule
\end{longtable}
We start by reporting the results on the default setup, that is, in which customers are not profiled at the individual level (i.e., customers have identical sensitivities to prices, driving time, waiting and walking time, see \Cref{sec:instances:utility}).
\Cref{tab:resultsIC02} and \Cref{tab:resultsIC08} report the results on the instances with $\alpha^V=0.2$ (more vehicles initially located in zones far from the center) and $\alpha^V=0.8$ (more vehicles initially located in central zones), respectively. Each table reports on a total of $32$ instances, one for each configuration of the control parameters.
The results in \Cref{tab:resultsIC02} and \Cref{tab:resultsIC08} are rather similar. We observe that CPLEX is a viable alternative only for the smallest instances.
As the number of vehicles grows, CPLEX fails to deliver a feasible solution, and runs into memory problems. On the other hand, the L-Shaped method is able to provide a solution to all instances tested, and in many cases it provides a high quality solution, with a rather small optimality gap. We can also observe that, while the optimality gap at the root node is on average much higher than the final optimality gap, the gap after $50\%$ of the allowed time is only a few percentage points higher. This illustrates that the L-Shaped method may also deliver good solutions in a relatively short time ($15$ minutes).
\begin{longtable}{rrrr|rr|rrrr}
\caption{Comparison of CPLEX and L-Shaped method on the instances with $\alpha^V=0.8$.}\label{tab:resultsIC08}\\
\toprule
& & & & \multicolumn{2}{c}{CPLEX}&\multicolumn{4}{c}{L-Shaped}\\
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{t} & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.0059 & 479.49 & 0.5028 & 24.4373 & 0.5411 & 1800.83 \\
50 & 400 & 0.2 & 0.2 & 16.7040 & 1801.95 & 14.0036 & 48.9993 & 19.7977 & 1800.03 \\
50 & 600 & 0.2 & 0.2 & - & - & 33.2558 & 81.8976 & 37.6588 & 1800.91 \\
100 & 200 & 0.2 & 0.2 & 0.0091 & 1250.03 & 0.0904 & 15.8445 & 0.2180 & 1800.29 \\
100 & 400 & 0.2 & 0.2 & - & - & 2.0635 & 22.9479 & 6.5261 & 1802.54 \\
100 & 600 & 0.2 & 0.2 & - & - & 16.2161 & 37.5899 & 17.4882 & 1808.28 \\
200 & 400 & 0.2 & 0.2 & - & - & 0.7891 & 12.0835 & 2.2191 & 1809.83 \\
200 & 600 & 0.2 & 0.2 & - & - & 6.3879 & 20.9339 & 30.6798 & 1853.58 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.0099 & 246.36 & 0.6165 & 25.7909 & 0.9025 & 1800.87 \\
50 & 400 & 0.2 & 0.8 & 2.0974 & 1804.45 & 11.7004 & 45.4173 & 15.7910 & 1802.50 \\
50 & 600 & 0.2 & 0.8 & - & - & 31.6544 & 95.5335 & 36.7324 & 1801.59 \\
100 & 200 & 0.2 & 0.8 & 0.0000 & 469.42 & 0.0291 & 13.9523 & 0.0291 & 1800.33 \\
100 & 400 & 0.2 & 0.8 & - & - & 1.9427 & 25.7279 & 4.6547 & 1805.75 \\
100 & 600 & 0.2 & 0.8 & - & - & 16.5597 & 34.3635 & 17.5988 & 1801.26 \\
200 & 400 & 0.2 & 0.8 & - & - & 0.3674 & 11.6829 & 1.8109 & 1809.27 \\
200 & 600 & 0.2 & 0.8 & - & - & 4.5425 & 19.4970 & 6.6028 & 1807.60 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.0000 & 76.22 & 0.0389 & 12.3461 & 0.0389 & 1800.51 \\
50 & 400 & 0.8 & 0.2 & 4.1437 & 1804.33 & 15.1026 & 67.4814 & 21.7608 & 1803.37 \\
50 & 600 & 0.8 & 0.2 & - & - & 44.8552 & 87.1632 & 48.1871 & 1803.69 \\
100 & 200 & 0.8 & 0.2 & 0.0000 & 208.51 & 0.0000 & 9.4161 & - & 26.28 \\
100 & 400 & 0.8 & 0.2 & - & - & 0.2335 & 12.5821 & 0.2726 & 1800.85 \\
100 & 600 & 0.8 & 0.2 & - & - & 7.3760 & 25.0311 & 8.8577 & 1817.52 \\
200 & 400 & 0.8 & 0.2 & - & - & 0.0020 & 8.6414 & - & 206.88 \\
200 & 600 & 0.8 & 0.2 & - & - & 0.0021 & 11.9967 & - & 790.72 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.0083 & 78.21 & 0.0083 & 16.2539 & - & 34.46 \\
50 & 400 & 0.8 & 0.8 & 34.2159 & 1802.63 & 9.6329 & 47.5271 & 12.1571 & 1802.80 \\
50 & 600 & 0.8 & 0.8 & - & - & 37.0417 & 90.4674 & 40.3969 & 1800.35 \\
100 & 200 & 0.8 & 0.8 & 0.0000 & 222.09 & 0.0000 & 9.2810 & - & 31.41 \\
100 & 400 & 0.8 & 0.8 & - & - & 0.2077 & 19.7303 & 0.3026 & 1805.44 \\
100 & 600 & 0.8 & 0.8 & - & - & 6.8777 & 27.1833 & 6.9310 & 1811.58 \\
200 & 400 & 0.8 & 0.8 & - & - & 0.0000 & 42.1315 & - & 125.03 \\
200 & 600 & 0.8 & 0.8 & - & - & 0.0294 & 8.4615 & 0.0306 & 1801.72 \\
\midrule
& & & & 4.7662 & 539.14 & 8.1916 & 32.2623 & 13.0072 & 1505.25 \\
\bottomrule
\end{longtable}
A sensible reduction of the optimality gap can be achieved by applying valid inequality \eqref{eq:VI}.
Such valid inequality has the effect of removing some symmetric solutions, as explained in \Cref{sec:ls:efficiency}.
We observed that the addition of \eqref{eq:VI} decreased the average optimality gap from $9.82\%$ to $9.24\%$ on the instances with $\alpha^V=0.2$ and
from $8.19\%$ to $7.10\%$ on the instances with $\alpha^V=0.8$.
The addition of \eqref{eq:VI} proved particularly beneficial on the instances which yielded the largest optimality gaps reported in \Cref{tab:resultsIC02} and \Cref{tab:resultsIC08}.
All the results on the performance of the L-Shaped method with the addition of \eqref{eq:VI} are reported in \Cref{sec:app:ICplusVI}.
We now turn our attention to the performance of the algorithm when each customer is profiled individually (see \Cref{sec:instances:individual}).
The results in \Cref{tab:resultsDC02} and \Cref{tab:resultsDC08} are obtained with $\alpha^V=0.2$ and $0.8$, respectively.
Valid inequality \eqref{eq:VI} is always added to the models.
\begin{longtable}{rrrr|rr|rrrr}
\caption{Comparison of CPLEX and L-Shaped method on the instances with $\alpha^V=0.2$ and individual customer profiles.}\label{tab:resultsDC02}\\
\toprule
& & & & \multicolumn{2}{c}{CPLEX}&\multicolumn{4}{c}{L-Shaped}\\
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{t} & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.0014 & 297.99 & 0.4747 & 21.5553 & 0.4747 & 1801.00 \\
50 & 400 & 0.2 & 0.2 & 0.4151 & 1804.09 & 6.5981 & 59.3308 & 8.9622 & 1800.13 \\
50 & 600 & 0.2 & 0.2 & - & - & 13.6751 & 52.5981 & 18.9363 & 1800.03 \\
100 & 200 & 0.2 & 0.2 & 0.0000 & 221.91 & 0.0000 & 8.9488 & - & 16.47 \\
100 & 400 & 0.2 & 0.2 & - & - & 0.4829 & 26.1941 & 0.8372 & 1801.67 \\
100 & 600 & 0.2 & 0.2 & - & - & 9.2186 & 40.2920 & 9.2848 & 1805.22 \\
200 & 400 & 0.2 & 0.2 & - & - & 0.0000 & 9.9094 & - & 229.96 \\
200 & 600 & 0.2 & 0.2 & - & - & 0.0571 & 14.7626 & 0.0579 & 1801.39 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.0067 & 381.59 & 0.5159 & 20.9177 & 0.5159 & 1800.11 \\
50 & 400 & 0.2 & 0.8 & 0.3913 & 1800.30 & 3.5957 & 43.2969 & 4.3436 & 1800.03 \\
50 & 600 & 0.2 & 0.8 & - & - & 14.1098 & 63.4863 & 16.2944 & 1802.22 \\
100 & 200 & 0.2 & 0.8 & 0.0000 & 199.66 & 0.1800 & 10.6250 & 0.1800 & 1800.13 \\
100 & 400 & 0.2 & 0.8 & - & - & 0.1798 & 22.5452 & 0.3266 & 1800.02 \\
100 & 600 & 0.2 & 0.8 & - & - & 9.1325 & 36.2728 & 9.8433 & 1808.65 \\
200 & 400 & 0.2 & 0.8 & - & - & 0.0000 & 59.2478 & - & 165.06 \\
200 & 600 & 0.2 & 0.8 & - & - & 0.1327 & 15.6153 & 0.4830 & 1800.02 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.0000 & 145.84 & 0.0632 & 22.1339 & 0.0632 & 1800.60 \\
50 & 400 & 0.8 & 0.2 & 0.4584 & 1801.37 & 6.0223 & 57.8394 & 9.1647 & 1800.69 \\
50 & 600 & 0.8 & 0.2 & 2.9868 & 1804.64 & 20.5953 & 74.1159 & 23.3852 & 1800.03 \\
100 & 200 & 0.8 & 0.2 & 0.0000 & 374.57 & 0.1444 & 14.1475 & 0.1444 & 1800.12 \\
100 & 400 & 0.8 & 0.2 & 0.2382 & 1806.73 & 0.9336 & 12.6012 & 1.4826 & 1800.09 \\
100 & 600 & 0.8 & 0.2 & - & - & 14.7882 & 35.0665 & 14.8822 & 1815.08 \\
200 & 400 & 0.8 & 0.2 & - & - & 0.1239 & 11.8485 & 0.1239 & 1800.50 \\
200 & 600 & 0.8 & 0.2 & - & - & 4.6691 & 23.0727 & 5.0362 & 1827.23 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.0000 & 137.42 & 0.2137 & 21.9851 & 0.2137 & 1800.11 \\
50 & 400 & 0.8 & 0.8 & 0.2161 & 1801.02 & 4.0149 & 53.8589 & 5.2497 & 1801.68 \\
50 & 600 & 0.8 & 0.8 & - & - & 14.4585 & 70.6343 & 20.4297 & 1802.88 \\
100 & 200 & 0.8 & 0.8 & 0.0000 & 367.32 & 0.0327 & 14.5185 & 0.0327 & 1800.03 \\
100 & 400 & 0.8 & 0.8 & - & - & 2.0523 & 30.1037 & 3.4881 & 1800.07 \\
100 & 600 & 0.8 & 0.8 & - & - & 10.3710 & 34.8283 & 17.2050 & 1814.54 \\
200 & 400 & 0.8 & 0.8 & - & - & 0.0001 & 11.5481 & - & 170.66 \\
200 & 600 & 0.8 & 0.8 & - & - & 1.7683 & 15.4424 & 2.2644 & 1807.74 \\
\midrule
& & & & 0.3367 & 867.80 & 3.8517 & 32.0441 & 6.0126 & 1498.71 \\
\bottomrule
\end{longtable}
The results illustrated in \Cref{tab:resultsDC02} and \Cref{tab:resultsDC08} are similar to those observed earlier in \Cref{tab:resultsIC02} and \Cref{tab:resultsIC08}.
CPLEX remains a viable option only for the smallest instances, while the L-Shaped method is able to find a solution, in some cases a high quality one, to all instances and to solve a number of them.
We observe also that the average optimality gap appears sensibly lower compared to the results in \Cref{tab:resultsIC02,tab:resultsIC08}.
Also in this case high quality solutions can be obtained already after $15$ minutes.
A pattern in the optimality gaps reported in \Cref{tab:resultsIC02,tab:resultsIC08,tab:resultsDC02,tab:resultsDC08} can be observed.
The optimality gap appears inversely correlated with the vehicles-to-customers ratio.
That is, the instances which yield the largest optimality gaps ($50$ vehicles and $400$ customers, $50$ vehicles and $600$ customers, $100$ vehicles and $600$ customers) are those with the smallest vehicles-to-customers ratios among the instances tested ($1/8$, $1/12$, $1/6$, respectively).
Supposedly, when vehicles are scarce compared to the number of customers, it becomes more challenging for the algorithm to identify, within the $30$ minutes provided,
a relocation and pricing plan which is able to satisfy demand in such a way to yield the highest profit.
On the contrary, as the ratio increases, the model has more freedom to satisfy customers demand, and especially those requests generating the highest revenue.
\begin{longtable}{rrrr|rr|rrrr}
\caption{Comparison of CPLEX and L-Shaped method on the instances with $\alpha^V=0.8$ and individual customer profiles.}\label{tab:resultsDC08}\\
\toprule
& & & & \multicolumn{2}{c}{CPLEX}&\multicolumn{4}{c}{L-Shaped}\\
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{t} & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.0044 & 311.01 & 3.8564 & 17.7774 & 4.2676 & 1800.18 \\
50 & 400 & 0.2 & 0.2 & 1.5001 & 1802.42 & 13.5540 & 60.3835 & 14.4840 & 1800.75 \\
50 & 600 & 0.2 & 0.2 & - & - & 25.1954 & 105.0348 & 27.2104 & 1801.63 \\
100 & 200 & 0.2 & 0.2 & 0.0099 & 1246.59 & 1.1923 & 7.5390 & 1.3117 & 1801.87 \\
100 & 400 & 0.2 & 0.2 & - & - & 7.0935 & 33.3830 & 9.9477 & 1802.14 \\
100 & 600 & 0.2 & 0.2 & - & - & 14.8632 & 29.7073 & 18.9278 & 1803.56 \\
200 & 400 & 0.2 & 0.2 & - & - & 1.7218 & 22.1296 & 4.9610 & 1805.73 \\
200 & 600 & 0.2 & 0.2 & - & - & 8.6047 & 17.3350 & 8.6047 & 1833.83 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.0097 & 375.72 & 1.6477 & 20.1198 & 2.0232 & 1800.04 \\
50 & 400 & 0.2 & 0.8 & 0.9894 & 1802.12 & 11.4544 & 53.4891 & 13.7308 & 1801.20 \\
50 & 600 & 0.2 & 0.8 & - & - & 20.1773 & 89.9520 & 24.8459 & 1803.77 \\
100 & 200 & 0.2 & 0.8 & 0.0091 & 671.58 & 0.1778 & 6.9976 & 0.2153 & 1801.05 \\
100 & 400 & 0.2 & 0.8 & - & - & 2.9674 & 32.4136 & 5.1617 & 1800.03 \\
100 & 600 & 0.2 & 0.8 & - & - & 15.1294 & 32.5442 & 16.0408 & 1805.42 \\
200 & 400 & 0.2 & 0.8 & - & - & 4.7056 & 28.3808 & 4.7289 & 1801.26 \\
200 & 600 & 0.2 & 0.8 & - & - & 10.8675 & 26.4194 & 10.8675 & 1800.02 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.0003 & 73.80 & 0.0175 & 17.0926 & 0.0175 & 1800.08 \\
50 & 400 & 0.8 & 0.2 & 0.2689 & 1802.31 & 2.5979 & 32.0731 & 3.4004 & 1800.03 \\
50 & 600 & 0.8 & 0.2 & - & - & 14.2154 & 75.7595 & 17.8579 & 1800.19 \\
100 & 200 & 0.8 & 0.2 & 0.0000 & 167.21 & 0.0000 & 14.6898 & - & 22.02 \\
100 & 400 & 0.8 & 0.2 & - & - & 0.2919 & 18.9859 & 0.5424 & 1803.09 \\
100 & 600 & 0.8 & 0.2 & - & - & 10.3980 & 40.7039 & 10.4926 & 1810.93 \\
200 & 400 & 0.8 & 0.2 & - & - & 0.0000 & 61.0074 & - & 150.43 \\
200 & 600 & 0.8 & 0.2 & - & - & 0.0296 & 12.2654 & 0.0328 & 1804.32 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.0068 & 64.66 & 0.2138 & 4.6541 & 0.2138 & 1800.14 \\
50 & 400 & 0.8 & 0.8 & 0.0302 & 1804.08 & 1.7691 & 42.8840 & 2.3329 & 1800.18 \\
50 & 600 & 0.8 & 0.8 & - & - & 10.0485 & 78.3238 & 13.5323 & 1800.02 \\
100 & 200 & 0.8 & 0.8 & 0.0000 & 165.74 & 0.0000 & 1.4263 & - & 23.85 \\
100 & 400 & 0.8 & 0.8 & - & - & 0.1842 & 6.4020 & 0.1842 & 1800.06 \\
100 & 600 & 0.8 & 0.8 & - & - & 7.8771 & 36.8295 & 9.4600 & 1800.01 \\
200 & 400 & 0.8 & 0.8 & - & - & 0.0000 & 61.6141 & - & 129.12 \\
200 & 600 & 0.8 & 0.8 & - & - & 0.0469 & 13.3682 & 0.0471 & 1802.95 \\
\midrule
& & & & 0.2357 & 791.33 & 5.7899 & 33.6217 & 8.1178 & 1594.61 \\
\bottomrule
\end{longtable}
\Cref{tab:resultsLongICVI02} and \Cref{tab:resultsLongICVI08} report, for the instances with $\alpha^V=0.2$ and $0.8$, respectively, the results obtained by letting the L-Shaped method run for up to $5$ hours ($18\,000$ seconds) with a target $1\%$ optimality gap. We observe that the optimality gap goes down from an average of $9.82\%$ to an average of $6.48\%$ for the case with $\alpha^V= 0.2$ and from an average of $8.19\%$ to an average of $5.14\%$ for the case with $\alpha^V= 0.8$. For the case with individual customer profiles we obtain an average optimality gap of $2.48\%$ and $3.31\%$ for the case with $\alpha^V= 0.2$ and $0.8$, respectively. All results on the instances with individual customer profiles are reported in \Cref{app:resultsLongDC}.
These results are possibly of little practical use since, in a business context, solutions are most likely required in much shorter time.
Nevertheless, they show that the model can provide useful bounds that may serve as a reference point, for example, in the development of faster heuristic methods.
\begin{longtable}{rrrr|rrrr}
\caption{Results of the L-Shaped method with the addition of \cref{eq:VI} on the instances with $\alpha^V=0.2$ with a time limit of $18\,000$ seconds and $1\%$ target optimality gap.}\label{tab:resultsLongICVI02}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.9890 & 18.6752 & - & 159.36 \\
50 & 400 & 0.2 & 0.2 & 10.2092 & 66.5573 & 11.1192 & 18000.12 \\
50 & 600 & 0.2 & 0.2 & 22.9573 & 87.2440 & 25.9743 & 18003.41 \\
100 & 200 & 0.2 & 0.2 & 0.6386 & 9.5524 & - & 29.63 \\
100 & 400 & 0.2 & 0.2 & 0.9219 & 21.2273 & - & 3653.12 \\
100 & 600 & 0.2 & 0.2 & 7.4259 & 35.8381 & 8.8381 & 18000.29 \\
200 & 400 & 0.2 & 0.2 & 0.0000 & 6.3509 & - & 157.42 \\
200 & 600 & 0.2 & 0.2 & 0.8479 & 10.4516 & - & 717.90 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.8965 & 17.6529 & - & 21.23 \\
50 & 400 & 0.2 & 0.8 & 6.6519 & 62.4842 & 7.6630 & 18000.62 \\
50 & 600 & 0.2 & 0.8 & 13.2604 & 90.2582 & 13.8926 & 18000.10 \\
100 & 200 & 0.2 & 0.8 & 0.3083 & 7.2972 & - & 23.84 \\
100 & 400 & 0.2 & 0.8 & 0.9374 & 19.6630 & - & 1164.27 \\
100 & 600 & 0.2 & 0.8 & 6.3696 & 34.0425 & 6.9128 & 18008.18 \\
200 & 400 & 0.2 & 0.8 & 0.0000 & 41.7344 & - & 128.15 \\
200 & 600 & 0.2 & 0.8 & 0.7697 & 12.6665 & - & 705.42 \\
\midrule
50 & 200 & 0.8 & 0.2 & 1.3911 & 23.9553 & 1.5307 & 18000.20 \\
50 & 400 & 0.8 & 0.2 & 15.1496 & 85.1665 & 15.6460 & 18000.37 \\
50 & 600 & 0.8 & 0.2 & 36.5566 & 108.3757 & 37.5433 & 18000.09 \\
100 & 200 & 0.8 & 0.2 & 0.9165 & 15.4557 & - & 151.94 \\
100 & 400 & 0.8 & 0.2 & 2.7552 & 22.7406 & 3.2210 & 18008.25 \\
100 & 600 & 0.8 & 0.2 & 13.9898 & 42.5987 & 14.5641 & 18010.22 \\
200 & 400 & 0.8 & 0.2 & 0.4081 & 12.4280 & - & 332.82 \\
200 & 600 & 0.8 & 0.2 & 1.2695 & 15.3980 & 2.6620 & 18000.07 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.9616 & 22.5871 & - & 839.48 \\
50 & 400 & 0.8 & 0.8 & 11.8177 & 55.7576 & 13.0508 & 18001.72 \\
50 & 600 & 0.8 & 0.8 & 33.2157 & 106.9156 & 34.4212 & 18000.09 \\
100 & 200 & 0.8 & 0.8 & 0.9309 & 13.3519 & - & 364.48 \\
100 & 400 & 0.8 & 0.8 & 1.8436 & 17.1371 & 2.0311 & 18001.11 \\
100 & 600 & 0.8 & 0.8 & 10.3464 & 46.7222 & 15.3117 & 18004.26 \\
200 & 400 & 0.8 & 0.8 & 0.5470 & 7.2607 & - & 296.25 \\
200 & 600 & 0.8 & 0.8 & 2.3070 & 16.7484 & 2.7073 & 18009.94 \\
\midrule
& & & & 6.4872 & 36.0717 & 12.7700 & 9837.32 \\
\bottomrule
\end{longtable}
\begin{longtable}{rrrr|rrrr}
\caption{Results of the L-Shaped method with the addition of \cref{eq:VI} on the instances with $\alpha^V=0.8$ with a time limit of $18\,000$ seconds and $1\%$ target optimality gap.}\label{tab:resultsLongICVI08}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.9052 & 37.8092 & - & 186.24 \\
50 & 400 & 0.2 & 0.2 & 10.1669 & 42.1383 & 10.4452 & 18000.24 \\
50 & 600 & 0.2 & 0.2 & 21.5968 & 77.8433 & 22.0765 & 18000.11 \\
100 & 200 & 0.2 & 0.2 & 0.9726 & 6.1394 & - & 106.15 \\
100 & 400 & 0.2 & 0.2 & 1.2153 & 22.6182 & 1.4967 & 18000.48 \\
100 & 600 & 0.2 & 0.2 & 5.8809 & 35.6069 & 7.0281 & 18000.17 \\
200 & 400 & 0.2 & 0.2 & 0.9991 & 5.3451 & - & 1793.44 \\
200 & 600 & 0.2 & 0.2 & 0.9964 & 10.6059 & 1.3911 & 14025.40 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.9928 & 33.4059 & - & 904.69 \\
50 & 400 & 0.2 & 0.8 & 8.4548 & 42.0692 & 8.9256 & 18000.29 \\
50 & 600 & 0.2 & 0.8 & 20.4095 & 96.0829 & 21.1479 & 18000.08 \\
100 & 200 & 0.2 & 0.8 & 0.7544 & 4.7786 & - & 37.44 \\
100 & 400 & 0.2 & 0.8 & 0.9978 & 20.3964 & 1.2145 & 16084.62 \\
100 & 600 & 0.2 & 0.8 & 6.5475 & 35.6683 & 6.8735 & 18000.45 \\
200 & 400 & 0.2 & 0.8 & 0.5994 & 9.3896 & - & 396.00 \\
200 & 600 & 0.2 & 0.8 & 0.8804 & 17.6274 & - & 8150.06 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.5667 & 12.5954 & - & 12.95 \\
50 & 400 & 0.8 & 0.2 & 9.1978 & 65.9956 & 9.6357 & 18000.12 \\
50 & 600 & 0.8 & 0.2 & 28.8099 & 82.1348 & 30.6454 & 18002.77 \\
100 & 200 & 0.8 & 0.2 & 0.4533 & 9.4214 & - & 22.96 \\
100 & 400 & 0.8 & 0.2 & 0.9253 & 4.0455 & - & 204.01 \\
100 & 600 & 0.8 & 0.2 & 3.7135 & 27.6410 & 4.3096 & 18000.61 \\
200 & 400 & 0.8 & 0.2 & 0.0020 & 8.6414 & - & 220.92 \\
200 & 600 & 0.8 & 0.2 & 0.5837 & 11.5793 & - & 613.09 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.6258 & 14.9683 & - & 12.69 \\
50 & 400 & 0.8 & 0.8 & 6.2098 & 44.5269 & 7.6010 & 18002.40 \\
50 & 600 & 0.8 & 0.8 & 27.1409 & 99.1525 & 27.7092 & 18000.46 \\
100 & 200 & 0.8 & 0.8 & 0.9110 & 1.3040 & - & 22.28 \\
100 & 400 & 0.8 & 0.8 & 0.9539 & 17.2446 & - & 276.26 \\
100 & 600 & 0.8 & 0.8 & 2.2269 & 13.2578 & 2.6462 & 18005.33 \\
200 & 400 & 0.8 & 0.8 & 0.0000 & 42.1315 & - & 124.68 \\
200 & 600 & 0.8 & 0.8 & 0.0754 & 8.4615 & - & 419.24 \\
\midrule
& & & & 5.1489 & 30.0196 & 10.8764 & 8675.83 \\
\bottomrule
\end{longtable}
Finally, we report on the performance of the L-Shaped method and CPLEX as the number of scenarios (sample size) increases.
The results reported above in this section have been obtained by arbitrarily using ten scenarios to approximate the underlying continuous random variable.
\Cref{fig:scenarios_50_200,fig:scenarios_200_600} report the optimality gap obtained with the L-Shaped method and CPLEX as the sample size increases, for the smallest ($|\mathcal{V}|=50$, $|\mathcal{K}|=200$) and largest ($|\mathcal{V}|=200$, $|\mathcal{K}|=600$) instances, respectively, with identical customers. Particularly, \Cref{fig:scenarios_50_200_ls} and \Cref{fig:scenarios_50_200_5h} report the gap of the L-Shaped method after $30$ minutes and $5$ hours, respectively, while \Cref{fig:scenarios_50_200_cplex} and \Cref{fig:scenarios_50_200_cplex_5h} report the gap of CPLEX after $30$ minutes and $5$ hours, respectively.
Similarly, \Cref{fig:scenarios_200_600} reports the gaps of the L-Shaped method after $30$ minutes and $5$ hours on the largest instance. Tests on the largest instances were conducted only for the L-Shaped method as it is already evident from \Cref{tab:resultsIC02,tab:resultsIC08} that, on those instances, CPLEX fails to deliver solutions already with a sample of ten scenarios.
As intuition suggests, when using the L-Shaped method, the optimality gap grows with the number of scenarios, both on the smallest (\Cref{fig:scenarios_50_200}) and the largest (\Cref{fig:scenarios_200_600}) instances. For the smallest instances, with a time limit of $30$ minutes the growth appears mild and, with $100$ scenarios, the optimality gap remains in the neighborhood of $4\%$ in the worst case, with an average optimality gap in the neighborhood of $2\%$ (see \Cref{fig:scenarios_50_200_ls}). After $5$ hours, the L-Shaped method drops the optimality gap even further, with a worst case gap in the neighborhoods of $2$\%. This gives room for a more dense approximation of the uncertainty, i.e., a larger sample size, compared to the $10$ scenarios used in our previous tests.
The performance of CPLEX on the same instances is dramatically worse (see \Cref{fig:scenarios_50_200_cplex}). Given a $30$-minute time limit, on the smallest sample sizes, CPLEX outperforms the L-Shaped method, but as the sample size grows to $50$ or higher the solver's optimality gaps are orders of magnitude higher than those of the L-Shaped method (compare \Cref{fig:scenarios_50_200_cplex} and \Cref{fig:scenarios_50_200_ls}). The performance of CPLEX improves with a $5$-hour time limit (see \Cref{fig:scenarios_50_200_cplex_5h}), though performing much worse than the L-Shaped method at least for the largest sample sizes (compare \Cref{fig:scenarios_50_200_cplex_5h} and \Cref{fig:scenarios_50_200_5h}).
On the largest instances, with a $30$-minute time limit, the gap growth for the L-Shaped method remains limited up to a sample size of $25$, but grows dramatically with a larger sample size, see \Cref{fig:scenarios_200_60030min}.
Also the variance of the optimality gap grows with the sample size, limiting the reliability of the method for large numbers of scenarios. Nevertheless, with a longer time limit the L-Shaped method is able to reduce the optimality gap approximately ten times on the largest sample sizes, see \Cref{fig:scenarios_200_6005h}.
Summarizing, the results in \Cref{fig:scenarios_50_200,fig:scenarios_200_600} illustrate that, for small instances, a solution time of $30$ minutes is sufficient to obtain high quality solutions also with a more extensive approximation of the uncertainty. However, especially on the largest instances, a $30$-minute time limit might prove too small to accommodate a better description of the uncertainty. Nevertheless, allowing the L-Shaped method to run for a longer time (e.g., $5$ hours) can yield substantial reductions of the optimality gap. In any case, \Cref{fig:scenarios_50_200} illustrates that the L-Shaped method scales significantly better than CPLEX as the sample size increases.
\begin{figure}
\caption{L-Shaped method after $30$ minutes.}
\label{fig:scenarios_50_200_ls}
\caption{L-Shaped method after $5$ hours. }
\label{fig:scenarios_50_200_5h}
\caption{CPLEX after $30$ minutes.}
\label{fig:scenarios_50_200_cplex}
\caption{CPLEX after $5$ hours. Plot obtained removing an outlier with an optimality gap greater than $1000$\%.}
\label{fig:scenarios_50_200_cplex_5h}
\caption{Optimality gap obtained when using the L-Shaped method and CPLEX for different sample sizes $|\mathcal{S}|$.}
\label{fig:scenarios_50_200}
\end{figure}
\begin{figure}
\caption{L-Shaped method after $30$ minutes.}
\label{fig:scenarios_200_60030min}
\caption{L-Shaped method after $5$ hours.}
\label{fig:scenarios_200_6005h}
\caption{Optimality gap obtained when using the L-Shaped method for different sample sizes $|\mathcal{S}|$.}
\label{fig:scenarios_200_600}
\end{figure}
The observed performance of the L-Shaped method allows us a conclusive reflection on the envisaged usage of the method.
In \Cref{tab:resultsIC02,tab:resultsIC08,tab:resultsDC02,tab:resultsDC08} we have let both the algorithm and CPLEX run for $30$ minutes
and observed that our algorithm scales significantly better. Therefore, depending on the practical operating needs of the CSO, the algorithm might already provide a practice-ready tool.
That is, if the CSO is able to wait for a solution for $30$ minutes, our tests provide empirical evidence that the algorithm delivers a solution, and often a high-quality one,
with the vehicles-to-customers ratio being a strong driver of the quality of the solution obtained.
\Cref{tab:resultsIC02,tab:resultsIC08,tab:resultsDC02,tab:resultsDC08}
also illustrate that the algorithm was able to find good solutions after $15$ minutes ($50$\% of the solution time, see column \texttt{gap50}).
Therefore, if the CSO has tighter time requirements, a potential strategy is to terminate
the algorithm earlier, knowing that this implies giving up something in terms of quality of the solution.
However, our tests show that the optimality gap after $15$ minutes is typically not dramatically higher than the
optimality gap obtained after $30$ minutes.
Nevertheless, there might arise situations in which waiting for a solution for $30$ or even $15$ minutes might be impractical, e.g., if the demand landscape changes more
frequently and relocation and pricing plans are required more often.
In these cases, the proposed algorithm might be proven inefficient and one might have to consider developing faster heuristic algorithms.
If this is the case, the performance of the L-Shaped method, and the dual bounds it delivers, provide a reliable benchmark. An example is provided in \Cref{app:ILS} where we test a simple Iterated Local Search and assess its performances using the bounds provided by the L-Shaped method. For the smallest instances the heuristic is able to provide primal solutions of quality comparable or even better than the L-Shaped method.
Nevertheless, the quality of the solutions delivered drops significantly as the size of the instance increases, indicating that further refinement is needed.
Finally, regardless of the solution time, the algorithm proposed may be used by the CSO to obtain solutions
that allow them to support managerial choices or analyze policy implications, e.g., subsidies, plans to expand the fleet or to hire additional staff for relocation activities.
Some insights on the impact of a pricing scheme on profits and demand are provided in \Cref{sec:results:managerial}.
\subsection{Analysis of the solutions}\label{sec:results:managerial}
In this section we present some evidences based on the analysis of the solutions obtained by the proposed model.
The analysis was performed using the default configuration, i.e., without individual customer profiles, as we believe it is a more realistic configuration to achieve by CSOs.
The analysis in this section is based on the results obtained on the instances with the largest number of customers ($600$)
and with different distributions of vehicles and customers, namely
\begin{description}
\item[D1] Vehicles in the outskirt and demand from center to outskirt ($\alpha^V=0.2$, $\alpha^{FROM}=0.8$, $\alpha^{TO}=0.2$)
\item[D2] Vehicles in the center and demand from outskirt to center ($\alpha^V=0.8$, $\alpha^{FROM}=0.2$, $\alpha^{TO}=0.8$)
\item[D3] Vehicles in the center and demand from center to outskirt ($\alpha^V=0.8$, $\alpha^{FROM}=0.8$, $\alpha^{TO}=0.2$)
\item[D4] Vehicles in the outskirt and demand from outskirt to center ($\alpha^V=0.2$, $\alpha^{FROM}=0.2$, $\alpha^{TO}=0.8$)
\end{description}
The number of vehicles is set either to $50$ or to $200$, corresponding to vehicles-to-customers ratios of $1/12$ and $1/3$, respectively.
It should be noted that, as pointed out by e.g., \cite{HuaHK18,JiaRD19}, the number of available cars in a zone influences customer demand.
In our approach, in which customers are considered at the individual level, the connection between the number of available cars and demand is handled jointly by the utility function and, especially, by the optimization model.
That is, there is a potential demand, made up of the users who, according to their utility function, would choose carsharing, if available, at the drop-off fee level set in the first stage,
and there is a realized demand (i.e., actual rentals), which takes into account that not all potential customers may find an available car.
This is done through constraints \eqref{eq:taken} that state that an available car is taken by the first customer arriving at the car.
It should be further clarified that, in our experiments, we assume that customers do not wait for more cars to become available, i.e., the waiting time is always set to zero in our instances, corresponding to saying that, if no car is available, the customer will immediately choose another transport service.
Indeed, in some real-life carsharing services, some waiting time could be taken into account. That is, when there are no cars available in the zone, some user might decide to wait until a car is returned. However, we believe the assumption that customers do not wait is the most appropriate especially in a free-floating one-way service, where both customers and the CSO have limited information on whether and when a new car will be returned close to the user.
We start by presenting the effect of pricing strategies on profits and relocations.
For each distribution of customers and vehicles, we solved two configurations of the model.
In the first configuration the prices were optimally set by model \eqref{eq:1S}.
In the second configuration the drop-off fee was set to $0$ (the average of the drop-off fees considered) everywhere, corresponding to a situation in which the CSO applies only a per-minute fee and does not adjust prices with respect to the origin and destination and to the time of the day.
\begin{longtable}{ll|cc}
\caption{Comparison of the solutions with and without dynamic pricing on the instances with $50$ vehicles and $600$ customers.}\label{tab:results:solutions50600}\\
\toprule
Distribution&Metric & With dynamic pricing & Without dynamic pricing \\
\midrule
D1 &Expected Profit [\%] &100 &81.78\\
&\% of vehicles Relocated & 26.0 & 10.0\\
&Min $|\mathcal{R}(\xi)|$&167 &80 \\
&Max $|\mathcal{R}(\xi)|$&195 &107\\
&Expected \% Requests satisfied & 24&42\\
\midrule
D2 &Expected Profit [\%] &100 & 66.06\\
&\% of vehicles Relocated & 22.0 & 2.0\\
&Min $|\mathcal{R}(\xi)|$&168 &81 \\
&Max $|\mathcal{R}(\xi)|$&187 &105\\
&Expected \% Requests satisfied & 26&49\\
\midrule
D3 &Expected Profit [\%] &100 & 65.05\\
&\% of vehicles Relocated & 18.0 & 6.0\\
&Min $|\mathcal{R}(\xi)|$&167 &80 \\
&Max $|\mathcal{R}(\xi)|$&195 &107\\
&Expected \% Requests satisfied & 26&49\\
\midrule
D4 &Expected Profit [\%] &100 & 66.36\\
&\% of vehicles Relocated & 10.0 & 0.0\\
&Min $|\mathcal{R}(\xi)|$&168 &81 \\
&Max $|\mathcal{R}(\xi)|$&187 &105\\
&Expected \% Requests satisfied & 26&48\\
\bottomrule
\end{longtable}
\Cref{tab:results:solutions50600,tab:results:solutions200600} report a number of solution statistics for the cases with $50$ and $200$ vehicles, respectively.
Expected profits for the case without dynamic pricing are reported as a percentage of the expected profits with dynamic pricing.
In both the case with $50$ vehicles and the case with $200$ vehicles, we observe that the expected profit without dynamic pricing is approximately $65$ to $80\%$ of the expected profit obtained by adjusting prices.
The main driver of the higher expected profit generated by a pricing strategy is the higher number of requests generated, approximately double both in the case with $50$ and in the case with $200$ vehicles. That is, by adjusting prices the CSO is able to attract significantly more demand and increase competition.
\begin{longtable}{ll|cc}
\caption{Comparison of the solutions with and without dynamic pricing on the instances with $200$ vehicles and $600$ customers.}\label{tab:results:solutions200600}\\
\toprule
Distribution&Metric & With dynamic pricing & Without dynamic pricing \\
\midrule
D1 &Expected Profit [\%] &100 & 70.12\\
&\% of vehicles Relocated & 0.5 & 1.5\\
&Min $|\mathcal{R}(\xi)|$&167 &80 \\
&Max $|\mathcal{R}(\xi)|$&195 &107\\
&Expected \% Requests satisfied & 53&90\\
\midrule
D2 &Expected Profit [\%] &100 & 70.87\\
&\% of vehicles Relocated & 1.5 & 0.5\\
&Min $|\mathcal{R}(\xi)|$&168 &82 \\
&Max $|\mathcal{R}(\xi)|$&187 &99\\
&Expected \% Requests satisfied & 51&91\\
\midrule
D3 &Expected Profit [\%] &100 & 73.94\\
&\% of vehicles Relocated & 0 & 0\\
&Min $|\mathcal{R}(\xi)|$&167 &80 \\
&Max $|\mathcal{R}(\xi)|$&195 &107\\
&Expected \% Requests satisfied & 56&100\\
\midrule
D4 &Expected Profit [\%] &100 & 71.18\\
&\% of vehicles Relocated & 0 & 0\\
&Min $|\mathcal{R}(\xi)|$&168 &82 \\
&Max $|\mathcal{R}(\xi)|$&187 &99\\
&Expected \% Requests satisfied & 56&94\\
\bottomrule
\end{longtable}
\begin{figure}
\caption{Distribution $D1$}
\label{fig:200d1}
\caption{Distribution $D2$}
\label{fig:200d2}
\caption{Distribution $D3$}
\label{fig:200d3}
\caption{Distribution $D4$}
\label{fig:200d4}
\caption{Drop-off fees adopted in the instances with $200$ vehicles and $600$ customers. Drop-off fees are expressed in Euro.}
\label{fig:fares_distribuition200}
\end{figure}
In the case with $200$ vehicles and without dynamic pricing, the CSO is able to satisfy the great majority of the requests (more than $90\%$ -- see \Cref{tab:results:solutions200600}) performing very few, if any, relocations -- we will return to this point later.
However, in this case the CSO is able to attract less than (approximately) $1/6$ of the customers. With dynamic pricing, the CSO is able to attract close to $1/3$ of the customers and serve slightly more than $50\%$ of the requests. In total, the number of rentals are approximately the same, with and without dynamic pricing. However, by adjusting prices the CSO is able to increase the revenue. In fact, as shown in \Cref{fig:fares_distribuition200}, the most used drop-off fee is the highest ($2$ Euro), illustrating that the CSO is able to exploit the higher willingness to pay of some customers.
In the case with $50$ vehicles (thus one vehicle every twelve potential customers), without dynamic pricing the CSO is able to satisfy approximately half of the total requests due to the reduced number of vehicles, see \Cref{tab:results:solutions50600}. Also in this case the number of requests is lower (approximately half) than the number of requests obtained by adjusting prices.
With dynamic pricing, the CSO is able to attract close to $1/3$ of the customers and to satisfy only approximately $25\%$ of them. Also in this case, the CSO benefits from the higher competition.
In more than $50\%$ of the origin-destination pairs the CSO is able to apply the highest drop-off fee ($2$ Euro), see \Cref{fig:fares_distribuition50}, and to reposition the fleet in such a way to satisfy the requests with the highest revenue. Thus, a trend we observe in \Cref{tab:results:solutions50600} is that a pricing strategy allows the CSO to attract more demand but to satisfy only part of it. While this allows the CSO to exploit competition, many customers do not see their wish to use carsharing satisfied. This negative user experience might have an impact in the long run. This is however beyond the scope of this study.
Interestingly, in the case with $200$ vehicles (one every three customers -- see \Cref{tab:results:solutions200600}) the need for relocations is almost null, regardless of how prices are set.
The fleet is large enough to cover sufficiently well the entire business area and serve almost all requests.
On the other hand, with a fleet of $50$ vehicles (one every twelve customers -- see \Cref{tab:results:solutions50600}) the need for relocations is more evident.
The fleet is now insufficient to cover the entire demand. In the case without dynamic pricing, fewer relocations are needed compared to the case with dynamic pricing.
This is due to the lower demand attracted (approximately $80$ to $107$ requests with a fleet of $50$ vehicles).
Many more relocations are performed when dynamic pricing is applied as a consequence of the higher demand generated (at least $167$ requests for $50$ vehicles). Thus the CSO finds it beneficial to move vehicles where they can generate more revenue.
\begin{figure}
\caption{Distribution $D1$}
\label{fig:d1}
\caption{Distribution $D2$}
\label{fig:d2}
\caption{Distribution $D3$}
\label{fig:d3}
\caption{Distribution $D4$}
\label{fig:d4}
\caption{Drop-off fees adopted in the instances with $50$ vehicles and $600$ customers. Drop-off fees are expressed in Euro.}
\label{fig:fares_distribuition50}
\end{figure}
A natural follow up question is the impact of relocations on profits. Therefore, we focused on those instances where more relocations were suggested, i.e., with $50$ vehicles and dynamic pricing, see \Cref{tab:results:solutions50600}. We solved the same instances, but this time preventing the model from making any relocations.
That is, vehicles were forced to remain in their initial positions.
The results indicate that the expected profit without relocations is only marginally lower.
Particularly, $98.5\%$ for distribution $D1$, $98.3\%$ for $D2$, $97.4\%$ for $D3$ and $98.8\%$ for $D4$.
This is due to the fact that relocations are expensive and can yield only a very minor increase in revenue.
That is, with $50$ vehicles and always more than $167$ requests, vehicles would always be rented, even if not relocated.
By relocating a vehicle the CSO is able to charge a higher drop-off fee, but bears the relocation cost. This results in a marginal profit increase.
Relocations are however likely to generate a higher impact on profits when the vehicles-to-customers ratio is even smaller.
We performed the same test with a vehicles-to-customers ratio equal to $1/100$ (i.e., $10$ vehicles and $1000$ customers).
The results show that the profit without relocations was $70.07\%$ with distribution $D1$, $75.26\%$ with $D2$, $76.22\%$ with distribution $D3$ and $78.07\%$ with distribution $D4$ (these percentages are calculated on the best upper bounds since a near optimal solution was available for all instances).
In all cases the percentage of vehicles relocated ranged between $10$ and $20\%$, similar to the case with $50$ vehicles and $600$ customers.
This means that the percentage of relocations remained approximately the same with a smaller vehicles-to-customers ratio, but had a much higher impact on profits.
Thus, it appears that a dynamic pricing strategy, coupled with a sufficiently large fleet (say more than one vehicle every twelve customers in our case),
decreases significantly the need for staff-based relocations. Otherwise, relocations remain an important tool even with a dynamic pricing strategy.
\section{Conclusions, limitations and future work}\label{sec:conclusions}
We presented a novel optimization model for jointly deciding carsharing prices and relocations.
The problem is modeled as a two-stage integer stochastic program in order to account for uncertainty in customer preferences.
An exact solution algorithm based on the integer L-Shaped method has been proposed.
Extensive tests have been performed on instances based on the municipality of Milan, in order to assess both the performance of the
solution algorithm and the type of solutions obtained.
The instances have been made available online for the sake of future research.
Results illustrate that, within times compatible with business practice, the method solves or finds a high quality solution to most instances. In addition, it finds a feasible solution to all instances considered. In contrast, CPLEX delivers a solution to only a few, small, instances.
The analysis of the solutions illustrates that a pricing strategy helps the CSO to significantly increase expected profits.
This is due to the increased demand generated and the resulting competition. In our instances the demand was approximately doubled compared to a situation without dynamic pricing.
This in turn generates higher expected profits by exploiting customers' higher willingness to pay.
The results also show that, by adopting a zone-based pricing strategy and employing a large enough fleet, the impact of staff-based relocations on profits becomes marginal.
On the other hand, the impact of relocations becomes more evident as the size of the fleet decreases.
A number of limitations remain to be addressed in future research, as we comment in what follows.
A pricing strategy which varies with each origin and destination, or frequently throughout the day, may not be applicable in all contexts, or raise concerns related to the potential complexity for users who would rather prefer a simpler pricing strategy.
The scope of this article was that of introducing a general model framework which could then be adapted to specific contexts and improved.
For example, the model proposed can be easily adapted to different time and space resolutions, i.e., it is possible to define the length of the target period and the discretization of the business area based on the specific needs.
In addition, the model may be easily modified to enforce that e.g., drop-off fees vary only according to the pick-up place or only according to the drop-off place. Future research may provide further modifications and improvements.
The size of the instances used in this study is comparable with the size of the station-based carsharing in Milan which,
according to \cite{Mil20}, in 2018 counted $149$ shared vehicles and, on average, $108$ daily rentals (see \Cref{tab:results:solutions200600} for a comparison).
Other examples are the station-based carsharing offered by \textit{Letsgo} (\url{https://letsgo.dk/}), which currently operates a fleet of around $200$ vehicles in Copenhagen, and
\textit{Vy} (\url{https://www.vy.no/en/travelling-with-us/other-modes-of-transport/city-car}) that operates a fleet of 250 vehicles in Oslo.
Nevertheless, bigger fleets and a higher number of customers are likely to limit the practical efficiency of our exact method and call for faster, e.g., heuristic, methods.
The performance of the algorithm with respect to a higher number of zones remains to be assessed.
Our instances, generated on the basis of \cite{HanP18}, contained ten zones. Supposedly, a more granular discretization of the business area is likely to have a negative impact on the practical applicability of the method. However, the benefits of a finer partition of the business area into pricing zones is to be addressed by further research,
particularly in the case of free-floating services. Effective discretization strategies and methods are, to our knowledge, still an open research question.
As reported by \cite{ZoeK16}, the choice of carsharing users is also influenced by elements such as the type of vehicle and its proximity to the user.
In addition, comfort, weather conditions, and purpose of the trip are all factors which might influence customers' decisions.
While proximity is considered in the form of walking time in the utility function we used in our experiments, the remaining elements are not captured explicitly, but are rather included in the portion of customer preferences that the CSO cannot explain.
Future research might be set up to extend the model and utility function used in the tests in order to better capture customer behavior.
Several other sources of uncertainty affect the problem, that have not been considered in this study.
These include, e.g., the total number of customers appearing in each zone, and their destination.
Our model might account for this uncertainty by setting a sufficiently large number of customers.
Travel times, both with carsharing and with alternative transport services are also, to a certain degree, uncertain in practice.
The impact of this uncertainty on solutions and profits remains to be understood.
The analysis of the solutions indicates that, by dynamically adjusting prices, the CSO is able to attract significantly more demand.
However, with a vehicles-to-customers ratio of $1$ to $12$ (see \Cref{tab:results:solutions50600}), the portion of the demand satisfied was, approximately, only $25\%$.
That is, the majority of the customers who would have used carsharing did not have the chance to do so.
As a consequence, users may perceive a low availability of the service. The effect of this in the long term remains to be clarified.
Finally, our model is currently unable to use pricing as a preventive measure to encourage a profitable distribution of the fleet.
Consider two subsequent target periods, say $t_1$ and $t_2$, and two zones, say $A$ and $B$. Assume that the CSO expects high demand in zone $A$ in period $t_2$.
They may, consequently, set a lower price or an incentive in $t_1$, for renting cars in zone $B$ and delivering them in zone $A$, and/or disincentivize movements in the opposite direction.
In order to be able to optimize these decisions the proposed model should be extended to account for a multistage decision process.
\appendix
\crefalias{section}{appendix}
\section{Sample Average Approximation}\label{sec:saa}
Let $\xi_{1},\ldots,\xi_{S}$ be an $S$-dimensional iid sample of $\tilde{\xi}$ and let $\mathcal{S}=\{1,\ldots,S\}$.
Let decision variable $y_{vrls}$ be equal to $1$ if request $r\in\mathcal{R}(\xi_s)$ is satisfied by vehicle $v$ at level $l$ under realization $s$, $0$ otherwise.
The SAA of problem \eqref{eq:1S} can be stated as follows.
\begin{subequations}
\label{eq:SAA}
\begin{align}
\label{eq:SAA:obj}&\max-\sum_{v\in\mathcal{V}}\sum_{i\in\mathcal{I}}C^R_{vi}z_{vi}+\frac{1}{|\mathcal{S}|}\sum_{s\in\mathcal{S}}\sum_{r\in\mathcal{R}(\xi_s)}\sum_{v\in\mathcal{V}}\sum_{l\in\mathcal{L}_{r}(\xi_s)}R_{rl}y_{vrls}\\
\label{eq:SAA:c3}&\sum_{i\in\mathcal{I}}z_{vi} = 1 & v\in\mathcal{V}\\
\label{eq:SAA:c6}&\sum_{l\in\mathcal{L}}\lambda_{ijl}=1& i\in\mathcal{I},j\in\mathcal{J}\\
\label{eq:SAA:c1}&\sum_{v\in\mathcal{V}}\sum_{l\in\mathcal{L}_r(\xi_s)}y_{vrls}\leq 1 & r\in\mathcal{R}(\xi_s), s\in\mathcal{S}\\
\label{eq:SAA:c2}&\sum_{r\in\mathcal{R}(\xi_s)}\sum_{l\in\mathcal{L}_{r}(\xi_s)}y_{vrls}\leq 1 & v\in\mathcal{V},s\in\mathcal{S}\\
\label{eq:SAA:c4}&\sum_{l\in\mathcal{L}_{r_1}(\xi_s)}y_{v,r_1,l,s} - z_{v,i(r_1)} + \sum_{r_2\in\mathcal{R}_{r_1}(\xi_s)}\sum_{l\in\mathcal{L}_{r_2}(\xi_s)}y_{v,r_2,l,s}\leq 0 & r_1\in\mathcal{R}(\xi_s),v\in\mathcal{V},s\in\mathcal{S}\\[5pt]
\nonumber&y_{v,r_1,l_1,s}+ \sum_{r_2\in\mathcal{R}_{r_1}(\xi_s)}\sum_{l_2\in\mathcal{L}_{r_2}(\xi_s)}y_{v,r_2,l_2,s}+ \sum_{v_1\in\mathcal{V}:v_1\neq v}y_{v_1,r_1,l_1,s} &\\
\label{eq:SAA:c5} &\geq \lambda_{i(r_1),j(r_1),l_1} + z_{v,i(r_1)}-1 & r_1\in\mathcal{R}(\xi_s),v\in\mathcal{V}, l_1\in\mathcal{L}_{r_1}(\xi_s),s\in\mathcal{S}\\
\label{eq:SAA:c7}&\sum_{v\in\mathcal{V}}y_{vrls}\leq\lambda_{i(r),j(r),l}& r\in\mathcal{R}(\xi_s),l\in\mathcal{L}_{r}(\xi_s),s\in\mathcal{S}\\
&y_{vrls}\in\{0,1\} & r\in\mathcal{R}(\xi_s),v\in\mathcal{V},l\in\mathcal{L}_{r}(\xi_s),s\in\mathcal{S}\\
&z_{vi}\in\{0,1\} & i\in\mathcal{I},v\in\mathcal{V}\\
&\lambda_{ijl}\in\{0,1\} & i\in\mathcal{I},j\in\mathcal{I},l\in\mathcal{L}.
\end{align}
\end{subequations}
\section{Size of the instances}\label{sec:app:size}
This section reports the size of the instances for the base case in \Cref{tab:app:sizeIC} and for the case with customers profiled individually in \Cref{tab:app:sizeDC}.
\begin{longtable}{rrrr|rr}
\caption{Size of the SAA model without decomposition for all instances tested in the base case.}\label{tab:app:sizeIC}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \# Variables & \# Constraints \\
\midrule
50 & 200 & 0.2 & 0.2 & 95350 & 127528 \\
50 & 400 & 0.2 & 0.2 & 199800 & 269257 \\
50 & 600 & 0.2 & 0.2 & 296000 & 399868 \\
100 & 200 & 0.2 & 0.2 & 190200 & 252478 \\
100 & 400 & 0.2 & 0.2 & 399500 & 533561 \\
100 & 600 & 0.2 & 0.2 & 591600 & 791919 \\
200 & 400 & 0.2 & 0.2 & 798300 & 1061560 \\
200 & 600 & 0.2 & 0.2 & 1182900 & 1576120 \\
\midrule
50 & 200 & 0.2 & 0.8 & 89450 & 119419 \\
50 & 400 & 0.2 & 0.8 & 189750 & 254569 \\
50 & 600 & 0.2 & 0.8 & 279350 & 375643 \\
100 & 200 & 0.2 & 0.8 & 178500 & 236520 \\
100 & 400 & 0.2 & 0.8 & 379200 & 504170 \\
100 & 600 & 0.2 & 0.8 & 558300 & 743843 \\
200 & 400 & 0.2 & 0.8 & 758100 & 1003471 \\
200 & 600 & 0.2 & 0.8 & 1116500 & 1480645 \\
\midrule
50 & 200 & 0.8 & 0.2 & 94300 & 125998 \\
50 & 400 & 0.8 & 0.2 & 195900 & 263086 \\
50 & 600 & 0.8 & 0.2 & 287050 & 387577 \\
100 & 200 & 0.8 & 0.2 & 188100 & 249448 \\
100 & 400 & 0.8 & 0.2 & 391800 & 521542 \\
100 & 600 & 0.8 & 0.2 & 573900 & 767780 \\
200 & 400 & 0.8 & 0.2 & 783300 & 1038043 \\
200 & 600 & 0.8 & 0.2 & 1147300 & 1527880 \\
\midrule
50 & 200 & 0.8 & 0.8 & 90300 & 120337 \\
50 & 400 & 0.8 & 0.8 & 185750 & 249469 \\
50 & 600 & 0.8 & 0.8 & 279500 & 375949 \\
100 & 200 & 0.8 & 0.8 & 180100 & 238237 \\
100 & 400 & 0.8 & 0.8 & 371300 & 494272 \\
100 & 600 & 0.8 & 0.8 & 558800 & 744752 \\
200 & 400 & 0.8 & 0.8 & 742300 & 983773 \\
200 & 600 & 0.8 & 0.8 & 1117700 & 1482856 \\
\midrule
& & & & 452819 & 603174 \\
\bottomrule
\end{longtable}
\begin{longtable}{rrrr|rr}
\caption{Size of the SAA model without decomposition for all instances tested with individual customers profiles.}\label{tab:app:sizeDC}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \# Variables & \# Constraints \\
\midrule
50 & 200 & 0.2 & 0.2 & 84500 & 116461 \\
50 & 400 & 0.2 & 0.2 & 171000 & 237127 \\
50 & 600 & 0.2 & 0.2 & 250800 & 347389 \\
100 & 200 & 0.2 & 0.2 & 168600 & 230662 \\
100 & 400 & 0.2 & 0.2 & 341600 & 469628 \\
100 & 600 & 0.2 & 0.2 & 501200 & 687990 \\
200 & 400 & 0.2 & 0.2 & 682900 & 934729 \\
200 & 600 & 0.2 & 0.2 & 1002100 & 1369291 \\
\midrule
50 & 200 & 0.2 & 0.8 & 77200 & 107281 \\
50 & 400 & 0.2 & 0.8 & 163300 & 226723 \\
50 & 600 & 0.2 & 0.8 & 239650 & 333007 \\
100 & 200 & 0.2 & 0.8 & 153900 & 212381 \\
100 & 400 & 0.2 & 0.8 & 326100 & 448923 \\
100 & 600 & 0.2 & 0.8 & 478900 & 659508 \\
200 & 400 & 0.2 & 0.8 & 651700 & 893323 \\
200 & 600 & 0.2 & 0.8 & 957500 & 1312609 \\
\midrule
50 & 200 & 0.8 & 0.2 & 83100 & 114676 \\
50 & 400 & 0.8 & 0.2 & 168100 & 232792 \\
50 & 600 & 0.8 & 0.2 & 243900 & 338821 \\
100 & 200 & 0.8 & 0.2 & 165900 & 227329 \\
100 & 400 & 0.8 & 0.2 & 335700 & 460942 \\
100 & 600 & 0.8 & 0.2 & 487300 & 670921 \\
200 & 400 & 0.8 & 0.2 & 670900 & 917242 \\
200 & 600 & 0.8 & 0.2 & 974100 & 1335121 \\
\midrule
50 & 200 & 0.8 & 0.8 & 79300 & 110035 \\
50 & 400 & 0.8 & 0.8 & 161150 & 224428 \\
50 & 600 & 0.8 & 0.8 & 243800 & 340351 \\
100 & 200 & 0.8 & 0.8 & 158100 & 217835 \\
100 & 400 & 0.8 & 0.8 & 321800 & 444378 \\
100 & 600 & 0.8 & 0.8 & 487300 & 674153 \\
200 & 400 & 0.8 & 0.8 & 643300 & 884479 \\
200 & 600 & 0.8 & 0.8 & 974100 & 1341553 \\
\midrule
& & & & 389025 & 535065 \\
\bottomrule
\end{longtable}
\section{Effect of valid inequality}\label{sec:app:ICplusVI}
\begin{longtable}{rrrr|rrrr}
\caption{Results of the L-Shaped method with the addition of \cref{eq:VI} on the instances with $\alpha^V=0.2$.}\label{tab:resultsICVI02}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.6967 & 18.6752 & 0.6967 & 1800.20 \\
50 & 400 & 0.2 & 0.2 & 17.2850 & 66.5573 & 20.6265 & 1800.52 \\
50 & 600 & 0.2 & 0.2 & 30.5015 & 87.2440 & 39.2311 & 1809.94 \\
100 & 200 & 0.2 & 0.2 & 0.0975 & 9.5524 & 0.0975 & 1800.98 \\
100 & 400 & 0.2 & 0.2 & 1.9480 & 21.2273 & 3.9377 & 1805.67 \\
100 & 600 & 0.2 & 0.2 & 11.6208 & 35.8381 & 14.7910 & 1801.95 \\
200 & 400 & 0.2 & 0.2 & 0.0000 & 6.3509 & - & 139.41 \\
200 & 600 & 0.2 & 0.2 & 0.0777 & 10.4516 & 0.3662 & 1800.78 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.3381 & 17.6529 & 0.3381 & 1800.63 \\
50 & 400 & 0.2 & 0.8 & 9.6397 & 62.4842 & 11.6808 & 1801.21 \\
50 & 600 & 0.2 & 0.8 & 19.5455 & 90.2582 & 20.6217 & 1800.03 \\
100 & 200 & 0.2 & 0.8 & 0.2815 & 7.2972 & 0.2815 & 1800.13 \\
100 & 400 & 0.2 & 0.8 & 0.7148 & 19.6630 & 1.0807 & 1801.47 \\
100 & 600 & 0.2 & 0.8 & 9.8896 & 34.0425 & 11.4341 & 1812.93 \\
200 & 400 & 0.2 & 0.8 & 0.0000 & 41.7344 & - & 115.74 \\
200 & 600 & 0.2 & 0.8 & 0.4710 & 12.6665 & 0.6677 & 1814.99 \\
\midrule
50 & 200 & 0.8 & 0.2 & 1.9850 & 23.9553 & 2.2390 & 1800.13 \\
50 & 400 & 0.8 & 0.2 & 21.8829 & 85.1665 & 25.2703 & 1800.02 \\
50 & 600 & 0.8 & 0.2 & 44.9080 & 108.3757 & 62.3967 & 1803.83 \\
100 & 200 & 0.8 & 0.2 & 0.5238 & 15.4557 & 0.5320 & 1801.09 \\
100 & 400 & 0.8 & 0.2 & 6.1743 & 22.7406 & 9.0017 & 1806.48 \\
100 & 600 & 0.8 & 0.2 & 23.6427 & 42.5987 & 23.6588 & 1816.20 \\
200 & 400 & 0.8 & 0.2 & 0.3072 & 12.4280 & 0.3209 & 1804.22 \\
200 & 600 & 0.8 & 0.2& 6.7474 & 15.3980 & 17.5717 & 1865.22 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.8509 & 22.5871 & 0.9463 & 1800.15 \\
50 & 400 & 0.8 & 0.8 & 15.4867 & 55.7576 & 19.9941 & 1801.46 \\
50 & 600 & 0.8 & 0.8 & 40.1722 & 106.9156 & 41.2931 & 1807.37 \\
100 & 200 & 0.8 & 0.8 & 0.7932 & 13.3519 & 0.8354 & 1802.32 \\
100 & 400 & 0.8 & 0.8 & 4.4619 & 17.1371 & 7.3475 & 1803.98 \\
100 & 600 & 0.8 & 0.8 & 19.1824 & 46.7222 & 23.6637 & 1823.21 \\
200 & 400 & 0.8 & 0.8 & 0.0963 & 7.2607 & 0.0963 & 1800.78 \\
200 & 600 & 0.8 & 0.8 & 5.6194 & 16.7484 & 6.8357 & 1823.23 \\
\midrule
& & & & 9.2482 & 36.0717 & 12.2618 & 1702.07 \\
\bottomrule
\end{longtable}
\begin{longtable}{rrrr|rrrr}
\caption{Results of the L-Shaped method with the addition of \cref{eq:VI} on the instances with $\alpha^V=0.8$.}\label{tab:resultsICVI08}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.4613 & 37.8092 & 0.5067 & 1800.37 \\
50 & 400 & 0.2 & 0.2 & 11.6098 & 42.1383 & 13.2894 & 1802.87 \\
50 & 600 & 0.2 & 0.2 & 30.3469 & 77.8433 & 39.0602 & 1807.97 \\
100 & 200 & 0.2 & 0.2 & 0.1113 & 6.1394 & 0.2584 & 1800.27 \\
100 & 400 & 0.2 & 0.2 & 4.0199 & 22.6182 & 6.3574 & 1800.15 \\
100 & 600 & 0.2 & 0.2 & 14.6664 & 35.6069 & 15.9543 & 1805.19 \\
200 & 400 & 0.2 & 0.2 & 0.9991 & 5.3451 & 1.0499 & 1810.37 \\
200 & 600 & 0.2 & 0.2 & 4.5424 & 10.6059 & 14.3498 & 1889.13 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.8298 & 33.4059 & 1.0115 & 1800.11 \\
50 & 400 & 0.2 & 0.8 & 11.8754 & 42.0692 & 16.4411 & 1801.42 \\
50 & 600 & 0.2 & 0.8 & 24.6840 & 96.0829 & 32.7535 & 1800.96 \\
100 & 200 & 0.2 & 0.8 & 0.0291 & 4.7786 & 0.0291 & 1800.47 \\
100 & 400 & 0.2 & 0.8 & 2.1136 & 20.3964 & 3.4560 & 1805.67 \\
100 & 600 & 0.2 & 0.8 & 12.3031 & 35.6683 & 14.0160 & 1817.99 \\
200 & 400 & 0.2 & 0.8 & 0.2123 & 9.3896 & 0.4837 & 1800.23 \\
200 & 600 & 0.2 & 0.8 & 3.7770 & 17.6274 & 4.4652 & 1833.46 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.0389 & 12.5954 & 0.0389 & 1800.08 \\
50 & 400 & 0.8 & 0.2 & 13.3947 & 65.9956 & 15.1977 & 1800.04 \\
50 & 600 & 0.8 & 0.2 & 34.8704 & 82.1348 & 38.2156 & 1803.88 \\
100 & 200 & 0.8 & 0.2 & 0.0000 & 9.4214 & - & 26.88 \\
100 & 400 & 0.8 & 0.2 & 0.2335 & 4.0455 & 0.2348 & 1801.63 \\
100 & 600 & 0.8 & 0.2 & 9.1181 & 27.6410 & 11.0863 & 1801.40 \\
200 & 400 & 0.8 & 0.2 & 0.0020 & 8.6414 & - & 205.30 \\
200 & 600 & 0.8 & 0.2 & 0.0034 & 11.5793 & - & 786.63 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.0083 & 14.9683 & - & 24.39 \\
50 & 400 & 0.8 & 0.8 & 9.8202 & 44.5269 & 12.5750 & 1801.95 \\
50 & 600 & 0.8 & 0.8 & 30.9029 & 99.1525 & 36.1072 & 1808.71 \\
100 & 200 & 0.8 & 0.8 & 0.0000 & 0.4394 & - & 21.84 \\
100 & 400 & 0.8 & 0.8 & 0.1667 & 17.2446 & 0.2030 & 1801.88 \\
100 & 600 & 0.8 & 0.8 & 6.1475 & 13.2578 & 6.5530 & 1805.95 \\
200 & 400 & 0.8 & 0.8 & 0.0000 & 42.1315 & - & 122.34 \\
200 & 600 & 0.8 & 0.8 & 0.0294 & 8.4615 & 0.0306 & 1802.96 \\
\midrule
& & & & 7.1037 & 29.9925 & 10.9125 & 1506.01 \\
\bottomrule
\end{longtable}
\section{Results on the instances with individual customer profiles after 5 hours}\label{app:resultsLongDC}
\begin{longtable}{rrrr|rrrr}
\caption{Results of the L-Shaped method with the addition of \cref{eq:VI} on the instances with $\alpha^V=0.2$ and individual customer profiles with a time limit of $18 000$ seconds and a $1\%$ target optimality gap.}\label{tab:resultsLongDCVI02}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.9337 & 21.5553 & - & 120.82 \\
50 & 400 & 0.2 & 0.2 & 3.0295 & 59.3308 & 4.1428 & 18002.76 \\
50 & 600 & 0.2 & 0.2 & 10.1187 & 52.5981 & 11.3538 & 18000.13 \\
100 & 200 & 0.2 & 0.2 & 0.1841 & 8.9488 & - & 16.69 \\
100 & 400 & 0.2 & 0.2 & 0.8770 & 26.1941 & - & 841.63 \\
100 & 600 & 0.2 & 0.2 & 2.6675 & 40.2920 & 3.4833 & 18000.07 \\
200 & 400 & 0.2 & 0.2 & 0.1093 & 9.9094 & - & 224.15 \\
200 & 600 & 0.2 & 0.2 & 0.3517 & 14.7626 & - & 479.14 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.8707 & 20.9177 & - & 11.71 \\
50 & 400 & 0.2 & 0.8 & 1.8429 & 43.2969 & 2.5193 & 18000.92 \\
50 & 600 & 0.2 & 0.8 & 9.9981 & 63.4863 & 10.4941 & 18000.11 \\
100 & 200 & 0.2 & 0.8 & 0.2865 & 10.6250 & - & 19.35 \\
100 & 400 & 0.2 & 0.8 & 0.9395 & 22.5452 & - & 535.48 \\
100 & 600 & 0.2 & 0.8 & 2.8913 & 36.2728 & 4.4123 & 18008.31 \\
200 & 400 & 0.2 & 0.8 & 0.0000 & 59.2478 & - & 181.93 \\
200 & 600 & 0.2 & 0.8 & 0.5768 & 15.6153 & - & 726.62 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.7559 & 22.1339 & - & 44.38 \\
50 & 400 & 0.8 & 0.2 & 1.5553 & 58.5075 & 1.7269 & 18000.08 \\
50 & 600 & 0.8 & 0.2 & 14.8996 & 72.8427 & 17.8340 & 18000.08 \\
100 & 200 & 0.8 & 0.2 & 0.8890 & 13.9807 & - & 121.73 \\
100 & 400 & 0.8 & 0.2 & 0.9990 & 11.8809 & - & 1379.79 \\
100 & 600 & 0.8 & 0.2 & 4.8890 & 35.7724 & 5.7060 & 18006.26 \\
200 & 400 & 0.8 & 0.2 & 0.1760 & 11.8485 & - & 149.53 \\
200 & 600 & 0.8 & 0.2 & 0.9180 & 23.4393 & - & 5103.61 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.9921 & 21.9851 & - & 82.89 \\
50 & 400 & 0.8 & 0.8 & 1.7121 & 53.8589 & 2.1396 & 18002.06 \\
50 & 600 & 0.8 & 0.8 & 9.9533 & 70.6343 & 10.2331 & 18000.25 \\
100 & 200 & 0.8 & 0.8 & 0.9995 & 14.5185 & - & 101.45 \\
100 & 400 & 0.8 & 0.8 & 0.9953 & 30.1037 & - & 4394.49 \\
100 & 600 & 0.8 & 0.8 & 3.0919 & 34.8283 & 5.3525 & 18000.65 \\
200 & 400 & 0.8 & 0.8 & 0.0001 & 11.5481 & - & 164.89 \\
200 & 600 & 0.8 & 0.8 & 0.9972 & 15.4424 & - & 4699.97 \\
\midrule
& & & & 2.4844 & 31.5289 & 6.6165 & 7356.94 \\
\bottomrule
\end{longtable}
\begin{longtable}{rrrr|rrrr}
\caption{Results of the L-Shaped method with the addition of \cref{eq:VI} on the instances with $\alpha^V=0.8$ and individual customer profiles with a time limit of $18 000$ seconds and a $1\%$ target optimality gap.}\label{tab:resultsLongDCVI08}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gap} & \texttt{gapR} & \texttt{gap50} & \texttt{t} \\
\midrule
50 & 200 & 0.2 & 0.2 & 1.5133 & 17.7774 & 1.7378 & 18000.07 \\
50 & 400 & 0.2 & 0.2 & 8.8698 & 60.3835 & 11.4205 & 18000.10 \\
50 & 600 & 0.2 & 0.2 & 16.0592 & 105.0348 & 19.0821 & 18000.14 \\
100 & 200 & 0.2 & 0.2 & 0.9991 & 7.5390 & - & 2345.67 \\
100 & 400 & 0.2 & 0.2 & 1.9678 & 33.3830 & 2.2880 & 18000.46 \\
100 & 600 & 0.2 & 0.2 & 6.7921 & 29.7073 & 7.3807 & 18000.10 \\
200 & 400 & 0.2 & 0.2 & 0.9852 & 22.1296 & - & 3051.54 \\
200 & 600 & 0.2 & 0.2 & 1.6326 & 17.3350 & 3.1395 & 18000.54 \\
\midrule
50 & 200 & 0.2 & 0.8 & 1.1847 & 20.1198 & 1.3004 & 18000.58 \\
50 & 400 & 0.2 & 0.8 & 8.7233 & 53.4891 & 9.5341 & 18000.09 \\
50 & 600 & 0.2 & 0.8 & 14.6785 & 89.9520 & 16.2307 & 18000.09 \\
100 & 200 & 0.2 & 0.8 & 0.5693 & 6.9976 & - & 43.73 \\
100 & 400 & 0.2 & 0.8 & 1.7097 & 32.4136 & 1.9677 & 18000.31 \\
100 & 600 & 0.2 & 0.8 & 6.3776 & 32.5442 & 7.1209 & 18000.09 \\
200 & 400 & 0.2 & 0.8 & 0.9601 & 28.3808 & - & 4120.56 \\
200 & 600 & 0.2 & 0.8 & 4.6562 & 26.4194 & 5.5152 & 18000.11 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.7444 & 17.0926 & - & 15.28 \\
50 & 400 & 0.8 & 0.2 & 1.5675 & 32.0731 & 1.7184 & 18000.13 \\
50 & 600 & 0.8 & 0.2 & 8.4393 & 75.7595 & 9.0644 & 18000.30 \\
100 & 200 & 0.8 & 0.2 & 0.0000 & 14.6898 & - & 25.29 \\
100 & 400 & 0.8 & 0.2 & 0.9903 & 18.9859 & - & 579.88 \\
100 & 600 & 0.8 & 0.2 & 3.8100 & 40.7039 & 5.1358 & 18000.36 \\
200 & 400 & 0.8 & 0.2 & 0.0000 & 61.0074 & - & 164.47 \\
200 & 600 & 0.8 & 0.2 & 0.3060 & 12.2654 & - & 441.39 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.9938 & 4.6541 & - & 14.80 \\
50 & 400 & 0.8 & 0.8 & 0.9997 & 42.8840 & 1.0556 & 12864.17 \\
50 & 600 & 0.8 & 0.8 & 6.3284 & 78.3238 & 6.5753 & 18000.09 \\
100 & 200 & 0.8 & 0.8 & 0.2054 & 1.4263 & - & 23.31 \\
100 & 400 & 0.8 & 0.8 & 0.9188 & 6.4020 & - & 104.09 \\
100 & 600 & 0.8 & 0.8 & 2.6157 & 36.8295 & 3.3959 & 18000.60 \\
200 & 400 & 0.8 & 0.8 & 0.0000 & 61.6141 & - & 127.80 \\
200 & 600 & 0.8 & 0.8 & 0.3572 & 13.3682 & - & 445.02 \\
\midrule
& & & & 3.3111 & 34.4277 & 6.3146 & 10324.10 \\
\bottomrule
\end{longtable}
\section{Example Heuristic}\label{app:ILS}
In this section we present a simple Iterated Local Search (ILS) to find primal solutions to the problem. In a nutshell, an ILS works as follows: given an initial (current at a generic iteration) solution it performs a local search. To escape local optima, the solution returned by the local search is randomly perturbed and the local search restarted. This procedure is repeated until a stopping criterion is met. In what follows we explain how this procedure is adapted to our problem.
We encode solutions $\zeta$ to the original problem using
\begin{itemize}
\item a vector $\Pi(\zeta)\in\mathbb{N}^{|\mathcal{V}|}$ defining the position of the vehicles. The $v$-th element of the vector is an integer $i\in\mathcal{I}$ identifying the zone where vehicle $v$ becomes available (possibly after relocation).
\item a matrix $\Lambda(\zeta)\in \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|}$ defining the drop-off fees. The $(i,j)$-th element of the matrix is an integer $l\in\mathcal{L}$ identifying the drop-off fee between zone $i$ and $j$.
\end{itemize}
Let $H(a,b)$ be the Hamming distance between two vectors or, in the case of matrices, concatenations of rows. We define two types of neighborhoods.
\begin{itemize}
\item $\mathcal{N}^\Pi(\zeta)=\big\{\zeta'|H(\Pi(\zeta),\Pi(\zeta'))=1,\Lambda(\zeta)=\Lambda(\zeta')\big\}$. In words, it defines all solutions which can be obtained from $\zeta$ by changing solely one vehicle position.
\item $\mathcal{N}^\Lambda(\zeta)=\big\{\zeta'|\Pi(\zeta)=\Pi(\zeta'), H(\Lambda(\zeta),\Lambda(\zeta'))=1\big\}$. In words, it defines all solutions which can be obtained by changing solely one drop-off fee.
\end{itemize}
We define two \textit{first improvement operators}:
\begin{itemize}
\item $f^\Pi(\zeta):\mathbb{N}^{|\mathcal{V}|}\times \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|} \to \mathcal{N}^\Pi(\zeta)$ scans the neighborhood $\mathcal{N}^\Pi(\zeta)$ and returns the first improving solution (i.e., with a higher fitness value) if it exists, $\zeta$ otherwise.
\item $f^\Lambda(\zeta):\mathbb{N}^{|\mathcal{V}|}\times \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|} \to \mathcal{N}^\Lambda(\zeta)$ scans the neighborhood $\mathcal{N}^\Lambda(\zeta)$ and returns the first improving solution (i.e., with a higher fitness value) if it exists, $\zeta$ otherwise.
\end{itemize}
Let $z(\zeta)$ be the fitness function defining how each solution is evaluated. It is defined as the objective function of the original problem \eqref{eq:1S:obj}.
For each solution $\zeta$ considered, the second-stage revenue is computed as illustrated in \Cref{sec:ls:subproblems}.
Given a solution $\zeta$, we define two types of local search
\begin{itemize}
\item $LS^\Pi(\zeta):\mathbb{N}^{|\mathcal{V}|}\times \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|}\to \mathcal{N}^\Pi(\zeta)$ which performs a local search on the $\mathcal{N}^\Pi(\zeta)$ neighborhood using the $f^\Pi(\zeta)$ first improvement operator
\item $LS^\Lambda(\zeta): \mathbb{N}^{|\mathcal{V}|}\times \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|}\to \mathcal{N}^\Lambda(\zeta)$ which performs a local search on the $\mathcal{N}^\Lambda(\zeta)$ neighborhood using the $f^\Lambda(\zeta)$ first improvement operator.
\end{itemize}
\Cref{alg:LS} sketches the local search procedures.
\begin{algorithm}[h]
\caption{Local Search}
\label{alg:LS}
\begin{algorithmic}[1]
\STATE INPUT: $\zeta$, Operator $f(\zeta)$ to use (i.e., $f^\Pi(\zeta)$ or $f^\Lambda(\zeta)$).
\STATE INPUT: \texttt{TIMELIMIT}
\STATE \texttt{FOUND}=\texttt{TRUE}
\STATE $\zeta^{CURRENT}=\zeta$
\WHILE{\texttt{FOUND} AND \texttt{ELAPSEDTIME}$\leq$\texttt{TIMELIMIT}}
\STATE $\zeta^N\gets f(\zeta^{CURRENT})$
\IF{$z(\zeta^N)>z(\zeta^{CURRENT})$}
\STATE $\zeta^{CURRENT}\gets \zeta^N$
\ELSE
\STATE \texttt{FOUND}=\texttt{FALSE}
\ENDIF
\ENDWHILE
\RETURN $\zeta^{CURRENT}$
\end{algorithmic}
\end{algorithm}
Finally, we define a function $P(\zeta, R): \mathbb{N}^{|\mathcal{V}|}\times \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|}\to\mathbb{N}^{|\mathcal{V}|}\times \mathbb{N}^{|\mathcal{I}|\times|\mathcal{I}|}$ that randomly re-assigns $R$\% of the positions and $R$\% of the fees. The entire Iterated Local Search is sketched in \Cref{alg:ILS}.
\begin{algorithm}[h]
\caption{Iterated Local Search}
\label{alg:ILS}
\begin{algorithmic}[1]
\STATE INPUT: \texttt{MAXRESTARTSWITHOUTIMPROVEMENT}, \texttt{TIMELIMIT}, $R$
\STATE \texttt{\#RESTARTSWITHOUTIMPROVEMENT}$\gets 0$
\STATE $\zeta^{CURRENT}\gets$ random solution
\STATE $\zeta^{BEST}\gets\zeta^{CURRENT}$
\WHILE{\texttt{\#RESTARTSWITHOUTIMPROVEMENT}$\leq$\texttt{MAXRESTARTSWITHOUTIMPROVEMENT} AND \texttt{ELAPSEDTIME}$\leq$\texttt{TIMELIMIT}}
\STATE $\zeta^{N}\gets LS^\Pi(LS^\Lambda(\zeta^{CURRENT}))$
\IF{$z(\zeta^N)<z(\zeta^{CURRENT})$}
\STATE \texttt{\#RESTARTSWITHOUTIMPROVEMENT}$\gets$ \texttt{\#RESTARTSWITHOUTIMPROVEMENT}$+1$
\ELSE
\STATE \texttt{\#RESTARTSWITHOUTIMPROVEMENT}$\gets 0$
\ENDIF
\IF{$z(\zeta^N)>z(\zeta^{BEST})$}
\STATE $\zeta^{BEST}\gets \zeta^N$
\ENDIF
\STATE $\zeta^{CURRENT}\gets P(\zeta^N,R)$
\ENDWHILE
\RETURN $\zeta^{BEST}$
\end{algorithmic}
\end{algorithm}
In \Cref{alg:ILS} we set \texttt{MAXRESTARTSWITHOUTIMPROVEMENT} to $3$, \texttt{TIMELIMIT} to $1800$ seconds, and $R$ to $30$\%.
\Cref{tab:ILS02,tab:ILS08} report the optimality gap and solution time of the ILS compared to that of the L-Shaped method on all instances with identical customer profiles.
The optimality gap of the ILS is calculated using the bound delivered by the L-Shaped method as
\[\texttt{gap}=\frac{|\texttt{ILSOBJECTIVE}-\texttt{LSBESTBOUND}|}{|\texttt{ILSOBJECTIVE}|+10^{-10}}\]
The tables show that, for the smallest instances, the performance of the ILS is comparable to that of the L-Shaped method. On a number of instances (e.g., with $|\mathcal{V}|=50$) the ILS even delivers better primal solutions than the L-Shaped method. Nevertheless, as the size of the instances increases the performance of the ILS drops.
\begin{longtable}{rrrr|rrrr}
\caption{Comparison of optimality gap and solution time obtained with ILS and the LS method on the instances with identical customer profiles and with $\alpha^V=0.2$.}\label{tab:ILS02}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gapLS} & \texttt{gapILS} & \texttt{tLS} & \texttt{tILS} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.6967 & 1.2615 & 1800.20 & 1801.00 \\
50 & 400 & 0.2 & 0.2 & 17.2850 & 14.4768 & 1800.52 & 1800.13 \\
50 & 600 & 0.2 & 0.2 & 30.5015 & 21.5516 & 1809.94 & 1800.05 \\
100 & 200 & 0.2 & 0.2 & 0.0975 & 0.3102 & 1800.98 & 1804.39 \\
100 & 400 & 0.2 & 0.2 & 1.9480 & 3.9251 & 1805.67 & 1802.78 \\
100 & 600 & 0.2 & 0.2 & 11.6208 & 44.7249 & 1801.95 & 1811.11 \\
200 & 400 & 0.2 & 0.2 & 0.0000 & 276.9414 & 139.41 & 1810.90 \\
200 & 600 & 0.2 & 0.2 & 0.0777 & 456.4824 & 1800.78 & 1812.87 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.3381 & 0.7718 & 1800.63 & 1800.57 \\
50 & 400 & 0.2 & 0.8 & 9.6397 & 8.5835 & 1801.21 & 1801.20 \\
50 & 600 & 0.2 & 0.8 & 19.5455 & 15.2189 & 1800.03 & 1801.78 \\
100 & 200 & 0.2 & 0.8 & 0.2815 & 0.2815 & 1800.13 & 1801.84 \\
100 & 400 & 0.2 & 0.8 & 0.7148 & 3.6945 & 1801.47 & 1801.23 \\
100 & 600 & 0.2 & 0.8 & 9.8896 & 38.4997 & 1812.93 & 1807.95 \\
200 & 400 & 0.2 & 0.8 & 0.0000 & 258.6824 & 115.74 & 1803.26 \\
200 & 600 & 0.2 & 0.8 & 0.4710 & 421.6527 & 1814.99 & 1810.50 \\
\midrule
50 & 200 & 0.8 & 0.2 & 1.9850 & 3.9133 & 1800.13 & 1800.40 \\
50 & 400 & 0.8 & 0.2 & 21.8829 & 17.4800 & 1800.02 & 1801.26 \\
50 & 600 & 0.8 & 0.2 & 44.9080 & 33.0970 & 1803.83 & 1801.01 \\
100 & 200 & 0.8 & 0.2 & 0.5238 & 1.3703 & 1801.09 & 1801.37 \\
100 & 400 & 0.8 & 0.2 & 6.1743 & 8.1636 & 1806.48 & 1800.58 \\
100 & 600 & 0.8 & 0.2 & 23.6427 & 52.5532 & 1816.20 & 1806.71 \\
200 & 400 & 0.8 & 0.2 & 0.3072 & 260.9089 & 1804.22 & 1817.48 \\
200 & 600 & 0.8 & 0.2 & 6.7474 & 386.4842 & 1865.22 & 1801.24 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.8509 & 0.9891 & 1800.15 & 1801.00 \\
50 & 400 & 0.8 & 0.8 & 15.4867 & 12.6303 & 1801.46 & 1801.19 \\
50 & 600 & 0.8 & 0.8 & 40.1722 & 28.5881 & 1807.37 & 1800.41 \\
100 & 200 & 0.8 & 0.8 & 0.7932 & 1.4929 & 1802.32 & 1800.77 \\
100 & 400 & 0.8 & 0.8 & 4.4619 & 5.6145 & 1803.98 & 1807.19 \\
100 & 600 & 0.8 & 0.8 & 19.1824 & 45.0015 & 1823.21 & 1806.86 \\
200 & 400 & 0.8 & 0.8 & 0.0963 & 242.2407 & 1800.78 & 1806.16 \\
200 & 600 & 0.8 & 0.8 & 5.6194 & 307.5339 & 1823.23 & 1811.13 \\
\bottomrule
\end{longtable}
\begin{longtable}{rrrr|rrrr}
\caption{Comparison of optimality gap and solution time obtained with ILS and the LS method on the instances with identical customer profiles and with $\alpha^V=0.8$.}\label{tab:ILS08}\\
\toprule
$|\mathcal{V}|$ & $|\mathcal{K}|$ & $\alpha^{FROM}$ & $\alpha^{TO}$ & \texttt{gapLS} & \texttt{gapILS} & \texttt{tLS} & \texttt{tILS} \\
\midrule
50 & 200 & 0.2 & 0.2 & 0.4613 & 1.2477 & 1800.37 & 1800.88 \\
50 & 400 & 0.2 & 0.2 & 11.6098 & 10.5114 & 1802.87 & 1800.99 \\
50 & 600 & 0.2 & 0.2 & 30.3469 & 23.8963 & 1807.97 & 1800.32 \\
100 & 200 & 0.2 & 0.2 & 0.1113 & 0.4159 & 1800.27 & 1801.25 \\
100 & 400 & 0.2 & 0.2 & 4.0199 & 6.9075 & 1800.15 & 1806.85 \\
100 & 600 & 0.2 & 0.2 & 14.6664 & 42.8427 & 1805.19 & 1813.64 \\
200 & 400 & 0.2 & 0.2 & 0.9991 & 235.4502 & 1810.37 & 1814.95 \\
200 & 600 & 0.2 & 0.2 & 4.5424 & 347.5242 & 1889.13 & 1817.40 \\
\midrule
50 & 200 & 0.2 & 0.8 & 0.8298 & 1.2781 & 1800.11 & 1800.27 \\
50 & 400 & 0.2 & 0.8 & 11.8754 & 8.8758 & 1801.42 & 1800.32 \\
50 & 600 & 0.2 & 0.8 & 24.6840 & 21.8355 & 1800.96 & 1801.07 \\
100 & 200 & 0.2 & 0.8 & 0.0291 & 1.0756 & 1800.47 & 1800.33 \\
100 & 400 & 0.2 & 0.8 & 2.1136 & 5.9566 & 1805.67 & 1800.81 \\
100 & 600 & 0.2 & 0.8 & 12.3031 & 36.9526 & 1817.99 & 1803.40 \\
200 & 400 & 0.2 & 0.8 & 0.2123 & 219.8087 & 1800.23 & 1811.94 \\
200 & 600 & 0.2 & 0.8 & 3.7770 & 332.4861 & 1833.46 & 1805.89 \\
\midrule
50 & 200 & 0.8 & 0.2 & 0.0389 & 0.3443 & 1800.08 & 1800.10 \\
50 & 400 & 0.8 & 0.2 & 13.3947 & 12.2721 & 1800.04 & 1801.02 \\
50 & 600 & 0.8 & 0.2 & 34.8704 & 26.9711 & 1803.88 & 1802.90 \\
100 & 200 & 0.8 & 0.2 & 0.0000 & 0.0000 & 26.88 & 1800.97 \\
100 & 400 & 0.8 & 0.2 & 0.2335 & 1.6940 & 1801.63 & 1802.90 \\
100 & 600 & 0.8 & 0.2 & 9.1181 & 55.6383 & 1801.40 & 1810.57 \\
200 & 400 & 0.8 & 0.2 & 0.0020 & 232.7449 & 205.30 & 1806.80 \\
200 & 600 & 0.8 & 0.2 & 0.0034 & 331.4251 & 786.63 & 1816.44 \\
\midrule
50 & 200 & 0.8 & 0.8 & 0.0083 & 0.2765 & 24.39 & 1800.52 \\
50 & 400 & 0.8 & 0.8 & 9.8202 & 8.4977 & 1801.95 & 1802.57 \\
50 & 600 & 0.8 & 0.8 & 30.9029 & 21.7163 & 1808.71 & 1800.03 \\
100 & 200 & 0.8 & 0.8 & 0.0000 & 0.0000 & 21.84 & 1800.08 \\
100 & 400 & 0.8 & 0.8 & 0.1667 & 1.3339 & 1801.88 & 1805.07 \\
100 & 600 & 0.8 & 0.8 & 6.1475 & 48.0809 & 1805.95 & 1809.60 \\
200 & 400 & 0.8 & 0.8 & 0.0000 & 224.0099 & 122.34 & 1800.48 \\
200 & 600 & 0.8 & 0.8 & 0.0294 & 285.5657 & 1802.96 & 1815.21 \\
\bottomrule
\end{longtable}
\end{document} |
\begin{document}
\title[Operator BMO spaces]
{Embeddings between operator-valued dyadic BMO spaces}
\author{Oscar Blasco}
\address{Department of Mathematics,
Universitat de Valencia, Burjassot 46100 (Valencia)
Spain}
\email{oscar.blasco@uv.es}
\author{Sandra Pott}
\address{Department of Mathematics, University of
Glasgow, Glasgow G12 8QW, UK}
\email{sp@maths.gla.ac.uk}
\keywords{Operator BMO, Carleson measures, paraproducts}
\thanks{{\it 2000 Mathematics Subject Classification.}
Primary 42B30, 42B35, Secondary 47B35 \\
The first author gratefully acknowledges support by the LMS and
Proyectos MTM 2005-08350 and PR2006-0086. The second author
gratefully acknowledges support by EPSRC}
\begin{abstract}
We investigate a scale of dyadic operator-valued BMO spaces,
corresponding to the different yet equivalent characterizations of
dyadic BMO in the scalar case. In the language of operator spaces,
we investigate different operator space structures on the scalar
dyadic BMO space which arise naturally from the different
characterisations of scalar BMO. We also give sharp
dimensional growth estimates for the sweep of functions and its bilinear extension in some of those
different dyadic BMO spaces.
\end{abstract}
\maketitle
\section{Introduction}
Let
$\mathcal{D}$ denote the collection of dyadic subintervals
of the unit circle $\mathbb{T}$, and let $(h_I)_{I \in \mathcal{D}}$,
where $h_I = \frac{1}{|I|^{1/2}} ( \chi_{I^+} - \chi_{I^-})$,
be the Haar basis of $L^2(\mathbb{T})$.
For $I \in \mathcal{D}$ and $\phi\in L^2(\mathbb{T})$, let $\phi_I$ denote the formal Haar
coefficient
$\int_I \phi(t) h_I dt$, and $m_I \phi = \frac{1}{|I|} \int_I \phi(t) dt$
denote the
average of $\phi$ over $I$. We write $P_I(\phi)=\sum_{J\subseteq I}
\phi_Jh_J$.
We say that $\phi\in L^2(\mathbb{T})$ belongs to dyadic BMO, written
$\phi\in {\rm BMO^d}(\mathbb{T})$, if
\begin{equation}\label{bmo1}
\sup_{I \in \mathcal{D}}( \frac{1}{|I|} \int_I | \phi(t) - m_I \phi |^2 dt)^{1/2}
< \infty.\end{equation}
Using the identity $P_I(\phi)= (\phi- m_I\phi)\chi_I$, this can
also be written as
\begin{equation}\label{bmo2}
\sup_{I \in \mathcal{D}} \frac{1}{|I|^{1/2}} \|P_I( \phi) \|_{L^2}
< \infty,
\end{equation}
or
\begin{equation} \label{bmo3}
\sup_{I \in \mathcal{D}} \frac{1}{|I|} \sum_{J \in \mathcal{D}, J \subseteq I}
| \phi_J |^2
< \infty.
\end{equation}
Due to John-Nirenberg's lemma, we have, for $0< p < \infty$, that
$\phi\in {\rm BMO^d}(\mathbb{T})$ if and only if
\begin{equation}\label{bmo}
\sup_{I \in \mathcal{D}}( \frac{1}{|I|} \int_I | \phi(t) - m_I \phi |^p
dt)^{1/p}= \sup_{I \in \mathcal{D}} \frac{1}{|I|^{1/p}} \|P_I(
\phi)\|_{L^p} < \infty.\end{equation}
It is well-known that the space ${\rm BMO^d}(\mathbb{T})$ has the following equivalent
formulation in terms of boundedness of dyadic paraproducts: The map
\begin{equation} \label{bmo4}
\pi_\phi: L^2(\mathbb{T}) \rightarrow L^2(\mathbb{T}), \quad f = \sum_{I \in \mathcal{D}}
f_I h_I\mapsto \sum_{I \in \mathcal{D}} \phi_I (m_I f) h_I
\end{equation}
defines a bounded linear operator on $L^2(\mathbb{T})$, if and only if
$\phi\in {\rm BMO^d}(\mathbb{T})$.
For real-valued functions, we can also
replace the boundedness of the dyadic paraproduct
$\pi_\phi$ by the boundedness of its
adjoint operator
\begin{equation} \label{adjpara}
\Delta_\phi: L^2(\mathbb{T}) \rightarrow L^2(\mathbb{T}), \quad f = \sum_{I \in \mathcal{D}}
f_I h_I\mapsto
\sum_{I \in \mathcal{D}} \phi_I f_I
\frac{\chi_I}{|I|}.
\end{equation}
Another equivalent formulation comes from the duality
\begin{equation} \label{h1dual}
{\rm BMO^d}(\mathbb{T})=(H_d^1(\mathbb{T}))^*,
\end{equation}
where the dyadic Hardy space $H_d^1(\mathbb{T})$ consists
of those functions $\phi\in L^1(\mathbb{T})$ for which the dyadic square function $\mathcal{S}
\phi = (\sum_{I\in
\mathcal{D}}|\phi_I|^2\frac{\chi_I}{|I|})^{1/2}$ is also in $L^1(\mathbb{T})$.
Let us recall that
$H_d^1(\mathbb{T})$ can also be described in terms of dyadic atoms. That is, $H_d^1(\mathbb{T})$ consists
of functions $\phi= \sum_{k \in
\mathbb{N}} \lambda_k a_k,
\lambda_k \in \mathbb{C}$,
$\sum_{k \in \mathbb{N}} | \lambda_k| < \infty $, where the $a_k$
are dyadic atoms, i.e.\ $\operatorname{supp} (a_k)\subset I_k$ for
some $I_k\in \mathcal{D}$, $\int_{I_k} a_k(t)dt=0$, and
$\|a_k\|_\infty\le\frac{1}{|I_k|}.$
The reader is referred to \cite{M} or to \cite{G} for standard results about
$H^1_d$ and $\operatorname{BMO^d}$.
Let
\[S_\phi= (\mathcal{S}\phi)^2=\sum_{I\in
\mathcal{D}}|\phi_I|^2\frac{\chi_I}{|I|}\]
denote the sweep of the function
$\phi$. Using John-Nirenberg's
lemma, one easily verifies the well-known fact that
\begin{equation} \label{sweep}
\phi\in {\rm BMO^d}(\mathbb{T}) \text{ if and only if } S_\phi \in {\rm
BMO^d}(\mathbb{T}).
\end{equation}
The reader is referred to \cite{blasco4} for a proof of
(\ref{sweep}) independent of John-Nirenberg's lemma.
The aim of this paper is twofold. Firstly, it is to investigate the spaces of
operator-valued BMO functions corresponding to characterizations
(\ref{bmo1})--(\ref{h1dual}). In the operator-valued case, these
characterizations are in general no longer equivalent. In the
language of operator spaces, we investigate the different operator
space structures on the scalar space $\operatorname{BMO^d}$ which arise
naturally from the different yet equivalent characterisations of
$\operatorname{BMO^d}$. The reader is referred to \cite{BlascoArg,BP,new,psm}
for some recent results on dyadic BMO and Besov spaces connected
to the ones in this paper. The second aim is to give sharp dimensional estimates
for the operator sweep and its bilinear extension, of which more will be said below,
in these operator $\operatorname{BMO}^d$ norms.
We require some further notation for the operator-valued case.
Let $\mathcal{H}$ be a separable, finite or infinite-dimensional Hilbert space.
Let $\mathcal{F}_{00}$ denote the subspace of
$\mathcal{L}(\mathcal{H})$-valued functions
on $\mathbb{T}$
with finite formal Haar expansion.
Given $e,f\in \mathcal{H}$ and $B \in L^2(\mathbb{T},\mathcal{L}(\mathcal{H}))$ we denote by $B_e$ the
function in $L^2(\mathbb{T},\mathcal{H})$ defined by $B_e(t)= B(t)(e)$
and by
$B_{e,f}$ the function in $L^2(\mathbb{T})$ defined by $B_{e,f}(t)=
\langle B(t)(e),f\rangle$. As in the scalar case,
let $B_I$ denote the formal Haar coefficients
$\int_I B(t) h_I dt$, and $m_I B = \frac{1}{|I|} \int_I B(t) dt$
denote the average of $B$ over $I$ for any $I \in \mathcal{D}$. Observe
that for $B_I$ and $m_IB$ to be well-defined operators, we shall
be assuming that the $\mathcal{L}(\mathcal{H})$-valued function $B$ is
$weak^*$-integrable. That means, using the duality
$\mathcal{L}(\mathcal{H})=(\mathcal{H}\hat\otimes \mathcal{H})^*$, that $\langle
B(\cdot)(e),f\rangle\in L^1(\mathbb{T})$ for $e,f\in \mathcal{H}$ and for any
measurable set $A$, there exists $B_A\in \mathcal{L}(\mathcal{H})$ such that
$\langle B_A(e),f\rangle=\langle\int_A B(t)(e) dt, f\rangle $ for $e,f\in \mathcal{H}$.
We denote by $\operatorname{BMO^d}(\mathbb{T},\mathcal{H})$ the space of Bochner integrable
$\mathcal{H}$-valued functions $b: \mathbb{T} \rightarrow \mathcal{H}$ such that
\begin{equation}
\|b\|_{\operatorname{BMO^d}}=\sup_{I \in \mathcal{D}} (\frac{1}{|I|} \int_I \| b(t) - m_I b\|^2
dt)^{1/2}<\infty
\end{equation}
and by $\rm wBMO^d(\mathbb{T},\mathcal{H})$ the space of Pettis integrable
$\mathcal{H}$-valued functions $b: \mathbb{T} \rightarrow \mathcal{H}$ such that
\begin{equation}
\|b\|_{\rm wBMO^d}=\sup_{I \in \mathcal{D}, e \in \mathcal{H}, \|e\|=1} (\frac{1}{|I|} \int_I |\langle b(t) - m_I b,
e \rangle|^2 dt)^{1/2}<\infty.
\end{equation}
In the operator-valued case we define the following notions
corresponding to the previous formulations: We denote by
$\operatorname{BMO_{norm}^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ the space of Bochner integrable
$\mathcal{L}(\mathcal{H})$-valued functions $B$ such that
\begin{equation} \label{bmond}
\|B\|_{\operatorname{BMO_{norm}^d}}=\sup_{I \in \mathcal{D}} (\frac{1}{|I|} \int_I \| B(t) - m_I B
\|^2 dt)^{1/2}<\infty,
\end{equation}
by ${\rm SBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ the space of $\mathcal{L}(\mathcal{H})$-valued
functions $B$ such that $B_e\in {\rm BMO^d}(\mathbb{T},\mathcal{H})$ for all
$e\in\mathcal{H}$ and
\begin{equation}\label{sbmo}
\|B\|_{{\rm SBMO^d}}= \sup_{I \in \mathcal{D},e \in \mathcal{H}, \|e \|=1}
(\frac{1}{|I|} \int_I \| (B(t) - m_I B)e \|^2
dt)^{1/2}< \infty,
\end{equation}
and, finally, by ${\rm WBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ the space of
$weak^*$-integrable $\mathcal{L}(\mathcal{H})$-valued functions $B$ such that
$B_{e,f}\in {\rm BMO^d}$ for all $e,f\in \mathcal{H}$ and
\begin{multline}\label{wbmo}
\|B\|_{{\rm WBMO^d}}=\sup_{I \in \mathcal{D}, \|e \|=\|f \|=1}
(\frac{1}{|I|} \int_I | \langle(B(t) - m_I B)e,f\rangle
|^2 dt)^{1/2} <\infty,
\end{multline}
or, equivalently, such that
\[
\|B\|_{{\rm WBMO^d}}= \sup_{e \in \mathcal{H}, \|e \|=1} \|B_e\|_{\rm
wBMO^d(\mathbb{T},\mathcal{H})} =\sup_{A \in S_1, \| A \|_1 \le 1 } \| \langle B,
A \rangle \|_{\operatorname{BMO^d}(\mathbb{T})} < \infty.
\]
Here, $S_1$ denotes the ideal of trace class operators in $\mathcal{L}(\mathcal{H})$,
and $\langle B, A \rangle$ stands for the scalar-valued function
given by $\langle B, A \rangle (t) = \operatorname{trace}( B(t) A^*)$.
The space $ \operatorname{BMO_c^d} (\mathbb{T},\mathcal{L}(\mathcal{H}))$ is the space of
$weak^*$-integrable operator-valued functions for which
\begin{equation} \label{def:bmocd}
\|B\|_{\operatorname{BMO_c^d}}=\sup_{I \in \mathcal{D}} (\frac{1}{|I|}
\sum_{J \in \mathcal{D}, J \subseteq I} \| B_J \|^2 )^{1/2} < \infty.
\end{equation}
We would like to point out that while $B$ belongs to one of the
spaces $\operatorname{BMO_{norm}^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$, ${\rm WBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ or
$\operatorname{BMO_c^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ if and only if $B^*$ does, this is not
the case for the space $\mathrm{SBMO}^d(\mathbb{T},\mathcal{L}(\mathcal{H}))$. This leads
to the following notion
(see \cite{gptv2, petermichl, pxu}): We say that
$B\in\operatorname{BMO_{so}^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$, \label{bmosd} if
$B$ and $B^*$ belong to $ {\rm SBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
We define \begin{equation} \label{def:bmoso}\|B\|_{\operatorname{BMO_{so}^d}}=
\|B\|_{{\rm SBMO^d}}+\|B^*\|_{{\rm SBMO^d}}.\end{equation}
We now define another operator-valued BMO space, using the notion of Haar
multipliers.
A sequence $(\Phi_I)_{I \in \mathcal{D}}$, $\Phi_I\in L^2(I,\mathcal{L}(\mathcal{H}))$
for all $I\in \mathcal{D}$, is said to be an \emph{operator-valued Haar
multiplier} (see \cite{per, BP}), if there exists $C>0$ such that
\[\|\sum_{I\in \mathcal{D}}\Phi_I(f_I)h_I\|_{L^2(\mathbb{T},\mathcal{H})}\le C (\sum_{I\in
\mathcal{D}}\|f_I\|^2)^{1/2} \text{ for all }
(f_I)_{I \in \mathcal{D}} \in l^2(\mathcal{D},\mathcal{H}).\]
We write $\|(\Phi_I)\|_{mult}$ for the norm of the corresponding operator
on
$L^2(\mathbb{T},\mathcal{H})$.
Letting again
as in the scalar-valued case
$P_I B =\sum_{J\subseteq I} h_JB_J$,
we denote the space of those $weak^*$-integrable
$\mathcal{L}(\mathcal{H})$-valued functions for
which $(P_IB)_{I\in\mathcal{D}}$ defines a bounded operator-valued Haar multiplier on $L^2(\mathbb{T}, \mathcal{H})$
by
$ \operatorname{BMO_{mult}} (\mathbb{T},\mathcal{L}(\mathcal{H}))$ and write
\begin{equation}\label{bmol}\|B\|_{\operatorname{BMO_{mult}}}=
\|(P_IB)_{I\in\mathcal{D}}\|_{mult}.
\end{equation}
We shall use the notation $\Lambda_B(f)=\sum_{I \in \mathcal{D}} (P_I B) (f_I) h_I$.
Let us mention that there is a further BMO space, defined in terms
of paraproducts, which is very much connected with
$\operatorname{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ and was studied in detail in \cite{new}.
Operator-valued paraproducts are of particular interest, because
they can be seen as dyadic versions of vector Hankel operators or
of vector Carleson embeddings, which are important in the real and
complex analysis of matrix valued functions and its applications in the theory
of infinite-dimensional linear systems (see e.g.~\cite{jp}, \cite{jpp1}).
Let $B \in \mathcal{F}_{00}$. We define the dyadic operator-valued paraproduct
with symbol $B$,
\[
\pi_B: L^2(\mathbb{T}, \mathcal{H}) \rightarrow L^2(\mathbb{T}, \mathcal{H}), \quad f = \sum_{I \in \mathcal{D}}
f_I h_I\mapsto
\sum_{I \in \mathcal{D}} B_I (m_I f) h_I,
\]
and
\[
\Delta_B: L^2(\mathbb{T}, \mathcal{H}) \rightarrow L^2(\mathbb{T}, \mathcal{H}), \quad f = \sum_{I \in \mathcal{D}}
f_I h_I\mapsto
\sum_{I \in \mathcal{D}} B_I (f_I)
\frac{\chi_I}{|I|}.
\]
It is easily seen that $(\pi_B)^* = \Delta_{B^*}$.
We denote by $ \operatorname{BMO_{para}} (\mathbb{T},\mathcal{L}(\mathcal{H}))$ the space of
$weak^*$-integrable operator-valued functions for which
$\|\pi_{B}\| < \infty$
and write \begin{equation}
\label{bmop}\|B\|_{\operatorname{BMO_{para}}}= \|\pi_B\|.
\end{equation}
We refer the reader to \cite{BlascoArg, new} and \cite{mei1, mei}
for results on this space.
It is elementary to see that
\begin{equation} \label{mult}
\Lambda_B( f )=
\sum_{I \in \mathcal{D}} B_I (m_I f) h_I
+ \sum_{I \in \mathcal{D}} B_I (f_I) \frac{\chi_I}{|I|} = \pi_B f + \Delta_B f.
\end{equation}
Hence $\Lambda_B=\pi_B+\Delta_B$ and
$(\Lambda_B)^*=\Lambda_{B^*}$. This shows that
$\|B\|_{\operatorname{BMO_{mult}}}=\|B^*\|_{\operatorname{BMO_{mult}}}$.
Let us finally
denote by ${\rm BMO_{spara}}(\mathrm{Mat}(\C, n \times n)hbb{T},\mathrm{Mat}(\C, n \times n)hcal{L}(\mathrm{Mat}(\C, n \times n)hcal{H}))$ the space of symbols $B$
such that $\mathrm{Mat}(\C, n \times n)hcal{P}i_{B}$ and $\mathrm{Mat}(\C, n \times n)hcal{P}i_{B^*}$ are bounded operators, and
define
\begin{equation}
\lambdabel{bmosp}\|B\|_{\rm BMO_{spara}}= \|\mathrm{Mat}(\C, n \times n)hcal{P}i_B\|+\|\mathrm{Mat}(\C, n \times n)hcal{P}i_{B^*}\|.
\varepsilonnd{equation}
Since $\mathrm{Mat}(\C, n \times n)hbb{D}elta_B=\mathrm{Mat}(\C, n \times n)hcal{P}i^*_{B^*}$, one concludes that ${\rm
BMO_{spara}}(\mathrm{Mat}(\C, n \times n)hbb{T},\mathrm{Mat}(\C, n \times n)hcal{L}(\mathrm{Mat}(\C, n \times n)hcal{H}))\subseteq \operatorname{{\mathrm{Mat}(\C, n \times n)hrm{BMO_{mult}}}}
(\mathrm{Mat}(\C, n \times n)hbb{T},\mathrm{Mat}(\C, n \times n)hcal{L}(\mathrm{Mat}(\C, n \times n)hcal{H}))$.
We write $\approx$ for equivalence of norms up to a constant (independent of the
dimension of the Hilbert space $\mathcal{H}$, if this appears), and similarly $\lesssim, \gtrsim$ for
the corresponding one-sided estimates up to a constant.
Recall that for a given Banach space $(X, \| \cdot \|)$, a family
of norms $( M_n(X), \| \cdot\|_n)$ on the spaces $M_n(X)$ of
$X$-valued $n \times n$ matrices defines an \emph{operator space
structure} on $X$, if $\|\cdot\|_1 \approx \|\cdot\|$,
\begin{enumerate}
\item[(M1)] $\| A \oplus B \|_{n +m} = \max \{ \|A\|_n, \|B\|_m \}$ for $A \in M_n(X)$,
$B \in M_m(X)$,
\item[(M2)] $ \|\alpha A \beta \|_{m} \le \|\alpha \|_{M_{n,m}(\mathbb{C})}
\| A \|_n \|\beta\|_{M_{m,n}(\mathbb{C})} $ for all $A \in M_n(X)$
and all scalar matrices $\alpha \in M_{n,m}(\mathbb{C})$, $\beta \in M_{m,n}(\mathbb{C})$
\end{enumerate}
(see e.g.~\cite{effros}). One verifies easily that all the $\mathrm{BMO^d}$-norms
on $\mathcal{L}(\mathcal{H})$-valued functions defined above,
except $\mathrm{BMO^d_{norm}}$ and $\mathrm{BMO^d_c}$, define operator space
structures on $\mathrm{BMO^d}(\mathbb{T})$ when taken for $n$-dimensional $\mathcal{H}$, $n \in \mathbb{N}$.
The aim of the paper is to show the following strict inclusions for infinite-dimensional
$\mathcal{H}$:
\begin{multline} \label{eq:inclchain}
\mathrm{BMO^d_{norm}}(\mathbb{T},\mathcal{L}(\mathcal{H}))
\subsetneq \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))\subsetneq\\ \mathrm{BMO^d_{so}}(\mathbb{T},\mathcal{L}(\mathcal{H}))
\subsetneq
\mathrm{WBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))
\end{multline}
and
\begin{equation} \label{carles}
\mathrm{BMO^d_c}(\mathbb{T},\mathcal{L}(\mathcal{H})) \subsetneq \mathrm{BMO_{spara}}(\mathbb{T},\mathcal{L}(\mathcal{H}))\subsetneq
\mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H})).
\end{equation}
This means that the
corresponding inclusions of operator spaces over $\mathrm{BMO^d}(\mathbb{T})$, where they apply, are
completely bounded, but not completely isomorphic (for the
notation, see again e.g.~\cite{effros}). We will also consider the preduals for some of the spaces
shown. Finally, we will give sharp estimates for the dimensional growth of the sweep and its bilinear
extension on $\mathrm{BMO_{para}}$, $\mathrm{BMO_{mult}}$ and $\mathrm{BMO^d_{norm}}$, completing results in \cite{new} and \cite{mei}.
The paper is organized as follows. In Section 2, we prove the chains
of strict inclusions (\ref{eq:inclchain}) and (\ref{carles}).
Actually, the only nontrivial inclusion to be shown is $\mathrm{BMO^d_{norm}}(\mathbb{T},\mathcal{L}(\mathcal{H}))
\subset \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
For this purpose, we introduce a new Hardy space $H^1_\Lambda$ adapted to the problem, and then the result
can be shown by an estimate on the dual side. The remaining inclusions are
immediate consequences of the definition, and only the
counterexamples showing that none of the spaces are equal need to be found.
The reader is referred to \cite{mei1} for more on the theory of
operator-valued Hardy spaces.
Section 3 deals with dimensional growth properties of the \emph{operator sweep}
and its bilinear extension. We define the operator sweep for $B \in \mathcal{F}_{00}$,
$$
S_B = \sum_{I \in \mathcal{D}} \frac{\chi_I}{|I|} B_I^* B_I,
$$
and its bilinear extension
$$
\Delta[U^*,V]= \sum_{I \in \mathcal{D}} \frac{\chi_I}{|I|} U_I^* V_I \qquad (U,V \in \mathcal{F}_{00}).
$$
These maps are of interest for several reasons. They are closely connected with
the paraproduct and certain bilinear paraproducts,
they provide a tool to understand the dimensional growth in the John--Nirenberg lemma,
and they are useful to understand
products of paraproducts and products of certain other operators (see \cite{new}, \cite{psm}).
Considering (\ref{sweep}) in the operator-valued case, it was shown
in \cite{new} that
\begin{equation}\label{normsweep}
\|S_B\|_{\mathrm{BMO^d_{mult}}}+\|B\|^2_{\mathrm{SBMO^d}}\approx \|B\|^2_{\mathrm{BMO^d_{para}}}.
\end{equation}
Here, we prove the bilinear analogue
\begin{equation}\label{normbisweep}
\|\Delta[U^*,V]\|_{\mathrm{BMO^d_{mult}}}+\sup_{I \in \mathcal{D}} \frac{1}{|I|} \Bigl\|\sum_{J \subset I} U_J^* V_J\Bigr\|
\approx \|\Pi_U^* \Pi_V\|.
\end{equation}
It was also shown in \cite{new} that
\begin{equation}\label{estsweep}
\|S_B\|_{\mathrm{SBMO^d}}\le C \log(n+1) \|B\|^2_{\mathrm{SBMO^d}}
\end{equation}
for $\dim(\mathcal{H})=n$, where $C$ is a constant independent of $n$, and that this estimate is sharp.
We extend this by proving sharp estimates
of $\|S_B\|$ and $\|\Delta[U^*,V]\|$ in terms of $\|B\|, \|U\|, \|V\|$ with respect to the
norms in $\mathrm{SBMO}^d$, $\mathrm{BMO_{para}}$, $\mathrm{BMO_{mult}}$ and $\mathrm{BMO^d_{norm}}$.
\section{Strict inclusions}
Let us start by stating the following characterizations of
$\mathrm{SBMO}$ to be used later on. Some of the
equivalences can be found in \cite{gptv2}; we give the proof for
the convenience of the reader.
\begin{prop}\label{carbmoso} Let $B\in \mathrm{SBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$. Then
\begin{align*}
\|B\|^2_{\mathrm{SBMO^d}} &= \sup_{e \in \mathcal{H}, \|e\|=1}
\|B_e\|^2_{\mathrm{BMO^d}(\mathbb{T},\mathcal{H})}\\
&=\sup_{I\in \mathcal{D},\|e\|=1}\frac{1}{|I|}\|P_I(B_e)\|^2_{L^2(\mathcal{H})} \\
&=\sup_{I\in \mathcal{D}}\frac{1}{|I|}\Bigl\|\sum_{J\subseteq I} B_J^* B_J\Bigr\| \\
&=\sup_{I \in \mathcal{D}}\left\|
\frac{1}{|I|} \int_I (B(t) - m_I B)^* (B(t) - m_I B)\, dt
\right\|\\
&=\sup_{I\in \mathcal{D}} \|m_I(B^*B)-m_I(B^*)m_I(B)\|.
\end{align*}
\end{prop}
\Proof The first two equalities are obvious from the definition.
Now observe that
$$\Bigl\|\sum_{J\subseteq I} B_J^*
B_J\Bigr\|=\sup_{\|e\|=1,
\|f\|=1}\sum_{J\subseteq I} \langle B_J(e),
B_J(f)\rangle=\sup_{\|e\|=1}\sum_{J\subseteq I} \| B_J(e)
\|^2=\|P_I(B_e)\|^2_{L^2(\mathcal{H})}.$$
The other equalities follow from
\begin{align*}
\|m_I(B^*B)-m_I(B^*)m_I(B)\| &= \left\|\frac{1}{|I|} \int_I
(B(t)-m_I B)^* (B(t) - m_IB)\, dt \right\| \\
&= \sup_{e \in \mathcal{H}, \|e\|=1 }\frac{1}{|I|} \int_I \langle
(B(t)-m_I B)^* (B(t) - m_IB)e,e \rangle\, dt \\
&= \sup_{e \in \mathcal{H}, \|e\|=1 }\frac{1}{|I|} \int_I \| P_I B e\|^2\, dt.
\end{align*}
\qed
\begin{lemm} Let $B=\sum_{k=1}^N B_k r_k$, where $ r_k=\sum_{|I|=2^{-k}}|I|^{1/2} h_I$ denote the Rademacher
functions. Then
\begin{equation}\label{n1}
\|B\|_{\mathrm{SBMO^d}}=
\sup_{\|e\|=1}\Bigl(\sum_{k=1}^N \|B_k e\|^2\Bigr)^{1/2},
\end{equation}
\begin{equation}\label{n2}
\|B\|_{\mathrm{BMO_{so}}}=
\sup_{\|e\|=1}\Bigl(\sum_{k=1}^N \|B_k
e\|^2\Bigr)^{1/2}+\sup_{\|e\|=1}\Bigl(\sum_{k=1}^N\|B^*_k e\|^2\Bigr)^{1/2},
\end{equation}
\begin{equation}\label{n3}
\|B\|_{\mathrm{WBMO}^d}= \sup_{\|f\|=\|e\|=1}
\Bigl(\sum_{k=1}^N |\langle B_k e,f\rangle|^2
\Bigr)^{1/2}.
\end{equation}
\end{lemm}
\Proof This follows from standard Littlewood--Paley theory.
\qed
For $x,y\in \mathcal{H}$, we denote by $x\otimes y$ the rank one operator in
$\mathcal{L}(\mathcal{H})$ given by $(x\otimes y)(h)=\langle h,y\rangle x$. Clearly
$(x\otimes y)^*=(y\otimes x)$.
\begin{prop} \label{firstinc} Let $\dim \mathcal{H} = \infty$. Then
$$\mathrm{BMO_{mult}} \subsetneq \mathrm{BMO^d_{so}}(\mathbb{T}, \mathcal{L}(\mathcal{H})) \subsetneq \mathrm{SBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))\subsetneq
\mathrm{WBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H})).$$
\end{prop}
\Proof Note that if $(\Phi_I)_{I\in\mathcal{D}}$ is a Haar multiplier, then
\begin{equation}\label{debilmult}
\sup_{I\in \mathcal{D}, \|e\|=1} |I|^{-1/2}
\|\Phi_I(e)\|_{L^2(\mathbb{T},\mathcal{H})}\le \|(\Phi_I)\|_{\mathrm{mult}}.
\end{equation}
The first inclusion thus follows from (\ref{debilmult}) and Proposition~\ref{carbmoso}. The other inclusions are immediate.
Let us see that they are strict. It was shown in \cite{gptv2} that $\mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H})) \neq
\mathrm{BMO^d_{so}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
Let $(e_k)$ be an orthonormal basis of $\mathcal{H}$ and
$h\in \mathcal{H}$ with $\|h\|=1$. Then by (\ref{n1}),
$B=\sum_{k=1}^\infty h\otimes e_k
\, r_k\in \mathrm{SBMO^d} $ and $B^*=\sum_{k=1}^\infty e_k\otimes h \,
r_k\notin\mathrm{SBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
Thus $B\in\mathrm{SBMO^d}(\mathbb{T}, \mathcal{L}(\mathcal{H})) \setminus \mathrm{BMO^d_{so}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$. Similarly, by (\ref{n1}) and (\ref{n3}), $B\in
\mathrm{WBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))\setminus \mathrm{SBMO^d}(\mathbb{T},\mathcal{L}(\mathcal{H}))$. \qed
Note that
\begin{equation}\label{form}
\Lambda_B f= B f -\sum_{I\in \mathcal{D}}(m_IB)(f_I) h_I,
\end{equation}
which allows us to conclude immediately that
$L^\infty(\mathbb{T},\mathcal{L}(\mathcal{H}))\subseteq \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
Our next objective is to see that $\mathrm{BMO^d_{norm}}(\mathbb{T},\mathcal{L}(\mathcal{H})) \subsetneq
\mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$. For that,
we need again some more notation.
Let $S_1$ denote the ideal of trace class operators on $\mathcal{H}$, and
recall that $S_1=\mathcal{H}\hat\otimes\mathcal{H}$ and $(S_1)^*=\mathcal{L}(\mathcal{H})$ with the
pairing $\langle U,(e \otimes d)\rangle= \langle U(e), d\rangle.$
It is easy to see that
the space $\mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$ can be embedded isometrically into the dual of
a certain
$H^1$ space
of $S_1$-valued functions:
\begin{defi} Let $f,g\in L^2(\mathbb{T},\mathcal{H})$. Define
$$
f \circledast g = \sum_{I \in \mathcal{D}} h_I (f_I \otimes m_I g + m_I f
\otimes g_I).
$$
Let $H^1_\Lambda$ be the space of functions $ f=\sum_{k=1}^\infty \lambda_k f_k
\circledast g_k$ such that $f_k,g_k\in L^2(\mathbb{T},\mathcal{H})$,
$\|f_k\|_{2}=\|g_k\|_2=1$ for all $k \in \mathbb{N}$, and $\sum_{k=1}^\infty
|\lambda_k| <\infty$.
We endow the space with the norm given by the infimum of $\sum_{k=1}^\infty
|\lambda_k|$ over all possible decompositions.
\end{defi}
With this notation, $B \in \mathrm{BMO_{mult}}$ acts on $f \circledast g$ by
$$
\langle B, f \circledast g \rangle
= \int_\mathbb{T} \langle B(t), (f \circledast g)(t) \rangle\, dt =
\langle \Lambda_B f,g \rangle.
$$
By definition of $H^1_\Lambda$, $\| B\|_{(H^1_\Lambda)^*} = \|\Lambda_B\|$.
We will now define a further $H^1$ space of $S_1$-valued functions.
For $F \in L^1(\mathbb{T}, S_1)$, define the dyadic Hardy--Littlewood maximal
function $F^*$
of $F$ in
the usual way,
$$
F^*(t) = \sup_{I \in \mathcal{D},\, t \in I} \frac{1}{|I|} \int_I \| F(s)\|_{S_1}\,
ds.
$$
Then let $H^1_{\max}$ be given by the functions $ F \in L^1(\mathbb{T},S_1)$
such that $ F^* \in L^1(\mathbb{T}) $. By a result of Bourgain
(\cite{bourgain}, Th.~12), $\mathrm{BMO^d_{norm}}$ embeds continuously into
$(H^1_{\max})^*$ (see also \cite{blasco1,blasco3}).
\begin{lemm} \label{dual}
$H^1_\Lambda \subseteq H^1_{\max}.$
\end{lemm}
\Proof It is sufficient to show that there is a constant $C >0$ such
that for all $f,g \in L^2(\mathbb{T},\mathcal{H})$, $f \circledast g \in H^1_{\max}$, and
$\| f \circledast g \|_{H^1_{\max}} \le C \|f\|_2 \|g\|_2$.
One verifies that
$$
f \circledast g = \sum_{I \in \mathcal{D}} h_I (f_I \otimes m_I g + m_I f
\otimes g_I)= f \otimes g - \sum_{I \in \mathcal{D}}\frac{\chi_I}{|I|} f_I \otimes
g_I.
$$
Towards the estimate of the maximal function,
let $E_k$ denote the expectation with respect to the
$\sigma$-algebra generated
by dyadic intervals of length $2^{-k}$,
$$
E_k F = \sum_{I \in \mathcal{D}, |I| > 2^{-k}} h_I F_I,
$$
for each $k \in \mathbb{N}$.
Then we have
\begin{equation}
E_k( f \circledast g) = (E_k f) \circledast (E_k g),
\end{equation}
as
$$
\sum_{I \in \mathcal{D}, |I| > 2^{-k}} h_I (f_I \otimes m_I g + m_I f \otimes
g_I)
= \sum_{I \in \mathcal{D}} h_I ((E_k f)_I \otimes m_I (E_k g) +
m_I (E_k f) \otimes (E_k g)_I).
$$
Thus
\begin{multline*}
(f \circledast g)^*(t) = \sup_{k \in \mathbb{N}} \|E_k(f \circledast g)(t)\|_{S_1}
\le \sup_{k \in \mathbb{N}} \|(E_k f)(t)\| \|(E_k g)(t)\| +\sum_{I \in \mathcal{D}}
\frac{\chi_I(t)}{|I|} \|f_I\| \|g_I\|\\
\le \|f^*(t)\| \|g^*(t)\| +\sum_{I \in \mathcal{D}} \frac{\chi_I(t)}{|I|}
\|f_I\| \|g_I\|,
\end{multline*}
and
$$
\|(f \circledast g)^*\|_1 \le \|f^*\|_2 \|g^*\|_2 + \|f\|_2 \|g\|_2 \le C
\|f\|_2 \|g\|_2
$$
by the Cauchy--Schwarz inequality and the boundedness of the dyadic
Hardy--Littlewood maximal function on $L^2(\mathbb{T}, \mathcal{H})$.
\qed
\noindent
In particular, $H^1_\Lambda \subseteq L^1(\mathbb{T},S_1)$.
We can now prove our inclusion result:
\begin{satz}\label{maininclu}
$\mathrm{BMO^d_{norm}}(\mathbb{T},\mathcal{L}(\mathcal{H})) \subsetneq \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
\end{satz}
\Proof The inclusion follows by Lemma~\ref{dual}, duality and Bourgain's
result.
To see that the spaces do not coincide, use the fact that
$\mathrm{BMO^d}(\ell_\infty)\subsetneq \ell_\infty(\mathrm{BMO^d})$ to find for
each $N \in \mathbb{N}$ functions $b_k \in \mathrm{BMO}$, $k=1,\dots,N$, such that
$\sup_{1\le k\le N}\|b_k\|_{\mathrm{BMO^d}}\le 1$, but
$\|(b_k)_{k=1,\dots,N}\|_{\mathrm{BMO}^d(\mathbb{T},l^\infty_N)}\ge c_N$, where $c_N
{\rightarrow} \infty$ as $N \to \infty$.
Let $(e_k)_{k \in \mathbb{N}}$ be
an orthonormal basis of $\mathcal{H}$, and
consider the operator-valued function
$B(t)=\sum_{k=1}^{N} b_k(t)e_k\otimes e_k\in L^2(\mathbb{T},\mathcal{L}(\ell_2))$. Clearly
$B_I= \sum_{k=1}^N (b_k)_I e_k \otimes e_k$, and
for each $\mathbb{C}^N$-valued function $f= \sum_{k=1}^N f_k e_k$, $f_1, \dots, f_N \in L^2(\mathbb{T})$, we have
$$\Lambda_B(f)=\sum_{k=1}^N \Lambda_{b_k}(f_k)e_k . $$
Choosing the $f_k$ such that $\|f\|_2^2=\sum_{k=1}^N
\|f_k\|^2_{L^2(\mathbb{T})}=1$, we find that
$$
\|\Lambda_B(f)\|^2_{L^2(\mathbb{T},\ell_2)}=\sum_{k=1}^N
\|\Lambda_{b_k}(f_k)\|^2_{L^2(\mathbb{T})}\le C \sum_{k=1}^N
\|{b_k}\|^2_{\mathrm{BMO^d}}\|f_k\|^2_{L^2(\mathbb{T})}\le C,
$$
where $C$ is a constant independent of $N$. Therefore, $\Lambda_B$ is bounded.
But since
$\|B\|_{\mathrm{BMO^d_{norm}}}=\|(b_k)_{k=1,\dots,N}\|_{\mathrm{BMO^d}(\mathbb{T},l^\infty_N)}\ge
c_N$, it follows that $\mathrm{BMO_{mult}}(\mathbb{T})$ is not continuously embedded in
$\mathrm{BMO^d_{norm}}(\mathbb{T},
\mathcal{L}(\mathcal{H}))$.
From the open mapping theorem, we obtain inequality of the spaces.
\qed
The next proposition shows that the space $\mathrm{BMO^d_c}$ belongs to a
different scale than $\mathrm{BMO^d_{norm}}$ and $\mathrm{BMO_{mult}}$.
\begin{prop}
$L^\infty(\mathbb{T},\mathcal{L}(\mathcal{H})) \nsubseteq \mathrm{BMO^d_c}(\mathbb{T},\mathcal{L}(\mathcal{H})).$
\label{subs:inf-carl}
\end{prop}
\Proof This follows from the result $L^\infty(\mathbb{T},
\mathcal{L}(\mathcal{H}))\nsubseteq \mathrm{BMO_{para}} $ in \cite{mei} (see Lemma~\ref{mei}
below) and the next proposition. We give a simple direct argument.
Choose an orthonormal basis of $\mathcal{H}$ indexed by the elements of
$\mathcal{D}$, say $(e_I)_{I \in \mathcal{D}}$, and let $\Phi_I = e_I \otimes
e_I$, $\Phi_I h = \langle h, e_I \rangle e_I$. Let $\lambda_I =
|I|^{1/2}$ for $I \in \mathcal{D}$, and define $B = \sum_{I \in \mathcal{D}} h_I
\lambda_I \Phi_I$. Then $\sum_{I \in \mathcal{D}} \|B_I\|^2 = \sum_{I \in
\mathcal{D}} |I| = \infty$, so in particular $B \notin
\mathrm{BMO^d_c}(\mathbb{T},\mathcal{L}(\mathcal{H}))$. But the operator function $B$ is diagonal
with uniformly bounded diagonal entry functions $\Phi_I(t)
=\langle B(t) e_I, e_I \rangle = |I|^{1/2} h_I(t)$, so $B \in
L^\infty(\mathcal{L}(\mathcal{H}))$.\qed
\begin{prop} \label{subs:carl-para}
$$\mathrm{BMO^d_c}(\mathbb{T},\mathcal{L}(\mathcal{H})) \subsetneq \mathrm{BMO_{spara}}(\mathbb{T},\mathcal{L}(\mathcal{H}))\subsetneq \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H})).$$
\end{prop}
\Proof The inclusion $\mathrm{BMO^d_c} \subseteq \mathrm{BMO_{spara}}$ is easy,
since (\ref{def:bmocd}) implies that for $B \in \mathrm{BMO^d_c}$,
the $\mathrm{BMO^d_c}$ norm equals the norm of the scalar $\mathrm{BMO^d}$ function
given by $|B|:=\sum_{I \in \mathcal{D}} h_I \|B_I\|$. For $f \in L^2(\mathcal{H})$,
let $|f|$ denote the function given by $|f|(t) = \|f(t)\|$. Thus
$$
\|\Pi_B f \|_2^2 = \sum_{I \in \mathcal{D}} \|B_I m_I f\|^2 \le
\sum_{I \in \mathcal{D}} (\|B_I\| m_I |f|)^2 = \| \Pi_{|B|} |
f|\|.
$$
The boundedness of $\Pi_{B^*}$ follows analogously.
To show that $\mathrm{BMO^d_c} \neq \mathrm{BMO_{spara}}$, we can use the
diagonal operator function $B$ constructed in Proposition~\ref{subs:inf-carl}. There, it is shown that $B \notin \mathrm{BMO^d_c}$,
and that the diagonal entry functions $\Phi_I = \langle Be_I, e_I
\rangle$ are uniformly bounded. Since the paraproduct of each
scalar-valued $L^\infty$ function is bounded, we see that $\Pi_B =
\bigoplus_{I \in \mathcal{D}} \Pi_{\Phi_I}$ is bounded. Similarly,
$\Pi_{B^*}$ is bounded. Thus $B \in \mathrm{BMO_{spara}}$.
It is clear from (\ref{mult}) that $\mathrm{BMO_{spara}}(\mathbb{T},\mathcal{L}(\mathcal{H}))\subseteq \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$.
Using that
$L^\infty(\mathbb{T},\mathcal{L}(\mathcal{H}))\nsubseteq \mathrm{BMO_{spara}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$
(see \cite{mei}), one concludes that $\mathrm{BMO_{spara}}(\mathbb{T},\mathcal{L}(\mathcal{H}))\neq \mathrm{BMO_{mult}}(\mathbb{T},\mathcal{L}(\mathcal{H}))$. \qed
\section{Sharp dimensional growth of the sweep}
We begin with the following lower estimate of the $\mathrm{BMO_{para}}$ norm in terms of the $L^\infty$ norm of
certain $\mathrm{Mat}(\C, n \times n)$-valued functions from \cite{mei}.
\begin{lemm}\label{mei} (see \cite{mei}, Thm 1.1.) There exists an absolute constant
$c >0$ such that for each $n\in \mathbb{N}$, there exists a measurable
function $F:\mathbb{T} \rightarrow \mathrm{Mat}(\C, n \times n)$ with $\|F\|_\infty \le 1$ and
$\|\Pi_F\| \ge c \log(n+1)$.
\end{lemm}
Here are our dimensional estimates of the sweep.
\begin{satz} There exists an absolute constant $C >0$ such that for
each $n \in \mathbb{N}$ and each measurable function $B: \mathbb{T} \rightarrow \mathrm{Mat}(\C, n \times n)$,
\begin{equation} \label{eq:sharppara}
\| S_B \|_{\mathrm{BMO_{para}}} \le C \log(n+1) \| B\|^2_{\mathrm{BMO_{para}}},
\end{equation}
\begin{equation} \label{eq:sharpmult}
\| S_B \|_{\mathrm{BMO_{mult}}} \le C (\log(n+1))^2 \| B\|^2_{\mathrm{BMO_{mult}}},
\end{equation}
\begin{equation} \label{eq:sharpnorm}
\| S_B \|_{\mathrm{BMO_{norm}}^d} \le C (\log(n+1))^2 \| B\|^2_{\mathrm{BMO_{norm}}^d},
\end{equation}
and the dimensional estimates are sharp.
\end{satz}
\noindent{\bf Proof.} Let $B: \mathbb{T} \rightarrow \mathrm{Mat}(\C, n \times n)$ be measurable. Since
$\|S_B\|_* = \lim_{k \to \infty} \|S(E_k B) \|_*$ in all of the
above BMO norms (because we are in the finite-dimensional
situation) it suffices to consider the case $B \in \mathcal{F}_{00}$.
We start by proving (\ref{eq:sharppara}). Since
\begin{equation} \label{eq:paraest}
\|\Pi_B\| \le C'
\log(n+1) \|B\|_{\mathrm{BMO_{so}}^d}
\end{equation}
for some absolute constant $C' >0$
(see \cite{ntv}, \cite{katz}) and
\begin{equation} \label{eq:strongmult}
\|B\|_{\mathrm{BMO_{so}}^d} \le \|B\|_{\mathrm{BMO_{mult}}},
\end{equation}
we have
\begin{equation*} \|S_B \|_{\mathrm{BMO_{para}}} \le C'
\log(n+1) \|S_B\|_{\mathrm{BMO_{mult}}} \le C \log(n+1) \|B\|^2_{\mathrm{BMO_{para}}}
\end{equation*}
by (\ref{normsweep}).
For the sharpness of the estimate, take $F$ as in Lemma \ref{mei}. Again,
approximating by $E_k F$, we can assume that $F \in \mathcal{F}_{00}$.
Since each function in $L^\infty(\mathbb{T}, \mathrm{Mat}(\C, n \times n))$ is the linear
combination of 4 nonnegative-matrix valued functions, the $L^\infty$-norm of which is controlled by the
norm of the original function, we can (by replacing $c$ with a smaller constant) assume
that $F$ is a nonnegative matrix-valued function. Each such nonnegative
matrix-valued function $F$ can be written as $F = S_B$ with $B \in
\mathcal{F}_{00}$, for example by choosing $B = \sum_{I \in \mathcal{D}, |I|=
{2^{-k}}} h_I B_I$, where $B_I = |I|^{1/2} (F^I)^{1/2}$, $F =
\sum_{I \in \mathcal{D}, |I|= {2^{-k}}} \chi_I F^I$. It follows that
\begin{multline*}
\|S_B \|_{\mathrm{BMO_{para}}}\ge c \log(n+1) \|S_B\|_\infty \\
\ge c/2 \log(n+1)( \|S_B\|_{\mathrm{BMO_{mult}}} + \|B\|^2_{\mathrm{BMO_{so}}^d})
\gtrsim \log(n+1) \|B\|^2_{\mathrm{BMO_{para}}}
\end{multline*}
again by (\ref{normsweep}). Here, we use the estimate
$\|B\|^2_{\mathrm{BMO_{so}}^d} \le \|S_B\|_\infty$, which can easily be obtained by
$$
\|P_I B e\|_2^2 = \|S_{P_I Be}\|_1 \le |I| \|S_{P_I Be}\|_\infty \le |I| \|S_{P_I B}\|_\infty
\le |I| \|S_{ B}\|_\infty \text{ for } e \in \mathcal{H}, \|e\|=1.
$$
This proves that (\ref{eq:sharppara}) is sharp.
Let us now show (\ref{eq:sharpmult}). Note that by
(\ref{normsweep}) and (\ref{eq:paraest}), for $B \in \mathcal{F}_{00}$,
$$
\|S_B \|_{\mathrm{BMO_{mult}}} \lesssim \|B\|^2_{\mathrm{BMO_{para}}} \le {C}'^2 \log(n+1)^2 \|B\|^2_{\mathrm{BMO_{mult}}}.
$$
For sharpness, choose $B \in \mathcal{F}_{00}$, $\|B \|_{\infty} \le 1$,
$\|\Pi_B\|\ge c \log(n+1)$ as above, to obtain
\begin{multline*}
\|S_B \|_{\mathrm{BMO_{mult}}} + \|B\|^2_{\mathrm{BMO_{so}}^d} \gtrsim \|B\|^2_{\mathrm{BMO_{para}}} \\
\ge c^2 \log(n+1)^2 \|B\|_\infty^2
\ge c^2 \log(n+1)^2 \|B\|_{\mathrm{BMO_{mult}}}^2
\end{multline*}
and thus
$$
\|S_B \|_{\mathrm{BMO_{mult}}} \gtrsim \log(n+1)^2 \|B\|_{\mathrm{BMO_{mult}}}^2,
$$
as $\|B\|_{\mathrm{BMO_{so}}^d} \le \|B\|_{\mathrm{BMO_{mult}}}$.
Finally, let us show (\ref{eq:sharpnorm}). Again, we can restrict
ourselves to the case $B \in \mathcal{F}_{00}$ by an approximation argument.
We use the fact that the UMD constant of $\mathrm{Mat}(\C, n \times n)$ is equivalent to
$\log(n+1)$ (see for instance \cite{Pi1}) and the representation
$$
S_B(t) = \int_{\Sigma} (T_\sigma B)^*(t) (T_\sigma B)(t) d \sigma \qquad(B \in \mathcal{F}_{00})
$$
(see \cite{new}, \cite{gptv2}),
where $T_\sigma$ denotes the dyadic martingale transform $B \mapsto T_\sigma B = \sum_{I \in \mathcal{D}} \sigma_I h_I B_I$,
$\sigma= (\sigma_I)_{I \in \mathcal{D}} \in\{-1,1\}^\mathcal{D}$,
and $d \sigma$ the natural product probability measure on $\Sigma =\{-1,1\}^\mathcal{D}$ assigning measure $2^{-n}$ to
cylinder sets of length $n$,
to prove that
\begin{multline*}
\|P_I S_B\|_{L^1(\mathbb{T}, \mathrm{Mat}(\C, n \times n))} = \|P_I S_{P_I B}\|_{L^1(\mathbb{T}, \mathrm{Mat}(\C, n \times n))}
\le 2 \|S_{P_I B}\|_{L^1(\mathbb{T}, \mathrm{Mat}(\C, n \times n))} \\
\lesssim (\log(n+1))^2 \|P_I B\|_{L^2(\mathbb{T}, \mathrm{Mat}(\C, n \times n))}^2 \le (\log(n+1))^2
|I| \|B\|^2_{\mathrm{BMO_{norm}}^d},
\end{multline*}
which gives the desired inequality.
To prove sharpness, choose $B \in \mathcal{F}_{00}$, $\|B \|_{\infty} \le 1$, $\|\Pi_B\|\ge c \log(n+1)$
and note that by Theorem \ref{maininclu},
\begin{multline*}
\|S_B\|_{\mathrm{BMO_{norm}}^d} + \|B \|^2_{\mathrm{BMO_{so}}^d} \gtrsim \|S_B\|_{\mathrm{BMO_{mult}}} + \|B \|^2_{\mathrm{BMO_{so}}^d} \\
\gtrsim \|B\|^2_{\mathrm{BMO_{para}}} \ge c^2 \log(n+1)^2 \|B\|^2_\infty \ge c^2 \log(n+1)^2 \|B\|^2_{\mathrm{BMO_{norm}}^d}.
\end{multline*}
Since $\|B \|_{\mathrm{BMO_{so}}^d} \le \|B\|_{\mathrm{BMO_{norm}}^d}$, this implies
$$
\|S_B\|_{\mathrm{BMO_{norm}}^d} \gtrsim \log(n+1)^2 \|B\|^2_{\mathrm{BMO_{norm}}^d}.
$$
\qed
We now consider the bilinear extension of the sweep. By
\cite{psm}, \cite{new} or \cite{BlascoArg}
\begin{equation} \label{eq:bidentity}
\Pi_{U}^* \Pi_{V} = \Lambda_{\Delta[U^*,V]} + D_{U^*,V} \qquad(U,V \in \mathcal{F}_{00}),
\end{equation}
where $D_{U^*,V}$ is given by $D_{U^*,V} h_I e = h_I
\frac{1}{|I|}\sum_{J \subset I} U^*_J V_J e$ for $I \in \mathcal{D}$, $e
\in \mathcal{H}$.
\begin{prop} \label{prop:bisweep}
$$
\|\Pi_{U}^* \Pi_{V}\| \approx \|\Delta[U^*,V]\|_{\mathrm{BMO_{mult}}} + \sup_{I \in \mathcal{D}}\frac{1}{|I|}\|\sum_{J \subset I} U^*_J V_J \|
\qquad (U,V \in \mathcal{F}_{00}).
$$
\end{prop}
\noindent{\bf Proof.} Obviously $\|D_{U^*,V}\| = \sup_{I \in
\mathcal{D}}\frac{1}{|I|}\|\sum_{J \subset I} U^*_J V_J \|$. Thus by
(\ref{eq:bidentity}),
$$
\|\Pi_{U}^* \Pi_{V}\| \le \|\Delta[U^*,V]\|_{\mathrm{BMO_{mult}}} + \sup_{I \in
\mathcal{D}}\frac{1}{|I|}\|\sum_{J \subset I} U^*_J V_J \|.
$$
For the reverse estimate, it suffices to observe that $D_{U^*,V}$ is the block diagonal of the
operator $\Pi_{U}^* \Pi_{V}$ with respect to the orthogonal subspaces $h_I \mathcal{H}$, $I \in \mathcal{D}$
and therefore $\|D_{U^*,V}\| \le \|\Pi_{U}^* \Pi_{V} \|$.
\qed
\noindent
Here are the dimensional estimates of the bilinear map $\Delta$.
\begin{cor} There exists an absolute constant $C >0$ such that for
each $n \in \mathbb{N}$ and each pair of measurable functions $U,V: \mathbb{T} \rightarrow \mathrm{Mat}(\C, n \times n)$,
\begin{equation} \label{eq:bistrong}
\| \Delta[U^*,V] \|_{\mathrm{SBMO^d}} \le C \log(n+1) \|U\|_{\mathrm{SBMO^d}}\|V\|_{\mathrm{SBMO^d}},
\end{equation}
\begin{equation} \label{eq:bipara}
\| \Delta[U^*,V] \|_{\mathrm{BMO_{para}}} \le C \log(n+1) \|U\|_{\mathrm{BMO_{para}}}\|V\|_{\mathrm{BMO_{para}}},
\end{equation}
\begin{equation} \label{eq:bimult}
\| \Delta[U^*,V] \|_{\mathrm{BMO_{mult}}} \le C (\log(n+1))^2 \| U\|_{\mathrm{BMO_{mult}}}\| V\|_{\mathrm{BMO_{mult}}},
\end{equation}
\begin{equation} \label{eq:binorm}
\| \Delta[U^*,V] \|_{\mathrm{BMO_{norm}}^d} \le C (\log(n+1))^2 \|U\|_{\mathrm{BMO_{norm}}^d}\|V\|_{\mathrm{BMO_{norm}}^d},
\end{equation}
and the dimensional estimates are sharp.
\end{cor}
\noindent{\bf Proof.} Only the upper bounds need to be shown.
For (\ref{eq:bistrong}), use Proposition \ref{carbmoso} to write $\|B\|_{\mathrm{SBMO^d}}= \sup_{I\in \mathcal{D}, \|e\|=1} \|\Lambda_B(h_Ie)\|$
and (\ref{eq:bidentity}) to estimate
$$ \| \Delta[U^*,V] \|_{\mathrm{SBMO^d}}\le \sup_{I\in \mathcal{D},\|e\|=1} \| \Pi_U^* \Pi_V h_I e\|
+ \sup_{I\in \mathcal{D},\|e\|=1}\|D_{U^*,V}(h_Ie)\|.$$
Now observe that for $e \in \mathcal{H}$, $I \in \mathcal{D}$, one has
$$\| \Pi_U^* \Pi_V h_I e\| \\
\le \|U\|_{\mathrm{BMO_{para}}} \|V\|_{\mathrm{SBMO^d}} \|e\| \le C' \log(n+1) \|U\|_{\mathrm{SBMO^d}}
\|V\|_{\mathrm{SBMO^d}}\|e\|
$$
by (\ref{eq:paraest}). Since $D_{U^*,V}
h_Ie = \frac{1}{|I|} \sum_{J\subset I} U_J^* V_Je \, h_I$, one
obtains
\begin{multline*}
\|D_{U^*,V}(h_Ie)\|= \sup_{f \in \mathcal{H}, \|f\|=1} |\langle D_{U^*,V}(h_Ie), h_I f \rangle| \\
= \sup_{f \in \mathcal{H}, \|f\|=1}
\frac{1}{|I|} |\sum_{J\subset I}\langle V_Je, U_Jf\rangle |
\le \|V_e\|_{\mathrm{BMO^d}(\mathbb{T},\mathcal{H})}\|U\|_{\mathrm{SBMO^d}},
\end{multline*}
and the proof of (\ref{eq:bistrong}) is complete.
Using first (\ref{eq:paraest}) and (\ref{eq:strongmult}) and then
Proposition \ref{prop:bisweep}, we obtain (\ref{eq:bipara}).
In a similar way, using first Proposition \ref{prop:bisweep} and then
(\ref{eq:paraest}), (\ref{eq:strongmult}) yields
(\ref{eq:bimult}).
Finally, for (\ref{eq:binorm}) observe first that for any $U,V \in \mathcal{F}_{00}$, $e, f \in \mathcal{H}$, $t \in \mathbb{T}$,
\begin{eqnarray*}
&& |\langle \Delta[U^*,V](t)e, f \rangle|\\
&=& |\sum_{I \in \mathcal{D}} \left \langle \frac{\chi_I(t)}{|I|^{1/2}} V_I e, \frac{\chi_I(t)}{|I|^{1/2}}
U_I f \right \rangle| \\
& \le& \left(\sum_{I \in \mathcal{D}} \|\frac{\chi_I(t)}{|I|^{1/2}}V_I e\|^2\right)^{1/2}
\left(\sum_{I \in \mathcal{D}} \|\frac{\chi_I(t)}{|I|^{1/2}}U_I f\|^2\right)^{1/2} \\
&=& \langle S_U (t) e,e \rangle^{1/2} \langle S_V (t) f,f \rangle^{1/2}
\le\|S_U(t)\|^{1/2} \|S_V(t)\|^{1/2}
\end{eqnarray*}
and therefore
\begin{equation} \label{eq:pointest}
\|\Delta[U^*,V](t)\| \le \|S_U(t)\|^{1/2} \|S_V(t)\|^{1/2} \quad (t \in \mathbb{T}).
\end{equation}
Now consider the $\mathrm{BMO_{norm}}^d$ norm of $\Delta[U^*,V]$. For $I \in \mathcal{D}$,
\begin{eqnarray*}
&& \|P_I \Delta[U^*,V]\|_{L^1(\mathbb{T}, \mathrm{Mat}(\C, n \times n))}\\
& =& \|P_I \Delta[P_I U^*,P_I V]\|_{L^1(\mathbb{T}, \mathrm{Mat}(\C, n \times n))} \\
&\le& 2\|\Delta[P_I U^*,P_I V]\|_{L^1(\mathbb{T}, \mathrm{Mat}(\C, n \times n))} \\
& \le& 2 \| \|S_{P_I U}(\cdot)\|^{1/2} \|S_{P_I V}(\cdot)\|^{1/2}\|_{L^1(\mathbb{T})} \\
&\le& 2 \|S_{P_I U}\|^{1/2}_{L^1(\mathbb{T},\mathrm{Mat}(\C, n \times n))}\|S_{P_I V}\|^{1/2}_{L^1(\mathbb{T},\mathrm{Mat}(\C, n \times n))}\\
&\le & 2 (\log(n+1))^2 \|P_I U\|_{L^2(\mathbb{T},\mathrm{Mat}(\C, n \times n))} \|P_I V\|_{L^2(\mathbb{T},\mathrm{Mat}(\C, n \times n))} \\
& \le & 2 (\log(n+1))^2 |I| \|U\|_{\mathrm{BMO_{norm}}^d} \|V\|_{\mathrm{BMO_{norm}}^d},
\end{eqnarray*}
where we obtain the third inequality from (\ref{eq:pointest})
and the fourth inequality from the proof of (\ref{eq:sharpnorm}).
This finishes the proof of (\ref{eq:binorm}). \qed
\begin{thebibliography}{99}
\bibitem[B1]{blasco1} O.~Blasco, \emph{Hardy spaces of vector-valued
functions: Duality},
Trans.~Am.~Math.~Soc. {\bf 308} (1988), no.2, 495-507.
\bibitem[B2]{blasco3} O.~Blasco, \emph{Boundary values of functions in
vector-valued Hardy spaces and geometry of Banach
spaces}, J. Funct. Anal. {\bf 78} (1988), 346-364.
\bibitem[B3]{blasco4} O.~Blasco, \emph{Dyadic BMO, paraproduct and
Haar multipliers}, Contemporary Math. (to appear)
\bibitem[B4]{BlascoArg} O.~Blasco, \emph{
Remarks on operator-valued BMO spaces}, Rev. Uni. Mat. Argentina
{\bf 345} (2004), 63-78.
\bibitem[BP1]{BP} O.~Blasco, S.~Pott, {\em Dyadic BMO on the
bidisk}, Rev. Mat. Iberoamericana {\bf 21} 2(2005), 483-510.
\bibitem[BP2]{new} O.~Blasco, S.~Pott, \emph{Operator-valued dyadic BMO spaces}, to appear in J.~Op.~Th.
\bibitem[Bou] {bourgain}J.~Bourgain, \emph{Vector-valued singular integrals
and the $H^1$-BMO
duality}, Probability Theory and Harmonic Analysis, Cleveland, Ohio 1983
Monographs and Textbooks in Pure and Applied Mathematics {\bf 98}, Dekker,
New York 1986.
\bibitem[ER]{effros} E.~G.~Effros and Z.~J.~Ruan, Operator Spaces,
London Mathematical Society Monographs 23, Oxford University Press, 2000.
\bibitem[G]{G} A. M. Garsia, \emph{Martingale inequalities: Seminar
Notes on recent progress}, Benjamin, Reading, 1973.
\bibitem[GPTV]{gptv2}T.A.~Gillespie, S.~Pott, S.~Treil, A.~Volberg,
\emph{Logarithmic growth for matrix martingale transforms},
J. London Math. Soc. (2) 64 (2001), no. 3, 624-636.
\bibitem[JPa]{jp} B.~Jacob and J.R.~Partington, \emph{The Weiss
conjecture on admissibility of observation operators for contraction semigroups},
Integral Equations and Operator Theory 40 (2001),
231-243.
\bibitem[JPaP]{jpp1} B.~Jacob, J.~R.~Partington, S.~Pott,
\emph{Admissible and weakly admissible observation operators for the right
shift semigroup},
Proc. Edinb. Math. Soc. (2) {\bf 45}(2002), no. 2, 353-362.
\bibitem[K]{katz} N. H. Katz, \emph{Matrix valued paraproducts}, J.
Fourier Anal. Appl. {\bf 300} (1997), 913-921
\bibitem[M]{M} Y. Meyer,\emph{Wavelets and operators} Cambridge
Univ. Press, Cambridge, 1992.
\bibitem[NTV]{ntv} F.~Nazarov, S.~Treil, A.~Volberg,
\emph{Counterexample to
the infinite
dimensional Carleson embedding theorem},
C. R. Acad. Sci. Paris {\bf 325} (1997), 383-389.
\bibitem[NPiTV]{nptv} F.~Nazarov, G.~Pisier, S.~Treil, A.~Volberg,
\emph{Sharp estimates in vector Carleson imbedding theorem and for vector
paraproducts}, J. Reine Angew. Math. {\bf 542} (2002), 147-171.
\bibitem[Me1]{mei1} T.~Mei, \emph{Operator valued Hardy spaces}, Memoirs of the Am.~Math.~Soc.~2007, vol.~188, no.~881.
\bibitem[Me2]{mei} T.~Mei, \emph{Notes on Matrix Valued
Paraproducts} Indiana Univ. Math. J. {\bf 55} 2,(2006), 747-760.
\bibitem[Per]{per} M.C.~Pereyra, Lecture notes on dyadic harmonic analysis.
Second Summer School in Analysis and Mathematical Physics (Cuernavaca,
2000), 1--60,
Contemp. Math. {\bf 289},
Amer. Math. Soc., Providence, RI, 2001.
\bibitem[Pet]{petermichl} S.~Petermichl,
\emph{Dyadic shifts and a logarithmic estimate for Hankel operators with
matrix symbol},
C. R. Acad. Sci. Paris Sér. I Math. {\bf 330} (2000), no. 6, 455-460.
\bibitem[Pi]{Pi1} G. Pisier \emph{ Notes on
Banach spaces valued Hp-spaces, non-commutative martingale
inequalities and related questions}. Preliminary Notes, 2000.
\bibitem[PV]{pvpers} G.~Pisier and A.~Volberg, personal communication
\bibitem[PXu]{pxu} G.~Pisier and Q.~Xu, \emph{Non-commutative martingale
inequalities}, Comm. Math. Physics, 189 (1997) 667-698.
\bibitem[PS]{ps} S.~Pott, C.~Sadosky,
\emph{Bounded mean oscillation on the bidisk and Operator BMO},
J.~Funct.~Anal.~{\bf 189}(2002), 475-495.
\bibitem[PSm]{psm} S.~Pott, M. Smith,
\emph{Vector paraproducts and Hankel operators of Schatten class
via $p$-John-Nirenberg theorem}, J.~Funct.~Anal.~{\bf 217}(2004), no. 1, 38--78.
\bibitem[SW]{SW} E.~M.~Stein and G.~Weiss, \emph{ Introduction to Fourier
Analysis
on Euclidean Spaces }, Princeton Univ. Press, [1971].
\end{thebibliography}
\end{document}
\begin{document}
\parskip=4pt
\parindent=18pt
\baselineskip=22pt \setcounter{page}{1} \centerline{\Large\bf A
Class of Special Matrices and Quantum Entanglement}
\begin{center}
Shao-Ming Fei$^{\dag\ddag}$~~~ and ~~~Xianqing Li-Jost$^\ddag$
\begin{minipage}{6.2in}
$^\dag$Department of Mathematics, Capital Normal University, Beijing 100037, P.R. China\\
$^\dag$Institut f{\"u}r Angewandte Mathematik, Universit{\"a}t Bonn, 53115 Bonn, Germany\\
E-mail: fei@wiener.iam.uni-bonn.de\\
$^\ddag$Max-Planck-Institute for Mathematics in the Sciences, 04103 Leipzig, Germany\\
E-mail: Xianqing.Li-Jost@mis.mpg.de
\end{minipage}
\end{center}
\vskip 1 true cm
\parindent=18pt
\parskip=6pt
\begin{center}
\begin{minipage}{5in}
\centerline{\large Abstract}
We present a kind of construction for a class of special matrices
with at most two different eigenvalues, in terms of some
interesting multiplicators which are very useful in calculating
eigenvalue polynomials of these matrices. This class of matrices
defines a special kind of quantum states --- $d$-computable
states. The entanglement of formation
for a large class of quantum mixed states is explicitly presented.
\end{minipage}
\end{center}
Keywords: Entanglement of formation, Generalized concurrence, $d$-computable states
PACS: 03.65.Bz; 89.70.+c
\section{Introduction}
Quantum entangled states are playing an important role in quantum
communication, information processing and quantum computing
\cite{DiVincenzo}, especially in the investigation of quantum
teleportation \cite{teleport,teleport1}, dense coding
\cite{dense}, decoherence in quantum computers and the evaluation
of quantum cryptographic schemes \cite{crypto}. To quantify
entanglement, a number of entanglement measures such as the
entanglement of formation and distillation
\cite{Bennett96a,BBPS,Vedral}, negativity
\cite{Peres96a,Zyczkowski98a}, relative entropy \cite{Vedral,sw}
have been proposed for bipartite states \cite{crypto,BBPS}
[11-13]. Most of these measures of entanglement involve
extremizations which are difficult to handle analytically. For
instance the entanglement of formation \cite{Bennett96a} is
intended to quantify the amount of quantum communication required
to create a given state. The entanglement of formation for
a pair of qubits can be
expressed as a monotonically increasing function of the
``concurrence'', which can be taken as a measure of entanglement
in its own right \cite{HillWootters}. From the expression of this
concurrence, the entanglement of formation for mixed states of a
pair of qubits is calculated \cite{HillWootters}. Although
entanglement of formation is defined for arbitrary dimension, so
far no explicit analytic formulae for entanglement of formation
have been found for systems larger than a pair of qubits, except
for some special symmetric states \cite{th}.
For a multipartite quantum system, the degree of entanglement will
neither increase nor decrease under local unitary transformations
on a quantum subsystem. Therefore the measure of entanglement must
be an invariant of local unitary transformations. The
entanglements have been studied in the view of this kind of
invariants and a generalized formula of concurrence for high
dimensional bipartite and multipartite systems is derived from the
relations among these invariants \cite{note}. The generalized
concurrence can be used to deduce necessary and sufficient
separability conditions for some high dimensional mixed states
\cite{qsep}. However in general the generalized concurrence is not
a suitable measure for $N$-dimensional bipartite quantum pure
states, except for $N=2$. Therefore it does not help in calculating the
entanglement of formation for bipartite mixed states.
Nevertheless in \cite{fjlw} it has been shown that for
some class of quantum states with $N>2$,
the corresponding entanglement of formation is
a monotonically increasing function of a generalized
concurrence, and the entanglement of formation can be also calculated
analytically. Let ${\cal H}$ be an $N$-dimensional complex Hilbert space with
orthonormal basis $e_i$, $i=1,...,N$. A general bipartite pure
state on ${\cal H}\otimes {\cal H}$ is of the form,
\begin{equation}\label{psi}
\vert\psi>=\sum_{i,j=1}^N a_{ij}e_i\otimes e_j,~~~~~~a_{ij}\in\Cb
\end{equation}
with normalization $\displaystyle\sum_{i,j=1}^N a_{ij}a_{ij}^\ast=1$.
The entanglement of formation $E$ is defined as the entropy of
either of the two sub-Hilbert spaces \cite{BBPS},
\begin{equation}
\label{epsiq}
E(|\psi \rangle) = - {\mbox{Tr}\,}
(\rho_1 \log_2 \rho_1) = - {\mbox{Tr}\,} (\rho_2 \log_2 \rho_2)\,,
\end{equation}
where $\rho_1$ (resp. $\rho_2$) is the partial trace of
$|\psi\rangle\langle\psi|$ over the first (resp. second) Hilbert
space of ${\cal H}\otimes{\cal H}$.
Let $A$ denote the matrix with
entries given by $a_{ij}$ in (\ref{psi}). $\rho_1$ can be
expressed as $\rho_1=AA^\dag$.
The quantum mixed states are described by
density matrices $\rho$ on ${\cal H}\otimes{\cal H}$,
with pure-state decompositions, i.e., all ensembles of states $|\psi_i \rangle$
of the form (\ref{psi}) with probabilities $p_i\geq 0$,
$\rho = \sum_{i=1}^l p_i |\psi_i \rangle \langle\psi_i|$,
$\sum_{i=1}^l p_i =1$
for some $l\in{I\!\! N}$. The entanglement of formation for the mixed
state $\rho$ is defined as the average entanglement of the pure
states of the decomposition, minimized over all decompositions of
$\rho$, $E(\rho) = \mbox{min}\, \sum_{i=1}^l p_i
E(|\psi_i \rangle)$.
For $N=2$ equation (\ref{epsiq}) can be written as
$
E(|\psi \rangle)|_{N=2} =h((1+\sqrt{1-C^2})/2),
$
where $h(x) = -x\log_2 x - (1-x)\log_2 (1-x)$.
$C$ is called concurrence,
$C(|\psi \rangle) =2|a_{11}a_{22}-a_{12}a_{21}\vert$ \cite{HillWootters}.
$E$ is a monotonically increasing function of $C$ and therefore $C$ can be
also taken as a kind of measure of entanglement. Calculating
$E(\rho)$ is then reduced to the calculation of the corresponding minimum of
$C(\rho) = \mbox{min}\, \sum_{i=1}^M p_i C(|\psi_i \rangle)$,
which simplifies the problems, as $C(|\psi_i \rangle)$ has a much simpler
expression than $E(|\psi_i \rangle)$.
For $N\geq 3$, there is no such concurrence $C$ in general. The
concurrences discussed in \cite{note} can be only used to judge
whether a pure state is separable (or maximally entangled) or not
\cite{qsep}. The entanglement of formation is no longer a
monotonically increasing function of these concurrences.
Nevertheless, for a special class of quantum states
such that $AA^\dag$ has only two non-zero eigenvalues, a kind of
generalized concurrence has been found to simplify the
calculation of the corresponding entanglement of formation \cite{fjlw}.
Let $\lambda_1$ (resp. $\lambda_2$) be the two non-zero
eigenvalues of $AA^\dag$ with degeneracy $n$ (resp. $m$), $n+m\leq N$,
and $D$ the maximal non-zero diagonal determinant, $D=\lambda_1^n\lambda_2^m$.
In this case the entanglement of formation of $|\psi \rangle$ is given by
$E(|\psi \rangle)=-n \lambda_1 \log_2 \lambda_1 -m \lambda_2 \log_2 \lambda_2$.
It is straightforward to show that $E(|\psi\rangle)$ is a
monotonically increasing function of $D$ and hence
$D$ is a kind of measure of entanglement in this case.
In particular for the case $n=m>1$, we have
\begin{equation}\label{hehe}
E(|\psi \rangle)=n \left(-x\log_2 x
- (\frac{1}{n}-x)\log_2 (\frac{1}{n}-x)\right),
\end{equation}
where
$$
x = \frac{1}{2}\left(\frac{1}{n}+\sqrt{\frac{1}{n^2}(1-d^2)}\right)
$$
and
\begin{equation}\label{GC}
d\equiv 2nD^{\frac{1}{2n}}=2n\sqrt{\lambda_1\lambda_2}.
\end{equation}
$d$ is defined to be the generalized concurrence in this case.
Instead of calculating $E(\rho)$ directly, one may calculate the
minimum decomposition of $D(\rho)$ or $d(\rho)$ to simplify the
calculations.
In \cite{fjlw} a class of pure states (\ref{psi}) with the matrix $A$ given by
\begin{equation}\label{a} A=\left( \begin{array}{cccc}
0&b&a_1&b_1\\
-b&0&c_1&d_1\\
a_1&c_1&0&-e\\
b_1&d_1&e&0
\end{array}
\right),
\end{equation}
$a_1,b_1,c_1,d_1,b,e\in\Cb$, is considered.
The matrix $AA^\dag$ has two eigenvalues with degeneracy two, i.e., $n=m=2$ and
$\vert AA^\dag\vert=|b_1c_1-a_1d_1+be|^4$.
The generalized concurrence $d$ is given by
$d=4|b_1c_1-a_1d_1+be|$.
Let $p$ be a $16\times 16$ matrix with only non-zero entries
$p_{1,16}=p_{2,15}=-p_{3,14}=p_{4,10}=p_{5,12}=p_{6,11}
=p_{7,13}=-p_{8,8}=-p_{9,9}=p_{10,4}=p_{11,6}=p_{12,5}
=p_{13,7}=-p_{14,3}=p_{15,2}=p_{16,1}=1$.
$d$ can be further written as
\begin{equation}\label{dp}
d=|\langle \psi |p\psi^* \rangle |.
\end{equation}
Let $\Psi$ denote the set of pure states (\ref{psi})
with $A$ given as the form of (\ref{a}). Consider all mixed states with
density matrix $\rho$ such that its decompositions are of the form
\begin{equation}\label{rho1}
\rho = \sum_{i=1}^M p_i |\psi_i \rangle \langle \psi_i|,~~~~\sum_{i=1}^M
p_i =1,~~~~|\psi_i\rangle\in\Psi.
\end{equation}
All other kind of decompositions, say decomposition from $|\psi_i^\prime\rangle$,
can be obtained from a unitary linear combination
of $|\psi_i\rangle$ \cite{HillWootters,fjlw}. As linear combinations of
$|\psi_i\rangle$ do not change the form of the corresponding matrices (\ref{a}),
once $\rho$ has a decomposition with all $|\psi_i\rangle\in\Psi$, all
other decompositions, including the minimum decomposition of the entanglement
of formation, also satisfy that $|\psi_i^\prime\rangle\in\Psi$.
Then the minimum decomposition of the generalized concurrence is
\cite{fjlw}
\begin{equation} \label{drho}
d(\rho)=\Lambda_1 - \sum_{i=2}^{16}\Lambda_i,
\end{equation}
where $\Lambda_i$, in decreasing
order, are the square roots of the eigenvalues of the Hermitian
matrix $R \equiv \sqrt{\sqrt{\rho}p{\rho^\ast}p\sqrt{\rho}}$, or,
alternatively, the square roots of the eigenvalues of the
non-Hermitian matrix $\rho p{\rho^\ast}p$.
\section{Entanglement of formation for a class of high dimensional
quantum states}
An important fact in obtaining the formula (\ref{drho}) is that
the generalized concurrence $d$ is a quadratic form of the entries of
the matrix $A$, so that $d$ can be expressed in the form of (\ref{dp})
in terms of a suitable matrix $p$. Generalizing to the $N$-dimensional
case we call an pure state (\ref{psi})
\underline{$d$-computable} if $A$ satisfies the following relations:
\begin{equation}\lambdaabel{dcomputable}
\begin{array}{l}
\vert AA^\dag\vert = ([ A ][ A ]^\ast)^{N/2},\\[3mm]
\vert AA^\dag - \lambdaambda Id_N\vert=(\lambdaambda^2 - \| A \| \lambdaambda + [ A ][ A ]^\ast)^{N/2},
\end{array}
\end{equation}
where $[A]$ and $\|A \|$ are quadratic forms of $a_{ij}$,
$Id_N$ is the $N\tildeimes N$ identity matrix.
We denote ${\cal A}$ the set of matrices satisfying (\ref{dcomputable}),
which implies that for $A\in{\cal A}$, $AA^\dag$ has at most
two different eigenvalues, each one has order $N/2$ and
$d$ is a quadratic form of the entries of the matrix $A$.
In the following we give a kind of constructions of high dimensional
$d$-computable states. For all $N^2\times N^2$
density matrices with decompositions on these $N$-dimensional
$d$-computable pure states, their entanglement of formations
can be calculated with a similar formula to (\ref{drho})
(see (\ref{d2k1})).
We first present a kind of construction for a class of
$N$-dimensional, $N = 2^k$, $2\leq k\in{I\!\! N}$,
$d$-computable states.
Set
$$
A_2= \left(
\begin{array}{cc}
a&-c \\[3mm]
c&d\\[3mm]
\end{array}
\right),
$$
where $a,c,d \in \Cb$.
For any $b_1,c_1 \in \Cb$, a $4\times 4$ matrix $A_4\in{\cal A}$ can
be constructed in the following way,
\begin{equation}\label{ha4}
A_4= \left(
\begin{array}{cc}
B_2&A_2\\[3mm]
-A_2^t&C_2^t\\[3mm]
\end{array}
\right) = \left(
\begin{array}{cccc}
0&b_1&a&-c\\[3mm]
-b_1&0&c&d\\[3mm]
-a&-c&0&-c_1\\[3mm]
c&-d&c_1&0
\end{array}
\right),
\end{equation}
where
$$
B_2 = b_1J_2, ~~~~ C_2 = c_1J_2, ~~~ J_2= \left(
\begin{array}{cc}
0&1 \\[3mm]
-1&0\\[3mm]
\end{array}
\right).
$$
$A_4$ satisfies the relations in (\ref{dcomputable}):
$$
\begin{array}{l}
\left| A_4 A^\dag_4 \right|=[(b_1c_1+a d + c^2)(b_1c_1+a d + c^2)^\ast]^2=
([ A_4 ][ A_4 ]^\ast)^2,\\[3mm]
\left| A_4 A^\dag_4 - \lambda Id_4 \right| = (\lambda^2 -
(b_1b_1^\ast+c_1c_1^\ast+aa^\ast+2cc^\ast+dd^\ast)\lambda\\[3mm]
~~~~~~~~~~~~~~~~~~~~~~+ (b_1c_1+ a d + c^2)(b_1c_1+ a d + c^2)^\ast)^2\\[3mm]
~~~~~~~~~~~~~~~~~~~= (\lambda^2 - \| A_4 \|\lambda + [ A_4 ][ A_4 ]^\ast)^2,
\end{array}
$$
where
\begin{equation}
[ A_4 ]=(b_1c_1+a d + c^2),~~~
\| A_4 \|=b_1b_1^\ast+c_1c_1^\ast+aa^\ast+2cc^\ast+dd^\ast.
\end{equation}
$A_8\in{\cal A}$ can be obtained from $A_4$,
\begin{equation}\label{a8} A_8=
\left(
\begin{array}{cc}
B_4&A_4\\[3mm]
-A_4^t&C_4^t\\[3mm]
\end{array}
\right), \end{equation} where
\begin{equation}\label{i4} B_4 = b_2J_4, ~~~~C_4 = c_2J_4,
~~~~ J_4= \left(
\begin{array}{cccc}
0&0&0&1\\[3mm]
0&0&1&0\\[3mm]
0&-1&0&0\\[3mm]
-1&0&0&0
\end{array}
\right),~~~~
b_2,~c_2 \in \Cb.
\end{equation}
For general construction of high dimensional matrices
$A_{2^{k+1}}\in{\cal A}$, $2 \leq k\in{I\!\! N}$, we have
\begin{equation}\label{a2k}
A_{2^{k+1}}= \left(
\begin{array}{cc}
B_{2^{k}}&A_{2^{k}}\\[3mm]
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^t&C_{2^{k}}^t
\end{array}
\right) \equiv\left(
\begin{array}{cc}
b_{k}J_{2^{k}}&A_{2^{k}}\\[3mm]
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^t&c_{k}J_{2^{k}}^t
\end{array}
\right),
\end{equation}
\begin{equation}
\label{i2k}
J_{2^{k+1}}= \left(
\begin{array}{cc}
0&J_{2^{k}}\\[3mm]
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^t&0\\[3mm]
\end{array}
\right),
\end{equation}
where $b_k, c_k \in \Cb$,
$B_{2^{k}}=b_{k}J_{2^{k}}$, $C_{2^{k}}=c_{k}J_{2^{k}}$.
We call $J_{2^{k+1}}$ multipliers.
Before proving that $A_{2^{k+1}}\in{\cal A}$, we first give the
following lemma.
{\sf Lemma 1}. $A_{2^{k+1}}$ and $J_{2^{k+1}}$ satisfy the
following relations: \begin{equation}\label{ii2k}
\begin{array}{l}
J_{2^{k+1}}^tJ_{2^{k+1}}=J_{2^{k+1}}J_{2^{k+1}}^t=Id_{2^{k+1}},\\[3mm]
J_{2^{k+1}}^tJ_{2^{k+1}}^t=J_{2^{k+1}}J_{2^{k+1}}
=(-1)^{\frac{(k+1)(k+2)}{2}}Id_{2^{k+1}},
\end{array}
\end{equation}
\begin{equation}\label{ai2k}
\begin{array}{ll}
J_{2^{k+1}}^\dag =J_{2^{k+1}}^t ,~~~&
J_{2^{k+1}}^t=(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k+1}},\\[3mm]
A_{2^{k+1}}^t=(-1)^{\frac{k(k+1)}{2}}A_{2^{k+1}},~~~&
A_{2^{k+1}}^\dag=(-1)^{\frac{k(k+1)}{2}}A^*_{2^{k+1}}.
\end{array}
\end{equation}
{\sf Proof}. One easily checks that relations in (\ref{ii2k}) hold
for $k=1$. Suppose (\ref{ii2k}) hold for general $k$. We have
$$
\begin{array}{rcl} J_{2^{k+1}}^tJ_{2^{k+1}}&=& \left(
\begin{array}{cc}
0&(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^t\\[3mm]
J_{2^{k}}&0\\[3mm]
\end{array}
\right) \left(
\begin{array}{cc}
0&J_{2^{k}}\\[3mm]
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^t&0\\[3mm]
\end{array}
\right)\\[9mm]
&=&\left(
\begin{array}{cc}
(-1)^{(k+1)(k+2)}J_{2^{k}}J_{2^{k}}^t&0\\[3mm]
0&J_{2^{k}}^tJ_{2^{k}}\\[3mm]
\end{array}
\right) =Id_{2^{k+1}} \end{array}
$$
and
$$
\begin{array}{rcl} J_{2^{k+1}}^tJ_{2^{k+1}}^t &=& \left(
\begin{array}{cc}
0&(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^t\\[3mm]
J_{2^{k}}&0\\[3mm]
\end{array}
\right) \left(
\begin{array}{cc}
0&(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^t\\[3mm]
J_{2^{k}}&0\\[3mm]
\end{array}
\right)\\[9mm]
&=&\left(
\begin{array}{cc}
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}J_{2^{k}}^t&0\\[3mm]
0&(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^tJ_{2^{k}}\\[3mm]
\end{array}
\right) =(-1)^{\frac{(k+1)(k+2)}{2}}Id_{2^{k+1}}. \end{array}
$$
Therefore the relations for $J_{2^{k+1}}^tJ_{2^{k+1}}$
and $J_{2^{k+1}}^tJ_{2^{k+1}}^t$ are valid also for $k+1$.
The cases for $J_{2^{k+1}}J_{2^{k+1}}^t$ and $J_{2^{k+1}}J_{2^{k+1}}$
can be similarly treated.
The formula $J_{2^{k+1}}^t=(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k+1}}$
in (\ref{ai2k}) is easily deduced from (\ref{ii2k}) and the fact
$J_{2^{k+1}}^\dag = J_{2^{k+1}}^t$.
The last two formulae in (\ref{ai2k}) are easily verified for
$k=1$. If it holds for general $k$, we have then,
$$
A_{2^{k+1}}^t = \left(
\begin{array}{cc}
B_{2^{k}}^t&(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}\\[3mm]
A_{2^{k}}^t&C_{2^{k}}
\end{array}
\right)
=\left(
\begin{array}{cc}
(-1)^{\frac{k(k+1)}{2}}B_{2^{k}}&(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}\\[3mm]
A_{2^{k}}^t&(-1)^{\frac{k(k+1)}{2}}C_{2^{k}}^t
\end{array}
\right) =(-1)^{\frac{k(k+1)}{2}}A_{2^{k+1}},
$$
i.e., it holds also for $k+1$. The last equality in (\ref{ai2k}) is
obtained from the conjugate of the formula above.
$\rule{2mm}{2mm}$
{\sf Lemma 2.} The following relations can be verified
straightforwardly from Lemma 1,
\begin{equation}\begin{array}{l}\label{BC}
B_{2^{k}}^t=(-1)^{\frac{k(k+1)}{2}}B_{2^{k}},~~~
C_{2^{k}}^t=(-1)^{\frac{k(k+1)}{2}}C_{2^{k}},\\[3mm]
B_{2^{k}}^\dag =(-1)^{\frac{k(k+1)}{2}}B^*_{2^{k}},~~~
C_{2^{k}}^\dag =(-1)^{\frac{k(k+1)}{2}}C^*_{2^{k}}.
\end{array}
\end{equation}
\begin{equation}\label{BCI}
B_{2^{k+1}}^{-1}=\frac{1}{b_k^2}B_{2^{k+1}}^t
=\frac{1}{b_kb_k^*}B^\dag_{2^{k+1}},~~~
C_{2^{k+1}}^{-1}=\frac{1}{c_k^2}C_{2^{k+1}}^t
=\frac{1}{c_kc_k^*}C^\dag_{2^{k+1}}.
\end{equation}
\begin{equation}\label{BBCC}
\begin{array}{l}
B_{2^{k+1}}^tB_{2^{k+1}}=B_{2^{k+1}}B_{2^{k+1}}^t
=b_k^2 Id_{2^{k+1}},~~
C_{2^{k+1}}^tC_{2^{k+1}}=C_{2^{k+1}}C_{2^{k+1}}^t
=c_k^2 Id_{2^{k+1}},\\[3mm]
B_{2^{k+1}}^\dag B_{2^{k+1}}=B_{2^{k+1}}B^\dag_{2^{k+1}}
=b_k b_k^* Id_{2^{k+1}},~~~
C_{2^{k+1}}^\dag C_{2^{k+1}}=C_{2^{k+1}}C_{2^{k+1}}^\dag
=c_k c_k^* Id_{2^{k+1}}.
\end{array}
\end{equation}
For any $A_{2^{k+1}}\in \cal A$, $k\geq 2$, we define
\begin{equation}
\begin{array}{rcl}
||A_{2^{k+1}}||&=:&b_kb_k^\ast+c_kc_k^\ast+||A_{2^k}||,\\[3mm]
[A_{2^{k+1}}]&=:&(-1)^{k(k+1)/2}b_kc_k-[A_{2^k}].
\end{array}
\end{equation}
{\sf Lemma 3}. For any $k\geq 2$, we have, \begin{equation}\label{lemma3}
\begin{array}{rcl} (A_{2^{k+1}}J_{2^{k+1}})(J_{2^{k+1}}A_{2^{k+1}})^t&=&
(A_{2^{k+1}}J_{2^{k+1}})^t(J_{2^{k+1}}A_{2^{k+1}})\\[3mm]
&=&((-1)^{\frac{k(k+1)}{2}}b_kc_k-[A_{2^{k}}])Id_{2^{k+1}}
=[A_{2^{k+1}}] Id_{2^{k+1}},\\[4mm]
(A_{2^{k+1}}^\ast J_{2^{k+1}})(J_{2^{k+1}}A_{2^{k+1}}^\ast)^t&=&
(A_{2^{k+1}}^\ast J_{2^{k+1}})^t(J_{2^{k+1}}A_{2^{k+1}}^\ast)
=[A_{2^{k+1}}]^\ast Id_{2^{k+1}}.
\end{array}
\end{equation}
{\sf Proof}. One can verify that Lemma 3 holds for $k=2$. Suppose
it is valid for $k$, we have
$$
\begin{array}{l}
(A_{2^{k+1}}J_{2^{k+1}})(J_{2^{k+1}}A_{2^{k+1}})^t\\[3mm]
=\left(
\begin{array}{cc}
(-1)^{\frac{(k+1)(k+2)}{2}}A_{2^{k}}J_{2^{k}}^t&B_{2^{k}}J_{2^{k}}\\[3mm]
(-1)^{\frac{(k+1)(k+2)}{2}}C_{2^{k}}^tJ_{2^{k}}^t&
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^tJ_{2^{k}}
\end{array}
\right) \left(
\begin{array}{cc}
(-1)^{\frac{k(k+1)}{2}}J_{2^{k}}A_{2^{k}}^t&J_{2^{k}}C_{2^{k}}^t\\[3mm]
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^tB_{2^{k}}&
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^tA_{2^{k}}
\end{array}
\right)^t\\[9mm]
=\left(
\begin{array}{cc}
e_{11}& e_{12}\\[3mm]
e_{21}& e_{22}
\end{array}
\right), \end{array}
$$
where
$$
\begin{array}{rcl}
e_{11}&=&(-1)^{\frac{(k+1)(k+2)+k(k+1)}{2}}A_{2^{k}}J_{2^{k}}^tA_{2^{k}}J_{2^{k}}^t
+(-1)^{\frac{k(k+1)}{2}}b_kc_k Id_{2^{k}}\\[3mm]
&=&(-1)^{\frac{(k+1)(k+2)+k(k-1)}{2}}(A_{2^{k}}J_{2^{k}}^t)(J_{2^{k}}A_{2^{k}}^t)^t
+(-1)^{\frac{k(k+1)}{2}}b_kc_k Id_{2^{k}}\\[3mm]
&=&((-1)^{\frac{k(k+1)}{2}}b_kc_k-[A_{2^{k}}])Id_{2^{k}},\\[3mm]
e_{12}&=&b_k A_{2^{k}}J_{2^{k}}^t+
(-1)^{\frac{(k+1)(k+2)+k(k+1)}{2}}b_kA_{2^{k}}^tJ_{2^{k}}\\[3mm]
&=& b_kA_{2^{k}}J_{2^{k}}^t(1+(-1)^{\frac{(k+1)(k+2)+k(k-1)}{2}})=0,\\[3mm]
e_{21}&=&(-1)^{\frac{(k+1)(k+2)}{2}}c_k A_{2^{k}}J_{2^{k}}^t
+(-1)^{\frac{k(k+1)}{2}}c_k A_{2^{k}}^tJ_{2^{k}}=0,\\[3mm]
e_{22}&=& (-1)^{\frac{k(k+1)}{2}}b_k c_kId_{2^{k}}+
(-1)^{\frac{(k+1)(k+2)+k(k+1)}{2}}A_{2^{k}}^tJ_{2^{k}}A_{2^{k}}^tJ_{2^{k}}\\[3mm]
&=&(-1)^{\frac{k(k+1)}{2}}b_k c_k Id_{2^{k}}+
(-1)^{\frac{(k+1)(k+2)+k(k-1)}{2}}(A_{2^{k}}J_{2^{k}})(J_{2^{k}}A_{2^{k}})^t\\[3mm]
&=&((-1)^{\frac{k(k+1)}{2}}b_kc_k-[A_{2^{k}}])Id_{2^{k}}. \end{array}
$$
Hence
$$
(A_{2^{k+1}}J_{2^{k+1}})(J_{2^{k+1}}A_{2^{k+1}})^t
=((-1)^{\frac{k(k+1)}{2}}b_kc_k-[A_{2^{k}}])Id_{2^{k+1}}=[A_{2^{k+1}}]Id_{2^{k+1}}.
$$
Similar calculations apply to
$(A_{2^{k+1}}J_{2^{k+1}})^t(J_{2^{k+1}}A_{2^{k+1}})$. Therefore the Lemma holds
for $k+1$. The last equation can be deduced from the first one.
{\sf Theorem 2}. $A_{2^{k}}$ satisfies the following relation: \begin{equation}
\label{thm2}
|A_{2^{k+1}}A_{2^{k+1}}^\dag|=([A_{2^{k+1}}][A_{2^{k+1}}]^*)^{2^k}
=[((-1)^{\frac{k(k+1)}{2}}b_kc_k-[A_{2^{k}}])
((-1)^{\frac{k(k+1)}{2}}b^*_kc^*_k-[A_{2^{k}}]^*)]^{2^k}. \end{equation}
{\sf Proof}. By using Lemma 1-3, we have
$$
\begin{array}{rcl}
|A_{2^{k+1}}|
&=&\left|
\left(
\begin{array}{cc}
B_{2^{k}}& A_{2^{k}}\\[3mm]
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^t&C_{2^{k}}^t
\end{array}
\right)
\right|\\[9mm]
&=&\left|
\left(
\begin{array}{cc}
Id_{2^{k}}&-A_{2^{k}}(C_{2^{k}}^t)^{-1}\\[3mm]
0&Id_{2^{k}}
\end{array}
\right)
\left(
\begin{array}{cc}
B_{2^{k}}& A_{2^{k}}\\[3mm]
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^t&C_{2^{k}}^t
\end{array}
\right)
\right|\\[9mm]
&=&\left|
\left(
\begin{array}{cc}
B_{2^{k}}-(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}(C_{2^{k}}^t)^{-1}A_{2^{k}}^t& 0\\[3mm]
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^t&C_{2^{k}}^t
\end{array}
\right)
\right|\\[9mm]
&=&|b_kc_k J_{2^{k}}J_{2^{k}}^t-(-1)^{\frac{k(k+1)}{2}}\frac{1}{c_k^2}
A_{2^{k}}C_{2^{k}}A_{2^{k}}^tC_{2^{k}}^t|\\[4mm]
&=&|b_kc_k Id_{2^{k}}-(-1)^{\frac{k(k+1)}{2}}
(A_{2^{k}}J_{2^{k}})(J_{2^{k}}A_{2^{k}})^t|\\[4mm]
&=&|(-1)^{\frac{k(k+1)}{2}}b_kc_k Id_{2^{k}}-
[A_{2^{k}}]Id_{2^{k}}|
=((-1)^{\frac{k(k+1)}{2}}b_kc_k-[A_{2^{k}}])^{2^k}.
\end{array}
$$
Therefore
$$
|A_{2^{k+1}}A_{2^{k+1}}^\dag|=([A_{2^{k+1}}][A_{2^{k+1}}]^\ast)^{2^k}.
$$
$\rule{2mm}{2mm}$
{\sf Lemma 4}. $A_{2^{k+1}}$ and $J_{2^{k+1}}$ satisfy the following relations:
$$
\begin{array}{l}
(A_{2^{k+1}}J_{2^{k+1}})(J_{2^{k+1}}A_{2^{k+1}})^\dag
+(J_{2^{k+1}}A_{2^{k+1}}^\ast)(J_{2^{k+1}}A_{2^{k+1}})^t\\[3mm]
~~~~~~~~=A_{2^{k+1}}A_{2^{k+1}}^\dag+J_{2^{k+1}}A_{2^{k+1}}^\ast
A_{2^{k+1}}^tJ_{2^{k+1}}^t
=||A_{2^{k+1}}||Id_{2^{k+1}},\\[3mm]
(A_{2^{k+1}}J_{2^{k+1}})^t(A_{2^{k+1}}^\ast J_{2^{k+1}})
+(J_{2^{k+1}}A_{2^{k+1}})^\dag(J_{2^{k+1}}A_{2^{k+1}})\\[3mm]
~~~~~~~~=A_{2^{k+1}}^\dag A_{2^{k+1}}+J_{2^{k+1}}^tA_{2^{k+1}}^tA_{2^{k+1}}^\ast J_{2^{k+1}}
=||A_{2^{k+1}}||Id_{2^{k+1}}.
\end{array}
$$
{\sf Proof}. It can be verified that the first formula
holds for $k=2$, if it holds for $k$, we have
$$
\begin{array}{l} (A_{2^{k+1}}J_{2^{k+1}})(A_{2^{k+1}}J_{2^{k+1}})^\dag
+(J_{2^{k+1}}A^*_{2^{k+1}})(J_{2^{k+1}}A_{2^{k+1}})^t\\[3mm]
=\left(
\begin{array}{cc}
(-1)^{\frac{(k+1)(k+2)}{2}}A_{2^{k}}J_{2^{k}}^t&B_{2^{k}}J_{2^{k}}\\[3mm]
(-1)^{\frac{(k+1)(k+2)}{2}}C_{2^{k}}^tJ_{2^{k}}^t&
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^tJ_{2^{k}}
\end{array}
\right) \left(
\begin{array}{cc}
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}A_{2^{k}}^\dag&
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}C^*_{2^{k}}\\[3mm]
J_{2^{k}}^tB_{2^{k}}^\dag&
(-1)^{\frac{k(k+1)}{2}}J^t_{2^{k}}A_{2^{k}}^*
\end{array}
\right)\\[9mm]
+\left(
\begin{array}{cc}
(-1)^{\frac{k(k+1)}{2}}J_{2^{k}}A_{2^{k}}^\dag&J_{2^{k}}C_{2^{k}}^\dag\\[3mm]
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^tB_{2^{k}}^*&
(-1)^{\frac{(k+1)(k+2)}{2}}J_{2^{k}}^tA_{2^{k}}^*
\end{array}
\right) \left(
\begin{array}{cc}
(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}J_{2^{k}}^t&
(-1)^{\frac{(k+1)(k+2)}{2}}B_{2^{k}}^tJ_{2^{k}}\\[3mm]
C_{2^{k}}J_{2^{k}}^t&
(-1)^{\frac{(k+1)(k+2)}{2}}A_{2^{k}}^tJ_{2^{k}}
\end{array}
\right)\\[9mm]
=\left(
\begin{array}{cc}
f_{11}& f_{12}\\[3mm]
f_{21}& f_{22}
\end{array}
\right), \end{array}
$$
where, by using Lemma 1 and 2,
$$
\begin{array}{rcl}
f_{11}=f_{22}&=& A_{2^{k}}A_{2^{k}}^\dag+J_{2^{k}}A_{2^{k}}^\dag
A_{2^{k}}J_{2^{k}}^t+BB^\dag +J_{2^{k}}C^\dag CJ_{2^{k}}^t \\[3mm]
&=&
A_{2^{k}}A_{2^{k}}^\dag+J_{2^{k}}(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^*
(-1)^{\frac{k(k+1)}{2}}A^t_{2^{k}}J_{2^{k}}^t+(b_kb_k^*+c_kc_k^*)Id_{2^{k}}\\[3mm]
&=&(||A_{2^{k}}||+b_kb_k^*+c_kc_k^*)Id_{2^{k}}=||A_{2^{k+1}}||Id_{2^{k}},
\end{array}
$$
$$
\begin{array}{rcl}
f_{12}&=&A_{2^{k}}C^*_{2^{k}}+(-1)^{\frac{k(k+1)}{2}}B_{2^{k}}A^*_{2^{k}}
+(-1)^{\frac{k(k+1)+(k+1)(k+2)}{2}}b_kJ_{2^{k}}A_{2^{k}}^\dag
+(-1)^{\frac{(k+1)(k+2)}{2}}c^*_kA_{2^{k}}^tJ_{2^{k}}\\[3mm]
&=&(-1)^{\frac{k(k+1)}{2}}(B_{2^{k}}A^*_{2^{k}}
+(-1)^{\frac{k(k-1)+(k+1)(k+2)}{2}}B_{2^{k}}A^*_{2^{k}})\\[3mm]
&&+A_{2^{k}}C^*_{2^{k}}
+(-1)^{\frac{k(k-1)+(k+1)(k+2)}{2}}A_{2^{k}}C^*_{2^{k}}=0, \end{array}
$$
$$
\begin{array}{rcl}
f_{21}&=&C_{2^{k}}^tA^\dag_{2^{k}}+(-1)^{\frac{k(k+1)}{2}}A_{2^{k}}^tB_{2^{k}}^\dag
+(-1)^{\frac{k(k+1)+(k+1)(k+2)}{2}}b^*_kA_{2^{k}}J_{2^{k}}^t
+(-1)^{\frac{(k+1)(k+2)}{2}}c_kJ_{2^{k}}^tA^*_{2^{k}}\\[3mm]
&=&(-1)^{\frac{k(k+1)}{2}}(b_kA_{2^{k}}^tJ_{2^{k}}^t
+(-1)^{\frac{k(k-1)+(k+1)(k+2)}{2}}b_kA_{2^{k}}^tJ_{2^{k}}^t)\\[3mm]
&&+c^*_k J_{2^{k}}^tA_{2^{k}}^t
+(-1)^{\frac{k(k-1)+(k+1)(k+2)}{2}}c_k^* J_{2^{k}}^tA_{2^{k}}^t
=0. \end{array}
$$
Hence the first formula holds also for $k+1$.
The second formula can be verified similarly.
$\rule{2mm}{2mm}$
{\sf Lemma 5}. Matrices $B_{2^k}$, $A_{2^k}$ and $C_{2^k}$
satisfy the following relations:
\begin{equation}\label{fa2k}
((-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast)
((-1)^{k(k+1)\over 2}A_{2^k}^\ast B_{2^k}+C_{2^k}^\ast A_{2^k})^t
= F(A_{2^{k+1}})Id_{2^k},
\end{equation}
where
\begin{equation}\label{F}
F(A_{2^{k+1}}) =c_k^{\ast 2}[A_{2^k}]+b_k^2[A_{2^k}]^{\ast}
+(-1)^{k(k+1)\over 2} b_kc_k^{\ast} \| A_{2^k} \|.
\end{equation}
{\sf Proof}. By using Lemma 3 and 4, we have
$$
\begin{array}{l}
((-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast)
((-1)^{k(k+1)\over 2}A_{2^k}^\ast B_{2^k}+C_{2^k}^\ast A_{2^k})^t\\[3mm]
=b_k^2(J_{2^k}A_{2^k}^{\ast})(A_{2^k}^{\ast}J_{2^k})^t
+c_k^{\ast 2}(A_{2^k}J_{2^k})(J_{2^k}A_{2^k})^t\\[3mm]
~~~+(-1)^{k(k+1)\over 2}b_k c_k^{\ast}[(A_{2^k}^{\ast}J_{2^k})(A_{2^k}^{\ast}J_{2^k})^t
+(J_{2^k}A_{2^k}^{\ast})(J_{2^k}A_{2^k})^t]\\[3mm]
=(c_k^{\ast 2}[A_{2^k}]+b_k^2[A_{2^k}]^{\ast}
+(-1)^{k(k+1)\over 2} b_kc_k^{\ast} \| A_{2^k} \|) Id_{2^k}
=F(A_{2^{k+1}})Id_{2^k}.
\end{array}
$$
{\sf Lemma 6}. $A_{2^k}$ and $J_{2^k}$ satisfy the following relation:
\begin{equation}\label{fa2kp}
\| A_{2^k} \| J_{2^k}A_{2^k}^\ast
A_{2^k}^tJ_{2^k}^t = [ A_{2^k} ][ A_{2^k} ]^\ast Id_{2^k} +
J_{2^k}A_{2^k}^\ast A_{2^k}^tA_{2^k}^\ast A_{2^k}^tJ_{2^k}^t. \end{equation}
{\sf Proof}. From (\ref{fa2k}) we have the following relation:
$$
\begin{array}{l}
F(A_{2^{k+1}})J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t\\[4mm]
=((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )(-1)^{k(k+1)\over 2} b_k
(A_{2^k}^\ast J_{2^k})^tJ_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t\\[4mm]
\quad+ ((-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )c_k^\ast
(J_{2^k}A_{2^k})^tJ_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t\\[4mm]
=(-1)^{k(k+1)\over 2} b_k((-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )
[(A_{2^k}^\ast J_{2^k})^t(J_{2^k}A_{2^k}^\ast) ] A_{2^k}^tJ_{2^k}^t\\[4mm]
\quad+ c_k^\ast [(-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast
(J_{2^k}A_{2^k})^tJ_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
+A_{2^k}C_{2^k}^\ast (J_{2^k}A_{2^k})^tJ_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t ]\\[4mm]
=(-1)^{k(k+1)\over 2} b_k((-1)^{k(k+1)\over 2} b_kJ_{2^k}A_{2^k}^\ast
[ A_{2^k} ] A_{2^k}^tJ_{2^k}^t + c_k^\ast A_{2^k}J_{2^k} [A_{2^k}]^\ast A_{2^k}^tJ_{2^k}^t )\\[4mm]
\quad + c_k^\ast [ (-1)^{k(k+1)\over 2} b_kJ_{2^k}A_{2^k}^\ast
A_{2^k}^tA_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
+c_k^\ast [A_{2^k}] J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t ]\\[4mm]
=b_k^2[ A_{2^k} ]^\ast J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t + (-1)^{k(k+1)\over 2}
b_kc_k^\ast [ A_{2^k} ][ A_{2^k} ]^\ast Id_{2^k}\\[4mm]
\quad +(-1)^{k(k+1)\over 2} c_k^\ast
b_kJ_{2^k}A_{2^k}^\ast A_{2^k}^tA_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
+c_k^{\ast 2} [A_{2^k}] J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t\\[4mm]
=(b_k^2[ A_{2^k} ]^\ast + c_k^{\ast 2}[A_{2^k}])J_{2^k}A_{2^k}^\ast
A_{2^k}^tJ_{2^k}^t + (-1)^{k(k+1)\over 2}
b_kc_k^\ast ([ A_{2^k} ][ A_{2^k} ]^\ast Id_{2^k}\\[4mm]
\quad + J_{2^k}A_{2^k}^\ast A_{2^k}^tA_{2^k}^\ast A_{2^k}^tJ_{2^k}^t).
\end{array}
$$
Using (\ref{F}) we have
$$
\| A_{2^k} \| J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
= [ A_{2^k} ][ A_{2^k} ]^\ast Id_{2^k}
+ J_{2^k}A_{2^k}^\ast A_{2^k}^tA_{2^k}^\ast A_{2^k}^tJ_{2^k}^t.
$$
$\rule{2mm}{2mm}$
{\sf Theorem 3}. The eigenvalue polynomial of
$A_{2^{k+1}}A_{2^{k+1}}^\dag$ satisfies the following relations:
\begin{equation}
\label{thm4}
\begin{array}{l} |A_{2^{k+1}}A_{2^{k+1}}^\dag-\lambda
Id_{2^{k+1}} |
=(\lambda^2-||A_{2^{k+1}}||\lambda+[A_{2^{k+1}}][A_{2^{k+1}}]^*)^{2^k},\\[3mm]
|A_{2^{k+1}}^\dag A_{2^{k+1}}-\lambda Id_{2^{k+1}} |
=(\lambda^2-||A_{2^{k+1}}||\lambda+[A_{2^{k+1}}][A_{2^{k+1}}]^*)^{2^k}.
\end{array}
\end{equation}
{\sf Proof}.
Let
$$
\Lambda_k=-[(c_kc_k^\ast-\lambda) Id_{2^k} + A_{2^k}^tA_{2^k}^\ast] [(-1)^{k(k+1)\over 2}
B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast]^{-1}.
$$
$$
\begin{array}{l}
\left|A_{2^{k+1}}A_{2^{k+1}}^\dag - \lambda Id_{2^{k+1}} \right|
= \left|
\left(
\begin{array}{cc}
(-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast&(b_kb_k^\ast-\lambda)Id_{2^k}
+ A_{2^k}A_{2^k}^\dag \\[3mm]
(c_kc_k^\ast-\lambda) Id_{2^k} + A_{2^k}^tA_{2^k}^\ast&(-1)^{k(k+1)\over 2} A_{2^k}^tB_{2^k}^\dag
+C_{2^k}^tA_{2^k}^\dag \\[3mm]
\end{array}
\right)
\right|\\[9mm]
=\left|
\left(
\begin{array}{cc}
Id_{2^k}&0\\[3mm]
\Lambda_k&Id_{2^k}\\[3mm]
\end{array}\right)
\left(
\begin{array}{cc}
(-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast&(b_kb_k^\ast-\lambda)Id_{2^k}
+ A_{2^k}A_{2^k}^\dag \\[3mm]
(c_kc_k^\ast-\lambda) Id_{2^k} + A_{2^k}^tA_{2^k}^\ast&(-1)^{k(k+1)\over 2} A_{2^k}^tB_{2^k}^\dag
+C_{2^k}^tA_{2^k}^\dag \\[3mm]
\end{array}
\right)\right|\\[9mm]
=\left|
\left(\begin{array}{cc}
(-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast&(b_kb_k^\ast-\lambda)Id_{2^k}
+ A_{2^k}A_{2^k}^\dag \\[3mm]
0&-\Lambda_k[(b_kb_k^\ast-\lambda)Id_{2^k} + A_{2^k}A_{2^k}^\dag]
+(-1)^{k(k+1)\over 2} A_{2^k}^tB_{2^k}^\dag+C_{2^k}^tA^\dag_{2^k}
\end{array}
\right)
\right|\\[9mm]
=\left|I+II \right|,
\end{array}
$$
where
$$
\begin{array}{rcl}
I&=&((-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast+A_{2^k}C_{2^k}^\ast)
((-1)^{k(k+1)\over 2} B_{2^k}^\ast A_{2^k}+A_{2^k}^\ast C_{2^k})^t\\[3mm]
&=&(-1)^{k(k+1)\over 2}b_kc_k[A_{2^k}]^\ast Id_{2^k}
+(-1)^{k(k+1)\over 2}b_k^\ast c_k^\ast [A_{2^k}]Id_{2^k}
+b_kb_k^\ast J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
+c_kc_k^\ast A_{2^k}A_{2^k}^\dag
\end{array}
$$
and, by using Lemma 5,
$$
\begin{array}{rcl}
II&=&-((-1)^{k(k+1)\over 2} B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )
\Lambda_k[(b_kb_k^\ast -\lambda)Id_{2^k} + A_{2^k}A_{2^k}^\dag]\\[3mm]
&=&[(c_kc_k^\ast -\lambda)((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast)
+((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )A_{2^k}^tA_{2^k}^\ast]\\[3mm]
&&[(b_kb_k^\ast -\lambda)((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast)^{-1}
+((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast)^{-1}A_{2^k}A_{2^k}^\dag]\\[3mm]
&=&(b_kb_k^\ast -\lambda)(c_kc_k^\ast -\lambda)Id_{2^k}+(b_kb_k^\ast -\lambda)
((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast \\[3mm]
&&+A_{2^k}C_{2^k}^\ast )A_{2^k}^tA_{2^k}^\ast
((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )^{-1}
+(c_kc_k^\ast -\lambda)A_{2^k}A_{2^k}^\dag\\[3mm]
&&+((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast)A_{2^k}^tA_{2^k}^\ast
((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast)^{-1}A_{2^k}A_{2^k}^\dag\\[3mm]
&=&(b_kb_k^\ast -\lambda)(c_kc_k^\ast -\lambda)Id_{2^k}+
(c_kc_k^\ast -\lambda)A_{2^k}A_{2^k}^\dag
+\frac{b_kb_k^\ast -\lambda}{F(A_{2^{k+1}})}III
+\frac{1}{F(A_{2^{k+1}})}III A_{2^k}A_{2^k}^\dag,
\end{array}
$$
where
$$
\begin{array}{rcl}
III&=&((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast
+A_{2^k}C_{2^k}^\ast )A_{2^k}^tA_{2^k}^\ast
((-1)^{k(k+1)\over 2}A_{2^k}^\ast B_{2^k}+C_{2^k}^\ast A_{2^k})^t\\[3mm]
&=&((-1)^{k(k+1)\over 2}B_{2^k}A_{2^k}^\ast +A_{2^k}C_{2^k}^\ast )
A_{2^k}^tJ_{2^k}^tJ_{2^k}A_{2^k}^\ast
((-1)^{k(k+1)\over 2}A_{2^k}^\ast B_{2^k}+C_{2^k}^\ast A_{2^k})^t\\[3mm]
&=&[(-1)^{k(k+1)\over 2}b_k (J_{2^k}A_{2^k}^\ast )(J_{2^k}A_{2^k})^t
+c_k^\ast (A_{2^k}J_{2^k})(J_{2^k}A_{2^k})^t]\\[3mm]
&&[(-1)^{k(k+1)\over 2}b_k (J_{2^k}A_{2^k}^\ast )(A_{2^k}^\ast J_{2^k})^t
+c_k^\ast (J_{2^k}A_{2^k}^\ast )(J_{2^k}A_{2^k})^t]\\[3mm]
&=&[(-1)^{k(k+1)\over 2}b_k (J_{2^k}A_{2^k}^\ast )(J_{2^k}A_{2^k})^t
+c_k^\ast [A_{2^k}]Id_{2^k}]\cdot\\[3mm]
&&~~[(-1)^{k(k+1)\over 2}b_k [A_{2^k}]^\ast Id_{2^k}
+c_k^\ast (J_{2^k}A_{2^k}^\ast )(J_{2^k}A_{2^k})^t]\\[3mm]
&=&(b_k^2[A_{2^k}]^\ast+c_k^{\ast 2}[A_{2^k}])J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
+(-1)^{k(k+1)\over 2}b_kc_k^\ast J_{2^k}A_{2^k}^\ast
A_{2^k}^tA_{2^k}^\ast A_{2^k}^tJ_{2^k}^t\\[3mm]
&&+(-1)^{k(k+1)\over 2}b_kc_k^\ast [A_{2^k}][A_{2^k}]^\ast Id_{2^k}.\\[3mm]
\end{array}
$$
From Lemma 6, we get
$$
\begin{array}{rcl}
III&=&(b_k^2[A_{2^k}]^\ast+c_k^{\ast 2}[A_{2^k}])J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t
+(-1)^{k(k+1)\over 2}b_kc_k^\ast ||A_{2^k}||J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t\\[3mm]
&=&F(A_{2^{k+1}})J_{2^k}A_{2^k}^\ast A_{2^k}^tJ_{2^k}^t.
\end{array}
$$
From Lemma 3 we also have
$$
\begin{array}{rcl}
III A_{2^k}A_{2^k}^\dag&=&III A_{2^k}J_{2^k} J_{2^k}^tA_{2^k}^\dag\\[3mm]
&=&F(A_{2^{k+1}})J_{2^k}A_{2^k}^\ast(J_{2^k}A_{2^k})^t(A_{2^k}J_{2^k})
(A_{2^k}^\ast J_{2^k})^t
=F(A_{2^{k+1}})[A_{2^k}][A_{2^k}]^\ast Id_{2^k}.
\end{array}
$$
Therefore,
$$
\begin{array}{l}
\left|A_{2^{k+1}}A_{2^{k+1}}^\dag - \lambda Id_{2^{k+1}} \right|
=|I+II|\\[3mm]
=|-\lambda^2 Id_{2^k}+\lambda(b_kb_k^\ast+c_kc_k^\ast+||A_{2^k}||)Id_{2^k}
-(b_kb_k^\ast c_kc_k^\ast-(-1)^{k(k+1)\over 2}b_k^\ast c_k^\ast[A_{2^k}]\\[3mm]
~~~-(-1)^{k(k+1)\over 2}b_kc_k[A_{2^k}]^\ast+[A_{2^k}][A_{2^k}]^\ast)Id_{2^k}|\\[3mm]
=(\lambda^2-||A_{2^{k+1}}||\lambda+[A_{2^{k+1}}][A_{2^{k+1}}]^\ast)^{2^k},
\end{array}
$$
where the first formula in Lemma 4 is used. The second formula in Theorem 3
is obtained from the fact that $A_{2^{k+1}}A_{2^{k+1}}^\dag$ and
$A_{2^{k+1}}^\dag A_{2^{k+1}}$ have the same eigenvalue set.
$\rule{2mm}{2mm}$
From Theorem 2 and 3 the states given by (\ref{a2k}) are $d$-computable.
In terms of (\ref{GC}) the generalized concurrence for these states is given by
$$
d_{2^{k+1}}=2^{k+1}\vert[A_{2^{k+1}}]\vert=2^{k+1}\vert b_kc_k+
b_{k-1}c_{k-1}+...+b_1c_1+ad+c^2\vert.
$$
Let $p_{2^{k+1}}$ be a symmetric anti-diagonal $2^{2k+2}\times 2^{2k+2}$ matrix with
all the anti-diagonal elements $1$ except for those
at rows $2^{k+1}-1 + s(2^{k+2}-2)$, $2^{k+1} + s(2^{k+2}-2)$,
$2^{k+2}-1 + s(2^{k+2}-2)$, $2^{k+2} + s(2^{k+2}-2)$,
$s=0,...,2^{k+1}-1$, which are $-1$. $d_{2^{k+1}}$ can then be written as
\begin{equation}\label{dkp}
d_{2^{k+1}}=|\langle \psi_{2^{k+1}} |p_{2^{k+1}}\psi_{2^{k+1}}^{*} \rangle |,
\end{equation}
where
\begin{equation}\label{psi2k1}
\vert\psi_{2^{k+1}}\rangle=\sum_{i,j=1}^{2^{k+1}} (A_{2^{k+1}})_{ij}\,e_i\otimes e_j.
\end{equation}
Let $\Phi$ denote the set of pure states with the form (\ref{psi2k1}).
For mixed states with
density matrices such that their decompositions are of the form
\begin{equation}\label{rho12k1}
\rho_{2^{2k+2}} = \sum_{i=1}^M p_i |\psi_i \rangle \langle \psi_i|,~~~~\sum_{i=1}^M
p_i =1,~~~~|\psi_i\rangle\in\Phi,
\end{equation}
their entanglement of formations,
by using a similar calculation in obtaining formula (\ref{drho}) \cite{fjlw},
are then given by $E(d_{2^{k+1}}(\rho_{2^{2k+2}}))$, where
\begin{equation}\label{d2k1}
d_{2^{k+1}}(\rho_{2^{2k+2}})=\Omega_1 - \sum_{i=2}^{2^{2k+2}}\Omega_i,
\end{equation}
and $\Omega_i$, in decreasing order, are
the square roots of the eigenvalues of the
matrix $\rho_{2^{2k+2}} p_{2^{k+1}}{\rho_{2^{2k+2}}^\ast}p_{2^{k+1}}$.
Here again due to the form of the so constructed matrix $A_{2^{k+1}}$ in (\ref{a2k}),
once $\rho$ has a decomposition with all $|\psi_i\rangle\in\Phi$, all
other decompositions of $|\psi_i^\prime\rangle$
also satisfy $|\psi_i^\prime\rangle\in\Phi$.
Therefore from high dimensional $d$-computable states $A_{2^{k+1}}$,
$2\leq k\leq N$, the entanglement of formation for a class of density matrices
whose decompositions lie in these $d$-computable quantum states can be obtained
analytically.
\section{Remarks and conclusions}
Besides the $d$-computable states constructed above,
from (\ref{ha4}) we can also construct another class of high dimensional
$d$-computable states given by $2^{k+1}\times 2^{k+1}$ matrices $A_{2^{k+1}}$,
$2\leq k\in{I\!\! N}$,
\begin{equation}\label{newa}
A_{2^{k+1}}= \left(
\begin{array}{cc}
B_k&A_k\\[3mm]
-A_k^t&C_k\\[3mm]
\end{array}
\right)
\equiv\left(
\begin{array}{cc}
b_{k}I_{2^{k}}&A_{2^{k}}\\[3mm]
-A_{2^{k}}^t&c_{k}I_{2^{k}}
\end{array}
\right),
\end{equation}
where $b_k,~c_k\in\Cb$, $I_4=J_4$,
\begin{equation}
I_{2^{k+1}}= \left(
\begin{array}{cc}
0&I_{2^{k}}\\[3mm]
-I_{2^{k}}&0\\[3mm]
\end{array}
\right)
\end{equation}
for $k+2~{\rm mod}~4 =0$,
\begin{equation}
I_{2^{k+1}}= \left(
\begin{array}{cc}
0&I_{2^{k}}\\[3mm]
I_{2^{k}}&0
\end{array}
\right)
\end{equation}
for $k+2~{\rm mod}~4 =1$,
\begin{equation}
I_{2^{k+1}}= \left(
\begin{array}{cccc}
0&0&0&I_{2^{k-1}}\\[3mm]
0&0&-I_{2^{k-1}}&0\\[3mm]
0&I_{2^{k-1}}&0&0\\[3mm]
-I_{2^{k-1}}&0&0&0
\end{array}
\right)
\end{equation}
for $k+2~{\rm mod}~4 =2$, and
\begin{equation}
I_{2^{k+1}}= \left(
\begin{array}{cccccccc}
0&0&0&0&0&0&0&I_{2^{k-2}}\\[3mm]
0&0&0&0&0&0&-I_{2^{k-2}}&0\\[3mm]
0&0&0&0&0&-I_{2^{k-2}}&0&0\\[3mm]
0&0&0&0&I_{2^{k-2}}&0&0&0\\[3mm]
0&0&0&-I_{2^{k-2}}&0&0&0&0\\[3mm]
0&0&I_{2^{k-2}}&0&0&0&0&0\\[3mm]
0&I_{2^{k-2}}&0&0&0&0&0&0\\[3mm]
-I_{2^{k-2}}&0&0&0&0&0&0&0
\end{array}
\right)
\end{equation}
for $k+2~{\rm mod}~4 =3$.
One can prove that the matrices in (\ref{newa}) also give rise to
$d$-computable states:
$$
|A_{2^{k+1}}A_{2^{k+1}}^\dag|
=[(c^2+ad-\sum_{i=1}^{k}b_ic_i)(c^2+ad-\sum_{i=1}^{k}b_ic_i)^*]^{2^k},
$$
$$
\begin{array}{rcl}
|A_{2^{k+1}}A_{2^{k+1}}^\dag-\lambda Id_{2^{k+1}}|
&=&\displaystyle[\lambda^2-(aa^\ast+2cc^\ast+dd^\ast
+\sum_{i=1}^{k}b_ib_i^\ast+\sum_{i=1}^{k}c_ic_i^\ast)\lambda\\[4mm]
&&\displaystyle+(c^2+ad
-\sum_{i=1}^{k}b_ic_i)(c^2+ad-\sum_{i=1}^{k}b_ic_i)^*]^{2^k}.
\end{array}
$$
The entanglement of formation for a density
matrix with decompositions in these states is also given by a formula
of the form (\ref{d2k1}).
In addition, the results obtained above may be used to solve
linear equation systems, e.g., in the analysis of data bank,
described by $A{\bf x}={\bf y}$, where $A$
is a $2^{k}\times 2^{k}$ matrix, $k\in{I\!\! N}$, ${\bf x}$ and ${\bf
y}$ are $2^{k}$-dimensional column vectors.
When the dimension $2^{k}$ is large, the standard
methods such as Gauss elimination to solve $A{\bf x}={\bf y}$
could be not efficient. From our Lemma 3, if the
matrix $A$ is of one of the following forms: $A_{2^k}$,
$B_{2^k}A_{2^k}$, $A_{2^k}^t$ or $A_{2^k}^tB_{2^k}^t$, the
solution ${\bf x}$ can be obtained easily by applying the matrix
multiplicators. For example, $A_{2^k}{\bf x}={\bf y}$ is solved by
$$
{\bf x}=\frac{1}{[A_{2^k}]}(A_{2^k}J_{2^k})^t J_{2^k}{\bf y}.
$$
The solution to $B_{2^k}A_{2^k}{\bf x}={\bf y}$ is given by
$$
{\bf x}=\frac{1}{b_k[A_{2^k}]}(A_{2^k}J_{2^k})^t J_{2^k}{\bf y}.
$$
We have presented a kind of construction for a class of special matrices
with at most two different eigenvalues. This class of matrices
defines a special kind of $d$-computable states.
The entanglement of formation for these $d$-computable states
is a monotonically increasing function
of the generalized concurrence. From this generalized concurrence
the entanglement of formation for a large class of density matrices
whose decompositions lie in these $d$-computable quantum states is obtained
analytically. Besides the relations to the quantum entanglement,
the construction of $d$-computable states has its own mathematical interests.
\begin{thebibliography}{99}
\bibitem{DiVincenzo} See, for example, D.P. DiVincenzo,
{\em Science \/} {\bf 270}, 255 (1995).
\bibitem{teleport} C.H. Bennett, G. Brassard, C. Cr\'epeau,
R. Jozsa, A. Peres
and W.K. Wootters, {\em Phys. Rev. Lett.} {\bf 70}, 1895 (1993).
\bibitem{teleport1} S. Albeverio and S.M. Fei,
{\em Phys. Lett.} {\bf A 276}, 8 (2000).
\bibitem{teleport2}
S. Albeverio and S.M. Fei and W.L. Yang,
{\em Commun. Theor. Phys.} {\bf 38}, 301 (2002);
{\em Phys. Rev.} {\bf A 66}, 012301 (2002).
\bibitem{dense} C.H. Bennett and S.J. Wiesner,
{\em Phys. Rev. Lett.} {\bf 69}, 2881 (1992).
\bibitem{crypto} See, for example, C.A. Fuchs, N. Gisin,
R.B. Griffiths, C-S. Niu, and
A. Peres, {\em Phys. Rev.}, {\bf A 56}, 1163 (1997) and
references therein.
\bibitem{Bennett96a}
C.H. Bennett, D.P. DiVincenzo, J.A. Smolin and W.K. Wootters,
{\em Phys. Rev.} {\bf A 54}, 3824 (1996).
\bibitem{BBPS} C.H. Bennett, H.J. Bernstein, S. Popescu,
and B. Schumacher, {\em Phys.~Rev.~A} {\bf 53}, 2046 (1996).
\bibitem{Vedral} V.~Vedral, M.B.~Plenio, M.A.~Rippin and
P.L.~Knight, {\em Phys. Rev. Lett.} {\bf 78}, 2275 (1997);\\
V.~Vedral, M.B.~Plenio, K.~Jacobs and P.L.~Knight, {\em Phys. Rev. A}
{\bf 56}, 4452 (1997);\\
V.~Vedral and M.B.~Plenio, {\em Phys. Rev. A} {\bf 57}, 1619 (1998).
\bibitem{Peres96a}
A. Peres, Phys. Rev. Lett. {\bf 77}, 1413 (1996).
\bibitem{Zyczkowski98a}
K. \.Zyczkowski and P. Horodecki, Phys. Rev. A {\bf 58}, 883 (1998).
\bibitem{sw}
B. Schumacher and M.D. Westmoreland, {\it Relative entropy in
quantum information theory}, quant-ph/0004045.
\bibitem{Horodecki} M.~Horodecki, P.~Horodecki and R.~Horodecki,
{\em Phys. Rev. Lett.} {\bf 80}, 5239 (1998).
\bibitem{HillWootters} S.~Hill and W.K.~Wootters, {\em Phys. Rev. Lett.}
{\bf 78}, 5022 (1997).\\
W.K.~Wootters, {\em Phys. Rev. Lett.} {\bf 80}, 2245 (1998).
\bibitem{th}
B.M. Terhal, K. Gerd and K.G.H. Vollbrecht, {Phys. Rev. Lett.} {\bf 85},
2625 (2000).
\bibitem{note}
A.Uhlmann, {\em Phys. Rev.} {\bf A 62}, 032307 (2000).\\
S. Albererio and S.M. Fei, {\em J. Opt. B: Quantum Semiclass. Opt.}
{\bf 3}, 1 (2001).\\
P. Rungta, V. Bu$\check{\rm z}$ek, C.M. Caves, M. Hillery and G.J.
Milburn, {\em Phys. Rev.} {\bf A 64}, 042315 (2001).
\bibitem{qsep}
S. Albeverio, S.M. Fei and D. Goswami, {\em Phys. Lett.} {\bf A},
91 (2001).\\
S.M. Fei, X.H. Gao, X.H. Wang, Z.X. Wang and K. Wu,
{\em Phys. Lett. A} {\bf 300}, 559 (2002);
{\em Int. J. Quant. Inform.} {\bf 1}, 37 (2003).
\bibitem{fjlw}
S.M. Fei, J. Jost, X.Q. Li-Jost and G.F. Wang,
{\em Phys. Lett.} {\bf A 310}, 333 (2003).
\end{thebibliography}
\end{document} |
\begin{document}
\title{Pure-state quantum trajectories for general non-Markovian systems do not exist }
\author{Howard M. Wiseman}
\affiliation{Centre for Quantum Dynamics, School of Science, Griffith University, Nathan 4111, Australia}
\author{J. M. Gambetta}
\affiliation{Institute for Quantum Computing and Department of Physics and Astronomy, University of Waterloo, Waterloo, Ontario, Canada N2L 3G1}
\begin{abstract}
Since the first derivation of non-Markovian stochastic Schr\"odinger equations, their interpretation has been contentious. In a recent Letter [Phys. Rev. Lett. {\bf 100}, 080401 (2008)], Di\'osi claimed to prove that they generate ``true single system trajectories [conditioned on] continuous measurement''. In this Letter we show that his proof is fundamentally flawed: the solution to his non-Markovian stochastic Schr\"odinger equation at any particular time can be interpreted as a conditioned state, but joining up these solutions as a trajectory creates a fiction.
\end{abstract}
\pacs{03.65.Yz, 42.50.Lc, 03.65.Ta}
\maketitle
It is well recognized that the continuous measurement of an open quantum system $S$ with Markovian dynamics can be described by a stochastic Schr\"odinger equation (SSE). The pure-state solution to such an equation over some time interval, a ``quantum trajectory'' \cite{Car93b}, can be interpreted as the state of $S$ evolving while its environment is under continuous observation (monitoring). This fact is of great importance for designing and experimentally implementing feedback control on open quantum systems \cite{adaptph,QED,spin-sqz}. If this interpretation could also be applied to {\em non-Markovian} SSEs \cite{StrDioGis,GamWis02}, then this would be very significant for quantum technologies, especially in condensed matter environments, which are typically non-Markovian \cite{BrePet02}.
Previously we have argued that non-Markovian SSEs (NMSSEs) {\em cannot} be interpreted in this way \cite{GamWis02,GamWis03}. The solution at any particular time can be interpreted as the system state conditioned upon some measurement of the environment \cite{GamWis02}. But connecting up those solutions to make a trajectory is a fiction akin to trajectories in Bohmian mechanics \cite{GamWis03}.
Restricting to standard quantum mechanics,
the basic problem is that for the state of $S$ to remain pure, the bath field must be continuously observed to disentangle it from the system. For Markovian dynamics, this is not a problem, because the moving field interacts with $S$ and, having interacted, moves on. But for non-Markovian dynamics, the field comes back and interacts again with $S$. Thus monitoring the field will feed disturbance back into the system, changing the {\em average} evolution of the state of $S$. That is contrary to the derivation of the NMSSE, which is constructed so as to reproduce on average the no-measurement evolution of $S$.
Recently, Di\'osi\ rederived one form of NMSSE from a different starting point, and claimed that, contrary to the conclusions of Ref.~\cite{GamWis03}, this allows an interpretation of the solutions as ``true single system trajectories [conditioned on] continuous measurement'' \cite{Dio08}. Here we show by general argument, and an explicit calculation, that this claim is incorrect, and that the reformulation does not alter our earlier conclusion.
\section{The non-Markovian system} Di\'osi\ considers a bath comprising an infinite sequence of von Neumann apparatuses $A_n$, each described by position and momentum operators $\hat x_n$, $\hat p_n$, $n\in\cu{1,2,\ldots \infty}$. (For clarity, we are using slightly different notation from Ref.~\cite{Dio08}.) The system interacts with the bath via the coupling Hamiltonian
\begin{equation} \label{defV}
\hat V = \sum_n \delta(t-\tau_n) \hat X \hat p_n, \;\; \tau_n=\epsilon n,
\end{equation}
where $\hat X$ is an Hermitian system operator. Here the explicit time-dependence plays the role of the free propagation of a bath field. This would seem to be a recipe for generating Markovian evolution, since $S$ interacts only once with each $A_n$, which thus plays a role analogous to a small segment of a Markovian bath field. The novelty of Di\'osi's approach is to generate non-Markovian evolution by having the $\cu{A_k}_{k=1}^\infty$ prepared in an entangled state $\ket{\phi_0}$. In the position representation it is given by
\begin{equation} \label{phi0}
\ip{ \cu{x_k}_{k=1}^\infty}{\phi_0} \propto \exp\left[{ - \epsilon^2\sum_{l,m} x_l x_m \alpha(\tau_l-\tau_m) }\right].
\end{equation}
The continuum-time limit is $\epsilon\to0$, where the system is subjected to infinitely frequent, but infinitesimally strong, interactions with the apparatuses. In this limit, $\alpha(t)$ plays the role of the correlation function for the bath.
It is a real and symmetric function \cite{GamWis02,Bassi:2002a}, and equals $g^2\delta(t)$ in the Markovian case. Assuming the system is initially in a pure state also, the Hamiltonian (\ref{defV}) produces an entangled system--bath state $\ket{\Psi(\tau_n^+)}$ immediately after the $n{\mathrm{th}}$ interaction.
Di\'osi\ first considers the case where, immediately after each time $\tau_n$, the observable $\hat x_n$ is measured, yielding result $x_n$. This gives an unnormalized state for the conditioned quantum system,
$\tilde \rho(\tau_n^+;\cu{x_l}_{l=1}^{n})$, given by
\begin{equation}
{\rm Tr}_{\cu {A_m}_{m=n+1}^{\infty}}\left[\ip{\cu{x_l}_{l=1}^{n}}{\Psi(\tau_n^+)}\ip{\Psi(\tau_n^+)}{\cu{x_l}_{l=1}^{n}}\right],
\end{equation}
with $\tr{\tilde \rho(\tau_n^+;\cu{x_l}_{l=1}^{n})}$ being the probability for the record $\cu{x_l}_{l=1}^{n}$. In the limit $\epsilon \to 0$, this state (if appropriately scaled) will have a continuous but stochastic evolution through time. The measurement of observable $\hat x_n$ does not disturb the future evolution of $S$ because $A_n$ never interacts with $S$ again. Thus, there is no difficulty with interpreting this stochastic evolution as the trajectory of an individual system, with the average state at time $t$
\begin{equation}
\rho(t) = \int_{-\infty}^\infty \! dx_0 \cdots \int_{-\infty}^\infty \! dx_n\ \tilde \rho(t;\cu{x_l}_{l=1}^{\lfloor t/\epsilon \rfloor})
\end{equation}
being identical with that obtained simply by tracing over the bath (the apparatuses),
\begin{equation}
\rho(t) = {\rm Tr}_{\cu {A_k}_{k=1}^{\infty}}\left[\ket{\Psi(t)}\bra{\Psi(t)}\right].
\end{equation}
It is obvious, however, that $\tilde \rho(t;\cu{x_l}_{l=1}^{\lfloor t/\epsilon \rfloor})$ is {\em not} the solution of a SSE, for the simple reason that the state is mixed, not pure, even if it begins pure \footnote{It is not clear whether this mixed state trajectory is the solution of a well-defined non-Markovian stochastic master equation.}. The mixedness arises because the interaction of $S$ with $A_n$ entangles $S$ with $A_m$ for $m > n$, because initially $A_n$ and $A_m$ are entangled. That is, the system becomes entangled with apparatuses that are not yet measured. A mixed conditional state is not unexpected for non-Markovian systems. It has previously been shown in Refs. \cite{Imamoglu:1994a} and \cite{Breuer:2004b} that it is possible to derive a mixed state quantum trajectory equation that reproduces the non-Markovian evolution on average by adding to $S$ a fictitious system $F$, with the latter coupled to a monitored (Markovian) bath. A mixed state for $S$ arises when the partial trace over $F$ is performed. See Ref.~\cite{GamWis02b} for a comparison of this method with that of the NMSSE.
\section{The non-Markovian SSE and its interpretation}
The only way to obtain a pure state for $S$ at time $t$ is by measuring all the apparatuses with which the system is entangled. Specifically, Di\'osi\ shows that it is necessary to measure the set of bath observables $\cu{\hat z(s):s\in[0,t]}$, where
$\hat z(s)$ is the ``retarded observable'' \cite{Dio08}
\begin{equation} \label{defz}
\hat z(s) = 2\epsilon \sum_{k=1}^{\infty } \hat x_k \alpha(s-\tau_k) .
\end{equation}
This is of course a different observable at different times $s$. The state conditioned on the result $Z_t \equiv \cu{z(s):s\in[0,t]}$ of this measurement at time $t$ is a {\em functional}
of $z(s)$ for $0\leq s \leq t$, which we will write as $\ket{\bar \psi_t[Z_t]}$. Di\'osi\ shows that this state is pure, and that it is the solution of the NMSSE
\begin{equation}
\frac{d\ket{\bar \psi_t[Z_t]}}{dt} = \hat X_t\left(z(t) {-} 2 \int_0^t \alpha(t-s)
\frac{\delta}{\delta z(s)} ds\right)\ket{\bar \psi_t[Z_t]}. \label{NMSSE}
\end{equation}
Here, Di\'osi\ is working in the interaction picture with respect to the system Hamiltonian $\hat H$; hence, the time dependence of $\hat X_t \equiv e^{i\hat H t}\hat X e^{-i\hat H t}$.
Equation (\ref{NMSSE}) was first derived in Refs.~\cite{GamWis02,Bassi:2002a}, but is very similar to that derived earlier in Refs.~\cite {StrDioGis}. The ensemble average of solutions of this NMSSE reproduces the reduced state of the system:
\begin{equation}
\rho(t) = {\rm E} \sq{\op{\bar \psi_t[Z_t]}{\bar \psi_t[Z_t]}} .
\end{equation}
Here in taking the expectation value, $z(t)$ must be treated as a Gaussian noise process with correlation function ${\rm E}[z(t)z(s)] = \alpha(t-s)$, as appropriate for $\ket{\phi_0}$. This convention is indicated by the notation $\bar\psi$ (as opposed to $\tilde\psi$) for the state.
The contentious issue is not whether the solution $\ket{\bar \psi_t[Z_t]}$ has an interpretation in standard quantum mechanics. As just explained, this state is the conditioned state of $S$ at time $t$ if an {\em all-at-once measurement} of the set of bath observables $\cu{\hat z(s):s\in[0,t]}$ were made at that time, yielding the result $Z_t$. The contentious issue is: can the {\em family} of states $\ket{\bar \psi_t[Z_t]}$ for $0\leq t \leq \infty$ be interpreted as a trajectory for the state of a single system, conditioned on monitoring of its bath? Di\'osi\ claims that it can be so interpreted, and that the required monitoring is simply to measure $\hat z(\tau_0)$ at time $\tau_0^+$, $\hat z(\tau_1)$ at time $\tau_1^+$ and so on. At first sight this {\em monitoring} may seem equivalent to the all-at-once measurement described above. But in fact it is not, as we will now explain.
A measurement of $\hat z(t)$ at time $t^+$ involves measuring apparatuses that have not yet interacted with $S$. This is necessarily so because the symmetry of $\alpha(\tau)$ means that $\hat z(t)$ contains contributions from $\hat x_m$ for some $\tau_m>t$ (except for the Markovian case of course). Consequently, $\hat z(t)$ does not commute with $\hat p_m$ for some $\tau_m>t$, and the measurement will therefore disturb these momentum observables. But these are precisely the observables that will couple to the system via (\ref{defV}), and thereby disturb it. Thus, as soon as the first measurement is performed, of $\hat z(\tau_0)$ at time $\tau_0$, $S$ ceases to obey the NMSSE. Whatever stochastic evolution it does undergo, it will not reproduce the reduced state of the unmeasured system $\rho(t)$.
It might be thought that it would be possible to avoid this alteration of the future evolution of the system by repreparing the apparatuses $A_m$ for $\tau_m>t$ in their pre-measurement states. However, this is not possible; before the measurement, these $A_m$ were {\em entangled} with the system $S$ and the other apparatuses. The correlation of these $A_m$ with $S$ is why the system state $\tilde \rho(\tau_n;\cu{x_l}_{l=1}^{n})$, conditioned on measuring the apparatuses after they have interacted with the system, is {\em mixed}. The evolution of this state over time is the only true quantum trajectory for a single system, and its mixedness is an inevitable consequence of the non-Markovian dynamics. In fact, we now show by explicit calculation that the monitoring Di\'osi\ suggests does not even produce pure conditioned states of $S$ --- it also leads to mixed states.
\section{A simple example}
We consider the case where the bath consists of two apparatuses and $\epsilon=1$.
Thus there are just three relevant times, $\tau_0=0$ (the initial time), $\tau_1^+=1$ (just after the interaction with $A_1$) and $\tau_2^+=2$ (just after the interaction with $A_2$). Without loss of generality, we can write the initial Gaussian entangled state of the bath, analogous to \erf{phi0}, as
\begin{equation}
\phi_0(x_1,x_2)=c\exp[-(x_1^2+x_2^2 + 2 ax_1x_2)],
\end{equation}
where $c^2 ={2 \sqrt{1-a^2}}/\pi$. Here $0\leq a<1$ parametrizes the initial entanglement between the apparatuses. The analogue of \erf{defz} defines two operators,
\begin{equation}
\begin{split}
\hat z_1 = 2(\hat x_1+ a\hat x_2) , ~\hat z_2 = 2(\hat x_2+ a\hat x_1) .
\end{split} \label{defdisz}
\end{equation}
Let us consider the unconditioned evolution of the system. At the initial time $\tau_0$ the total state is
\begin{equation}
\begin{split}
\ket{\Psi_0}=& \int_2 \phi_0(x_1,x_2) \ket{x_1}_1\ket{x_2}_2\ket{\psi_0} dx_1 dx_2,
\end{split}
\end{equation} where the final ket (with no subscript) denotes a state of $S$,
and the subscript on the integral sign indicates it is a double integral.
This evolves to
the following state immediately after the interaction with the first apparatus:
\begin{eqnarray}
\ket{\Psi_1}&=& \int_3 \phi_0(x_1,x_2)\ket{x_1+ X_1}_1\ket{x_2}_2 \nl{\times}\ket{X_1} \langle X_1\ket{\psi_0} dx_1dx_2dX_1.
\label{Psi1}
\end{eqnarray}
Here $\ket{X_1}$ denote eigenstates of $\hat X_1\equiv\hat X(\tau_1)$, which we have taken to
have a continuous spectrum for simplicity.
Finally, after the second interaction, the total state is
\begin{eqnarray}
\ket{\Psi_2}&=& \int_4 \phi_0(x_1,x_2) \ket{x_1+ X_1}_1\ket{x_2+ X_2}_2 \ket{X_2} \nl{\times}
\bra{X_2}X_1\rangle\langle{X_1}\ket{\psi_0} dx_1dx_2dX_1 dX_2. \label{eq:total}
\end{eqnarray}
From Eq. \eqref{eq:total}, the reduced state for the system at time $\tau_2^+$ is simply
\begin{eqnarray}
\rho_2&=& \int_4 \phi_0^2\left(\frac{X_1-X_1'}{2},\frac{X_2-X_2'}{2}\right) \ket{X_2}\ip{X_2}{X_1}\ip {X_1}{\psi}\nl{\times} \ip{\psi}{ X_1'} \ip{X_1'}{ X_2'} \bra{X_2'} dX_1 dX_2 dX_1'dX_2'.
\end{eqnarray}
\section{All-at-once measurement at time $\tau_2^+$}
It is convenient to use, rather than the observables $\hat z_n$ (\ref{defdisz}), the scaled observables
\begin{equation}
\begin{split}
\hat y_1 &= \hat z_1/2 = \hat x_1+ a\hat x_2 \equiv \zeta_1(\hat x_1,\hat x_2),\\
\hat y_2 &= \hat z_2/2 =\hat x_2+ a\hat x_1 \equiv \zeta_2(\hat x_1,\hat x_2).
\end{split} \label{defdisy}
\end{equation}
A measurement of $\hat z_n$, or $\hat y_n$, is described by the projector-density $\hat{\Pi}_n(y_n)$, defined by
\begin{equation}\label{eq:projectors}
\hat\Pi_n(y_n) =\int dx_1 \int dx_2 \ \hat\pi_1(x_1)\otimes \hat\pi_2(x_2) \delta(y_n-\zeta_n(x_1,x_2)),
\end{equation}
where $\hat\pi_n(x)=\ket{x}_n\bra{x}_n$. Note that, unlike $\hat\pi_n(x)$, $\hat \Pi_n(y)$ is not a rank-one projector; it is in fact a rank-infinity projector.
It satisfies $\int dy\hat \Pi_n(y)=1$ and $\hat \Pi_n(y)\hat \Pi_n(y')=\delta(y-y')\hat \Pi_n(y)$ (no sum over $n$ implied). It is obvious from the definition (\ref{defdisz}) that the two measurements commute.
Consider first the case where at time $\tau_2^+$ projective measurements of $\hat y_1$ and $\hat y_2$ are performed. This yields
\begin{eqnarray}
\ket{\tilde\Psi_{2}(y_1,y_2)}&=& \hat \Pi_2(y_2)\hat \Pi_1(y_1) \ket{\Psi_2} \nonumber \\
&=& \ket{\frac{y_1-ay_2}{1-a^2}}_1\ket{\frac{y_2-ay_1}{1-a^2}}_2\ket{\tilde\psi_2(y_1,y_2)}, \nonumber\\
\label{eq:conditionstate}
\end{eqnarray}
where the conditional system state $\ket{\tilde\psi_{2}(y_1,y_2)}$ is
\begin{eqnarray}
&&c\int_2 \exp[-(X_1-y_1)^2-(X_2-y_2)^2]
\nl{\times}\exp\ro{-2aX_1X_2-\frac{a^2(y_1^2+y_2^2)-2ay_1y_2}{1-a^2} }\ket{X_2}
\nl{\times}\bra{X_2}X_1\rangle\langle{X_1}\ket{\psi_0} dX_1 dX_2.
\label{eq:conditionsysstate}
\end{eqnarray}
Obviously $S$ is no longer entangled with $\cu{A_1,A_2}$. This is as expected since the operators $\hat y_1$ and $\hat y_2$ are linearly independent, and jointly measuring these is equivalent to jointly measuring $\hat x_1$ and $\hat x_2$. That is, the measurement at time $\tau_2^+$ effects a rank-one projective measurement on the bath, disentangling it from the system. Moreover, it is easy to verify that, as expected,
\begin{equation}
\frac{1}{1-a^2}\int_2\ket{\tilde\psi_{2}(y_1,y_2)}\bra{\tilde\psi_{2}(y_1,y_2)} dy_1dy_2 = \rho_2.
\end{equation}
This establishes that \erf{eq:conditionsysstate} is indeed the discrete-time analogue of the solution of the NMSSE (\ref{NMSSE}) at the
relevant time (here $\tau_2^+$).
\section{Monitoring (measurements at $\tau_1^+$ and $\tau_2^+$)}
Now consider the case that Di\'osi\ claims is equivalent to the above, namely measuring $\hat y_1$ at time $\tau_1^+$ and $\hat y_2$ at $\tau_2^+$. From \erf{Psi1}, the conditional total state at time $\tau_1^+$ is
\begin{eqnarray}
\label{Psi1alt}
\ket{\tilde\Psi_{1}(y_1)}&=&\hat \Pi_1(y_1)\ket{\Psi_1}\\
&=&\int e^{-\left(1-a^2\right) x^2}\ket{ {y_1} -ax}_1\ket{x}_2dx \ket{\tilde\psi_{1}(y_1)} , \nonumber
\end{eqnarray}
where the conditional system state is
\begin{equation}
\ket{\tilde\psi_{1}(y_1)} = c
\exp\left[- (y_1-\hat X_1)^2\right]\ket{\psi_0}.
\end{equation} So far we have a pure state for the system, as expected from Di\'osi's argument. However, at the very next step
it breaks down. Because the measurement of the bath has disturbed it, we cannot use the state (\ref{eq:total}) to calculate the next conditioned state. Rather, we must calculate the effect of the interaction between $S$ and $A_2$ on state (\ref{Psi1alt}). The new entangled system-bath state at $\tau_2^+$ is
\begin{eqnarray}
\ket{\tilde\Psi_{2|1}(y_1)} &=& \int_2 e^{-\left(1-a^2\right) x^2} \ket{y_1-ax}_1\ket{x+X_2}_2 dx \nl{\times}\ket{X_2}\left\langle{X_2} \ket {\tilde\psi_{1}(y_1)}\right. dX_2 \label{Psi21}
\end{eqnarray}
Here the ${2|1}$ subscript indicates that the state is at time $\tau_2^+$ but the measurement it is conditioned upon was performed at time $\tau_1^+$.
After the second measurement we have
\begin{equation}
\ket{\tilde\Psi_{2|1,2}(y_1,y_2)}=\hat \Pi_2(y_2)\ket{\tilde\Psi_{2|1}(y_1)},
\end{equation}
which evaluates to
\begin{eqnarray}
&&c\int \ket{\frac{aX_2+y_1-ay_2}{1-a^2}}_1 \ket{\frac{y_2-a^2 X_2-ay_1 }{1-a^2}}_2\nl{\times}
\exp\left[-\frac{( X_2+ay_1-y_2)^2}{1-a^2}\right]
\nl{\times}\ket{X_2}\bra{X_2} \exp[-(\hat X_1-y_1)^2]\ket{\psi_{0}} dX_2 \label{Froghorn}
\end{eqnarray}
Note that this is an entangled state between $S$ and the bath --- it is not possible to define a pure conditional state
for the system. The reason is that, as noted above, the projector $\hat\Pi_2(y_2)$ is not rank-one, so there is no guarantee that it will disentangle the system from the bath. So the monitoring procedure Di\'osi\ describes cannot possibly correspond to the solution of the NMSSE (\ref{NMSSE}). Moreover, it is easy to verify that, as expected,
\begin{equation}
\int_2 \mathrm{Tr}_{12}\sq{\ket{\tilde\Psi_{2|1,2}(y_1,y_2)}\bra{\tilde\Psi_{2|1,2}(y_1,y_2)}} dy_1dy_2 \neq \rho_2.
\end{equation}
That is, the measurements described by Di\'osi\ disturb the evolution of the system so that it no longer obeys the original non-Markovian dynamics.
\section{Markovian limit} There is one case where Di\'osi's monitoring procedure does give a pure-state solution at all times which is identical to that which would be obtained by an all-at-once measurement at that time. This is the case $a\rightarrow 0$, where $\hat y_n=\hat x_n$. That is to say, the initial bath state is unentangled, and the apparatuses are measured locally. In this Markovian limit we find
\begin{equation}
\ket{\tilde\Psi_{2}(y_1,y_2)}= \ket{y_1}_1\ket{y_2}_2 \ket{\tilde\psi_{2}(y_1,y_2)},
\end{equation}
where the conditional state $\ket{\tilde\psi_{2}(y_1,y_2)}$ is given by
\begin{equation}
c\exp\left[- (\hat X_2-y_2)^2\right]\exp\left[- (\hat X_1-y_1)^2\right]\ket{\psi_0}.
\end{equation}
This sequence of exponentials can obviously be continued indefinitely.
The correspondence between the all-at-once measurement and Di\'osi's monitoring here is not surprising: in the Markovian limit the interpretation of a SSE in terms of continuous monitoring of the bath is well known.
To conclude, Di\'osi\ has introduced an elegant formulation of non-Markovian evolution using a local (Markovian) coupling to the bath but an initially non-local (entangled) bath state. In this formulation, it is simple to monitor the bath without affecting the future evolution of the system, because each apparatus only interacts with the system once. However, to make the conditioned system state pure, it is necessary to measure not only the apparatuses which have already interacted with the system, but also some of those which are yet to interact. Measuring the latter necessarily introduces noise that will disturb the future evolution of the system, so that it will not reproduce the original non-Markovian evolution on average. We show by explicit calculation that the monitoring scheme suggested by Di\'osi\ does disturb the evolution in this manner, and moreover it even fails to produce pure conditional system states.
While it is certainly possible to derive a non-Markovian stochastic Schr\"odinger equation, its solution can only be interpreted as a conditioned system state at some particular (but arbitrary) time $t$ \cite{GamWis02,GamWis03}. Connecting the solutions at different times creates the illusion of a ``quantum trajectory'', but is not part of standard quantum mechanics. Rather, it is related to Bohmian mechanics and its generalizations \cite{GamWis04} which also allow one to derive discontinuous (jumplike) trajectories \cite{GamAskWis04}. Whether the jumplike non-Markovian trajectories recently introduced in Ref.~\cite{Piilo08} can be interpreted in a similar manner remains to be determined. But from the arguments in this Letter we know that
non-Markovian pure-state trajectories cannot be interpreted as true quantum trajectories.
{\em Acknowledgements:} HMW was supported by the Australian
Research Council grant FF0458313.
JMG was partially supported by MITACS and ORDCF.
\end{document} |
\begin{document}
\RUNAUTHOR{Zetina, Contreras, and Jayaswal}
\RUNTITLE{Benders Decomposition for Non-Convex Quadratic Facility Location}
\TITLE{An Exact Algorithm for Large-Scale Non-convex Quadratic Facility Location}
\ARTICLEAUTHORS{
\AUTHOR{Carlos Armando Zetina}
\AFF{IVADO Labs and Canada Excellence Research Chair in Data Science for Real-Time Decision-Making, Polytechnique Montr\'{e}al, Canada, \EMAIL{carlos.zetina@ivadolabs.com} \URL{}}
\AUTHOR{Ivan Contreras}
\AFF{Concordia University and Interuniversity Research Centre on Enterprise Networks, \\Logistics and Transportation (CIRRELT) Montreal, Canada H3G 1M8, \EMAIL{ivan.contreras@cirrelt.ca} \URL{}}
\AUTHOR{Sachin Jayaswal}
\AFF{Indian Institute of Management Ahmedabad, Gujarat, India, \EMAIL{sachin@iima.ac.in} \URL{}}
}
\ABSTRACT{
We study a general class of quadratic capacitated $p$-location problems with single assignment where a non-separable, non-convex, quadratic term is introduced \modif{in the objective function} to account for the interaction cost between facilities and customer assignments. This problem has many applications in the field of transportation and logistics where its most well-known special case is the single-allocation hub location problem and its many variants. The non-convex, \modif{binary quadratic} program is linearized by applying a reformulation-linearization technique and the resulting continuous auxiliary variables are projected out using Benders decomposition. \modif{The obtained Benders reformulation is then solved using an exact branch-and-cut algorithm that exploits the underlying network flow structure of the decomposed separation subproblems to efficiently generate strong Pareto-optimal Benders cuts.} Additional enhancements such as a matheuristic, a partial enumeration procedure, and variable elimination tests are also embedded in the proposed algorithmic framework. Extensive computational experiments \modif{on benchmark instances (with up to 500 nodes) and on a new set of instances (with up to 1,000 nodes) of four variants of single-allocation hub location problems confirm the algorithm's ability to scale to large-scale instances.}}
\KEYWORDS{quadratic facility location; hub location; \modif{binary quadratic} programs; Benders decomposition;}
\maketitle
\section{Introduction}\label{Sec:Intro}\label{S:1}
\textit{Discrete facility location problems} (FLPs) constitute a fundamental class of problems in location science. Given a finite set of customers and a finite set of potential locations for the facilities, FLPs consist of determining the location of facilities to open and the assignment of customers to the selected facilities in order to optimize some cost (or service)-oriented objective. FLPs arise naturally in a wide variety of applications such as supply chain management, emergency and health care systems, humanitarian logistics, telecommunications systems, districting, and urban transportation planning, among others \citep[see,][]{Hamacher2001,Laporte2019}.
Given the wide range of applications, many variants of FLPs have been studied and they differ with respect to the considered objective, type of demand, network topology, and assignment pattern, among other features. Some of the classical problems involving a cost-oriented objective are \textit{fixed-charge facility location} \citep{Kuehn1963,fernandez15,Fisch17}, \textit{$p$-median} \citep{hakimi,daskin15}, \textit{uncapacitated $p$-location} \citep{Nemhauser1978,wolsey1983,Ortiz2017}, and \textit{single-source capacitated facility location problems} \citep{neebe83,Gadegaard2018}\modif{.} FLPs with service-oriented objectives are \textit{$p$-center} \citep{hakimi,calik15} and \textit{covering location problems} \citep{toregas,Church1974,garcia15}. In such classical FLP variants, the associated objective functions are assumed to be linear functions of the setup cost for opening facilities and/or of the allocation cost of the demand served by the open facilities.
In this paper, we study a general class of \textit{quadratic capacitated $p$-location problems with single assignments} (QC$p$LPs) which can be defined as follows. Let $N$ be a set of nodes representing both the set of potential facility locations and the set of customers. Let $f_k$ and $b_k$ denote the setup cost and capacity, respectively, of potential facility $k \in N$, and $c_{ik}$ denote the cost of allocating customer $i \in N$ to facility $k \in N$. Let $q_{ikjm}$ represent the interaction cost between the allocation of customer $i \in N$ to facility $k \in N$ and the allocation of customer $j \in N$ to facility $m \in N$. When $i = k$ and $j = m$, $q_{kkmm}$ corresponds to the interaction cost between facilities $k$ and $m$. The QC$p$LP consists of selecting a set of facilities to open, such that no more than $p$ facilities are opened, and of assigning each customer to exactly one open facility while respecting capacity constraints on the amount of demand served by each facility. The objective is to minimize the total setup cost, allocation cost and the (quadratic) interaction cost between facilities and between customer assignments.
The main contributions of this paper are the following. The first is to introduce the QC$p$LPs, which can be stated as a \modif{binary quadratic} program (BQP) having a non-separable, non-convex quadratic function. QC$p$LPs are quite a general class of problems that can be used to model numerous applications in transportation and telecommunications where a cost (or benefit) captures the interaction between facilities and/or customer assignments. It contains several special cases of hub location problems that are of particular interest such as the \textit{$p$-hub median problem with single assignments} \citep{o1987quadratic,ernst98,Ernst1996}, the \textit{uncapacitated hub location problem with single assignments} \citep{campbell1994integer,Ghaffarinasab2018}, the \textit{capacitated hub location problem with single assignments} \citep{contreras2011b,Meier2018}, and the \textit{star-star hub network design problem} \citep{Helme89,labbe08}. It also has as particular cases some BQPs such as the \textit{quadratic semi-assignment problem} \citep{saito09,Rostami18} and its capacitated variant. In addition, the QC$p$LP can also be used to model situations in which a negative interaction between obnoxious (or undesirable) facilities exist and as a consequence, it is desirable to locate them far from each other. For instance, dangerous plants producing or handling hazardous materials are a source of potential damage to population and their negative impact can be measured as a function of the distances between them \citep{carrizosa15}. A negative interaction among facilities also arises when the facilities compete for the same market and locating facilities as far away from each other is beneficial to mitigate cannibalization and to optimize their competitive market advantage \citep{curtin06,Lei13}.
The second contribution of this paper is to introduce and computationally compare two strong mixed-integer linear programming (MILP) reformulations of QC$p$LP. The first is obtained by applying the \textit{reformulation-linearization technique} (RLT) of \cite{adams86} to the original \modif{BQP}. The second reformulation is obtained by projecting out a large number of (continuous) variables from a relaxation of the RLT reformulation using Benders decomposition. Although the resulting Benders reformulation is not as strong as the complete RLT reformulation, its \modif{separation problem has} a network flow structure, which can be exploited to solve QC$p$LPs efficiently. The third contribution is to present an exact branch-and-cut algorithm based on the Benders reformulation to solve large-scale instances of the QC$p$LP. We develop several algorithmic refinements to accelerate its convergence. These include: \textit{i)} an exact separation procedure that efficiently generates Pareto-optimal cuts at fractional and integer points by solving transportation problems, \textit{ii)} the use of a matheuristic to generate high quality initial feasible solutions, \textit{iii)} variable elimination tests performed at the root node, and \textit{iv)} a partial enumeration phase to further reduce the size of the problem before branching is performed. To \modif{assess} the performance of our algorithm, we performed extensive computational experiments on several sets of benchmark large-scale instances \modif{of networks} with up to 1,000 nodes for special uncapacitated and capacitated cases previously studied in the literature.
The remainder of the paper is organized as follows. Section \ref{Sec:litrev} provides a succinct review on nonlinear discrete FLPs and positions this paper in view of related work. Section \ref{Sec:Problem} formally defines the problem and presents a \modif{BQP} formulation. The \modif{RLT} reformulation and the Benders reformulation are presented in Sections \ref{RLTsection} and \ref{Sec:Benders}, respectively. Section \ref{Sec:Enhance} presents the proposed acceleration techniques and the branch-and-cut algorithm. Section \ref{Sec:CompExp} presents the results of extensive computational experiments performed on a wide variety of \modif{large-scale} instances \modif{of variants of single allocation hub location problems}. Conclusions follow in Section \ref{Sec:Conclusions}.
\section{Literature Review}\label{Sec:litrev}
The study of nonlinear discrete FLPs began with the work of \cite{zangwill68} where production and distribution costs are considered to be separable concave functions of the quantities produced and distributed. In particular, \cite{zangwill68} considers an uncapacitated FLP with arbitrary concave cost functions and presents an exact algorithm that uses a characterization of the extreme points of the feasible region and dynamic programming. \cite{soland74} extends the previous problem to the capacitated case and develops an exact branch-and-bound algorithm in which dual bounds are obtained at the nodes of the enumeration tree by solving a transportation problem. \cite{o1986activity,Kelly86,o1987quadratic} are arguably the first studies dealing with quadratic extensions of FLPs. In particular, \cite{o1987quadratic} introduce a quadratic extension of the $p$-median problem to design hub-and-spoke networks where a set of hub facilities are used as transshipment, consolidation or sorting points to connect a large set of interacting nodes. A non-convex quadratic term arises from the interaction of hub facilities due to the routing of flows between node pairs. The seminal work of \citeauthor{Kelly86} gave rise to a new family of FLPs, denoted as \textit{hub location problems} (HLPs), \modif{that} has since evolved into a rich research area \citep[see,][]{campbell2012twenty,contreras2019}. HLPs lie at the heart of network design \modif{and} planning in transportation and telecommunication systems. Application areas of HLPs in transportation include air freight and passenger travel, postal delivery, express package delivery, trucking, liner shipping, public transportation, and rapid transit systems. Applications of HLPs in telecommunications arise in the design of various digital data service networks. 
For several particular classes of HLPs, exact solution algorithms have been developed using various integer programming techniques: \textit{i)} branch-and-bound algorithms that use bounding procedures such as combinatorial algorithms \citep{ernst98}, dual-ascent algorithms \citep{canovas07}, and Lagrangean relaxations \citep{contreras11a,Rostami2016,tanash2016,Alibeyg2018}; \textit{ii)} branch-and-price algorithms \citep{contreras2011b,Rothenbcher2016}; \textit{iii)} branch-and-cut algorithms \citep{labbe2005branch,Contreras2012supermodula,Zetina2017TRB,Meier2018}; and \textit{iv)} Benders decomposition algorithms \citep{camargo09,contrerasb,contreras11b,martins15,maheo19}.
\cite{Helme89} study a quadratic extension of the uncapacitated fixed-charge facility location problem arising in the design of satellite communication networks. In this case, demand nodes in the same cluster communicate via terrestrial links whereas different clusters communicate with each other via satellite links using earth stations. A (non-convex) quadratic term is considered in the objective to account for both the within and between cluster traffic costs. \cite{desrochers95} introduce a congested facility location problem in which congestion takes place at facilities and is characterized by convex delay functions that approximate the mean delay experienced by users waiting to be served at a facility. The authors propose a column generation algorithm as a bounding procedure within a branch-and-price algorithm to obtain optimal solutions of the problem. \cite{Fischetti2016} study a congested capacitated facility location problem in which the congestion cost is modeled as a convex quadratic function. The authors present a perspective formulation and an associated Benders reformulation in which all continuous variables are projected out. The resulting convex mixed integer nonlinear program is solved with a branch-and-cut algorithm to obtain optimal solutions for the linear and quadratic capacitated variants. \cite{Harkness03} study a closely related problem in which unit production costs increase once a certain scale of output is reached. Such diseconomies of scale in production are modeled using convex functions to represent situations in which production capacity can be stretched by incurring some additional cost due to overtime, procuring more costly materials, or neglecting equipment maintenance schedules. The authors propose alternative linear integer programming formulations using piecewise linear approximations of the convex functions. 
\cite{fatma14} focus on a more comprehensive model in which both economies and diseconomies of scale are jointly considered. In the context of production-distribution systems, unit production costs tend to initially decrease as production volume increases, but after some threshold production level, unit costs start to increase due to over-utilization of resources, overtime and facility congestion. Economies and diseconomies of scale are captured by an inverse S-shaped function which is initially concave and then turns convex. The authors present various column generation heuristics to approximately solve this problem.
\cite{gunluk07} and \cite{gunluk12} study a quadratic extension of the fixed-charge facility location problem in which a separable convex quadratic term is used. In particular, allocation costs are assumed to be proportional to the square of a customer's demand served by an open facility. This problem arises in the design of electric power distribution systems where distribution transformers are located close to customer nodes so as to minimize power loss. Because of cable resistance, power loss in a cable is proportional to the square of the current and the resistance of the cable. \cite{Fisch17} develop a Benders decomposition algorithm based on the perspective formulation of \cite{gunluk12} for solving large-scale instances for both the linear and separable convex quadratic variants. Finally, we refer the reader to \cite{Elhedhli12}, \cite{fatma14}, and \cite{berman2015} for additional references on nonlinear FLPs.
From a methodological perspective, our paper is related to \cite{contrerasb} where an exact algorithm based on Benders decomposition is presented to solve the uncapacitated hub location problem with multiple assignments. Similar to \cite{contrerasb}, we use a Benders reformulation in combination with several algorithmic refinements such as the separation of Pareto-optimal cuts, partial enumeration, and heuristics to obtain optimal solutions to large-scale instances. However, the way these techniques are used in our exact algorithm are significantly different. The Benders reformulation used in \cite{contrerasb} is obtained from a (mixed-integer linear) path-based formulation whereas the one we present in this work is obtained from a relaxation of a strong RLT reformulation of a BQP. \cite{contrerasb} use a standard iterative Benders decomposition algorithm in which at every iteration, a relaxed integer master problem is optimally solved to derive a cut. In our work, we solve the Benders reformulation with a branch-and-cut framework in which Benders cuts are separated at fractional and integer solutions in a single enumeration tree. Moreover, the partial enumeration, heuristic, and Benders cuts of our algorithm exploit the information from fractional solutions whereas in \cite{contrerasb} only integer solutions are used for these techniques. Finally, the separation procedure for generating (non-stabilized) Pareto-optimal cuts presented in \cite{contrerasb} is approximate and can only be used for integer solutions, whereas in our algorithm we use an efficient exact separation routine to generate stabilized Pareto-optimal cuts for both fractional and integer solutions.
\section{Problem Definition}\label{Sec:Problem}
We recall that QC$p$LP seeks to select a set of facilities to open, such that no more than $p$ facilities are opened, and to assign each customer to exactly one open facility while respecting capacity constraints on the amount of demand served by each facility. The objective is to minimize the total setup cost, allocation cost and the (quadratic) interaction cost between facilities and customer assignments. To formulate the problem, for each pair $i,k \in N$, we define binary location/allocation variables $z_{ik}$ equal to one if and only if node $i$ is assigned to facility $k$. When $i=k$, variable $z_{kk}$ represents whether a facility is installed or not at node $k$. It is also assumed that when $z_{kk}$ is equal to one, node $k$ will be allocated to itself. Hereafter, whenever obvious, the limits on summations and the $\forall$ symbol associated with the constraints are suppressed for ease of notation. The QC$p$LP can be stated as the following \modif{BQP}:
\begin{eqnarray}
(QF) \quad \min & &
\sum_{k \in N} f_{k} z_{kk} + \sum_{i \in N}\sum_{k \in N} c_{ik}z_{ik} + \sum_{i \in N}\sum_{i<j}\sum_{k \in N}\sum_{m \in N}q_{ikjm}z_{ik}z_{jm} \label{eq_Q_obj}\\
\mbox{s.t.} & & \sum_{k \in N}z_{ik} = 1 \qquad i \in N \label{eq_Q_1}\\
& & z_{ik} \leq z_{kk} \qquad i,k \in N, i\neq k \label{eq_Q_2}\\
& & \sum_{k \in N} z_{kk} \leq p \qquad \label{eq_Q_0} \\
& & \sum_{i\neq k} d_i z_{ik} \leq \bar{b}_{k}z_{kk} \qquad k \in N \label{eq_Q_3} \\
& & z_{ik}\in \{0,1\} \qquad i, k \in N, \label{eq_Q_5}
\end{eqnarray}
where $\bar{b}_{k}=\left(b_{k}-d_k \right)$. The three terms in the objective function \eqref{eq_Q_obj} capture the (linear) setup cost of opening facilities, the (linear) assignment cost of customers to facilities, and the (quadratic) interaction cost between facilities and customer assignments. Constraint sets \eqref{eq_Q_1} and \eqref{eq_Q_5} ensure that each node is assigned to exactly one facility while constraints \eqref{eq_Q_2} ensure that customers are assigned to open facilities. Constraint \eqref{eq_Q_0} \modif{states} that at most $p$ facilities can be opened. Finally, constraints \eqref{eq_Q_3} are the capacity restrictions on the amount of demand that can be served by each opened facility. Note that these constraints take into account that if facility $k$ is opened, the demand of node $k$ will always be served by such facility. The non-separable, non-convex quadratic term in the objective function in combination with the capacity and single assignment constraints make QC$p$LP a challenging problem to solve. In the following section, we describe the procedure we followed to linearize the quadratic term in order to obtain tight linear MIP reformulations for the QC$p$LP.
\section{RLT-based Reformulations} \label{RLTsection}
A \textit{standard} strategy \citep{Fortet1960, Glover74} for linearizing the bilinear terms $z_{ik}z_{jm}$ of the objective function \eqref{eq_Q_obj} is to replace them by new \textit{continuous} variables $x_{ikjm}$. The nonlinear relation $x_{ikjm} = z_{ik}z_{jm}$ is then imposed via the following sets of linear constraints:
\begin{align}
& x_{ikjm} \leq z_{ik} && i, j, k, m \in N, i<j \label{std1} \\
& x_{ikjm} \leq z_{jm} && i, j, k, m \in N, i<j \label{std2} \\
& x_{ikjm} \geq z_{ik} + z_{jm} - 1 && i, j, k, m \in N, i<j. \label{std3}
\end{align}
Several improved linearization strategies have been proposed for reformulating zero-one quadratic programs as equivalent linear mixed-integer programs, such as \modif{those} described in \citet{Glover75}, \cite{adams86}, \citet{Sherali2007}, \citet{Liberti2007}, and \cite{caprara08}, among others. In particular, the linearization strategy introduced in \cite{adams86} is known to produce tight reformulations and has been generalized and enhanced to design the so-called \modif{reformulation-linearization technique (RLT)} \citep{Sherali1990,sherali13}. The RLT is a powerful technique whose $n$-th hierarchy produces the convex hull of the mixed integer solutions at the expense of having polynomial terms in the resulting reformulation.
In what follows, we apply the level-1 RLT to QF in order to obtain tight linear MIP reformulations for the QC$p$LP. In the next section, we show how the additional set of continuous variables $x_{ikjm}$ required in a \textit{reduced} reformulation can be projected out using Benders decomposition to obtain an equivalent linear MIP reformulation for the QC$p$LP in the original space of the $z_{ik}$ variables.
The level-1 RLT applied to \modif{QF} consists of the following steps:
\begin{itemize}
\item[(i.)] Form $n^3$ constraints by multiplying the $n$ equality constraints \eqref{eq_Q_1} by each $z_{jm}$, $j,m \in N$:
\begin{align*}
& \sum_{k \in N} z_{ik}z_{jm} = z_{jm} && i, j, m \in N.
\end{align*}
\item[(ii.)] Form $n^3(n-1)+n^2+n^3$ constraints by multiplying the $n(n-1)+1+n$ inequality constraints \eqref{eq_Q_2}, \eqref{eq_Q_0}, \eqref{eq_Q_3} and the bound constraints by each $z_{jm}$, $j,m \in N$:
\begin{align*}
& z_{ik}z_{jm} \leq z_{kk}z_{jm} && i,j,k,m \in N, i \neq k \\
& \sum_{k \in N} z_{kk}z_{jm} \leq pz_{jm} && j,m \in N \\
& \sum_{i\neq k} d_i z_{ik}z_{jm} \leq \bar{b}_{k}z_{kk}z_{jm} && j,k,m \in N \\
& z_{ik}z_{jm} \leq z_{jm} && i,j,k,m \in N \\
& 0 \leq z_{ik}z_{jm} && i,j,k,m \in N.
\end{align*}
\item[(iii.)] Form $n^3(n-1)+n^2+n^3$ constraints by multiplying the $n(n-1)+1+n$ inequality constraints \eqref{eq_Q_2}, \eqref{eq_Q_0}, and \eqref{eq_Q_3} by each $\left( 1-z_{jm}\right)$, $j,m \in N$. Note that the bound constraints are omitted as they give meaningless inequalities:
\begin{align*}
& z_{ik}\left( 1-z_{jm}\right) \leq z_{kk}\left( 1-z_{jm}\right) && i,j,k,m \in N, i \neq k \\
& \sum_{k \in N} z_{kk}\left( 1-z_{jm}\right) \leq p\left( 1-z_{jm}\right) && j,m \in N \\
& \sum_{\substack{i \in N \\ i\neq k}} d_i z_{ik}\left( 1-z_{jm}\right) \leq \bar{b}_{k}z_{kk}\left( 1-z_{jm}\right) && j,k,m \in N.
\end{align*}
\item[(iv.)] Linearize the above sets of constraints and objective \eqref{eq_Q_obj} by substituting $x_{ikjm}=z_{ik}z_{jm}$, for each $i,j,k,m \in N, i<j$, and add $x_{ikjm} \geq 0$ to obtain the following linear MIP reformulation:
\end{itemize}
\begin{eqnarray}
(RL_1) \quad \min & &
\sum_{k \in N}f_{k} z_{kk} +\sum_{i \in N}\sum_{k \in N} c_{ik}z_{ik} + \sum_{i \in N}\sum_{i<j}\sum_{k \in N}\sum_{m \in N}q_{ikjm}x_{ikjm} \notag\\
\mbox{s.t.} & & \eqref{eq_Q_1}-\eqref{eq_Q_5} \notag \\
& & \sum_{k \in N} x_{ikjm} = z_{jm} \qquad i,j,m \in N, i<j \label{eq_SK_1}\\
& & \sum_{k \in N} x_{jmik} = z_{jm} \qquad i, j, m\in N, i>j\label{eq_SK_2}\\
& & x_{ikjm} \leq x_{kkjm} \qquad i,j,k,m \in N, i \neq k, i<j, k<j \label{eqRLT2a}\\
& & x_{ikjm} \leq x_{jmkk} \qquad i,j,k,m \in N, i \neq k, i<j, k>j \label{eqRLT2b}\\
& & x_{jmik} \leq x_{kkjm} \qquad i,j,k,m \in N, i \neq k, i>j, k<j \label{eqRLT2c}\\
& & x_{jmik} \leq x_{jmkk} \qquad i,j,k,m \in N, i \neq k, i>j, k>j \label{eqRLT2d}\\
& & x_{ikjm} \leq z_{jm} \qquad i,j,k,m \in N, i \neq k, i<j, k<j \label{eqRLTbound}\\
& & \sum_{k < j} x_{kkjm} + \sum_{k > j} x_{jmkk} \leq pz_{jm} \qquad j,m \in N \label{eqRLT1} \\
& & \sum_{i<j} d_i x_{ikjm} + \sum_{i>j} d_i x_{jmik} \leq \bar{b}_{k}x_{kkjm} \qquad j,k,m \in N, k < j \label{eqRLT3a} \\
& & \sum_{i<j} d_i x_{ikjm} + \sum_{i>j} d_i x_{jmik} \leq \bar{b}_{k}x_{jmkk} \qquad j,k,m \in N, k > j \label{eqRLT3b} \\
& & z_{ik} - x_{ikjm} \leq z_{kk} - x_{kkjm} \qquad i,j,k,m \in N, i \neq k, i<j, k<j \label{eqRLT5a}\\
& & z_{ik} - x_{ikjm} \leq z_{kk} - x_{jmkk} \qquad i,j,k,m \in N, i \neq k, i<j, k>j \label{eqRLT5b}\\
& & z_{ik} - x_{jmik} \leq z_{kk} - x_{kkjm} \qquad i,j,k,m \in N, i \neq k, i>j, k<j \label{eqRLT5c}\\
& & z_{ik} - x_{jmik} \leq z_{kk} - x_{jmkk} \qquad i,j,k,m \in N, i \neq k, i>j, k>j \label{eqRLT5d}\\
& & \sum_{k \in N} z_{kk} - \sum_{k < j} x_{kkjm} - \sum_{k > j} x_{jmkk} \leq p\left( 1-z_{jm}\right) \qquad j,m \in N \label{eqRLT4} \\
& & \sum_{i \neq k} d_i z_{ik} - \sum_{i < j} d_ix_{ikjm} - \sum_{i > j}d_i x_{jmik} \leq \bar{b}_{k}\left(z_{kk}-x_{kkjm}\right) \ \ j,k,m \in N, k < j \label{eqRLT6a} \\
& & \sum_{i \neq k} d_i z_{ik} - \sum_{i < j} d_ix_{ikjm} - \sum_{i > j} d_ix_{jmik} \leq \bar{b}_{k}\left(z_{kk}-x_{jmkk}\right) \ \ j,k,m \in N, k > j \label{eqRLT6b} \\
& & x_{ikjm} \geq 0 \qquad i, j, k, m \in N, i<j. \label{eq_SK_3}
\end{eqnarray}
The RLT reformulation of \cite{adams86} contains the standard linearization constraints \eqref{std1}-\eqref{std3}, which are obtained after multiplying $\left( 1-z_{jm}\right)$, $j,m \in N$, by each bound constraint $0\leq z_{ik} \leq 1$, $i,k \in N$. However, these constraints are \modif{implied} by constraints \eqref{eq_SK_1}-\eqref{eq_SK_2} and thus, there is no need to add them to $RL_1$ \modif{\citep{adams86}}. Moreover, an important consequence of this implication is that constraints \eqref{eq_SK_1}-\eqref{eq_SK_2} are sufficient to provide a valid linear MIP formulation of QC$p$LP. That is, constraints \eqref{eqRLT2a}-\eqref{eqRLT6b}, although useful for strengthening the linear programming (LP) relaxation, are actually redundant for the description of the set of feasible solutions to QC$p$LP in the extended space of the $(z,x)$ variables. Therefore, the QC$p$LP can be stated as the following \textit{reduced} linear MIP:
\begin{eqnarray}
(RL_2) \quad \min & &
\sum_{k \in N}f_{k} z_{kk} +\sum_{i \in N}\sum_{k \in N} c_{ik}z_{ik} + \sum_{i \in N}\sum_{i<j}\sum_{k \in N}\sum_{m \in N}q_{ikjm}x_{ikjm} \notag\\
\mbox{s.t.} & & \eqref{eq_Q_1}-\eqref{eq_Q_5}, \eqref{eq_SK_1}, \eqref{eq_SK_2}, \eqref{eq_SK_3}. \notag
\end{eqnarray}
Although both reformulations $RL_1$ and $RL_2$ contain $n^2$ binary variables and $n^3(n-1)/2$ continuous variables, the number of additional constraints required in $RL_1$ is about $n^3(n-1) + 3n^2(n-1)/2 + 2n^2$, whereas in $RL_2$ it is only $n^2(n-1)/2$.
We next provide the results of computational experiments to compare the quality of the LP relaxation bounds obtained with $RL_1$ and $RL_2$, as well as with six additional relaxations, denoted as $RL_3, \dots, RL_8$, to \modif{assess} the marginal contribution to the improvement of the LP bounds when adding specific subsets of constraints of $RL_1$ independently. Table \ref{table1} provides the configurations of the considered relaxations of the RLT-based reformulation.
\begin{table}[htbp]
\TABLE
{Configuration of considered relaxations of the RLT-based reformulation.\label{table1}}
{\begin{tabular}{|c|l|}
\hline \up
Formulation & Combination of RLT inequalities \\
\hline \up
$RL_1$ & Full RLT reformulation\\
$RL_2$ & Only RLT for assignment constraints: (10)--(11)\\
$RL_3$ & $RL_2$ + RLT for linking constraints: (12)--(15) \\
$RL_4$ & $RL_2$ + RLT for linking constraints: (19)--(22) \\
$RL_5$ & $RL_2$ + RLT for all linking constraints: (12)--(15), (19)--(22) \\
$RL_6$ & $RL_2$ + RLT for capacity constraints: (17)--(18) \\
$RL_7$ & $RL_2$ + RLT for capacity constraints: (24)--(25) \\
$RL_8$ & $RL_2$ + RLT for all capacity constraints: (17)--(18), (24)--(25) \\
\hline
\end{tabular}}
{}
\end{table}
Tables \ref{table2} and \ref{table3} provide a comparison of the obtained \%LP gap and computational time (in seconds) needed to solve the LP relaxation, respectively, of each of the considered relaxations. For this experiment, we focus on one well-known particular case of the QC$p$LP, the capacitated hub location problem with single assignments (CHLPSA), and use a set of small-size instances of the AP data set considering $n \in \left\lbrace 20, 25, 40 \right\rbrace$ (see Section \ref{Sec:CompExp} for a detailed description of the instances and the computational setting used throughout our experiments). The entry N.A. corresponds to the cases in which CPLEX failed to solve the associated LP relaxation due to numerical errors encountered during the solution process.
\begin{table}[htbp]
\TABLE
{Comparison of \%LP gaps for different RLT-based reformulations for the CHLPSA. \label{table2}}
{\begin{tabular}{|c|r|r|r|r|r|r|r|r|}
\hline \up
& \multicolumn{8}{c|}{\% LP gap} \\
\hline \up
Instance & $RL_2$ & $RL_3$ & $RL_4$ & $RL_5$ & $RL_6$ & $RL_7$ & $RL_8$ &$RL_1$ \\
\hline \up
20LT & 1.59 & 1.30 & 1.30 & 1.30 & 1.49 & 1.00 & 0.97 & 0.37 \\
20TT & 1.64 & 1.63 & 1.63 & 1.63 & 1.61 & 1.06 & 1.03 & 0.90 \\
25LL & 0.17 & 0.17 & 0.17 & 0.17 & 0.17 & 0.15 & 0.15 & 0.10 \\
25LT & 1.88 & 1.88 & 1.88 & 1.88 & 1.88 & 1.21 & 1.18 & 0.62 \\
25TT & 3.34 & 3.00 & 3.00 & 3.00 & 3.27 & 3.01 & N.A. & N.A. \\
40LT & 0.76 & 0.76 & 0.76 & 0.76 & 0.75 & 0.16 & 0.14 & N.A. \\
40TL & 0.03 & 0.03 & 0.03 & 0.03 & 0.03 & N.A. & N.A. & 0.00 \\
\hline
\end{tabular}}
{}
\end{table}
\begin{table}[htbp]
\TABLE
{Comparison of computational time needed to solve the LP relaxations of different RLT-based reformulations for the CHLPSA. \label{table3}}
{\begin{tabular}{|c|r|r|r|r|r|r|r|r|}
\hline \up
& \multicolumn{8}{c|}{CPU time (seconds)} \\
\hline \up
Instance & $RL_2$ & $RL_3$ & $RL_4$ & $RL_5$ & $RL_6$ & $RL_7$ & $RL_8$ & $RL_1$ \\
\hline \up
20LT & 13 & 172 & 208 & 191 & 158 & 391 & 772 & 39,326 \\
20TT & 11 & 185 & 207 & 217 & 146 & 442 & 834 & 60,384 \\
25LL & 154 & 2,403 & 3,652 & 3,766 & 1,392 & 5,992 & 9,234 & 51,527 \\
25LT & 76 & 923 & 1,338 & 1,339 & 1,059 & 1,904 & 7,817 & 4,736 \\
25TT & 74 & 863 & 1,395 & 1,434 & 1,236 & 3,456 & N.A. & N.A. \\
40LT & 8,152 & 112,210 & 191,358 & 177,974 & 186,659 & 814,479 & 782,425 & N.A. \\
40TL & 9,184 & 141,839 & 239,990 & 233,689 & 183,850 & N.A. & N.A. & 19,410 \\
\hline
\end{tabular}}
{}
\end{table}
From Tables \ref{table2} and \ref{table3}, we note that although there is a clear improvement in the quality of the LP bound when solving $RL_1$, the substantial increase in the CPU time required to solve the associated LP when compared to $RL_2$, does not justify its use. The same is true for the rest of the relaxations ($RL_3$ to $RL_8$). Moreover, adding constraints \eqref{eqRLT2a}-\eqref{eqRLT6b} breaks the decomposability and the network flow structure of the separation problem associated with the Benders reformulation of $RL_2$. For these reasons, we next present a Benders reformulation only for $RL_2$.
\section{Benders Reformulation}\label{Sec:Benders}
Benders decomposition is a well-known partitioning method applicable to MILPs \citep{Benders62}. In particular, it reformulates an MILP by projecting out a set of complicating \modif{continuous} variables to obtain a formulation with fewer variables but typically with a huge number of constraints, which can be separated efficiently via the solution to an LP subproblem, known as the \textit{dual subproblem} (DSP). These new constraints are usually referred to as Benders cuts and involve only the variables kept in the reduced problem, plus one additional continuous variable. Given that only a small subset of these constraints are usually active in an optimal solution, a natural relaxation is obtained by dropping most of them and generating them on the fly as needed within a cutting plane algorithm.
Let $Z$ denote the set of vectors $z$ satisfying constraints \modif{\eqref{eq_Q_1}}--\eqref{eq_Q_5}. For any fixed $\bar{z} \in Z$, the \textit{primal subproblem} (PS) in the space of the $x$ variables is
\begin{align}
(PS) \quad \min \quad & \sum_{i \in N}\sum_{j>i}\sum_{k \in N}\sum_{m \in N} q_{ikjm} x_{ikjm} \notag\\
\mbox{s.t.} \quad
& \sum_{k \in N} x_{ikjm} = \bar{z}_{jm} && i,j,m \in N, i<j \label{eq_PS_1}\\
& \sum_{m \in N} x_{ikjm} = \bar{z}_{ik} && i,j,k \in N, i<j \label{eq_PS_2}\\
& x_{ikjm} \geq 0 && i,j,k,m \in N, i<j. \label{eq_PS_3}
\end{align}
Note that the indices $i,j$ and $k,m$ of constraints \eqref{eq_SK_2} have been swapped in \eqref{eq_PS_2} to better highlight the decomposability of the problem, where subproblems consist of several independent transportation problems. In particular, PS can be decomposed into $n(n-1)/2$ problems PS$_{ij}$, one for each pair $(i,j) \in N \times N, i<j$. Therefore, we can construct the corresponding dual subproblem (DS$_{ij}$) for each $(i,j)$. Let $\alpha$ and $\beta$ denote the dual variables of constraints \eqref{eq_PS_1} and \eqref{eq_PS_2}, respectively. For each $(i,j) \in N \times N, i<j$, the corresponding DS$_{ij}$ can be stated as follows:
\begin{align}
(DS_{ij}) \quad \text{maximize} \quad & \sum_{m \in N}\bar{z}_{jm} \alpha_{ijm} + \sum_{k \in N}\bar{z}_{ik} \beta_{ijk} \label{eq_DSij_obj}\\
\mbox{s.t.} \quad
& \alpha_{ijm} + \beta_{ijk} \leq q_{ikjm} && k,m \in N. \label{eq_DSij_1}
\end{align}
Given the network flow structure of each PS$_{ij}$, a sufficient condition for feasibility of PS$_{ij}$ is $\sum_{k\in N}\bar{z}_{ik}=\sum_{m \in N}\bar{z}_{jm}$, for each $(i,j)\in N \times N, i<j$. This is actually guaranteed by constraints \eqref{eq_Q_1} of $Z$ and as a consequence, PS$_{ij}$ is always feasible. Therefore, DS$_{ij}$ always has at least one optimal solution with a finite objective value. In turn, this implies there is no need to incorporate feasibility cuts to the resulting Benders reformulation. For $i,j \in N, i<j$, let $EP_{ij}$ denote the set of extreme points of the polyhedron of DS$_{ij}$. The optimal value of each DS$_{ij}$ is then equal to
$$\max_{(\alpha, \beta) \in EP_{ij}} \sum_{m \in N}\bar{z}_{jm} \alpha_{ijm} + \sum_{k \in N}\bar{z}_{ik} \beta_{ijk}. $$
Introducing the extra continuous variable $\eta$ for the overall interaction cost, the \textit{Benders reformulation} (BR) associated with $RL_2$ is
\begin{align}
(BR) \quad \min\quad & \sum_{k \in N}f_{k} z_{kk} +
\sum_{i \in N}\sum_{k \in N}c_{ik} z_{ik} + \eta \label{eq_BR_obj_agg}\\
\text{subject to} \quad & \eqref{eq_Q_1}-\eqref{eq_Q_5} \notag\\
\quad & \eta \geq \sum_{i \in N} \sum_{k \in N} \left(\sum_{j<i} \alpha_{jik} + \sum_{j>i} \beta_{ijk}\right) {z}_{ik} && (\alpha, \beta) \in EP, \label{eq_BR_cuts_agg}
\end{align}
where $EP$ is the Cartesian product of the sets of extreme points $EP_{ij}$, $i,j \in N, i<j$ and constraints \eqref{eq_BR_cuts_agg} are the so-called Benders \textit{optimality} cuts. We note that BR contains only the original variables $z$ and one additional continuous variable.
Whereas it has been empirically shown \citep{Magn81} that the disaggregated form of \eqref{eq_BR_cuts_agg}, \modif{with one for each subproblem}, may lead to an improved performance of the Benders decomposition algorithm for some classes of problems, our preliminary computational experiments showed this not to be the case for our problem, since the need to add $n(n-1)/2$ constraints at every iteration leads to a problem that increases too rapidly in size, especially for medium to large-size instances with $n>200$. As shown in \modif{the} next section, the proposed algorithm does however exploit the decomposability of the dual subproblem leading to a reduction of the required CPU time to prove optimality.
\section{An Exact Algorithm for QC$p$LP}\label{Sec:Enhance}
In this section, we present an exact branch-and-cut algorithm based on BR to solve QC$p$LPs. The \textit{standard} Benders decomposition algorithm is an iterative procedure in which at every iteration a relaxed integer master problem, containing only a small subset of Benders cuts, is optimally solved to obtain a dual bound. The dual subproblem is then solved to obtain a primal bound and to determine whether additional Benders cuts are needed in the relaxed master problem. If needed, these cuts are added to the master problem and solved again. This iterative procedure is repeated until the convergence of the bounds is attained, if an optimal solution exists. One of the major drawbacks of this \textit{standard} approach is the need to solve an integer master problem at each iteration. To overcome this difficulty, \modif{\textit{recent}} implementations of Benders decomposition have considered the solution of the Benders reformulation with a standard branch-and-cut framework, in which Benders cuts are separated not only at nodes with integer solutions but also at the nodes with fractional solutions of a single enumeration tree \citep[see, for instance][]{Fisch17,Ortiz2017,Zetina2018}. We use this approach to develop an exact algorithm for QC$p$LP.
In addition, we use the following strategies to speed up the convergence of our branch-and-cut algorithm: i) we exploit the structure of the subproblem to generate Pareto-optimal cuts by efficiently solving network flow problems, ii) we use a simple but effective stabilization procedure for generating cuts, iii) we use a matheuristic to generate high quality solutions, iv) we apply variable elimination tests at the root node, and v) we perform a partial enumeration phase to permanently fix location variables $z_{kk}$ to either one or zero before exploring the enumeration tree.
\subsection{Stabilized Pareto-optimal Cuts}\label{Subsec:Pareto}
It is well known that the selection of Benders cuts plays an important role in the overall convergence of Benders decomposition algorithms. \cite{Magn81} proposed a procedure for obtaining Pareto-optimal cuts, that is, cuts that are not dominated by any other cut. In particular, \cite{Magn81} define the notion of cut dominance as follows. Given two cuts defined by dual solutions $u$ and $u'$ of the form $\theta\geq f(u) +zg(u)$ and $\theta\geq f(u') +zg(u')$, respectively, the cut defined by $u$ dominates the cut defined by $u'$ if and only if $f(u) +zg(u)\geq f(u') +zg(u')$ with strict inequality holding for at least one feasible point $z$ of MP. If a cut defined by $u$ is not dominated by any other cut, then it is a Pareto-optimal Benders cut.
To obtain Pareto-optimal cuts, \cite{Magn81} propose solving an additional linear program similar to DS. It is parameterized by a \textit{core point} of the set $Z$, which is a point in the relative interior of its convex hull. Let $z^0$, $\bar{z}$, and $\Gamma(\bar{z})$ denote a given core point, the current (possibly fractional) solution, and the optimal solution value of DS$_{ij}$, respectively. To determine whether or not there exists a Pareto-optimal cut that is violated by the point $\bar{z}$, for each $i,j \in N, i<j$, we solve the following Pareto-optimal subproblem:
\begin{align}
(DPO_{ij})\quad \text{maximize}\quad &
\sum_{m \in N}z^0_{jm} \alpha_{ijm} + \sum_{k \in N}z^0_{ik} \beta_{ijk} \label{eq_PODSP_obj}\\
\mbox{s.t.} \quad
&\sum_{m \in N}\bar{z}_{jm} \alpha_{ijm} + \sum_{k \in N}\bar{z}_{ik} \beta_{ijk} \geq \Gamma(\bar{z}) \label{eq_PODSP_0} &&\\
& \alpha_{ijm} + \beta_{ijk} \leq q_{ikjm} & & k,m \in N, \label{eq_PODSP_1}
\end{align}
where constraints \eqref{eq_PODSP_0} guarantee that the optimal solution to $DPO_{ij}$ is contained in the set of optimal solutions to the original $DS_{ij}$.
Let $\delta$ be the dual variable associated with constraints \eqref{eq_PODSP_0} and $x_{ikjm}$ be those of constraints \eqref{eq_PODSP_1}. Dualizing DPO$_{ij}$ we obtain the following linear program:
\begin{align}
(PPO_{ij})\quad \min\quad &
\sum_{k \in N}\sum_{m \in N} q_{ikjm}x_{ikjm}-\Gamma(\bar{z}) \delta \label{eq_POPSP_obj}\\
\mbox{s.t.} \quad
& \sum_{k \in N}x_{ikjm}=z_{jm}^0+\bar{z}_{jm}\delta& & m\in N \label{eq_POPSP_1}\\
& \sum_{m \in N}x_{ikjm}=z_{ik}^0+\bar{z}_{ik}\delta& & k\in N \label{eq_POPSP_2}\\
& x_{ikjm}\geq 0& & k, m\in N \\
& \delta\geq 0. & &
\end{align}
Given that $\delta$ affects the right-hand side of flow constraints \eqref{eq_POPSP_1} and \eqref{eq_POPSP_2}, this problem corresponds to a \textit{parametric transportation problem}. Given that $\sum_{m \in N} \bar{z}_{jm} = \sum_{k \in N} \bar{z}_{ik} = 1$, PPO$_{ij}$ can be interpreted as a problem where a rebate of $\Gamma(\bar{z})$ is given for each unit of additional supply/demand shipped. Similar to the Pareto-optimal problem presented in \cite{Magn86} for uncapacitated multi-commodity network design problems, rather than performing a parametric analysis on PPO$_{ij}$ to determine its optimal solution, we use the following result.
\begin{proposition}
Any value of $\delta \geq 1$ is optimal to PPO$_{ij}$.
\end{proposition}
\proof{Proof}
The total demand $\sum_{k \in N}\left( z_{ik}^0+\bar{z}_{ik}\delta\right)$ must flow via some subset of arcs $(k,m) \in N\times N$. At most $\sum_{k \in N}z_{ik}^0 = 1$ of this flow can go through nodes $k \in N$ having $\bar{z}_{ik}=0$. Any additional flow must use arcs incident to nodes $k \in N$ such that $\bar{z}_{ik}>0$, and thus, the marginal flow cost will be precisely $\Gamma(\bar{z})$. Therefore, any value of $\delta \geq 1$ must be optimal for PPO$_{ij}$.
\endproof
An important consequence of this result is that there is no longer a need to know the value $\Gamma(\bar{z})$ to select $\delta$. Therefore, the complexity of generating a Pareto-optimal cut is the same as that of generating a standard Benders optimality cut, i.e., solving $n(n-1)/2$ transportation problems, which can be efficiently solved by using the network simplex algorithm.
In our algorithm, we consider the following family of core points of $Z$ for $p \geq 2$:
$$z^0_{ik}(\tilde{H}) =
\begin{cases}
\frac{p}{|\tilde{H}|}-\epsilon \qquad \quad \ \ i\in \tilde{H}, k \in \tilde{H}, i = k, \\
\frac{1-\left(\frac{p}{|\tilde{H}|} -\epsilon\right)}{|\tilde{H}|-1} \qquad i\in \tilde{H}, k \in \tilde{H}, i \neq k, \\
\frac{1}{|\tilde{H}|} \qquad \qquad \quad i\in N\backslash\tilde{H}, k \in \tilde{H}, i \neq k, \\
\end{cases}
$$
where $0 < \epsilon < \min \left\lbrace \frac{1}{|\tilde{H}|}, \frac{p-1}{|\tilde{H}| - 2} \right\rbrace $, and $\tilde{H} \subseteq N$ denotes the current set of candidate facilities to open at a given node of the enumeration tree.
Although the solution to the Pareto-optimal subproblem $PPO_{ij}$ guarantees that the obtained cut will be non-dominated, in practice some non-dominated cuts may be more useful than others. That is, a core \modif{point} selection strategy is needed to further improve the convergence of the cutting plane algorithm used to solve the LPs at the nodes of the enumeration tree.
\cite{Ortiz2017}, \cite{Zetina17}, and \cite{Zetina2018} present and computationally compare several static and dynamic core point selection strategies to separate Pareto-optimal cuts for multi-level facility location and multi-commodity network design problems. An interesting observation from these studies is that the strategy providing on average the best results is different in each of these works. These strategies can actually be seen as stabilization procedures for generating effective non-dominated cuts. Stabilization is frequently used to improve the convergence of column generation and cutting plane algorithms needed to solve Dantzig-Wolfe decompositions and Lagrangean relaxation \citep{benameur07, Fisch17, pessoa18}.
After performing extensive preliminary experiments, we note that an efficient core point selection strategy is to dynamically update the separation point $\hat{z}^{'}$ at each iteration by considering a convex combination of the separation point $\hat{z}$ considered in the previous iteration and the current point $\bar{z}$ as follows:
\begin{equation}
\label{updatecore}
\hat{z}^{'} = \phi \hat{z} + (1-\phi)\bar{z},
\end{equation}
where $\phi\in \mathbb{R}$, and $0 < \phi < 1$. We initialize the algorithm by using $\hat{z} = z^0(N)$ at the first iteration of the cutting plane algorithm. Each time the reduction tests of Section \ref{Subsub:Elim} modify the current set of candidate facilities to open $\tilde{H} \subseteq N$, we reinitialize $\hat{z}$ as $z^0(\tilde{H})$.
\subsection{A Matheuristic for QC$p$LP}\label{Subsec:Heuristic}
When using branch-and-cut algorithms, it is important to find high quality feasible solutions early on in the process. This leads to smaller search trees since they provide better bounds for pruning and a guide for selecting variables to branch on. Finding near-optimal solutions in a preprocessing stage can be used to perform variable elimination tests that reduce the size of the formulation to be solved \citep{contrerasb,contreras2011b,Alibeyg2018}.
In this section, we present a matheuristic that exploits the information generated during the solution of the root node to effectively explore the solution space. It consists of two phases. The first is a \textit{constructive heuristic} in which the support of the LP relaxation of the Benders reformulation is used to build a reduced linear integer relaxation of $QF$, which is then solved with a general purpose solver to construct an initial feasible solution. The second is a \textit{local search heuristic} in which several neighborhoods are systematically explored to improve the initial solution obtained during the constructive phase. One of such neighborhoods is efficiently explored by solving an MILP with CPLEX. We next describe in detail each of these phases.
\subsubsection{Constructive Heuristic}\label{Subsub:MILPRelax}
Let $\bar{z}^t$ denote the solution at iteration $t$ of the LP relaxation of the Benders reformulation at the root node of the enumeration tree and let $H(\bar{z}^t) = \left\lbrace k \in N: \bar{z}^t_{kk} > 0 \right\rbrace $ correspond to the set of (partially) opened hub facilities in $\bar{z}^t$. We can obtain a feasible solution to QC$p$LP by solving the following reduced (linear) relaxation of SQFLP:
\begin{eqnarray}
(RLF) \quad \min\quad & &
\sum_{k \in H(\bar{z}^t)} f_{k} z_{kk} +
\sum_{i \in N}\sum_{k \in H(\bar{z}^t)} c_{ik}z_{ik} \label{eq_L_obj}\\
\mbox{s.t.} \quad & & \sum_{k \in H(\bar{z}^t)}z_{ik} = 1 \qquad i \in N \label{eq_Q_1red}\\
& & z_{ik} \leq z_{kk} \qquad i \in N, k \in H(\bar{z}^t), i\neq k \label{eq_Q_2red}\\
& & \sum_{k \in H(\bar{z}^t)} z_{kk} \leq p \qquad \label{eq_Q_0red} \\
& & \sum_{i\neq k} d_i z_{ik} \leq \bar{b}_{k}z_{kk} \qquad k \in H(\bar{z}^t) \label{eq_Q_3red} \\
& & z_{ik}\in \{0,1\} \qquad i \in N, k \in H(\bar{z}^t). \label{eq_Q_5red}
\end{eqnarray}
Note that the quadratic term of objective function \eqref{eq_Q_obj} of QF has been relaxed. Whenever $|H(\bar{z}^t)| \ll |N|$, RLF will be substantially smaller than QF. Given that any feasible solution to \eqref{eq_Q_1red}-\eqref{eq_Q_5red} is also feasible for \eqref{eq_Q_1}-\eqref{eq_Q_5}, we can compute a valid upper bound on the optimal solution of QC$p$LP by simply evaluating objective \eqref{eq_Q_obj} using the optimal solution of RLF.
Noting that any instance of the RLF can be transformed into an instance of the well-known \textit{single-source capacitated facility location problem}, one can use the state-of-the-art exact algorithm given in \cite{Gadegaard2018} to solve RLF. However, we solve RLF by using a general purpose solver given that the time spent in solving RLF is negligible when compared to the total CPU time needed by our branch-and-cut algorithm. Finally, we only solve RLF when the support of the LP relaxation of the Benders reformulation changes from one iteration to the next at the root node, i.e., whenever $H(\bar{z}^{t-1}) \neq H(\bar{z}^t)$.
\subsubsection{Local Search Heuristic}\label{Subsub:Localsearch}
The local search heuristic is used to improve the initial solution obtained from the constructive heuristic. It consists of two phases. The first phase, denoted as the \textit{facility modification phase}, uses a variable neighborhood descent (VND) method to systematically explore five neighborhoods that modify both the set of hubs and assignment decisions. The second phase, denoted as the \textit{assignment modification phase}, solves a well-known MILP as an approximation to improve assignment decisions.
In what follows, solutions are represented by pairs of the form $s=(H, a)$ where $H \subseteq N$ denotes the set of opened facilities and $a: N \rightarrow H$ is the assignment mapping, i.e., $a(i)=k$ if customer $i\in N$ is assigned to facility $k \in H$. For any feasible assignment, $h_k$ denotes the available capacity of facility $k$, i.e., $h_k=b_k-\sum_{i:a(i)=k}d_i$.
During the facility modification phase, we use a VND method \citep{brimberg1995variable}. It is based on a systematic search in a set of $r$ neighborhoods, $\mathcal{N}_1,\mathcal{N}_2, \ldots, \mathcal{N}_r$. The VND works by performing a local search in a neighborhood $\mathcal{N}_1$ until a local optimal solution is found. After that, the algorithm switches to neighborhoods $\mathcal{N}_2, \ldots, \mathcal{N}_r$, sequentially, until an improved solution is found. Each time the search improves the best known solution, the procedure restarts using the neighborhood $\mathcal{N}_1$. Our implementation of the VND algorithm explores two types of neighborhood structures. The first type focuses on the reassignment of customers to open facilities, whereas the second type allows the set of open facilities to change. In all cases, we only consider movements that are feasible with respect to constraints \eqref{eq_Q_1}-\eqref{eq_Q_5}.
The \textit{shift} neighborhood considers all solutions that can be obtained from the current one by changing the assignment of exactly one node, whereas the \textit{swap} neighborhood contains all solutions that differ from the current one in the assignment of two nodes. Let $s=(H, a)$ be the current solution, then
$$ \mathcal{N}_{1}(s)= \left\lbrace s'=(H, a'):\exists! i\in N, a'(i)\neq a(i) \right\rbrace ,$$
and
$$ \mathcal{N}_{2}(s)= \left\lbrace s'=(H, a'): \exists i_1, i_2,
a'(i_1)=a(i_2), a'(i_2)=a(i_1), a'(i)=a(i), \forall i \neq
i_1, i_2 \right\rbrace .$$
For exploring $\mathcal{N}_{1}$, we consider all pairs of the form $(i, j)$ where $a(j) \neq i $ and $h_i \geq d_j$. Also, for exploring $\mathcal{N}_{2}$ we consider all pairs of the form $(i_1, i_2)$ where $a(i_1) \neq a(i_2), h_{a(i_1)} + d_{i_1} \geq d_{i_2}$ and $h_{a(i_2)} + d_{i_2} \geq d_{i_1}$. In both cases we perform the first improving move.
We explore three additional neighborhood structures of the second type. They affect the current set of open facilities. The first one considers a subset of feasible solutions that are obtained from the current one by opening a new facility and by assigning some customers to it. That is,
$$\mathcal{N}_{3}(s)\subset \left\lbrace s'=(H', a'): H'=H\cup\left\{k\right\} ; \forall j, a'(j)=r \in H', \sum_{j:a'(j)=r} d_j \leq b_r, \forall r \in H' \right\rbrace.$$
To explore $\mathcal{N}_{3}(s)$, all nodes $k \in N \setminus H$ are considered. Again, let $s=(H,a)$ denote the current solution, $a'(k)$ the new assignment and $\hat{h_r}$ the available capacity of hub $r$. Initially, $a'(p)=a(p)$ for all $p \in N$ and $\hat{h}_r=h_r$ for all $r \in H$. For each potential facility $k \in N \setminus H$, we consider nodes by decreasing order of their demand $d_i$. Node $j$ is reassigned to facility $k$ if $c_{jk} \leq c_{j a(j)}$ and $\hat{h}_k \geq d_j$. If node $j$ is reassigned to facility $k$ we update its assignment and the available capacity of facilities $k$ and $a(j)$.
The second neighborhood structure of the second type considers a subset of feasible solutions that are obtained from the current one by closing a facility and reassigning its assigned customers to other open facilities. That is,
$$\mathcal{N}_{4}(s)\subset \left\lbrace s'=(H', a'): H'=H \setminus \{k\} ; \forall j, a'(j)=r \in H', \sum_{j:a'(j)=r} d_j \leq b_r, \forall r \in H' \right\rbrace. $$
To explore $\mathcal{N}_{4}(s)$, all facilities $k \in H$ are considered. Once more, let $a'(k)$ denote the new assignment and $\hat{h}_r$ the available capacity of facility $r$. Initially, $a'(p)=a(p)$ for all $p \in N$ and $\hat{h}_r=h_r$, for all $r \in H$. For each open facility $k \in H$, we consider its assigned customers in decreasing order of their demand $d_i$. Customer $j$ is reassigned to facility $\hat{m}$, where $\hat{m}= \operatorname{arg\,min} \left\{c_{jm} : \hat{h}_m - d_j \geq 0 , m \in H\setminus \{k\} \right\}$. If node $j$ is reassigned to facility $\hat{m}$, we update its assignment and the available capacity of facilities $\hat{m}$ and $a(j)$.
The last neighborhood of the second type considers a subset of feasible solutions that are obtained by simultaneously closing an open facility and opening a closed one. That is,
$$\mathcal{N}_5(s) \subset \left\lbrace s^{'} = (H^{'},a^{'} ):H^{'}=H \backslash \{m\}\cup\{i\},m \in H, \ i\in N \backslash H \right\rbrace. $$
To explore $\mathcal{N}_5(s)$, all nodes $i \in N \backslash H$ are considered, and a set of solutions is obtained from the current one by interchanging an open facility by a closed one while ensuring capacity is enough to meet demand, and reassigning all the customers to their closest open facility with available capacity.
Once the VND search terminates with a local optimal solution with respect to the above five neighborhoods, we proceed with the assignment modification phase. The objective of this second step is to intensify the search on the assignment decisions by solving a well-known combinatorial optimization problem that arises as a subproblem in the QC$p$LP. In particular, when the location of the facilities is given, the QC$p$LP reduces to a capacitated variant of the quadratic semi-assignment problem \citep{saito09,Rostami18}. Moreover, if we relax the quadratic term of \eqref{eq_Q_obj} for each set of open facilities $H \subseteq N$, we obtain the following \textit{generalized assignment problem} (GAP):
\begin{eqnarray}
(GAP) \quad \min\quad & & \sum_{i \in N}\sum_{k \in H} c_{ik}z_{ik} \label{eq_L_objGAP}\\
\mbox{s.t.} \quad & & \sum_{k \in H}z_{ik} = 1 \qquad i \in N \label{eq_Q_1GAP}\\
& & \sum_{i\neq k} d_i z_{ik} \leq \bar{b}_{k}z_{kk} \qquad k \in H \label{eq_Q_3GAP}\\
& & z_{ik}\in \{0,1\} \qquad i \in N, k \in H. \label{eq_Q_5GAP}
\end{eqnarray}
The solution to the GAP can be seen as an intensification mechanism in which we focus on improving the assignment decisions for all customers simultaneously, while taking explicitly into account the capacity restrictions at facilities but relaxing the quadratic term in the objective. In case the solution to the GAP for a given set $H$ obtained from the local optimal solution of the first step modifies some customers' assignments, we perform a simple VNS on the obtained solution in which only the shift and swap neighborhoods are explored. Similar to the MILP solved in the constructive phase, one can use the state-of-the-art exact algorithm given in \cite{avella10} to solve GAP. However, we solve GAP by using a general purpose solver given that the time spent in solving it is negligible compared to the total CPU time needed by our branch-and-cut algorithm.
\subsection{Model Reduction Techniques}\label{Subsec:ModelRed}
When solving large-scale optimization problems, reducing the size of the formulation without compromising optimality is crucial to reduce the computational time. Having a high quality feasible solution suggests the use of elimination tests and variable fixing procedures to significantly reduce the size of the formulation to be solved in the branch-and-cut algorithm.
\subsubsection{Elimination Tests}\label{Subsub:Elim}
First proposed in \citet{crowder1980}, elimination tests have been shown to be an effective tool for identifying variables that will not be in the optimal solution and can therefore be discarded. They use linear programming principles to conclude that a chosen variable will have a value of zero at an optimal solution. The procedure is based on the following well-known proposition written in terms of our Benders reformulation $BR$.
\begin{proposition}
Let $UB$ and $LB$ be an upper bound and an LP relaxation-based lower bound to $BR$, respectively, and $rc_{k}$ be the reduced cost coefficient of $z_{kk}$, $k\in N$, at an optimal solution of any linear relaxation of $BR$ in which only a subset of Benders cuts is included. If $LB + rc_{k} > UB$, then $z_{kk} = 0$ at an optimal solution to $\mbox{BR}$.
\end{proposition}
This proposition allows the elimination of a variable from the formulation without compromising optimality. By verifying the condition for all non-basic location variables every time new constraints are added to the LP relaxation of BR, a significant number of variables may be eliminated. Note that we focus only on the location variables $z_{kk}$, $k\in N$, since removing one of them leads to the elimination of all its related variables $z_{ik}$, $\forall i \in N$, thereby reducing the size of BR by $|N|$.
\subsubsection{Partial Enumeration}\label{Subsub:Partial}
Similar to the idea of strong branching \citep{Applegate95}, partial enumeration creates a set of what-if scenarios to measure the impact that fixing a variable $z_{kk}$, $k\in N$ to an arbitrary value $\pi$ has on the LP of the formulation. If the optimal solution value of the linear program with fixed variable $z_{kk}$ is larger than that of a known upper bound, then that variable will not have a value of $\pi$ at an optimal solution to BR. This method is particularly useful when the variable can only take two possible values as is the case with the binary location variables.
Since there are two possible what-if scenarios, one can create, for each variable in the partial enumeration scheme, a simple rule to identify good candidates. If, for a given location variable $z_{kk}$, $k\in N$, its value is less than or equal to 0.2, we then solve a linear programming problem with the added constraint $z_{kk}=1$. If the resulting optimal solution value is greater than the best upper bound known, we can then make $z_{kk}$ and its corresponding assignment variables equal to 0 (i.e., remove them from the formulation). We refer to this procedure as PE$_0$, which stands for partial enumeration to fix at 0.
If the value of $z_{kk}$ is greater than or equal to 0.8, we then solve a linear programming problem with the added constraint $z_{kk}=0$. If the resulting optimal solution value is greater than the best upper bound known, we then fix $z_{kk}=1$. We refer to this procedure as PE$_1$, which stands for partial enumeration to fix at 1.
Both the elimination test and partial enumeration procedure are performed as a part of a preprocessing phase in which the linear relaxation of BR is solved. This reduces the size of the mixed binary formulation used in the branch-and-cut algorithm, and of the modified transportation problems $\mbox{PPO$_{ij}$}$ solved to obtain Benders cuts.
\subsection{Some Implementation Details of the Branch-and-Cut Algorithm for QC$p$LP}\label{Sec:CompleteAlgo}
Having explained each of the algorithmic enhancements of our procedure, we now present the complete exact algorithm which is divided into two parts. The first is the root-processing routine, in which the linear programming relaxation of BR is solved and variables are dynamically eliminated and fixed. The second part is the branch-and-cut algorithm where the mixed binary program is solved to proven optimality. The transportation problem PPO$_{ij}$ is used to separate new Benders cuts at every node of the tree at a depth of multiples of $\gamma$.
The preprocessing phase is an iterative procedure in which the LP relaxation of BR without any Benders cuts is first solved. The matheuristic is then called using the support (set of variables with non-zero solution value) of the LP relaxation to obtain a feasible solution and to potentially update the incumbent. We then perform the variable elimination procedure and, if the iteration number is divisible by $\phi$, we also perform the partial enumeration to close facilities (PE$_0$). We then proceed to generate a Benders cut by solving our modified transportation problem $\mbox{PPO$_{ij}$}$ for each $i,j\in N$ and add it to BR if the cut is violated by at least $\epsilon$. We then reoptimize the BR with its newly added constraint. If the resulting optimal solution value is larger than the previous by a margin of at least $\kappa$, the variable elimination procedure is executed again and the entire process is repeated. If the improvement is less than $\kappa$, we then execute the partial enumeration methods PE$_1$ followed by PE$_0$. The resulting formulation is then used in the mixed binary programming phase.
Upon completing the preprocessing phase, the resulting mixed binary program BR is of a significantly smaller size than the original formulation. The reduced BR along with the previously generated Benders cuts are then inputted into a solver that executes a branch-and-cut process and infers additional general mixed integer cuts to strengthen the formulation. Up to $\Upsilon$ additional Benders cuts are obtained when exploring nodes at a depth of multiples of $\gamma$ of the branch-and-bound tree and are added only if the current fractional solution violates them by at least $\epsilon$. For correctness, the tolerance to add Benders cuts at integer solutions is set to 0. Finally, we also impose that in the branching framework, priority be given to location variables $z_{kk}$, to ensure the impact of opening and closing facilities is evaluated first. The source code can be downloaded from \url{https://sites.google.com/view/carloszetina/Research/Publications}.
\section{Computational Experiments} \label{Sec:CompExp}
We next present the results of extensive computational experiments performed to \modif{assess} the performance of our branch-and-cut algorithm. Given that the star-star hub network design problem is a special case of hub location problems and that the instances are not publicly available, our experiments are based on the well-studied, well-benchmarked single-allocation hub location problems. In the first part of the experiments, we focus on a comparison between our exact algorithm and other exact algorithms reported in the literature for four particular cases of QC$p$LP arising in hub location: the \textit{uncapacitated hub location problem with single assignments} (UHLPSA), the \textit{uncapacitated $p$-hub median problem with single assignments} (U$p$HMPSA), the \textit{capacitated hub location problem with single assignments} (CHLPSA), and the \textit{capacitated $p$-hub median problem with single assignments} (C$p$HMPSA). In the second part of the experiments, we test the robustness and limitations of our exact algorithm on large instances involving up to 1,000 nodes. All algorithms were coded in C using the callable library for CPLEX 12.9.0 and run on an Intel Xeon E5 2687W V3 processor at 3.10 GHz with 750 GB of RAM under a Linux environment. The separation and addition of Benders optimality cuts within the branch-and-cut algorithm has been implemented via \textit{lazy callbacks} and \textit{user cut callbacks}. For a fair comparison, all use of CPLEX was limited to one thread and the traditional MIP search strategy. The following parameter values were, in order of appearance, used in our final implementation: $\phi=0.5$, $\epsilon=100$, $\kappa=0.1$, $\Upsilon=2$, and $\gamma=10$.
\subsection{Comparison with Alternative Solution Algorithms}\label{Subsec:Compare}
We now present a comparison between our branch-and-cut algorithm for QC$p$LP and the most recently proposed exact algorithms for UHLPSA \citep{Meier2018,Ghaffarinasab2018}, U$p$HMPSA \citep{Ghaffarinasab2018}, and CHLPSA \citep{contreras2011b,Meier2018}. To the best of our knowledge, these are the state-of-the-art algorithms for solving the respective problems to proven optimality. We also present the results of our algorithm for the C$p$HMPSA. However, to the best of our knowledge, there is no ad hoc exact algorithm in the literature for the C$p$HMPSA. All computational experiments in this section are performed using the well-known Australian Post (AP) set of instances. It consists of the Euclidean distances $d_{ij}$ between 200 postal districts in \modif{Sydney}, Australia, and of the values of $w_{ij}$ representing postal flows between pairs of postal districts. From this set of instances, we have selected those with $|N| \in \left\lbrace 100, 125, 150, 175, 200 \right\rbrace$, $p \in \left\lbrace 5, 10, 15, 20 \right\rbrace$ and with setup costs and capacities of the types loose (L) and tight (T) \citep[see][]{contreras2011b}. We can represent any instance of the above-mentioned hub location problems as an instance of the QC$p$LP by setting the costs in the objective as $c_{ik} = \left(\chi O_i + \delta D_i \right)d_{ik}$ and $q_{ikjm} = \tau w_{ij}d_{km}$, where $O_i=\sum_{j \in N} w_{ij}$, $D_i=\sum_{j \in N} w_{ji}$, and $\chi$, $\tau$, $\delta$ represent the unit collection, transfer, and distribution costs, respectively. For the AP data set, these unit costs are set to $\chi=2$, $\tau=0.75$, and $\delta=3$.
Our comparisons are done with \cite{Meier2018}, \cite{Ghaffarinasab2018}, and \cite{contreras2011b} which are, to the best of our knowledge, the state of the art ad hoc exact solvers for their respective problem variants. \cite{Meier2018} use C$^{\#}$ to call Gurobi 6.0 running on a processor at 3.4 GHz while \cite{Ghaffarinasab2018} use Java to call CPLEX 12.6 on a processor at 3.30 GHz. Finally, \cite{contreras2011b} use C to develop a branch-and-price solver run on a processor at 2.33 GHz.
Our first comparison is for the UHLPSA where there exist fixed set-up costs for locating facilities but both capacity and cardinality constraints are relaxed. We compare our algorithm to the results presented in \cite{Meier2018} and \cite{Ghaffarinasab2018}. The detailed results of the comparison between the exact methods using the AP data set are provided in Table \ref{UHLPSAtable}. The first two columns are the instance name and optimal objective function value. These are then followed by the time in seconds reported by \cite{Ghaffarinasab2018} and \cite{Meier2018} and the information of our algorithm's performance. The column under the heading \textit{\%Dev heur} reports the percent deviation between the best solution obtained with the matheuristic presented in Section \ref{Subsec:Heuristic} and the optimal solution. The column \textit{\%fixed plants} gives the percent of facilities that were closed at the end of the partial enumeration phase. The column \textit{\%time root} provides the percent of time spent by the algorithm solving the root node, including the matheuristic, the elimination tests, and the partial enumeration phase. The last column reports the number of nodes explored in the enumeration tree.
\begin{table}[htbp]
\TABLE
{Comparison of state-of-the-art exact algorithms for the uncapacitated hub location problem with single assignments using the AP data set. \label{UHLPSAtable}}
{\begin{tabular}{|rr|r|r|rrrrr|}
\hline \up
& & \multicolumn{1}{c|}{Ghaffarinasab} & \multicolumn{1}{c|}{Meier and} & \multicolumn{5}{c|}{Branch-and-cut algorithm} \\
& & \multicolumn{1}{c|}{and Kara (2018)} & \multicolumn{1}{c|}{Clausen (2018)} & & \%Dev & \%fixed & \%time & \multicolumn{1}{l|}{BB} \\
Instance & Opt. & time(s) & time(s) & time(s) & heur & plants & root & \multicolumn{1}{l|}{nodes} \\
\hline \up
100LT & 238,016.28 & 6.91 & 82.42 & 23.76 & 0.00 & 97 & 68 & 70 \\
100TT & 305,097.95 & 3.24 & 60.23 & 2.32 & 1.52 & 98 & 68 & 0 \\
125LT & 227,949.00 & 43.30 & 411.46 & 42.89 & 0.00 & 98 & 37 & 122 \\
125TT & 258,839.68 & 16.08 & 188.05 & 8.54 & 0.00 & 98 & 21 & 9 \\
150LT & 225,450.09 & 107.24 & 1,259.22 & 76.05 & 0.00 & 99 & 39 & 172 \\
150TT & 234,778.74 & 26.30 & 478.38 & 59.06 & 0.00 & 99 & 5 & 227 \\
175LT & 227,655.38 & 188.17 & 2,044.77 & 89.86 & 0.00 & 99 & 70 & 51 \\
175TT & 247,876.80 & 44.55 & 1,639.21 & 69.84 & 0.00 & 99 & 8 & 180 \\
200LT & 233,802.98 & 68.18 & 5,493.47 & 290.87 & 0.00 & 99 & 65 & 276 \\
200TT & 272,188.11 & 1,399.53 & 20,292.35 & 102.27 & 0.46 & 97 & 29 & 136 \\
\hline \up
& Geom. mean & 45.15 & 782.85 & 41.96 & 0.00 & 98 & 31 & 0 \\
& Arith. mean & 190.35 & 3,194.96 & 76.55 & 0.20 & 98 & 41 & 124 \\
\hline
\end{tabular}}
{}
\end{table}
The results in Table \ref{UHLPSAtable} indicate that our exact algorithm \modif{is, on average, an order of magnitude faster than the cutting plane algorithm} of \cite{Meier2018}, which explicitly \modif{requires the assumption} of Euclidean distances. \modif{On the other hand, our algorithm takes on average half of the computation time required by the iterative Benders decomposition algorithm of \cite{Ghaffarinasab2018}}. In addition, the matheuristic is capable of finding an optimal solution in 9 out of 10 instances.
Our second comparison is for the U$p$HMPSA, where the fixed set-up costs for locating facilities are disregarded but a cardinality \modif{constraint} on the number of open hubs is imposed. Also, capacity limitations on the facilities are not considered. Once more, we compare our algorithm to the results presented in \cite{Meier2018} and \cite{Ghaffarinasab2018} for solving the U$p$HMPSA. The detailed results of the comparison between the exact methods using the AP data set are provided in Table \ref{UpHMPSAtable}.
\begin{table}[ht!]
\TABLE
{Comparison of state-of-the-art exact algorithms for the uncapacitated p-hub median problem with single assignments using the AP data set. \label{UpHMPSAtable}}
{\begin{tabular}{|rrr|r|r|rrrrr|}
\hline \up
& & & \multicolumn{1}{c|}{Ghaffarinasab} & \multicolumn{1}{c|}{Meier and} & \multicolumn{5}{c|}{Branch-and-cut algorithm} \\
& & & \multicolumn{1}{c|}{and Kara (2018)} & \multicolumn{1}{c|}{Clausen (2018)} & & \%Dev & \%fixed & \%time & \multicolumn{1}{l|}{BB} \\
Instance & p & Opt. & time(s) & time(s) & time(s) & heur & plants & root & \multicolumn{1}{l|}{nodes} \\
\hline \up
100 & 5 & 136,929.44 & 313.80 & 356.39 & 23.26 & 0.00 & 86 & 82 & 32 \\
100 & 10 & 106,469.57 & 109.18 & 32.22 & 34.33 & 0.00 & 82 & 84 & 14 \\
100 & 15 & 90,533.52 & 144.40 & 85.95 & 41.92 & 0.00 & 67 & 66 & 79 \\
100 & 20 & 80,270.96 & 61.47 & 33.54 & 26.35 & 0.00 & 72 & 82 & 10 \\
125 & 5 & 137,175.68 & 1,286.48 & 1,104.31 & 79.83 & 0.04 & 88 & 58 & 111 \\
125 & 10 & 107,092.09 & 414.08 & 184.62 & 69.69 & 0.00 & 81 & 89 & 13 \\
125 & 15 & 91,494.56 & 1,271.86 & 465.08 & 81.34 & 0.00 & 69 & 65 & 132 \\
125 & 20 & 81,471.65 & 213.76 & 111.03 & 107.45 & 0.00 & 67 & 63 & 149 \\
150 & 5 & 137,425.90 & 2,989.83 & 1,474.76 & 153.27 & 0.00 & 91 & 72 & 178 \\
150 & 10 & 107,478.12 & 1,148.01 & 412.53 & 159.79 & 0.00 & 92 & 82 & 68 \\
150 & 15 & 92,050.58 & 1,695.15 & 405.57 & 256.22 & 0.03 & 68 & 52 & 142 \\
150 & 20 & 82,229.39 & 531.99 & 185.34 & 195.07 & 0.00 & 78 & 70 & 114 \\
175 & 5 & 139,354.51 & 31,347.15 & 10,699.58 & 535.70 & 0.00 & 92 & 54 & 342 \\
175 & 10 & 109,744.35 & 10,551.64 & 3,023.02 & 895.67 & 0.61 & 57 & 41 & 236 \\
175 & 15 & 94,123.66 & 19,602.93 & 8,143.73 & 627.45 & 0.04 & 66 & 42 & 402 \\
175 & 20 & 83,843.59 & 1,778.11 & 271.03 & 429.19 & 0.00 & 79 & 61 & 167 \\
200 & 5 & 140,062.65 & 127,546.79 & 17,628.38 & 1,156.65 & 0.07 & 79 & 50 & 367 \\
200 & 10 & 110,147.66 & 46,706.90 & 4,957.31 & 1,302.66 & 0.00 & 83 & 39 & 1036 \\
200 & 15 & 94,459.20 & 26,640.56 & 1,107.48 & 769.89 & 0.21 & 66 & 52 & 151 \\
200 & 20 & 84,955.36 & 27,224.48 & 526.08 & 1,141.73 & 0.00 & 75 & 37 & 1007 \\
\hline \up
& & Geom. mean & 2,195.82 & 613.97 & 198.46 & 0.00 & 76 & 60 & 122.88 \\
& & Arith. mean & 15,078.93 & 2,560.40 & 404.37 & 0.05 & 77 & 62 & 237.5 \\
\hline
\end{tabular}}
{}
\end{table}
The results in Table \ref{UpHMPSAtable} show once more that our exact algorithm \modif{is on average three and two orders of magnitude faster than those of \cite{Ghaffarinasab2018} and \cite{Meier2018}, respectively}. The matheuristic is capable of finding an optimal solution in 14 out of 20 instances, and the average deviation is only 0.05\%. It is worth noting that the U$p$HMPSA seems to be more difficult to solve as compared to the UHLPSA. The largest CPU time for the UHLPSA was about six minutes (200 nodes with loose setup costs) whereas for the U$p$HMPSA it was one hour (200 nodes and $p=20$).
Our third comparison is for the CHLPSA, where the fixed set-up costs and capacities of facilities are taken into account but the cardinality constraint is disregarded. We compare our algorithm to the results presented in \cite{Meier2018} using a branch-and-cut algorithm and \cite{contreras2011b} using a branch-and-price algorithm for solving the CHLPSA. The detailed results of the comparison between the exact methods using the AP data set are provided in Table \ref{CHLPSAtable}.
\begin{table}[ht!]
\TABLE
{Comparison of state-of-the-art exact algorithms for the capacitated hub location problem with single assignments using the AP data set. \label{CHLPSAtable}}
{\begin{tabular}{|rr|r|r|rrrrr|}
\hline \up
& & \multicolumn{1}{c|}{Meier and} & \multicolumn{1}{c|}{Contreras} & \multicolumn{5}{c|}{Branch-and-cut algorithm} \\
& & \multicolumn{1}{c|}{Clausen (2018)} & \multicolumn{1}{c|}{ et al. (2011d)} & & \%Dev & \%fixed & \%time & \multicolumn{1}{l|}{BB} \\
Instance & Opt. & time(s) & time(s) & time(s) & heur & plants & root & \multicolumn{1}{l|}{nodes} \\
\hline \up
100LL & 246,713.97 & 176.31 & 459.89 & 19.11 & 0.00 & 93 & 54 & 68 \\
100LT & 256,155.33 & 141.23 & 347.95 & 27.88 & 0.06 & 95 & 29 & 155 \\
100TL & 362,950.09 & 174.03 & 124.92 & 40.49 & 0.59 & 92 & 14 & 242 \\
100TT & 474,068.96 & 1,122.14 & 328.11 & 95.35 & 0.37 & 93 & 6 & 943 \\
125LL & 239,889.33 & 415.41 & 1,650.57 & 45.15 & 0.02 & 93 & 52 & 101 \\
125LT & 251,259.16 & 279.53 & 552.99 & 45.11 & 0.02 & 97 & 41 & 299 \\
125TL & 246,486.69 & 53.24 & 41.22 & 59.04 & 0.00 & 98 & 4 & 302 \\
125TT & 291,807.35 & 109.19 & 322.73 & 27.52 & 0.00 & 97 & 12 & 194 \\
150LL & 234,765.44 & 153.85 & 3,347.21 & 53.01 & 0.35 & 97 & 82 & 8 \\
150LT & 249,797.49 & time out & 11,818.19 & 84.61 & 0.00 & 92 & 50 & 293 \\
150TL & 262,543.08 & 1,419.80 & 1,114.95 & 73.29 & 0.31 & 98 & 13 & 393 \\
150TT & 322,976.47 & 3,243.56 & 4,299.28 & 192.74 & 0.00 & 92 & 7 & 1124 \\
175LL & 227,997.58 & 1,010.55 & 3,418.10 & 119.05 & 0.00 & 98 & 56 & 150 \\
175LT & 251,540.80 & 2,196.45 & 12,408.05 & 131.39 & 0.00 & 95 & 61 & 138 \\
175TL & 244,860.41 & 152.07 & 256.60 & 54.48 & 0.00 & 98 & 11 & 138 \\
175TT & 312,193.78 & 17,634.54 & 4,886.88 & 223.70 & 0.07 & 94 & 7 & 764 \\
200LL & 231,069.50 & 725.40 & 5,813.00 & 210.83 & 0.00 & 98 & 54 & 241 \\
200LT & 267,218.35 & 7,460.18 & 45,874.73 & 980.45 & 0.71 & 86 & 19 & 1441 \\
200TL & 273,443.81 & 902.96 & 869.67 & 163.37 & 0.00 & 98 & 8 & 412 \\
200TT & 290,582.04 & 4,117.31 & 3,211.04 & 346.75 & 0.11 & 98 & 7 & 804 \\
\hline \up
& Geom. mean & 470.16 & 1,376.70 & 88.96 & 0.00 & 95 & 20 & 252 \\
& Arith. mean & 2,183.57 & 5,057.30 & 149.67 & 0.13 & 95 & 29 & 411 \\
\hline
\end{tabular}}
{}
\end{table}
In the case of the CHLPSA, our algorithm \modif{is significantly faster than} the others. In some instances it is up to three orders-of-magnitude faster, e.g., 150LT which was not solved in 12 hours by \cite{Meier2018}. The matheuristic provides an optimal solution for 13 out of 20 instances, while the average deviation is only 0.13\%. The most time consuming instance (200LT) was solved in about eight minutes.
Our last series of experiments in this section is for the C$p$HMPSA, where the fixed set-up costs are disregarded but capacities and cardinality constraints of facilities are taken into account. To the best of our knowledge, there is \modif{no} ad hoc algorithm for solving this hub location variant in the literature. Therefore, in Table \ref{CpHMPSAtable}, we only report the results obtained with our algorithm using the AP data set.
\begin{table}[ht!]
\TABLE
{Results of branch-and-cut algorithm for the capacitated $p$-hub median problem with single assignments using the AP data set. \label{CpHMPSAtable}}
{\begin{tabular}{|rrr|rrrrr|}
\hline \up
& & & \multicolumn{5}{c|}{Branch-and-cut algorithm} \\
& & & & \%Dev & \%fixed & \%time & \multicolumn{1}{l|}{BB} \\
Instance & $p$ & Opt. & time(s) & heur & plants & root & \multicolumn{1}{l|}{nodes} \\
\hline \up
100 & 5 & 137,232.52 & 26.08 & 0.04 & 88 & 70 & 30 \\
100 & 10 & 107,207.72 & 34.69 & 0.00 & 80 & 81 & 12 \\
100 & 15 & 91,283.34 & 40.10 & 0.00 & 65 & 61 & 63 \\
100 & 20 & 81,034.59 & 25.73 & 0.00 & 71 & 76 & 22 \\
125 & 5 & 137,175.68 & 118.32 & 0.00 & 86 & 39 & 313 \\
125 & 10 & 107,092.09 & 65.62 & 0.00 & 90 & 90 & 18 \\
125 & 15 & 91,494.56 & 84.85 & 0.00 & 69 & 62 & 96 \\
125 & 20 & 81,471.65 & 127.93 & 0.00 & 67 & 53 & 188 \\
150 & 5 & 137,425.90 & 152.15 & 0.00 & 90 & 74 & 117 \\
150 & 10 & 107,478.12 & 159.42 & 0.00 & 92 & 83 & 82 \\
150 & 15 & 92,050.58 & 278.94 & 0.03 & 67 & 51 & 163 \\
150 & 20 & 82,229.39 & 203.05 & 0.00 & 75 & 69 & 82 \\
175 & 5 & 139,354.51 & 544.00 & 0.00 & 92 & 55 & 462 \\
175 & 10 & 109,744.35 & 856.88 & 0.61 & 57 & 45 & 205 \\
175 & 15 & 94,123.66 & 652.80 & 0.01 & 67 & 42 & 343 \\
175 & 20 & 83,843.59 & 474.33 & 0.00 & 79 & 61 & 182 \\
200 & 5 & 140,062.65 & 1,018.33 & 0.08 & 82 & 62 & 260 \\
200 & 10 & 110,147.66 & 1,274.57 & 0.29 & 70 & 44 & 616 \\
200 & 15 & 94,459.20 & 911.05 & 0.21 & 66 & 46 & 176 \\
200 & 20 & 84,955.37 & 1,720.54 & 0.00 & 75 & 28 & 1084 \\
\hline \up
& & Geom. mean & 211.39 & 0.00 & 75.61 & 57.32 & 126.91 \\
& & Arithm. mean & 438.47 & 0.06 & 76.32 & 59.61 & 225.7 \\
\hline
\end{tabular}}
{}
\end{table}
The results of Table \ref{CpHMPSAtable} show that our exact algorithm can optimally solve all considered instances in less than one hour. In fact, the most time consuming instance (200 nodes and $p=20$) took only 30 minutes to be solved. The matheuristic is capable of finding an optimal solution in 13 out of 20 instances, and the average deviation is only 0.06\%. \modif{ It is worth noting that the C$p$HMPSA seems to be more difficult to solve as compared to the CHLPSA. The largest CPU time for the CHLPSA is about eight minutes (200 nodes with loose setup costs and tight capacities) whereas for the C$p$HMPSA it is 30 minutes (200 nodes and $p=20$)}.
\subsection{Results for Larger Instances}\label{Subsec:Largeinstances}
As seen in Section \ref{Subsec:Compare}, our algorithm scales well to larger instances while either being competitive or significantly faster than \modif{tailored exact algorithms that exploit problem-specific structure for all considered problem variants}. This leads to the question of what are the size limits of our proposed algorithm. In this section, we present the results of solving the large-scale instances first presented in \cite{contrerasb} as a single-assignment variant. During this last set of experiments, we focus on the two variants in which our algorithm performed the best for the AP data set: the UHLPSA and CHLPSA.
\cite{contrerasb} introduce a new data set containing three different sets of instances with diverse structural characteristics in the flow network. They consider different levels of magnitude for the amount of flow originating at a given node to obtain three different sets of nodes: low-level (LL) nodes, medium-level (ML) nodes, and high-level (HL) nodes. The total outgoing flow of LL, ML, and HL nodes lies in the intervals $[1,10]$, $[10,100]$, and $[100,1{,}000]$, respectively. In this section, we use the first set of instances, called \textit{Set I}, in which the number of HL, ML, and LL nodes is 2\%, 38\%, and 60\% of the total number of nodes, respectively. We use the instance generation code of \cite{contrerasb} to generate instances with $N=$ 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, and 1,000. To generate setup costs and capacities, we use the same procedure as described in \cite{contrerasb,contreras11b}.
Tables \ref{LargeUncap} and \ref{LargCap} present the detailed results of our branch-and-cut algorithm for the UHLPSA and CHLPSA, respectively, using the \textit{Set I} instances. In the case of the UHLPSA, instances with up to 700 nodes can be optimally solved in less than half of a day of CPU time. The largest-size instances with 750 to 1,000 nodes were optimally solved in CPU times of less than three days. From our observations, these results are remarkable given that the largest instance with 1,000 nodes requires 499,501,000,000 variables in the $RL_2$ formulation. Similar results are obtained for the more challenging CHLPSA, in which instances with up to 800 nodes can be solved in less than one day of CPU time. The largest instances from 850 to 1,000 nodes require two to three days to prove optimality. Moreover, the matheuristic is capable of finding the optimal solution to 13 out of 15 uncapacitated instances and in 9 out of 15 for the capacitated case, with an average deviation of only 0.01\%.
\begin{table}[ht!]
\TABLE
{Results for the uncapacitated hub location problem with single assignments using \textit{Set I} data set. \label{LargeUncap}}
{\begin{tabular}{|rr|rrrrr|}
\hline \up
& & \multicolumn{5}{c|}{Branch-and-cut algorithm} \\
& & & \%Dev & \%fixed & \%time & \multicolumn{1}{l|}{BB} \\
Instance & Opt. & time(s) & heur & plants & root & \multicolumn{1}{l|}{nodes} \\
\hline \up
250 & 7,349,579.02 & 451.85 & 0.03 & 94 & 42 & 340 \\
300 & 7,499,305.16 & 1,103.00 & 0 & 96 & 49 & 687 \\
350 & 9,361,597.79 & 2,203.65 & 0 & 98 & 59 & 745 \\
400 & 22,060,422.84 & 2,821.72 & 0 & 95 & 66 & 699 \\
450 & 28,765,424.39 & 6,884.78 & 0 & 96 & 62 & 1560 \\
500 & 35,679,997.04 & 13,536.08 & 0.01 & 95 & 31 & 4007 \\
600 & 46,396,548.35 & 18,716.31 & 0 & 99 & 83 & 1489 \\
650 & 50,183,822.62 & 31,409.75 & 0 & 99 & 84 & 1679 \\
700 & 59,187,662.18 & 43,505.48 & 0 & 98 & 75 & 3767 \\
750 & 67,824,339.65 & 63,430.23 & 0 & 98 & 83 & 3293 \\
800 & 74,897,428.36 & 69,898.70 & 0 & 98 & 88 & 1493 \\
850 & 84,429,662.33 & 142,740.82 & 0 & 97 & 66 & 6296 \\
900 & 91,522,770.07 & 99,430.21 & 0 & 98 & 83 & 2537 \\
950 & 100,422,604.52 & 182,577.62 & 0 & 99 & 77 & 7277 \\
1000 & 111,339,940.06 & 226,876.92 & 0 & 98 & 62 & 7248 \\
\hline \up
& Geom. mean & 19,303.40 & 0.00 & 97 & 65 & 1987 \\
& Arithm. mean & 60,372.47 & 0.00 & 97 & 67 & 2874 \\
\hline
\end{tabular}}
{}
\end{table}
\begin{table}[ht!]
\TABLE
{Results for the large-scale capacitated hub location problem with single assignments using \textit{Set I} data set. \label{LargCap}}
{\begin{tabular}{|rr|rrrrr|}
\hline \up
& & \multicolumn{5}{c|}{Branch-and-cut algorithm} \\
& & & \%Dev & \%fixed & \%time & \multicolumn{1}{l|}{BB} \\
Instance & Opt. & time(s) & heur & plants & root & \multicolumn{1}{l|}{nodes} \\
\hline \up
250 & 7,880,912.40 & 439.14 & 0.04 & 94 & 58 & 207 \\
300 & 7,611,876.55 & 1,194.01 & 0.00 & 96 & 43 & 991 \\
350 & 9,445,459.03 & 6,524.52 & 0.00 & 95 & 18 & 5338 \\
400 & 22,828,621.74 & 5,976.75 & 0.05 & 94 & 29 & 2259 \\
450 & 29,715,150.35 & 20,463.62 & 0.20 & 92 & 19 & 4968 \\
500 & 36,500,689.64 & 5,221.38 & 0.00 & 98 & 81 & 498 \\
600 & 47,878,571.44 & 30,162.95 & 0.02 & 96 & 56 & 2385 \\
650 & 51,115,098.98 & 57,811.47 & 0.00 & 98 & 48 & 10082 \\
700 & 60,386,958.62 & 50,582.63 & 0.00 & 97 & 54 & 5968 \\
750 & 68,382,804.86 & 49,143.18 & 0.01 & 99 & 78 & 3002 \\
800 & 75,011,490.17 & 60,505.13 & 0.00 & 99 & 86 & 2065 \\
850 & 85,547,053.64 & 102,411.16 & 0.01 & 99 & 80 & 4037 \\
900 & 92,987,214.82 & 111,830.99 & 0.00 & 99 & 75 & 5260 \\
950 & 101,245,457.94 & 212,247.85 & 0.00 & 99 & 59 & 14463 \\
1000 & 113,144,817.97 & 227,549.00 & 0.00 & 99 & 62 & 8719 \\
\hline \up
& Geom. mean & 23,266.50 & 0.00 & 97 & 51 & 3003 \\
& Arithm. mean & 62,804.25 & 0.02 & 97 & 56 & 4683 \\
\hline
\end{tabular}}
{}
\end{table}
\section{Conclusions} \label{Sec:Conclusions}
In this paper, we have studied a general class of non-convex quadratic capacitated $p$-location problems with single assignments. The quadratic term in the objective function accounts for the interaction cost between facilities and customer assignments. A binary quadratic program was first linearized by applying a reformulation-linearization technique and the associated additional variables were then projected out using Benders decomposition. We proposed an exact branch-and-cut algorithm that incorporated several algorithmic refinements such as stabilized Benders Pareto-optimal cuts, a matheuristic, and variable elimination and partial enumeration procedures. Extensive computational experiments on a set of existing and new large-scale instances with up to 1,000 nodes have clearly confirmed the efficiency and robustness of our algorithm in solving four different particular cases of the studied problem. To the best of our knowledge, the new instances are by far the largest and most difficult ever solved for any type of hub location problem.
\ACKNOWLEDGMENT{
This research was partially funded by the Canadian Natural Sciences and Engineering Research Council under grants 2018-06704, 2017-01746, and by a Research \& Publication Grant, Indian Institute of Management Ahmedabad. The second author would also like to acknowledge the support and productive research environment provided by Andrea Lodi at the Canada Excellence Research Chair in Data Science for Real-Time Decision-Making as well as IVADO Labs. This support is gratefully acknowledged.
}
\end{document} |
\begin{document}
\title{``Disproof of Bell's Theorem" : more critics.}
\author{ Philippe Grangier}
\affiliation{Laboratoire Charles Fabry de l'Institut d'Optique, \\CNRS, Univ Paris-Sud,
CD128, 91127 Palaiseau, France}
\begin{abstract}
In a series of recent papers \cite{p1,p2,p3} Joy Christian claims to have ``disproved Bell's theorem". Though his work is certainly intellectually stimulating, we argue below that his main claim is unwarranted.
\end{abstract}
\maketitle
In a series of recent papers, Joy Christian introduced a model which violates Bell's inequalities, and which agrees with quantum mechanics, though at first sight it does look ``local and realistic".
For details the reader is referred to \cite{p1,p2,p3}, and here we will assume that the content of these papers is correct, and use the same notations. The essential features of the proposed model are thus :
\begin{itemize}
\item the ``measurement results" denoted as $A_a(\mu) = \mu.a$ and $A_b(\mu) = \mu.b$ are algebraic quantities (bivectors) which do not commute, and which depend on the hidden variables ($\mu$, which is a trivector), the analysers directions ($a$ or $b$, which are unit vectors), and a sign $\pm 1$ which tells the outcome of the measurement, given $\mu$, and either $a$ or $b$.
\item when averaged over $\mu$, the correlation functions deduced from these algebraic ``measurement results" are real numbers, which exactly reproduce quantum mechanical predictions, and thus violate Bell's inequalities.
\end{itemize}
However, what is still lacking in that model is a way to extract the ``sign'', i.e.\ the result of each individual (dichotomic) measurement, from the algebraic quantities $\mu.a$ or $\mu.b$. As written in \cite{p3}, we still need ``a yet to be discovered physical theory, which, when measured, should reproduce the binary outcomes $\pm 1$''.
But whatever is this theory, it will give a real-valued function equal to $\pm 1$ for each particle and each measurement device, i.e. for each $\mu$, $a$ and $b$, and thus it will have to obey Bell's theorem... This brings us back to step 0 : nothing is wrong with Bell's theorem.
In other words, though it seems that the only ``available" information in the algebraic quantities $\mu.a$ and $\mu.b$ are their signs, averaging over non-commuting algebraic quantities is certainly not equivalent to averaging over commuting real functions \cite{reply1}. So the model proposed by Joy Christian lacks an essential ingredient~: it does not provide a physical way to extract the ``measurement results" from the algebraic quantities which are introduced. And whatever method will be used, what has to be averaged according to Bell's reasoning are the real-valued functions (i.e. the measurement results), {\bf not } the algebraic quantities \cite{reply2}. Then Bell's theorem still tells us that the whole construction must be either non-local and/or non-realistic \cite{note1}, or in conflict with quantum mechanics.
More generally, Bell's theorem cannot be ``disproved", in the sense that its conclusions follow from its premises in a mathematically correct way. On the other hand, one may argue that these premises are unduly restrictive, but this is discussing, not disproving \cite{pg}. Here the conclusion is that extracting the ``sign" of a bivector is a non-trivial operation~: this is an interesting mathematical remark, but not a challenge for Bell's theorem.
In order to finally conclude this discussion, it may be enlightening to consider the following ``toy model"~:
\begin{itemize}
\item The ``hidden variable" is a random variable $\epsilon$, with values $\pm 1$ with equal probabilities. If $\epsilon =1$, the particle goes along the Stern-Gerlach axis $a$, and if $\epsilon =-1$, it goes opposite to it. For correlated particles 1 and 2 with $\epsilon_1 = - \epsilon_2 = \epsilon$ (singlet state), this model obeys Bell's inequalities, and gives $S_{Bell}=2$.
\item But now let us change the nature of the ``measurement result", and consider that it is the vector $\epsilon_1 a$ for particle 1, and $\epsilon_2 b$ for particle 2. In addition, let us define the ``correlation function" for these two ``observables" \cite{reply2} by the scalar product $(\epsilon_1 a).(\epsilon_2 b)$. Since $\epsilon_1 \epsilon_2 = -1$ in all cases, the averaged correlation function is $-a.b$ alike quantum mechanics, and thus violates Bell's inequalities. According to the terminology of \cite{p1,p2,p3}, this is a ``local realistic model disproving Bell's inequalities".
\end{itemize}
The crucial point in this toy model is the following : by computing correlation functions with just the same rules as in \cite{p1,p2,p3}, the ``ordinary vectors" do just the same job as the ``sophisticated bivectors" : they violate Bell's inequalities. Therefore for achieving this goal the ``sophisticated bivectors" are completely useless.
So once again : the basic problem in \cite{p1,p2,p3} is that the mathematical objects which are used to calculate the ``correlation functions" are simply not the good ones. According to Bell's formalism, which is based on usual classical statistics, these objects must be the measurement results $\pm 1$. But as soon as these objects are changed, and that new rules are introduced for computing new ``correlation functions", one can easily violate Bell's inequalities, no matter whether the objects are ``ordinary vectors" or ``sophisticated bivectors".
Obviously, anybody may define new ways for calculating correlation functions if he wishes so, but this is not ``disproving Bell's theorem", because this moves too far out from Bell's hypothesis. What is at stake is rather an alternative formulation of Quantum Mechanics, and then the questions go to completely different grounds : is this new formulation correct ? is it useful ? can it handle more complicated situations such as multiparticle entanglement ? etc... Answering such questions is certainly more interesting than agitating useless polemics about an inexistent and irrelevant ``disproving".
\vskip 2mm
\centerline{-o-o-o-}
\vskip 2mm
Useful exchanges of ideas with Valerio Scarani and Gregor Weihs are acknowledged.
\begin{thebibliography}{1}
\bibitem{p1}
``Disproof of Bell's Theorem by Clifford Algebra Valued Local Variables", Joy Christian, arXiv:quant-ph/0703179
\bibitem{p2}
``Disproof of Bell's Theorem: Reply to Critics", Joy Christian, arXiv:quant-ph/0703244
\bibitem{p3}
``Disproof of Bell's Theorem: Further Consolidations", Joy Christian, arXiv:0707.1333 [quant-ph]
\bibitem{note1} Actually, by opposition to e.g. Bohm's model which is considered to be ``realistic but non-local", the present model can be said to be ``local but non-realistic", since one cannot associate simultaneous ``elements of reality" to the two non-commuting measurements $a$ and $a'$ on one side (or $b$ and $b'$ on the other side). See e.g. in~\cite{p3} :``It is crucial to note here that a given bivector $\mu . n$ cannot be spinning either “up” or “down”, or in any other way, about any other direction but $n$ (...) Thus, our observables $A_a(\mu)$ and $B_b(\mu)$ represent quite faithfully what is actually observed in a Stern-Gerlach type experiment." This is consistent with calculating the correlation functions algebraically (like in quantum mechanics), and not from real-valued functions (which would lead to Bell's theorem).
\bibitem{pg} An alternative view about \cite{p1,p2,p3} may be based on the ``contextual objectivity" point of view~\cite{pgp}~: by considering that the ``measurement result" is the bivector $\mu.a$ rather than simply $\pm 1$, one makes the ``context" (i.e. the value of $a$) an intrinsic part of the measurement result, which certainly makes sense according to \cite{pgp}. Then the algebraic ``hidden variable" $\mu$ can be seen as a way to carry the ``holistic" character of the entangled state, in a different way from the usual quantum formalism (usually $a$ and $b$ are seen as ``measurement parameters" rather than ``measurement results"). It is unclear whether or not such an approach might be interesting as an alternative formulation of quantum mechanics, but again it does not ``disprove" Bell's theorem, and it contradicts local realism just as much as quantum mechanics does.
\bibitem{pgp} ``Contextual objectivity and the quantum formalism",
Philippe Grangier, Int. J. Quant. Inf. {\bf 3:1}, 17 (2005); see also arXiv:quant-ph/0407025.
\bibitem{reply1}
Besides the fact that \cite{p2} seems to ignore what a collegial tone is, it is constantly misinterpreting
when accusing of misinterpretation.
What is meant in this sentence is the obvious fact that
\[ \int (\mu.a) (\mu.b)\, d\rho(\mu) \neq \int \mbox{``sign''}(\mu.a)\, \mbox{``sign''}(\mu.b)\, d\rho(\mu), \] where ``sign'' is any function which gives the measurement result $\pm 1$, knowing $\mu.a$ and $\mu.b$~: clearly the rhs has to obey Bell's theorem, while the lhs has not.
\bibitem{reply2}
It is actually quite revealing that \cite{p2} keeps on using the wording ``observable" rather than ``measurement result". This ``quantum" vocabulary clearly misses that the central issue in Bell's theorem, which is correlating clicks between detectors (corresponding to binary measurement results), and not correlating bivectors (which cannot be given any ``local realistic meaning"). More precisely, knowing the ``sign" of $(\mu.a)$ forbids to tell anything about the ``sign" of $(\mu.a')$, for the same given $\mu$, while in Bell's formalism $E(\lambda, a)$ and $E(\lambda, a')$ are two values of the same function, taken for the same $\lambda$ and two different measurement angles.
So the proposed model \cite{p1} cannot be a local realistic model, it could at best be an alternative formulation of quantum mechanics \cite{note1}, like Bohm's theory is.
\end{thebibliography}
\end{document} |
\begin{document}
\title[An explicit hybrid estimate for $L(1/2+it,\chi)$]{An explicit hybrid estimate
for $L(1/2+it,\chi)$}
\author[G.A. Hiary]{Ghaith A. Hiary}
\thanks{Preparation of this material is partially supported by
the National Science Foundation under agreements No.
DMS-1406190.}
\address{Department of Mathematics, The Ohio State University, 231 West 18th
Ave, Columbus, OH 43210.}
\email{hiary.1@osu.edu}
\subjclass[2010]{Primary 11Y05.}
\keywords{Van der Corput method, Weyl method, exponential sums, Dirichlet
$L$-functions, powerful modulus, hybrid estimates, explicit estimates.}
\begin{abstract}
An explicit hybrid estimate for $L(1/2+it,\chi)$ is derived, where
$\chi$ is a Dirichlet character modulo $q$.
The estimate applies when $t$ is bounded away from zero, and
is most effective when $q$ is powerful,
yielding an explicit Weyl bound in this case.
The estimate takes a particularly simple form if $q$ is a sixth power.
Several hybrid lemmas of
van der Corput--Weyl type are presented.
\end{abstract}
\maketitle
\section{Introduction} \label{intro}
Let $\chi$ be a Dirichlet character modulo $q$.
Let $L(1/2+it,\chi)$ be the corresponding Dirichlet $L$-function
on the critical line.
Let $\tau(q)$ be the number of divisors of $q$.
If $|t|\ge 3$, say, we define
the analytic conductor of $L(1/2+it,\chi)$
to be $\mathfrak{q}:=q|t|$.
We are interested in finding
an explicit hybrid estimate
for $L(1/2+it,\chi)$ in terms of $\mathfrak{q}$ and $\tau(q)$.
Specifically, we would like to find constants
$c$, $\kappa_1$, $\kappa_2$, $\kappa_3$, and $t_0\ge 3$ as small
as possible, such that
\begin{equation}\label{sought bound}
|L(1/2+it,\chi)| \le
c\, \tau(q)^{\kappa_1}\mathfrak{q}^{\kappa_2} \log^{\kappa_3}\mathfrak{q}, \qquad
(|t|\ge t_0).
\end{equation}
If $|t|\le t_0$, then estimating $L(1/2+it,\chi)$
reduces, essentially, to bounding
pure character sums.
Barban, Linnik, and Tshudakov~\cite{blt} gave Big-$O$ bounds
for such sums, as well as some applications.
The convexity bound in our context is $L(1/2+it,\chi)\ll
\mathfrak{q}^{1/4}$.
This can be derived using the standard method of the
approximate functional equation.
Habsieger derived such an approximate equation
in \cite{habsieger}.
And we use this
in \textsection{\ref{bounds proofs}} to prove that
if $\chi$ is a primitive character\footnote{We consider the principal character
as neither primitive nor imprimitive.}
modulo $q>1$, then we have the convexity bound
\begin{equation}\label{convexity bound}
|L(1/2+it,\chi)| \le 124.46 \mathfrak{q}^{1/4},\qquad
(\mathfrak{q} \ge 10^9,\,\, |t|\ge \sqrt{q}).
\end{equation}
Previously, Rademacher~\cite{rademacher} derived the explicit bound
\begin{equation}
|L(\sigma+it,\chi)| \le
\left(\frac{q|1+\sigma+it|}{2\pi}\right)^{\frac{1+\eta-\sigma}{2}}\zeta(1+\eta),
\end{equation}
valid if $0<\eta\le 1/2$, $\sigma\le 1+\eta$, and $\chi\pmod q$ is
primitive. This is nearly a convexity bound
except for an additional $\eta>0$ in the exponent.
Using partial summation,
we obtain an explicit bound applicable for any $t$.
Specifically, if $\chi$ is primitive modulo $q>1$,
then we obtain in \textsection{\ref{bounds proofs}} that
\begin{equation}\label{partial summation bound}
|L(1/2+it,\chi)|\le 4 q^{1/4}\sqrt{(|t|+1)\log q}.
\end{equation}
The bound \eqref{partial summation bound}
is weaker than the convexity bound in general,
but it can be useful in the limited region where $t$ is small.
Our main result is Theorem~\ref{main theorem}.
This theorem supplies the first example of
an explicit hybrid Weyl bound
(i.e.\ with $\kappa_2=1/6$ in \eqref{sought bound})
for an infinite set of Dirichlet $L$-functions; namely,
the set of Dirichlet $L$-functions corresponding to powerful moduli.
Theorem~\ref{main theorem} takes a particularly
simple form if
$q$ is a sixth power and $\chi$ is primitive,
yielding Corollary~\ref{main theorem simple} below.
\begin{corollary}\label{main theorem simple}
Let $\chi$ be a primitive Dirichlet character modulo $q$.
If $q$ is a sixth power, then
\begin{equation}
|L(1/2+it,\chi)| \le 9.05 \tau(q) \mathfrak{q}^{1/6}
\log^{3/2} \mathfrak{q},\qquad (|t|\ge 200).
\end{equation}
\end{corollary}
In the notation of \eqref{sought bound},
Corollary~\ref{main theorem simple} asserts that
if $q$ is a sixth power and $\chi$ is primitive, then the choice
$c=9.05$, $\kappa_1=1$, $\kappa_2=1/6$, $\kappa_3=3/2$, and $t_0=200$
is admissible. The constant $\kappa_3=3/2$ arises
from two sources: a dyadic division
that contributes $1$, and the Weyl
differencing method (see \cite[\textsection{5.4}]{titchmarsh})
which contributes $1/2$.
The constant $\kappa_1=1$ arises, in part, when counting
the number of solutions to quadratic congruence equations
in the Weyl differencing method.
The $\kappa_2=1/6$ arises from proving that, on average, square-root
cancellation occurs in certain short
segments of the dyadic pieces
$\sum_{V\le n< 2V}\frac{\chi(n)}{n^{1/2+it}}$.
The constant $c=9.05$ is largely contributed by the part of the main
sum over $\mathfrak{q}^{1/3}\ll n\ll \mathfrak{q}^{2/3}$.
Last, the constant $t_0=200$ is due to technical reasons,
and can be lowered with some work.
We state the main theorem below.
See \textsection{\ref{main notation}} for the definitions of
$\sqf(q)$, $\cbf(q)$, $\spf(q)$, $B$, $B_1$, $D$, and $\Lambda(D)$.
For now we remark that if $\chi$ is primitive, then $B=B_1=1$.
And if $q$ is a sixth power, then
$\sqf(q)=\cbf(q)=\spf(q) = 1$. The number $\Lambda(D)$ is bounded by $\tau(D)$,
and $D$ is usually of size about $q^{1/3}$.
\begin{theorem}\label{main theorem}
Let $\chi$ be a Dirichlet character modulo $q$.
If $|t|\ge 200$, then
\begin{equation}
|L(1/2+it,\chi)| \le \mathfrak{q}^{1/6} Z(\log \mathfrak{q})+W(\log
\mathfrak{q})
\end{equation}
where
\begin{equation*}
\begin{split}
Z(X)&:= 6.6668 \sqrt{\cbf(q)} - 16.0834 \spf(q) +15.6004 \spf(q)X\\
&+1.7364\sqrt{\Lambda(D) \cbf(q)(65.5619 - 17.1704 X - 2.4781 X^2 + 0.6807 X^3)}\\
&+1.7364 \sqrt{\Lambda(D) \cbf(q) B \tau(D/B) (-1732.5 - 817.82 X +71.68 X^2
+ 47.57 X^3)},\\
&\\
W(X)&:= -101.152 - 195.696 B_1 \sqf(q)+ 19.092 X + 94.978 B_1 \sqf(q) X.
\end{split}
\end{equation*}
\end{theorem}
For many applications,
it suffices to focus on the case where
$\chi$ is primitive.
For if not, then letting $\chi_1\pmod{q_1}$ be the primitive
character inducing $\chi$ and using
the Euler product and analytic continuation of $L(s,\chi)$,
we have
\begin{equation}\label{imprimitive to primitive}
|L(1/2+it,\chi)|
\le |L(1/2+it,\chi_1)|\prod_{\substack{p|q\\ p\nmid q_1}}(1+1/\sqrt{p}).
\end{equation}
Thus, we obtain an explicit bound on $L(1/2+it,\chi)$
by bounding $L(1/2+it,\chi_1)$
and using the inequality \eqref{imprimitive to primitive}.
In our proof of Theorem~\ref{main theorem}, though,
we bound general sums of the form \eqref{general f sum},
and keep track of the dependence on $B$ and $B_1$.
The main devices in our proofs
are the hybrid van der Corput--Weyl
Lemmas \ref{corput lemma 1 1} and \ref{corput lemma 2 2}.
These lemmas provide explicit bounds for sums of the form
\begin{equation}\label{general f sum}
\sum_{n=N+1}^{N+L} \chi(n)e^{2\pi i f(n)},
\end{equation}
where we take $f(x) = -\frac{t}{2\pi}\log x$ in our application.
A pleasant feature of the resulting bounds
is that they naturally split into
two main terms, one originating from $\chi(n)$
and the other originating from $n^{-iqt}$.
In particular, we can detect cancellation in the $q$ and
$t$ aspects separately, then
combine the savings routinely
using the well-spacing Lemma~\ref{well spacing lemma}.
The starting point in our proof of Theorem~\ref{main theorem}
is the Dirichlet series
\begin{equation}\label{dirichlet series}
L(1/2+it,\chi) = \sum_{n=1}^{\infty} \frac{\chi(n)}{n^{1/2+it}},
\end{equation}
valid for $\chi$ nonprincipal. (If $\chi$ is principal, we use a bound
for the Riemann zeta function.) We partition the sum in \eqref{dirichlet series}
into four parts: $1\ll n\ll
\mathfrak{q}^{1/3}$ which is bounded trivially,
$\mathfrak{q}^{1/3}\ll n\ll \mathfrak{q}^{2/3}$ for which
Lemma~\ref{corput lemma 1 1} is used,
$\mathfrak{q}^{2/3} \ll n\ll \mathfrak{q}$
for which Lemma~\ref{corput lemma 2 2} is used, and
the tail $\mathfrak{q}\ll n$ which is bounded using the P\'olya--Vinogradov
inequality.
We remark that the restriction in Corollary~\ref{main theorem simple}
that $q$ is a sixth
power may be relaxed to $q$ is a cube provided that one starts with a main sum of
length about $\sqrt{\mathfrak{q}}$ (as in the approximate functional equation)
instead of the main sum \eqref{dirichlet series}.
One then applies van der Corput lemmas
analogous to those in \cite{hiary-corput}, but for the twisted sums
\eqref{general f sum}.
Interest in powerful modulus $L$-functions has
grown recently, both from theoretical and computational
perspectives.
Mili\'cevi\'c~\cite{milicevic}
has recently derived sub-Weyl bounds for pure character sums to prime-power modulus.
And the author~\cite{hiary-char-sums} had derived an algorithm
to compute hybrid sums to
powerful modulus in $\mathfrak{q}^{1/3+o(1)}$ time.
If $q$ is smooth (but not necessarily powerful)
or prime, then one
can obtain explicit hybrid subconvexity bounds
by deriving
an explicit version of Heath-Brown's $q$-analogue of
the van der Corput method
in \cite{heath-brown-1}, and an explicit version of
Heath-Brown's hybrid Burgess
method in \cite{heath-brown-2}.
\section{Notation}\label{main notation}
Let $\chi$ be a Dirichlet character modulo $q$.
We factorize the modulus
\begin{equation}
q := p_1^{a_1}\cdots p_{\omega}^{a_{\omega}},
\end{equation}
where the $p_j$ are distinct primes and $a_j\ge 1$.
For each prime power $p^a$, we define
\begin{equation}
C_1(p^a):=p^{\lceil a/2\rceil},\qquad D_1(p^a):=p^{a-\lceil a/2\rceil},
\end{equation}
then extend the definitions multiplicatively; i.e.\
$C_1(q) = C_1(p_1^{a_1})\cdots C_1(p_{\omega}^{a_{\omega}})$.
In addition, we define
\begin{equation}
C(p^a):= p^{\lceil a/3\rceil},
\qquad
D(p^a):=\left\{\begin{array}{ll}
1 & a = 1,\\
p^{a-2\lceil a/3\rceil+1} & p = 2 \textrm{ and } a > 1,\\
p^{a-2\lceil a/3\rceil} & p\ne 2 \textrm{ and } a > 1,
\end{array}\right.
\end{equation}
then extend the definitions multiplicatively.
Since the quantities $C_1(q)$, $D_1(q)$, $C(q)$, and $D(q)$ will appear often,
it is useful to introduce the short-hand notation
$C_1:= C_1(q)$, $D_1:=D_1(q)$, $C:=C(q)$, and $D:=D(q)$.
For example, $C_1D_1=q$.
Some additional arithmetic factors will appear in our estimates:
$(m,n)\ge 0$ is the greatest common divisor
of $m$ and $n$,
$\omega(m)$ is the number of distinct prime divisors of $m$, and
$\Lambda(m)$ is the number of solutions of the congruence
$x^2\equiv 1\pmod m$ with $0\le x < m$. Explicitly,
\begin{equation}
\Lambda(m)= \left\{\begin{array}{ll}
2^{\omega(m) -1}, & m \equiv 2\pmod 4,\\
2^{\omega(m)},& m\not \equiv 2\pmod{4} \textrm{ and } m\not\equiv 0\pmod 8,\\
2^{\omega(m)+1}, & m\equiv 0\pmod 8.
\end{array}\right.
\end{equation}
We define $\Lambda := \Lambda(D)$, and
\begin{equation}\label{prf def}
\begin{split}
&\sqf(p^a) := p^{\lceil a/2\rceil -a/2},\qquad
\cbf(p^a) :=p^{\lceil a/3\rceil - a/3}, \\
&\spf(p^a) := \frac{p^{\lceil a/2\rceil -\lceil a/3\rceil/2 - a/6}}
{\sqrt{D(p^a)}},
\end{split}
\end{equation}
then extend the definitions multiplicatively.
Note that $\sqf(q)$ is determined by the primes $p_j|q$ such that
$a_j\not\equiv 0\pmod 2$ and
$\cbf(q)$ by the primes $p_j$ such that
$a_j\not\equiv 0\pmod 3$.
If $q$ is a square, then $\sqf(q)=1$. If $q$ is a cube, then $\cbf(q)=1$.
And if $q$ is a sixth power, then $\sqf(q)=\cbf(q)=1$
and $\spf(q)\le 1$.
The numbers $B$ and $B_1$ that appear
in Theorem~\ref{main theorem} are defined in
Lemma~\ref{postnikov lemma}.
In the remainder of the paper, we use the following notation:
$\exp(x)=e^x$ is the usual exponential function,
$[x]$ is the closest integer to $x$,
$\|x\|$ is the distance from $x$ to the closest integer, $\bar{\ell}\pmod C$
is the modular inverse of $\ell \pmod C$ if it exists, and
\begin{equation}
\sgn(x)=\left\{\begin{array}{ll}
1, &x\ge 0,\\
0, & x< 0.
\end{array}\right.
\end{equation}
\\
\noindent
\textbf{Acknowledgments.} I thank Tim Trudgian for pointing out
the reference \cite{rademacher}.
\section{Preliminary Lemmas}
\begin{lemma}\label{well spacing lemma}
Let $\{y_r : r=0,1,\ldots\}$ be a set of real numbers.
Suppose that there is a number $\delta >0$ such that
$\min_{r\ne r'} |y_r-y_{r'}|\ge \delta$.
If $P\ge 2$ and $y\ge x$ then
\begin{equation}\label{well spacing lemma 1}
\sum_{y_r\in [x,y]} \min(P,\|y_r\|^{-1})\le
2(y-x+1)(2P+\delta^{-1}\log(eP/2)).
\end{equation}
If $P< 2$, then replace the r.h.s.\ by $2(y-x+1)(P+\delta^{-1})$.
\end{lemma}
\begin{proof}
We may assume that $\delta \le 1/2$, otherwise the bounds follow
on trivially estimating the number of points $y_r$ in $[x,y]$ by $2(y-x)+1$
and using the trivial bound $\min(P,\|y_r\|^{-1}) \le P$.
For each integer $k\in [x, y]$, we consider the interval
$[k-1/2,k+1/2]$. There are at most two points
$y_r$ in $[k-\delta,k+\delta)$, say $y_k^+\in [k,k+\delta)$
and $y_k^-\in [k-\delta,k)$.
If no such points exist, then we insert
one (or both) of them
subject to the condition $|y_k^+-y_k^-| \ge \delta$.
To preserve the $\delta$-spacing condition, we slide the remaining
points in $(y_k^+,k+1/2]$ (resp. $[k-1/2,y_k^{-})$) to the right of $y_k^{+}$
(resp. left of $y_k^-$) in the obvious way.
It is possible that a point falls off each edge, in which
case we may discard it.
This is permissible since the overall procedure that we described can
only increase the magnitude of the sum in \eqref{well spacing lemma 1}.
We have $y_k^+ = k+\rho_k \delta$ for some
$\rho_k \in [0,1)$, and so
$y_k^- \le k+(\rho_k-1)\delta$.
Hence, using the inequality
$\min(P,\|y_r\|^{-1})+\min(P,\|y_{r'}\|^{-1})\le \min (2P,\|y_r\|^{-1} +
\|y_{r'}\|^{-1})$, and the formula $\|y_r\| = |y_r-k|$ if $|y_r - k| \le 1/2$,
we obtain
\begin{equation}\label{well spacing lemma 2}
\begin{split}
\sum_{|y_r-k|\le \frac{1}{2}} \min(P,\|y_r\|^{-1})
\le \sum_{0\le r\le \frac{1}{2\delta}}
\min\left(2P,\frac{1}{\delta(r+\rho_k)}+\frac{1}{\delta(r+1-\rho_k)}\right).
\end{split}
\end{equation}
We observe that
\begin{equation}\label{well spacing lemma 3}
\frac{1}{\delta(r+\rho_k)}+\frac{1}{\delta(r+1-\rho_k)}
= \frac{1}{\delta} \frac{2r+1}{r^2+r+\rho_k-\rho_k^2}\le \frac{2}{\delta r}.
\end{equation}
Combining this with the observation
$\frac{2}{r\delta} \ge 2P$ if $r\le \frac{1}{\delta P}$, we conclude
that
\begin{equation}\label{well spacing lemma 4}
\sum_{|y_r-k|\le \frac{1}{2}} \min(P,\|y_r\|^{-1})
\le 2P\lceil 1/\delta P\rceil
+ \sum_{\lceil 1/\delta P\rceil \le r\le 1/2\delta} \frac{2}{r\delta}.
\end{equation}
To bound the sum over $r$, we isolate the first term
and estimate the remainder by an integral.
If $P\ge 2$ (so that the integral below makes sense),
then this gives the bound
\begin{equation}
\sum_{\lceil 1/\delta P\rceil \le r\le 1/2\delta} \frac{2}{r\delta}\le
2P+2\delta^{-1}\int_{1/\delta P}^{1/2\delta} \frac{1}{x}dx.
\end{equation}
The integral evaluates to $\log(P/2)$. Therefore, the r.h.s.\ in
\eqref{well spacing lemma 4} is bounded by
$2\delta^{-1} + 2P+2P + 2\delta^{-1}\log(P/2)$.
So the lemma follows if $P\ge 2$ as the cardinality of
$\{k : x\le k\le y\}$ is $\le y-x+1$.
Finally, if $P< 2$, then the sum on the r.h.s.\ in \eqref{well spacing lemma 4}
is empty, and so the bound is $2\delta^{-1} + 2P$.
\end{proof}
\begin{lemma}\label{exp reduce}
Let $f$ be an analytic function on a disk of radius
$\lambda(L-1)$ centered at $N+1$, where $\lambda > 1$ and $1\le L \in \mathbb{Z}$.
If there is a number $\eta$ and an integer $J \ge 0$ such that
$\frac{|f^{(j)}(N+1)|}{j!}\lambda^j(L-1)^j\le
\frac{\eta}{\lambda^j}$ for $j> J$,
then
\begin{equation}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right| \le
\nu_J(\lambda,\eta) \max_{0\le \Delta < L}
\left|\sum_{n=N+1+\Delta}^{N+L} \chi(n)e^{2\pi i
P_J(n-N-1)}\right|,
\end{equation}
where
\begin{equation}
\begin{split}
P_J(x) := \sum_{j=0}^J \frac{f^{(j)}(N+1)x^j}{j!},\quad
\nu_J(\lambda,\eta):= \left(1+\frac{\lambda^{-J}}{\lambda-1}\right)
\exp \left(\frac{2\pi \eta \lambda^{-J}}{\lambda-1}\right).
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
If $L=1$, the lemma is trivial. So assume that $L>1$.
We apply the Taylor expansion to obtain
\begin{equation}
f(N+1+z) = P_J(z)+\sum_{j>J}
\frac{f^{(j)}(N+1)}{j!} z^j, \qquad (|z| \le \lambda(L-1)).
\end{equation}
Using the Taylor expansion once more,
\begin{equation}
e^{2\pi i (f(N+1+z)-P_J(z))}=:\sum_{j=0}^{\infty} c_j(J,N) z^j, \qquad
(|z| \le \lambda(L-1)).
\end{equation}
So if we define $\nu^*_J := \sum_{j=0}^{\infty} |c_j(J,N)(L-1)^j|$,
then partial summation gives
\begin{equation}\label{weyl 0 est}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right| \le
\nu^*_J \max_{0\le \Delta< L}
\left|\sum_{n=N+1+\Delta}^{N+L} \chi(n) e^{2\pi i P_J(n-N-1)}\right|.
\end{equation}
To estimate the coefficients $c_j(J,N)$, we
use the Cauchy theorem
applied with a circle of radius $\lambda(L-1)$ around the origin.
In view of the growth condition on the derivatives of $f$,
this yields
\begin{equation}
|c_j(J,N)| \le \frac{1}{2\pi}\left|\oint \frac{e^{2\pi i
(f(N+1+z)-P_J(z))}}{z^{j+1}}\,dz\right|
\le \frac{\exp\left(\frac{2\pi \eta\lambda^{-J}}{\lambda-1}\right)}{\lambda^j(L-1)^j}.
\end{equation}
Noting that $c_j(J,N)=0$ for $1\le j\le J$, we therefore deduce that
\begin{equation}
\nu^*_J \le \exp\left(\frac{2\pi \eta\lambda^{-J}}{\lambda-1}\right)
\left[1+ \sum_{j>J} \frac{(L-1)^j}{\lambda^j(L-1)^j}\right]
=\nu_J(\lambda,\eta).
\end{equation}
\end{proof}
\begin{lemma}\label{postnikov lemma}
There exists an integer $\tilde{L}$ such that
$\chi(1+C_1 x) = e^{2\pi i \tilde{L} x/D_1}$
for all $x\in \mathbb{Z}$.
If $\chi$ is primitive, then $B_1:=(\tilde{L},D_1)=1$.
Furthermore, there exist integers $L_0$ and $L$ such that
$\chi(1+C x) = e^{4\pi i L_0 x/CD + 2\pi i L x^2/D}$
for all $x\in \mathbb{Z}$.
If $\chi$ is primitive, then
$L$ can be chosen so that $B:=(L,D)=1$.
\end{lemma}
\begin{proof}
We start with the decomposition
$\chi = \chi_1\cdots\chi_{\omega}$, where $\chi_j$ is a Dirichlet
character modulo $p_j^{a_j}$.
By \cite[Lemma 3.4]{hiary-simple-alg}, there
exists an integer $\tilde{L}_j$ such that
\begin{equation}
\chi_j(1+C_1(p_j^{a_j})x) = e^{2\pi i
\tilde{L}_j x/D_1(p_j^{a_j})}
\end{equation}
for all $x\in \mathbb{Z}$. Hence,
\begin{equation}\label{C1 form}
\chi(1+C_1 x) = \chi_1(1+C_1x) \cdots \chi_{\omega}(1+C_1x)=e^{2\pi i
\tilde{L} x/D_1},
\end{equation}
where, noting that $C_1D_1 = q$, we have
\begin{equation}
\tilde{L}= q\sum_{j=1}^{\omega} \tilde{L}_j/p_j^{a_j}.
\end{equation}
Let $B_1=(\tilde{L},D_1)$.
It is clear that
$\chi(1+ qx/B_1)=1$ for all $x$.
So $q/B_1$ is an induced modulus for $\chi$.
In particular, if $B_1>1$ then $\chi$ is imprimitive.
This completes the proof of the first part of the lemma.
For the second part, we use
\cite[Lemma 4.2]{hiary-char-sums}.
Consider first the case $p_j^{a_j}\not\in\{4,8\}$ and $a_j>1$.
Then there are integers $L_{0,j}$ and $L_j$ such that
\begin{equation}\label{assertion 1}
\chi_j(1+C(p_j^{a_j})x) = e^{\frac{4\pi i L_{0,j} x}{C(p_j^{a_j})D(p_j^{a_j})}+
\frac{2\pi i L_j x^2}{D(p_j^{a_j})}}
\end{equation}
for all $x\in \mathbb{Z}$, and moreover we can take
$L_{0,j}=- L_j$.
If $a_j=1$,
then $C(p_j^{a_j})=p_j^{a_j}$. So $\chi_j(1+C(p_j^{a_j})x) =1$
and we can take $L_{0,j}=L_j=0$.
If $p_j^{a_j} = 4$, then either $L_{0,j}=0$ and $L_j=1$ or $\chi_j$ is principal.
If $p_j^{a_j} = 8$, then either $L_{0,j} = L_j=1$, or $L_{0,j} = 2$ and $L_j=0$
(an imprimitive character),
or $L_{0,j}=-1$ and $L_j=1$, or $\chi_j$ is principal.
Put together, we have
\begin{equation}
\chi(1+C x) = \chi_1(1+Cx) \cdots \chi_{\omega}(1+Cx)=
e^{\frac{4\pi i L_0 x}{CD} + \frac{2\pi i L x^2}{D}}
\end{equation}
where
\begin{equation}\label{cond L L*}
\begin{split}
L_0 = C^2D\sum_{j=1}^{\omega} \frac{L_{0,j}}{C(p_j^{a_j})^2D(p_j^{a_j})},\qquad
L = C^2D\sum_{j=1}^{\omega} \frac{L_j}{C(p_j^{a_j})^2D(p_j^{a_j})}.
\end{split}
\end{equation}
It remains to prove that if $\chi$ is primitive then $B=1$.
To this end, we note that
$\frac{Lq^2}{B^2C^2D}$ is an integer.
So if we show that $\frac{2L_0q}{BC^2D}$ is an integer too,
then $\chi(1+qx/B)=1$ for all $x\in \mathbb{Z}$.
In particular, if $B>1$, then $q/B$ is a nontrivial
induced modulus and $\chi$ is imprimitive,
which completes the proof of the second
part of the lemma.
Now, to show that $\frac{2L_0q}{BC^2D}$ is an integer,
we first note that $\frac{L_0q}{C^2}$ is always an integer. (Recall that
$L_{0,j}=0$ if $a_j=1$.)
Furthermore, if $a_j=1$ then $(D,p_j)=1$ and so $(B,p_j)=1$.
In light of this, we may assume that $a_j>1$ for all $j$.
We consider two possibilities.
If $p_j^{a_j}\not\in\{4,8\}$ for any $j$, then $C^2D=q$ (if $q$ is odd)
or $2q$ (if $q$ is even), and in any case
$L_{0,j} = -L_j$ for all $j$. The last fact implies in turn that
$L_0=-L$, hence $B=(L_0,D)$. In particular, $B$ divides $L_0$
and we conclude that $\frac{2L_0q}{BC^2D}=\frac{L_0}{B}$ or $\frac{2L_0}{B}$
and so is an integer in either case.
On the other hand,
if $p_j^{a_j}\in \{4,8\}$ for some $j$, then $C^2D=2q$ and we appeal to
the remark following \eqref{assertion 1}.
Accordingly, if $\chi$ is primitive and
$p_j^{a_j}\in \{4,8\}$ then
$L_j=1$ and so $L$ must be odd.
This shows that $B=(L,D/2)$. In addition, we have
\begin{equation}
L_0 = L-\left\{\begin{array}{ll}
\frac{C^2(L_j-L_{0,j})}{4}\frac{D}{2} & p_1^{a_1}=4,\\
\frac{C^2(L_j-L_{0,j})}{8}\frac{D}{2} & p_1^{a_1}=8.
\end{array}\right.
\end{equation}
Therefore, given the possibilities for $L_{0,j}$ and $L_j$ stated after
\eqref{assertion 1}, we see if $\chi$ is primitive then
$L_0\equiv L \pmod{D/2}$, and so $B= (L_0,D/2)$.
This shows that $B$ is a divisor of $L_0$, hence
$\frac{2L_0q}{BC^2D}=\frac{L_0}{B}$ is an integer.
\end{proof}
\begin{lemma}\label{gcd sum lemma}
Let $M,N\in \mathbb{Z}_{\ge 1}$, $W_M(m) := 1-m/M$,
and $d_m(N):= (2m,N)$. Then
\begin{equation}\label{gcd sum eq 1}
\begin{split}
\sum_{m=1}^M W_M(m) \frac{d_m(N)}{m} \le \tau(N) \log M,\qquad
\sum_{m=1}^M W_M(m) d_m(N) \le \tau(N) M.
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
We prove the first bound, the second one being analogous.
Let us write $N = 2^aN'$ with $N'$ odd. We induct on $a$.
If $a=0$, then $d_m(N) = (m,N)$ and the result follows
because
\begin{equation}\label{gcd sum lemma 1}
\begin{split}
\sum_{m=1}^M W_M(m) \frac{d_m(N)}{m} &\le \sum_{\substack{r|N\\ r\le 2M}}
\sum_{1\le m'\le M/r} W_M(rm') \frac{1}{m'}\\
&\le \tau(N) \sum_{1\le m'\le M} W_M(m')\frac{1}{m'}\\
&\le \tau(N)\log M.
\end{split}
\end{equation}
If $a=1$, then
$d_m(N) = 2 d_m(N')$.
So using the previous calculation
and observing that $2\tau(N') = \tau(N)$ yields the desired bound.
Henceforth, we may assume that $a\ge 2$.
We may further assume that $M> 2$, for if $M=1$ or $2$ then the lemma is trivial.
Since $N$ is even by hypothesis, then $d_m(N) = 2(m,N/2)$.
Using this, and dividing
the sum over $m$ into even and odd terms, we obtain
\begin{equation}\label{gcd sum lemma 0}
\begin{split}
\sum_{m=1}^M W_M(m) \frac{d_m(N)}{m} &=
2\sum_{1\le m' \le\lfloor M/2\rfloor}W_M(2m')\frac{(2m',N/2)}{2m'} \\
&+2\sum_{0\le m' \le\lfloor (M-1)/2\rfloor}W_M(2m'+1)\frac{(2m'+1,N/2)}{2m'+1}.
\end{split}
\end{equation}
We have $W_M(2m') \le W_{\lceil M/2\rceil}(m')$ and, by definition, $(2m',N/2) =
d_{m'}(N/2)$. It follows by induction that
the first sum on the r.h.s.\ of \eqref{gcd sum lemma 0} is bounded by
$\tau(N/2)\log \lceil M/2\rceil$.
Furthermore, the second sum is clearly bounded by
\begin{equation}
\sum_{0\le m' \le\lfloor (M-1)/2\rfloor}W_M(2m'+1)\frac{(2m'+1,N')}{m'+1/2}
\le 2(1-1/M)+\sum_{1\le m\le M} W_M(m) \frac{d_m(N')}{m},
\end{equation}
which, by induction, is $\le 2(1-1/M)+ \tau(N')\log M$.
Therefore,
using the bound $\log \lceil M/2\rceil \le \log M +1/M -\log 2$
and the formula $\tau(N/2) + \tau(N') = \tau(N)$, we arrive at
\begin{equation}\label{wm eq}
\sum_{m=1}^M W_M(m) \frac{d_m(N)}{m} \le
\tau(N) \log M+(2-2/M +\tau(N/2)/M-\tau(N/2) \log 2).
\end{equation}
We conclude that the bound \eqref{gcd sum eq 1}
holds provided that
$\tau(N/2)\ge 4$. This is always fulfilled if $a\ge 2$ unless
$N=4$ or $8$. But the lemma follows in these cases also by direct calculation.
\end{proof}
\section{Hybrid van der Corput--Weyl lemmas}
\begin{lemma}\label{corput lemma 1 1}
Suppose that $f$ is a function satisfying the hypothesis of Lemma~\ref{exp reduce}
for some $\lambda>1$, $\eta \ge 0$, and
with $J=1$.
If $f(x)$ is real for real $x$, then
\begin{equation}\label{corput lemma 1 1 result}
\begin{split}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right| \le &\,
\frac{2\nu_1(\lambda,\eta) C_1}{\pi}\left(\log \frac{D_1}{2B_1} +\frac{7}{4}+\frac{\pi}{2}\right)\\
&+\frac{\nu_1(\lambda,\eta) C_1}{\pi}\min\left(\frac{\pi B_1 L}{q},
\|qf'(N+1)/B_1\|^{-1}\right).
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
Applying Lemma~\ref{exp reduce} with $J=1$ gives
\begin{equation}\label{temp 1 1 1}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right| \le
\nu_1(\lambda,\eta) \max_{0\le \Delta < L}
\left|\sum_{n=N+1+\Delta}^{N+L} \chi(n)e^{2\pi i
P_1(n-N-1)}\right|,
\end{equation}
where $P_1(x) = f(N+1)+ f'(N+1)x$.
Let $\Delta^*$ be where the maximum is achieved on the r.h.s.\ of
\eqref{temp 1 1 1}. Let $N^*:=N+\Delta^*$ and
$L^*=L-\Delta^*$. So we have
\begin{equation}\label{corput lemma 1 1 0}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right| \le
\nu_1(\lambda,\eta) \left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i
P_1(n-N-1)}\right|.
\end{equation}
We split the range of summation
$N^*+1\le n\le N^*+L^*$ into arithmetic progressions
along the residue classes $\ell \pmod{C_1}$.
For each residue class $0\le \ell < C_1$,
the terms in the progression $n\equiv \ell\pmod {C_1}$
are indexed by the integers $k$ that verify
$N^*+1\le \ell+C_1k \le N^*+L^*$. So we have
$\lceil (N^*+1 -\ell)/C_1\rceil \le k \le \lfloor(N^*+L^*-\ell)/C_1\rfloor$.
Using the formula $\lceil x+\delta \rceil - \lfloor x\rfloor = 1$,
valid for any $x$ and $\delta \in (0,1)$ with $\{x\}+\delta\le 1$,
where $\{x\}$ denotes the fractional part of $x$
(as holds here with $x=(N^*-\ell)/C_1$ and $\delta=1/C_1$),
we deduce that
$\lceil (N^*+1 -\ell)/C_1\rceil - \lfloor (N^*-\ell)/C_1\rfloor = 1$.
Therefore, if we define $H_{\ell}:= \lfloor (N^*-\ell)/C_1\rfloor$,
then each $\ell$ determines an integer $\Omega_{\ell} \le \lceil L^*/C_1\rceil$
such that (we use the triangle inequality below)
\begin{equation}\label{weyl0 sum 0 0}
\left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_1(n-N-1)}\right| \le
\sum_{\ell =0}^{C_1-1}
\left|\sum_{k=H_{\ell}+1}^{H_{\ell}+\Omega_{\ell}}
\chi(\ell+C_1k)e^{2\pi i P_1(\ell+C_1k-N-1)}\right|.
\end{equation}
From Lemma~\ref{postnikov lemma},
and the formula
$\chi(\ell+ C_1 k) =\chi(\ell)\chi(1+C_1\overline{\ell}k)$,
valid for $(\ell,q)=1$,
we deduce that
there are integers $\gamma_1$ and $B_1$ such that
$(\gamma_1,D_1)=1$, $B_1|D_1$, and
\begin{equation}
\chi(\ell+ C_1 k)=
\chi(\ell)e^{2\pi i B_1\gamma_1 \overline{\ell} k/D_1}, \qquad (\ell,q)=1.
\end{equation}
If $(\ell,q)>1$, then $\chi(\ell+C_1k)=0$. Therefore,
\begin{equation}\label{weyl0 sum 0}
\left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_1(n-N-1)}\right| \le
\sum_{\substack{\ell =0\\ (\ell,q)=1}}^{C_1-1}
\left|\sum_{k=H_{\ell}+1}^{H_{\ell}+\Omega_{\ell}}
e^{2\pi i (B_1\gamma_1 \overline{\ell} /D_1+ C_1 f'(N+1))k}\right|.
\end{equation}
Let us define
\begin{equation}
z_f:=\left[\frac{qf'(N+1)}{B_1}\right],\qquad \delta_f:=
\pm \left\|\frac{qf'(N+1)}{B_1}\right\|,
\end{equation}
where $\delta_f$ is positive if
$z_f$ is obtained by rounding down, and negative if
$z_f$ is obtained by rounding up.
In either case, since $D_1C_1=q$ by construction,
we have $C_1f'(N+1)= (z_f+ \delta_f)B_1/D_1$. Therefore,
\begin{equation}\label{Ul}
\left\| \frac{B_1 \gamma_1\overline{\ell}}{D_1}+C_1 f'(N+1)\right\|
= \left\| \frac{\gamma_1\overline{\ell} + z_f+\delta_f} {D_1/B_1}\right\|
=:U_{\gamma_1\overline{\ell}+z_f+\delta_f}.
\end{equation}
In view of this, it follows by the Kusmin--Landau Lemma in \cite[Lemma
2]{cheng-graham} that the inner sum in \eqref{weyl0 sum 0} satisfies
\begin{equation}
\left|\sum_{k=H_{\ell}+1}^{H_{\ell}+\Omega_{\ell}}
e^{2\pi i (B_1\gamma_1 \overline{\ell} /D_1+ C_1 f'(N+1))k}\right|\le
\min\left(\Omega_{\ell},\frac{1}{\pi}
U_{\gamma_1\overline{\ell}+z_f+\delta_f}^{-1} +1\right).
\end{equation}
Given this, we divide the sum over $\ell$ in \eqref{weyl0 sum 0} into segments of length
$D_1/B_1$:
\begin{equation}
\left[\frac{uD_1}{B_1},\frac{(u+1)D_1}{B_1}\right),\quad u\in\mathbb{Z},
\quad 0\le u< \frac{B_1 C_1}{D_1}.
\end{equation}
Over each segment, we can
get an easy handle on $U_{\gamma_1\overline{\ell}+z_f+\delta_f}$.
Indeed, as $\ell$ runs over the reduced residue classes modulo $q$
(hence reduced modulo $D_1/B_1$) in a given segment,
$\gamma_1 \overline{\ell}+z_f$ runs over a subset of the
residue classes modulo $D_1/B_1$, hitting each class at most once.
Therefore, summing over the $B_1C_1/D_1$ segments, and recalling that
$\Omega_{\ell}\le \lceil L/C_1\rceil$ by construction,
we obtain
\begin{equation}\label{corput lemma 1 1 1}
\left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_1(n-N-1)}\right|
\le \frac{B_1 C_1}{D_1} \sum_{\ell \pmod*{D_1/B_1}}
\min\left(\lceil L/C_1\rceil,\frac{1}{\pi}U_{\ell+\delta_f}^{-1}+1\right).
\end{equation}
We choose the residue class representatives mod $D_1/B_1$
to be in $[-D_1/2B_1, D_1/2B_1)$
if $\delta_f\ge 0$,
and in $(-D_1/2B_1, D_1/2B_1]$ if $\delta_f <0$.
In either case, let
$\tilde{\ell}$ denote the representative of $\ell$.
Since $0\le |\delta_f|\le 1/2$, we deduce the formula
\begin{equation}\label{corput lemma 1 1 2}
U_{\ell+\delta_f} =
\left\{\begin{array}{ll}
\displaystyle
\frac{|\tilde{\ell}|+\sgn(\tilde{\ell})\delta_f}{D_1/B_1} &
\displaystyle
\tilde{\ell} \ne 0,\\
&\\
\displaystyle
\frac{|\delta_f|}{D_1/B_1}&
\displaystyle
\tilde{\ell}=0.
\end{array}\right.
\end{equation}
Now, if $\delta_f\ge 0$, we isolate the terms corresponding to
$\tilde{\ell}=0$ and $\tilde{\ell}=-1$ (if they exist)
on the r.h.s.\ of \eqref{corput lemma 1 1 1}.
And if $\delta_f<0$, we isolate the terms for $\tilde{\ell}=0$ and $\tilde{\ell}=1$.
Moreover, we use the lower bound $U_{\pm 1+\delta_f} \ge B_1/2D_1$ to control
the term $\tilde{\ell}=\pm 1$. Then we sum over the remaining $\tilde{\ell}$,
pairing the terms for
$\tilde{\ell}$ and $-\tilde{\ell} -1$ if $\delta_f\ge 0$,
and the terms for $\tilde{\ell}+1$ and $-\tilde{\ell}$
if $\delta_f<0$. In summary, assuming that $D_1/B_1\ge 2$ (so there are at least
two residue classes modulo $D_1/B_1$), we obtain
\begin{equation}\label{corput lemma 1 1 3}
\begin{split}
&\sum_{\ell\pmod*{D_1/B_1}}
\min\left(\lceil L/C_1\rceil,\frac{1}{\pi}U_{\ell+\delta_f}^{-1}+1\right)
\le \min\left(\lceil L/C_1\rceil,\frac{1}{\pi}U_{\delta_f}^{-1}+1\right)\\
&+\left(\frac{2D_1}{\pi B_1}+1\right)
+\left(\frac{D_1}{B_1}-2\right)
+\frac{D_1}{\pi B_1}
\sum_{1\le \ell < \frac{D_1}{2B_1}}\left(\frac{1}{\ell+|\delta_f|}
+\frac{1}{\ell+1-|\delta_f|}\right).
\end{split}
\end{equation}
The second sum over $\ell$ on the r.h.s.\ of \eqref{corput lemma 1 1 3}
is bounded by
\begin{equation}\label{corput lemma 1 1 5}
\sum_{1\le \ell < \frac{D_1}{2B_1}}
\frac{2\ell+1}{\ell^2+\ell+|\delta_f|-\delta_f^2}\le
\frac{3}{2}+\sum_{2\le \ell < \frac{D_1}{2B_1}} \frac{2}{\ell} \le
\frac{3}{2}+2\log\left(\frac{D_1}{2B_1}\right).
\end{equation}
It is easy to check that the last two estimates still hold if $D_1/B_1=1$.
Hence, substituting \eqref{corput lemma 1 1 5} into \eqref{corput
lemma 1 1 3}
we obtain, on noting that $\lceil L/C_1\rceil \le L/C_1+1$,
\begin{equation}
\begin{split}
\sum_{\ell\pmod*{D_1/B_1}}
&\min\left(\lceil L/C_1\rceil,\frac{1}{\pi}U_{\ell+\delta_f}^{-1}+1\right)
\le \min\left(\frac{L}{C_1},\frac{1}{\pi} U_{\delta_f}^{-1} \right)\\
&+\frac{2D_1}{\pi B_1}\left(1+\frac{\pi}{2}+\frac{3}{4}\right)
+\frac{2D_1}{\pi B_1}\log\left(\frac{D_1}{2B_1}\right).
\end{split}
\end{equation}
We multiply the last estimate by the outside factor $B_1C_1/D_1$ in
\eqref{corput lemma 1 1 1}. This gives
\begin{equation}\label{corput lemma 1 1 6}
\begin{split}
\left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_1(n-N-1)}\right|
&\le
\frac{B_1C_1}{D_1}\left(\min\Big(\frac{L}{C_1},\frac{1}{\pi}U_{\delta_f}^{-1}\Big)\right.\\
&+\left.\frac{2C_1}{\pi}\Big(\log
\frac{D_1}{2B_1}+\frac{7}{4}+\frac{\pi}{2}\Big)\right).
\end{split}
\end{equation}
Finally, we use the formula $U_{\delta_f}^{-1} = \|qf'(N+1)/B_1\|^{-1} D_1/B_1$,
and substitute \eqref{corput lemma 1 1 6} back into \eqref{corput lemma 1 1
0}. After straightforward rearrangements, we obtain the lemma.
\end{proof}
\begin{lemma}\label{corput lemma 2 2}
Suppose that $f$ is a function satisfying the hypothesis of Lemma~\ref{exp reduce}
for some $\lambda>1$, $\eta \ge 0$, and
with $J=2$. Let $d_m:=(2m,D/B)$.
If $f(x)$ is real for real $x$, then
\begin{equation}\label{corput lemma 2 2 result}
\begin{split}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right|^2 \le&\,
\frac{4\nu_2(\lambda,\eta)^2\Lambda C L}{\pi}\left(\log
\frac{D}{2B}+\frac{7}{4}+\frac{3\pi}{2\Lambda}\right)\\
&+ \frac{4\nu_2(\lambda,\eta)^2\Lambda
C^2}{\pi}\sum_{m=1}^{\lceil L/C\rceil}\left(1-\frac{m}{\lceil L/C\rceil}\right)
\\
&\times \min\left(\frac{\pi d_m B L}{CD},
\left\|\frac{m C^2Df''(N+1)}{Bd_m}\right\|^{-1}\right).
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
We apply Lemma~\ref{exp reduce} with $J=2$ to the sum.
This yields (similarly to the beginning of the
proof of Lemma~\ref{corput lemma 1 1}) that
\begin{equation}\label{corput lemma 2 2 0}
\left|\sum_{n=N+1}^{N+L} \chi(n) e^{2\pi i f(n)} \right| \le
\nu_2(\lambda,\eta) \left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i
P_2(n-N-1)}\right|,
\end{equation}
where $P_2(x) = f(N+1) + f'(N+1)x+ f''(N+1)x^2/2$
and $[N^*+1,N^*+L^*]\subset [N+1,N+L]$.
We split the range of summation on the r.h.s.\
of \eqref{corput lemma 2 2 0} into arithmetic progressions
along the residue classes $\ell$ modulo $C$.
Letting $K_{\ell}:= \lfloor (N^*-\ell)/C\rfloor$
and $\Delta_{\ell} := \lfloor(N^*+L^*-\ell)/C\rfloor- K_{\ell}$, we have
\begin{equation}\label{corput lemma 2 2 1}
\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_2(n-N-1)} =
\sum_{\ell =0}^{C-1}\sum_{k=K_{\ell}+1}^{K_{\ell}+\Delta_{\ell}}
\chi(\ell + C k) e^{2\pi i P_2(\ell + C k-N-1)}.
\end{equation}
We make use of the following properties of $\Delta_\ell$.
First, by construction, we have
\begin{equation}\label{corput lemma 2 2 2}
\sum_{\ell=0}^{C-1} \Delta_{\ell}=L^* \le L.
\end{equation}
Second, using the periodicity of $\Delta_{\ell}$
as a function of $\ell\pmod C$,
and the change of variable $r \equiv N^*-\ell \pmod C$,
we obtain
\begin{equation}\label{corput lemma 2 2 3}
\sum_{\ell=0}^{C-1} \sqrt{\Delta_{\ell}} =\sum_{r=0}^{C-1}
\sqrt{\lfloor(L^*+r)/C\rfloor}
\le \sum_{r=0}^{C-1}
\sqrt{\lfloor(L+r)/C\rfloor}.
\end{equation}
Furthermore, supposing that $L\equiv \ell_0 \pmod C$, where $0\le \ell_0< C$,
we obtain on considering the summation ranges
$0\le r\le C-\ell_0-1$ and $C-\ell_0\le r \le C-1$
in \eqref{corput lemma 2 2 3} separately that
\begin{equation}
\sum_{r=0}^{C-1} \sqrt{\lfloor(L+r)/C\rfloor} = (C-\ell_0) \sqrt{(L-\ell_0)/C}
+ \ell_0 \sqrt{(L-\ell_0)/C+1}.
\end{equation}
If we view the r.h.s.\ above as a function of $0\le \ell_0 < C$, say $p(\ell_0)$,
then its maximum is achieved when $\ell_0=0$. Thus,
\begin{equation}\label{corput lemma 2 2 4}
\sum_{\ell=0}^{C-1} \sqrt{\Delta_{\ell}} \le p(0)=\sqrt{CL}.
\end{equation}
Also, we have the bound
\begin{equation}\label{corput lemma 2 2 4 0}
\sum_{\ell=0}^{C-1} \Delta_{\ell}^2 \le
\frac{L^2}{C}+(\tilde{\rho}-\tilde{\rho}^2)C,
\qquad \tilde{\rho}:=\ell_0/C.
\end{equation}
We are now ready to return to \eqref{corput lemma 2 2 1}.
Lemma~\ref{postnikov lemma}
asserts that there is a polynomial $g_{\ell}(x)$ of degree $2$ in $x$ such that
\begin{equation}\label{corput lemma 2 2 5}
\chi(\ell+C k) = \chi(\ell)e^{2\pi i g_{\ell}(k)}, \qquad (\ell,q)=1,
\end{equation}
where $g_{\ell}(x) = \alpha_{\ell}x +B \gamma \overline{\ell}^2 x^2/D$,
$(\gamma,q)=1$, and $B|D$.
Therefore, applying the Cauchy--Schwarz inequality to the r.h.s.\ in \eqref{corput lemma 2 2 1},
we obtain
\begin{equation}\label{corput lemma 2 2 6}
\left|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_2(n-N-1)}\right|^2 \le
C\sum_{\substack{\ell=0\\ (\ell,q)=1}}^{C-1}\left|\sum_{k=K_{\ell}+1}^{K_{\ell}+\Delta_{\ell}}
e^{2\pi i Q_{\ell}(k)}\right|^2,
\end{equation}
where $Q_{\ell}(x) := g_{\ell}(x)+P_2(\ell + C x-N-1)$.
We bound the inner sum using the van der Corput--Weyl Lemma
in \cite[Lemma 5]{cheng-graham}.
In fact, we use the more precise form
of the lemma at the bottom of
page 1273.
This form implies that if $M$ is a positive integer then
\begin{equation}\label{corput lemma 2 2 7}
\begin{split}
\Big|\sum_{k=K_{\ell}+1}^{K_{\ell}+\Delta_{\ell}} e^{2\pi i Q_{\ell}(k)} \Big|^2
\le & (\Delta_{\ell}+M)\Big(
\frac{\Delta_{\ell}}{M}+\frac{2}{M}\sum_{m=1}^{M}
\left(1-\frac{m}{M}\right)|S_m'(\ell)|\Big),
\end{split}
\end{equation}
where
\begin{equation}\label{corput lemma 2 2 8}
S_m'(\ell):= \sum_{r = K_{\ell}+1}^{K_{\ell}+\Delta_{\ell}-m} e^{2\pi
i (Q_{\ell}(r+m)- Q_{\ell}(r))}.
\end{equation}
Substituting \eqref{corput lemma 2 2 7} into \eqref{corput lemma 2 2 6},
and using the properties \eqref{corput lemma 2 2 2}
and \eqref{corput lemma 2 2 4 0},
together with the upper
bound $\Delta_{\ell}\le \lceil L/C\rceil$, we obtain
\begin{equation}\label{corput lemma 2 2 9}
\begin{split}
\Big|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_2(n-N-1)}\Big|^2 &\le
CL + \frac{L^2+\tilde{\rho}(1-\tilde{\rho})C^2}{M}\\
&+2C \left(1+\frac{\lceil L/C\rceil}{M}\right)
\sum_{m=1}^{M}\left(1-\frac{m}{M}\right)
\sum_{\substack{\ell=0\\ (\ell,q)=1}}^{C-1}|S_m'(\ell)|.
\end{split}
\end{equation}
Since
$Q_{\ell}(x)$ is a quadratic polynomial, we have the simpler
expression
\begin{equation}
|S'_m(\ell)| = \left|\sum_{r=K_{\ell}+1}^{K_{\ell}+\Delta_{\ell}-m}
e^{2\pi i (2mB \gamma\overline{\ell}^2/D + mC^2 f''(N+1)) r}\right|.
\end{equation}
We plan to bound $S'_m(\ell)$ using the Kusmin--Landau Lemma in \cite[Lemma
2]{cheng-graham}. With this in mind, recall the definition $d_m=(2m,D/B)$.
Let
\begin{equation}
m':=\frac{2m}{d_m},
\qquad P_m:=\frac{D}{Bd_m}.
\end{equation}
Let\footnote{If each prime factor of $q$ occurs with multiplicity $>1$, then
$C^2D= q$ if $q$ is odd, and $C^2D= 2q$ if $q$ is even. So the expressions that
follow can be simplified in this case.}
\begin{equation}
\begin{split}
&w_m:= [P_m m C^2f''(N+1)] = \left[\frac{m' C^2D f''(N+1)}{2B}\right],\\
&\epsilon_m:=\|P_mmC^2f''(N+1)\| =\pm \left\|\frac{m' C^2D f''(N+1)}{2B}\right\|.
\end{split}
\end{equation}
Here, $\epsilon_m$ is positive if
$w_m$ is obtained by rounding down, and negative if
$w_m$ is obtained by rounding up. Hence,
\begin{equation}
\left\| \frac{2 m B \gamma \overline{\ell}^2}{D} + m C^2 f''(N+1)\right\|
= \left\| \frac{m'\gamma \overline{\ell}^2+ w_m
+\epsilon_m}{P_m}\right\|.
\end{equation}
Letting $U_{z,m} := \| z/P_m\|$,
the Kusmin--Landau Lemma furnishes the estimate
\begin{equation}\label{corput lemma 2 2 11}
|S'_m(\ell)| \le
\min\left(\Delta_{\ell}-m,\frac{1}{\pi}
U_{m'\gamma\overline{\ell}^2+w_m+\epsilon_m,m}^{-1}+1\right).
\end{equation}
Therefore, using the inequality $\Delta_{\ell}\le \lceil L/C\rceil$
yields
\begin{equation}
\mathcal{S}_m=
\sum_{\substack{\ell=0\\ (\ell,q)=1}}^{C-1}|S_m'(\ell)|
\le \sum_{\substack{\ell=0\\ (\ell,q)=1}}^{C-1}
\min\left(\lceil L/C\rceil-m,\frac{1}{\pi}
U_{m'\gamma\overline{\ell}^2+w_m+\epsilon_m,m}^{-1}+1\right).
\end{equation}
To get an explicit expression for $U_{z,m}$,
we consider subsums of $\mathcal{S}_m$
over the segments
\begin{equation}
\left[uP_m,(u+1)P_m\right),\quad u\in\mathbb{Z}, \quad
0\le u< C/P_m.
\end{equation}
To this end, let
\begin{equation}
\Lambda_m:=\#\{0\le x<P_m\,:\, x^2\equiv 1\pmod{P_m}\}.
\end{equation}
As $\ell$ runs over the reduced residue classes in each segment,
then since $(m'\gamma,P_m)=1$ and $\overline{\ell}$ is squared,
if $m'\gamma\overline{\ell}^2+w_m$ hits
a residue class modulo $P_m$, it does so $\Lambda_m$ times.
Let $\mathcal{R}_m$ denote the classes that are hit.
We have that the cardinality of $\mathcal{R}_m$ is
$\le P_m/\Lambda_m$.
If $\epsilon_m\ge 0$
we take $\mathcal{R}_m\subset [-P_m/2, P_m/2)$
while if $\epsilon_m<0$ we take
$\mathcal{R}_m\subset (-P_m/2, P_m/2]$.
Furthermore, given $m'\gamma \bar{\ell}^2+w_m$,
let $\tilde{\ell}\in \mathcal{R}_m$ denote the class representative
that it hits.
Then, summing over the $C/P_m$ segments, we obtain
\begin{equation}
\mathcal{S}_m \le \frac{C\Lambda_m}{P_m}
\sum_{\tilde{\ell}\in\mathcal{R}_m}\min\left(\lceil L/C\rceil-m,\frac{1}{\pi}
U^{-1}_{\tilde{\ell}+\epsilon_m,m}+1\right),
\end{equation}
and we have the formula
\begin{equation}
U_{\tilde{\ell}+\epsilon_m,m} = \left\{\begin{array}{ll}
\displaystyle
\frac{|\tilde{\ell}|+\sgn(\tilde{\ell})\epsilon_m}{P_m}&
\displaystyle \tilde{\ell}\ne 0,\\
\,&\\
\displaystyle
\frac{|\epsilon_m|}{P_m}& \tilde{\ell} = 0.
\end{array}\right.
\end{equation}
At worst, the classes that are hit concentrate in
$[-P_m/2\Lambda_m,P_m/2\Lambda_m]$. If $\epsilon_m\ge 0$,
we isolate the terms corresponding to $\tilde{\ell}=0$ and $\tilde{\ell}=-1$ (if they exist),
and pair the remaining terms for $\tilde{\ell}$ and $-\tilde{\ell}-1$.
While if $\epsilon_m<0$, we isolate the terms for $\tilde{\ell} =0$ and
$\tilde{\ell} = 1$,
and pair the remaining terms for $\tilde{\ell}+1$ and $-\tilde{\ell}$.
Since $0\le |\epsilon_m|\le 1/2$ and $P_m/\Lambda_m\ge 1$,
this gives
\begin{equation}
\begin{split}
\mathcal{S}_m &\le \frac{C\Lambda_m}{P_m}\min\left(\lceil L/C\rceil-m,\frac{P_m}{\pi
|\epsilon_m|}+1\right)+ \frac{C\Lambda_m}{P_m}\left(\frac{2P_m}{\pi}+1\right)\\
&+ \frac{C\Lambda_m}{P_m}\left(\frac{P_m}{\Lambda_m}-1\right)
+\frac{C \Lambda_m}{\pi}
\sum_{1\le \ell< \frac{P_m}{2\Lambda_m}}
\left(\frac{1}{\ell+|\epsilon_m|}+\frac{1}{\ell+1-|\epsilon_m|}\right).
\end{split}
\end{equation}
Furthermore,
\begin{equation}
\sum_{1\le \ell< \frac{P_m}{2\Lambda_m}}
\left(\frac{1}{\ell+|\epsilon_m|}+\frac{1}{\ell+1-|\epsilon_m|}\right)
\le \sum_{1\le \ell< \frac{P_m}{2\Lambda_m}}\frac{2\ell+1}{\ell^2+\ell}
\le \frac{3}{2}+ 2\log \frac{P_m}{2\Lambda_m}.
\end{equation}
Hence, using the trivial inequalities $\lceil L/C\rceil < L/C+1$
and $1\le \Lambda_m\le P_m$, together with the observation $P_m\mid D$ so that
$\Lambda_m=\Lambda(P_m) \le \Lambda(D)= \Lambda$, we obtain
\begin{equation}
\mathcal{S}_m \le
\frac{C\Lambda_m}{P_m}\min\left(
L/C-m,\frac{P_m}{\pi |\epsilon_m|}\right)
+\frac{2C\Lambda}{\pi}+2C
+\frac{C\Lambda}{\pi}\left(\frac{3}{2}+ 2\log \frac{D}{2B}\right).
\end{equation}
Now, we have $\sum_{1\le m\le M}(1-m/M) = (M-1)/2$.
So summing over $m$ we obtain
\begin{equation}\label{corput lemma 2 2 12}
\begin{split}
\sum_{m=1}^{M}\left(1-\frac{m}{M}\right) \mathcal{S}_m
&\le C\sum_{m=1}^{M}\left(1-\frac{m}{M}\right)\frac{\Lambda_m}{P_m}
\min\left( L/C-m,\frac{P_m}{\pi
|\epsilon_m|}\right)\\
&+C(M-1)+
\frac{C\Lambda (M-1)}{\pi}\left(\frac{7}{4}+\log \frac{D}{2B}\right).
\end{split}
\end{equation}
In \eqref{corput lemma 2 2 12},
we choose $M=\lceil L/C\rceil$, so that $M=L/C+1-\tilde{\rho}$.
Then we substitute the
resulting expression into \eqref{corput lemma 2 2 9}, which gives
\begin{equation}\label{corput lemma 2 2 14}
\begin{split}
\Big|\sum_{n=N^*+1}^{N^*+L^*} \chi(n)e^{2\pi i P_2(n-N-1)}\Big|^2 &\le
CL+\frac{L^2+\tilde{\rho}(1-\tilde{\rho})C^2}{L/C+1-\tilde{\rho}}\\
&+4C^2\sum_{m=1}^{\lceil L/C\rceil} \left(1-\frac{m}{\lceil L/C\rceil}\right)\frac{\Lambda_m}{P_m}
\min\left(L/C-m,\frac{P_m}{\pi
|\epsilon_m|}\right)\\
&+4CL
+\frac{4\Lambda C L}{\pi}\left(\frac{7}{4}+\log \frac{D}{2 B}\right).
\end{split}
\end{equation}
At this point, we may assume that $L \ge C$, otherwise the lemma
is trivial due to the first term in \eqref{corput lemma 2 2 result}.
Given this assumption, it is easy to check that the second term in
\eqref{corput lemma 2 2 14}, viewed as a function of $\tilde{\rho}$, has no critical
points in the interval $[0,1)$, and so it is monotonic over that interval.
Comparing the values at $\tilde{\rho}=0$ and $\tilde{\rho}=1$, we deduce that the maximum
is at $\tilde{\rho}=1$.
Using this in \eqref{corput lemma 2 2 14}, and
substituting the result into \eqref{corput lemma 2 2 0} (after squaring both
sides there) yields the lemma.
\end{proof}
\section{Proof of Theorem~\ref{main theorem}}
If $\chi=\chi_0$ is the principal character, then
\begin{equation}
L(s,\chi_0) = \zeta(s)\prod_{p|q} (1-p^{-s}).
\end{equation}
Bounding the product above trivially, we obtain
\begin{equation}
\begin{split}
|L(1/2+it,\chi_0)|&\le
|\zeta(1/2+it)|
\prod_{p|q}\left(1+1/\sqrt{p}\right) \le |\zeta(1/2+it)| \tau(q).
\end{split}
\end{equation}
(Note that this is a large overestimate, but it is still fine since the
difficult part of the proof is $\chi\ne \chi_0$.)
Combining this with the bound
for the Riemann zeta function in \cite{hiary-corput}, we arrive at
\begin{equation}\label{zeta bound}
|L(1/2+it,\chi_0)| \le 0.63 \tau(q) \mathfrak{q}^{1/6}\log\mathfrak{q},\qquad
(|t| \ge 3).
\end{equation}
So the theorem follows in this case.
Henceforth, we assume
that $\chi$ is nonprincipal, and so $q>2$.
Let $\rho =1.3$, which is
a parameter that will control the size of the
segments in our dyadic subdivision.
The starting point of the dyadic subdivision is
\begin{equation}
v_0 =\left\lceil \frac{C |t|^{1/3}}{(\rho-1)^2}\right\rceil.
\end{equation}
We assume that $|t|\ge t_0\ge \rho^3/(\rho-1)^3$
where $t_0:=200$.
Since $q>2$ by assumption, then $\mathfrak{q}\ge \mathfrak{q}_0:= 3t_0$.
From the Dirichlet series definition of $L(s,\chi)$, we have
\begin{equation}\label{trivial dirichlet bound}
|L(1/2+it,\chi)| \le \left|\sum_{n=1}^{\infty} \frac{\chi(n)}{n^{1/2+it}}\right|.
\end{equation}
We divide the summation range on the r.h.s.\ of \eqref{trivial dirichlet bound}
into an initial sum followed by dyadic segments
$[\rho^{\ell} v_0,\rho^{\ell+1}v_0)$.
This gives
\begin{equation}
|L(1/2+it,\chi)| \le \left|\sum_{n=1}^{v_0-1} \frac{\chi(n)}{n^{1/2+it}}\right|
+\sum_{\ell=0}^{\infty} \left|\sum_{\rho^{\ell} v_0\le n<\rho^{\ell+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right|.
\end{equation}
The $\ell$-th dyadic segment is subdivided into blocks of length $L_{\ell}$
where
\begin{equation}
L_{\ell}=\left\{\begin{array}{ll}
\left\lceil \frac{(\rho-1)\rho^{\ell}v_0}{|t|^{1/3}}
\right\rceil, & 0\le \ell< \ell_0:=
\frac{\log(C D|t|^{2/3}/v_0)}{\log\rho}\\
&\\
\left\lceil \frac{(\rho-1)\rho^{\ell} v_0}{|t|^{1/2}}\right\rceil, &
\ell_0 \le \ell <
\ell_1:=\frac{\log(q|t|/5 v_0)}{\log \rho},\\
&\\
\left\lceil \frac{(\rho-1)\rho^{\ell} v_0}{|t|}\right\rceil, &
\ell_1 \le \ell,
\end{array}\right.
\end{equation}
plus a (possibly empty) boundary block. (Note that our assumption $|t|\ge t_0$ and
the fact $CD\le q$ imply that $\ell_0<\ell_1$.) So there are
$R_{\ell} = \lceil (\rho-1)\rho^{\ell}v_0/L_{\ell}\rceil$
blocks in the $\ell$-th segment.
The $r$-th block
in the $\ell$-th segment
begins at
\begin{equation}
N_{r,\ell}+1= \lceil \rho^{\ell} v_0\rceil +rL_{\ell}, \qquad
(0\le r< R_{\ell}).
\end{equation}
We first bound the initial sum, then we bound
the sum over each range of $\ell$ separately.
\subsection{Initial sum}
The initial sum is bounded trivially using the triangle inequality,
the fact $|\chi(n)/n^{1/2+it}|\le 1/\sqrt{n}$, and on comparing with the integral
$\int_0^{v_0-1} \frac{1}{\sqrt{x}}dx$. Recalling that $C/q^{1/3}=\cbf(q)$, this
gives
\begin{equation}\label{main theorem region 0}
\left|\sum_{n=1}^{v_0-1} \frac{\chi(n)}{n^{1/2+it}}\right|
\le 2\sqrt{v_0-1} \le \mathfrak{v}_0 \sqrt{\cbf(q)}\mathfrak{q}^{1/6},
\qquad
\mathfrak{v}_0:= \frac{2}{\rho-1}.
\end{equation}
\subsection{Sum over $0\le \ell < \ell_0$}
Using the Cauchy--Schwarz inequality we obtain
\begin{equation}\label{main theorem 0}
\left|\sum_{0\le \ell<\ell_0} \sum_{\rho^{\ell} v_0\le n<\rho^{\ell+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right|^2\le
(\ell_0+1)\sum_{0\le \ell<\ell_0}
\left|\sum_{\rho^{\ell} v_0\le n<\rho^{\ell+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right|^2.
\end{equation}
We partition
the $\ell$-th dyadic segment in \eqref{main theorem 0}
into blocks of length $L_{\ell}$.
Then we apply partial summation to each segment
to remove the weighting factor $1/\sqrt{n}$.
Finally, we apply the Cauchy--Schwarz inequality to the sum of the blocks.
This yields
\begin{equation}\label{main theorem 1}
\begin{split}
&\left|\sum_{\rho^{\ell} v_0\le n < \rho^{{\ell}+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right|^2 \le
\frac{R_{\ell}}{\rho^{\ell}v_0}
\sum_{r=0}^{R_{\ell}-1} \max_{0\le \Delta \le L_{\ell}}\left|
\sum_{n=N_{r,\ell}+1}^{N_{r,\ell}+ \Delta}
\frac{\chi(n)}{n^{it}}\right|^2.
\end{split}
\end{equation}
We estimate the inner sum in \eqref{main theorem 1}
via Lemma~\ref{corput lemma 2 2}.
To this end, let
\begin{equation}
\lambda = \frac{1}{\sqrt{\rho-1}},\qquad
f(x)=-\frac{t}{2\pi}\log x,
\end{equation}
and $1\le L=\Delta\le L_{\ell}$. (Note that $\lambda>1$, as required by the
lemma.)
We have
\begin{equation}\label{analyticity condition}
\frac{\lambda (L_{\ell}-1)}{N_{r,\ell}+1} < \frac{1}{\lambda |t|^{1/3}}< 1.
\end{equation}
So $f(N_{r,\ell}+1+z)$ is analytic on a disk of radius $|z|\le
\lambda(L_{\ell}-1)$.
Moreover, as a consequence of \eqref{analyticity
condition},
\begin{equation}
\frac{|f^{(j)}(N_{r,\ell}+1)|}{j!}\lambda^j (L-1)^j =
\frac{|t|\lambda^j(L-1)^j}{2\pi j (N_{r,\ell}+1)^j}
\le \frac{1}{2\pi j \lambda^j},\qquad (j\ge 3).
\end{equation}
In particular, the required bound on $|f^{(j)}(N_{r,\ell}+1)|/j!$
in Lemma~\ref{corput lemma 2 2} holds with $\eta = 1/6\pi$.
Therefore, letting
$\nu_2=\nu_2(1/\sqrt{\rho-1},1/6\pi)$ and
\begin{equation}
y_{r,m,\ell} := \frac{mC^2D f''(N_{r,\ell}+1)}{Bd_m}
= \frac{m}{d_m} \frac{C^2 D t}{2\pi B}\frac{1}{(N_{r,\ell}+1)^2},
\end{equation}
Lemma~\ref{corput lemma 2 2} gives
that the r.h.s.\ in \eqref{main theorem 0} is bounded by
\begin{equation}\label{main theorem 2}
\frac{4\nu_2^2\Lambda}{\pi} (\ell_0+1)\sum_{0\le \ell <\ell_0} \left(*_{\ell}+**_{\ell}\right),
\end{equation}
where
\begin{equation}
\begin{split}
&*_{\ell} :=\frac{CL_{\ell}R_{\ell}^2}{\rho^{\ell}v_0}
\left(\log\frac{D}{2B}+\frac{7}{4}+\frac{3\pi}{2\Lambda}\right)\\
&\textrm{$**_{\ell}$}:=\frac{C^2R_{\ell}}{\rho^{\ell} v_0}
\sum_{m=1}^{\lceil L_{\ell}/C\rceil}W(m)
\sum_{r=0}^{R_{\ell}-1} \min\left(\frac{\pi d_m B L_{\ell}}{CD},
\frac{1}{\|y_{r,m,\ell}\|}\right),
\end{split}
\end{equation}
where for brevity we write
\begin{equation}
W(m)=1-\frac{m}{\lceil L_{\ell}/C\rceil}.
\end{equation}
We consider the term $*_{\ell}$ first since it is easier to handle.
Since $(\rho-1)^2v_0/|t|^{1/3} \ge C$, we obtain
\begin{equation}\label{Lell bound0}
\frac{(\rho-1)\rho^{\ell}v_0}{|t|^{1/3}}
\le L_{\ell}\le
\frac{(\rho-1)\rho^{\ell+1}v_0}{|t|^{1/3}}.
\end{equation}
And the upper bound in \eqref{Lell bound0} gives
$(\rho-1)^2 v_0/L_{\ell} \ge 1$. Hence,
\begin{equation}\label{Rell bound}
\frac{(\rho-1)\rho^{\ell}v_0}{L_{\ell}} \le R_{\ell} \le
\frac{(\rho-1)\rho^{\ell+1}v_0}{L_{\ell}}.
\end{equation}
As can be seen from \eqref{Rell bound} and the definition of $L_{\ell}$, we have
\begin{equation}\label{Rell bound 2}
R_{\ell} \le \rho |t|^{1/3}, \qquad (0\le \ell < \ell_0).
\end{equation}
Using this bound, the bound \eqref{Rell bound},
and the inequality $\Lambda \ge 2$ (valid since $q>2$ by assumption),
we arrive at
\begin{equation}\label{main theorem 4}
\sum_{0\le \ell < \ell_0}*_{\ell} \le
(\ell_0+1) \rho^2(\rho-1) C |t|^{1/3}
\left(\log \frac{D}{2}+\frac{7}{4}+\frac{3\pi}{4}\right).
\end{equation}
Furthermore, by our choice of $v_0$, we have
\begin{equation}\label{ell0 bound}
\ell_0+1 \le
\frac{\log\left(\rho(\rho-1)^2 D|t|^{1/3}\right)}{\log\rho}.
\end{equation}
Therefore, using
the inequality $\log\frac{D|t|^{1/3}}{2} \le
\log\mathfrak{q}^{1/3}$,
the formula $C/q^{1/3} = \cbf(q)$,
and incorporating the additional factor $\ell_0+1$ from
\eqref{main theorem 2} into our estimate, gives
\begin{equation}\label{star eq 1}
(\ell_0+1) \sum_{0\le \ell<\ell_0} *_{\ell}\le \mathfrak{v}_1 \cbf(q) \mathfrak{q}^{1/3}
Z_1(\log \mathfrak{q}),\qquad
\mathfrak{v}_1:=\frac{\rho^2(\rho-1)}{27\log^2 \rho},
\end{equation}
where
\begin{equation}
Z_1(X) := \left(X-\log t_0 +21/4+ 9\pi/4\right)
\left(X+3\log(2\rho(\rho-1)^2)\right)^2.
\end{equation}
The term $**_{\ell}$ in \eqref{main theorem 2} is more
complicated to handle.
First, we apply Lemma~\ref{well spacing lemma}
to estimate the sum over $r$ there.
To this end, note that
\begin{equation}
(N_{r+1,\ell}+1)^2-(N_{r,\ell}+1)^2\ge 2\lceil \rho^{\ell} v_0\rceil
L_{\ell}, \qquad (0\le r < R_{\ell}-1).
\end{equation}
Moreover, by construction,
$N_{R_{\ell}-1,\ell}+1\le \lfloor \rho^{\ell+1} v_0\rfloor$ and
$N_{0,\ell} +1\ge \lceil \rho^{\ell}v_0\rceil$.
Hence,
\begin{equation}\label{well spacing parameters}
\begin{split}
&|y_{r+1,m,\ell}-y_{r,m,\ell}|
\ge\frac{m}{d_m} \frac{C^2D|t|}{2\pi B}
\frac{2\lceil \rho^{\ell} v_0\rceil L_{\ell}}{\lfloor
\rho^{\ell+1}v_0\rfloor^4},\qquad (0\le r<R_{\ell}-1),\\
&|y_{R_{\ell}-1,m,\ell}-y_{0,m,\ell}|
\le \frac{m}{d_m} \frac{C^2D|t|}{2\pi B}
\frac{\rho^2-1}{\lfloor \rho^{\ell+1}v_0\rfloor^2}.
\end{split}
\end{equation}
We apply Lemma~\ref{well spacing lemma} to the sequence $\{y_{r,m,\ell}\}_r$ with
$y=y_{R_{\ell}-1,m,\ell}$, $x=y_{0,m,\ell}$,
$P=\pi d_m B L_{\ell}/CD$,
and (since $y_{r,m,\ell}$ is monotonic in $r$)
with $\delta$ equal to
the lower bound for $|y_{r+1,m,\ell}-y_{r,m,\ell}|$
in \eqref{well spacing parameters}.
Using these parameter choices,
Lemma~\ref{well spacing lemma} gives
\begin{equation}
\sum_{r=0}^{R_{\ell}-1} \min\left(\frac{\pi d_m B L_{\ell}}{CD},
\frac{1}{\|y_{r,m,\ell}\|}\right)
\le 2(y-x+1)(2P+\delta^{-1}\log (e\max\{P,2\}/2)).
\end{equation}
Multiplying out the brackets,
we obtain four terms: $2(y-x)\delta^{-1} \log (e\max\{P,2\}/2)$,
$2\delta^{-1}\log (e\max\{P,2\}/2)$, $4(y-x)P$, and $4P$.
We estimate the sum of each term over $m$ with the aid of
the following
inequalities, which are either straightforward to prove (left two inequalities) or are a consequence of
Lemma~\ref{gcd sum lemma}.
\begin{equation}\label{m bounds}
\begin{split}
&\sum_{m=1}^{\lceil L_{\ell}/C\rceil}W(m) \le \frac{L_{\ell}}{2C},
\qquad
\sum_{m=1}^{\lceil L_{\ell}/C\rceil} W(m)\frac{d_m}{m}
\le \tau\left(D/B\right)\log \lceil L_{\ell}/C\rceil,\\
&\sum_{m=1}^{\lceil L_{\ell}/C\rceil}
W(m) m \le \frac{L_{\ell}^2}{2C^2},
\qquad
\sum_{m=1}^{\lceil L_{\ell}/C\rceil}
W(m)d_m
\le \tau\left(D/B\right) \lceil L_{\ell}/C\rceil.
\end{split}
\end{equation}
Combining
\eqref{Lell bound0}, \eqref{Rell bound 2},
\eqref{well spacing parameters},
and \eqref{m bounds},
together with the bound (here we use $L_{\ell}\ge C$)
\begin{equation}
\log\left(e\max\{P,2\}/2\right) \le
\log\left(\frac{e\pi L_{\ell}}{2C}\right),
\end{equation}
we routinely deduce the estimates
\begin{equation}\label{main theorem 5}
\begin{split}
&\frac{C^2R_{\ell}}{\rho^{\ell}v_0}
\sum_{m=1}^{\lceil L_{\ell}/C\rceil}
W(m) \frac{(\rho^2-1)\lfloor \rho^{\ell+1}v_0 \rfloor^2}{ \lceil
\rho^{\ell}v_0\rceil L_{\ell}}
\log\left(\frac{e\pi L_{\ell}}{2C}\right)
\le \mathcal{B}_1(\ell)\\
&\frac{C^2R_{\ell}}{\rho^{\ell}v_0}
\sum_{m=1}^{\lceil L_{\ell}/C\rceil}
W(m) \frac{d_m}{m}
\frac{2\pi B}{C^2D|t|}
\frac{\lfloor \rho^{\ell+1}v_0\rfloor^4}
{\lceil \rho^{\ell} v_0\rceil L_{\ell}}
\log\left(\frac{e\pi L_{\ell}}{2C}\right)
\le \mathcal{B}_2(\ell)\\
& \frac{C^2R_{\ell}}{ \rho^{\ell}v_0}
\sum_{m=1}^{\lceil L_{\ell}/C\rceil}
W(m)\frac{4\pi d_m B L_{\ell}}{CD}
\frac{m}{d_m} \frac{C^2D|t|}{2\pi B}
\frac{\rho^2-1}{\lfloor \rho^{\ell+1}v_0\rfloor^2}
\le \mathcal{B}_3(\ell)\\
&\frac{C^2R_{\ell}}{\rho^{\ell}v_0}
\sum_{m=1}^{\lceil L_{\ell}/C\rceil}
W(m) d_m \frac{4\pi B L_{\ell}}{CD}
\le \mathcal{B}_4(\ell),
\end{split}
\end{equation}
where
\begin{equation}
\begin{split}
&\mathcal{B}_1(\ell):=\frac{\rho^3(\rho^2-1)}{2}C|t|^{1/3} \log\left(
\frac{e\pi L_{\ell}}{2C}\right),\\
&\mathcal{B}_2(\ell):=
\frac{2\pi\rho^5}{(\rho-1)}\frac{\rho^{\ell}v_0}{D|t|^{1/3}}
B\tau(D/B)\log\left(\frac{eL_{\ell}}{C}\right)
\log\left(\frac{e\pi L_{\ell}}{2C}\right),\\
&\mathcal{B}_3(\ell):=\rho^3(\rho-1)^3(\rho^2-1) C|t|^{1/3}, \\
&\mathcal{B}_4(\ell):=4\pi \rho^2(\rho-1)^2\frac{\rho^{\ell}v_0}{D|t|^{1/3}}
B\tau(D/B).
\end{split}
\end{equation}
Incorporating the additional factor $\ell_0+1$ from
\eqref{main theorem 0} into our estimate, we therefore conclude that
\begin{equation}\label{Bcal bound}
(\ell_0+1)\sum_{0\le \ell<\ell_0} **_{\ell} \le \sum_{j=1}^4(\ell_0+1)\sum_{0\le \ell<\ell_0} \mathcal{B}_j(\ell).
\end{equation}
We estimate the sum over $\ell$ in \eqref{Bcal bound} as a geometric progression.
To this end, we use the bound on $\ell_0$ in \eqref{ell0 bound},
the bound
\begin{equation}\label{Lell bound}
L_{\ell} \le \rho(\rho-1)CD|t|^{1/3},\qquad (0\le \ell < \ell_0),
\end{equation}
which follows directly from the definitions of $L_{\ell}$ and $\ell_0$,
and consequently the bound
\begin{equation}
\log\left(\frac{e\pi L_{\ell}}{2C}\right)
\le \log\left(\frac{e\pi \rho(\rho-1) D|t|^{1/3}}{2}\right),
\qquad (0\le \ell<\ell_0).
\end{equation}
After some rearrangements, this yields
\begin{equation}\label{main theorem 3}
\begin{split}
&(\ell_0+1)\sum_{0\le \ell < \ell_0} \mathcal{B}_1(\ell) \le
\mathfrak{v}_2 \cbf(q) \mathfrak{q}^{1/3}Z_2(\log \mathfrak{q}),\qquad
\mathfrak{v}_2:= \frac{\rho^3(\rho^2-1)}{54\log^2 \rho},\\
&(\ell_0+1)\sum_{0\le \ell < \ell_0} \mathcal{B}_2(\ell) \le
\mathfrak{v}_3 \cbf(q) B\tau(D/B) \mathfrak{q}^{1/3}Z_3(\log\mathfrak{q}),\qquad
\mathfrak{v}_3:= \frac{2\pi \rho^6}{27(\rho-1)^2\log \rho},\\
& (\ell_0+1)\sum_{0\le \ell < \ell_0} \mathcal{B}_3(\ell) \le
\mathfrak{v}_4 \cbf(q) \mathfrak{q}^{1/3}Z_4(\log\mathfrak{q}),\qquad
\mathfrak{v}_4:= \frac{\rho^3(\rho-1)^3(\rho^2-1)}{9\log^2
\rho},\\
& (\ell_0+1)\sum_{0\le \ell < \ell_0} \mathcal{B}_4(\ell) \le
\mathfrak{v}_5\cbf(q) B\tau(D/B) \mathfrak{q}^{1/3}Z_5(\log\mathfrak{q}),\qquad
\mathfrak{v}_5:= \frac{4\pi
\rho^3(\rho-1)}{3\log\rho},
\end{split}
\end{equation}
where
\begin{equation}
\begin{split}
&Z_2(X):= \left(X+ 3\log(e\pi \rho(\rho-1))\right)\left(X+3\log(2\rho(\rho-1)^2)\right)^2, \\
&Z_3(X):= \left(X+ 3\log(2e\rho(\rho-1))\right)
\left(X+ 3\log(e\pi \rho(\rho-1))\right)\times \\
&\qquad\qquad \left(X+3\log(2\rho(\rho-1)^2)\right),\\
&Z_4(X):= \left(X+3\log(2\rho(\rho-1)^2)\right)^2, \\
&Z_5(X):= X+3\log(2\rho(\rho-1)^2).
\end{split}
\end{equation}
Therefore,
\begin{equation}\label{star eq 2}
\begin{split}
(\ell_0+1)\sum_{0\le \ell <\ell_0} **_{\ell} &\le
\mathfrak{v}_2\cbf(q)\mathfrak{q}^{1/3}Z_2(\log \mathfrak{q})\\
&+\mathfrak{v}_3B\tau(D/B)\cbf(q)\mathfrak{q}^{1/3}Z_3(\log
\mathfrak{q})\\
&+\mathfrak{v}_4\cbf(q)\mathfrak{q}^{1/3}Z_4(\log \mathfrak{q})\\
&+\mathfrak{v}_5B\tau(D/B)\cbf(q)\mathfrak{q}^{1/3}Z_5(\log \mathfrak{q}).
\end{split}
\end{equation}
We combine \eqref{star eq 2} and \eqref{star eq 1},
and use the inequality
$\sqrt{x+y}\le \sqrt{x}+\sqrt{y}$ with $x,y\ge 0$. This gives
\begin{equation}
\sqrt{(\ell_0+1)\sum_{0\le \ell < \ell_0} (*_{\ell}+**_{\ell})}
\le \sqrt{\cbf(q)}
\left(\sqrt{Z_6(\log
\mathfrak{q})}+\sqrt{B\tau(D/B)Z_7(\log\mathfrak{q})}\right)
\mathfrak{q}^{1/6}
\end{equation}
where
\begin{equation}
\begin{split}
&Z_6(X):= \mathfrak{v}_1Z_1(X)+\mathfrak{v}_2Z_2(X)+\mathfrak{v}_4Z_4(X),\\
&Z_7(X):= \mathfrak{v}_3Z_3(X)+\mathfrak{v}_5Z_5(X).
\end{split}
\end{equation}
Finally, we substitute this back into \eqref{main theorem 0} which yields
\begin{equation}\label{main theorem region 1}
\begin{split}
\left|\sum_{0\le \ell < \ell_0}
\sum_{\rho^{\ell}v_0 \le n < \rho^{\ell+1} v_0}
\frac{\chi(n)}{n^{1/2+it}}\right|
&\le \frac{2\nu_2}{\sqrt{\pi}} \sqrt{\Lambda \cbf(q) Z_6(\log
\mathfrak{q})} \mathfrak{q}^{1/6}\\
&+ \frac{2\nu_2}{\sqrt{\pi}} \sqrt{\Lambda \cbf(q)
B\tau(D/B)Z_7(\log\mathfrak{q})}\mathfrak{q}^{1/6}.
\end{split}
\end{equation}
\subsection{Sum over $\ell_0 \le \ell < \ell_1$}
Applying the triangle inequality
and partial summation gives
\begin{equation}\label{main theorem 6}
\sum_{\ell_0\le \ell<\ell_1}
\left|\sum_{\rho^{\ell} v_0\le n < \rho^{{\ell}+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right| \le
\sum_{\ell_0\le \ell<\ell_1}
\frac{1}{(\rho^{\ell}v_0)^{1/2}}\sum_{r=0}^{R_{\ell}-1} \max_{0\le \Delta \le L_{\ell}}\left|
\sum_{n=N_{r,\ell}+1}^{N_{r,\ell}+ \Delta}
\frac{\chi(n)}{n^{it}}\right|.
\end{equation}
We bound the inner sum in \eqref{main theorem 6} via
Lemma~\ref{corput lemma 1 1}.
Using an analysis similar to that at the beginning of Section 5.2,
one verifies that the required analyticity conditions
on $f(x)=-\frac{t}{2\pi}\log x$
in Lemma~\ref{corput lemma 1 1} hold with $J=1$, $\lambda=1/\sqrt{\rho-1}$,
and $\eta=1/4\pi$.
Therefore, if we let
$\nu_1=\nu_1(1/\sqrt{\rho-1},1/4\pi)$ and
\begin{equation}
x_{r,\ell}:=\frac{q f'(N_{r,\ell}+1)}{B_1} = -\frac{q t}{2\pi B_1}
\frac{1}{N_{r,\ell}+1},
\end{equation}
then by Lemma~\ref{corput lemma 1 1}
the inner double sum in \eqref{main theorem 6} is bounded by
\begin{equation}\label{main theorem 7}
\begin{split}
&\frac{2\nu_1}{\pi}
\frac{C_1R_{\ell}}{(\rho^{\ell}v_0)^{1/2}}
\left(\log\frac{D_1}{2B_1}+\frac{7}{4}+\frac{\pi}{2}\right)\\
&+ \frac{\nu_1}{\pi}
\frac{C_1}{(\rho^{\ell}v_0)^{1/2}}
\sum_{r=0}^{R_{\ell}-1} \min\left(\frac{\pi B_1
L_{\ell}}{q},\frac{1}{\|x_{r,\ell}\|}\right).
\end{split}
\end{equation}
We bound the sum over $r$ in \eqref{main theorem 7}
using Lemma~\ref{well spacing lemma}. To this end, note that
\begin{equation}\label{xrell bounds}
\begin{split}
|x_{r+1,\ell} -x_{r,\ell}| \ge \frac{q |t|}{2\pi B_1} \frac{L_{\ell}}
{\lfloor \rho^{\ell+1} v_0\rfloor^2},\qquad
|x_{R_{\ell}-1,\ell}-x_{0,\ell}| \le \frac{q |t|}{2\pi B_1}\frac{\rho-1}{\lfloor
\rho^{\ell+1}v_0\rfloor}.
\end{split}
\end{equation}
Furthermore, since the sequence
$x_{r,\ell}$ is monotonic in $r$, we may set
$\delta$ in
Lemma~\ref{well spacing lemma} to be the lower bound for
$|x_{r+1,\ell} -x_{r,\ell}|$ in \eqref{xrell bounds},
set $y-x$ as the upper bound for
$|x_{R_{\ell}-1,\ell}-x_{0,\ell}|$ in \eqref{xrell bounds},
and set $P=\pi B_1L_{\ell}/q$. (Note that $P\ge 2$.)
Therefore, applying the lemma, and multiplying out the bracket
in the resulting bound
$2(y-x+1)(2P+\delta^{-1}\log (eP/2))$, gives
\begin{equation}
\begin{split}
\sum_{r=0}^{R_{\ell}-1} \min\left(\frac{\pi B_1
L_{\ell}}{q},\frac{1}{\|x_{r,\ell}\|}\right) &\le
\left(\frac{2(\rho-1)\lfloor \rho^{\ell+1}v_0\rfloor}{L_{\ell}} + \frac{4\pi
B_1}{q |t|} \frac{\lfloor \rho^{\ell+1}
v_0\rfloor^2}{L_{\ell}}\right)\log \left(\frac{e\pi B_1 L_{\ell}}{2q}\right)\\
&+ \frac{2(\rho-1)|t|L_{\ell}}{\lfloor\rho^{\ell+1}
v_0\rfloor} + \frac{4\pi B_1 L_{\ell}}{q}.
\end{split}
\end{equation}
Using inequalities similar to those in Section 5.2, we deduce that
\begin{equation}\label{Lell bound 2}
\frac{(\rho-1)\rho^{\ell} v_0}{|t|^{1/2}}\le L_{\ell} \le
\frac{(\rho-1)\rho^{\ell+1} v_0}{|t|^{1/2}} \le \frac{1}{5}\rho(\rho-1)q|t|^{1/2},\qquad
(\ell_0\le \ell<\ell_1).
\end{equation}
Consequently, since $B_1\le D_1$, we have
\begin{equation}
\log\left(\frac{e\pi B_1L_{\ell}}{2q}\right) \le
\log\left(\frac{e\pi \rho(\rho-1)D_1
|t|^{1/2}}{10}\right),\qquad (\ell_0\le \ell<\ell_1).
\end{equation}
Using these inequalities, the formulas
$\sqrt{q}/D_1=\sqf(q)$,
$C_1/(\sqrt{CD} q^{1/6}) = \spf(q)$,
and executing the geometric sum over $\ell$,
we therefore conclude that
\begin{equation}\label{main theorem 9}
\begin{split}
&\sum_{\ell_0\le \ell<\ell_1}
\frac{C_1}{(\rho^{\ell}v_0)^{1/2}}
\sum_{r=0}^{R_{\ell}-1} \min\left(\frac{\pi B_1
L_{\ell}}{q},\frac{1}{\|x_{r,\ell}\|}\right) \le \\
&\mathfrak{v}_8\spf(q) \mathfrak{q}^{1/6}Z_8(\log \mathfrak{q})
+ \mathfrak{v}_9 \sqf(q) B_1 Z_8(\log \mathfrak{q})
+\mathfrak{v}_{10} \spf(q) \mathfrak{q}^{1/6} +\mathfrak{v}_{11} \sqf(q) B_1,
\end{split}
\end{equation}
where
\begin{equation}
Z_8(X) := X+2\log(e\pi \rho(\rho-1)/10)
\end{equation}
and
\begin{equation}
\begin{split}
&\mathfrak{v}_8:= \frac{\rho^{3/2}}{\sqrt{\rho}-1},\qquad
\mathfrak{v}_9:=\frac{2\pi \rho^{5/2}}{\sqrt{5}(\rho-1)(\sqrt{\rho}-1)},\\
&\mathfrak{v}_{10}:=\frac{2\rho^{3/2}(\rho-1)^2}{\sqrt{\rho}-1},\qquad
\mathfrak{v}_{11}:=\frac{4\pi
\rho^{3/2}(\rho-1)}{\sqrt{5}(\sqrt{\rho}-1)}.
\end{split}
\end{equation}
Furthermore, using the bound
\begin{equation}\label{Rell bound 3}
R_{\ell}\le \rho |t|^{1/2},\qquad (\ell_0\le \ell<\ell_1),
\end{equation}
we obtain
\begin{equation}\label{main theorem 10}
\sum_{\ell_0\le \ell<\ell_1}
\frac{C_1R_{\ell}}{(\rho^{\ell}v_0)^{1/2}}
\left(\log\frac{D_1}{2B_1}+\frac{7}{4}+\frac{\pi}{2}\right)\le
\mathfrak{v}_{12} \spf(q) \mathfrak{q}^{1/6}Z_9(\log\mathfrak{q}),
\end{equation}
where
\begin{equation}
\mathfrak{v}_{12}:= \frac{\rho^{3/2}}{2(\sqrt{\rho}-1)},\qquad
Z_9(X) := X-2\log(2 \sqrt{t_0}) + \frac{7}{2}+\pi.
\end{equation}
We substitute \eqref{main theorem 10} and \eqref{main theorem 9} into
\eqref{main theorem 7} and \eqref{main theorem 6}, which gives (after some
simplification)
\begin{equation}\label{main theorem region 2}
\begin{split}
&\sum_{\ell_0\le \ell<\ell_1}
\left|\sum_{\rho^{\ell} v_0\le n < \rho^{{\ell}+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right| \le\\
&\frac{\nu_1}{\pi}\left(\mathfrak{v}_8\spf(q) \mathfrak{q}^{1/6}Z_8(\log \mathfrak{q})
+ \mathfrak{v}_9 \sqf(q) B_1 Z_8(\log \mathfrak{q})
+\mathfrak{v}_{10} \spf(q) \mathfrak{q}^{1/6} +\mathfrak{v}_{11} \sqf(q) B_1\right)\\
&+ \frac{2\nu_1}{\pi} \mathfrak{v}_{12} \spf(q)
\mathfrak{q}^{1/6}Z_9(\log\mathfrak{q}).
\end{split}
\end{equation}
\subsection{Sum over $\ell_1 \le \ell$}
Like before, we
apply the triangle inequality, partial summation, and
Lemma~\ref{exp reduce} with $J=0$, to obtain
\begin{equation}\label{main theorem 12}
\sum_{\ell_1\le \ell}
\left|\sum_{\rho^{\ell} v_0\le n < \rho^{{\ell}+1}v_0}
\frac{\chi(n)}{n^{1/2+it}}\right| \le
\sum_{\ell_1\le \ell}
\frac{\nu_0}{(\rho^{\ell}v_0)^{1/2}} \sum_{r=0}^{R_{\ell}-1} \max_{0\le \Delta \le
L_{\ell}}\left|
\sum_{n=N_{r,\ell}+1}^{N_{r,\ell}+ \Delta} \chi(n)\right|.
\end{equation}
Here, $\nu_0 := \nu_0(1/\sqrt{\rho-1},1/2\pi)$. We use the bound
for non-principal characters at the bottom of page 139 of
\cite{hiary-char-sums}. Specifically, if $\chi\pmod{q}$ is non-principal,
then $|\sum_n \chi(n) |\le 2\sqrt{q} \log q$. Using this in \eqref{main theorem
12}, we deduce that the r.h.s.\ is
\begin{equation}\label{main theorem region 3}
\le 2\nu_0\sqrt{q}\log q \sum_{\ell_1\le \ell}
\frac{1}{(\rho^{\ell}v_0)^{1/2}} \le
\frac{2\sqrt{5}\nu_0\sqrt{\rho}}{\sqrt{\rho}-1} \frac{\log q}{\sqrt{|t|}}
\le \nu_0 \mathfrak{v}_{13}Z_{10}(\log \mathfrak{q}),
\end{equation}
where
\begin{equation}
\mathfrak{v}_{13}:=\frac{2\sqrt{5}
\sqrt{\rho}}{(\sqrt{\rho}-1)\sqrt{t_0}},\qquad Z_{10}(X) := X-\log t_0.
\end{equation}
\subsection{Summary}
We combine \eqref{main theorem region 0},
\eqref{main theorem region 1},
\eqref{main theorem region 2}, and
\eqref{main theorem region 3}, then
evaluate the resulting numerical constants
with $\rho = 1.3$. This yields the theorem.
\section{Proof of Corollary~\ref{main theorem simple}}\label{theorem simple proof}
We may assume that $q>1$, otherwise the corollary follows from the bound
\eqref{zeta bound} for principal characters.
By Lemma~\ref{postnikov lemma},
if $\chi$ is primitive, then
$B=B_1=1$. Also, since $q$ is a sixth power then $\sqf(q)=\cbf(q)=1$ and
$\spf(q)\le 1$.
Therefore, the functions $Z(X)$ and $W(X)$ in Theorem~\ref{main theorem}
satisfy
\begin{equation}
\begin{split}
Z(X)&\le -9.416 +15.6004 X+1.4327\sqrt{\Lambda(D)} X^{3/2}
+12.1673\sqrt{\Lambda(D) \tau(D)}X^{3/2},\\
W(X)&\le -296.84+ 114.07X,
\end{split}
\end{equation}
where we used that for $X\ge \log (2t_0)$ we have
\begin{equation}
\begin{split}
&65.5619 - 17.1704 X - 2.4781 X^2 + 0.6807 X^3 \le 0.6807 X^3,\\
&-1732 - 817.82 X +71.68 X^2 + 47.57X^3 \le 49.1 X^3.
\end{split}
\end{equation}
These inequalities are verified using \verb!Mathematica!.
It is easy to see that
$\Lambda(p^a) \le \tau(p^a)$ which, by multiplicativity, implies that
$\sqrt{\Lambda(D)\tau(D)} \le \tau(D)$.
Furthermore, since $q$ is a sixth power and $q>1$, then
$\tau(D) \le 0.572\tau(q)$ (as can be seen by considering the case
$q=2^{6a}$), $\tau(q) \ge 7$, and $q\ge 2^6$.
Substituting these bounds into the expressions for $Z(X)$ and $W(X)$, we
verify via \verb!Mathematica! that
\begin{equation}
\begin{split}
Z(X) &\le \tau(q)(-1.3451 + 2.2287 X + 7.2695 X^{3/2}) \le 7.95 \tau(q) X^{3/2},\\
W(X) &\le \tau(q)(-42.4056 + 16.2958 X) \le 16.30 \tau(q) X
\end{split}
\end{equation}
holds for $X\ge \log(2^6 t_0)$. Therefore,
\begin{equation}
|L(1/2+it,\chi)| \le 7.95 \tau(q) \mathfrak{q}^{1/6} \log^{3/2}\mathfrak{q} +
16.30 \tau(q) \log\mathfrak{q}.
\end{equation}
Finally, using the bound $\mathfrak{q} \ge 2^6t_0$, we deduce that
$|L(1/2+it,\chi)| \le 9.05 \tau(q) \mathfrak{q}^{1/6}\log^{3/2} \mathfrak{q}$,
proving the corollary.
\section{Proofs of bounds \eqref{partial summation bound} and
\eqref{convexity bound}}\label{bounds proofs}
\begin{proof}[Proof of bound \eqref{partial summation bound}]
Since $\chi$ is nonprincipal, we have
\begin{equation}
L(1/2+it,\chi) = \sum_{n\le M}
\frac{\chi(n)}{n^{1/2+it}}+\mathcal{R}_M(t,\chi),
\end{equation}
where the remainder
$\mathcal{R}_M(t,\chi) := \sum_{n>M} \chi(n) n^{-1/2-it}$
is just the tail of the Dirichlet series. (We do not
require that $M> 0$ be an integer.)
To estimate the tail, we use
partial summation \cite[formula (1)]{rubinstein-computational-methods},
\begin{equation}\label{partial sum 1}
\begin{split}
\left|\sum_{M<n\le M_2} \frac{\chi(n)}{n^{1/2+it}}\right|
\le&
\frac{1}{\sqrt{M_2}} \left|\sum_{n\le M_2} \chi(n) \right|+
\frac{1}{\sqrt{M}} \left|\sum_{n\le M} \chi(n)\right|\\
& + (1/2+|t|)\int_M^{M_2} \left|\sum_{1\le n\le u} \chi(n)\right| u^{-3/2}\, du.
\end{split}
\end{equation}
We bound the character sums on the r.h.s.\ of \eqref{partial sum 1}
using the P\'olya-Vinogradov inequality in
\cite[\textsection{23}]{davenport-book}.
This asserts that
if $\chi$ is a primitive character modulo $q>1$ then
$|\sum_{N_1\le n< N_2} \chi(n) |\le \sqrt{q}\log q$.
Substituting this in \eqref{partial sum 1}, taking the limit $M_2\to \infty$,
and executing the integral gives
\begin{equation}
|L(1/2+it,\chi)|\le 2\sqrt{M} + \frac{2\sqrt{q}\log q}{\sqrt{M}}(|t|+1).
\end{equation}
The claimed bound follows on choosing $M=(|t|+1)\sqrt{q}\log q$.
\end{proof}
\begin{remark}
If $\chi$ is merely assumed to be nonprincipal, then the bound \eqref{partial
summation bound} still holds but with an extra factor of $\sqrt{2}$ in front.
One simply uses the P\'olya-Vinogradov inequality stated
in \cite[page 139]{hiary-char-sums} in the proof.
\end{remark}
\begin{proof}[Proof of bound \eqref{convexity bound}]
Since
$|L(1/2+it,\chi)| = |L(1/2-it,\overline{\chi})|$
and the proof will apply symmetrically
to $L(1/2+it,\chi)$ and $L(1/2+it,\overline{\chi})$,
we may assume that $t\ge 0$.
Let $n_1=\lfloor \sqrt{qt/(2\pi)}\rfloor$. Since $qt\ge 2\pi$,
\cite[Theorem 5.3]{habsieger} implies that
\begin{equation}\label{approx func eq formula}
|L(1/2+it,\chi)| \le (2 + \delta_t)\left|\sum_{n=1}^{n_1}
\frac{\chi(n)}{n^{1/2+it}}\right|+|\mathcal{R}(t,\chi)|,
\end{equation}
where\footnote{The appearance of $\delta_t$ in \eqref{approx func eq formula}
is due to a slight imperfection in the form of the approximate functional
equation proved in \cite{habsieger}, and is not significant otherwise.}
$\displaystyle \delta_t := e^{\frac{\pi}{24t}+\frac{1}{12t^2}}-1$ and
\begin{equation}\label{calR bound}
|\mathcal{R}(t,\chi)|\le \frac{264.72 q^{1/4}\log q}{t^{1/4}} + \frac{11.39
q^{3/4}}{t^{3/4}}e^{-0.78\sqrt{t/q}}.
\end{equation}
To prove this,
we specialize \cite[Theorem 5.3]{habsieger}
to the critical line, taking $X=Y$ with $2\pi X^2 = qt$,
then appeal to well-known properties of Gauss sums. Put together, this yields
\begin{equation}\label{L formula}
L(1/2+it,\chi) = \sum_{n\le X} \frac{\chi(n)}{n^{1/2+it}} +
F(t,\chi)\sum_{n\le X} \frac{\overline{\chi(n)}}{n^{1/2-it}}+ \mathcal{R}(t,\chi),
\end{equation}
where $G(\chi,-1)$ is a Gauss sum and
\begin{equation}\label{F def}
F(t,\chi):=\frac{(2\pi i )^{1/2+it}q^{-1/2-it}G(\chi,-1)}{\Gamma(1/2+it)}.
\end{equation}
(Here, $(2\pi i )^{1/2+it}$ is defined using the principal branch of the
logarithm.)
We estimate $\mathcal{R}(t,\chi)$ in \eqref{L formula} using the case ``$X\le Y$'' in \cite[Theorem 5.3]{habsieger}. Since we specialized $X=\sqrt{qt/(2\pi)}$, we obtain
\begin{equation}
|\mathcal{R}(t,\chi)|
\le \left(167.2(2\pi)^{1/4}\log q + \frac{2.87 (2\pi)^{3/4}
\sqrt{q}}{\sqrt{t}}e^{-\sqrt{\pi^3/50} \sqrt{t/q}}\right)\frac{q^{1/4}}{t^{1/4}}.
\end{equation}
The claimed estimate \eqref{calR bound}
for $\mathcal{R}(t,\chi)$ follows on noting that
$167.2(2\pi)^{1/4} < 264.72$, $2.87(2\pi)^{3/4} < 11.39$, and
$\pi^{3/2}/\sqrt{50} > 0.78$.
To bound the factor $1/\Gamma(1/2+it)$ appearing in the definition \eqref{F def} of $F(t,\chi)$, we mimic the proof of \cite[Lemma 2.1]{habsieger} with minor adjustments. This gives
\begin{equation}\label{gamma bound}
\frac{1}{|\Gamma(1/2+it)|} \le \frac{e^{\frac{\pi t}{2}+\frac{\pi}{24t}+\frac{1}{12t^2}}}{\sqrt{2\pi}},\qquad (t>0).
\end{equation}
Combining \eqref{gamma bound} with the facts $|G(\chi,-1)|=\sqrt{q}$ and
$|(2\pi i )^{1/2+it}| = \sqrt{2\pi}e^{-\pi t/2}$ gives
$|F(t,\chi)|\le e^{\frac{\pi}{24t}+\frac{1}{12t^2}}=1+\delta_t$.
Since the second sum in the approximate functional equation
\eqref{L formula} is just the complex
conjugate of the first sum there, this proves \eqref{approx func eq
formula}.
Last, we trivially estimate the sum in
\eqref{approx func eq formula}, then use the assumption $t\ge \sqrt{q}\ge
\sqrt{2}$ and monotonicity
to bound $\mathcal{R}(t,\chi)$ and $\delta_t$. This gives (on noting
that $\log q \le \log \mathfrak{q}^{2/3}$)
\begin{equation}
|L(1/2+it,\chi)|\le
\left(\frac{2(1+\delta_{\sqrt{2}})}{(2\pi)^{1/4}}+
11.39e^{-\frac{0.78}{2^{1/4}}}\right) \mathfrak{q}^{1/4}
+ 264.72 \mathfrak{q}^{1/12}\log \mathfrak{q}^{2/3}.
\end{equation}
Denote the r.h.s.\ above by $(*)$. Using \verb!Mathematica! we verify that
the equation $(*)=124.46 \mathfrak{q}^{1/4}$ has no real solution if $\mathfrak{q}
\ge 10^9$. Furthermore, $(*)$ is smaller than $124.46 \mathfrak{q}^{1/4}$ when
$\mathfrak{q}=10^9$. Hence, $(*) \le 124.46 \mathfrak{q}^{1/4}$
for all $\mathfrak{q} \ge 10^9$, as claimed.
\end{proof}
\end{document} |
\begin{document}
\title{On presheaf submonads of quantale-enriched categories}
\author{Maria Manuel Clementino}
\author{Carlos Fitas}
\address{University of Coimbra, CMUC, Department of Mathematics, 3001-501 Coimbra, Portugal}
\email{mmc@mat.uc.pt, cmafitas@gmail.com}
\thanks{}
\begin{abstract}
This paper focuses on the presheaf monad and its submonads in the realm of $V$-categories, for a quantale $V$. First we present two characterisations of presheaf submonads, both using $V$-distributors: one based on admissible classes of $V$-distributors, and the other using Beck-Chevalley conditions on $V$-distributors. Then we focus on the study of the corresponding Eilenberg-Moore categories of algebras, having as main examples the formal ball monad and the so-called Lawvere monad.
\end{abstract}
\subjclass[2020]{18D20, 18C15, 18D60, 18A22, 18B35, 18F75}
\keywords{Quantale, $V$-category, distributor, lax idempotent monad, Presheaf monad, Ball monad, Lawvere monad}
\maketitle
\section*{Introduction}
Having as guideline Lawvere's point of view that it is worthwhile to regard metric spaces as categories enriched in the extended real half-line $[0,\infty]_+$ (see \cite{Law73}), we regard both the formal ball monad and the monad that identifies Cauchy complete spaces as its algebras -- which we call here the \emph{Lawvere monad} -- as submonads of the presheaf monad on the category $\Met$ of $[0,\infty]_+$-enriched categories. This leads us to the study of general presheaf submonads on the category of $V$-enriched categories, for a given quantale $V$.
Here we expand on known general characterisations of presheaf submonads and their algebras, and introduce a new ingredient -- conditions of Beck-Chevalley type -- which allows us to identify properties of functors and natural transformations, and, most importantly, contribute to a new facet of the behaviour of presheaf submonads.
In order to do that, after introducing the basic concepts needed to the study of $V$-categories in Section 1, Section 2 presents the presheaf monad and a characterisation of its submonads using admissible classes of $V$-distributors which is based on \cite{CH08}. Next we introduce the already mentioned Beck-Chevalley conditions (BC*) which resemble those discussed in \cite{CHJ14}, with $V$-distributors playing the role of $V$-relations. In particular we show that lax idempotency of a monad $\mathbb{T}$ on $\Cats{V}$ can be identified via a BC* condition, and that the presheaf monad satisfies fully BC*.
This leads to the use of BC* to present a new characterisation of presheaf submonads in Section 4.
The remaining sections are devoted to the study of the Eilenberg-Moore category induced by presheaf submonads. In Section 5, based on \cite{CH08}, we detail the relationship between the algebras, (weighted) cocompleteness, and injectivity. Next we focus on the algebras and their morphisms, first for the formal ball monad, and later for a general presheaf submonad. We end by presenting the relevant example of the presheaf submonad whose algebras are the so-called Lawvere complete $V$-categories \cite{CH09}, which, when $V=[0,\infty]_+$, are exactly the Cauchy complete (generalised) metric spaces, while their morphisms are the $V$-functors which preserve the limits for Cauchy sequences.
\section{Preliminaries}
Our work focuses on $V$-categories (or $V$-enriched categories, cf. \cite{EK66, Law73, Kel82}) in the special case of $V$ being a quantale.
Throughout $V$ is a \emph{commutative and unital quantale}; that is, $V$ is a complete lattice endowed with a symmetric tensor product $\otimes$, with unit $k\neq\bot$, commuting with joins, so that it has a right adjoint $\hom$; this means that, for $u,v,w\in V$,
\[u\otimes v\leq w\;\Leftrightarrow\; v\leq\hom(u,w).\]
As a category, $V$ is a complete and cocomplete (thin) symmetric monoidal closed category.
\begin{definition}
A \emph{$V$-category} is a pair $(X,a)$ where $X$ is a set and $a\colon X\times X\to V$ is a map such that:
\begin{itemize}
\item[(R)] for each $x\in X$, $k\leq a(x,x)$;
\item[(T)] for each $x,x',x''\in X$, $a(x,x')\otimes a(x',x'')\leq a(x,x'')$.
\end{itemize}
If $(X,a)$, $(Y,b)$ are $V$-categories, a \emph{$V$-functor} $f\colon (X,a)\to(Y,b)$ is a map $f\colon X\to Y$ such that
\begin{itemize}
\item[(C)] for each $x,x'\in X$, $a(x,x')\leq b(f(x),f(x'))$.
\end{itemize}
The category of $V$-categories and $V$-functors will be denoted by $\Cats{V}$. Sometimes we will use the notation $X(x,y)=a(x,y)$ for a $V$-category $(X,a)$ and $x,y\in X$.
\end{definition}
We point out that $V$ has itself a $V$-categorical structure, given by the right adjoint to $\otimes$, $\hom$; indeed, $u\otimes k\leq u\;\Rightarrow\;k\leq\hom(u,u)$, and $u\otimes\hom(u,u')\otimes\hom(u',u'')\leq u'\otimes\hom(u',u'')\leq u''$ gives that $\hom(u,u')\otimes\hom(u',u'')\leq\hom(u,u'')$. Moreover, for every $V$-category $(X,a)$, one can define its \emph{opposite $V$-category} $(X,a)^\op=(X,a^\circ)$, with $a^\circ(x,x')=a(x',x)$ for all $x,x'\in X$.
\begin{examples}\label{ex:VCat}
\begin{enumerate}
\item For $V=\mbox{\bf 2}=(\{0<1\},\wedge,1)$, a $\mbox{\bf 2}$-category is an
\emph{ordered set} (not necessarily antisymmetric) and a
$\mbox{\bf 2}$-functor is a \emph{monotone map}. We denote $\Cats{\mbox{\bf 2}}$ by $\Ord$.
\item The lattice $V=[0,\infty]$ ordered by the ``greater
or equal'' relation $\geq$ (so that $r\wedge s=\max\{r,s\}$, and the supremum of $S\subseteq[0,\infty]$ is given
by $\inf S$) with tensor $\otimes=+$ will be denoted by $[0,\infty]_+$. A $[0,\infty]_+$-category is a
\emph{(generalised) metric space} and a
$[0,\infty]_+$-functor is a \emph{non-expansive map} (see \cite{Law73}). We denote $\Cats{[0,\infty]_+}$ by $\Met$. We note that
\[
\hom(u,v)=v\ominus u:=\max\{v-u,0\},
\]
for all $u,v\in[0,\infty]$.
If instead of $+$ one considers the tensor product $\wedge$, then $\Cats{[0,\infty]_\wedge}$ is the category $\UMet$ of \emph{ultrametric spaces} and \emph{non-expansive maps}.
\item\label{ex.zero_um} The complete lattice $[0,1]$ with the usual
``less or equal'' relation $\le$ is isomorphic to $[0,\infty]$ via
the map $[0,1]\to[0,\infty],\,u\mapsto -\ln(u)$ where
$-\ln(0)=\infty$. Under this isomorphism, the operation $+$ on
$[0,\infty]$ corresponds to the multiplication $*$ on $[0,1]$. Denoting this quantale by $[0,1]_*$, one has $\Cats{[0,1]_*}$
isomorphic to the category $\Met=\Cats{[0,\infty]_+}$ of (generalised) metric spaces and
non-expansive maps.
Since $[0,1]$ is a frame -- that is, finite meets commute with infinite joins -- we can also consider it as a quantale
with $\otimes=\wedge$. The category $\Cats{[0,1]_{\wedge}}$ is isomorphic
to the category $\UMet$.
Another interesting tensor product in $[0,1]$ is given by the
\emph{\L{}ukasiewicz tensor} $\odot$ where $u\odot v=\max(0,u+v-1)$; here
$\hom(u,v)=\min(1,1-u+v)$. Then $\Cats{[0,1]_\odot}$ is the category of \emph{bounded-by-1 (generalised) metric spaces} and \emph{non-expansive maps}.
\item We consider now the set
\[
\Delta=\{\varphi\colon[0,\infty]\to [0,1]\mid \text{for all
$\alpha\in[0,\infty]$: }\varphi(\alpha)=\bigvee_{\beta<\alpha}
\varphi(\beta) \},
\]
of \emph{distribution functions}. With the
pointwise order, it is a complete lattice. For $\varphi,\psi\in\Delta$ and
$\alpha\in[0,\infty]$, define $\varphi\otimes\psi\in\Delta$ by
\[
(\varphi\otimes \psi)(\alpha)=\bigvee_{\beta+\gamma\le
\alpha}\varphi(\beta)*\psi(\gamma).
\]
Then
$\otimes:\Delta\times\Delta\to\Delta$ is associative and
commutative, and
\[
\kappa:[0,\infty]\to [0,1],\, \alpha\mapsto
\begin{cases}
0 & \text{if }\alpha=0,\\
1 & \text{else}
\end{cases}
\]
is a unit for $\otimes$. Finally,
$\psi\otimes-:\Delta\to\Delta$ preserves suprema since, for all $u\in [0,1]$,
$u*-\colon[0,1]\to[0,1]$ preserves suprema. A $\Delta$-category is a
\emph{(generalised) probabilistic metric space} and a
$\Delta$-functor is a \emph{probabilistic non-expansive
map} (see \cite{HR13} and references there).
\end{enumerate}
\end{examples}
We will also make use of two additional categories we describe next, the category $\Rels{V}$, of sets and $V$-relations, and the category $\Dists{V}$, of $V$-categories and $V$-distributors.\\
Objects of $\Rels{V}$ are sets, while morphisms are $V$-relations, i.e., if $X$ and $Y$ are sets, a \emph{$V$-relation} $r\colon X{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Y$ is a map $r\colon X\times Y\to V$. Composition of $V$-relations is given by \emph{relational composition}, so that the composite of $r\colon X{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Y$ and $s\colon Y{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Z$ is given by
\[(s\cdot r)(x,z)=\bigvee_{y\in Y}r(x,y)\otimes s(y,z),\]
for every $x\in X$, $z\in Z$.
Identities in $\Rels{V}$ are simply identity relations, with $1_X(x,x')=k$ if $x=x'$ and $1_X(x,x')=\bot$ otherwise.
The category $\Rels{V}$ has an involution $(\;)^\circ$, assigning to each $V$-relation $r\colon X{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Y$ the $V$-relation $r^\circ\colon Y{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} X$ defined by $r^\circ(y,x)=r(x,y)$, for every $x\in X$, $y\in Y$.
Since every map $f\colon X\to Y$ can be thought as a $V$-relation through its graph $f_\circ\colon X\times Y\to V$, with $f_\circ(x,y)=k$ if $f(x)=y$ and $f_\circ(x,y)=\bot$ otherwise, there is an injective on objects and faithful functor $\Set\to\Rels{V}$. When no confusion may arise, we use also $f$ to denote the $V$-relation $f_\circ$.
The category $\Rels{V}$ is a 2-category, when equipped with the 2-cells given by the pointwise order; that is, for $r,r'\colon X{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Y$, one defines $r\leq r'$ if, for all $x\in X$, $y\in Y$, $r(x,y)\leq r'(x,y)$. This gives us the possibility of studying adjointness between $V$-relations. We note in particular that, if $f\colon X\to Y$ is a map, then $f_\circ\cdot f^\circ\leq 1_Y$ and $1_X\leq f^\circ\cdot f_\circ$, so that $f_\circ\dashv f^\circ$.\\
Objects of $\Dists{V}$ are $V$-categories, while morphisms are $V$-distributors (also called $V$-bimodules, or $V$-profunctors); i.e., if $(X,a)$ and $(Y,b)$ are $V$-categories, a \emph{$V$-distributor} -- or, simply, a \emph{distributor} -- $\varphi\colon (X,a){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} (Y,b)$ is a $V$-relation $\varphi\colon X{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Y$ such that $\varphi\cdot a\leq\varphi$ and $b\cdot\varphi\leq\varphi$ (in fact $\varphi\cdot a=\varphi$ and $b\cdot\varphi=\varphi$ since the other inequalities follow from (R)). Composition of distributors is again given by relational composition, while the identities are given by the $V$-categorical structures, i.e. $1_{(X,a)}=a$. Moreover, $\Dists{V}$ inherits the 2-categorical structure from $\Rels{V}$.\\
Each $V$-functor $f\colon(X,a)\to(Y,b)$ induces two distributors, $f_*\colon(X,a){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}}(Y,b)$ and \linebreak $f^*\colon(Y,b){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}}(X,a)$, defined by $f_*(x,y)=Y(f(x),y)$ and $f^*(y,x)=Y(y,f(x))$, that is, $f_*=b\cdot f_\circ$ and $f^*=f^\circ\cdot b$. These assignments are functorial, as we explain below.\\
First we define 2-cells in $\Cats{V}$: for $f,f'\colon(X,a)\to(Y,b)$ $V$-functors, $f\leq f'$ when $f^*\leq (f')^*$ as distributors, so that
\[f\leq f' \;\;\Leftrightarrow\;\; \forall x\in X, \,y\in Y, \; Y(y,f(x))\leq Y(y,f'(x)).\] $\Cats{V}$ is then a 2-category, and we can define two 2-functors
\[\begin{array}{rclcrcl}
(\;)_*\colon \Cats{V}^\co&\longrightarrow&\Dists{V}&\mbox{ and }&(\;)^*\colon\Cats{V}^\op&\longrightarrow&\Dists{V}\\
X&\longmapsto&X&&X&\longmapsto&X\\
f&\longmapsto&f_*&&f&\longmapsto&f^*
\end{array}\]
Note that, for any $V$-functor $f\colon(X,a)\to(Y,b)$,
\[f_*\cdot f^*=b\cdot f_\circ\cdot f^\circ\cdot b\leq b\cdot b=b\mbox{ and }f^*\cdot f_*=f^\circ\cdot b\cdot b\cdot f_\circ\geq f^\circ\cdot f_\circ\cdot a\geq a;\]
hence every $V$-functor induces a pair of adjoint distributors, $f_*\dashv f^*$. A $V$-functor $f\colon X\to Y$ is said to be \emph{fully faithful} if $f^*\cdot f_*=a$, i.e. $X(x,x')=Y(f(x),f(x'))$ for all $x,x'\in X$, while it is \emph{fully dense} if $f_*\cdot f^*=b$, i.e. $Y(y,y')=\bigvee_{x\in X}Y(y,f(x))\otimes Y(f(x),y')$, for all $y,y'\in Y$. A fully faithful $V$-functor $f\colon X\to Y$ does not need to be an injective map; it is so in case $X$ and $Y$ are separated $V$-categories (as defined below).\\
\begin{remark}\label{rem:adjcond}
In $\Cats{V}$ adjointness between $V$-functors
\[Y\adjunct{f}{g}X\]
can be equivalently expressed as:
\[f\dashv g\;\Leftrightarrow\;f_*=g^*\;\Leftrightarrow\; g^*\dashv f^* \;\Leftrightarrow\;(\forall x\in X)\;(\forall y\in Y)\;X(x,g(y))=Y(f(x),y).\]
In fact the latter condition encodes also $V$-functoriality of $f$ and $g$; that is, if $f\colon X\to Y$ and $g\colon Y\to X$ are maps satisfying the condition
\[(\forall x\in X)\;(\forall y\in Y)\;\; X(x,g(y))=Y(f(x),y),\]
then $f$ and $g$ are $V$-functors, with $f\dashv g$.
Furthermore, it is easy to check that, given $V$-categories $X$ and $Y$, a map $f\colon X\to Y$ is a $V$-functor whenever $f_*$ is a distributor (or whenever $f^*$ is a distributor).
\end{remark}
The order defined on $\Cats{V}$ is in general not antisymmetric. For $V$-functors $f,g\colon X\to Y$, one says that $f\simeq g$ if $f\leq g$ and $g\leq f$ (or, equivalently, $f^*=g^*$). For elements $x,y$ of a $V$-category $X$, one says that $x\leq y$ if, considering the $V$-functors $x,y\colon E=(\{*\},k)\to X$ (where $k(*,*)=k$) defined by $x(*)=x$ and $y(*)=y$, one has $x\leq y$; or, equivalently, $X(x,y)\geq k$. Then, for any $V$-functors $f,g\colon X\to Y$, $f\leq g$ if, and only if, $f(x)\leq g(x)$ for every $x\in X$.
\begin{definition}
A $V$-category $Y$ is said to be \emph{separated} if, for $f,g\colon X\to Y$, $f=g$ whenever $f\simeq g$; equivalently, if, for all $x,y\in Y$, $x\simeq y$ implies $x=y$.
\end{definition}
The tensor product $\otimes$ on $V$ induces a tensor product on $\Cats{V}$, with $(X,a)\otimes(Y,b)=(X\times Y,a\otimes b)=X\otimes Y$, where $(X\otimes Y)((x,y),(x',y'))=X(x,x')\otimes Y(y,y')$. The $V$-category $E$ is a $\otimes$-neutral element. With this tensor product, $\Cats{V}$ becomes a monoidal closed category. Indeed, for each $V$-category $X$, the functor $X\otimes (\;)\colon \Cats{V}\to\Cats{V}$ has a right adjoint $(\;)^X$ defined by $Y^X=(\Cats{V}(X,Y), \fspstr{\;}{\;} )$, with $\fspstr{f}{g}=\bigwedge_{x\in X}Y(f(x),g(x))$ (see \cite{EK66, Law73, Kel82} for details).
It is interesting to note the following well-known result (see, for instance, \cite[Theorem 2.5]{CH09}).
\begin{theorem}\label{th:fct_dist}
For $V$-categories $(X,a)$ and $(Y,b)$, and a $V$-relation $\varphi\colon X{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} Y$, the following conditions are equivalent:
\begin{tfae}
\item $\varphi\colon(X,a){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}}(Y,b)$ is a distributor;
\item $\varphi\colon(X,a)^\op\otimes(Y,b)\to(V,\hom)$ is a $V$-functor.
\end{tfae}
\end{theorem}
In particular, the $V$-categorical structure $a$ of $(X,a)$ is a $V$-distributor $a\colon(X,a){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}}(X,a)$, and therefore a $V$-functor $a\colon(X,a)^\op\otimes (X,a)\to(V,\hom)$, which induces, via the closed monoidal structure of $\Cats{V}$, the \emph{Yoneda $V$-functor} $\mathpzc{y}_X\colon(X,a)\to (V,\hom)^{(X,a)^\op}$. Thanks to the theorem above, $V^{X^\op}$ can be equivalently described as
\[PX:=\{\varphi\colon X {\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E\,|\,\varphi \mbox{ $V$-distributor}\}.\]
Then the structure $\widetilde{a}$ on $PX$ is given by \[\widetilde{a}(\varphi,\psi)=\fspstr{\varphi}{\psi}=\bigwedge_{x\in X}\hom(\varphi(x),\psi(x)),\] for every $\varphi, \psi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$, where by $\varphi(x)$ we mean $\varphi(x,*)$, or, equivalently, we consider the associated $V$-functor $\varphi\colon X\to V$. The Yoneda functor $\mathpzc{y}_X\colon X\to PX$ assigns to each $x\in X$ the distributor $x^*\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$, where we identify again $x\in X$ with the $V$-functor $x\colon E\to X$ assigning $x$ to the (unique) element of $E$. Then, for every $\varphi\in PX$ and $x\in X$, we have that \[\fspstr{\mathpzc{y}_X(x)}{\varphi}=\varphi(x),\]
as expected. In particular $\mathpzc{y}_X$ is a fully faithful $V$-functor, being injective on objects (i.e. an injective map) when $X$ is a separated $V$-category. We point out that $(V,\hom)$ is separated, and so is $PX$ for every $V$-category $X$.
For more information on $\Cats{V}$ we refer to \cite[Appendix]{HN20}.
\section{The presheaf monad and its submonads}
The assignment $X\mapsto PX$ defines a functor $P\colon\Cats{V}\to\Cats{V}$: for each $V$-functor $f\colon X\to Y$, $Pf\colon PX\to PY$ assigns to each distributor $\xymatrix{X\ar[r]|{\circ}^{\varphi}&E}$ the distributor $\xymatrix{Y\ar[r]|{\circ}^{f^*}&X\ar[r]|{\circ}^{\varphi}&E}$. It is easily checked that the Yoneda functors $(\mathpzc{y}_X\colon X\to PX)_X$ define a natural transformation $\mathpzc{y}\colon 1\to P$. Moreover, since, for every $V$-functor $f$, the adjunction $f_*\dashv f^*$ yields an adjunction $Pf=(\;)\cdot f^*\dashv (\;)\cdot f_*=:Qf$, $P\mathpzc{y}_X$ has a right adjoint, which we denote by $\mathpzc{m}_X\colon PPX\to PX$. It is straightforward to check that $\mathbb{P}=(P,\mathpzc{m},\mathpzc{y})$ is a 2-monad on $\Cats{V}$ -- the so-called \emph{presheaf monad} --, which, by construction of $\mathpzc{m}_X$ as the right adjoint to $P\mathpzc{y}_X$, is lax idempotent (see \cite{Ho11} for details).\\
Next we present a characterisation of the submonads of $\mathbb{P}$ which is partially in \cite{CH08}. We recall that, given two monads $\mathbb{T}=(T,\mu,\eta)$, $\mathbb{T}'=(T',\mu',\eta')$ on a category $\C$, a monad morphism $\sigma\colon\mathbb{T}\to\mathbb{T}'$ is a natural transformation $\sigma\colon T\to T'$ such that
\begin{equation}\label{eq:monadmorphism}
\xymatrix{1\ar[r]^{\eta}\ar[rd]_{\eta'}&T\ar[d]^{\sigma}&&TT\ar[r]^-{\sigma_T}\ar[d]_{\mu}&T'T\ar[r]^-{T'\sigma}&T'T'\ar[d]^{\mu'}\\
&T'&&T\ar[rr]_{\sigma}&&T'}
\end{equation}
By \emph{submonad of $\mathbb{P}$} we mean a 2-monad $\mathbb{T}=(T,\mu,\eta)$ on $\Cats{V}$ with a monad morphism $\sigma:\mathbb{T}\to\mathbb{P}$ such that $\sigma_X$ is an embedding (i.e. both fully faithful and injective on objects) for every $V$-category $X$.
\begin{definition}\label{def:admi}
Given a class $\Phi$ of $V$-distributors, for every $V$-category $X$ let \[\Phi X=\{\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E\,|\,\varphi\in\Phi\}\] have the $V$-category structure inherited from the one of $PX$. We say that $\Phi$ is \emph{admissible} if, for every $V$-functor $f\colon X\to Y$ and $V$-distributors $\varphi\colon Z{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Y$ and $\psi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Z$ in $\Phi$,
\begin{itemize}
\item[(1)] $f^*\in\Phi$;
\item[(2)] $\psi\cdot f^*\in\Phi$ and $f^*\cdot \varphi\in\Phi$;
\item[(3)] $\varphi\in\Phi\;\Leftrightarrow\;(\forall y\in Y)\;y^*\cdot\varphi\in\Phi$;
\item[(4)] for every $V$-distributor $\gamma\colon PX{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$, if the restriction of $\gamma$ to $\Phi X$ belongs to $\Phi$, then $\gamma\cdot(\mathpzc{y}_X)_*\in\Phi$.
\end{itemize}
\end{definition}
\begin{lemma}
Every admissible class $\Phi$ of $V$-distributors induces a submonad $\Phi=(\Phi,\mathpzc{m}^\Phi,\mathpzc{y}^\Phi)$ of $\mathbb{P}$.
\end{lemma}
\begin{proof}
For each $V$-category $X$, equip $\Phi X$ with the initial structure induced by the inclusion $\sigma_X\colon \Phi X\to PX$, that is, for every $\varphi,\psi\in \Phi X$, $\Phi X(\varphi,\psi)=PX(\varphi,\psi)$. For each $V$-functor $f\colon X\to Y$ and $\varphi\in\Phi X$, by condition (2), $\varphi\cdot f^*\in\Phi$, and so $Pf$ (co)restricts to $\Phi f\colon\Phi X\to\Phi Y$.
Condition (1) guarantees that $\mathpzc{y}_X\colon X\to PX$ corestricts to $\mathpzc{y}^\Phi_X\colon X\to \Phi X$.
Finally, condition (4) guarantees that $\mathpzc{m}_X\colon PPX\to PX$ also (co)restricts to $\mathpzc{m}^\Phi_X:\Phi\Phi X\to\Phi X$: if $\gamma\colon\Phi X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ belongs to $\Phi$, then $\widetilde{\gamma}:=\gamma\cdot (\sigma_X)^*\colon PX{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ belongs to $\Phi$ by (2), and then, since $\gamma$ is the restriction of $\widetilde{\gamma}$ to $\Phi X$, by (4) $\mathpzc{m}_X(\widetilde{\gamma})=\gamma\cdot(\sigma_X)^*\cdot(\mathpzc{y}_X)_*
=\gamma\cdot(\sigma_X)^*\cdot(\sigma_X)_*\cdot(\mathpzc{y}^\Phi_X)_*=\gamma\cdot(\mathpzc{y}^\Phi_X)_*\in\Phi$.
By construction, $(\sigma_X)_X$ is a natural transformation, each $\sigma_X$ is an embedding, and $\sigma$ makes diagrams \eqref{eq:monadmorphism} commute.
\end{proof}
\begin{theorem}\label{th:Phi}
For a 2-monad $\mathbb{T}=(T,\mu,\eta)$ on $\Cats{V}$, the following assertions are equivalent:
\begin{tfae}
\item $\mathbb{T}$ is isomorphic to $\Phi$, for some admissible class of $V$-distributors $\Phi$.
\item $\mathbb{T}$ is a submonad of $\mathbb{P}$.
\end{tfae}
\end{theorem}
\begin{proof}
(i) $\Rightarrow$ (ii) follows from the lemma above.\\
(ii) $\Rightarrow$ (i): Let $\sigma\colon\mathbb{T}\to\mathbb{P}$ be a monad morphism, with $\sigma_X$ an embedding for every $V$-category $X$, which, for simplicity, we assume to be an inclusion. First we show that
\begin{equation}\label{eq:fai}
\Phi=\{\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Y\,|\,\forall y\in Y\;y^*\cdot\varphi\in TX\}
\end{equation}
is admissible. In the sequel $f\colon X\to Y$ is a $V$-functor.\\
(1) For each $x\in X$, $x^*\cdot f^*=f(x)^*\in TY$, and so $f^*\in\Phi$.\\
(2) If $\psi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Z$ is a $V$-distributor in $\Phi$, and $z\in Z$, since $z^*\cdot\psi\in TX$, $T f(z^*\cdot\psi)=z^*\cdot\psi\cdot f^*\in TY$, and therefore $\psi\cdot f^*\in \Phi$ by definition of $\Phi$.
Now, if $\varphi\colon Z{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Y\in\Phi$, then, for each $x\in X$, $x^*\cdot f^*\cdot\varphi=f(x)^*\cdot\varphi\in TZ$ because $\varphi\in\Phi$, and so $f^*\cdot\varphi\in\Phi$.\\
(3) follows from the definition of $\Phi$.\\
(4) If the restriction of $\gamma\colon PX{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ to $TX$, i.e., $\gamma\cdot(\sigma_X)_*$, belongs to $\Phi$, then $\mu_X(\gamma\cdot(\sigma_X)_*)=\gamma\cdot(\sigma_X)_*\cdot(\eta_X)_*=\gamma\cdot(\mathpzc{y}_X)_*$ belongs to $TX$.
\end{proof}
We point out that, with $\mathbb{P}$, also $\mathbb{T}$ is lax idempotent. This assertion is shown at the end of the next section, making use of the Beck-Chevalley conditions we study next. (We note that the arguments of \cite[Prop. 16.2]{CLF20}, which states conditions under which a submonad of a lax idempotent monad is still lax idempotent, cannot be used directly here.)
\section{The presheaf monad and Beck-Chevalley conditions}
In this section our aim is to show that $\mathbb{P}$ verifies some interesting conditions of Beck-Chevalley type, that resemble the BC conditions studied in \cite{CHJ14}. We recall from \cite{CHJ14} that a commutative square in $\Set$
\[\xymatrix{W\ar[r]^l\ar[d]_g&Z\ar[d]^h\\
X\ar[r]_f&Y}\]
is said to be a \emph{BC-square} if the following diagram commutes in $\Rel$
\[\xymatrix{W\ar[r]|-{\object@{|}}^{l_\circ}&Z\\
X\ar[u]|-{\object@{|}}^{g^\circ}\ar[r]|-{\object@{|}}_{f_\circ}&Y,\ar[u]|-{\object@{|}}_{h^\circ}}\]
where, given a map $t\colon A \to B$, $t_\circ\colon A{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} B$ denotes the relation defined by $t$ and $t^\circ\colon B{\longrightarrow\hspace*{-2.8ex}{\mapstochar}\hspace*{2.6ex}} A$ its opposite. Since $t_\circ\dashv t^\circ$ in $\Rel$, this is in fact a kind of Beck-Chevalley condition. A $\Set$-endofunctor $T$ is said to satisfy BC if it preserves BC-squares, while a natural transformation $\alpha\colon T\to T'$ between two $\Set$-endofunctors satisfies BC if, for each map $f\colon X\to Y$, its naturality square
\[\xymatrix{TX\ar[r]^{\alpha_X}\ar[d]_{Tf}&T'X\ar[d]^{T'f}\\
TY\ar[r]_{\alpha_Y}&T'Y}\]
is a BC-square.
In our situation, for endofunctors and natural transformations in $\Cats{V}$, the role of $\Rel$ is played by $\Dists{V}$.
\begin{definition}
A commutative square in $\Cats{V}$
\[\xymatrix{(W,d)\ar[r]^l\ar[d]_g&(Z,c)\ar[d]^h\\
(X,a)\ar[r]_f&(Y,b)}\]
is said to be a \emph{BC*-square} if the following diagram commutes in $\Dists{V}$
\begin{equation}\label{diag:BC*}
\xymatrix{(W,d)\ar[r]|-{\circ}^{l_*}&(Z,c)\\
(X,a)\ar[u]|-{\circ}^{g^*}\ar[r]|-{\circ}_{f_*}&(Y,b)\ar[u]|-{\circ}_{h^*}}\end{equation}
(or, equivalently, $h^*\cdot f_*\leq l_*\cdot g^*$).
\end{definition}
\begin{remarks}\label{rem:BC*}
\begin{enumerate}
\item For a $V$-functor $f\colon(X,a)\to(Y,b)$, to be fully faithful is equivalent to
\[\xymatrix{(X,a)\ar[r]^1\ar[d]_1&(X,a)\ar[d]^f\\
(X,a)\ar[r]_f&(Y,b)}\]
being a BC*-square (exactly in parallel with the characterisation of monomorphisms via BC-squares).
\item We point out that, contrarily to the case of BC-squares, in BC*-squares the horizontal and the vertical arrows play different roles; that is, the fact that diagram \eqref{diag:BC*} is a BC*-square is not equivalent to
\[\xymatrix{(W,d)\ar[r]^g\ar[d]_l&(X,a)\ar[d]^f\\
(Z,c)\ar[r]_h&(Y,b)}\]
being a BC*-square; it is indeed equivalent to its \emph{dual}
\[\xymatrix{(W,d^\circ)\ar[r]^g\ar[d]_l&(X,a^\circ)\ar[d]^f\\
(Z,c^\circ)\ar[r]_h&(Y,b^\circ)}\]
being a BC*-square.
\end{enumerate}
\end{remarks}
\begin{definitions}
\begin{enumerate}
\item A \emph{functor $T\colon \Cats{V}\to\Cats{V}$ satisfies BC*} if it preserves BC*-squares.
\item Given two endofunctors $T,T'$ on $\Cats{V}$, a \emph{natural transformation $\alpha\colon T\to T'$ satisfies BC*} if the naturality diagram
\[\xymatrix{TX\ar[r]^{\alpha_X}\ar[d]_{Tf}&T'X\ar[d]^{T'f}\\
TY\ar[r]_{\alpha_Y}&T'Y}\]
is a BC*-square for every morphism $f$ in $\Cats{V}$.
\item \emph{A 2-monad $\mathbb{T}=(T,\mu,\eta)$} on $\Cats{V}$ is said to satisfy \emph{fully BC*} if $T$, $\mu$, and $\eta$ satisfy BC*.
\end{enumerate}
\end{definitions}
\begin{remark}
In the case of $\Set$ and $\Rel$, since the condition of being a BC-square is equivalent, under the Axiom of Choice (AC), to being a weak pullback, a $\Set$-monad $\mathbb{T}$ \emph{satisfies fully BC} if, and only if, it is \emph{weakly cartesian} (again, under (AC)). This, together with the fact that there are relevant $\Set$-monads -- like for instance the ultrafilter monad -- whose functor and multiplication satisfy BC but the unit does not, led the authors of \cite{CHJ14} to name such monads as \emph{BC-monads}. This is the reason why we use \emph{fully BC*} instead of BC* to identify these $\Cats{V}$-monads.
As a side remark we recall that, still in the $\Set$-context, a partial BC-condition was studied by Manes in \cite{Ma02}: for a $\Set$-monad $\mathbb{T}=(T,\mu,\eta)$ to be \emph{taut} requires that $T$, $\mu$, $\eta$ satisfy BC for commutative squares where $f$ is monic.
\end{remark}
Our first use of BC* is the following characterisation of lax idempotency for a 2-monad $\mathbb{T}$ on $\Cats{V}$.
\begin{prop}\label{prop:laxidpt} Let $\mathbb{T}=(T,\mu,\eta)$ be a 2-monad on $\Cats{V}$.
\begin{enumerate}
\item The following assertions are equivalent:
\begin{tfae}
\item[\em (i)] $\mathbb{T}$ is lax idempotent.
\item[\em (ii)] For each $V$-category $X$, the diagram
\begin{equation}\label{eq:laxidpt}
\xymatrix{TX\ar[r]^-{T\eta_X}\ar[d]_{\eta_{TX}}&TTX\ar[d]^{\mu_X}\\
TTX\ar[r]_-{\mu_X}&TX}
\end{equation}
is a BC*-square.
\end{tfae}
\item If $\mathbb{T}$ is lax idempotent, then $\mu$ satisfies BC*.
\end{enumerate}
\end{prop}
\begin{proof}
(1) (i) $\Rightarrow$ (ii): The monad $\mathbb{T}$ is lax idempotent if, and only if, for every $V$-category $X$, $T\eta_X\dashv \mu_X$, or, equivalently, $\mu_X\dashv \eta_{TX}$. These two conditions are equivalent to $(T\eta_X)_*=(\mu_X)^*$ and $(\mu_X)_*=(\eta_{TX})^*$. Hence $(\mu_X)^*(\mu_X)_*=(T\eta_X)_* (\eta_{TX})^*$ as claimed.
(ii) $\Rightarrow$ (i): From $(\mu_X)^* (\mu_X)_*=(T\eta_X)_* (\eta_{TX})^*$ it follows that
\[(\mu_X)_*=(\mu_X)_* (\mu_X)^* (\mu_X)_*=(\mu_X\cdot T\eta_X)_* (\eta_{TX})^*=(\eta_{TX})^*,\]
that is, $\mu_X\dashv \eta_{TX}$.\\
(2) BC* for $\mu$ follows directly from lax idempotency of $\mathbb{T}$, since
\[\xymatrix{TTX\ar[r]^-{(\mu_X)_*}|-{\circ}&TX\ar@{}[rrd]|{=}&&TTX\ar[r]^-{(\eta_{TX})^*}|-{\circ}&TX\\
TTY\ar[u]^{(TTf)^*}|-{\circ}\ar[r]_-{(\mu_Y)_*}|-{\circ}&TY\ar[u]_{(Tf)^*}|-{\circ}&&
TTY\ar[u]^{(TTf)^*}|-{\circ}\ar[r]_-{(\eta_{TY})^*}|-{\circ}&TY\ar[u]_{(Tf)^*}|-{\circ}}\]
and the latter diagram commutes trivially.\\
\end{proof}
\begin{remark}
Thanks to Remarks \ref{rem:BC*} we know that, if we invert the role of $\eta_{TX}$ and $T\eta_X$ in \eqref{eq:laxidpt}, we get a characterisation of oplax idempotent 2-monads: $\mathbb{T}$ is oplax idempotent if, and only if, the diagram
\[\xymatrix{TX\ar[r]^-{\eta_{TX}}\ar[d]_{T\eta_{X}}&TTX\ar[d]^{\mu_X}\\
TTX\ar[r]_-{\mu_X}&TX}\]
is a BC*-square.
\end{remark}
\begin{theorem}
The presheaf monad $\mathbb{P}=(P,\mathpzc{m},\mathpzc{y})$ satisfies fully BC*.
\end{theorem}
\begin{proof}
(1) \emph{$P$ satisfies BC*}: Given a BC*-square
\[\xymatrix{(W,d)\ar[r]^l\ar[d]_g&(Z,c)\ar[d]^h\\
(X,a)\ar[r]_f&(Y,b)}\]
in $\Cats{V}$, we want to show that
\begin{equation}\label{eq:BC}
\xymatrix{PW\ar[r]|-{\circ}^{(Pl)_*}&PZ\\
PX\ar@{}[ru]|{\geq}\ar[u]|-{\circ}^{(Pg)^*}\ar[r]|-{\circ}_{(Pf)_*}&PY.\ar[u]|-{\circ}_{(Ph)^*}}
\end{equation}
For each $\varphi\in PX$ and $\psi\in PZ$, we have
\begin{align*}
(Ph)^*(Pf)_*(\varphi,\psi)&= (Ph)^\circ\cdot\widetilde{b}\cdot Pf(\varphi,\psi)\\
&=\widetilde{b}(Pf(\varphi),Ph(\psi))\\
&= \bigwedge_{y\in Y}\hom(\varphi\cdot f^*(y),\psi\cdot h^*(y))\\
&\leq \displaystyle\bigwedge_{x\in X} \hom(\varphi\cdot f^*\cdot f_*(x),\psi\cdot h^*\cdot f_*(x))\\
&\leq \displaystyle\bigwedge_{x\in X}\hom(\varphi(x),\psi\cdot l_*\cdot g^*(x))&\mbox{($\varphi\leq \varphi\cdot f^*\cdot f_*$, \eqref{eq:BC} is BC*)}\\
&= \widetilde{a}(\varphi,\psi\cdot l_*\cdot g^*)\\
&\leq \widetilde{a}(\varphi,\psi\cdot l_*\cdot g^*)\otimes \widetilde{c}(\psi\cdot l_*\cdot l^*,\psi)&\mbox{(because $\psi\cdot l_*\cdot l^*\leq\psi$)}\\
&=\widetilde{a}(\varphi,Pg(\psi\cdot l_*))\otimes\widetilde{c}(Pl(\psi\cdot l_*),\psi)\\
&\leq \displaystyle\bigvee_{\gamma\in PW}\widetilde{a}(\varphi,Pg(\gamma))\otimes\widetilde{c}(Pl(\gamma),\psi)\\
&=(Pl)_*(Pg)^*(\varphi,\psi).
\end{align*}
(2) \emph{$\mathpzc{m}$ satisfies BC*}: For each $V$-functor $f\colon X\to Y$, from the naturality of $\mathpzc{y}$ it follows that the following diagram
\[\xymatrix{PPX\ar[r]|-{\circ}^-{(\mathpzc{y}_{PX})^*}&PX\\
PPY\ar[u]|-{\circ}^{(PPf)^*}\ar[r]|-{\circ}_-{(\mathpzc{y}_{PY})^*}&PY\ar[u]|-{\circ}_{(Pf)^*}}\]
commutes. Lax idempotency of $\mathbb{P}$ means in particular that $\mathpzc{m}_X\dashv \mathpzc{y}_{PX}$, or, equivalently, $(\mathpzc{m}_X)_*=(\mathpzc{y}_{PX})^*$, and therefore the commutativity of this diagram shows BC* for $\mathpzc{m}$.
(3) \emph{$\mathpzc{y}$ satisfies BC*}: Once again, for each $V$-functor $f\colon(X,a)\to(Y,b)$, we want to show that the diagram
\[\xymatrix{X\ar[r]|-{\circ}^-{(\mathpzc{y}_X)_*}&PX\\
Y\ar[u]|-{\circ}^{f^*}\ar[r]|-{\circ}_-{(\mathpzc{y}_Y)_*}&PY\ar[u]|-{\circ}_{(Pf)^*}}\]
commutes. Let $y\in Y$ and $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ belong to $PX$. Then
\begin{align*}((Pf)^*(\mathpzc{y}_Y)_*)(y,\varphi)&=((Pf)^\circ\cdot \widetilde{b}\cdot\mathpzc{y}_Y)(y,\varphi)=\widetilde{b}(\mathpzc{y}_Y(y),Pf(\varphi))=Pf(\varphi)(y)=\bigvee_{x\in X}b(y,f(x))\otimes\varphi(x)\\
&=\bigvee_{x\in X}b(y,f(x))\otimes\widetilde{a}(\mathpzc{y}_X(x),\varphi)=(\widetilde{a}\cdot\mathpzc{y}_X\cdot f^\circ\cdot b)(y,\varphi)=(\mathpzc{y}_X)_*\cdot f^*(y,\varphi),
\end{align*}
as claimed.
\end{proof}
\begin{corollary}\label{cor:laxidpt}
Let $\mathbb{T}=(T,\mu,\eta)$ be a 2-monad on $\Cats{V}$, and $\sigma\colon\mathbb{T}\to\mathbb{P}$ be a monad morphism, pointwise fully faithful. Then $\mathbb{T}$ is lax idempotent.
\end{corollary}
\begin{proof}
We know that $\mathbb{P}$ is lax idempotent, and so, for every $V$-category $X$, $(\mathpzc{m}_X)_*=(\mathpzc{y}_{PX})^*$.
Consider diagram \eqref{eq:monadmorphism}. The commutativity of the diagram on the right gives that $(\mu_X)_*=(\sigma_X)^*(\sigma_X)_*(\mu_X)_*=(\sigma_X)^*(\mathpzc{m}_X)_*(P\sigma_X)_*(\sigma_{TX})_*$; using the equality above, and preservation of fully faithful $V$-functors by $\mathbb{P}$ -- which follows from BC* -- we obtain:
\begin{align*}
(\mu_X)_*&=(\sigma_X)^*(\mathpzc{y}_{PX})^*(P\sigma_X)_*(\sigma_{TX})_*=(\sigma_X)^*(\eta_{PX})^*(\sigma_{PX})^*(P\sigma_X)_*(\sigma_{TX})_*
\\
&=(\eta_{TX})^*\cdot (\sigma_{TX})^*(P\sigma_X)^*(P\sigma_X)_*(\sigma_{TX})_*=(\eta_{TX})^*.\end{align*}
\end{proof}
\section{Presheaf submonads and Beck-Chevalley conditions}
In this section, for a general 2-monad $\mathbb{T}=(T,\mu,\eta)$ on $\Cats{V}$, we relate its BC* properties with the existence of a (sub)monad morphism $\mathbb{T}\to\mathbb{P}$. We remark that a necessary condition for $\mathbb{T}$ to be a submonad of $\mathbb{P}$ is that $TX$ is separated for every $V$-category $X$, since $PX$ is separated and separated $V$-categories are stable under monomorphisms.
\begin{theorem}\label{th:submonad}
For a 2-monad $\mathbb{T}=(T,\mu,\eta)$ on $\Cats{V}$ with $TX$ separated for every $V$-category $X$, the following assertions are equivalent:
\begin{tfae}
\item $\mathbb{T}$ is a submonad of $\mathbb{P}$.
\item $\mathbb{T}$ is lax idempotent and satisfies BC*, and both $\eta_X$ and $Q\eta_X\cdot\mathpzc{y}_{TX}$ are fully faithful, for each $V$-category $X$.
\item $\mathbb{T}$ is lax idempotent, $\mu$ and $\eta$ satisfy BC*, and both $\eta_X$ and $Q\eta_X\cdot\mathpzc{y}_{TX}$ are fully faithful, for each $V$-category $X$.
\item $\mathbb{T}$ is lax idempotent, $\eta$ satisfies BC*, and both $\eta_X$ and $Q\eta_X\cdot\mathpzc{y}_{TX}$ are fully faithful, for each $V$-category $X$.
\end{tfae}
\end{theorem}
\begin{proof}
(i) $\Rightarrow$ (ii): By (i) there exists a monad morphism $\sigma\colon \mathbb{T}\to \mathbb{P}$ with $\sigma_X$ an embedding for every $V$-category $X$.
By Corollary \ref{cor:laxidpt}, with $\mathbb{P}$, also $\mathbb{T}$ is lax idempotent.
Moreover, from $\sigma_X\cdot \eta_X=\mathpzc{y}_X$, with $\mathpzc{y}_X$, also $\eta_X$ is fully faithful. (In fact this is valid for any monad with a monad morphism into $\mathbb{P}$.)
To show that $\mathbb{T}$ satisfies BC* we use the characterisation of Theorem \ref{th:Phi}; that is, we know that there is an admissible class $\Phi$ of distributors so that $\mathbb{T}=\Phi$. Then BC* for $T$ follows directly from the fact that $\Phi f$ is a (co)restriction of $Pf$, for every $V$-functor $f$.
BC* for $\eta$ follows from BC* for $\mathpzc{y}$ and full faithfulness of $\sigma$ since, for any commutative diagram in $\Cats{V}$
\[\xymatrix{\cdot\ar[r]\ar[d]&\cdot\ar[r]^f\ar[d]&\cdot\ar[d]\\
\cdot\ar@{}[ru]|{\fbox{1}}\ar[r]&\ar@{}[ru]|{\fbox{2}}\cdot\ar[r]_g&\cdot}\]
with $\fbox{1}\fbox{2}$ satisfying BC*, and $f$ and $g$ fully faithful, also $\fbox{1}$ satisfies BC*.
Thanks to Proposition \ref{prop:laxidpt}, BC* for $\mu$ follows directly from lax idempotency of $\mathbb{T}$.\\
The implications (ii) $\Rightarrow$ (iii) $\Rightarrow$ (iv) are obvious.\\
(iv) $\Rightarrow$ (i): For each $V$-category $(X,a)$, we denote by $\widehat{a}$ the $V$-category structure on $TX$, and define
the $V$-functor $(\xymatrix{TX\ar[r]^-{\sigma_X}&PX})=(\xymatrix{TX\ar[r]^-{\mathpzc{y}_{TX}}&PTX\ar[r]^-{Q\eta_X}&PX})$;
that is, $\sigma_X(\mathfrak{x})=(\xymatrix{X\ar[r]^{\eta_X}&TX\ar[r]|-{\object@{|}}^{\widehat{a}}&TX\ar[r]|-{\object@{|}}^-{\mathfrak{x}^\circ}&E})=\widehat{a}(\eta_X(\;),\mathfrak{x})$. As a composite of fully faithful $V$-functors, $\sigma_X$ is fully faithful; moreover, it is an embedding because, by hypothesis, $TX$ and $PX$ are separated $V$-categories.\\
To show that \emph{$\sigma=(\sigma_X)_X\colon T\to P$ is a natural transformation}, that is, for each $V$-functor $f\colon X\to Y$,
the outer diagram
\[\xymatrix{TX\ar[r]^{\mathpzc{y}_{TX}}\ar[d]_{Tf}&PTX\ar[r]^{Q\eta_X}\ar[d]|{PTf}&PX\ar[d]^{Pf}\\
TY\ar@{}[ru]|{\fbox{1}}\ar[r]_{\mathpzc{y}_{TY}}&PTY\ar@{}[ru]|{\fbox{2}}\ar[r]_{Q\eta_Y}&PY}\]
commutes, we only need to observe that $\fbox{1}$ is commutative and BC* for $\eta$ implies that $\fbox{2}$ is commutative.\\
It remains to show \emph{$\sigma$ is a monad morphism}: for each $V$-category $(X,a)$ and $x\in X$, \[(\sigma_X\cdot\eta_X)(x)=\widehat{a}(\eta_X(\;),\eta_X(x))=a(-,x)=x^*=\mathpzc{y}_X(x),\] and so $\sigma\cdot \eta=\mathpzc{y}$.
To check that, for every $V$-category $(X,a)$, the following diagram commutes
\[\xymatrix{TTX\ar[r]^{\sigma_{TX}}\ar[d]_\mu&PTX\ar[r]^{P\sigma_X}&PPX\ar[d]^{\mathpzc{m}_X}\\
TX\ar[rr]_{\sigma_X}&&PX,}\]
let $\mathfrak{X}\in TTX$. We have
\begin{align*}
\mathpzc{m}_X\cdot P\sigma_X\cdot \sigma_{TX}(\mathfrak{X})&=(\xymatrix{X\ar[r]^{\mathpzc{y}_X}&PX\ar[r]|-{\object@{|}}^{\widetilde{a}}&PX\ar[r]|-{\object@{|}}^{\sigma_X^\circ}&
TX\ar[r]^{\eta_{TX}}&TTX\ar[r]|-{\object@{|}}^{\widehat{\widehat{a}}}&TTX\ar[r]|-{\object@{|}}^-{\mathfrak{X}^\circ}&E})\\
&=(\xymatrix{X\ar[r]^{\eta_X}&TX\ar[r]|-{\object@{|}}^{\widehat{a}}&TX\ar[r]^{\eta_{TX}}&TTX\ar[r]|-{\object@{|}}^{\widehat{\widehat{a}}}&TTX\ar[r]|-{\object@{|}}^-{\mathfrak{X}^\circ}&E}),
\end{align*}
since $\sigma_X^\circ\cdot\widetilde{a}\cdot\mathpzc{y}_X(x,\mathfrak{x})=\widetilde{a}(\mathpzc{y}_X(x),\sigma_X(\mathfrak{x}))=\sigma_X(\mathfrak{x})(x)=\widehat{a}\cdot\eta_X(x,\mathfrak{x})$, and
\[\sigma_X\cdot \mu_X(\mathfrak{X})=(\xymatrix{X\ar[r]^{\eta_X}&TX\ar[r]|-{\object@{|}}^{\widehat{a}}&TX\ar[r]|-{\object@{|}}^{\mu_X^\circ}&TTX\ar[r]|-{\object@{|}}^-{\mathfrak{X}^\circ}&E}).\]
Hence the commutativity of the diagram follows from the equality $\widehat{\widehat{a}}\cdot \eta_{TX}\cdot\widehat{a}\cdot\eta_X=\mu_X^\circ\cdot \widehat{a}\cdot\eta_X$ we show next. Indeed,
\[\widehat{\widehat{a}}\cdot\eta_{TX}\cdot\widehat{a}\cdot\eta_X=(\eta_{TX})_*(\eta_X)_*=(\eta_{TX}\cdot\eta_X)_*=(T\eta_X\cdot\eta_X)_*=(T\eta_X)_*(\eta_X)_*=
\mu_X^* (\eta_X)_*=\mu_X^\circ\cdot\widehat{a}\cdot\eta_X.\]
\end{proof}
The proof of the theorem allows us to conclude immediately the following result.
\begin{corollary}\label{cor:morphism}
Given a 2-monad $\mathbb{T}=(T,\mu,\eta)$ on $\Cats{V}$ such that $\eta$ satisfies BC*, there is a monad morphism $\mathbb{T}\to\mathbb{P}$ if, and only if, $\eta$ is pointwise fully faithful.
\end{corollary}
\section{On algebras for submonads of $\mathbb{P}$: a survey}
In the remainder of this paper we will study, given a submonad $\mathbb{T}$ of $\mathbb{P}$, the category $(\Cats{V})^\mathbb{T}$ of (Eilenberg-Moore) $\mathbb{T}$-algebras. Here we collect some known results which will be useful in the following sections. We will denote by $\Phi(\mathbb{T})$ the admissible class of distributors that induces the monad $\mathbb{T}$ (defined in \eqref{eq:fai}).
The following result, which is valid for any lax-idempotent monad $\mathbb{T}$, asserts that, for any $V$-category, to be a $\mathbb{T}$-algebra is a property (see, for instance, \cite{EF99} and \cite{CLF20}).
\begin{theorem}\label{th:KZ}
Let $\mathbb{T}$ be a lax idempotent monad on $\Cats{V}$.
\begin{enumerate}
\item For a $V$-category $X$, the following assertions are equivalent:
\begin{tfae}
\item[\em (i)] $\alpha\colon TX\to X$ is a $\mathbb{T}$-algebra structure on $X$;
\item[\em (ii)] there is a $V$-functor $\alpha\colon TX\to X$ such that $\alpha\dashv\eta_X$ with $\alpha\cdot\eta_X=1_X$;
\item[\em (iii)] there is a $V$-functor $\alpha\colon TX\to X$ such that $\alpha\cdot\eta_X=1_X$;
\item[\em (iv)] $\alpha\colon TX\to X$ is a split epimorphism in $\Cats{V}$.
\end{tfae}
\item If $(X,\alpha)$ and $(Y,\beta)$ are $\mathbb{T}$-algebra structures, then every $V$-functor $f\colon X\to Y$ satisfies $\beta\cdot Tf\leq f\cdot\alpha$.
\end{enumerate}
\end{theorem}
Next we formulate characterisations of $\mathbb{T}$-algebras that can be found in \cite{Ho11, CH08}, using \emph{injectivity} with respect to certain \emph{embeddings}, and using the existence of certain \emph{weighted colimits}, notions that we recall very briefly in the sequel.
\begin{definition}\cite{Es98}
A $V$-functor $f\colon X\to Y$ is a \emph{$T$-embedding} if $Tf$ is a left adjoint right inverse; that is, there exists a $V$-functor $Tf_\sharp$ such that $Tf\dashv Tf_\sharp$ and $Tf_\sharp\cdot Tf=1_{TX}$.
\end{definition}
For each submonad $\mathbb{T}$ of $\mathbb{P}$, the class $\Phi(\mathbb{T})$ allows us to identify easily the $T$-embeddings.
\begin{prop}\label{prop:emb}
For a $V$-functor $h\colon X\to Y$, the following assertions are equivalent:
\begin{tfae}
\item $h$ is a $T$-embedding;
\item $h$ is fully faithful and $h_*$ belongs to $\Phi(\mathbb{T})$.
\end{tfae}
In particular, $P$-embeddings are exactly the fully faithful $V$-functors.
\end{prop}
\begin{proof}
(ii) $\Rightarrow$ (i): Let $h$ be fully faithful with $h_*\in\Phi(\mathbb{T})$. As in the case of the presheaf monad, $\Phi h:\Phi X\to\Phi Y$ has always a right adjoint whenever $h_*\in\Phi(\mathbb{T})$, $\Phi^\dashv h:=(-)\cdot h_*\colon \Phi Y\to\Phi X$; that is, for each distributor $\psi:Y{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ in $\Phi Y$, $\Phi^\dashv h(\psi)=\psi\cdot h_*$, which is well defined because by hypothesis $h_*\in\Phi(\mathbb{T})$. If $h$ is fully faithful, that is, if $h^*\cdot h_*=(1_X)^*$, then $(\Phi^\dashv h\cdot \Phi h)(\varphi)=\varphi\cdot h^*\cdot h_*=\varphi$.
(i) $\Rightarrow$ (ii): If $\Phi^\dashv h$ is well-defined, then $y^*\cdot h_*$ belongs to $\Phi(\mathbb{T})$ for every $y\in Y$, and hence $h_*\in \Phi(\mathbb{T})$, by \ref{def:admi}(3). Moreover, if $\Phi^\dashv h\cdot \Phi h=1_{\Phi X}$, then in particular $x^*\cdot h^*\cdot h_*=x^*$, for every $x\in X$, which is easily seen to be equivalent to $h^*\cdot h_*=(1_X)^*$.
\end{proof}
In $\Dists{V}$, given a $V$-distributor $\varphi\colon (X,a){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} (Y,b)$, the functor $(\;)\cdot\varphi$ preserves suprema, and therefore it has a right adjoint $[\varphi,-]$ (since the hom-sets in $\Dists{V}$ are complete ordered sets):
\[\Dist(X,Z)\adjunct{(\;)\cdot\varphi}{[\varphi,-]}\Dist(Y,Z).\]
For each distributor $\psi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Z$,
\[\xymatrix{X\ar[r]|-{\circ}^{\psi}\ar[d]|-{\circ}_{\varphi}&Z\\
Y\ar@{}[ru]^{\leq}\ar[ru]|-{\circ}_{[\varphi,\psi]}}\]
$[\varphi,\psi]\colon Y{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Z$ is defined by
\[ [\varphi,\psi](y,z)=\bigwedge_{x\in X}\,\hom(\varphi(x,y),\psi(x,z)).\]
\begin{definitions}\begin{enumerate}
\item Given a $V$-functor $f\colon X\to Z$ and a distributor (here called \emph{weight}) $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Y$, a \emph{$\varphi$-weighted colimit} of $f$ (or simply a \emph{$\varphi$-colimit} of $f$), whenever it exists, is a $V$-functor $g\colon Y\to Z$ such that $g_*=[\varphi,f_*]$. One says then that \emph{$g$ represents $[\varphi,f_*]$}.
\item A $V$-category $Z$ is called \emph{$\varphi$-cocomplete} if it has a colimit for each weighted diagram with weight $\varphi\colon(X,a){\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}}(Y,b)$; i.e. for each $V$-functor $f\colon X\to Z$, the $\varphi$-colimit of $f$ exists.
\item Given a class $\Phi$ of $V$-distributors, a $V$-category $Z$ is called \emph{$\Phi$-cocomplete} if it is $\varphi$-cocomplete for every $\varphi\in \Phi$. When $\Phi=\Dists{V}$, then $Z$ is said to be \emph{cocomplete}.
\end{enumerate}
\end{definitions}
The proof of the following result can be found in \cite{Ho11, CH08}.
\begin{theorem}\label{th:ch}
Given a submonad $\mathbb{T}$ of $\mathbb{P}$, for a $V$-category $X$ the following assertions are equivalent:
\begin{tfae}
\item $X$ is a $\mathbb{T}$-algebra.
\item $X$ is injective with respect to $T$-embeddings.
\item $X$ is $\Phi(\mathbb{T})$-cocomplete.
\end{tfae}
\end{theorem}
$\Phi(\mathbb{T})$-cocompleteness of a $V$-category $X$ is guaranteed by the existence of some special weighted colimits, as we explain next. (Here we present very briefly the properties needed. For more information on this topic see \cite{St04}.)
\begin{lemma}
For a distributor $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Y$ and a $V$-functor $f\colon X\to Z$, the following assertions are equivalent:
\begin{tfae}
\item there exists the $\varphi$-colimit of $f$;
\item there exists the $(\varphi\cdot f^*)$-colimit of $1_Z$;
\item for each $y\in Y$, there exists the $(y^*\cdot\varphi)$-colimit of $f$.
\end{tfae}
\end{lemma}
\begin{proof}
(i) $\Leftrightarrow$ (ii): It is straightforward to check that
\[ [\varphi,f_*]=[\varphi\cdot f^*,(1_Z)_*].\]
(i) $\Leftrightarrow$ (iii): Since $[\varphi,f_*]$ is defined pointwise, it is easily checked that, if $g$ represents $[\varphi,f_*]$, then, for each $y\in Y$, the $V$-functor $\xymatrix{E\ar[r]^y&Y\ar[r]^g&Z}$ represents $[y^*\cdot \varphi,f_*]$.
Conversely, if, for each $y\colon E\to Y$, $g_y\colon E\to Z$ represents $[y^*\cdot\varphi,f_*]$, then the map $g\colon Y\to Z$ defined by $g(y)=g_y(*)$ is such that $g_*=[\varphi,f_*]$; hence, as stated in Remark \ref{rem:adjcond}, $g$ is automatically a $V$-functor.
\end{proof}
\begin{corollary}
Given a submonad $\mathbb{T}$ of $\mathbb{P}$, a $V$-category $X$ is a $\mathbb{T}$-algebra if, and only if, $[\varphi, (1_X)_*]$ has a colimit for every $\varphi\in TX$.
\end{corollary}
\begin{remark}
Given $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ in $TX$, in the diagram
\[\xymatrix{X\ar[r]|-{\circ}^{a}\ar[d]|-{\circ}_{\varphi}&X\\
Y\ar@{}[ru]^{\leq}\ar[ru]|-{\circ}_{[\varphi,a]}}\]
\[[\varphi,a](*,x)=\bigwedge_{x'\in X}\hom(\varphi(x',*),a(x',x))=TX(\varphi,x^*).\]
Therefore, if $\alpha\colon TX\to X$ is a $\mathbb{T}$-algebra structure, then
\[ [\varphi,a](*,x)=TX(\varphi,x^*)=X(\alpha(\varphi),x),\]
that is, $[\varphi,a]=\alpha(\varphi)_*$; this means that $\alpha$ assigns to each distributor $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ the representative of $[\varphi,(1_X)_*]$.
\end{remark}
Hence, we may describe the category of $\mathbb{T}$-algebras as follows.
\begin{theorem}\label{thm:charact}
\begin{enumerate}
\item A map $\alpha\colon TX\to X$ is a $\mathbb{T}$-algebra structure if, and only if, for each distributor $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ in $TX$, $\alpha(\varphi)_*=[\varphi,(1_X)_*]$.
\item If $X$ and $Y$ are $\mathbb{T}$-algebras, then a $V$-functor $f\colon X\to Y$ is a $\mathbb{T}$-homomorphism if, and only if, $f$ preserves $\varphi$-weighted colimits for any $\varphi\in TX$, i.e., if $x\in X$ represents $[\varphi,(1_X)_*]$, then $f(x)$ represents $[\varphi\cdot f^*,(1_Y)_*]$.
\end{enumerate}
\end{theorem}
\section{On algebras for submonads of $\mathbb{P}$: the special case of the formal ball monad}
From now on we will study more in detail $(\Cats{V})^\mathbb{T}$ for special submonads $\mathbb{T}$ of $\mathbb{P}$. In our first example, the formal ball monad $\mathbb{B}$, we will need to consider the (co)restriction of $\mathbb{B}$ and $\mathbb{P}$ to $\Cats{V}_\sep$. We point out that the characterisations of $\mathbb{T}$-algebras of Theorem \ref{th:ch} remain valid for these (co)restrictions.
The space of formal balls is an important tool in the study of (quasi-)metric spaces. Given a metric space $(X,d)$ its \emph{space of formal balls} is simply the collection of all pairs $(x,r),$ where $x \in X$ and $r \in [0,\infty[$. This space can itself be equipped with a (quasi-)metric. Moreover this construction can naturally be made into a monad on the category of (quasi-)metric spaces (cf. \cite{GL19, KW11} and references there).
This monad can readily be generalised to $V$-categories, using a $V$-categorical structure in place of the (quasi-)metric.
We will start by considering an extended version of the formal ball monad, the \emph{extended formal ball monad} $\mathbb{B}b$, which we define below.
\begin{definitions} The \emph{extended formal ball monad} $\mathbb{B}b=(B_\bullet ,\eta, \mu)$ is given by the following:
\begin{enumerate}
\item[--] a functor $B_\bullet\colon\Cats{V}\to\Cats{V}$ which maps each $V$-category $X$ to $B_\bullet X$ with underlying set $X\times V$ and \[B_\bullet X((x,r),(y,s))= \hm{r}{ X(x,y) \otimes s }\] and every $V$-functor $f\colon X \to Y$ to the $V$-functor $B_\bullet f\colon B_\bullet X\to B_\bullet Y$ with $B_\bullet f(x,r)=(f(x),r)$;
\item[--] natural transformations $\eta\colon 1 \to B_\bullet$ and $\mu\colon B_\bullet B_\bullet \to B_\bullet$ with $\eta_X(x)=(x,k)$ and $\mu_X((x,r),s)=(x,r\otimes s)$, for every $V$-category $X$, $x\in X$, $r,s\in V$.
\end{enumerate}
The \emph{formal ball monad} $\mathbb{B}$ is the submonad of $\mathbb{B}b$ obtained when we only consider balls with radius different from $\bot$.
\end{definitions}
\begin{remark}
Note that $\mathbb{B}b X$ is not separated if $X$ has more than one element (for any $x,y \in X$, $(x,\bot)\simeq (y,\bot)$), while, as shown in Proposition \ref{prop:canc}, for $X$ separated, separation of $\mathbb{B} X$ depends on an extra property of the quantale $V$.
\end{remark}
Using Corollaries \ref{cor:morphism} and \ref{cor:laxidpt}, it is easy to check that
\begin{prop}\label{prop:Bbmonadmorphismff}
There is a pointwise fully faithful monad morphism $\sigma \colon \mathbb{B}b \to \mathbb{P}$. In particular, both $\mathbb{B}b$ and $\mathbb{B}$ are lax-idempotent.
\end{prop}
\begin{proof}
First of all let us check that $\eta$ satisfies BC*, i.e., for any $V$-functor $f\colon X\to Y$,
\[\xymatrix{X\ar[r]|-{\circ}^{(\eta_X)_*}&B_\bullet X\\
Y\ar@{}[ru]|{\geq}\ar[u]|-{\circ}^{f^*}\ar[r]|-{\circ}_{(\eta_Y)_*}&B_\bullet Y\ar[u]|-{\circ}_{(B_\bullet f)^*}}\]
For $y\in Y$, $(x,r)\in B_\bullet X$,
\begin{align*}
((B_\bullet f)^*(\eta_Y)_*)(y,(x,r))&=B_\bullet Y((y,k),(f(x),r))=Y(y,f(x))\otimes r\\
&\leq \bigvee_{z\in X}Y(y,f(z))\otimes X(z,x)\otimes r=\bigvee_{z\in X} Y(y,f(z))\otimes B_\bullet X((z,k),(x,r))\\
&=((\eta_X)_*f^*)(y,(x,r)).
\end{align*}
Then, by Corollary \ref{cor:morphism}, for each $V$-category $X$, $\sigma_X$ is defined as in the proof of Theorem \ref{th:submonad}, i.e. for each $(x,r)\in B_\bullet X$, $\sigma_X(x,r)=B_\bullet X((-,k),(x,r))\colon X\to V$; more precisely, for each $y\in X$, $\sigma_X(x,r)(y)=X(y,x)\otimes r$.
Moreover, $\sigma_X$ is fully faithful: for each $(x,r), (y,s)\in B_\bullet X$,
\begin{align*}
B_\bullet X((x,r),(y,s))&=\hom(r,X(x,y)\otimes s)\geq \hom(X(x,x)\otimes r, X(x,y)\otimes s)\\
&\geq \bigwedge_{z\in X}\hom(X(z,x)\otimes r,X(z,y)\otimes s)=PX(\sigma(x,r),\sigma(y,s)).
\end{align*}
\end{proof}
It is clear that $\sigma\colon\mathbb{B}b\to\mathbb{P}$ is not pointwise monic; indeed, if $r=\bot$, then $\sigma_X(x,\bot)\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$ is the distributor that is constantly $\bot$, for any $x\in X$. Still it is interesting to identify the $\mathbb{B}b$-algebras via the existence of special weighted colimits.
\begin{prop}\label{prop:Balg}
For a $V$-category $X$, the following conditions are equivalent:
\begin{tfae}
\item $X$ has a $\mathbb{B}b$-algebra structure $\alpha\colon B_\bullet X\to X$;
\item $(\forall x\in X)\;(\forall r\in V)\;(\exists x\oplus r\in X)\;(\forall y\in X)\;\; X(x\oplus r,y)=\hom(r,X(x,y))$;
\item for all $(x,r)\in B_\bullet X$, every diagram of the sort
\[\xymatrix{X\ar[r]|-{\circ}^{(1_X)_*}\ar[d]|-{\circ}_{\sigma_X(x,r)}&X\\
E\ar@{}[ru]^{\leq}\ar[ru]|-{\circ}_{[\sigma_X(x,r),(1_X)_*]}}\]
has a (weighted) colimit.
\end{tfae}
\end{prop}
\begin{proof}
(i) $\Rightarrow$ (ii): The adjunction $\alpha\dashv\eta_X$ gives, via Remark \ref{rem:adjcond},
\[X(\alpha(x,r),y)=B_\bullet X((x,r),(y,k))=\hom(r,X(x,y)).\]
For $x\oplus r:=\alpha(x,r)$, condition (ii) follows.\\
(ii) $\Rightarrow$ (iii): The calculus of the distributor $[\sigma_X(x,r),(1_X)_*]$ shows that it is represented by $x\oplus r$:
\[ [\sigma_X(x,r),(1_X)_*](*,y)=\hom(r,X(x,y)).\]
(iii) $\Rightarrow$ (i) For each $(x,r)\in B_\bullet X$, let $x\oplus r$ represent $[\sigma_X(x,r),(1_X)_*]$. In case $r=k$, we choose $x\oplus k=x$ to represent the corresponding distributor (any $x'\simeq x$ would fit here but $x$ is the right choice for our purpose). Then $\alpha\colon B_\bullet X\to X$ defined by $\alpha(x,r)=x\oplus r$ is, by construction, left adjoint to $\eta_X$, and $\alpha\cdot\eta_X=1_X$.
\end{proof}
The $V$-categories $X$ satisfying (iii), and therefore satisfying the above (equivalent) conditions, are called \emph{tensored}.
This notion was originally introduced in the article \cite{BK75} by Borceux and Kelly for general $V$-categories (for our special $V$-categories we suggest to consult \cite{St04}).\\
Note that, thanks to condition (ii), we get the following characterisation of tensored categories.
\begin{corollary}\label{cor:oplus}
A $V$-category $X$ is tensored if, and only if, for every $x\in X$,
\[X\adjunct{x\oplus -}{X(x,-)}V\]
is an adjunction in $\Cats{V}$.
\end{corollary}
We now shift our attention to the formal ball monad $\mathbb{B}.$
The characterisation of $\mathbb{B}b$-algebras given by Proposition \ref{prop:Balg} may be adapted to obtain a characterisation of $\mathbb{B}$-algebras. Indeed, the only difference is that a $\mathbb{B}$-algebra structure $BX\to X$ does not include the existence of $x\oplus\bot$ for $x\in X$, which, when it exists, is the top element with respect to the order in $X$. Moreover, the characterisation of $\mathbb{B}$-algebras given in \cite[Proposition 3.4]{GL19} can readily be generalised to $\Cats{V}$ as follows.
\begin{prop}
For a $V$-functor $\alpha\colon BX\to X$ the following conditions are equivalent.
\begin{tfae}
\item $\alpha$ is a $\mathbb{B}$-algebra structure.
\item For every $x\in X$, $r,s\in V\setminus\{\bot\}$, $\alpha(x,k)=x$ and $\alpha(x,r\otimes s)=\alpha(\alpha(x,r),s)$.
\item For every $x\in X$, $r\in V\setminus\{\bot\}$, $\alpha(x,k)=x$ and $X(x,\alpha(x,r))\geq r$.
\item For every $x\in X$, $\alpha(x,k)=x$.
\end{tfae}
\end{prop}
\begin{proof}
By definition of $\mathbb{B}$-algebra, (i) $\Leftrightarrow$ (ii), while (i) $\Leftrightarrow$ (iv) follows from Theorem \ref{th:KZ}, since $\mathbb{B}$ is lax-idempotent.
(iii) $\Rightarrow$ (iv) is obvious, and so it remains to prove that, if $\alpha$ is a $\mathbb{B}$-algebra structure, then $X(x,\alpha(x,r))\geq r$, for $r\neq\bot$. But
\[X(x,\alpha(x,r))\geq r\;\Leftrightarrow\; k\leq\hom(r,X(x,\alpha(x,r)))=X(\alpha(x,r),\alpha(x,r)),\]
because $\alpha(x,-)\dashv X(x,-)$ by Corollary \ref{cor:oplus}.
\end{proof}
Since we know that, if $X$ has a $\mathbb{B}$-algebra structure $\alpha$, then $\alpha(x,r)=x\oplus r$, we may state the conditions above as follows.
\begin{corollary}\label{cor:condition}
If $\xymatrix{BX\ar[r]^{-\oplus-}&X}$ is a $\mathbb{B}$-algebra structure, then, for $x\in X$, $r,s\in V\setminus\{\bot\}$:
\begin{enumerate}
\item $x\oplus k=x$;
\item $x\oplus(r\otimes s)=(x\oplus r)\oplus s$;
\item $X(x,x\oplus r)\geq r$.
\end{enumerate}
\end{corollary}
\begin{lemma}
Let $X$ and $Y$ be $V$-categories equipped with $\mathbb{B}$-algebra structures $\xymatrix{BX\ar[r]^{-\oplus-}&X}$ and $\xymatrix{BY\ar[r]^{-\oplus-}&Y}$. Then a map $f: X \rightarrow Y$ is a $V$-functor if and only if $$ f \textrm{ is monotone and } f(x) \oplus r \leq f(x \oplus r) ,$$ for all $(x,r) \in BX$.
\end{lemma}
\begin{proof}
Assume that $f$ is a $V$-functor. Then it is, in particular, monotone, and, from Theorem \ref{th:KZ} we know that $f(x)\oplus r\leq f(x\oplus r)$.
Conversely, assume that $f$ is monotone and that $f(x) \oplus r \leq f(x \oplus r),$ for all $(x,r) \in BX $.
Let $x,x' \in X$. Then $x\oplus X(x,x')\leq x'$ since $(x\oplus -)\dashv X(x,-)$ by Corollary \ref{cor:oplus}, and then
\begin{align*}
f(x)\oplus X(x,x')&\leq f(x\oplus X(x,x'))&\mbox{(by hypothesis)}\\
&\leq f(x')&\mbox{(by monotonicity of $f$).}
\end{align*}
Now, using the adjunction $f(x)\oplus - \dashv Y(f(x),-)$, we conclude that \[X(x,x') \leq Y(f(x),f(x')).\] \end{proof}
The following results are now immediate:
\begin{corollary}
\begin{enumerate}
\item Let $(X,\oplus), (Y,\oplus)$ be $\mathbb{B}$-algebras. Then a map $f\colon X \rightarrow Y$ is a $\mathbb{B}$-algebra morphism if and only if, for all $(x,r) \in BX$, \[f \textrm{ is monotone and } f(x \oplus r)= f(x) \oplus r.\]
\item Let $(X,\oplus), (Y,\oplus)$ be $\mathbb{B}$-algebras. Then a $V$-functor $f\colon X \rightarrow Y$ is a $\mathbb{B}$-algebra morphism if and only if, for all $(x,r) \in BX$, \[f(x \oplus r)\leq f(x) \oplus r.\]
\end{enumerate}
\end{corollary}
\begin{example}
If $X\subseteq\,[0,\infty]$, with the $V$-category structure inherited from $\hom$, then
\begin{enumerate}
\item $X$ is a $\mathbb{B}b$-algebra if, and only if, $X=[a,b]$ for some $a,b\in\,[0,\infty]$.
\item $X$ is a $\mathbb{B}$-algebra if, and only if, $X=\,]a,b]$ or $X=[a,b]$ for some $a,b\in\,[0,\infty]$.
\end{enumerate}
Let $X$ be a $\mathbb{B}b$-algebra. From Proposition \ref{prop:Balg} one has
\[(\forall x\in X)\;(\forall r\in\,[0,\infty])\;(\exists x\oplus r\in X)\;(\forall y\in X)\;\;y\ominus (x\oplus r)=(y\ominus x)\ominus r=y\ominus (x+r).\]
This implies that, if $y\in X$, then $y>x\oplus r\;\Leftrightarrow\;y>x+r$. Therefore, if $x+r\in X$, then $x\oplus r=x+r$, and, moreover, $X$ is an interval: given $x,y,z\in\,[0,\infty]$ with $x<y<z$ and $x,z\in X$, then, with $r=y-x\in\,[0,\infty]$, $x+r=y$ must belong to $X$:
\[z\ominus(x\oplus r)=z-(x+r)=z-y>0\;\Rightarrow\;z\ominus(x\oplus r)=z-(x\oplus r)=z-y\;\Leftrightarrow\; y=x\oplus r\in X.\]
In addition, $X$ must have a bottom element (that is, a maximum with respect to the classical order of the real half-line): for any $x\in X$ and $b=\sup X$, $x\oplus(b-x)=\sup\{z\in X\,;\,z\leq b\}=b\in X$. For $r=\infty$ and any $x\in X$, $x\oplus\infty$ must be the top element of $X$, so $X=[a,b]$ for $a,b\in\,[0,\infty]$.
Conversely, if $X=]a,b]$, for $x\in X$ and $r\in\,[0,\infty[$, define $x\oplus r=x+r$ if $x+r\in X$ and $x\oplus r=b$ elsewhere. It is easy to check that condition (ii) of Proposition \ref{prop:Balg} is satisfied for $r\neq\infty$.
Analogously, if $X=[a,b]$, for $x\in X$ and $r\in\,[0,\infty]$, we define $x\oplus r$ as before in case $r\neq\infty$ and $x\oplus\infty=a$.
\end{example}
As we will see, (co)restricting $\mathbb{B}$ to $\Cats{V}_\sep$ will allow us to obtain some interesting results. Unfortunately $X$ being separated does not entail $BX$ being so. Because of this we will need to restrict our attention to the \emph{cancellative} quantales which we define and characterize next.
\begin{definition}
A quantale $V$ is said to be \emph{cancellative} if
\begin{equation}\label{eq:canc}
\forall r,s \in V,\, r\neq \bot :\ r=s \otimes r \ \Rightarrow \ s=k.
\end{equation}
\end{definition}
\begin{remark}
We point out that this notion of cancellative quantale does not coincide with the notion of cancellable ccd quantale introduced in \cite{CH17}. On the one hand cancellative quantales are quite special, since, for instance, when $V$ is a locale, and so with $\otimes=\wedge$ is a quantale, $V$ is not cancellative since condition \eqref{eq:canc} would mean, for $r\neq\bot$, $r=s\wedge r\;\Rightarrow\;s=\top$. On the other hand, $[0,1]_\odot$, that is $[0,1]$ with the usual order and having as tensor product the \L{}ukasiewicz sum, is cancellative but not cancellable.
In addition we remark that every \emph{value quantale} \cite{KW11} is cancellative.
\end{remark}
\begin{prop}\label{prop:canc}
Let $V$ be an integral quantale. The following assertions are equivalent:
\begin{tfae}
\item $BV$ is separated;
\item $V$ is cancellative;
\item If $X$ is separated then $BX$ is separated.
\end{tfae}
\end{prop}
\begin{proof}
(i) $\Rightarrow$ (ii): Let $ r,s \in V,\, r\neq \bot $ and $ r=s \otimes r$. Note that \[ BV((k,r),(s,r))=\hm{r}{\hm{k}{s}\otimes r}=\hm{r}{s \otimes r}=\hm{r}{r}=k\] and \[BV((s,r),(k,r))=\hm{r}{\hm{s}{k}\otimes r}=\hm{r}{\hm{s}{k}\otimes s \otimes r} =\hm{s \otimes r }{ s \otimes r}=k.\] Therefore, since $BV$ is separated, $(s,r)=(k,r)$ and it follows that $s=k.$\\
(ii) $\Rightarrow $ (iii): If $(x,r)\simeq (y,s)$ in $BX$, then
\[BX((x,r),(y,s))=k \Leftrightarrow r \leq X(x,y) \otimes s, \mbox{ and }\]
\[BX((y,s),(x,r))=k \Leftrightarrow s \leq X(y,x) \otimes r.\]
Therefore $r\leq s$ and $s \leq r$, that is $r=s.$ Moreover, since $r \leq X(x,y) \otimes r \leq r$ we have that $X(x,y)=k$. Analogously, $X(y,x)=k$ and we conclude that $x=y$.\\
(iii) $\Rightarrow$ (i): Since $V$ is separated it follows immediately from (iii) that $BV$ is separated.
\end{proof}
We can now show that $\mathbb{B}$ is a submonad of $\mathbb{P}$ in the adequate setting. \emph{From now on we will be working with a cancellative and integral quantale $V$, and $\mathbb{B}$ will be the (co)restriction of the formal ball monad to $\Cats{V}s$.}
\begin{prop}
Let $V$ be a cancellative and integral quantale. Then $\mathbb{B}$ is a submonad of $\mathbb{P}$ in $\Cats{V}s$.
\end{prop}
\begin{proof}
Thanks to Proposition \ref{prop:Bbmonadmorphismff}, all that remains is to show that $\sigma_X $ is injective on objects, for any $V$-category $X$. Let $\sigma(x,r)=\sigma(y,s)$, or, equivalently, $X(-,x)\otimes r =X(-,y)\otimes s$. Then, in particular, \[r = X(x,x)\otimes r = X(x,y) \otimes s \leq s= X(y,y)\otimes s = X(y,x)\otimes r \leq r.\]
Therefore $r=s$ and $X(y,x)=X(x,y)=k$. We conclude that $(x,r)=(y,s)$.
\end{proof}
Thanks to Theorem \ref{th:ch} $\mathbb{B}$-algebras are characterized via an injectivity property with respect to special embeddings. We end this section studying in more detail these embeddings.
Since we are working in $\Cats{V}s$, a $B$-embedding $h\colon X\to Y$, being fully faithful, is injective on objects. Therefore, for simplicity, we may think of it as an inclusion. With $Bh_\sharp\colon BY\to BX$ the right adjoint and left inverse of $Bh\colon BX\to BY$, we denote $Bh_\sharp(y,r)$ by $(y_r, r_y)$.
\begin{lemma}\label{prop:h}
Let $h\colon X\to Y$ be a $B$-embedding. Then:
\begin{enumerate}
\item $(\forall y\in Y)\;(\forall x\in X)\;(\forall r\in V)\; BY((x,r),(y,r))=BY((x,r),(y_r,r_y))$;
\item $(\forall \, y \in Y) \colon k_y=Y(y_k,y)$;
\item $(\forall\, y \in Y)\;(\forall x \in X)\colon \enskip Y(x,y)= Y(x,y_k)\otimes Y(y_k,y)$.
\end{enumerate}
\end{lemma}
\begin{proof}
(1) From $Bh_\sharp\cdot Bh=1_{BX}$ and $Bh\cdot Bh_\sharp\leq 1_{BY}$ one gets, for any $(y,r)\in BY$, $(y,r)\leq (y_r,r_y)$, i.e. $BY((y,r),(y_r,r_y))=\hom(r_y,Y(y_r,y)\otimes r)=k$. Therefore, for all $x\in X$, $y\in Y$, $r\in V$,
\begin{align*}
BY((x,r),(y,r))&\leq BX((x,r),(y_r,r_y))=BY((x,r),(y_r,r_y))\\
&=BY((x,r),(y_r,r_y))\otimes BY((y_r,r_y),(y,r))\leq BY((x,r),(y,r)),
\end{align*}
that is
\[BY((x,r),(y,r))=BY((x,r),(y_r,r_y)).\]
(2) Let $y \in Y$. Then
\[Y(y_k,y)=BY((y_k,k),(y,k))=BY((y_k,k),(y_k,k_y))=k_y.\]
(3) Let $y\in Y$ and $x\in X$. Then
\[Y(x,y)=BY((x,k),(y,k))=BY((x,k),(y_k,k_y))=Y(x,y_k)\otimes k_y=Y(x,y_k)\otimes Y(y_k,y).\]
\end{proof}
\begin{prop}
Let $X$ and $Y$ be $V$-categories. A $V$-functor $h\colon X\to Y$ is a $B$-embedding if and only if $h$ is fully faithful and
\begin{equation}\label{eq:fff}
(\forall y \in Y)\;(\exists ! z\in X)\; (\forall x\in X)\;\;\; Y(x,y)=Y(x,z)\otimes Y(z,y).
\end{equation}
\end{prop}
\begin{proof}
If $h$ is a $B$-embedding, then it is fully faithful by Proposition \ref{prop:emb} and, for each $y\in Y$, $z=y_k\in X$ fulfils the required condition.
To show that such $z$ is unique, assume that $z,z'\in X$ verify the equality of condition \eqref{eq:fff}. Then
\[Y(z,y)=Y(z,z')\otimes Y(z',y)\leq Y(z',y)=Y(z',z)\otimes Y(z,y)\leq Y(z,y),\]
and therefore, because $V$ is cancellative, $Y(z',z)=k$; analogously one proves that $Y(z,z')=k$, and so $z=z'$ because $Y$ is separated.\\
To prove the converse, for each $y\in Y$ we denote by $\overline{y}$ the only $z\in X$ satisfying \eqref{eq:fff}, and define \[Bh_\sharp(y,r)=(\overline{y},Y(\overline{y},y)\otimes r).\] When $x\in X$, it is immediate that $\overline{x}=x$, and so $Bh_\sharp\cdot Bh=1_{BX}$. Using Remark \ref{rem:adjcond}, to prove that $Bh_\sharp$ is a $V$-functor and $Bh\dashv Bh_\sharp$ it is enough to show that
\[BX((x,r),Bh_\sharp(y,s))=BY(Bh(x,r),(y,s)),\]
for every $x\in X$, $y\in Y$, $r,s\in V$. By definition of $Bh_\sharp$ this means
\[BX((x,r),(\overline{y},Y(\overline{y},y)\otimes s))=BY((x,r),(y,s)),\]
that is,
\[\hom(r,Y(x,\overline{y})\otimes Y(\overline{y},y)\otimes s)=\hom(r,Y(x,y)\otimes s),\]
which follows directly from \eqref{eq:fff}.
\end{proof}
\begin{corollary}
In $\Met$, if $X\subseteq [0,\infty]$, then its inclusion $h\colon X\to[0,\infty]$ is a $B$-embedding if, and only if, $X$ is a closed interval.
\end{corollary}
\begin{proof}
If $X=[x_0,x_1]$, with $x_0,x_1\in\,[0,\infty]$, $x_0\leq x_1$, then it is easy to check that, defining $\overline{y}=x_0$ if $y\leq x_0$, $\overline{y}=y$ if $y\in X$, and $\overline{y}=x_1$ if $y\geq x_1$, for every $y\in\,[0,\infty]$, condition \eqref{eq:fff} is fulfilled.\\
We divide the proof of the converse in two cases:
(1) If $X$ is not an interval, i.e. if there exist $x,x'\in X$, $y\in [0,\infty]\setminus X$ with $x<y<x'$, then either $\overline{y}<y$, and then
\[0=y\ominus x'\neq (\overline{y}\ominus x')+(y\ominus\overline{y})=y-\overline{y},\]
or $\overline{y}>y$, and then
\[y-x=y\ominus x\neq (\overline{y}\ominus x)+(y\ominus\overline{y})=\overline{y}-x.\]\\
(2) If $X=[x_0,x_1[$ and $y> x_1$, then there exists $x\in X$ with $\overline{y}<x<y$, and so
\[y-x=y\ominus x\neq (\overline{y}\ominus x)+(y\ominus\overline{y})=y-\overline{y}.\]
An analogous argument works for $X=]x_0,x_1]$.
\end{proof}
\section{On algebras for submonads of $\mathbb{P}$ and their morphisms}
In the following $\mathbb{T}=(T,\mu,\eta)$ is a submonad of the presheaf monad $ \mathbb{P}=(P,\mathpzc{m},\mathpzc{y})$ in $\Cats{V}s$. For simplicity we will assume that the injective and fully faithful components of the monad morphism $\sigma:T \rightarrow P$ are inclusions. Theorem \ref{th:KZ} gives immediately that:
\begin{prop}
Let $(X,a)$ be a $V$-category and $\alpha: TX \rightarrow X$ be a $V$-functor. The following are equivalent: \begin{enumerate}
\item $(X,\alpha)$ is a $\mathbb{T}$-algebra;
\item $\forall\, x \in X:$ $ \alpha (x^*)=x $.
\end{enumerate}
\end{prop}
We would like to identify the $\mathbb{T}$-algebras directly, as we did for $\mathbb{B}b$ or $\mathbb{B}$ in Proposition \ref{prop:Balg}. First of all, we point out that a $\mathbb{T}$-algebra structure $\alpha\colon TX\to X$ must satisfy, for every $\varphi\in TX$ and $x\in X$,
\[X(\alpha(\varphi), x)=TX(\varphi,x^*),\]
and so, in particular,
\[\alpha(\varphi)\leq x \;\Leftrightarrow\;\varphi\leq x^*;\]
hence $\alpha$ must assign to each $\varphi\in TX$ an $x_\varphi\in X$ so that
\[x_\varphi=\min\{x\in X\,;\,\varphi\leq x^*\}.\] Moreover, for such a map $\alpha\colon TX\to X$, $\alpha$ is a $V$-functor if, and only if,
\begin{align*}
&\;(\forall \varphi,\rho\in TX)\;\;TX(\varphi,\rho)\leq X(x_\varphi,x_\rho)=TX(X(-,x_\varphi),X(-,x_\rho))\\
\Leftrightarrow&\;(\forall \varphi,\rho\in TX)\;\;TX(\varphi,\rho)\leq \bigwedge_{x\in X}\hom(X(x,x_\varphi),X(x,x_\rho))\\
\Leftrightarrow&\;(\forall x\in X)\;(\forall \varphi,\rho\in TX)\;\;X(x,x_\varphi)\otimes TX(\varphi,\rho)\leq X(x,x_\rho).
\end{align*}
\begin{prop}
A $V$-category $X$ is a $\mathbb{T}$-algebra if, and only if:
\begin{enumerate}
\item for all $\varphi\in TX$ there exists $\min\{x\in X\,;\,\varphi\leq x^*\}$;
\item for all $\varphi, \rho\in TX$ and for all $x\in X$, $X(x,x_\varphi)\otimes TX(\varphi,\rho)\leq X(x,x_\rho)$.
\end{enumerate}
\end{prop}
We remark that condition (2) can be equivalently stated as:
\begin{enumerate}
\item[\emph{(2')}] for each $\rho\in TX$, the distributor $\rho_1=\displaystyle\bigvee_{\varphi\in TX} X(-,x_\varphi)\otimes TX(\varphi,\rho)$ satisfies $x_{\rho_1}=x_\rho$,
\end{enumerate}
which is the condition corresponding to condition (2) of Corollary \ref{cor:condition}.\\
Finally, as for the formal ball monad, Theorem \ref{th:KZ} gives the following characterisation of $\mathbb{T}$-algebra morphisms.
\begin{corollary}
Let $(X,\alpha), (Y,\beta)$ be $\mathbb{T}$-algebras. Then a $V$-functor $f: X \rightarrow Y$ is a $\mathbb{T}$-algebra morphism if and only if \[(\forall \varphi \in TX)\;\;\beta(\varphi \cdot f^*) \geq f(\alpha(\varphi)).\]
\end{corollary}
\begin{example}
\textbf{The Lawvere monad.} Among the examples presented in \cite{CH08} there is a special submonad of $\mathbb{P}$ which is inspired by the crucial remark of Lawvere in \cite{Law73} that Cauchy completeness for metric spaces is a kind of cocompleteness for $V$-categories. Indeed, the submonad $\mathbb{L}$ of $\mathbb{P}$ induced by
\[\Phi=\{\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} Y\,;\,\varphi\mbox{ is a right adjoint $V$-distributor}\}\]
has as $\mathbb{L}$-algebras the \emph{Lawvere complete $V$-categories}. These were studied also in \cite{CH09}, and in \cite{HT10} under the name $L$-complete $V$-categories. When $V=[0,\infty]_+$, using the usual order in $[0,\infty]$, for distributors $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$, $\psi\colon E{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} X$ to be adjoint
\[\xymatrix@=8ex{X\ar@{}[r]|{\top}\ar@<1mm>@/^2mm/[r]|\circ^{{\varphi}} & \ar@<1mm>@/^2mm/[l]|\circ^{{\psi}}E}\]means that
\begin{align*}
(\forall x,x'\in X)\;\;&X(x,x')\leq \varphi(x)+\psi(x'),\\
&0\geq \inf_{x\in X} (\psi(x)+\varphi(x)).
\end{align*}
This means in particular that
\[(\forall n\in\mathbb{N})\;(\exists x_n\in X)\;\;\psi(x_n)+\varphi(x_n)\leq\frac{1}{n},\]
and, moreover,
\[X(x_n,x_m)\leq\varphi(x_n)+\psi(x_m)\leq \frac{1}{n}+\frac{1}{m}.\]
This defines a \emph{Cauchy sequence} $(x_n)_n$, so that
\[(\forall\varepsilon>0)\;(\exists p\in\mathbb{N})\;(\forall n,m\in\mathbb{N})\;n\geq p\;\wedge\;m\geq p\;\Rightarrow\;\;X(x_n,x_m)+X(x_m,x_n)<\varepsilon.\]
Hence, any such pair induces a (equivalence class of) Cauchy sequence(s) $(x_n)_n$, and a representative for
\[\xymatrix{X\ar[r]|-{\circ}^{(1_X)_*}\ar[d]|-{\circ}_{\varphi}&X\\
E\ar@{}[ru]^{\leq}\ar[ru]|-{\circ}_{[\varphi,(1_X)_*]}}\] is nothing but a limit point for $(x_n)_n$. Conversely, it is easily checked that every Cauchy sequence $(x_n)_n$ in $X$
gives rise to a pair of adjoint distributors
\[\varphi=\lim_n\,X(-,x_n)\mbox{ and }\psi=\lim_n\,X(x_n,-).\]
We point out that the $\mathbb{L}$-embeddings, i.e. the fully faithful and fully dense $V$-functors $f\colon X\to Y$ do not coincide with the $\mathbb{L}$-dense ones (so that $f_*$ is a right adjoint). For instance, assuming for simplicity that $V$ is integral, a $V$-functor $y\colon E\to X$ ($y\in X$) is fully dense if and only if $y\simeq x$ for all $x\in X$, while it is an $\mathbb{L}$-embedding if and only if $y\leq x$ for all $x\in X$. Indeed, $y\colon E\to X$ is $\mathbb{L}$-dense if, and only if,
\begin{enumerate}
\item[--] there is a distributor $\varphi\colon X{\longrightarrow\hspace*{-3.1ex}{\circ}\hspace*{1.5ex}} E$, i.e.
\begin{equation}\label{eq:distr}
(\forall x,x'\in X)\;\;X(x,x')\otimes\varphi(x')\leq\varphi(x),
\end{equation}
such that
\item[--] $k\geq \varphi\cdot y_*$ , which is trivially true, and $a\leq y_*\cdot\varphi$, i.e.
\begin{equation}\label{eq:adjoint}
(\forall x,x'\in X)\;\;X(x,x')\leq \varphi(x)\otimes X(y,x').
\end{equation}
\end{enumerate}
Since \eqref{eq:distr} follows from \eqref{eq:adjoint},
\[y\mbox{ is $\mathbb{L}$-dense }\;\Leftrightarrow\;\;(\forall x,x'\in X)\;\;X(x,x')\leq \varphi(x)\otimes X(y,x').\]
In particular, when $x=x'$, this gives $k\leq \varphi(x)\otimes X(y,x)$, and so we can conclude that, for all $x\in X$, $y\leq x$ and $\varphi(x)=k$. The converse is also true; that is
\[y\mbox{ is $\mathbb{L}$-dense }\;\Leftrightarrow\;\;(\forall x\in X)\;\;y\leq x.\]
Still, it was shown in \cite{HT10} that injectivity with respect to fully dense and fully faithful $V$-functors (called $L$-dense in \cite{HT10}) characterizes also the $\mathbb{L}$-algebras.
\end{example}
\end{document} |
\begin{document}
\title{Effects of thermal motion on electromagnetically induced absorption}
\author{E. Tilchin}
\affiliation{Department of Chemistry, Bar-Ilan University, Ramat Gan 52900, Israel}
\author{O. Firstenberg}
\affiliation{Department of Physics, Technion-Israel Institute of Technology, Haifa 32000, Israel}
\author{A. D. Wilson-Gordon}
\affiliation{Department of Chemistry, Bar-Ilan University, Ramat Gan 52900, Israel}
\pacs{42.50.Gy, 32.70.Jz}
\begin{abstract}
We describe the effect of thermal motion and buffer-gas collisions on a
four-level closed $N$ system interacting with strong pump(s) and a weak probe.
This is the simplest system that experiences electromagnetically induced
absorption (EIA) due to transfer of coherence via spontaneous emission from
the excited to ground state. We investigate the influence of Doppler
broadening, velocity-changing collisions (VCC), and phase-changing collisions
(PCC) with a buffer gas on the EIA spectrum of optically active atoms. In
addition to exact expressions, we present an approximate solution for the
probe absorption spectrum, which provides physical insight into the behavior
of the EIA peak due to VCC, PCC, and wave-vector difference between the pump
and probe beams. VCC are shown to produce a wide pedestal at the base of the
EIA peak, which is scarcely affected by the pump-probe angular deviation,
whereas the sharp central EIA peak becomes weaker and broader due to the
residual Doppler-Dicke effect. Using diffusion-like equations for the atomic
coherences and populations, we construct a spatial-frequency filter for a
spatially structured probe beam and show that Ramsey narrowing of the EIA peak
is obtained for beams of finite width.
\end{abstract}
\maketitle
\section{\label{sec:intro}Introduction}
The absorption spectrum of a weak probe, interacting with a pumped
nearly-degenerate two-level transition, can exhibit either a sharp subnatural
dip or peak at line center \cite{Khitrova1988JOSAB}, depending on the
degeneracy of the levels, the polarizations of the fields, and the absence or
presence of a weak magnetic field. The phenomenon is termed
electromagnetically-induced transparency (EIT)
\cite{HarrisToday,Fleischhauer2005RMP} when there is a dip in the probe
spectrum and electromagnetically induced absorption (EIA)
\cite{Akulshin1998PRA} when there is a peak.
In the case of orthogonal polarizations of the pump and probe, both EIT and
EIA are related to the ground-level Zeeman coherence, which is induced by the
simultaneous action of both fields. The simplest model system that exhibits
EIT is the three-level $\Lambda$ system, where the two lower states $g_{1,2}$
are Zeeman sublevels of the ground hyperfine level $F_{g}$. In a $\Lambda$
system, quantum coherence can lead to the destructive interference between the
two possible paths of excitation. As a result, if the pump field is tuned to
resonance, the narrow dip in the probe absorption spectrum at the two-photon
resonance can be interpreted as EIT caused by a coherent population trapping
\cite{ArimondoCPTRev2} in the lower levels. The simplest system that exhibits
EIA is the four-level $N$ system \cite{Taichenachev1999PRA,GorenN2004PRA}
(Fig. \ref{Fig. 1}, top), consisting of states $g_{1,2}$ and $e_{1,2}$ which
are Zeeman sublevels of the ground ($F_{g}$) and excited ($F_{e}$) hyperfine
levels, where the $g_{i}\leftrightarrow e_{i}$, $i=1,2$, transitions interact
with non-saturating pump(s), and the $g_{2}\leftrightarrow e_{1}$ transition
interacts with a weak probe. The $N$ system gives similar results to those
obtained for a closed alkali-metal $F_{g}\rightarrow F_{e}=F_{g}+1$ transition
interacting with a $\sigma_{\pm}$ polarized pump, and a weak $\pi$ polarized
probe \cite{GorenN2004PRA,Zigdon2007SPIE}. It has been shown
\cite{Taichenachev1999PRA,GorenTOP2003PRA,GorenN2004PRA}, that the EIA peak is
due to transfer of coherence (TOC) from the excited state to the ground state,
via spontaneous emission. The excited-state coherence only exists in systems
where the coherent population trapping is incomplete so that there is some
population in the excited state \cite{GorenTOP2003PRA,Meshulam2007OL}. The
transfer of this coherence to the ground state leads to a peak in the
contribution of the ground-state two-photon coherence to the probe absorption
at line center, instead of the dip that occurs in its absence (for example, in
a $\Lambda$ system or a non-degenerate $N$ system) \cite{Zigdon2008PRA}.
\begin{figure}
\caption{Top: The $N-$configuration atom. The light-induced transitions are
marked by solid (pump field) and dashed (probe field) lines, and the wavy
arrows are spontaneous decay paths. The thick arrow illustrates the
spontaneous transfer of coherence (TOC). Bottom: probe and pump beam(s), of
possibly a finite size, propagating through the vapor cell. The optical axis
is parallel to $\hat{z}$.}
\label{Fig. 1}
\end{figure}
In this paper, we investigate the effect of the thermal motion of the
alkali-metal gas on the EIA spectrum, in the presence of a buffer gas. In a
previous paper \cite{GorenTOP2004PRA}, we discussed the effect of
phase-changing collisions (PCC) with the buffer gas on an $N$ system and
showed that they lead to considerable narrowing of the EIA peak in both the
presence and absence of Doppler broadening. These collisions increase the
transverse decay rate of the optical transitions, resulting in the so-called
pressure broadening of the optical spectral line, and are thus easily
incorporated in the Bloch equations. However, in order to describe the overall
effect of buffer-gas collisions, it is necessary to include both
velocity-changing collisions (VCC) as well as PCC
\cite{Singh1988JOSAB,May1999PRA}, which is a much greater challenge. Due to
the complexity of the problem, we limit our discussion to a four-level $N$
system, and to buffer-gas pressures that are sufficiently low so that
collisional decoherence of the excited state \cite{Failache2003PRA} can be neglected.
The Doppler effect occurs in the limit of \emph{ballistic} atomic motion, when
the mean free-path between VCC is much larger than the radiation wavelength.
Due to their narrow spectral response, Raman processes such as EIT and EIA are
much more sensitive to the ``residual" Doppler effect, arising when there is a
difference between the wavevectors of the Raman fields. In many cases however,
the Raman wavelength can become much larger than the typical free-path between
collisions. For example, an angular deviation of a milliradian between the two
optical beams yields a superposition pattern with a wavelength in the order of
a millimeter. In this limit, the atoms effectively perform a \emph{diffusion}
motion through the spatial oscillations of the superposition field, leading to
the Dicke narrowing of the residual Doppler width. While the residual Doppler
broadening is linearly proportional to the Raman wavevector, Dicke narrowing
shows a quadratic dependence. This behavior was demonstrated in EIT with
non-collimated pump and probe \cite{Weitz2005PRA,Shuker2007PRA}.
Recently, a model describing thermal motion and collisions for EIT was
presented \cite{Firstenberg2007PRA,Shuker2007PRA,Firstenberg2008PRA},
utilizing the density matrix distribution in space and velocity with a
Boltzmann relaxation formalism. The model describes a range of motional
phenomena, including Dicke narrowing, and diffusion in the presence of
electromagnetic fields and during storage of light. This diffusion model was
used to describe a spatial frequency filter for a spatially structured probe
\cite{Firstenberg2008PRA} and also Ramsey narrowing
\cite{Xiao2006PRL,Xiao2008OE}. Here, we utilize a similar formalism to
estimate the influence of the atomic thermal motion in a buffer-gas
environment, including VCC and PCC, on the spectral shape of EIA in a
four-level \textit{N} system, with collimated or non-collimated light beams.
In Sec. \ref{sec:DD}, the Doppler broadening and Dicke narrowing effects are
studied for plane-wave fields. As the full mathematical treatment is lengthy,
it appears in Appendix A. However, an approximate equation which describes the
main features of the spectra is presented in Sec. \ref{sec:DD}. Diffusion-like
equations for the ground and excited state coherences and populations are
derived in Appendix B. Two main phenomena are described using this model: (i)
a spatial-frequency filter for structured probe fields which is presented in
Sec. \ref{sec:filter}, and (ii) atomic diffusion through a finite-sized beam
resulting in Ramsey narrowing of the EIA peak, which is discussed in Sec.
\ref{sec:Ramseynarr}. Finally, conclusions are drawn in Sec. \ref{sec:conc}.
\section{\label{sec:DD}The Doppler-Dicke line shapes of EIA}
Consider the near-resonant interaction of a four-state atom in an \textit{N}
configuration, depicted in Fig. \ref{Fig. 1}. The two lower states $g_{1}$ and
$g_{2}$ are degenerate and belong to the ground level with zero energy, and
the excited states $e_{1}$ and $e_{2}$ are degenerate with energy $\hbar
\omega_{0}$. The light field consists of three beams, each with a carrier
frequency $\omega_{j}$ and wavevector $\mathbf{q}_{j},$ where $j=1,2$ denotes
the two strong pump beams, and $j=p$ the weak probe,
\begin{equation}
\mathbf{\breve{E}}(\mathbf{r},t)=\sum_{j=1,2,p}\mathbf{E}_{j}\left(
\mathbf{r},t\right) e^{-i\omega_{j}t+i\mathbf{q}_{j}\cdot\mathbf{r}
}+\text{c.c.} \label{Eq. 1}
\end{equation}
Here, $\mathbf{E}_{j}\left( \mathbf{r},t\right) $ are the slowly varying
envelopes in space and time. The pumps drive the $g_{1}\leftrightarrow e_{1}$
and $g_{2}\leftrightarrow e_{2}$ transitions, and the probe is coupled to the
$g_{2}\leftrightarrow e_{1}$ transition.
\begin{figure}
\caption{(a) The probe absorption, calculated from the exact solution for the
density matrix (blue line) and from the approximate solution Eq.~(\ref{Eq. 2}) (red dashed line).}
\label{Fig. 2}
\end{figure}
Our model will incorporate four relaxation rates: $\Gamma,$ the spontaneous
emission rate from each of the excited states to all the ground states;
$\Gamma_{\text{pcc}},$ the pressure broadening of the optical transitions
resulting from PCC; $\gamma_{\text{vcc}}$, the velocity autocorrelation
relaxation rate ($1/\gamma_{\text{vcc}}$ is the time it takes the velocity
vector to vary substantially) \cite{Sobelman1967SPU}, which is proportional to
the rate of VCC; and $\gamma$ is the homogenous decoherence rate within the
ground and excited state manifolds due, for example, to spin-exchange and
spin-destruction collisions \footnote{The fact that the `inner' decoherence
rate $\gamma$ is shared by both the ground and the excited manifolds, does not
imply that their total decoherence rate is the same; the coherence between the
two excited states decays via the $e\rightarrow g$ relaxation channels and
therefore decays much faster than the ground-state coherence. Incorporating
different values of $\gamma$ for the ground and excited states does not lead
to substantial changes in the collision-induced phenomena explored here.}. In
the model, the transition $g_{1}\leftrightarrow e_{2}$ is forbidden (due to
some selection rule such as angular momentum).
To focus the discussion, we assume that all three beams are continuous waves,
namely $\mathbf{E}_{j}(\mathbf{r},t)=\mathbf{E}_{j}(\mathbf{r})$. We then
obtain stationary Rabi frequencies, given by $V_{j}=V_{j}\left(
\mathbf{r}\right) =\mu_{j}\mathbf{E}_{j}(\mathbf{r})/\hbar$, where $\mu_{j}$
is the transition dipole moment. The complete set of Bloch equations for the
four-level \textit{N} system consists of sixteen equations
\cite{GorenN2004PRA}. In order to simplify the application of the theory to
EIA, we assume that $V_{p}\ll V_{1,2}<\Gamma$ and that the pump transitions
are well below saturation, so that in the absence of the probe, the population
concentrates in the $g_{2}$ state, the $g_{2} \leftrightarrow e_{2}$ dipole is
excited, and the $e_{2}$ state is empty up to second order in the pump field
\cite{GorenN2004PRA}. The equations can then be written up to the first order
in the probe field $V_{p}$ \cite{Taichenachev1999PRA}, which reduces the
number of Bloch equations to five.
The complete analytical development is presented in Appendix A, and an example
of the calculated probe absorption spectrum for collinear and degenerate beams
($\mathbf{q}_{1}=\mathbf{q}_{2}=\mathbf{q}$) is given in Fig. \ref{Fig. 2}(a)
(blue line). For the numerical calculations, we have considered the $D_{2}$
line of $^{85}$Rb (wavelength 780 nm) at room temperature, with a total
spontaneous emission rate $\Gamma=2\pi\times6$ MHz \cite{Lezama1999PRA}. Other
parameters are indicated in the figure caption and described in what follows.
Four complex frequencies control the EIA dynamics, each relating to a
different coherence in the process:
\begin{subequations}
\label{Eq. 3}
\begin{align}
\xi_{1} & =\left( \Delta_{p}-\Delta_{1}\right) -(\mathbf{q}_{p}
-\mathbf{q}_{1})\cdot\mathbf{v}+i(\gamma+\gamma_{\text{vcc}}),\label{Eq. 3a}\\
\xi_{2} & =\Delta_{p}-\mathbf{q}_{p}\cdot\mathbf{v}+i(\tilde{\Gamma}
+\gamma_{\text{vcc}}),\label{Eq. 3b}\\
\xi_{3} & =\left( \Delta_{p}-\Delta_{2}\right) -(\mathbf{q}_{p}
-\mathbf{q}_{2})\cdot\mathbf{v}+i(\Gamma+\gamma+\gamma_{\text{vcc}
}),\label{Eq. 3c}\\
\xi_{4} & =\left( \Delta_{p}-\Delta_{1}-\Delta_{2}\right) -(\mathbf{q}
_{p}-\mathbf{q}_{1}-\mathbf{q}_{2})\cdot\mathbf{v}+i(\tilde{\Gamma}
+\gamma_{\text{vcc}}), \label{Eq. 3d}
\end{align}
with the one-photon detunings $\Delta_{j}=\omega_{j}-\omega_{e_{j}g_{j}}$
($j=1,2$) and $\Delta_{p}=\omega_{p}-\omega_{e_{1}g_{2}}$, and $\tilde{\Gamma
}=\Gamma/2+\Gamma_{\text{pcc}}+\gamma$. The frequency $\xi_{2}$ is related to
the probe transition and includes the one-photon Doppler shift $\mathbf{q}
_{p}\cdot\mathbf{v}$. $\xi_{1}$ and $\xi_{3}$ relate to the slowly varying
ground and excited state coherences and include the residual Doppler shift
$(\mathbf{q}_{p}-\mathbf{q}_{i})\cdot\mathbf{v}$ and the Raman (two-photon)
detuning. $\xi_{4}$ relates to the three-photon transition (whose direct
optical-dipole is forbidden), required for the EIA process. Note that the fast
optical decay rates ($\Gamma$ or $\tilde{\Gamma}$) is absent only from
$\xi_{1}$.
In EIA, in contrast to EIT, a strong optical-dipole transition ($g_{2}
\leftrightarrow e_{2}$) is excited even in the absence of the probe. Its
excitation depends on its resonance with the pump field, and is thus affected
by Doppler broadening. This leads to velocity-dependent equations even in
zero-order in the probe field, and introduces the additional complex frequency
\end{subequations}
\begin{equation}
\xi_{5}=-\Delta_{2}+\mathbf{q}_{2}\cdot\mathbf{v}+i(\tilde{\Gamma}
+\gamma_{\text{vcc}}),
\end{equation}
with the one-photon Doppler shift $\mathbf{q}_{2}\cdot\mathbf{v}$. The
overall dynamics is thus governed by the five equations (\ref{Eq. A11a}
)-(\ref{Eq. A11e}).
We start by calculating the probe absorption spectrum for uniform pump and
probe fields (plane waves) by solving the equations analytically. The spectrum
depends on $18$ different integrals over velocity, of the form
\begin{equation}
G_{i}=\int d^{3}v\frac{\xi_{\alpha}\cdots\xi_{\beta}}{\xi_{5}\xi_{d}
}F(\mathbf{v}), \label{Gs}
\end{equation}
where $F(\mathbf{v})=\left( 2\pi v_{\text{th}}^{2}\right) ^{-3/2}
e^{-\mathbf{v}^{2}/2v_{\text{th}}^{2}}$ is the Boltzmann velocity
distribution, and $v_{\text{th}}^{2}=k_{b}T/m$ is the mean-square thermal velocity.
The determinant $\xi_{d}$,
\begin{equation}
\xi_{d}=\xi_{1}\xi_{2}\xi_{3}\xi_{4}-\xi_{3}(\xi_{2}V_{2}^{2}+\xi_{4}V_{1}
^{2})+iV_{1}V_{2}bA\Gamma\left( \xi_{2}+\xi_{4}\right) , \label{xie_d}
\end{equation}
introduces the power broadening effect (first and second terms), \emph{i.e.}
the dependence of the Raman spectral width on the pump powers, and the
spontaneous TOC from the excited state to the ground state (last term). The
last term is associated with the TOC due to its dependence on the parameter
$b$, which sets the amount of TOC in the original dynamic equations
(\ref{Eq. A1}), and can take either the value 0 (no TOC) or 1
\cite{Taichenachev1999PRA}. The spontaneous decay branching ratio is given by
$A^{2}=\mu_{e_{1}g_{1}}^{2}/\left( \mu_{e_{1}g_{1}}^{2}+\mu_{e_{1}g_{2}}
^{2}\right) $ \cite{GorenN2004PRA}. The TOC term in Eq. (\ref{xie_d}) depends
on the complex frequency
\begin{align}
\xi_{2}+\xi_{4} & =\left( 2\Delta_{p}-\Delta_{1}-\Delta_{2}\right)
-(2\mathbf{q}_{p}-\mathbf{q}_{1}-\mathbf{q}_{2})\cdot\mathbf{v}\nonumber\\
& +2i(\Gamma/2+\Gamma_{\text{pcc}}+\gamma+\gamma_{\text{vcc}}).
\label{Xi2Xi4}
\end{align}
It is important to note that, although each of the individual frequencies
$\xi_{2}$ and $\xi_{4}$ is affected by a Doppler shift (either one- or
three-photon), the sum $\xi_{2}+\xi_{4}$ exhibits \emph{only a residual
Doppler shift} (assuming nearly collinear pumps, $\mathbf{q}_{1}
\approx\mathbf{q}_{2}$). Nevertheless the relaxation rate $(\Gamma
/2+\Gamma_{\text{pcc}}+\gamma+\gamma_{\text{vcc}})$ is the same as that
characterizing the decay of the optical transitions. As a consequence, even
when $\Gamma_{\text{pcc}}$ is much smaller than the optical Doppler width, it
plays a significant role in determining the intensity of the EIA spectrum.
This is in contrast to one- and two-photon processes (such as EIT), in which
$\Gamma_{\text{pcc}}$ is irrelevant when it is much smaller than the Doppler
width. It can also be seen that when $\mathbf{q}_{1}\approx\mathbf{q}_{2},$
the various residual Doppler shifts are negligible compared to the relaxation
rates in the determinant $\xi_{d},$ so that $\xi_{d}$ is only weakly dependent
on these shifts.
Examining the absorption spectrum in Fig. \ref{Fig. 2}(a), we observe the
narrow absorption peak on top of the broad one-photon curve. Moreover, as can
be seen in the inset, the EIA resonance consists of two independent features:
a ``pedestal" at the base and a sharp absorption peak at the center. In order
to obtain physical insight into these features, we have derived an approximate
solution for the probe absorption which incorporates the main contributions to
the EIA, namely the underlying EIT mechanism plus the spontaneous TOC. The
approximate Fourier transform of the nondiagonal density-matrix element for
the probe is
\begin{equation}
R_{e_{1}g_{2}}=n_{0}\left[ -G_{4}+V_{2}^{2}G_{5}+iV_{1}V_{2}bA\Gamma
\frac{iG_{2}G_{3}\gamma_{\text{vcc}}}{1-iG_{1}\gamma_{\text{vcc}}}\right]
V_{p}, \label{Eq. 2}
\end{equation}
where $G_{1}=\int d^{3}v\frac{\xi_{2}\xi_{3}\xi_{4}F(\mathbf{v})}{\xi_{d}}$,
$G_{2}=\int d^{3}v\frac{\xi_{3}\xi_{4}F(\mathbf{v})}{\xi_{d}}$, $G_{3}=\int
d^{3}v\frac{\xi_{2}\xi_{4}F(\mathbf{v})}{\xi_{5}\xi_{d}}$, $G_{4}=\int
d^{3}v\frac{\xi_{1}\xi_{3}\xi_{4}F(\mathbf{v})}{\xi_{d}}$, $G_{5}=\int
d^{3}v\frac{\xi_{3}F(\mathbf{v})}{\xi_{d}}$, and $n_{0}$ is the number density
of the active atoms. It can be shown that Eq. (\ref{Eq. 2}) is valid provided
$\gamma_{\text{vcc}}\ll\Gamma_{\text{pcc}}+\Gamma/2.$ For an atom at rest and
in the absence of collisions, so that $\gamma_{\text{vcc}}=0,$ $v_{\text{th}
}\rightarrow0,$ and $\Gamma_{\text{pcc}}=0,$ Eq. (\ref{Eq. 2}) is identical to
the expression obtained by Taichenachev \textit{et al}.
\cite{Taichenachev1999PRA} (with $b=1$),
\begin{equation}
R_{e_{1}g_{2}}^{\text{rest}}=\frac{in_{0}V_{p}}{\Gamma/2-i\Delta_{p}}\left[
1+\frac{2A\left\vert V_{1}\right\vert ^{2}/\Gamma}{2\left( 1-A^{2}\right)
\left\vert V_{2}\right\vert ^{2}/\Gamma-i\Delta_{p}}\right] . \label{Eq. 2a}
\end{equation}
The first term in the square brackets in Eqs. (\ref{Eq. 2}) and (\ref{Eq. 2a})
describes the one-photon (background) absorption, and the other terms are the
EIA peak.
For a moving atom, the spectrum resulting from Eq. (\ref{Eq. 2}) is shown in
Fig. \ref{Fig. 2}(a) (red dashed line) and is compared with the exact
solution; evidently, there is a good agreement between the spectra. Despite
the small discrepancy in the intensity of the sharp peak, the approximate
solution preserves the main features in the resonance. When plotted separately
in Fig. \ref{Fig. 2}(b), the three terms in Eq. (\ref{Eq. 2}) can be
identified with the different spectral features: $-G_{4}$ (black dashed line)
describes the background absorption; $V_{2}^{2}G_{5}$ (brown dotted line),
which constitutes the total peak in the absence of VCC, describes the wide
pedestal; and $iG_{2}G_{3}\gamma_{\text{vcc}}/(1-iG_{1}\gamma_{\text{vcc}})$
(green dashed-dotted line) describes the sharp EIA peak, induced by VCC.
\begin{figure}
\caption{The EIA peak for different values of $\gamma_{\text{vcc}}$, with $\Gamma_{\text{pcc}}=5\Gamma$.}
\label{Fig. 3}
\end{figure}
Fig. \ref{Fig. 3} shows the effect of varying the VCC rate, for a fixed PCC
rate ($\Gamma_{\text{pcc}}=5\Gamma$) and zero pump-probe angular deviation.
The width of the pedestal feature depends on the VCC rate and is given by
$\gamma_{\text{vcc}}+\gamma,$ while the width of the narrow peak shows only a
very weak dependence on $\gamma_{\text{vcc}}$. Increasing the VCC rate leads
to a decrease in the overall EIA\ intensity, but to an increase in the ratio
between the amplitude of the narrow peak and the pedestal baseline.
We now turn to explore the residual (two-photon and four-photon) Doppler and
Dicke effects due to wave-vector mismatch between the pump fields and the
probe, introduced in principle either by a frequency detuning between the
fields, $|\mathbf{q}_{p}|\neq|\mathbf{q}_{1,2}|,$ or due to an angular
deviation between them, $\mathbf{q}_{p}\nparallel\mathbf{q}_{1,2}$. We mainly
focus on the latter, which may be found in a nearly degenerate level scheme,
and we further take the two pump fields to be the same, namely $\mathbf{q}
_{1}=\mathbf{q}_{2}$. Figure \ref{Fig. 4} presents the probe absorption
spectrum for different values of the wave-vector difference, $\delta
\mathbf{q}=\mathbf{q}_{p}-\mathbf{q}_{1,2},$ when $\gamma_{\text{vcc}
}=0.1\Gamma$ and $\Gamma_{\text{pcc}}=\Gamma$. As can be seen, increasing
$\delta\mathbf{q}$ broadens the EIA spectrum (see inset). This is analogous to
the broadening of an EIT transmission peak in a similar configuration
\cite{Shuker2007PRA}. However, the wide collisionally-broadened pedestal
remains unaffected by the changes in $\delta\mathbf{q}$, indicating that it
mostly originates from homogeneous decay processes. Figure \ref{Fig. 5}(a)
summarizes the full-width at half-maximum (FWHM) of the EIA peak for
$\Gamma_{\text{pcc}}=\Gamma$ and for various values of $\gamma_{\text{vcc}}$,
as a function of $\delta\mathbf{q}$. Because of the difficulty of separating
the sharp peak from the background in the calculated spectra \footnote{When
$\gamma_{\text{vcc}}\rightarrow0$, the third term in Eq. (\ref{Eq. 2})
gradually vanishes, and the second term ($V_{2}^{2}G_{5}$) is responsible for
the EIA peak, as indicated by the brown-dotted line in Fig. \ref{Fig. 2}(b).
Its width is limited by homogeneous broadening mechanisms and determined by
$\gamma$ and $\Gamma_{\text{pcc}}$.}, the widths of the sharp EIA peak were
obtained only from the third term in Eq. (\ref{Eq. 2}). In contrast to an EIT
peak, which does not depend on $\gamma_{\text{vcc}}$ when $\delta\mathbf{q=0}$
(collinear degenerate beams) \cite{Firstenberg2008PRA}, the FWHM of the EIA
peak at $\delta\mathbf{q=0}$ depends weakly on the VCC rate (although barely
noticeable in the figure). This difference derives from the effect of
collisions on the pump absorption in the case of EIA, as described earlier.
\begin{figure}
\caption{Calculated probe absorption spectra with $\gamma_{\text{vcc}}=0.1\Gamma$ and $\Gamma_{\text{pcc}}=\Gamma$, for different values of the wave-vector difference $\delta\mathbf{q}$.}
\label{Fig. 4}
\end{figure}
For $\delta\mathbf{q\neq0}$ the FWHM of the peak in the Dicke limit (high
$\gamma_{\text{vcc}}$) depends on $\gamma_{\text{vcc}}$ and is proportional to
the residual Doppler-Dicke width, $2v_{\text{th}}\delta\mathbf{q}^{2}
/\gamma_{\text{vcc}}$. In this limit, the results are well approximated by the
analytic expression \cite{Firstenberg2008PRA} [dotted lines in Fig.
\ref{Fig. 5}(a)]:
\begin{equation}
\text{FWHM}=2\times\frac{2}{a^{2}}\gamma_{\text{vcc}}H\left( a\frac
{v_{\text{th}}\delta q}{\gamma_{\text{vcc}}}\right) , \label{Eq. 5}
\end{equation}
where $H\left( x\right) =e^{-x}-1+x$ and $a^{2}=2/\ln2$. Increasing the
pump-probe angular deviation reduces the efficiency of the EIA process and
thus results in a decrease in the probe absorption [Fig. \ref{Fig. 5}(b)].
This is of course the opposite trend to that of EIT (blue stars), where the
depth of the dip decreases (the absorption increases) with increasing $\delta
q$ \cite{Firstenberg2008PRA}.
\begin{figure}
\caption{Calculated EIA FWHM (a) and absorption (b) for $\Gamma_{\text{pcc}}=\Gamma$ and various values of $\gamma_{\text{vcc}}$, as a function of $\delta\mathbf{q}$.}
\label{Fig. 5}
\end{figure}
\section{\label{sec:filter}Spatial-frequency filter}
We now turn to discuss the results of our model from the viewpoint of a
spatial-frequency filter for a structured probe beam. When non-uniform beams
are considered, the different spatial frequencies that comprise the beams
result in different Doppler and Dicke widths. Consequently, the various
spatial-frequency components experience different absorption and refraction in
the medium. Specifically, the dependence of the absorption on the transverse
wave-vectors of the probe beam manifests a filter for the probe in Fourier space.
We assume an optical configuration of two collinear uniform pumps (plane waves
with $V_{1}$ and $V_{2}$ constant) and a spatially varying propagating probe,
$V_{p}=V_{p}(\mathbf{r},t)$. Since the medium exhibits a non-local response
due to the atomic motion, the evolution of the probe is more naturally described
in the Fourier space $V_{p}(\mathbf{k},\omega)$ where $\mathbf{k}$ and
$\omega$ are the spatial and temporal frequencies of the envelope of the
probe. Under these assumptions, the model results in diffusion-like
equations for the populations and coherences of the atomic medium, derived in
Appendix B. To simplify the general dynamics of Eqs. (\ref{Eq. B8a}) and
(\ref{Eq. B9}), we take the stationary case [$\omega=0,$ $V_{p}=V_{p}
(\mathbf{k})$] and assume that the carrier wave-vector of the probe is the
same as that of the pumps, $\mathbf{q}_{p}=\mathbf{q}_{1}=\mathbf{q}_{2},$ so
that $\delta\mathbf{q}_{1}=\delta\mathbf{q}_{2}=0$. Taking the Fourier
transform [see Eq. (\ref{Eq. A13})], we obtain a set of steady-state equations
for the spatially-dependent atomic coherences, $R_{g_{1}g_{2}}(\mathbf{k})$,
$R_{e_{1}e_{2}}(\mathbf{k})$, and $R_{e_{1}g_{2}}(\mathbf{k}),$
\begin{subequations}
\label{Eq. 6}
\begin{align}
& \left[ i\left( \Delta_{p}-\Delta_{1}\right) -\gamma-K_{\text{1p}
}\left\vert V_{1}\right\vert ^{2}-K_{\text{3p}}\left\vert V_{2}\right\vert
^{2}-Dk^{2}\right] R_{g_{1}g_{2}}\nonumber\\
& =(Dk^{2}-bA\Gamma)R_{e_{1}e_{2}}+K_{\text{1p}}V_{1}^{\ast}V_{p}
n_{0},\label{Eq. 6a}\\
& \left[ i\left( \Delta_{p}-\Delta_{2}\right) -\Gamma-\gamma
-Dk^{2}\right] R_{e_{1}e_{2}}\nonumber\\
& =-V_{1}V_{2}^{\ast}(K_{\text{1p}}+K_{\text{3p}})R_{g_{1}g_{2}}-V_{2}^{\ast
}(K_{\text{1p}}+K_{\text{pump}})V_{p}n_{0},\label{Eq. 6b}\\
& R_{e_{1}g_{2}}=iK_{\text{1p}}\left( V_{1}R_{g_{1}g_{2}}+V_{p}n_{0}\right)
\label{Eq. 6c}
\end{align}
where $K_{\text{1p}}=iG_{\text{1p}}/\left( 1-iG_{\text{1p}}\gamma
_{\text{vcc}}\right) $ is the one-photon absorption spectrum with
$G_{\text{1p}}=\int F\left( \mathbf{v}\right) /\xi_{2}d^{3}v\mathbf{\ }$;
$K_{\text{3p}}=iG_{\text{3p}}/\left( 1-iG_{\text{3p}}\gamma_{\text{vcc}
}\right) $ is the three-photon absorption spectrum with $G_{\text{3p}}=\int
F\left( \mathbf{v}\right) /\xi_{4}d^{3}v;\mathbf{\ }$and $K_{\text{pump}
}=iG_{\text{pump}}/\left( 1-iG_{\text{pump}}\gamma_{\text{vcc}}\right) $ is
the one-photon (pump) absorption spectrum with $G_{\text{pump}}=\int F\left(
\mathbf{v}\right) /\xi_{5}d^{3}\mathbf{v}$, as described in Appendix B.
Solving Eq. (\ref{Eq. 6}) for $R_{e_{1}g_{2}}\left( \mathbf{k},\omega\right)
$, substituting the result into the expression for the linear-susceptibility
[Eq. (\ref{Eq. A16})], assuming that $V_{1}=\eta V_{2}$ ($0<\eta\leq1$), and
neglecting all the terms proportional to $1/\Gamma$, we obtain
\end{subequations}
\begin{subequations}
\label{Eq. 7}
\begin{align}
& \chi_{e_{1}g_{2}}\left( \mathbf{k}\right) =\frac{g}{c}iKn_{0}\left(
1+\text{L}\right) ,\label{Eq. 7a}\\
& \text{L}=\frac{\eta\left( 2bA-\eta\right) \Gamma_{\text{p}}}{-i\Delta
_{p}+\gamma+\left( \eta^{2}+1-2bA\eta\right) \Gamma_{\text{p}}+Dk^{2}
},\label{Eq. 7b}
\end{align}
where $D=v_{th}/\gamma_{\text{vcc}}$ is the diffusion coefficient,
$\Gamma_{\text{p}}=K\left\vert V_{2}\right\vert ^{2}$ is the power broadening,
and $K_{\text{1p}}\approx K_{\text{3p}}\approx K_{\text{pump}}=K=\int F\left(
\mathbf{v}\right) /\left[ \mathbf{q}_{p}\cdot\mathbf{v+}i\left(
\Gamma/2+\Gamma_{\text{pcc}}+\gamma+\gamma_{\text{vcc}}\right) \right]
d^{3}v$ for $\Delta_{p}\ll\Gamma_{\hom}=\gamma+\Gamma_{\text{p}}$. In the
case where $\eta=A,$ Eq. (\ref{Eq. 7}) is similar to Eq. (\ref{Eq. 2a})
obtained by Taichenachev \textit{et al. }\cite{Taichenachev1999PRA}, except
for the diffusion term $Dk^{2}$, which vanishes for an atom at rest.
\begin{figure}
\caption{The EIA spatial-frequency filter, given in Eq. (\ref{Eq. 7b}), for different probe detunings $\Delta_{p}$.}
\label{Fig. 6}
\end{figure}
The imaginary part of the susceptibility in Eq. (\ref{Eq. 7}) yields the
absorption of the probe for various values of $\mathbf{k}$. The first term in
the brackets in Eq. (\ref{Eq. 7a}) is the linear one-photon absorption, and
the second term is the $k$-dependent EIA contribution. Thus, the real part of
$L$ in Eq. (\ref{Eq. 7b}) describes an \textquotedblleft
absorbing\textquotedblright\ spatial-frequency filter, the same way as was
done for EIT \cite{Shuker2008PRL,Firstenberg2009NP}. Fig. \ref{Fig. 6}
summarizes several examples of the EIA spatial filter behavior as a function
of $k$ for $\Delta_{p}=0,$ $\Delta_{p}=\pm\Gamma_{\hom},$ and $\Delta_{p}
=\pm2\Gamma_{\hom}$. At $\Delta_{p}=0,$ the curve is a Lorentzian and maximum
absorption is achieved. When $\Delta_{p}\neq0$ the filter becomes more transparent.
\section{\label{sec:Ramseynarr}Ramsey narrowing}
We now consider the \textit{N} system interacting with collinear probe and pump beams that have finite widths.
Due to thermal motion, the alkali atoms spend a period of time in the interaction region and then leave the
light beams, evolve `in the dark', and diffuse back inside. Such a random periodic
motion was described recently by Xiao \textit{et al. }
\cite{Xiao2006PRL,Xiao2008OE} for an EIT system, and was shown to result in a
cusp-like spectrum. Near its center, the line is much narrower than that
expected from time-of-flight broadening and power broadening, and the effect,
resulting from the contribution of bright-dark-bright atomic trajectories of
random durations, was named Ramsey narrowing.
\begin{figure}
\caption{Calculated probe absorption spectra (blue line) for one-dimensional
stepwise beam with finite thickness: (a) $2a=100$ $\mu m$ and (b) $2a=10$
$mm$, and fitted Lorentzian (red dashed line). All other parameters are the
same as in Fig.~\ref{Fig. 2}.}
\label{Fig. 7}
\end{figure}
Ramsey-narrowed spectra can be calculated analytically from the diffusion
equations of the atomic coherences when the light fields of both the probe and
pump beams have finite widths \cite{Firstenberg2008PRA}. The EIA spectrum
resulting from a one-dimensional uniform light-sheet of thickness $2a$ in the
$x-$direction is derived analytically in Appendix B [Eq. (\ref{B15})]. In Fig.
\ref{Fig. 7}, we show the spectrum for two different thicknesses and the
fitted Lorentzian curves. Near the resonance, the EIA line for the $100$ $\mu
m$ sheet is spectrally sharper than the fitted Lorentzian -- the
characteristic signature of Ramsey narrowing. In contrast, the EIA peak
calculated for a $10$ $mm$ beam is well fitted by the Lorentzian. In addition,
the EIA contrast deteriorates as the beam becomes narrower, since the
interaction area decreases and fewer atoms interact with the fields.
\section{\label{sec:conc}Conclusions}
In this paper, we extended the theory that describes the effect of buffer-gas
collisions on three-level $\Lambda$ systems in an EIT configuration
\cite{Firstenberg2007PRA,Shuker2007PRA,Firstenberg2008PRA} to the case of a
four-level closed $N$ system which is the simplest system that experiences EIA
due to TOC. Using this formalism, we investigated the influence of collisions
of optically active atoms with a buffer gas on the EIA peak. In addition to
the exact expressions, we presented an approximate solution for the probe
absorption spectrum, which provides a physical insight into the behavior of
the EIA peak due to VCC, PCC, and wave-vector difference between the pump and
probe beams. VCC were shown to produce a wide pedestal at the base of the
EIA\ peak; increasing the pump-probe angular deviation scarcely affects the
pedestal whereas the sharp central EIA peak becomes weaker and broader due to
the residual Doppler-Dicke effect. Using diffusion-like equations for the
atomic coherences and populations, the spatial-frequency filter and the
Ramsey-narrowed spectrum were analytically obtained.
In extending the description from the $\Lambda$ to the $N$ schemes, we have
considered several elements that are likely to be important in other
four-level systems. These include the diffusion of excited-state coherences
and the influence of the thermal motion on the optical dipole in the absence
of the probe. The latter introduces a Doppler contribution into the pumping
terms and consequently affects the power broadening of the narrow resonances.
\end{subequations}
\appendix
\section{Reduced density matrix}
Consider the near-resonant interaction of a light field consisting of one or
two moderately strong pumps and a weak probe, as given in Eq. (\ref{Eq. 1}),
with the four-level degenerate $N$\textit{ }system of Fig. \ref{Fig. 1}(a). We
use the first-order approximation in the probe amplitude, $V_{p}$, and assume
that $V_{2}<\Gamma,$ $V_{1}\leqslant V_{2},$ $V_{p}<V_{1,2}$. Since the pump
transitions are assumed non-saturated, the atomic population in the absence of
the probe concentrates in the $g_{2}$ state, and the population in other
states can be neglected. The $g_{2} \leftrightarrow e_{2}$ dipole, excited in
the absence of the probe, is of importance and is thus considered. The
resulting Bloch equations are \cite{Taichenachev1999PRA}
\begin{subequations}
\label{Eq. A1}
\begin{align}
\dot{\breve{\rho}}_{g_{1}g_{2}}^{\left( 1\right) ,i} & \left( \omega
_{p}-\omega_{1}\right) =-\left[ i\left( \omega_{e_{1}g_{2}}-\omega
_{e_{1}g_{1}}\right) +\gamma\right] \breve{\rho}_{g_{1}g_{2}}^{\left(
1\right) ,i}\nonumber\\
& +i\breve{V}_{1}^{\ast}\breve{\rho}_{e_{1}g_{2}}^{\left( 1\right)
,i}-i\breve{V}_{2}\breve{\rho}_{g_{1}e_{2}}^{\left( 1\right) ,i}
+bA\Gamma\breve{\rho}_{e_{1}e_{2}}^{\left( 1\right) ,i},\label{Eq. A1a}\\
\dot{\breve{\rho}}_{e_{1}g_{2}}^{\left( 1\right) ,i} & \left( \omega
_{p}\right) =-\left[ i\omega_{e_{1}g_{2}}+\Gamma/2+\Gamma_{\text{pcc}
}\right] \breve{\rho}_{e_{1}g_{2}}^{\left( 1\right) ,i}\nonumber\\
& +i\breve{V}_{p}\breve{\rho}_{g_{2}g_{2}}^{\left( 0\right) ,i}+i\breve
{V}_{1}\breve{\rho}_{g_{1}g_{2}}^{\left( 1\right) ,i},\label{Eq. A1b}\\
\dot{\breve{\rho}}_{e_{1}e_{2}}^{\left( 1\right) ,i} & \left( \omega
_{p}-\omega_{2}\right) =-\left[ i\left( \omega_{e_{1}g_{2}}-\omega
_{e_{2}g_{2}}\right) +\Gamma+\gamma\right] \breve{\rho}_{e_{1}e_{2}
}^{\left( 1\right) ,i}\nonumber\\
& +i\breve{V}_{p}\breve{\rho}_{g_{2}e_{2}}^{\left( 0\right) ,i}+i\breve
{V}_{1}\breve{\rho}_{g_{1}e_{2}}^{\left( 1\right) ,i}-i\breve{V}_{2}^{\ast
}\breve{\rho}_{e_{1}g_{2}}^{\left( 1\right) ,i},\label{Eq. A1c}\\
\dot{\breve{\rho}}_{g_{1}e_{2}}^{\left( 1\right) ,i} & \left( \omega
_{p}-\omega_{1}-\omega_{2}\right) =-\left[ i\left( \omega_{e_{1}g_{2}
}-\omega_{e_{1}g_{1}}-\omega_{e_{2}g_{2}}\right) \right. \nonumber\\
& +\Gamma/2+\left. \Gamma_{\text{pcc}}\right] \breve{\rho}_{g_{1}e_{2}
}^{\left( 1\right) ,i}-i\breve{V}_{2}^{\ast}\breve{\rho}_{g_{1}g_{2}
}^{\left( 1\right) ,i},\label{Eq. A1d}\\
\dot{\breve{\rho}}_{g_{2}e_{2}}^{\left( 0\right) ,i} & \left( -\omega
_{2}\right) =-\left[ \Gamma/2+\Gamma_{\text{pcc}}-i\omega_{e_{2}g_{2}
}\right] \breve{\rho}_{g_{2}e_{2}}^{\left( 0\right) ,i}\left( -\omega
_{2}\right) \nonumber\\
& +i\breve{V}_{2}^{\ast}\left( \breve{\rho}_{e_{2}e_{2}}^{\left( 0\right)
,i}-\breve{\rho}_{g_{2}g_{2}}^{\left( 0\right) ,i}\right) . \label{Eq. A1e}
\end{align}
Here, $\breve{\rho}_{ss^{\prime}}^{\left( j\right) ,i}$ is the
density-matrix element of the $i-$th atom (one of many identical particles) to
the $j-$th order in the probe, and apart from $\breve{\rho}_{g_{2}g_{2}
}^{\left( 0\right) ,i}\approx1,$ $\breve{\rho}_{ss}^{\left( 0\right)
,i}=0$. We also consider the envelopes of the pumps to be constant in time so
that $V_{1,2}$ is shorthand for $V_{1,2}\left( \mathbf{r}\right) $. The wave
equation for the probe field is
\end{subequations}
\begin{equation}
\left( \nabla^{2}-\frac{1}{c^{2}}\frac{\partial^{2}}{\partial t^{2}}\right)
\mathbf{\breve{E}}_{p}\left( \mathbf{r},t\right) =\frac{4\pi}{c^{2}}
\frac{\partial^{2}}{\partial t^{2}}\mathbf{\breve{P}}_{e_{1}g_{2}}\left(
\mathbf{r},t\right) , \label{Eq. A2}
\end{equation}
where $\mathbf{\breve{P}}_{e_{1}g_{2}}\left( \mathbf{r},t\right)
=\mathbf{P}_{e_{1}g_{2}}\left( \mathbf{r},t\right) e^{-i\omega_{p}
t}e^{i\mathbf{q}_{p}\cdot\mathbf{r}}$ is the contribution of the $e_{1}
\leftrightarrow g_{2}$ transition to the expectation value of the
polarization, $\mathbf{P}_{e_{1}g_{2}}$ is the slowly varying polarization,
and $\nabla^{2}$ is the three-dimensional Laplacian operator. With Eq.
(\ref{Eq. 1}), and assuming without loss of generality that $\mathbf{\hat{q}
}_{p}=\mathbf{\hat{z}}q_{p}$, as shown in Fig. \ref{Fig. 1}(b), Eq.
(\ref{Eq. A2}) can be written in the paraxial approximation as
\begin{equation}
\left( \frac{\partial}{\partial t}+c\frac{\partial}{\partial z}-i\frac
{c}{2q_{p}}\nabla_{\bot}^{2}\right) V_{p}\left( \mathbf{r},t\right)
=i\frac{g}{\mu_{e_{1}g_{2}}^{\ast}}\mathbf{P}_{e_{1}g_{2}}\left(
\mathbf{r},t\right) , \label{Eq. A3}
\end{equation}
where $\nabla_{\bot}^{2}$ is the transverse Laplacian operator, and
$g=2\pi\omega_{p}\left\vert \mu_{e_{1}g_{2}}\right\vert ^{2}/\hbar$ is a
coupling constant.
Following \cite{Firstenberg2008PRA}, we introduce a density-matrix
distribution function in space and velocity,
\begin{equation}
\breve{\rho}_{ss^{\prime}}^{{}}=\breve{\rho}_{ss^{\prime}}^{{}}\left(
\mathbf{r},\mathbf{v},t\right) =\underset{i}{\sum}\breve{\rho}_{ss^{\prime}
}^{i}\left( t\right) \delta\left( \mathbf{r-r}_{i}\left( t\right)
\right) \delta\left( \mathbf{v-v}_{i}\left( t\right) \right) ,
\label{Eq. A4}
\end{equation}
where the time dependence of $\breve{\rho}_{ss^{\prime}}^{i}\left( t\right)
$ is determined by Eqs. (\ref{Eq. A1}). Differentiating Eq. (\ref{Eq. A4})
with respect to time, we arrive at
\begin{align}
& \frac{\partial}{\partial t}\breve{\rho}_{ss^{\prime}}^{{}}+\mathbf{v}
\cdot\frac{\partial}{\partial\mathbf{r}}\breve{\rho}_{ss^{\prime}}^{{}
}+\left[ \frac{\partial}{\partial t}\breve{\rho}_{ss^{\prime}}^{{}}\right]
_{\operatorname{col}}\nonumber\\
& =\underset{i}{\sum}\frac{\partial}{\partial t}\breve{\rho}_{ss^{\prime}
}^{i}\left( t\right) \delta\left( \mathbf{r-r}_{i}\left( t\right)
\right) \delta\left( \mathbf{v-v}_{i}\left( t\right) \right) ,
\label{Eq. A5}
\end{align}
where the effect of velocity-changing collisions is taken in the strong
collision limit in the form of a Boltzmann relaxation term
\cite{Sobelman1967SPU},
\begin{equation}
\left[ \frac{\partial}{\partial t}\breve{\rho}_{ss^{\prime}}^{{}}\right]
_{\operatorname{col}}=-\gamma_{\text{vcc}}\left[ \breve{\rho}_{ss^{\prime}
}^{{}}\left( \mathbf{r},\mathbf{v},t\right) -\breve{R}_{ss^{\prime}}^{{}
}\left( \mathbf{r},t\right) F(\mathbf{v})\right] , \label{Eq. A6}
\end{equation}
with $\breve{R}_{ss^{\prime}}^{{}}=\breve{R}_{ss^{\prime}}^{{}}\left(
\mathbf{r},t\right) =\int d^{3}\mathbf{v}\breve{\rho}_{ss^{\prime}}^{{}
}\left( \mathbf{r},\mathbf{v},t\right) $ being the density-number of atoms
per unit volume, near $\mathbf{r}$ in space, and
\begin{equation}
F=F(\mathbf{v})=\left( 2\pi v_{\text{th}}\right) ^{-3/2}e^{-\mathbf{v}
^{2}/2v_{\text{th}}},\text{ }v_{\text{th}}=\frac{k_{b}T}{m}
\end{equation}
is the Boltzmann distribution.
Before writing the coupled dynamics of the internal and motional degrees of
freedom, we introduce the slowly varying envelopes of the density-matrix
elements, $\rho_{ss^{\prime}}=\rho_{ss^{\prime}}\left( \mathbf{r}
,\mathbf{v},t\right) $, as
\begin{align}
& \breve{\rho}_{g_{1}g_{2}}=\rho_{g_{1}g_{2}}e^{-i\left( \omega_{p}
-\omega_{1}\right) t}e^{i\left( \mathbf{q}_{p}-\mathbf{q}_{1}\right)
\cdot\mathbf{r}},\nonumber\\
& \breve{\rho}_{e_{1}g_{2}}=\rho_{e_{1}g_{2}}e^{-i\omega_{p}t}e^{i\mathbf{q}
_{p}\cdot\mathbf{r}},\nonumber\\
& \breve{\rho}_{e_{1}e_{2}}=\rho_{e_{1}e_{2}}e^{i\left( \mathbf{q}
_{p}-\mathbf{q}_{2}\right) \cdot\mathbf{r}},\nonumber\\
& \breve{\rho}_{g_{1}e_{2}}=\rho_{g_{1}e_{2}}e^{-i\left( \omega_{p}
-\omega_{1}-\omega_{2}\right) t}e^{i\left( \mathbf{q}_{p}-\mathbf{q}
_{1}-\mathbf{q}_{2}\right) \cdot\mathbf{r}},\nonumber\\
& \breve{\rho}_{g_{2}e_{2}}=\rho_{g_{2}e_{2}}e^{i\omega_{2}t}e^{-i\mathbf{q}
_{2}\cdot\mathbf{r}}, \label{Eq. A9}
\end{align}
and similarly the slowly varying densities $R_{ss^{\prime}}^{{}}=\int
d^{3}\mathbf{v}\rho_{ss^{\prime}}^{{}}$. Eqs. (\ref{Eq. A1}) then become
\begin{subequations}
\label{Eq. A11}
\begin{align}
& \left[ \frac{\partial}{\partial t}+\mathbf{v}\cdot\frac{\partial}
{\partial\mathbf{r}}-i\xi_{1}\right] \rho_{g_{1}g_{2}}-\gamma_{\text{vcc}
}R_{g_{1}g_{2}}F\nonumber\\
& =i\left( V_{1}^{\ast}\rho_{e_{1}g_{2}}-V_{2}\rho_{g_{1}e_{2}}\right)
+bA\Gamma\rho_{e_{1}e_{2}},\label{Eq. A11a}\\
& \left[ \frac{\partial}{\partial t}+\mathbf{v}\cdot\frac{\partial}
{\partial\mathbf{r}}-i\xi_{2}\right] \rho_{e_{1}g_{2}}-\gamma_{\text{vcc}
}R_{e_{1}g_{2}}F\nonumber\\
& =i\left[ V_{p}n_{0}F+V_{1}\rho_{g_{1}g_{2}}\right] ,\label{Eq. A11b}\\
& \left[ \frac{\partial}{\partial t}+\mathbf{v}\cdot\frac{\partial}
{\partial\mathbf{r}}-i\xi_{3}\right] \rho_{e_{1}e_{2}}-\gamma_{\text{vcc}
}R_{e_{1}e_{2}}F\nonumber\\
& =i\left( V_{1}\rho_{g_{1}e_{2}}-V_{2}^{\ast}\rho_{e_{1}g_{2}}\right)
+iV_{p}^{{}}\rho_{g_{2}e_{2}},\label{Eq. A11c}\\
& \left[ \frac{\partial}{\partial t}+\mathbf{v}\cdot\frac{\partial}
{\partial\mathbf{r}}-i\xi_{4}\right] \rho_{g_{1}e_{2}}-\gamma_{\text{vcc}
}R_{g_{1}e_{2}}F\nonumber\\
& =-iV_{2}^{\ast}\rho_{g_{1}g_{2}},\label{Eq. A11d}\\
& \left[ \frac{\partial}{\partial t}+\mathbf{v}\cdot\frac{\partial}
{\partial\mathbf{r}}-i\xi_{5}\right] \rho_{g_{2}e_{2}}^{{}}-\gamma
_{\text{vcc}}R_{g_{2}e_{2}}F\nonumber\\
& =-iV_{2}^{\ast}n_{0}F, \label{Eq. A11e}
\end{align}
where $\xi_{i}$ ($i=1-5$) are given in Eq. (\ref{Eq. 3}). The expectation
value of the polarization density $\mathbf{P}_{e_{1}g_{2}}\left(
\mathbf{r},t\right) $ in terms of the number density $R_{e_{1}g_{2}}\left(
\mathbf{r},t\right) $ is $\mathbf{P}_{e_{1}g_{2}}\left( \mathbf{r},t\right)
=\mu_{e_{1}g_{2}}^{\ast}R_{e_{1}g_{2}}\left( \mathbf{r},t\right) $, and Eq.
(\ref{Eq. A3}) becomes
\end{subequations}
\begin{equation}
\left( \frac{\partial}{\partial t}+c\frac{\partial}{\partial z}-i\frac
{c}{2q_{p}}\nabla_{\bot}^{2}\right) V_{p}\left( \mathbf{r},t\right)
=igR_{e_{1}g_{2}}\left( \mathbf{r},t\right) . \label{Eq. A12}
\end{equation}
We now consider the case of stationary plane-wave pumps. For this case, it is
convenient to introduce the Fourier transform
\begin{equation}
f\left( \mathbf{r},t\right) =\underset{-\infty}{\overset{+\infty}{\int}
}\frac{d^{3}k}{2\pi}e^{i\mathbf{kr}}\underset{-\infty}{\overset{+\infty}{\int
}}\frac{d\omega}{2\pi}e^{-i\omega t}f\left( \mathbf{k},\omega\right) ,
\label{Eq. A13}
\end{equation}
and write Eqs. (\ref{Eq. A11}) as
\begin{subequations}
\label{Eq. A14}
\begin{align}
& \left[ \omega-\mathbf{k\cdot v}+\xi_{1}\right] \rho_{g_{1}g_{2}}
-i\gamma_{\text{vcc}}R_{g_{1}g_{2}}\left( \mathbf{k},\omega\right)
F\nonumber\\
& =\left( V_{2}\rho_{g_{1}e_{2}}-V_{1}^{\ast}\rho_{e_{1}g_{2}}\right)
+ibA\Gamma\rho_{e_{1}e_{2}},\label{Eq. A14a}\\
& \left[ \omega-\mathbf{k\cdot v}+\xi_{2}\right] \rho_{e_{1}g_{2}}
-i\gamma_{\text{vcc}}R_{e_{1}g_{2}}\left( \mathbf{k},\omega\right)
F\nonumber\\
& =-\left( V_{p}n_{0}F+V_{1}\rho_{g_{1}g_{2}}\right) ,\label{Eq. A14b}\\
& \left[ \omega-\mathbf{k\cdot v}+\xi_{3}\right] \rho_{e_{1}e_{2}}
-i\gamma_{\text{vcc}}R_{e_{1}e_{2}}\left( \mathbf{k},\omega\right)
F\nonumber\\
& =\left( V_{2}^{\ast}\rho_{e_{1}g_{2}}-V_{1}\rho_{g_{1}e_{2}}\right)
-V_{p}^{{}}\rho_{g_{2}e_{2}},\label{Eq. A14c}\\
& \left[ \omega-\mathbf{k\cdot v}+\xi_{4}\right] \rho_{g_{1}e_{2}}
-i\gamma_{\text{vcc}}R_{g_{1}e_{2}}\left( \mathbf{k},\omega\right)
F\nonumber\\
& =V_{2}^{\ast}\rho_{g_{1}g_{2}},\label{Eq. A14d}\\
& \left[ \omega-\mathbf{k\cdot v}+\xi_{5}\right] \rho_{g_{2}e_{2}}
-i\gamma_{\text{vcc}}R_{g_{2}e_{2}}\left( \mathbf{k},\omega\right)
F\nonumber\\
& =V_{2}^{\ast}n_{0}F, \label{Eq. A14e}
\end{align}
and Eq. (\ref{Eq. A12}) as
\end{subequations}
\begin{equation}
\left( ik_{z}-i\frac{\omega}{c}+i\frac{k^{2}}{2q_{p}}\right) V_{p}\left(
\mathbf{k},\omega\right) =i\frac{g}{c}R_{e_{1}g_{2}}\left( \mathbf{k}
,\omega\right) . \label{Eq. A15}
\end{equation}
The linear susceptibility $\chi_{e_{1}g_{2}}\left( \mathbf{k},\omega\right)
$ is defined by
\begin{equation}
R_{e_{1}g_{2}}\left( \mathbf{k},\omega\right) =\chi_{e_{1}g_{2}}\left(
\mathbf{k},\omega\right) \frac{c}{g}V_{p}\left( \mathbf{k},\omega\right) .
\label{Eq. A16}
\end{equation}
In order to find the probe absorption spectrum, we solve Eqs. (\ref{Eq. A14})
analytically, obtain an expression for $\rho_{ss^{\prime}}$, and formally
integrate it over velocity. This leads to an expression for $R_{ss^{\prime}}$
in terms of integrals over velocity, in the form of Eq. (\ref{Gs}), such as
$G_{1}=\int d^{3}v\frac{\xi_{2}\xi_{3}\xi_{4}F(\mathbf{v})}{\xi_{d}}$, which
can be evaluated numerically. In the general case, the resulting expression
for $R_{ss^{\prime}}$ is very complicated and is not reproduced here. In order
to explore the underlying physics, we developed an approximate expression for
the Fourier transform of the density-matrix element that refers to the probe
transition, namely, $R_{e_{1}g_{2}}$ [see Eq. (\ref{Eq. 2})].
One can verify that in the absence of the pumps ($V_{1}=V_{2}=0$), the
resulting one-photon complex spectrum simplifies to the well known result for
the strong collision regime, $K=iG/\left( 1-i\gamma_{\text{vcc}}G\right) $,
where $G=\int d^{3}\mathbf{v}F /\left( \omega-\mathbf{k\cdot v}+\xi
_{2}\right) $ \cite{Sobelman1967SPU}.
\section{Diffusion in the presence of fields}
In order to obtain diffusion-like equations for the density-matrix elements
and the probe fields, we begin by integrating Eqs. (\ref{Eq. A11a}) and
(\ref{Eq. A11c}) over velocity and obtain
\begin{subequations}
\label{Eq. B1}
\begin{align}
& \left[ \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{1}\right]
\cdot\mathbf{J}_{g_{1}g_{2}}+\left[ \frac{\partial}{\partial t}-i\left(
\Delta_{p}-\Delta_{1}\right) +\gamma\right] R_{g_{1}g_{2}}\nonumber\\
& =i\left( V_{1}^{\ast}R_{e_{1}g_{2}}-V_{2}R_{g_{1}e_{2}}\right) +bA\Gamma
R_{e_{1}e_{2}},\label{Eq. B1a}\\
& \left[ \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{2}\right]
\cdot\mathbf{J}_{e_{1}e_{2}}+\Biggl[\frac{\partial}{\partial t}-i\left(
\Delta_{p}-\Delta_{2}\right) \nonumber\\
& +\Gamma+\gamma\Biggr]R_{e_{1}e_{2}}=i\left( V_{1}R_{g_{1}e_{2}}
-V_{2}^{\ast}R_{e_{1}g_{2}}+V_{p}R_{g_{2}e_{2}}\right) , \label{Eq. B1b}
\end{align}
where $\mathbf{J}_{ss^{\prime}}=\mathbf{J}_{ss^{\prime}}\left( \mathbf{r}
,t\right) =\int d^{3}v\mathbf{v}\rho_{ss^{\prime}} $ is the envelope of the
current density. Expanding $\rho_{g_{1}g_{2}} $ and $\rho_{e_{1}e_{2}} $ in
Eqs. (\ref{Eq. A11a}) and (\ref{Eq. A11c}) as $\rho_{ss^{\prime}}
=R_{ss^{\prime}} F +1/\gamma_{\text{vcc}}\rho_{ss^{\prime}}^{(1)} ,$
multiplying Eqs. (\ref{Eq. A11a}) and (\ref{Eq. A11c}) by $\mathbf{v}$,
integrating the resulting equations over velocity using
\end{subequations}
\begin{equation}
\int d^{3}v_{j}v_{i}\frac{\partial}{\partial x_{i}}R_{ss^{\prime}} F
=\delta_{ij}v_{\text{th}}\frac{\partial}{\partial x_{i}}R_{ss^{\prime}} ,
\label{Eq. B2}
\end{equation}
defining the current density of the density matrix by
\begin{equation}
\gamma_{\text{vcc}}\mathbf{J}_{ss^{\prime}} =\int d^{3}v_{j}\rho_{ss^{\prime}
}^{\left( 1\right) } , \label{Eq. B3}
\end{equation}
and retaining the leading terms in $1/\gamma_{\text{vcc}}$, we obtain
\begin{subequations}
\label{Eq. B4}
\begin{align}
& \mathbf{J}_{g_{1}g_{2}}+D\left[ \frac{\partial}{\partial\mathbf{r}
}+i\delta\mathbf{q}_{1}\right] R_{g_{1}g_{2}}\nonumber\\
& =\frac{i}{\gamma_{\text{vcc}}}\left( V_{1}^{\ast}\mathbf{J}_{e_{1}g_{2}
}-V_{2}\mathbf{J}_{g_{1}g_{2}}\right) -\frac{bA\Gamma}{\gamma_{\text{vcc}}
}\mathbf{J}_{e_{1}e_{2}},\label{Eq. B4a}\\
& \mathbf{J}_{e_{1}e_{2}}+D\left[ \frac{\partial}{\partial\mathbf{r}
}+i\delta\mathbf{q}_{2}\right] R_{e_{1}e_{2}}\nonumber\\
& =\frac{i}{\gamma_{\text{vcc}}}\left( V_{1}\mathbf{J}_{g_{1}e_{2}}
-V_{2}^{\ast}\mathbf{J}_{e_{1}g_{2}}+\tilde{V}_{p}\mathbf{J}_{g_{2}e_{2}
}\right) , \label{Eq. B4b}
\end{align}
where $D=v_{\text{th}}/\gamma_{\text{vcc}}$. Substituting $\mathbf{J}
_{g_{1}g_{2}}$, $\mathbf{J}_{e_{1}e_{2}}$ from Eq. (\ref{Eq. B4}) into Eq.
(\ref{Eq. B1}), we get
\end{subequations}
\begin{subequations}
\label{Eq. B5}
\begin{align}
& \left[ \frac{\partial}{\partial t}-i\left( \Delta_{p}-\Delta_{1}\right)
+\gamma-D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}
_{1}\right) ^{2}\right] R_{g_{1}g_{2}}\nonumber\\
& =i\left( V_{1}^{\ast}R_{e_{1}g_{2}}-V_{2}R_{g_{1}e_{2}}\right) +bA\Gamma
R_{e_{1}e_{2}}-D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta
\mathbf{q}_{1}\right) \nonumber\\
& \times\left[ \frac{i}{\gamma_{\text{vcc}}}\left( V_{1}^{\ast}
\mathbf{J}_{e_{1}g_{2}}-V_{2}\mathbf{J}_{g_{1}e_{2}}\right) -\frac{bA\Gamma
}{\gamma_{\text{vcc}}}\mathbf{J}_{e_{1}e_{2}}\right] ,\label{Eq. B5a}\\
& \left[ \frac{\partial}{\partial t}-i\left( \Delta_{p}-\Delta_{2}\right)
+\Gamma+\gamma-D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta
\mathbf{q}_{2}\right) ^{2}\right] R_{e_{1}e_{2}}\nonumber\\
& =i\left( V_{1}R_{g_{1}e_{2}}-V_{2}^{\ast}R_{e_{1}g_{2}}+V_{p}R_{g_{2}
e_{2}}\right) -D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta
\mathbf{q}_{2}\right) \nonumber\\
& \times\left[ \frac{i}{\gamma_{\text{vcc}}}\left( V_{1}\mathbf{J}
_{g_{1}e_{2}}-V_{2}^{\ast}\mathbf{J}_{e_{1}g_{2}}+V_{p}\mathbf{J}_{g_{2}e_{2}
}\right) \right] . \label{Eq. B5b}
\end{align}
In order to calculate $R_{e_{1}g_{2}}$, $R_{g_{1}e_{2}}$, $R_{g_{2}e_{2}},$
and $\mathbf{J}_{e_{1}g_{2}}$, $\mathbf{J}_{g_{1}e_{2}}$, $\mathbf{J}
_{g_{2}e_{2}}$, we assume in Eqs. (\ref{Eq. A11b}), (\ref{Eq. A11d}), and
(\ref{Eq. A11e}) that the envelopes change slowly enough such that $\left\vert
\partial/\partial t+\mathbf{v}\cdot\partial/\partial\mathbf{r}\right\vert $
$\ll\left\vert \xi_{2,4,5}\right\vert ,$ and get
\end{subequations}
\begin{subequations}
\label{Eq. B6}
\begin{align}
-i\xi_{2}\rho_{e_{1}g_{2}}= & \gamma_{\text{vcc}}R_{e_{1}g_{2}} F+i\left(
V_{p}n_{0}F+V_{1}\rho_{g_{1}g_{2}}\right) ,\label{Eq. B6a}\\
-i\xi_{4}\rho_{g_{1}e_{2}}= & \gamma_{\text{vcc}}R_{g_{1}e_{2}}
F-iV_{2}^{\ast}\rho_{g_{1}g_{2}},\label{Eq. B6b}\\
-i\xi_{5}\rho_{g_{2}e_{2}}= & \gamma_{\text{vcc}}R_{g_{2}e_{2}}
F-iV_{2}^{\ast}n_{0}F. \label{Eq. B6c}
\end{align}
Solving Eq. (\ref{Eq. B6}) formally for $\rho_{e_{1}g_{2}}$, $\rho_{g_{1}
e_{2}}$, $\rho_{g_{2}e_{2}}$ and substituting only their leading parts,
\textit{i.e.} $\rho_{ss^{\prime}}=$ $R_{ss^{\prime}} F $, we find
\end{subequations}
\begin{subequations}
\label{Eq. B7}
\begin{align}
\rho_{e_{1}g_{2}} & =\left[ \gamma_{\text{vcc}}R_{e_{1}g_{2}}
-V_{1}R_{g_{1}g_{2}} - V_{p} n_{0}\right] F/\xi_{2}, \label{Eq. B7a}
\\
\rho_{g_{1}e_{2}} & =\left[ \gamma_{\text{vcc}}R_{g_{1}e_{2}} +V_{2}^{\ast
}R_{g_{1}g_{2}} \right] F/\xi_{4},\label{Eq. B7b}\\
\rho_{g_{2}e_{2}} & =\left[ \gamma_{\text{vcc}}R_{g_{2}e_{2}} +V_{2}^{\ast
}n_{0}\right] F/\xi_{5}. \label{Eq. B7c}
\end{align}
Integrating Eqs. (\ref{Eq. B7}) over velocity we get
\end{subequations}
\begin{subequations}
\label{Eq. B8}
\begin{align}
& R_{e_{1}g_{2}} =iK_{\text{1p}}\left[ V_{1}R_{g_{1}g_{2}} +V_{p}
n_{0}\right] ,\label{Eq. B8a}\\
& R_{g_{1}e_{2}} =-iK_{\text{3p}}V_{2}^{\ast}R_{g_{1}g_{2}}
,\label{Eq. B8b}\\
& R_{g_{2}e_{2}} =-iK_{\text{pump}}V_{2}^{\ast}n_{0}, \label{Eq. B8c}
\end{align}
where $K_{\text{1p}}=iG_{\text{1p}}/\left( 1-G_{\text{1p}}\gamma_{\text{vcc}
}\right) $ is the one-photon absorption spectrum with $G_{\text{1p}}=\int F
/\xi_{2}d^{3}\mathbf{v}$, $K_{\text{3p}}=iG_{\text{3p}}/\left(
1-G_{\text{3p}}\gamma_{\text{vcc}}\right) $ is the three-photon absorption
spectrum with $G_{\text{3p}}=\int F/\xi_{4}d^{3}\mathbf{v}$, and
$K_{\text{pump}}=iG_{\text{pump}}/\left( 1-G_{\text{pump}}\gamma_{\text{vcc}
}\right) $ is the one-photon (pump) absorption spectrum with $G_{\text{pump}
}=\int F/\xi_{5}d^{3}\mathbf{v}$. In the case of collinear pump and probe
beams $\delta\mathbf{q=}\delta\mathbf{q}_{1,2}=\mathbf{q}_{p}-\mathbf{q}
_{1,2}=\delta q\widehat{\mathbf{z}}$, Eqs. (\ref{Eq. B5}) and (\ref{Eq. B8})
form a closed set when
\end{subequations}
\begin{align*}
& \left( \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{1,2}\right)
\cdot\frac{iV_{1,2}\left( \mathbf{r}\right) }{\gamma_{\text{vcc}}}
\mathbf{J}_{e_{1}g_{2}},\\
& \left( \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{1,2}\right)
\cdot\frac{iV_{1,2}\left( \mathbf{r}\right) }{\gamma_{\text{vcc}}}
\mathbf{J}_{g_{1}e_{2}},\text{ }\\
& \left( \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{2}\right)
\cdot\frac{iV_{p}\left( \mathbf{r,}t\right) }{\gamma_{\text{vcc}}}
\mathbf{J}_{g_{2}e_{2}}
\end{align*}
can be neglected in Eq. (\ref{Eq. B5}). These terms vanish completely in the
special case of pump and probe which are plane waves $\left( \partial
/\partial\mathbf{r=0}\right) $, and also collinear and degenerate $\left(
\delta\mathbf{q=0}\right) $. They can also be neglected whenever $\left\vert
V_{1,2,p}\right\vert \ll\gamma_{\text{vcc}}$ as is the case in many realistic
situations. However, the term $\left( \partial/\partial\mathbf{r}
+i\delta\mathbf{q}_{1}\right) \cdot bA\Gamma/\gamma_{\text{vcc}}
\mathbf{J}_{e_{1}e_{2}}$ in Eq. (\ref{Eq. B5a}) cannot be neglected in the
case of collinear pump and probe beams since $bA\Gamma/\gamma_{\text{vcc}}$
does not go to zero.
Substituting Eq. (\ref{Eq. B4b}) into Eq. (\ref{Eq. B5a}), and Eq.
(\ref{Eq. B8}) into Eq. (\ref{Eq. B5}), we find:
\begin{subequations}
\label{Eq. B9}
\begin{align}
& \left\{ \frac{\partial}{\partial t}-i\left( \Delta_{p}-\Delta_{1}\right)
+\gamma+K_{\text{1p}}\left\vert V_{1}\right\vert ^{2}+K_{\text{3p}}\left\vert
V_{2}\right\vert ^{2}\right\} R_{g_{1}g_{2}}\nonumber\\
& =D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{1}\right)
^{2}R_{g_{1}g_{2}}+D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta
\mathbf{q}_{2}\right) ^{2}R_{e_{1}e_{2}}\nonumber\\
& +bA\Gamma R_{e_{1}e_{2}}-K_{\text{1p}}V_{1}^{\ast}V_{p}n_{0}
,\label{Eq. B9a}\\
& \left\{ \frac{\partial}{\partial t}-i\left( \Delta_{p}-\Delta_{2}\right)
+\Gamma+\gamma\right\} R_{e_{1}e_{2}}\nonumber\\
& =D\left( \frac{\partial}{\partial\mathbf{r}}+i\delta\mathbf{q}_{2}\right)
^{2}R_{e_{1}e_{2}}+V_{1}V_{2}^{\ast}\left( K_{\text{1p}}+K_{\text{3p}
}\right) R_{g_{1}g_{2}}\nonumber\\
& +V_{2}^{\ast}\left( K_{\text{1p}}+K_{\text{pump}}\right) V_{p}n_{0}.
\label{Eq. B9b}
\end{align}
These are the final diffusion-like coupled equations for the ground- and
excited-state coherences.
In order to investigate the Ramsey narrowing of the EIA peak, we consider
finite probe and pump beams and restrict the discussion to collinear EIA. We
assume that the fields are stationary and overlap in their cross sections with
negligible variation along the $z-$direction, $V_{p}\left( \mathbf{r}
,t\right) =V_{p}w\left( \mathbf{r}_{\bot}\right) $, $V_{1}\left(
\mathbf{r}\right) =V_{1}w\left( \mathbf{r}_{\bot}\right) $, $V_{2}\left(
\mathbf{r},t\right) =V_{2}w\left( \mathbf{r}_{\bot}\right) ,$ where
$w\left( \mathbf{r}_{\bot}\right) $ is the transverse profile of the fields.
We further take $\delta q=0$ and $\Delta_{1}=\Delta_{2}=0$ for brevity. In the
diffusion regime, we rewrite Eqs. (\ref{Eq. B8}) and (\ref{Eq. B9}) as
\end{subequations}
\begin{subequations}
\label{Eq. B10}
\begin{align}
& \left[ i\Delta_{p}+\gamma+\left( K_{\text{1p}}\left\vert V_{1}\right\vert
^{2}+K_{\text{3p}}\left\vert V_{2}\right\vert ^{2}\right) w\left(
\mathbf{r}_{\bot}\right) ^{2}\right] R_{g_{1}g_{2}}=\nonumber\\
& bA\Gamma\left( 1+\frac{D}{\gamma_{\text{vcc}}}\nabla_{\bot}^{2}\right)
R_{e_{1}e_{2}}-K_{\text{1p}}V_{1}^{\ast}V_{p}n_{0}w\left( \mathbf{r}_{\bot
}\right) ^{2},\label{Eq. B10a}\\
& R_{e_{1}g_{2}}=iK_{\text{1p}}\left( V_{1}R_{g_{1}g_{2}}+V_{p}n_{0}\right)
w\left( \mathbf{r}_{\bot}\right) ,\label{Eq. B10b}\\
& \left( i\Delta_{p}+\Gamma+\gamma-D\nabla_{\bot}^{2}\right) R_{e_{1}e_{2}
}\nonumber\\
& =V_{1}\left( K_{\text{1p}}+K_{\text{3p}}\right) R_{g_{1}g_{2}}V_{2}
^{\ast}w\left( \mathbf{r}_{\bot}\right) ^{2}\nonumber\\
& +V_{p}\left( K_{\text{1p}}+K_{\text{pump}}\right) n_{0}V_{2}^{\ast
}w\left( \mathbf{r}_{\bot}\right) ^{2},\label{Eq. B10c}\\
& R_{g_{1}e_{2}}=-iK_{\text{3p}}V_{2}^{\ast}R_{g_{1}g_{2}}w\left(
\mathbf{r}_{\bot}\right) ,\label{Eq. B10d}\\
& R_{g_{2}e_{2}}=-iK_{\text{pump}}V_{2}^{\ast}n_{0}w\left( \mathbf{r}_{\bot
}\right) . \label{Eq. B10e}
\end{align}
We further consider a probe and pump beams with a uniform intensity and phase
within a sheet of thickness $2a$ in the $x-$direction (one-dimensional
stepwise beams):
\end{subequations}
\[
w\left( x,y\right) =\left\{
\genfrac{}{}{0pt}{}{1\text{ for }\left\vert x\right\vert \leq a}{0\text{ for
}\left\vert x\right\vert >a}
\right. .
\]
The solution for $R_{g_{1}g_{2}}$, symmetric in $x$ and decaying as
$\left\vert x\right\vert \rightarrow\infty$, is given by
\begin{subequations}
\label{Eq. B13}
\begin{align}
& R_{g_{1}g_{2}}\left( \left\vert x\right\vert \leq a\right) =\nonumber\\
& C_{2}\cosh\left( k_{1}x\right) +C_{1}\cosh\left( k_{2}x\right)
+\frac{bA\Gamma\beta_{2}+D\alpha_{2}^{2}\beta_{1}}{\left( D\alpha_{1}
\alpha_{2}\right) ^{2}+bA\Gamma\beta_{3}},\label{Eq. B13a}\\
& R_{e_{1}e_{2}}\left( \left\vert x\right\vert \leq a\right) =\frac
{C_{1}\left( k_{2}^{2}-\alpha_{2}^{2}\right) D\gamma_{\text{vcc}}}
{bA\Gamma\left( D\alpha_{2}^{2}+\gamma_{\text{vcc}}\right) }\cosh\left(
k_{2}x\right) \nonumber\\
& +\frac{C_{2}\left( k_{1}^{2}-\alpha_{1}^{2}\right) D\gamma_{\text{vcc}}
}{bA\Gamma\left( D\alpha_{2}^{2}+\gamma_{\text{vcc}}\right) }\cosh\left(
k_{1}x\right) +\frac{\beta_{1}\beta_{3}-\beta_{2}\alpha_{1}^{2}D}{\beta
_{3}bA\Gamma-\left( D\alpha_{1}\alpha_{2}\right) ^{2}},\\
& R_{g_{1}g_{2}}\left( \left\vert x\right\vert >a\right) =\nonumber\\
& \frac{C_{3}bA\Gamma\left( D\alpha_{2}^{2}+\gamma_{\text{vcc}}\right)
}{\left( \alpha_{3}^{2}-\alpha_{2}^{2}\right) D\gamma_{\text{vcc}}
}e^{-\alpha_{2}\left( \left\vert x\right\vert -a\right) }+C_{4}
e^{-\alpha_{3}\left( \left\vert x\right\vert -a\right) },\\
& R_{e_{1}e_{2}}\left( \left\vert x\right\vert >a\right) =C_{3}
e^{-\alpha_{2}\left( \left\vert x\right\vert -a\right) },
\end{align}
where $\alpha_{1}^{2}=(-i\Delta_{p}+\gamma+K_{\text{1p}}\left\vert
V_{1}\right\vert ^{2}+K_{\text{3p}}\left\vert V_{2}\right\vert ^{2})/D,$
$\alpha_{2}^{2}=\alpha_{3}^{2}+\Gamma/D,$ $\alpha_{3}^{2}=\left( -i\Delta
_{p}+\gamma\right) /D,$ and $\beta_{1}=V_{1}^{\ast}V_{p}K_{\text{1p}}n_{0},$
$\beta_{2}=V_{1}V_{2}^{\ast}(K_{\text{1p}}+K_{\text{3p}}),\beta_{3}
=V_{2}^{\ast}V_{p}(K_{\text{1p}}+K_{\text{pump}})n_{0}.$ The complex diffusion
wave-numbers are obtained from
\end{subequations}
\begin{align*}
& 2D\gamma_{\text{vcc}}k_{1,2}^{2}=D\gamma_{\text{vcc}}\alpha_{+}^{2}
+\beta_{3}bA\Gamma\\
& \mp\left[ (D\gamma_{\text{vcc}})^{2}\alpha_{-}^{4}+\beta_{3}
bA\Gamma\left( 4+2D\gamma_{\text{vcc}}\alpha_{+}^{2}+\beta_{3}bA\Gamma
\right) \right] ^{1/2},
\end{align*}
with $\alpha_{\pm}^{2}=\alpha_{2}^{2}\pm\alpha_{1}^{2}.$ The coefficients
$C_{i}$ ($i=1-4$) are obtained from the continuity conditions of
$R_{ss^{\prime}}$ and $\left( \partial/\partial x\right) R_{ss^{\prime}}$ at
$\left\vert x\right\vert =a$. From Eq. (\ref{Eq. B10b}) one finds
\begin{align}
& R_{e_{1}g_{2}}\left( \left\vert x\right\vert \leq a\right) =iK_{\text{1p}}
\Biggl[V_{1}\left( C_{2}\cosh\left( k_{1}x\right) \right. \nonumber\\
& +C_{1}\cosh\left( k_{2}x\right) +\left. \frac{bA\Gamma\beta_{2}
+D\alpha_{2}^{2}\beta_{1}}{\left( D\alpha_{1}\alpha_{2}\right) ^{2}
+bA\Gamma\beta_{3}}\right) +V_{p}n_{0}\Biggr] , \label{B15}
\end{align}
and the energy absorption at frequency $\omega_{p}$ is finally calculated from
$P\left( \Delta\right) =(\hbar\omega_{p}/a)\text{Im}\int_{-a}^{a}
dxR_{e_{1}g_{2}}\left( x\right) .$ Two examples for the resulting spectrum
are given in Fig. \ref{Fig. 7}.
\end{document}
\begin{document}
\setcounter{Maxaffil}{2}
\title{On the adjacency matrix of a complex unit gain graph}
\author[a]{\rm Ranjit Mehatari\thanks{ranjitmehatari@gmail.com, mehatarir@nitrkl.ac.in}}
\author[b]{\rm M. Rajesh Kannan\thanks{rajeshkannan1.m@gmail.com, rajeshkannan@maths.iitkgp.ac.in}}
\author[b]{\rm Aniruddha Samanta\thanks{aniruddha.sam@gmail.com}}
\affil[a]{Department of Mathematics,}
\affil[ ]{National Institute of Technology Rourkela,}
\affil[ ]{Rourkela - 769008, India}
\affil[ ]{ }
\affil[b]{Department of Mathematics,}
\affil[ ]{Indian Institute of Technology Kharagpur,}
\affil[ ]{Kharagpur-721302, India.}
\maketitle
\begin{abstract}
A complex unit gain graph is a simple graph in which each orientation of an edge is given a complex number with modulus $1$ and its inverse is assigned to the opposite orientation of the edge. In this article, we first establish bounds for the eigenvalues of complex unit gain graphs. We then study some of the properties of the adjacency matrix of a complex unit gain graph in connection with the characteristic and the permanental polynomials, and establish spectral properties of the adjacency matrices of complex unit gain graphs. In particular, using Perron-Frobenius theory, we establish a characterization for bipartite graphs in terms of the set of eigenvalues of the gain graph and the set of eigenvalues of the underlying graph. Also, we derive an equivalent condition on the gain so that the eigenvalues of the gain graph and the eigenvalues of the underlying graph are the same.
\end{abstract}
{\bf AMS Subject Classification(2010):} 05C50, 05C22.
\textbf{Keywords:} Gain graph, Characteristic polynomial, Perron-Frobenius theory, Bipartite graph, Balanced gain graph.
\section{Introduction}
Let $G=(V,E)$ be a simple, undirected, finite graph with the vertex set $V(G)=\{v_1,v_2,\ldots,v_n\}$ and the edge set $E(G) \subseteq V \times V$. If two vertices $v_i$ and $v_j$ are adjacent, we write $v_i\sim v_j$, and the edge between them is denoted by $e_{ij}$. The degree of the vertex $v_i$ is denoted by $d_i$. The $(0,1)$-\textit{adjacency matrix} or simply the \textit{adjacency matrix} of $G$ is an $n \times n$ matrix, denoted by $A(G)=[a_{ij}]$, whose rows and columns are indexed by the vertex set of the graph and the entries are defined by
$$a_{ij}=\begin{cases}
1, &\text{if }v_i\sim v_j,\\
0, &\text{otherwise.}\end{cases}$$
The adjacency matrix of a graph is one of the well studied matrix classes in the field of spectral graph theory. For more details about the study of classes of matrices associated with graphs, we refer to \cite{Bap-book, Brou, Chung, Cve2, Cve1, Mer}.
The notion of gain graph was introduced in \cite{Zas1}. For a given graph $G$ and a group $\mathfrak{G}$, first orient the edges of the graph $G$. For each oriented edge $e_{ij}$ assign a value (the \textit{gain} of the edge $e_{ij}$) $g$ from $\mathfrak{G}$ and assign $g^{-1}$ to the oriented edge $e_{ji}$. If the group is taken to be the multiplicative group of unit complex numbers, the graph is called the \textit{complex unit gain graph}. Now let us recall the definition of complex unit gain graphs \cite{Reff1}. The set of all oriented edges of the graph $G$ is denoted by $\overrightarrow{E}(G)$.
\begin{definition}
A $\mathbb{T}$-gain graph (or complex unit gain graph) is a triple $\Phi=(G,\mathbb{T},\varphi)$, where
\begin{itemize}
\item[(i)] $G=(V,E)$ is a simple finite graph,
\item[(ii)] $\mathbb{T}$ is the unit complex circle, i.e., $\mathbb{T}=\{z\in\mathbb{C}:|z|=1\}$, and
\item[(iii)] the map $\varphi:\overrightarrow{E}(G)\rightarrow\mathbb{T}$ is such that $\varphi(e_{ij})=\varphi(e_{ji})^{-1}$.
\end{itemize}
Since, we consider $\mathbb{T}$-gain graphs throughout this paper, we use $\Phi=(G,\varphi)$ instead of $\Phi=(G,\mathbb{T},\varphi)$.
\end{definition}
The study of the spectral properties of $\mathbb{T}$-gain graphs is interesting because this generalizes the theory of adjacency matrix for undirected graphs. In \cite{Reff1}, the author introduced the adjacency matrix $A(\Phi)=[a_{ij}]_{n\times n}$ for a $\mathbb{T}$-gain graph $\Phi$ and provided some important spectral properties of $A(\Phi)$. The entries of $A(\Phi)$ are given by
$$a_{ij}=\begin{cases}
\varphi(e_{ij}),&\text{if } \mbox{$v_i\sim v_j$},\\
0,&\text{otherwise.}\end{cases}$$
If $v_i$ is adjacent to $v_j$, then $a_{ij} = \varphi(e_{ij}) = \varphi(e_{ji})^{-1}
= \overline{\varphi(e_{ji})} = \overline{a_{ji}}$ . Thus the matrix $A(\Phi)$ is Hermitian, and its eigenvalues are real. Let $\sigma(A(\Phi))$ denote the set of eigenvalues of the matrix $A(\Phi)$.
Particular cases of the notion of adjacency matrix of $\mathbb{T}$-gain graphs were considered with different weights in the literature \cite{Bap,Kat}. In \cite{Kat}, the authors considered complex weighted graphs with the weights taken from $\{\pm1,\pm i\}$, and characterized unicyclic graphs having the strong reciprocal eigenvalue property. In \cite{Ger}, the authors studied some of the properties of the characteristic polynomial for gain graphs. For some interesting spectral properties of gain graphs, we refer to \cite{Reff1, Reff2, Ger, Zas2}. When $\varphi(e_{ij})=1$ for all $e_{ij}$, then $A(\Phi)=A(G)$. Thus we can consider $G$ as a $\mathbb{T}$-gain graph, and we denote this by $(G,1)$. Recently, the notions of the incidence matrix and the Laplacian matrix for $\mathbb{T}$-gain graphs have been studied \cite{Reff1, lap-gain}. From the above discussion it is clear that the spectral theory of $\mathbb{T}$-gain graphs generalizes the spectral theory of undirected graphs and some weighted graphs.
Next, we recall some of the definitions and notation that we need. For more details we refer to \cite{Reff1,Reff2,Zas4,Zas1,Zas3,Zas2}.
\begin{definition}
The \textit{gain} of a cycle (with some orientation) $C={v_1v_2\ldots v_lv_1}$, denoted by $\varphi(C)$, is defined as the product of the gains of its edges, that is
$$\varphi(C)=\varphi(e_{12})\varphi(e_{23})\cdots\varphi(e_{(l-1)l})\varphi(e_{l1}).$$
A cycle $C$ is said to be \textit{neutral} if $\varphi(C)=1$, and a gain graph is said to be \textit{balanced} if all its cycles are neutral. For a cycle $C$ of $G$, we denote the real part of the gain of $C$ by $\mathfrak{R}(C)$, and it is independent of the orientation.
\end{definition}
\begin{definition}
A function from the vertex set of $G$ to the complex unit circle $\mathbb{T}$ is called a \textit{switching function}. We say that, two gain graphs $\Phi_1=(G,\varphi_1)$ and $\Phi_2=(G,\varphi_2)$ are \textit{switching equivalent}, written as $\Phi_1\sim\Phi_2$, if there is a switching function $\zeta:V\rightarrow\mathbb{T}$ such that $$\varphi_2(e_{ij})=\zeta(v_i)^{-1}\varphi_1(e_{ij})\zeta(v_j).$$
The switching equivalence of two gain graphs can be defined in the following equivalent way:
Two gain graphs $\Phi_1=(G,\varphi_1)$ and $\Phi_2=(G,\varphi_2)$ are switching equivalent, if there exists a diagonal matrix $D_\zeta$ with diagonal entries from $\mathbb{T}$, such that
$$A(\Phi_2)=D_\zeta^{-1}A(\Phi_1)D_\zeta.$$
\end{definition}
\begin{definition}
A \textit{potential function} for $\varphi$ is a function $\psi:V\rightarrow\mathbb{T}$, such that for each edge $e_{ij}$, $\varphi(e_{ij})=\psi(v_i)^{-1}\psi(v_j).$
\end{definition}
\begin{theorem}\cite{Zas1}\label{Zas1}
Let $\Phi=(G,\varphi)$ be a $\mathbb{T}$-gain graph. Then the following statements are equivalent:
\begin{itemize}
\item[(i)] $\Phi$ is balanced,
\item[(ii)] $\Phi\sim(G,1)$,
\item[(iii)] $\varphi$ has a potential function.
\end{itemize}
\end{theorem}
The following necessary condition for switching equivalence is known.
\begin{theorem}\cite{Reff1}
\label{Th1}
Let $\Phi_1=(G,\varphi_1)$ and $\Phi_2=(G,\varphi_2)$ be $\mathbb{T}$-gain graphs. If $\Phi_1\sim\Phi_2$, then $\sigma(A(\Phi_1))=\sigma(A(\Phi_2))$.
\end{theorem}
In Section \ref{spec-bip}, we construct a counterexample to show that the above necessary condition is not sufficient.
The next theorem gives a necessary condition for two $\mathbb{T}$-gain graphs to be switching equivalent.
\begin{theorem}\cite{Reff2}
\label{Th2}
Let $\Phi_1=(G,\varphi_1)$ and $\Phi_2=(G,\varphi_2)$ be two $\mathbb{T}$-gain graphs. If $\Phi_1\sim\Phi_2$ , then for any cycle $C$ in $G$, $\varphi_1(C)=\varphi_2(C)$ holds.
\end{theorem}
\begin{definition}
The \emph{characteristic polynomial} of a gain graph, denoted by $P_{\Phi}(x)$, is defined as $P_{\Phi}(x) = \det (x I - A(\Phi))= x^n+a_1x^{n-1}+\cdots+a_n$. The \emph{permanental polynomial} of a gain graph, denoted by $Q_{\Phi}(x)$, is defined as $Q_{\Phi}(x) =\text{per}(x I-A(\Phi))= x^n+b_1x^{n-1}+\cdots+b_n$. The characteristic and the permanental polynomials of the underlying graph $G$ are denoted by $P_{G}(x)$ and $Q_{G}(x)$, respectively. Some important and interesting properties of $P_G(x)$ and $Q_G(x)$ can be found in \cite{Cve1,Mer1}.
\end{definition}
\begin{definition}
A \textit{matching} in a graph $G$ is a set of edges such that no two of them have a vertex in common. {The number of edges in a matching is called the \emph{size of that matching}. The collection of all matchings of size $k$ in a graph $G$ is denoted by $\mathcal{M}_k(G)$. We define $m_k(G)=|\mathcal{M}_k(G)|$ with the convention that $m_0(G)=1$.} A matching is called \textit{maximal} if it is not contained in any other matching. The largest possible cardinality among all matchings is called the \textit{matching number} of $G$.\end{definition}
Let $\mathbb{C}^{n \times n}$ denote the set of all $n \times n$ matrices with complex entries. For a matrix $A = (a_{ij}) \in \mathbb{C}^{n \times n}$, define $|A| = (|a_{ij}|)$. Let $\rho(A)$ denote the spectral radius of the matrix $A$. The following results about nonnegative matrices will be useful in Section \ref{spec-bip}.
\begin{theorem}\cite[Theorem 8.1.18]{Horn}\label{Th0.3a}
Let $A, B\in \mathbb{C}^{n \times n}$ and suppose that $B$ is nonnegative. If $|A| \leq B$, then $\rho(A) \leq \rho(|A|) \leq \rho(B).$
\end{theorem}
\begin{theorem}\cite[Theorem 8.4.5]{Horn}\label{Th0.3}
Let $A, B\in \mathbb{C}^{n \times n}$. Suppose $A$ is nonnegative and irreducible, and $A\geq |B|$. Let $\lambda=e^{i \theta} \rho(B)$ be a given maximum-modulus eigenvalue of $B$. If $\rho(A)=\rho(B)$, then there is a diagonal unitary matrix $D \in \mathbb{C}^{n \times n}$ such that $B=e^{i\theta}DAD^{-1}$.
\end{theorem}
This article is organized as follows: In Section \ref{bounds}, we provide some results on the eigenvalue bounds for the adjacency matrix of $\mathbb{T}$-gain graphs. In Section \ref{coeff-char-per}, we study some of the properties of the coefficients of characteristic and permanental polynomials of gain adjacency matrices. In Section \ref{spec-bip}, we focus on the study of the spectral properties of $\mathbb{T}$-gain graphs. We establish an equivalent condition for the equality of the set of eigenvalues of a $\mathbb{T}$-gain graph and that of the underlying graph. Finally, we give a characterization for the bipartite graphs in terms of the eigenvalues of the gains and the eigenvalues of the underlying graph.
\section{Eigenvalue bounds for $\mathbb{T}$-gain graphs}\label{bounds}
For any complex square matrix $B$, we use $\lambda(B)$ to denote its eigenvalues (or simply $\lambda$ when there is only one matrix under consideration). Let $B$ be a complex square matrix of order $n$ with real eigenvalues. We arrange the eigenvalues of $B$ as
$$\lambda_n\leq\lambda_{n-1}\leq\ldots\leq\lambda_2\leq\lambda_1.$$
We now recall some important results associated to complex square matrices having real eigenvalues.
\begin{theorem}\cite[Theorem 2.1]{Wolk}
\label{bound_thm1}
Let $B$ be an $n\times n$ complex matrix with real eigenvalues,
and let $$r=\frac{\text{trace } B}{n}\ \ \text{ and }\ \ s^2=\frac{\text{trace } B^2}{n}-r^2, $$
then
$$r-s(n-1)^\frac{1}{2}\leq\lambda_n\leq r-s/(n-1)^\frac{1}{2},~\mbox{and } $$
$$r+s/(n-1)^\frac{1}{2}\leq\lambda_1\leq r+s(n-1)^\frac{1}{2}.$$
\end{theorem}
\begin{theorem}\cite{Horn}
\label{bound_thm2}
Let $B$ be a Hermitian matrix of order n and let $B_r$ be any principal submatrix of $B$ of order $r$. Then for $1\leq k\leq r$,
$$\lambda_{n+k-r}(B)\leq\lambda_k(B_r)\leq\lambda_k(B).$$
\end{theorem}
It is well known (\cite{Bap-book,Cve1}) that the $(i,j)$-th entry of the $k$-th power of the adjacency matrix of a simple graph provides the number of $k$-walks (walks of length $k$) from the vertex $i$ to $j$. The next lemma is the counterpart of the above statement for $\mathbb{T}$-gain graphs.
\begin{lemma}
\label{bound_lem1}
Let $\Phi$ be a $\mathbb{T}$-gain graph. Then the $(i,j)$-th entry $a_{ij}^{(k)}$ of $A(\Phi)^k$ is the sum of gains of all $k$-walks from the vertex $i$ to the vertex $j$.
\end{lemma}
\begin{proof}
Let $A(\Phi)^k=(a_{ij}^{(k)})$. Then $$a_{ij}^{(k)}=\sum a_{ii_1}a_{i_1i_2}\ldots a_{i_{k-2}i_{k-1}}a_{i_{k-1}j},$$ where $1\leq i_1,i_2,\ldots, i_{k-1}\leq n$. Now, any term in the right side is nonzero if and only if $v_i\sim v_{i_1}$, $v_{i_1}\sim v_{i_2}$, $\ldots$, $v_{i_{k-1}}\sim v_j$. Hence the result follows.
\end{proof}
Next, we derive lower and upper bounds for the largest and smallest eigenvalues of a $\mathbb{T}$-gain graph in terms of the number of edges and vertices of the underlying graph. The following result is known for the adjacency matrices \cite{Bap-book}; however, the proof technique is different from that in the adjacency matrix case.
\begin{theorem}
\label{key}
Let $\Phi$ be a $\mathbb{T}$-gain graph with the underlying graph $G$. If $G$ has $n$ vertices and $m$ edges, then the smallest and largest eigenvalues of $A(\Phi)$ satisfy
$$-\sqrt{\frac{2m(n-1)}{n}}\leq\lambda_n\leq-\sqrt{\frac{2m}{n(n-1)}},$$
and
$$\sqrt{\frac{2m}{n(n-1)}}\leq\lambda_1\leq\sqrt{\frac{2m(n-1)}{n}}.$$Both of the above bounds are tight.
\end{theorem}
\begin{proof}
Since the $i$-th diagonal entry of $A(\Phi)^2$ is $\sum_{v_i\sim v_k}\varphi(e_{ik})\varphi(e_{ki})=d_i$, we have
$r=\frac{\text{trace }A(\Phi)}{n}=0\ \ \text{and}\ \ s^2=\frac{\text{trace } A(\Phi)^2}{n}-r^2=\frac{2m}{n}.$
Thus, by Theorem \ref{bound_thm1}, we have
$$-\sqrt{\frac{2m(n-1)}{n}}\leq\lambda_n\leq-\sqrt{\frac{2m}{n(n-1)}},$$
and
$$\sqrt{\frac{2m}{n(n-1)}}\leq\lambda_1\leq\sqrt{\frac{2m(n-1)}{n}}.$$
Hence the proof is completed.
Let $ \Phi=(K_{n}, \varphi) $ be a $ \mathbb{T} $-gain graph with $ \varphi(e)=-1 $ for all edges; then both of the left-hand equalities hold. If $ \varphi(e)=1 $ for all edges, then both of the right-hand equalities hold.
\end{proof}
In the next theorem, we derive a lower bound for the largest eigenvalue of the $\mathbb{T}$-gain graph.
\begin{theorem}
\label{key2}
Let $\Phi$ be a $\mathbb{T}$-gain graph with the underlying graph $G$. Then
$$\lambda_1\geq \sqrt[3]{\frac{6}{n}\sum_{C\in\mathcal{C}_3(G)}\mathfrak{R}(C)},$$
where $\mathcal{C}_3(G)$ denotes the collection of all cycles of length 3 in G.
\end{theorem}
\begin{proof}
If $\lambda_1$ is the largest eigenvalue of $A(\Phi)$, then $\lambda_1^3$ is the largest eigenvalue of $A(\Phi)^3$. So, we have $\lambda_1^3\geq\frac{1}{n}\text{ trace }A(\Phi)^3.$
By Lemma \ref{bound_lem1}, the $i$-th diagonal entry of $A(\Phi)^3$ is $ a_{ii}^{(3)}=\sum_{v_i\sim v_j\sim v_k\sim v_i}\varphi(e_{ij})\varphi(e_{jk})\varphi(e_{ki}) = 2\sum_{C\in \mathcal{C}_3(i)}\mathfrak{R}(C),$
where $\mathcal{C}_3(i)$ denotes the collection of all triangles which contain the vertex $i$. Now, since each triangle contains 3 vertices, we have
$$ \lambda_1^3 \geq \frac{1}{n}\text{ trace }A(\Phi)^3 =\frac{1}{n}\sum_{i=1}^na_{ii}^{(3)}=\frac{6}{n}\sum_{C\in\mathcal{C}_3(G)}\mathfrak{R}(C).$$
The result follows by taking cube root on both sides.
\end{proof}
The next result gives a lower bound for the spectral radius of a $\mathbb{T}$-gain graph in terms of the degrees of its vertices. This extends \cite[Theorem 1]{ravi-kum} to gain graphs.
\begin{theorem}
Let $\Phi$ be a $\mathbb{T}$-gain graph with the underlying graph $G$. Then
$$\sigma\geq\frac{1}{\sqrt{2}}\max_{i<j}\sqrt{d_i+d_j+\sqrt{(d_i-d_j)^2+4|a_{ij}^{(2)}|^2}},$$
where $\sigma=\max|\lambda_i|$ and $|a_{ij}^{(2)}|$ is defined as in Lemma \ref{bound_lem1}.
\end{theorem}
\begin{proof}
We have, $\sigma^2=\lambda_1(A(\Phi)^2)$. Let $A(\Phi)^2[i,j]=\left[\begin{array}{cc}
a_{ii}^{(2)}&a_{ij}^{(2)}\\a_{ji}^{(2)}&a_{jj}^{(2)}
\end{array}
\right]$ be a principal submatrix of $A(\Phi)^2$. Then, by Theorem \ref{bound_thm2}, we have
$\lambda_1(A(\Phi)^2)\geq\lambda_1(A(\Phi)^2[i,j]).$
By Lemma \ref{bound_lem1}, $a_{ii}^{(2)}=d_i$, $a_{jj}^{(2)}=d_j$ and
$\overline{a_{ji}^{(2)}}=a_{ij}^{(2)}=\sum_{v_i\sim v_k\sim v_j}\varphi(e_{ik})\varphi(e_{kj}).$
Thus, \begin{eqnarray*}
\lambda_1(A(\Phi)^2[i,j])&=&\frac{1}{2}\bigg{[}d_i+d_j+\sqrt{(d_i+d_j)^2-4(d_id_j-|a_{ij}^{(2)}|^2)}\bigg{]},\\
&=&\frac{1}{2}\bigg{[}d_i+d_j+\sqrt{(d_i-d_j)^2+4|a_{ij}^{(2)}|^2}\bigg{]}.
\end{eqnarray*}
Since the above relation holds for all $i\neq j$, we have
$$\sigma\geq\frac{1}{\sqrt{2}}\max_{i<j}\sqrt{d_i+d_j+\sqrt{(d_i-d_j)^2+4|a_{ij}^{(2)}|^2}}.$$
\end{proof}
\section{Characteristic and permanental polynomial of $\mathbb{T}$-gain graphs}\label{coeff-char-per}
In this section, we first recall some known definitions. Then, we compute the coefficients of the characteristic and permanental polynomials in terms of the gains of the edges.
\begin{definition}
Let $K_n$ denote the complete graph on $n$ vertices, and $K_{p,q}$ denote the complete bipartite graph on $p + q$ vertices with the vertex partition $V = V_1 \cup V_2 $, $|V_1| = p$ and $|V_2| = q$ . A graph $G$ is called an \textit{elementary graph}, if each of its component is either a $K_2$ or a cycle.
\end{definition}
Let $\mathcal{H}(G)$ denote the collection of all spanning elementary subgraphs of a graph $G$, and for any $H \in\mathcal{H}(G)$, let $\mathcal{C}(H)$ denote the collection of cycles in $H$. In \cite{Ger}, the authors considered gain graphs with gains taken from an arbitrary group. The following two results can be proved by taking the gains from the multiplicative group $\mathbb{T}$ in Corollary 2.3 and Theorem 2.2 of \cite{Ger}, respectively.
\begin{theorem}
\label{det1}
Let $\Phi$ be a $\mathbb{T}$-gain graph with the underlying graph $G$. Then
\begin{equation}
\det A(\Phi)=\sum_{H\in\mathcal{H}(G)}(-1)^{n-p(H)}2^{c(H)}\prod_{C\in \mathcal{C}(H)}\Re(C),
\end{equation}
where $p(H)$ is the number of components in $H$ and $c(H)$ is the number of cycles in $H$.
\end{theorem}
\begin{corollary}
\label{cor1}Let $\Phi$ be any $\mathbb{T}$-gain graph with the underlying graph $G$. Let $P_\Phi(x)=x^n+a_1x^{n-1}+\cdots+a_n$ be the characteristic polynomial of $\Phi$. Then
$$a_i=\sum_{H\in\mathcal{H}_i(G)}(-1)^{p(H)}2^{c(H)}\prod_{C\in \mathcal{C}(H)}\Re(C),$$
where $\mathcal{H}_i(G)$ is the set of elementary subgraphs of $G$ with $i$ vertices.
\end{corollary}
\begin{proof}
We have $$a_i=(-1)^i\sum i\times i \text{ principal minors}.$$
Now, the result follows from Theorem \ref{det1}.
\end{proof}
In the next theorem, we compute coefficients of the permanental polynomial of the gain graphs. This result is an extension of the well known Harary's theorem in the context of permanents to the $\mathbb{T}$-gain graphs. The proof is similar to that of \cite[Theorem 2.3.2]{Cve1} and \cite{har1}. For the sake of completeness we include a proof here.
\begin{theorem}
\label{per1}
Let $\Phi$ be a $\mathbb{T}$-gain graph with the underlying graph $G$. Then
\begin{equation}
\text{per } A(\Phi)=\sum_{H\in\mathcal{H}(G)}2^{c(H)}\prod_{C\in \mathcal{C}(H)}\Re(C),
\end{equation}
where $c(H)$ is the number of cycles in $H$.
\end{theorem}
\begin{proof}
We have \begin{equation}\label{det-form}\text{per }A(\Phi)=\sum_{\sigma\in S_n}b_\sigma,\end{equation}
where $b_\sigma=a_{1\sigma(1)}a_{2\sigma(2)}\ldots a_{n\sigma(n)}$, and $S_n$ denotes the collection of all permutations on the set $\{1,2,\ldots,n\}$. Since $a_{ii}=0$ for all $i=1,2,\ldots,n$, we have $b_{\sigma}\neq 0$ only if $v_i\sim v_{\sigma(i)}$ for all $i=1,2,\ldots,n$. Let $\gamma_1\gamma_2\ldots\gamma_r$ be the cycle decomposition of the permutation $\sigma$, where $\gamma_i$'s are disjoint cycles of length at least two. Thus, the decomposition of $\sigma$ determines an elementary spanning subgraph $H$ of $G$, whenever $b_\sigma\neq0$. Now, let us calculate the value of $b_\sigma$. For this we consider each $\gamma_i$'s in the decomposition of $\sigma$.
If $\gamma_i=(jk)$ is a transposition, then $a_{jk}$ and $a_{kj}$ occur in the expression of $b_\sigma$. Also note that $a_{jk}a_{kj}=1$. If $\gamma_i=(i_1i_2\ldots i_k)$ is a $k$-cycle, then $b_\sigma$ contains $a_{i_1i_2}a_{i_2i_3}\ldots a_{i_ki_1}$. Let $C$ denote the cycle $v_{i_1}v_{i_2}\ldots v_{i_k}v_{i_1}$, then $a_{i_1i_2}a_{i_2i_3}\ldots a_{i_ki_1}=\varphi(C).$ By combining all these possibilities, we get $b_\sigma=\prod_{C\in\mathcal{C}(H)}\varphi(C).$ Let $\gamma_1,\gamma_2,\ldots,\gamma_s$ be the cycles of length at least $3$ in the decomposition of $\sigma$. Now if we replace any of $\gamma_1,\gamma_2,\ldots,\gamma_s$ by $\gamma_1^{-1},\gamma_2^{-1},\ldots,\gamma_s^{-1}$, then the sign of the obtained permutation is the same as that of $\sigma$. Let $\sigma'$ be any of the $2^{c(H)}$ permutations, namely $\gamma_1^{\pm 1}\gamma_2^{\pm 1}\ldots\gamma_s^{\pm1}\gamma_{s+1}\ldots\gamma_r$, which have the same sign as $\sigma$. Then the contribution of $b_{\sigma'}$ to the sum (\ref{det-form}) is $\prod_{C\in\mathcal{C}(H)}\varphi(C)^{\pm1}$.
Therefore,
\begin{eqnarray*}
\text{per } A(\Phi)&&=\sum_{H\in\mathcal{H}(G)}\prod_{C\in \mathcal{C}(H)}[\varphi(C)+\varphi(C)^{-1}],\\
&&=\sum_{H\in\mathcal{H}(G)}2^{c(H)}\prod_{C\in \mathcal{C}(H)}\Re(C).
\end{eqnarray*}
\end{proof}
The following result is an extension of the well known Sach's coefficient theorem (in the context of permanents) to the $\mathbb{T}$-gain graphs.
\begin{corollary}
\label{cor2}Let $\Phi$ be any $\mathbb{T}$-gain graph with the underlying graph $G$. Let $Q_\Phi(x)=x^n+b_1x^{n-1}+\cdots+b_n$ be the permanental polynomial of $\Phi$. Then
$$b_i= (-1)^i\sum_{H\in\mathcal{H}_i(G)}2^{c(H)}\prod_{C\in \mathcal{C}(H)}\Re(C),$$
where $\mathcal{H}_i(G)$ is the set of elementary subgraphs of $G$ with $i$ vertices.
\end{corollary}
Now, let us calculate the characteristic polynomial of a certain $\mathbb{T}$-gain graph using the previous results. The following graph is a particular case of the class of graphs known as windmill graphs \cite{gut-sci,ani-ranj-amc}.
\begin{example}[Star of triangles]{\rm
Let $S_m^\Delta$ denote the star with $m$ triangles, that is, the end vertices of $m$ copies of $K_2$ are joined to a single vertex (see Figure \ref{fig3}). We label the vertices of $S_m^\Delta$ such that for each $ l\in \{1, \dots, m\}$, $v_1v_{2l}v_{2l+1}v_{1}$ denotes a triangle in it. Let $\Phi=(S_m^\Delta,\varphi)$ be a $\mathbb{T}$-gain graph with
$\varphi(v_1v_{2l}v_{2l+1}v_{1})=e^{i\theta_l},$ for $1\leq l\leq m$. Let
$\alpha=2[\cos\theta_1+\cos\theta_2+\cdots+\cos\theta_m].$
Let $P_\Phi(x)=x^n+a_1x^{n-1}+\cdots+a_n$ be the characteristic polynomial of $\Phi$. {We have $a_1=0$, and we calculate $a_i$ for $1\leq i\leq 2m+1$. Since all the triangles in $S_m^\Delta$ share the vertex $v_1$, any elementary subgraph with an even number (say $2l$) of vertices must be a matching of size $l$. An $l$-matching in $S_m^\Delta$ is either a set of $l$ edges of the form $v_{2i}v_{2i+1}$ or a set consisting of an edge of the form $v_1v_{2j}$ (or $v_1v_{2j+1}$) together with $l-1$ edges of the form $v_{2i}v_{2i+1}$, $j\neq i$.
Thus, by using Corollary \ref{cor1}, we have
\begin{eqnarray*}
a_{2l}&=&(-1)^lm_l(S_m^\Delta)\\&=&(-1)^l\bigg[\binom{m}{l}+2m\binom{m-1}{l-1}\bigg],
\end{eqnarray*}
where $m_l(S_m^\Delta)$ denote the number of $l$ matchings of $S_m^\Delta$.
On the other hand, an elementary subgraph with an odd number (say $2l+1$) of vertices must contain a triangle $v_1v_{2j}v_{2j+1}v_1$ and $l-1$ edges of the form $v_{2i}v_{2i+1}$, $j\neq i$. Therefore}
$a_{2l+1}=(-1)^l\binom{m-1}{l-1}\alpha.$
As a special case, if $\alpha=0$, then $a_{2l+1}=0$, so in this case the eigenvalues of $\Phi$ are symmetric about $0$.
In a similar way, we can show that $b_{2l}=\binom{m}{l}+2m\binom{m-1}{l-1}
$ and $ b_{2l+1}=-\binom{m-1}{l-1}\alpha.$}
\end{example}
\begin{figure}
\caption{The graph $S^\Delta_4$.}
\label{fig3}
\end{figure}
\begin{example}{\rm
Let $G=K_4$ and $\varphi$ is taken in such a way that any three vertices $v_{i_1},v_{i_2},v_{i_3}$ with $i_1<i_2<i_3$ we have
$$\varphi(v_{i_1}v_{i_2}v_{i_3}v_{i_1})=e^{i\theta}.$$
Then for any cycle $C$ with three vertices we have $\mathfrak{R}(C)=\cos\theta$, and the gain of any cycle of order four can be written as a product of the gains of two cycles of order three. The gains of the cycles of order four are
\begin{eqnarray*}
&&\varphi(v_{i_1}v_{i_2}v_{i_3}v_{i_4}v_{i_1})=\varphi(v_{i_1}v_{i_2}v_{i_3}v_{i_1})\varphi(v_{i_1}v_{i_3}v_{i_4}v_{i_1})=e^{2i\theta},\\
&&\varphi(v_{i_1}v_{i_2}v_{i_4}v_{i_3}v_{i_1})=\varphi(v_{i_1}v_{i_2}v_{i_4}v_{i_1})\varphi(v_{i_1}v_{i_4}v_{i_3}v_{i_1})=1, ~\mbox{and}\\
&&\varphi(v_{i_1}v_{i_3}v_{i_2}v_{i_4}v_{i_1})=\varphi(v_{i_1}v_{i_3}v_{i_2}v_{i_1})\varphi(v_{i_1}v_{i_2}v_{i_4}v_{i_1})=1.
\end{eqnarray*}
Therefore, the characteristic polynomial of $\Phi$ is
$$P_\Phi(x)=x^4-6x^2-8x\cos\theta+(1-4\cos^2\theta).$$}
\end{example}
Next, we study some of the relationships between the characteristic and permanental polynomials of the adjacency matrix of the $\mathbb{T}$-gain graph and that of the underlying graph $G$, respectively.
In the following theorem, we observe that, for a tree $G$, the characteristic (resp., the permanental) polynomial of the adjacency matrix and the characteristic polynomial (resp., the permanental) polynomial of any $\mathbb{T}$-gain graph $(G, \phi)$ are the same.
\begin{theorem}
Let $\Phi=(G,\varphi)$ be a $\mathbb{T}$-gain graph. If G is a tree, then
\begin{itemize}
\item[(i)]
$P_\Phi(x)=P_G(x)$, and
\item[(ii)]
$Q_\Phi(x)=Q_G(x)$.
\end{itemize}
\end{theorem}
\begin{proof}
The result follows from Corollary \ref{cor1} and Corollary \ref{cor2}.
\end{proof}
For a unicyclic graph $G$, we establish a relationship between the characteristic polynomial of the adjacency matrix of a graph $G$ and the characteristic polynomial of any $\mathbb{T}$-gain graph $(G, \phi)$ in terms of the length of the cycle and the matching number of the graph $G$.
\begin{theorem}\label{match-gain}
\label{unic}
Let $G$ be a unicyclic graph with the cycle $C$ of length $m$, and let $\Phi=(G,\varphi)$ be a $\mathbb{T}$-gain graph such that $\varphi(C)=e^{i\theta}$. If $P_\Phi(x)$ and $P_G(x)$ denote the characteristic polynomials of $\Phi$ and $G$, respectively, then
$$P_\Phi(x)=P_G(x)+2(1-\cos \theta)\sum_{i=0}^k(-1)^i{m_i(G-C)}x^{n-m+2i},$$
where $k$ is the matching number of $G-C.$
\end{theorem}
\begin{proof}
Let $a_i(\Phi)$ and $a_i(G)$ denote the coefficients of $x^{n-i}$ in $P_\Phi(x)$ and $P_G(x)$, respectively. We have $a_i(\Phi)=a_i(G),$ for all $i<m$, and $a_m(\Phi)=a_m(G)+2(1-\cos\theta).$
Also note that, for $1\leq i\leq2k$,$$a_{m+i}(\Phi)=\begin{cases}a_{m+i}(G), &\text{if }i \text{ is odd},\\
a_{m+i}(G)+(-1)^{\frac{i}{2}}{m_\frac{i}{2}(G- C)}2(1-\cos\theta), &\text{if }i \text{ is even}.
\end{cases}$$
Again $G-C$ has matching number $k$ implies $G$ has no elementary subgraph of order greater than $m+2k$ which contains a cycle. Thus, for all $i>m+2k$, we have
$a_i(\Phi)=a_i(G).$
Therefore,
$$P_\Phi(x)=P_G(x)+2(1-\cos \theta)\sum_{i=0}^k(-1)^i{m_i(G-C)}x^{n-m+2i},$$
which completes the proof.
\end{proof}
Next theorem is a counterpart of Theorem \ref{match-gain} for the permanental polynomial of a graph.
\begin{theorem}
Let $G$ be a unicyclic graph with the cycle $C$ of length $m$, and let $\Phi=(G,\varphi)$ be a $\mathbb{T}$-gain graph such that $\varphi(C)=e^{i\theta}$. If $Q_\Phi(x)$ and $Q_G(x)$ denote the permanental polynomials of $\Phi$ and $G$, respectively, then
$$Q_\Phi(x)=Q_G(x)+(-1)^{m+1}2(1-\cos \theta)\sum_{i=0}^k{m_i(G-C)}x^{n-m+2i},$$
where $k$ is the matching number of $G-C.$
\end{theorem}
\begin{proof}
Similar to the proof of Theorem \ref{unic}.
\end{proof}
In the following result, for a unicyclic graph $G$, we establish a relationship between the determinant (resp., permanent) of the adjacency matrix of a graph $G$ and the determinant (resp., permanent) of any $\mathbb{T}$-gain graph $(G, \phi)$.
\begin{corollary}
Let $G$ be a unicyclic graph with the cycle $C$ of length $m$, let $\Phi$ be any $\mathbb{T}$-gain graph with the underlying graph $G$ such that $\varphi(C)=e^{i\theta}$, and let $k$ be the matching number of $G-C$. Then
$$\det A(\Phi)=\begin{cases}
\det A(G),&\text{if }2k\neq n-m,\\
\det A(G)+(-1)^{k}2{m_k(G-C)}(1-\cos\theta),&\text{if }2k=n-m,
\end{cases}$$
and, similarly,
$$\text{per} A(\Phi)=\begin{cases}
\text{per} A(G),&\text{if }2k\neq n-m,\\
\text{per} A(G)+(-1)^{m+1}2{m_k(G-C)}(1-\cos\theta),&\text{if }2k=n-m.
\end{cases}$$
\end{corollary}
\section{Spectral properties of $\mathbb{T}$-gain graphs}\label{spec-bip}
In this section we study some of the spectral properties of bipartite $\mathbb{T}$-gain graphs. First, let us establish that for a bipartite $\mathbb{T}$-gain graph $\Phi$, the set of all eigenvalues $\sigma(A(\Phi))$ is symmetric about $0$. A proof of the unweighted case can be found in \cite{Bap-book}.
\begin{theorem}
Let $\Phi=(G,\varphi)$ be a $\mathbb{T}$-gain graph. If $G$ is bipartite, then the eigenvalues of $A(\Phi)$ are symmetric about $0$.
\end{theorem}
\begin{proof}
Let $V=\{X,Y\}$ be the bipartition of the vertex set of $G$ such that $|X|=p.$ Let $\lambda$ be an eigenvalue of $A(\Phi)$ and $x=[x_1\ \cdots\ x_p\ x_{p+1}\ \cdots\ x_n]^T$ be a corresponding eigenvector. Then the vector $x'=[x_1\ \cdots\ x_p\ -x_{p+1}\ \cdots\ -x_n]^T$ is non-zero and satisfies
$A(\Phi)x'=-\lambda x'.$
Therefore, $-\lambda$ is also an eigenvalue of $A(\Phi)$.
\end{proof}
\begin{remark}
\label{rem1}{\rm
The converse of the above theorem need not be true for gain graphs. Consider the complete graph $K_3$ on three vertices with all edge weights equal to $i$. Then $$A(\Phi)=\left[\begin{array}{ccc}
0&i&i\\-i&0&i\\-i&-i&0
\end{array}
\right].$$
The eigenvalues of $A(\Phi)$ are $0,\ \pm\sqrt{3}$. But the underlying graph is not bipartite.}
\end{remark}
\begin{remark}
\label{rem2}{\rm
It is known that an $r$-regular graph has $r$ as an eigenvalue. The example in Remark \ref{rem1} also shows that $r$ may not be an eigenvalue for an $r$-regular $\mathbb{T}$-gain graph.}
\end{remark}
It is known that the eigenvalues of $A(G)$ are symmetric about $0$ if and only if $G$ is bipartite (see \cite{Cve1, Bap-book}). But Remark \ref{rem1} shows that this result need not be true for $\mathbb{T}$-gain graphs. In the following theorem, we establish a sufficient condition for a $\mathbb{T}$-gain graph whose eigenvalues are symmetric about the origin to be bipartite.
\begin{theorem}
Let $\Phi=(G,\varphi)$ be a $\mathbb{T}$-gain graph such that the eigenvalues of $A(\Phi)$ are symmetric about $0$. If
$$\sum_{C\in\mathcal{C}_i(G)}\mathfrak{R}(C)\neq 0,$$ where $\mathcal{C}_i(G)$ denotes the set of all cycles on $i$ vertices, then $G$ is bipartite.
\end{theorem}
\begin{proof}
It is sufficient to prove that $G$ does not have any odd cycles.
Let $P_\Phi(x)=x^n+a_1x^{n-1}+\cdots+a_n$ be the characteristic polynomial of $A(\Phi)$. Since the eigenvalues of $A(\Phi)$ are symmetric about $0$, we have $a_i=0$ whenever $i$ is odd. Now, $a_3=-2\sum_{C\in\mathcal{C}_3(G)}\mathfrak{R}(C)=0,$ while by the assumption $\sum_{C\in\mathcal{C}_3(G)}\mathfrak{R}(C)\neq0$ whenever $\mathcal{C}_3(G)$ is non-empty. Thus $G$ does not have any triangles. Since $G$ does not contain $K_3$, the cycles with $5$ vertices are the only elementary subgraphs on $5$ vertices. A similar argument shows that $G$ does not have any cycles of length $5$. Proceeding in this way, we can prove $G$ does not contain any odd cycles.
\end{proof}
In the next theorem, we establish an upper bound for the largest eigenvalue of a bipartite graph.
\begin{theorem}
Let $\Phi$ be a $\mathbb{T}$-gain graph with the underlying graph G. If $G=K_{p,q}$, then $\lambda_1(A(\Phi))\leq \sqrt{pq}$. Equality holds if and only if $\Phi$ is balanced.
\end{theorem}
\begin{proof}
We have $\sum\lambda_i^2=\Big[\sum\lambda_i\Big]^2-2\sum_{i<j}\lambda_i\lambda_j=2pq.$
Since, the eigenvalues of $A(\Phi)$ are symmetric about $0$, we get
$2\lambda_1^2\leq 2pq,\ \text{ and hence }\ \lambda_1\leq \sqrt{pq}.$
Now, let us prove the necessary and sufficient condition for the equality. If $\Phi$ is balanced, then $\sigma(A(\Phi))=\sigma(K_{p,q})$, and hence $\lambda_1(A(\Phi))=\sqrt{pq}.$
Conversely, let $\lambda_1(A(\Phi))=\sqrt{pq}$. Suppose that $\Phi$ is not balanced. Then there exists a smallest number $l\geq 2$ such that $\Phi$ contains cycles $C_1, C_2,\ldots,C_k$ of length $2l$ with $\varphi(C_i)\neq 1.$
Now, computing the coefficient of $x^{n-2l}$ in the characteristic polynomial of $\Phi$, we get
$$ a_{2l}=2k-2\sum_{i=1}^k\mathfrak{R}(C_i) \neq 0.$$
Thus $\lambda_2>0$, and hence $\lambda_1<\sqrt{pq}$. This contradicts the fact that $\lambda_1=\sqrt{pq}.$ Therefore $\Phi$ must be balanced.
\end{proof}
From Theorem \ref{Th1}, it is known that if two gain graphs are switching equivalent, then they have the same set of eigenvalues. The converse of this statement is not true in general, i.e., if the set of all eigenvalues of two $\mathbb{T}$-gain graphs {(with the same underlying graph $G$)} are the same, then they need not be switching equivalent. Consider $G$ as in the Figure \ref{Fig1}.
\begin{figure}
\caption{A graph with 5 vertices.}
\label{Fig1}
\end{figure}
The graph $G$ contains two cycles, namely, $C_1=\{v_1,v_2,v_3,v_1\}$ and
$C_2=\{v_1,v_4,v_5,v_1\}$. We construct $\Phi_1$ and $\Phi_2$ so that
$$\begin{array}{cc}
\varphi_1(C_1)=i,&\ \varphi_1(C_2)=1\\ \varphi_2(C_1)=\frac{1+i\sqrt{3}}{2},&\ \varphi_2(C_2)=\varphi_2(C_1)=\frac{1+i\sqrt{3}}{2}.
\end{array}$$
Let $a_i(1)$ and $a_i(2)$ denote the coefficient of $x^{n-i}$ in the characteristic polynomials of $\Phi_1$ and $\Phi_2$, respectively. Then, by using Corollary \ref{cor1}, we have
\begin{eqnarray*}
&&a_1(1)=a_1(2)=0,\\
&&a_2(1)=a_2(2)=-6,\\
&&a_3(1)=a_3(2)=-2,\\
&&a_4(1)=a_4(2)=1,\\
&&a_5(1)=a_5(2)=2.
\end{eqnarray*}
Therefore $\sigma(A(\Phi_1))=\sigma(A(\Phi_2))$. But, by Theorem \ref{Th2}, $\Phi_1$ and $\Phi_2$ are not switching equivalent.
In the remaining part of this section, we study when the set of all eigenvalues or the spectral radius of $A(\Phi)$, for some $\Phi$, equals to the the set of all eigenvalues or the spectral radius of the underlying graph, respectively.
\begin{lemma}
Let $\Phi=(G,\varphi)$ be a connected $\mathbb{T}$-gain graph. Then $ \rho(A(\Phi))\leq\rho(A(G)) $.
\end{lemma}
\begin{proof}
Since $ |A(\Phi)|\leq A(G) $, by Theorem \ref{Th0.3a} we have $ \rho(A(\Phi))\leq \rho(A(G))$.
\end{proof}
In the next theorem, we establish an equivalent condition for $\rho(A(\Phi))=\rho(A(G))$.
\begin{theorem} \label{Th0.4}
Let $\Phi=(G,\varphi)$ be a connected $\mathbb{T}$-gain graph. Then $\rho(A(\Phi))=\rho(A(G))$ if and only if either $\Phi$ or $ -\Phi $ is balanced.
\end{theorem}
\begin{proof}
If $\Phi$ or $ -\Phi $ is balanced, then $\rho(A(\Phi))=\rho(A(G))$. Conversely, suppose that $\rho(A(\Phi))=\rho(A(G))$. Let $\lambda_n\leq\lambda_{n-1}\leq \dots \leq\lambda_1$ be the eigenvalues of $A(\Phi)$. Since $A(\Phi)$ is Hermitian, either $\rho(A(\Phi))=\lambda_1$ or $\rho(A(\Phi))=-\lambda_n$.
Now we have the following two cases:\\
\textbf{Case 1:} Suppose that $\rho(A(\Phi))=\lambda_1$. Then, by Theorem \ref{Th0.3}, there is a diagonal unitary matrix $D\in \mathbb{C}^{n \times n}$ such that $A(\Phi)=DA(G)D^{-1}$. Hence $\Phi\sim (G,1)$.
Therefore, by Theorem \ref{Zas1}, $\Phi$ is balanced. \\
\textbf{Case 2:} If $\rho(A(\Phi))=-\lambda_n$, then $\lambda_n=e^{i \pi} \rho(A(\Phi))$. By Theorem \ref{Th0.3}, we have $A(\Phi)=e^{i \pi}DA(G)D^{-1}$, for some diagonal unitary matrix $D\in \mathbb{C}^{n \times n}$. Thus $A(-\Phi)= DA(G) D^{-1}$. Hence, $(-\Phi)\sim (G,1)$. Thus, $-\Phi$ is balanced.
\end{proof}
\begin{theorem}\label{lm0.5}
Let $\Phi=(G,\varphi)$ be a connected $\mathbb{T}$-gain graph. Then
\begin{enumerate}
\item[(i)] If $ G $ is bipartite and $ \Phi $ is balanced, then $ -\Phi $ is also balanced.
\item[(ii)] If $ -\Phi $ is balanced for some balanced gain graph $ \Phi $, then the graph $G$ is bipartite.
\end{enumerate}
\end{theorem}
\begin{proof}
(i) Suppose $ G $ is bipartite and $ \Phi $ is balanced. Then due to the absence of odd cycles, $ -\Phi $ is balanced.
(ii) Let $ \Phi $ be a balanced gain graph such that $-\Phi $ is balanced. Suppose that $ G $ is not bipartite.
Then, any odd cycle in $G$ can not be balanced with respect to $ -\Phi $, which contradicts the assumption. Thus $ G $ must be bipartite.
\end{proof}
In the next theorem, we answer the following problem: which gain adjacency matrices are cospectral with the adjacency matrix of the underlying graph?
\begin{theorem}\label{Th0.6}
Let $\Phi=(G,\varphi)$ be a connected $\mathbb{T}$-gain graph. Then $ \sigma(A(\Phi)) =\sigma(A(G))$ if and only if $ \Phi $ is balanced.
\end{theorem}
\begin{proof}
If $ \sigma(A(\Phi)) =\sigma(A(G))$, then $ \rho (A(\Phi))=\rho(A(G))$. Now, by Theorem \ref{Th0.4}, we have either $ \Phi $ or $ -\Phi $ is balanced. If $\Phi$ is balanced, then we are done. Suppose that $ -\Phi $ is balanced, then $ -A(G) $ and $ A(\Phi) $ have the same set of eigenvalues. Hence $ \sigma(A(G)) = \sigma(-A(G))$. Thus, we have $ G $ is bipartite. Therefore, by Theorem \ref{lm0.5}, $ \Phi $ is balanced.
\end{proof}
In the next theorem, we derive a characterization for bipartite graphs in terms of gains.
\begin{theorem}\label{Th0.7}
Let $\Phi=(G,\varphi)$ be a connected $\mathbb{T}$-gain graph. Then $ G $ is bipartite if and only if $ \rho(A(\Phi))=\rho(A(G))$ implies $\sigma(A(\Phi))=\sigma(A(G)) $ for every gain $ \varphi $.
\end{theorem}
\begin{proof}
Suppose $\rho(A(\Phi))=\rho(A(G))$ implies $\sigma(A(\Phi))=\sigma(A(G)) $ for any gain $ \varphi $. Let $ \Phi $ be balanced. We shall prove that $-\Phi$ is also balanced. By Theorem \ref{Th0.6}, we have $ \sigma(A(\Phi))=\sigma(A(G))$. Thus $ \rho(A(\Phi))=\rho(A(G))$. Also $ \rho(A(\Phi))=\rho(A(-\Phi)) $ implies $ \rho(A(-\Phi))=\rho(A(G)) $. Thus $\sigma(A(-\Phi))=\sigma(A(G))$, and hence, by Theorem \ref{Th0.6}, $ -\Phi$ is balanced. Now, by Theorem \ref{lm0.5}, $G$ is bipartite.
Conversely, let $ G $ be a bipartite graph, and $\Phi$ be such that $ \rho(A(\Phi))=\rho(A(G)) $. Now, by Theorem \ref{Th0.4} and Theorem \ref{lm0.5}, we have $ \Phi $ to be balanced. Hence $
\sigma(A(\Phi))=\sigma(A(G)) $.
\end{proof}
\section*{Acknowledgment}The authors are thankful to the referee for valuable comments and suggestions.
Ranjit Mehatari is funded by NPDF (File no.- PDF/2017/001312), SERB, India. M. Rajesh
Kannan thanks the Department of Science and Technology, India, for financial support through
the Early Career Research Award (ECR/2017/000643). Aniruddha Samanta thanks the University
Grants Commission (UGC) for financial support in the form of a junior research fellowship.
\end{document} |
\begin{document}
\title{Security Of Finite-Key-Length Measurement-Device-Independent Quantum
Key Distribution Using Arbitrary Number Of Decoys}
\author{H. F. Chau}
\thanks{email: \texttt{hfchau@hku.hk}}
\affiliation{Department of Physics, University of Hong Kong, Pokfulam Road,
Hong Kong}
\date{\today}
\begin{abstract}
In quantum key distribution, measurement-device-independent and decoy-state
techniques enable the two cooperative agents to establish a shared secret key
using imperfect measurement devices and weak Poissonian sources,
respectively.
Investigations so far are not comprehensive as they are restricted to less
than or equal to four decoy states. Moreover, many of them involve purely
numerical studies.
Here I report a general security proof that works for any fixed number of
decoy states and any fixed raw key length.
There are two key ideas involved here.
The first one is the repeated application of the inversion formula for
Vandermonde matrix to obtain various bounds on certain yields and error
rates.
The second one is the use of a recently proven generalization of the
McDiarmid inequality.
These techniques raise the best provably secure key rate of the
measurement-device-independent version of the BB84 scheme by at least
1.25~times and increase the workable distance between the two cooperative
agents from slightly less than 60~km to slightly greater than 130~km in case
there are $10^{10}$ photon pulse pairs sent without a quantum repeater.
\end{abstract}
\maketitle
\section{Introduction}
\label{Sec:Intro}
Quantum key distribution (QKD\xspace) is the art for two trusted agents, commonly
referred to as Alice and Bob, to share a provably secure secret key by
preparing and measuring quantum states that are transmitted through a noisy
channel controlled by an eavesdropper Eve who has unlimited computational
power.
In realistic QKD\xspace setup, decoy-state technique allows Alice and Bob to obtain
their secret key using the much more practical weak phase-randomized
Poissonian sources~\cite{Wang05,LMC05}. In addition,
measurement-device-independent (MDI\xspace) method enables them to use imperfect
apparatus that may be controlled by Eve to perform measurement~\cite{LCQ12}.
Decoy-state technique has been extensively studied.
In fact, this technique can be applied to many different QKD\xspace
schemes~\cite{Wang05,LMC05,VV14,BMFBB16,DSB18}.
Researches on the effective use of a general number of decoys have been
conducted~\cite{Hayashi07,HN14,Chau18,CN20}.
The effect of finite raw key length on the key rate has been
investigated~\cite{HN14,LCWXZ14,BMFBB16,Chau18,CN20}.
Nonetheless, security and efficiency analyses on the combined use of
decoy-state and MDI\xspace techniques are less comprehensive. So far, they are
restricted to less than or equal to four decoy
states~\cite{MFR12,SGLL13,SGLL13e,CXCLTL14,XXL14,YZW15,ZYW16,ZZRM17,MZZZZW18,WBZJL19}.
Furthermore, it is unclear how to extend these methods analytically to an
arbitrary but fixed number of decoys.
Along a slightly different line, the case of finite raw key length for the
combined use of decoy-state and MDI\xspace techniques has been studied.
So far, these studies applied the Azuma, Hoeffding and Serfling inequalities as
well as Chernoff bound in a straightforward
manner~\cite{CXCLTL14,YZW15,ZYW16,ZZRM17,MZZZZW18,WBZJL19}.
Here I report the security analysis and a key rate formula for the
BB84-based~\cite{BB84} MDI\xspace-QKD\xspace using passive partial Bell state detection
for finite raw key length with the condition that Alice and Bob each uses an
arbitrary but fixed number of decoys.
One of the key ideas in this work is the repeated use of the analytical
formula for the elements of the inverse of a Vandermonde matrix. A tight
bound on various yields and error rates for a general number of decoys can
then be obtained through this analytical formula. (Actually, Yuan
\emph{et al.} also used repeated Vandermonde matrix inversion to obtain upper
and lower bounds of the so-called two-single-photon yield in case one of the
photon intensities used is $0$~\cite{YZLM16}. Nevertheless, the bounds
reported here are more general and powerful than theirs.) The other key idea
used here is the application of a powerful generalization of the McDiarmid
inequality in mathematical statistics recently proven in Ref.~\cite{CN20}.
This inequality is effective to tackle finite size statistical fluctuation of
certain error rates involved in the key rate formula.
I compute the secure key rate for the MDI\xspace-version of the BB84 scheme using
the setup and channel studied by Zhou \emph{et al.} in Ref.~\cite{ZYW16}.
The best provably secure key rate for this setup before this work are
reported by Mao \emph{et al.} in Ref.~\cite{MZZZZW18}. Compared to their
work, in case the total number of photon pulse pairs sent by Alice and Bob is
$10^{10}$, the provably secure key rate using this new method is increased by
at least 125\%. Besides, the maximum transmission distance is increased from
slightly less than 60~km to slightly greater than 130~km.
This demonstrates the effectiveness of this new approach for MDI\xspace-QKD\xspace.
\section{The MDI\xspace-QKD\xspace Protocol}
\label{Sec:Protocol}
In this paper, the polarization of all photon pulses are prepared either in
${\mathtt X}\xspace$-basis with photon intensity $\mu_{{\mathtt X}\xspace,i}$ (for $i=1,2,\cdots,
k_{\mathtt X}\xspace$) or in ${\mathtt Z}\xspace$-basis with photon intensity $\mu_{{\mathtt Z}\xspace,i}$
(for $i=1,2,\cdots,k_{\mathtt Z}\xspace$). For simplicity, I label these photon
intensities in descending order by $\mu_{{\mathtt X}\xspace,1} > \mu_{{\mathtt X}\xspace,2} >
\cdots > \mu_{{\mathtt X}\xspace,k_{\mathtt X}\xspace} \ge 0$ and similarly for
$\mu_{{\mathtt Z}\xspace,i}$'s. I denote the probability of choosing the preparation
basis ${\mathtt B}\xspace \in \{ {\mathtt X}\xspace,{\mathtt Z}\xspace \}$ by $p_{\mathtt B}\xspace$ and the probability
of choosing photon intensity $\mu_{{\mathtt B}\xspace,i}$ given the preparation basis
${\mathtt B}\xspace$ by $p_{i\mid{\mathtt B}\xspace}$.
Here I study the following MDI\xspace-QKD\xspace protocol, which is a BB84-based scheme
originally studied in Refs.~\cite{LCQ12,CXCLTL14}.
\begin{enumerate}
\item Alice and Bob each has a phase-randomized Poissonian distributed
source. Each of them randomly and independently prepares a photon pulse and
sends it to the untrusted third party Charlie. They jot down the intensity
and polarization used for each pulse.
\label{Scheme:prepare}
\item Charlie performs a partial Bell state measurement like the ones in
Refs.~\cite{LCQ12,CXCLTL14,MR12}. He publicly announces the measurement
result including non-detection and inconclusive events.
\label{Scheme:measure}
\item Alice and Bob reveal the basis and intensity they used for each of
their prepared photon pulse. If the preparation bases of a pair of photon
pulses they have sent to Charlie for Bell basis measurement disagree, they
discard them. If both pulses are prepared in the ${\mathtt X}\xspace$-basis, they
reveal their preparation polarizations. They also randomly reveal the
preparation polarizations of a few pulses that they have both prepared in
the ${\mathtt Z}\xspace$-basis. In this way, they can estimate the various yields and
error rates to be defined in Sec.~\ref{Sec:Y_and_Q}.
\label{Scheme:estimate}
\item They use the preparation information of their remaining photon pulses
that have been conclusively measured by Charlie to generate their raw secret
keys and then perform error correction and privacy amplification on these
keys to obtain their final secret keys according to the MDI\xspace-QKD\xspace procedure
reported in Refs.~\cite{LCQ12,MR12}. (Here I assume that Alice and Bob use
forward reconciliation to establish the key. The case of reverse
reconciliation can be studied in a similar manner.)
\label{Scheme:post_process}
\end{enumerate}
\section{Bounds On Various Yields And Error Rates In The MDI\xspace-Setting}
\label{Sec:Y_and_Q}
I use the symbol $Q_{{\mathtt B}\xspace,i,j}$ to denote the yield given that both Alice
and Bob prepare their photons in ${\mathtt B}\xspace$-basis and that Alice (Bob) uses
photon intensity $\mu_{{\mathtt B}\xspace,i}$ ($\mu_{{\mathtt B}\xspace,j}$) for ${\mathtt B}\xspace =
{\mathtt X}\xspace, {\mathtt Z}\xspace$ and $i,j = 1,2,\cdots,k_{{\mathtt B}\xspace}$. More precisely, it is
the portion of photon pairs prepared using the above description that Charlie
declares conclusive detection. Furthermore, I define the error rate of these
photon pairs $E_{{\mathtt B}\xspace,i,j}$ as the portion of those conclusively detected
photons above whose prepared polarizations by Alice and Bob are the same.
And I set $\bar{E}_{{\mathtt B}\xspace,i,j} = 1 - E_{{\mathtt B}\xspace,i,j}$.
Similar to the case of standard (that is, non-MDI\xspace) implementation of QKD\xspace,
for phase randomized Poissonian photon sources~\cite{MR12},
\begin{equation}
Q_{{\mathtt B}\xspace,i,j} = \sum_{a,b=0}^{+\infty} \frac{\mu_{{\mathtt B}\xspace,i}^a
\mu_{{\mathtt B}\xspace,j}^b Y_{{\mathtt B}\xspace,a,b} \exp(-\mu_{{\mathtt B}\xspace,i})
\exp(-\mu_{{\mathtt B}\xspace,j})}{a!\ b!}
\label{E:Q_mu_def}
\end{equation}
and
\begin{align}
& Q_{{\mathtt B}\xspace,i,j} E_{{\mathtt B}\xspace,i,j} \nonumber \\
={} & \sum_{a,b=0}^{+\infty} \frac{\mu_{{\mathtt B}\xspace,i}^a \mu_{{\mathtt B}\xspace,j}^b
Y_{{\mathtt B}\xspace,a,b} e_{{\mathtt B}\xspace,a,b} \exp(-\mu_{{\mathtt B}\xspace,i})
\exp(-\mu_{{\mathtt B}\xspace,j})}{a!\ b!} .
\label{E:E_mu_def}
\end{align}
Here, $Y_{{\mathtt B}\xspace,a,b}$ is the probability of conclusive detection by Charlie
given that the photon pulses sent by Alice (Bob) contains $a$ ($b$)~photons
and $e_{{\mathtt B}\xspace,a,b}$ is the corresponding bit error rate of the raw key.
Furthermore, I denote the yield conditioned on Alice preparing a vacuum state
and Bob preparing in the ${\mathtt B}\xspace$-basis by the symbol
$Y_{{\mathtt B}\xspace,0,\star}$. Clearly, $Y_{{\mathtt B}\xspace,0,\star}$ obeys
\begin{equation}
Y_{{\mathtt B}\xspace,0,\star} = \sum_{j=1}^{k_{\mathtt B}\xspace} p_{j\mid{\mathtt B}\xspace}
\tilde{Y}_{{\mathtt B}\xspace,0,j} ,
\label{E:Y_0*_relation}
\end{equation}
where $\tilde{Y}_{{\mathtt B}\xspace,0,j}$ is the yield conditioned on Alice sending the
vacuum state and Bob sending photon with intensity $\mu_{{\mathtt B}\xspace,j}$ in the
${\mathtt B}\xspace$-basis.
I need to deduce the possible values of $Y_{{\mathtt B}\xspace,i,j}$'s and
$Y_{{\mathtt B}\xspace,i,j} e_{{\mathtt B}\xspace,i,j}$ from Eqs.~\eqref{E:Q_mu_def}
and~\eqref{E:E_mu_def}. One way to do it is to compute various lower and
upper bounds of $Y_{{\mathtt B}\xspace,i,j}$'s and $Y_{{\mathtt B}\xspace,i,j} e_{{\mathtt B}\xspace,i,j}$ by
brute force optimization of truncated versions of Eqs.~\eqref{E:Q_mu_def}
and~\eqref{E:E_mu_def} like the method reported in
Refs.~\cite{MFR12,CXCLTL14,XXL14}. However, this approach is rather
inelegant and ineffective.
Further note that Alice and Bob have no control on the values of
$Y_{{\mathtt B}\xspace,a,b}$'s and $e_{{\mathtt B}\xspace,a,b}$'s since Charlie and Eve are not
trustworthy. All they know is that these variables are between 0 and 1.
Fortunately, in the case of phase-randomized Poissonian distributed light
source, Corollaries~\ref{Cor:Y0*} and~\ref{Cor:bounds_on_Yxx} in the Appendix
can be used to bound $Y_{{\mathtt B}\xspace,0,\star}, Y_{{\mathtt B}\xspace,1,1}, Y_{{\mathtt B}\xspace,1,1}
e_{{\mathtt B}\xspace,1,1}$ and $Y_{{\mathtt B}\xspace,1,1} \bar{e}_{{\mathtt B}\xspace,1,1}$ analytically,
where $\bar{e}_{{\mathtt B}\xspace,1,1} \equiv 1 - e_{{\mathtt B}\xspace,1,1}$. More importantly,
these bounds are effective to analyze the key rate formula to be reported
in Sec.~\ref{Sec:Rate}.
Following the trick used in Refs.~\cite{Chau18,CN20}, by using the statistics
of either all the $k_{{\mathtt B}\xspace}$ different photon intensities or all but the
largest one used by Alice and Bob depending on the parity of $k_{{\mathtt B}\xspace}$,
Corollaries~\ref{Cor:Y0*} and~\ref{Cor:bounds_on_Yxx} imply the following
tight bounds
\begin{subequations}
\label{E:various_Y_and_e_bounds}
\begin{equation}
Y_{{\mathtt B}\xspace,0,\star} \ge \sum_{i,j=1}^{k_{\mathtt B}\xspace} p_{j\mid{\mathtt B}\xspace}
{\mathcal A}_{{\mathtt B}\xspace,0,i}^{\text{e}} Q_{{\mathtt B}\xspace,i,j} ,
\label{E:Y0*_bound}
\end{equation}
\begin{equation}
Y_{{\mathtt B}\xspace,1,1} \ge Y_{{\mathtt B}\xspace,1,1}^\downarrow \equiv
\sum_{i,j=1}^{k_{\mathtt B}\xspace} {\mathcal A}_{{\mathtt B}\xspace,1,i}^{\text{o}}
{\mathcal A}_{{\mathtt B}\xspace,1,j}^{\text{o}} Q_{{\mathtt B}\xspace,i,j} - C_{{\mathtt B}\xspace,2}^2 ,
\label{E:Y11_bound}
\end{equation}
\begin{equation}
Y_{{\mathtt B}\xspace,1,1} e_{{\mathtt B}\xspace,1,1} \le \left( Y_{{\mathtt B}\xspace,1,1} e_{{\mathtt B}\xspace,1,1}
\right)^\uparrow \equiv \sum_{i,j=1}^{k_{\mathtt B}\xspace}
{\mathcal A}_{{\mathtt B}\xspace,1,i}^{\text{e}} {\mathcal A}_{{\mathtt B}\xspace,1,j}^{\text{e}}
Q_{{\mathtt B}\xspace,i,j} E_{{\mathtt B}\xspace,i,j} ,
\label{E:Ye11_bound}
\end{equation}
\begin{align}
Y_{{\mathtt B}\xspace,1,1} e_{{\mathtt B}\xspace,1,1} &\ge{} \left( Y_{{\mathtt B}\xspace,1,1} e_{{\mathtt B}\xspace,1,1}
\right)^\downarrow \nonumber \\
&\equiv \sum_{i,j=1}^{k_{\mathtt B}\xspace} {\mathcal A}_{{\mathtt B}\xspace,1,i}^{\text{o}}
{\mathcal A}_{{\mathtt B}\xspace,1,j}^{\text{o}} Q_{{\mathtt B}\xspace,i,j} E_{{\mathtt B}\xspace,i,j} -
C_{{\mathtt B}\xspace,2}^2 ,
\label{E:Ye11_special_bound}
\end{align}
and
\begin{align}
Y_{{\mathtt B}\xspace,1,1} \bar{e}_{{\mathtt B}\xspace,1,1} &\ge \left( Y_{{\mathtt B}\xspace,1,1}
\bar{e}_{{\mathtt B}\xspace,1,1} \right)^\downarrow \nonumber \\
&\equiv \sum_{i,j=1}^{k_{\mathtt B}\xspace} {\mathcal A}_{{\mathtt B}\xspace,1,i}^{\text{o}}
{\mathcal A}_{{\mathtt B}\xspace,1,j}^{\text{o}} Q_{{\mathtt B}\xspace,i,j} \bar{E}_{{\mathtt B}\xspace,i,j} -
C_{{\mathtt B}\xspace,2}^2
\label{E:Y11bare1_bound}
\end{align}
\end{subequations}
for ${\mathtt B}\xspace = {\mathtt X}\xspace, {\mathtt Z}\xspace$.
(The reason for using ${\text{e}}$ and ${\text{o}}$ as superscripts is that it
will be self-evident from the discussion below that for fixed ${\mathtt B}\xspace$ and
$j$, there are even number of non-zero terms in
${\mathcal A}^{\text{e}}_{{\mathtt B}\xspace,j,i}$ and odd number of non-zero terms in
${\mathcal A}^{\text{o}}_{{\mathtt B}\xspace,j,i}$.)
For the above inequalities, in case $k_{\mathtt B}\xspace$ is even, then
\begin{subequations}
\label{E:various_A_ai}
\begin{equation}
{\mathcal A}_{{\mathtt B}\xspace,j,i}^{\text{e}} = A_j(\mu_{{\mathtt B}\xspace,i},\{ \mu_{{\mathtt B}\xspace,1},
\mu_{{\mathtt B}\xspace,2}, \cdots, \mu_{{\mathtt B}\xspace,i-1}, \mu_{{\mathtt B}\xspace,i+1},\cdots,
\mu_{{\mathtt B}\xspace,k_{\mathtt B}\xspace} \})
\label{E:A_xi_even_def_for_even}
\end{equation}
for $i=1,2,\cdots,k_{\mathtt B}\xspace$ and $j=0,1$. Furthermore,
\begin{equation}
{\mathcal A}_{{\mathtt B}\xspace,1,1}^{\text{o}} = 0
\label{E:A_11_odd_def_for_even}
\end{equation}
and
\begin{equation}
{\mathcal A}_{{\mathtt B}\xspace,1,i}^{\text{o}} = A_1(\mu_{{\mathtt B}\xspace,i},\{ \mu_{{\mathtt B}\xspace,2},
\mu_{{\mathtt B}\xspace,3},\cdots, \mu_{{\mathtt B}\xspace,i-1}, \mu_{{\mathtt B}\xspace,i+1},\cdots,
\mu_{{\mathtt B}\xspace,k_{\mathtt B}\xspace} \})
\label{E:A_1i_odd_def_for_even}
\end{equation}
for $i=2,3,\cdots,k_{\mathtt B}\xspace$. In addition,
\begin{widetext}
\begin{equation}
C_{{\mathtt B}\xspace,2} = \left( \sum_{\ell=2}^{k_{\mathtt B}\xspace} \mu_{{\mathtt B}\xspace,2}
\mu_{{\mathtt B}\xspace,3} \cdots \mu_{{\mathtt B}\xspace,\ell-1} \mu_{{\mathtt B}\xspace,\ell+1} \cdots
\mu_{{\mathtt B}\xspace,k_{\mathtt B}\xspace} \right) \sum_{i=2}^{k_{\mathtt B}\xspace} \left\{
\frac{1}{\mu_{{\mathtt B}\xspace,i} \prod_{t\ne 1,i} (\mu_{{\mathtt B}\xspace,i} -
\mu_{{\mathtt B}\xspace,t})} \left[ \exp(\mu_{{\mathtt B}\xspace,i}) - \sum_{j=0}^{k_{\mathtt B}\xspace-2}
\frac{\mu_{{\mathtt B}\xspace,i}^j}{j!} \right] \right\} .
\label{E:C2_def_even}
\end{equation}
\end{widetext}
Here I use the convention that the term involving $1/\mu_{{\mathtt B}\xspace,i}$ in the
above summands with dummy index $i$ is equal to $0$ if $\mu_{{\mathtt B}\xspace,i} = 0$.
Whereas in the case that $k_{\mathtt B}\xspace$ is odd,
\begin{equation}
{\mathcal A}_{{\mathtt B}\xspace,1,i}^{\text{o}} = A_1(\mu_{{\mathtt B}\xspace,i},\{ \mu_{{\mathtt B}\xspace,1},
\mu_{{\mathtt B}\xspace,2}, \cdots, \mu_{{\mathtt B}\xspace,i-1}, \mu_{{\mathtt B}\xspace,i+1},\cdots,
\mu_{{\mathtt B}\xspace,k_{\mathtt B}\xspace} \})
\label{E:A_1i_odd_def_for_odd}
\end{equation}
for $i=1,2,\cdots,k_{\mathtt B}\xspace$. Furthermore,
\begin{equation}
{\mathcal A}_{{\mathtt B}\xspace,j,1}^{\text{e}} = 0
\label{E:A_x1_even_def_for_odd}
\end{equation}
and
\begin{equation}
{\mathcal A}_{{\mathtt B}\xspace,j,i}^{\text{e}} = A_j(\mu_{{\mathtt B}\xspace,i},\{ \mu_{{\mathtt B}\xspace,2},
\mu_{{\mathtt B}\xspace,3},\cdots, \mu_{{\mathtt B}\xspace,i-1}, \mu_{{\mathtt B}\xspace,i+1},\cdots,
\mu_{{\mathtt B}\xspace,k_{\mathtt B}\xspace} \})
\label{E:A_xi_even_def_for_odd}
\end{equation}
for $j=1,2$ and $i=2,3,\cdots,k_{\mathtt B}\xspace$. In addition,
\begin{widetext}
\begin{equation}
C_{{\mathtt B}\xspace,2} = \left( \sum_{\ell=1}^{k_{\mathtt B}\xspace} \mu_{{\mathtt B}\xspace,1}
\mu_{{\mathtt B}\xspace,2} \cdots \mu_{{\mathtt B}\xspace,\ell-1} \mu_{{\mathtt B}\xspace,\ell+1} \cdots
\mu_{{\mathtt B}\xspace,k_{\mathtt B}\xspace} \right) \sum_{i=1}^{k_{\mathtt B}\xspace} \left\{
\frac{1}{\mu_{{\mathtt B}\xspace,i} \prod_{t\ne i} (\mu_{{\mathtt B}\xspace,i} - \mu_{{\mathtt B}\xspace,t})}
\left[ \exp(\mu_{{\mathtt B}\xspace,i}) - \sum_{j=0}^{k_{\mathtt B}\xspace-1}
\frac{\mu_{{\mathtt B}\xspace,i}^j}{j!} \right] \right\} .
\label{E:C2_def_odd}
\end{equation}
\end{widetext}
\end{subequations}
Note that in Eq.~\eqref{E:various_A_ai},
\begin{subequations}
\label{E:generators_def}
\begin{equation}
A_0(\mu,S) = \frac{\displaystyle -\exp(\mu) \prod_{s\in S}
s}{\displaystyle \prod_{s\in S} (\mu - s)}
\label{E:A_00_template}
\end{equation}
and
\begin{equation}
A_1(\mu,S) = \frac{\displaystyle -\exp(\mu) \sum_{s\in S} \left(
\prod_{\substack{s'\in S \\ s' \ne s}} s' \right)}{\displaystyle
\prod_{s\in S} (\mu - s)} .
\label{E:A_11_template}
\end{equation}
\end{subequations}
Note that different upper and lower bounds for $Y_{{\mathtt B}\xspace,1,1}$ have
been obtained using a similar Vandermonde matrix inversion technique in
Ref.~\cite{YZLM16}. The differences between those bounds and the actual
value of $Y_{{\mathtt B}\xspace,1,1}$ depend on the yields $Y_{{\mathtt B}\xspace,i,j}$ with $i,j
\ge 1$. In contrast, the difference between the bound in
Inequalities~\eqref{E:Y11_bound} and the actual value of $Y_{{\mathtt B}\xspace,1,1}$
depends on $Y_{{\mathtt B}\xspace,i,j}$ with $i,j\ge k_{\mathtt B}\xspace$. Thus,
Inequality~\eqref{E:Y11_bound} and similarly also
Inequality~\eqref{E:Ye11_bound} give more accurate estimates of
$Y_{{\mathtt B}\xspace,1,1}$ and $Y_{{\mathtt B}\xspace,1,1} e_{{\mathtt B}\xspace,1,1}$, respectively.
Furthermore, the bounds in Ref.~\cite{YZLM16} also work for the case of
$\mu_{{\mathtt B}\xspace,k_{{\mathtt B}\xspace}} = 0$. It is also not clear how to extend their
method to bound yields other than the two-single-photon events that are
needed in computing the key rate for twin-field~\cite{LYDS18} and
phase-matching~\cite{MZZ18} MDI\xspace-QKD\xspace{s}.
\section{The Key Rate Formula}
\label{Sec:Rate}
The secure key rate $R$ is defined as the number of bits of secret key shared
by Alice and Bob at the end of the protocol divided by the number of photon
pulse pairs they have sent to Charlie. In fact, the derivation of the key
rate formula in Refs.~\cite{LCWXZ14,TL17,Chau18,CN20} for the case of
standard QKD\xspace can be easily modified to the case of MDI\xspace-QKD\xspace by making the
following correspondences.
(See also the key rate formula used in Ref.~\cite{MR12} for MDI\xspace-QKD\xspace.)
The vacuum event in the standard QKD\xspace is mapped to the event that both Alice
and Bob send a vacuum photon pulse to Charlie. The single photon event is
mapped to the event that both Alice and Bob send a single photon to Charlie.
The multiple photon event is mapped to the event that Alice and Bob are both
sending neither a vacuum nor a single photon pulse to Charlie. In the case
of forward reconciliation, the result is
\begin{widetext}
\begin{equation}
R \ge p_{\mathtt Z}\xspace^2 \left\{ \langle \exp(-\mu) \rangle_{{\mathtt Z}\xspace}
Y_{{\mathtt Z}\xspace,0,\star} + \langle \mu \exp(-\mu) \rangle_{{\mathtt Z}\xspace}^2
Y_{{\mathtt Z}\xspace,1,1} [1-H_2(e_p)] - \Lambda_\text{EC} - \frac{\langle
Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j}}{\ell_\text{raw}} \left[ 6\log_2 \left(
\frac{\chi}{\epsilon_\text{sec}} \right) + \log_2 \left(
\frac{2}{\epsilon_\text{cor}} \right) \right] \right\} ,
\label{E:key_rate_basic}
\end{equation}
\end{widetext}
where $\langle f(\mu)\rangle_{{\mathtt Z}\xspace} \equiv \sum_{i=1}^{k_{\mathtt Z}\xspace}
p_{i\mid{\mathtt Z}\xspace} f(\mu_{{\mathtt Z}\xspace,i})$, $\langle f({\mathtt Z}\xspace,i,j) \rangle_{i,j}
\equiv \sum_{i,j=1}^{k_{\mathtt Z}\xspace} p_{i\mid{\mathtt Z}\xspace} p_{j\mid{\mathtt Z}\xspace}
f({\mathtt Z}\xspace,i,j)$, $H_2(x) \equiv -x \log_2 x - (1-x) \log_2 (1-x)$ is the
binary entropy function, $e_p$ is the phase error rate of the single photon
events in the raw key, and $\Lambda_\text{EC}$ is the actual number of bits
of information that leaks to Eve as Alice and Bob perform error correction on
their raw bits. It is given by
\begin{equation}
\Lambda_\text{EC} = f_\text{EC} \langle Q_{{\mathtt Z}\xspace,i,j} H_2(E_{{\mathtt Z}\xspace,i,j})
\rangle_{i,j}
\label{E:information_leakage}
\end{equation}
where $f_\text{EC} \ge 1$ measures the inefficiency of the error-correcting
code used. In addition, $\ell_\text{raw}$ is the raw sifted key length
measured in bits, $\epsilon_\text{cor}$ is the upper bound of the probability
that the final keys shared between Alice and Bob are different, and
$\epsilon_\text{sec} = (1- p_\text{abort}) \| \rho_\text{AE} - U_\text{A}
\otimes \rho_\text{E} \|_1 / 2$.
Here $p_\text{abort}$ is the chance that the scheme aborts without generating
a key, $\rho_\text{AE}$ is the classical-quantum state describing the joint
state of Alice and Eve, $U_\text{A}$ is the uniform mixture of all the
possible raw keys created by Alice, $\rho_\text{E}$ is the reduced density
matrix of Eve, and $\| \cdot \|_1$ is the trace
norm~\cite{Renner05,KGR05,RGK05}.
In other words, Eve has at most $\epsilon_\text{sec}$~bits of information on
the final secret key shared by Alice and Bob.
(In the literature, this is often referred to as an
$\epsilon_\text{cor}$-correct and $\epsilon_\text{sec}$-secure QKD\xspace
scheme~\cite{TL17}.)
Last but not least, $\chi$ is a QKD\xspace scheme specific factor which depends on
the detailed security analysis used.
In general, $\chi$ may also depend on other factors used in the QKD\xspace scheme
such as the number of photon intensities $k_{{\mathtt X}\xspace}$ and
$k_{{\mathtt Z}\xspace}$~\cite{LCWXZ14,Chau18,CN20}.
In Inequality~\eqref{E:key_rate_basic}, the phase error of the raw key $e_p$
obeys~\cite{Chau18,FMC10}
\begin{widetext}
\begin{equation}
e_p \le e_{{\mathtt X}\xspace,1,1} + \bar{\gamma} \left(
\frac{\epsilon_\text{sec}}{\chi}, e_{{\mathtt X}\xspace,1,1}, \frac{s_{\mathtt X}\xspace
Y_{{\mathtt X}\xspace,1,1} \langle \mu \exp(-\mu) \rangle_{{\mathtt X}\xspace}^2}{\langle
Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}}, \frac{s_{\mathtt Z}\xspace Y_{{\mathtt Z}\xspace,1,1} \langle \mu
\exp(-\mu) \rangle_{{\mathtt Z}\xspace}^2}{\langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j}}
\right)
\label{E:e_p_bound}
\end{equation}
\end{widetext}
with probability at least $1-\epsilon_\text{sec}/\chi$, where $\langle f(\mu)
\rangle_{\mathtt X}\xspace \equiv \sum_{i=1}^{k_{\mathtt X}\xspace} p_{i\mid{\mathtt X}\xspace}
f(\mu_{{\mathtt X}\xspace,i})$,
\begin{equation}
\bar{\gamma}(a,b,c,d) \equiv \sqrt{\frac{(c+d)(1-b)b}{c d} \ \ln \left[
\frac{c+d}{2\pi c d (1-b)b a^2} \right]} ,
\label{E:gamma_def}
\end{equation}
and $s_{\mathtt B}\xspace$ is the number of bits that are prepared and measured in
${\mathtt B}\xspace$ basis.
Clearly, $s_{\mathtt Z}\xspace = \ell_\text{raw}$ and $s_{\mathtt X}\xspace \approx p_{\mathtt X}\xspace^2
s_{\mathtt Z}\xspace \langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j} / (p_{\mathtt Z}\xspace^2 \langle
Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j})$.
I also remark that $\bar{\gamma}$ becomes complex if $a,c,d$ are too large.
This is because in this case no $e_p \ge e_{{\mathtt X}\xspace,1,1}$ exists with failure
probability $a$. In this work, all parameters are carefully picked so that
$\bar{\gamma}$ is real.
There are two ways to proceed. The most general way is to directly find a
lower bound for $Y_{{\mathtt Z}\xspace,1,1}$.
Specifically, by substituting Inequalities~\eqref{E:Y0*_bound},
\eqref{E:Y11_bound} and~\eqref{E:e_p_bound} into
Inequality~\eqref{E:key_rate_basic}, I obtain the following lower bound of
the key rate
\begin{align}
R &\ge \sum_{i,j=1}^{k_{\mathtt Z}\xspace} {\mathcal B}_{{\mathtt Z}\xspace,i,j} Q_{{\mathtt Z}\xspace,i,j} -
p_{\mathtt Z}\xspace^2 \left\{ \vphantom{\frac{\chi}{\epsilon_\text{sec}}} \langle \mu
\exp(-\mu) \rangle_{\mathtt Z}\xspace^2 C_{{\mathtt Z}\xspace,2}^2 [1 - H_2(e_p)] \right.
\nonumber \\
&\quad \left. + \Lambda_\text{EC} + \frac{\langle Q_{{\mathtt Z}\xspace,i,j}
\rangle_{i,j}}{\ell_\text{raw}} \left[ 6\log_2 \left(
\frac{\chi}{\epsilon_\text{sec}} \right) + \log_2 \left(
\frac{2}{\epsilon_\text{cor}} \right) \right] \right\} ,
\label{E:key_rate_asym}
\end{align}
where
\begin{align}
{\mathcal B}_{{\mathtt Z}\xspace,i,j} &= p_{\mathtt Z}\xspace^2 \left\{ \langle \exp(-\mu)
\rangle_{{\mathtt Z}\xspace} {\mathcal A}_{{\mathtt Z}\xspace,0,i}^{\text{e}} p_{j\mid{\mathtt Z}\xspace} \right.
\nonumber \\
&\quad \left. + \langle \mu \exp(-\mu) \rangle_{{\mathtt Z}\xspace}^2
{\mathcal A}_{{\mathtt Z}\xspace,1,i}^{\text{o}} {\mathcal A}_{{\mathtt Z}\xspace,1,j}^{\text{o}} [1-H_2(e_p)]
\right\} .
\label{E:b_n_def}
\end{align}
Here I would like to point out that unlike the corresponding key rate
formulae for standard QKD\xspace in Refs.~\cite{Hayashi07,LCWXZ14,Chau18,CN20}, a
distinctive feature of the key rate formula for MDI\xspace-QKD\xspace in
Eq.~\eqref{E:key_rate_asym} is the presence of the $C_{{\mathtt Z}\xspace,2}^2$ term.
From Eq.~\eqref{E:various_A_ai}, provided that $\mu_{{\mathtt Z}\xspace,i} -
\mu_{{\mathtt Z}\xspace,i+1}$ are all greater than a fixed positive number, the value of
$C_{{\mathtt Z}\xspace,2}^2$ decreases with $k_{\mathtt Z}\xspace$. This is the reason why the
MDI\xspace version of a QKD\xspace scheme may require more decoys to attain a key rate
comparable to the corresponding standard QKD\xspace scheme.
There is an alternative way to obtain the key rate formula discovered by
Zhou \emph{et al.}~\cite{ZYW16} that works for BB84~\cite{BB84} and the
six-state scheme~\cite{B98}. Suppose the photon pulses prepared by Alice and
Bob in Step~\ref{Scheme:prepare} of the MDI\xspace-QKD\xspace protocol in
Sec.~\ref{Sec:Protocol} both contain a single photon. Suppose further that
they are prepared in the same basis. Then, from Charlie and Eve's point of
view, these two-single-photon states are the same irrespective of their
preparation basis. Consequently, $Y_{{\mathtt X}\xspace,1,1} = Y_{{\mathtt Z}\xspace,1,1}$ (even
though $e_{{\mathtt X}\xspace,1,1}$ need not equal $e_{{\mathtt Z}\xspace,1,1}$). That is to say,
the secure key rate in Inequality~\eqref{E:key_rate_basic} also holds if
$Y_{{\mathtt Z}\xspace,1,1}$ there is replaced by $Y_{{\mathtt X}\xspace,1,1}$. (Here I stress
that the key generation basis is still ${\mathtt Z}\xspace$. But as $Y_{{\mathtt X}\xspace,1,1} =
Y_{{\mathtt Z}\xspace,1,1}$, I could use the bound on $Y_{{\mathtt X}\xspace,1,1}$ to obtain an
alternative key rate formula for the same MDI\xspace-QKD\xspace scheme.)
Following the same procedure above, I get
\begin{align}
R &\ge \sum_{i,j=1}^{k_{\mathtt Z}\xspace} {\mathcal B}'_{{\mathtt Z}\xspace,i,j} Q_{{\mathtt Z}\xspace,i,j} +
\sum_{i,j=1}^{k_{\mathtt X}\xspace} {\mathcal B}_{{\mathtt X}\xspace,i,j} Q_{{\mathtt X}\xspace,i,j}
\nonumber \\
&\quad - p_{\mathtt Z}\xspace^2 \left\{ \vphantom{\frac{\chi}{\epsilon_\text{sec}}}
\langle \mu \exp(-\mu) \rangle_{\mathtt Z}\xspace^2 C_{{\mathtt X}\xspace,2}^2 [1 - H_2(e_p)] +
\Lambda_\text{EC} \right. \nonumber \\
&\quad \left. + \frac{\langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j}}{\ell_\text{raw}}
\left[ 6\log_2 \left( \frac{\chi}{\epsilon_\text{sec}} \right) + \log_2
\left( \frac{2}{\epsilon_\text{cor}} \right) \right] \right\} ,
\label{E:key_rate_asym_alt}
\end{align}
where
\begin{subequations}
\begin{equation}
{\mathcal B}'_{{\mathtt Z}\xspace,i,j} = p_{\mathtt Z}\xspace^2 \langle \exp(-\mu)
\rangle_{{\mathtt Z}\xspace} {\mathcal A}_{{\mathtt Z}\xspace,0,i}^{\text{e}} p_{j\mid{\mathtt Z}\xspace}
\label{E:bprime_n_def}
\end{equation}
and
\begin{equation}
{\mathcal B}_{{\mathtt X}\xspace,i,j} = p_{\mathtt Z}\xspace^2 \langle \mu \exp(-\mu)
\rangle_{{\mathtt Z}\xspace}^2 {\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{o}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{o}} [1-H_2(e_p)] .
\label{E:bX_n_def}
\end{equation}
\end{subequations}
\section{Treatments Of Phase Error And Statistical Fluctuation Due To Finite
Raw Key Length On The Secure Key Rate}
\label{Sec:Finite_Size}
In order to compute the lower bound on the key rate $R$ in
Inequalities~\eqref{E:key_rate_asym} and~\eqref{E:key_rate_asym_alt}, I need
to know the value of $e_{{\mathtt X}\xspace,1,1}$ through the
Inequality~\eqref{E:e_p_bound}. More importantly, I need to take into
consideration the effects of finite raw key length on the key rate $R$ due to
the statistical fluctuations in $e_{{\mathtt X}\xspace,1,1}$ and $Q_{{\mathtt Z}\xspace,i,j}$'s.
Here I do so by means of a McDiarmid-type of inequality in statistics first
proven in Refs.~\cite{McDiarmid,McDiarmid1} and recently extended in
Ref.~\cite{CN20}.
Fluctuation of the first term in the R.H.S. of
Inequality~\eqref{E:key_rate_asym}
due to finite raw key length can be handled by Hoeffding inequality for
hypergeometrically distributed random
variables~\cite{Hoeffding,LCWXZ14,Chau18,CN20}, which is a special case of
the McDiarmid inequality. Using the technique reported in
Refs.~\cite{Chau18,CN20}, the first term in the R.H.S. of
Inequality~\eqref{E:key_rate_asym} can be regarded as a sum of $s_{\mathtt Z}\xspace$
hypergeometrically distributed random variables each taking on values from
the set $\{ \langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j} {\mathcal B}_{{\mathtt Z}\xspace,i,j}
/ (p_{i\mid{\mathtt Z}\xspace} p_{j\mid{\mathtt Z}\xspace}) \}_{i,j=1}^{k_{\mathtt Z}\xspace}$. Using
Hoeffding inequality for hypergeometrically distributed random
variables~\cite{Hoeffding}, I conclude that the measured value of $\sum_{i,j}
{\mathcal B}_{{\mathtt Z}\xspace,i,j} Q_{{\mathtt Z}\xspace,i,j}$ minus its actual value is greater than
$\langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j} \left[ \frac{\ln
(\chi/\epsilon_\text{sec})}{2 s_{\mathtt Z}\xspace} \right]^{1/2} \Width \left( \left\{
\frac{{\mathcal B}_{{\mathtt Z}\xspace,i,j}}{p_{i\mid{\mathtt Z}\xspace} p_{j\mid{\mathtt Z}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt Z}\xspace} \right)$ with probability at most
$\epsilon_\text{sec} / \chi$, where $\Width$ of a finite set of real numbers
$S$ is defined as $\max S - \min S$.
The value of $e_{{\mathtt X}\xspace,1,1}$ in the finite sampling size situation is more
involved. Here I adapt the recent results in Ref.~\cite{CN20} to give four
upper bounds on $e_{{\mathtt X}\xspace,1,1}$. Surely, I pick the best upper bound out
of these four in the key rate analysis.
The first step is to use the equality
\begin{subequations}
\label{E:e_Z11_identities}
\begin{align}
e_{{\mathtt X}\xspace,1,1} &= \frac{Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}}{Y_{{\mathtt X}\xspace,1,1}}
\label{E:e_Z11_identity1} \\
&= \frac{Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}}{Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} +
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1}} .
\label{E:e_Z11_identity2}
\end{align}
\end{subequations}
To get the first two upper bounds of $e_{{\mathtt X}\xspace,1,1}$, I follow
Ref.~\cite{CN20} by using Inequalities~\eqref{E:Y11_bound},
\eqref{E:Ye11_bound} and~\eqref{E:Y11bare1_bound} together with applying
Hoeffding inequality for hypergeometrically distributed random variables to
study the statistical fluctuations of $\sum_{i,j=1}^{k_{\mathtt X}\xspace}
{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}} {\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}}
Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j}$, $\sum_{i,j=1}^{k_{\mathtt X}\xspace}
{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{o}} {\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{o}}
Q_{{\mathtt X}\xspace,i,j}$ and $\sum_{i,j=1}^{k_{\mathtt X}\xspace}
{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{o}} {\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{o}}
Q_{{\mathtt X}\xspace,i,j} \bar{E}_{{\mathtt X}\xspace,i,j}$. The result is
\begin{equation}
e_{{\mathtt X}\xspace,1,1} \le \frac{\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}
\right)^\uparrow + \Delta Y_{{\mathtt X}\xspace,1,1}
e_{{\mathtt X}\xspace,1,1}}{Y_{{\mathtt X}\xspace,1,1}^\downarrow - \Delta Y_{{\mathtt X}\xspace,1,1}}
\label{E:e_Z11_bound1}
\end{equation}
and
\begin{align}
e_{{\mathtt X}\xspace,1,1} &\le \left[ \left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}
\right)^\uparrow + \Delta Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right] \left[
\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right)^\uparrow \right. \nonumber \\
&\quad \left. + \left( Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1}
\right)^\downarrow + \Delta Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} - \Delta
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} \right]^{-1}
\label{E:e_Z11_bound2}
\end{align}
each with probability at least $1-2\epsilon_\text{sec}/\chi$, where
\begin{subequations}
\label{E:finite-size_Ye_s}
\begin{align}
\Delta Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} &= \left[ \frac{\langle
Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j} \langle Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j}
\rangle_{i,j} \ln (\chi/\epsilon_\text{sec})}{2 s_{\mathtt X}\xspace} \right]^{1/2}
\times \nonumber \\
&\qquad \Width \left( \left\{ \frac{{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt X}\xspace} \right) ,
\label{E:finite-size_Ye}
\end{align}
\begin{align}
\Delta Y_{{\mathtt X}\xspace,1,1} &= \langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j} \left[
\frac{\ln (\chi/\epsilon_\text{sec})}{2 s_{\mathtt X}\xspace} \right]^{1/2} \times
\nonumber \\
&\qquad \Width \left( \left\{ \frac{{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{o}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{o}}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt X}\xspace} \right)
\label{E:finite-size_Y}
\end{align}
and
\begin{align}
\Delta Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} &= \left[
\frac{\langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j} \langle Q_{{\mathtt X}\xspace,i,j}
\bar{E}_{{\mathtt X}\xspace,i,j} \rangle_{i,j} \ln (\chi/\epsilon_\text{sec})}{2
s_{\mathtt X}\xspace} \right]^{1/2} \times \nonumber \\
&\qquad \Width \left( \left\{ \frac{{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{o}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{o}}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt X}\xspace} \right) .
\label{E:finite-size_Yebar}
\end{align}
\end{subequations}
Note that in the above equations, $\langle f({\mathtt X}\xspace,i,j) \rangle_{i,j}
\equiv \sum_{i,j=1}^{k_{\mathtt X}\xspace} p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}
f({\mathtt X}\xspace,i,j)$.
Both the third and the fourth bounds of $e_{{\mathtt X}\xspace,1,1}$ use
Eq.~\eqref{E:e_Z11_identity2}, Inequality~\eqref{E:Y11bare1_bound} and the
modified McDiarmid inequality in Ref.~\cite{CN20}. For the third one, the
result is
\begin{align}
e_{{\mathtt X}\xspace,1,1} &\le \frac{\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}
\right)^\uparrow}{\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right)^\uparrow +
\left( Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} \right)^\downarrow - \Delta
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1}} \nonumber \\
&\quad + \Delta e_{{\mathtt X}\xspace,1,1}
\label{E:e_Z11_bound3}
\end{align}
with probability at least $1 - 2\epsilon_\text{sec} / \chi$, where
\begin{widetext}
\begin{align}
& \Delta e_{{\mathtt X}\xspace,1,1} \nonumber \\
={} & \left[ \frac{\langle Q_{{\mathtt X}\xspace,i,j}
\rangle_{i,j} \langle Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j} \rangle_{i,j} \ln
(\chi/\epsilon_\text{sec})}{2 s_{\mathtt X}\xspace} \right]^{1/2} \left[ \left(
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} \right)^\downarrow - \Delta
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} \right] \Width \left( \left\{
\frac{{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt X}\xspace} \right) \nonumber \\
& \quad \times \left[ \left( Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1}
\right)^\downarrow - \Delta Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} + \left(
Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right)^\downarrow \left( 1 -
\frac{\langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}}{s_{\mathtt X}\xspace \langle
Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j} \rangle_{i,j}} \right) + \frac{\langle
Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}^2}{s_{\mathtt X}\xspace^2 \langle Q_{{\mathtt X}\xspace,i,j}
E_{{\mathtt X}\xspace,i,j} \rangle_{i,j}} \max_{i,j=1}^{k_{\mathtt X}\xspace} \left\{
\frac{{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\} \right]^{-1} \nonumber
\displaybreak[1]
\\
& \quad \times \left[ \left( Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1}
\right)^\downarrow - \Delta Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} + \left(
Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right)^\downarrow \left( 1 -
\frac{\langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}}{s_{\mathtt X}\xspace \langle
Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j} \rangle_{i,j}} \right) + \frac{\langle
Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}^2}{s_{\mathtt X}\xspace^2 \langle Q_{{\mathtt X}\xspace,i,j}
E_{{\mathtt X}\xspace,i,j} \rangle_{i,j}} \min_{i,j=1}^{k_{\mathtt X}\xspace} \left\{
\frac{{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\} \right]^{-1} .
\label{E:finite-size_e_Z11_shift3}
\end{align}
And the fourth bound is
\begin{equation}
e_{{\mathtt X}\xspace,1,1} \le \frac{\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}
\right)^\uparrow}{\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right)^\uparrow +
\left( Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} \right)^\downarrow - \Delta
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1}} + \hat{r} \left[ \frac{\ln
(\chi/\epsilon_\text{sec})}{2} \right]^{1/2}
\label{E:e_Z11_bound4}
\end{equation}
with probability at least $1 - 3\epsilon_\text{sec} / \chi$, where
\begin{align}
\hat{r}^2 &\approx y^2 \sum_{m=1}^{k_{\mathtt X}\xspace^2} \frac{1}{w^{(m)}-x} \left( -
\frac{1}{y+(t-\sum_{i<m} n^{(i)}+1)x+ \min {\mathcal W} + \sum_{i<m}
n^{(i)} w^{(i)} + \mu[ w^{(m)}-x]} \right. \nonumber \\
& \qquad - \frac{1}{y+(t-\sum_{i<m} n^{(i)}+1)x+\max {\mathcal W} +
\sum_{i<m} n^{(i)} w^{(i)} + \mu[ w^{(m)}-x]} \nonumber \\
& \qquad \left. \left. + \frac{2}{\Width({\mathcal W})} \ln \left\{
\frac{y+(t-\sum_{i<m} n^{(i)}+1)x+\max {\mathcal W} + \sum_{i<m}
n^{(i)} w^{(i)} + \mu [w^{(m)}-x]}{y+(t-\sum_{i<m} n^{(i)}+1)x+\min
{\mathcal W} + \sum_{i<m} n^{(i)} w^{(i)} + \mu [w^{(m)}-x]} \right\}
\right) \right|_{\mu = 0}^{n^{(m)}} .
\label{E:finite-size_e_Z11_shift4}
\end{align}
\end{widetext}
In the above equation,
\begin{subequations}
\begin{equation}
y = \left( Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} \right)^\downarrow - \Delta
Y_{{\mathtt X}\xspace,1,1} \bar{e}_{{\mathtt X}\xspace,1,1} ,
\label{E:y_def_shift4}
\end{equation}
\begin{equation}
t \approx \frac{s_{\mathtt X}\xspace \langle Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j}
\rangle_{i,j}}{\langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}} ,
\label{E:t_def_shift4}
\end{equation}
\begin{equation}
x = \frac{\left( Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1} \right)^\downarrow - \Delta
Y_{{\mathtt X}\xspace,1,1} e_{{\mathtt X}\xspace,1,1}}{t}
\label{E:x_def_shift4}
\end{equation}
and
\begin{equation}
{\mathcal W} = \left\{ \frac{\langle Q_{{\mathtt X}\xspace,i',j'} \rangle_{i',j'}
{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}}
{\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}}}{s_{\mathtt X}\xspace p_{i\mid{\mathtt X}\xspace}
p_{j\mid{\mathtt X}\xspace}} \right\}_{i,j=1}^{k_{\mathtt X}\xspace}
\label{E:W_def_shift4}
\end{equation}
\end{subequations}
Last but not least, I need to define $w^{(m)}$ and $n^{(m)}$.
Recall that by following the analysis in Ref.~\cite{CN20}, there is a one-one
correspondence between a random variable in ${\mathcal W}$ taking the value
of $\langle Q_{{\mathtt X}\xspace,i',j'} \rangle_{i',j'}
{\mathcal A}_{{\mathtt X}\xspace,1,i}^{\text{e}} {\mathcal A}_{{\mathtt X}\xspace,1,j}^{\text{e}} /
(s_{\mathtt X}\xspace p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace})$ and an event that a photon
pulse pair is prepared by Alice (Bob) using intensity $\mu_{{\mathtt X}\xspace,i}$
($\mu_{{\mathtt X}\xspace,j}$) both in basis ${\mathtt X}\xspace$ and that the Bell basis
measurement result announced by Charlie is inconsistent with the photon states
prepared by Alice and Bob.
Now let us arrange the $k_{\mathtt X}\xspace^2$ elements in the set ${\mathcal W}$
in descending order as $\{ w^{(1)}, w^{(2)}, \cdots,
w^{(k_{\mathtt X}\xspace^2)} \}$. Then, $n^{(i)}$ is the number of Bell basis
measurement events that corresponds to the value of $w^{(i)} \in
{\mathcal W}$.
There is an important subtlety that requires attention. In almost all cases
of interest, each summand in Eq.~\eqref{E:finite-size_e_Z11_shift4} consists
of three terms. The first two are positive and the third one is negative.
The sum of the first two terms almost exactly equal to the magnitude of the
third term. Hence, truncation error is very serious if one directly uses
Eq.~\eqref{E:finite-size_e_Z11_shift4} to numerically compute $\hat{r}$.
The solution is to expand each term in powers of $1/D_m$ and/or $1/E_m$
defined below. This gives
\begin{align}
& \hat{r}^2 \nonumber \\
\approx{}& \frac{y^2 \Width({\mathcal W})^2}{3} \sum_{m=1}^{k_{\mathtt X}\xspace^2}
\frac{n^{(m)}}{D_m E_m} \left( \frac{1}{D_m^2} + \frac{1}{D_m E_m} +
\frac{1}{E_m^2} \right) ,
\label{E:e_Z11_bound4_approx}
\end{align}
where
\begin{subequations}
\begin{equation}
D_m = y+ (t - \sum_{i<m} n^{(i)} + 1)x + \min {\mathcal W} + \sum_{i<m}
n^{(i)} w^{(i)}
\label{E:D_m_def}
\end{equation}
and
\begin{align}
E_m &= y+ (t - \sum_{i<m} n^{(i)} + 1)x + \min {\mathcal W} + \sum_{i<m}
n^{(i)} w^{(i)} \nonumber \\
&\quad + n^{(m)} (w^{(m)} - x) \nonumber \\
&= y+ (t - \sum_{i\le m} n^{(i)} + 1)x + \min {\mathcal W} + \sum_{i\le m}
n^{(i)} w^{(i)} .
\label{E:E_m_def}
\end{align}
\end{subequations}
(Note that only the leading term is kept in
Eq.~\eqref{E:e_Z11_bound4_approx}. This is acceptable because the next order
term is of order of about $1/100$ that of the leading term in all cases of
practical interest.)
With all the above discussions, to summarize, the secure key rate $R$ of this
$\epsilon_\text{cor}$-correct and $\epsilon_\text{sec}$-secure QKD\xspace scheme in
the finite raw key length situation is lower-bounded by
\begin{widetext}
\begin{subequations}
\begin{align}
R &\ge \sum_{i,j=1}^{k_{\mathtt Z}\xspace} {\mathcal B}_{{\mathtt Z}\xspace,i,j} Q_{{\mathtt Z}\xspace,i,j} -
\langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j} \left[ \frac{\ln (\chi /
\epsilon_\text{sec})}{2s_{\mathtt Z}\xspace} \right]^{1/2} \Width \left( \left\{
\frac{{\mathcal B}_{{\mathtt Z}\xspace,i,j}}{p_{i\mid{\mathtt Z}\xspace} p_{j\mid{\mathtt Z}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt Z}\xspace} \right) - p_{\mathtt Z}\xspace^2 \left\{
\vphantom{\frac{\epsilon_\text{sec}}{s_{\mathtt Z}\xspace}}
\langle \mu \exp(-\mu) \rangle_{\mathtt Z}\xspace^2 C_{{\mathtt Z}\xspace,2}^2 [ 1 - H_2(e_p) ]
\right. \nonumber \\
&\quad \left. + f_\text{EC} \langle Q_{{\mathtt Z}\xspace,i,j} H_2(E_{{\mathtt Z}\xspace,i,j})
\rangle_{i,j} + \frac{\langle Q_{{\mathtt Z}\xspace,i,j}
\rangle_{i,j}}{\ell_\text{raw}} \left[ 6\log_2 \left(
\frac{\chi}{\epsilon_\text{sec}} \right) + \log_2 \left(
\frac{2}{\epsilon_\text{cor}} \right) \right] \right\} .
\label{E:key_rate_finite-size}
\end{align}
and
\begin{align}
R &\ge \sum_{i,j=1}^{k_{\mathtt Z}\xspace} {\mathcal B}'_{{\mathtt Z}\xspace,i,j} Q_{{\mathtt Z}\xspace,i,j} +
\sum_{i,j=1}^{k_{\mathtt X}\xspace} {\mathcal B}_{{\mathtt X}\xspace,i,j} Q_{{\mathtt X}\xspace,i,j} -
\langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j} \left[ \frac{\ln (\chi /
\epsilon_\text{sec})}{2s_{\mathtt Z}\xspace} \right]^{1/2} \Width \left( \left\{
\frac{{\mathcal B}'_{{\mathtt Z}\xspace,i,j}}{p_{i\mid{\mathtt Z}\xspace} p_{j\mid{\mathtt Z}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt Z}\xspace} \right) - \langle Q_{{\mathtt X}\xspace,i,j} \rangle_{i,j}
\times \nonumber \\
&\qquad \left[ \frac{\ln (\chi / \epsilon_\text{sec})}{2 s_{\mathtt X}\xspace}
\right]^{1/2} \Width \left( \left\{
\frac{{\mathcal B}_{{\mathtt X}\xspace,i,j}}{p_{i\mid{\mathtt X}\xspace} p_{j\mid{\mathtt X}\xspace}}
\right\}_{i,j=1}^{k_{\mathtt X}\xspace} \right) - p_{\mathtt Z}\xspace^2 \left\{
\vphantom{\frac{\epsilon_\text{sec}}{s_{\mathtt Z}\xspace}}
\langle \mu \exp(-\mu) \rangle_{\mathtt Z}\xspace^2
C_{{\mathtt X}\xspace,2}^2 [ 1 - H_2(e_p) ] + f_\text{EC} \langle Q_{{\mathtt Z}\xspace,i,j}
H_2(E_{{\mathtt Z}\xspace,i,j}) \rangle_{i,j} \right. \nonumber \\
&\qquad \left. + \frac{\langle Q_{{\mathtt Z}\xspace,i,j}
\rangle_{i,j}}{\ell_\text{raw}} \left[ 6\log_2 \left(
\frac{\chi}{\epsilon_\text{sec}} \right) + \log_2 \left(
\frac{2}{\epsilon_\text{cor}} \right) \right] \right\} .
\label{E:key_rate_finite-size_alt}
\end{align}
\end{subequations}
\end{widetext}
I remark that the R.H.S. of the above inequalities implicitly depends on
$e_{{\mathtt X}\xspace,1,1}$ whose upper bound obeys
Inequalities~\eqref{E:e_Z11_bound1}, \eqref{E:e_Z11_bound2},
\eqref{E:e_Z11_bound3}, \eqref{E:e_Z11_bound4}
and~\eqref{E:e_Z11_bound4_approx}. Furthermore, when using the key rate in
Inequality~\eqref{E:key_rate_finite-size}, $\chi = 9 = 4+1+4$ for the first
three inequalities concerning $e_{{\mathtt X}\xspace,1,1}$ and $\chi = 10$ for the last
inequality concerning $e_{{\mathtt X}\xspace,1,1}$~\cite{CN20}. While using the key
rate in Inequality~\eqref{E:key_rate_finite-size_alt} instead of
Inequality~\eqref{E:key_rate_finite-size}, $\chi = 9, 10, 10, 11$ for
Methods~A, B, C and D, respectively. (The reason why $\chi$ increases by
1, except for Method~A, when switching the rate formula from
Inequality~\eqref{E:key_rate_finite-size} to
Inequality~\eqref{E:key_rate_finite-size_alt} is the inclusion of the
finite-size statistical fluctuations of the lower bound on $Y_{{\mathtt X}\xspace,1,1}$.)
Compared with the corresponding key rate formula for the standard QKD\xspace scheme, the
most noticeable difference is the presence of additional terms and factors
involving $C_{{\mathtt B}\xspace,2}^2$ which tend to lower the key rate. Fortunately,
$C_{{\mathtt B}\xspace,2}$ roughly scales as $\mu_{{\mathtt B}\xspace,1}^{k_{\mathtt B}\xspace}$ so that in
practice, these terms and factors are negligible if $k_{\mathtt B}\xspace \gtrsim 2$ to
$3$. Finally, I remark that in the limit of $s_{\mathtt Z}\xspace \to +\infty$, the
key rate formulae in Inequalities~\eqref{E:key_rate_finite-size}
and~\eqref{E:key_rate_finite-size_alt} are tight in the sense that these
lower bounds are reachable although the condition for attaining them is highly
unlikely to occur in realistic channels.
\section{Performance Analysis}
\label{Sec:Performance}
To study the key rate, I use the channel model reported by Ma and
Razavi in Ref.~\cite{MR12}, which I called the MR\xspace channel. For this
channel,
\begin{subequations}
\label{E:Channel_Model}
\begin{equation}
Q_{{\mathtt X}\xspace,i,j} = 2\beta_{ij}^2 [1+2\beta_{ij}^2-4\beta_{ij}
I_0(\alpha_{{\mathtt X}\xspace ij}) +I_0(2\alpha_{{\mathtt X}\xspace ij})] ,
\label{E:Channel_Q_Xij}
\end{equation}
\begin{equation}
Q_{{\mathtt X}\xspace,i,j} E_{{\mathtt X}\xspace,i,j} = e_0 Q_{{\mathtt X}\xspace,i,j} - 2(e_0 - e_d)
\beta_{ij}^2 [I_0(2\alpha_{{\mathtt X}\xspace ij}) - 1] ,
\label{E:Channel_QE_Xij}
\end{equation}
\begin{equation}
Q_{{\mathtt Z}\xspace,i,j} = Q^{(E)}_{ij} + Q^{(C)}_{ij}
\label{E:Channel_Q_Zij}
\end{equation}
and
\begin{equation}
Q_{{\mathtt Z}\xspace,i,j} E_{{\mathtt Z}\xspace,i,j} = e_d Q^{(C)}_{ij} + (1-e_d) Q^{(E)}_{ij} ,
\label{E:Channel_QE_Zij}
\end{equation}
where $I_0(\cdot)$ is the modified Bessel function of the first kind,
\begin{equation}
\alpha_{{\mathtt B}\xspace ij} = \frac{\sqrt{\eta_\text{A} \mu_{{\mathtt B}\xspace,i} \eta_\text{B}
\mu_{{\mathtt B}\xspace,j}}}{2} ,
\label{E:Channel_alpha_def}
\end{equation}
\begin{equation}
\beta_{ij} = (1-p_d) \exp \left( -\frac{\eta_\text{A} \mu_{{\mathtt X}\xspace,i} +
\eta_\text{B} \mu_{{\mathtt X}\xspace,j}}{4} \right) ,
\label{E:Channel_beta_def}
\end{equation}
\begin{equation}
e_0 = \frac{1}{2} ,
\label{E:Channel_e0_def}
\end{equation}
\begin{align}
Q^{(C)}_{ij} &= 2(1-p_d)^2 \exp \left( -\frac{\eta_\text{A} \mu_{{\mathtt Z}\xspace,i} +
\eta_\text{B} \mu_{{\mathtt Z}\xspace,j}}{2} \right) \times \nonumber \\
& \qquad \left[ 1 - (1-p_d) \exp \left( - \frac{\eta_\text{A}
\mu_{{\mathtt Z}\xspace,i}}{2} \right) \right] \times
\nonumber \\
& \qquad \left[ 1 - (1-p_d) \exp \left( - \frac{\eta_\text{B}
\mu_{{\mathtt Z}\xspace,j}}{2} \right) \right]
\label{E:Channel_QC_def}
\end{align}
and
\begin{align}
Q^{(E)}_{ij} &= 2p_d(1-p_d)^2 \exp \left( - \frac{\eta_\text{A}
\mu_{{\mathtt Z}\xspace,i} + \eta_\text{B} \mu_{{\mathtt Z}\xspace,j}}{2} \right) \times \nonumber
\\
& \qquad \left[ I_0(2\alpha_{{\mathtt Z}\xspace ij}) - (1-p_d) \exp \left( -
\frac{\eta_\text{A} \mu_{{\mathtt Z}\xspace,i} + \eta_\text{B} \mu_{{\mathtt Z}\xspace,j}}{2}
\right) \right] .
\label{E:Channel_QE_def}
\end{align}
Here $e_d$ is the misalignment probability, $p_d$ is the dark count rate per
detector. Moreover, $\eta_\text{A}$ ($\eta_\text{B}$) is transmittance of
the channel between Alice (Bob) and Charlie. They are given by
\begin{equation}
\eta_\text{A} = \eta_d 10^{-\eta_\text{att} L_\text{A} / 10}
\label{E:Channel_eta_d_def}
\end{equation}
\end{subequations}
and similarly for $\eta_\text{B}$, where $L_\text{A}$ is the length of the
fiber connecting Alice to Charlie, $\eta_d$ is the detection efficiency of
a detector, and $\eta_\text{att}$ is the transmission fiber loss constant.
I remark that the MR\xspace channel model assumes that the partial Bell state
measurement is performed using linear optics with ideal beam and/or
polarization beam splitters. It also assumes that all photon detectors are
identical and that the dead time is ignored. Moreover, this channel does not
consider the use of quantum repeater.
\begin{figure}
\caption{}% NOTE(review): caption text appears to be missing from the source; duplicate \label removed
\label{F:Mao_cmp}
\end{figure}
The state-of-the-art key rate formula for decoy-state MDI\xspace-QKD\xspace with finite
raw key length is the one by Mao \emph{et al.} in Ref.~\cite{MZZZZW18}, which
extended an earlier result by Zhou \emph{et al.} in Ref.~\cite{ZYW16}. (Note
that even higher key rates have been reported by Xu
\emph{et al.}~\cite{XXL14} and Zhou \emph{et al.}~\cite{ZYW16}. Note however
that the first work applied brute force optimization as well as the Chernoff
bound on a much longer raw key. Its effectiveness in handling short raw key
length situation is not apparent. Whereas the second work assumed that the
statistical fluctuation is Gaussian distributed which is not justified in
terms of unconditional security.)
To compare with the provably secure key rate reported in
Ref.~\cite{MZZZZW18}, I use their settings by putting $e_d = 1.5\%$, $p_d =
$6.02\times 10^{-6}$, $\eta_\text{att} = 0.2$~dB/km, $\eta_d = 14.5\%$,
$f_\text{EC} = 1.16$, and $L_\text{A} = L_\text{B} = L/2$ where $L$ is the
transmission distance between Alice and Bob.
For the security parameters, I follow Ref.~\cite{MZZZZW18} by setting
$\epsilon_\text{sec} / \chi = 10^{-10}$ although a more meaningful way is to
set $\epsilon_\text{sec}$ divided by the length of the final key to a fixed
small number~\cite{LCWXZ14}. Whereas for $\epsilon_\text{cor}$, its value
has not been specified in Ref.~\cite{MZZZZW18}. Fortunately,
Inequality~\eqref{E:key_rate_finite-size} implies that the provably secure
key rate does not sensitively depend on $\epsilon_\text{cor}$. Here I
simply set it to $10^{-10}$.
Fig.~\ref{F:Mao_cmp} compares the key rates when the total number of photon
pulse pairs prepared by Alice and Bob, $N_t \approx \ell_\text{raw} /
(p_{\mathtt Z}\xspace^2 \langle Q_{{\mathtt Z}\xspace,i,j} \rangle_{i,j})$ is set to $10^{10}$.
For each of the curves, the number of photon intensities $k_{\mathtt X}\xspace$
($k_{\mathtt Z}\xspace$) used for ${\mathtt X}\xspace$ (${\mathtt Z}\xspace$) are fixed. The smallest photon
intensities $\mu_{{\mathtt X}\xspace,k_{\mathtt X}\xspace}$ and $\mu_{{\mathtt Z}\xspace,k_{\mathtt Z}\xspace}$ are both
set to be $10^{-6}$. The optimized key rate is then calculated by varying
the other $\mu_{{\mathtt X}\xspace,i}$'s, $\mu_{{\mathtt Z}\xspace,i}$'s as well as
$p_{i|{\mathtt X}\xspace}, p_{i|{\mathtt Z}\xspace}$ and $p_Z$ by a combination of random sampling
(with a minimum of $10^7$~samples to a maximum of about $10^9$~samples per
data point on each curve) and adaptive gradient descent method (that is, the
step size is adjusted dynamically to speed up the descent).
For some of the curves, I introduce additional constraints that
$\mu_{{\mathtt X}\xspace,i} = \mu_{{\mathtt Z}\xspace,i}$ so as to reduce the number of different
photon intensities used. To aid discussion, I refer to the unconstrained and
constrained situations by $(k_{\mathtt X}\xspace,k_{\mathtt Z}\xspace)_G$ and
$(k_{\mathtt X}\xspace,k_{\mathtt Z}\xspace)_R$, respectively.
The $R-L$~graphs in Fig.~\ref{F:Mao_cmp} clearly show the advantage of using
the method in this text in computing the provably secure (optimized) key
rate.
The black curve, which is the distance-rate graph of $(3,2)_G$ that uses four
different photon intensities, is much better than the red one (which also
uses four different photon intensities) originally reported in
Ref.~\cite{MZZZZW18}. In fact, for any distance $L$ between Alice and Bob,
the key rate of the $(3,2)_G$ method is at least $2.25$~times that of the
state-of-the-art key rate reported in Ref.~\cite{MZZZZW18}.
(I also mention on passing that the rate of the black curve is even higher
than that of the two decoy key rate reported in Ref.~\cite{XXL14} using a
much longer raw key length of $\ell_\text{raw} = 10^{12}$.)
Besides, the $(3,2)_G$ method extends the working distance between Alice and
Bob from slightly less than 60~km to slightly over 130~km.
The blue dashed curve is the key rate of $(3,3)_R$, which uses the same set
of three different photon intensities for both preparation bases. Although
it uses one less photon intensity, it still outperforms the key rate of the
red curve when $L \gtrsim 45$~km.
\begin{figure}
\caption{}% NOTE(review): caption text appears to be missing from the source; duplicate \label removed
\label{F:Zhou_cmp}
\end{figure}
To further illustrate the power of this method, I compare the key rates
here with the ones obtained in Ref.~\cite{ZYW16} in which they used four
photon states and the following parameters: $e_d = 1.5\%$, $p_d = 10^{-7}$,
$\eta_\text{att} = 0.2$~dB/km, $\eta_d = 40\%$, $f_\text{EC} = 1.16$,
$L_\text{A} = L_\text{B} = L/2$ and $\epsilon_\text{sec} / \chi =
\epsilon_\text{cor} = 10^{-7}$. The optimized key rates are then found using
the same method that produces Fig.~\ref{F:Mao_cmp}.
As shown in Fig.~\ref{F:Zhou_cmp}, the optimized key rate of $(3,2)_G$ (the
black curve that uses four different photon intensities) is at least 90\%
higher than those reported in Ref.~\cite{ZYW16}. Just like the previous
comparison, the key rate of $(3,3)_R$ which uses only three different photon
intensities is better than the one reported in Ref.~\cite{ZYW16} when $L
\gtrsim 60$~km. Last but not least, the maximum transmission distance
increases from about 87~km to about 156~km for $(3,2)_G$ and 162~km for
$(4,3)_G$ (the latter not shown in the figure to avoid curve crowding).
\begin{figure}
\caption{}% NOTE(review): caption text appears to be missing from the source; duplicate \label removed
\label{F:kappa_rates}
\end{figure}
In a sense, instead of $\epsilon_\text{sec} / \chi$, a fairer security
parameter to use is $\kappa$, namely $\epsilon_\text{sec}$ per number of bits
of the final key~\cite{LCWXZ14}.
Fig.~\ref{F:kappa_rates} depicts the $R-L$ curves of various methods when
$\kappa = 10^{-15}$ and $\epsilon_\text{cor} = 10^{-10}$. Here instead of
fixing $N_t$, I keep $\ell_\text{raw} = 10^{10}$ which corresponds to a much
greater value of $N_t$ in general.
The blue dashed curve is the rate for $(3,3)_R$ which uses three photon
intensities. It already achieves a non-zero key rate at a distance of
slightly greater than 160~km. The black curve is the rate for $(3,2)_G$
which uses four photon intensities.
It allows Alice and Bob to share a secret key over a distance close to
200~km.
This finding makes sense because a larger raw key length $\ell_\text{raw}$
implies smaller statistical fluctuations in our estimates of various yields
and error rate, which in turn increase the provably secure key rate and the
maximum transmission distance.
Tables~\ref{T:keyrates_epsilon_sec_per_chi}
and~\ref{T:keyrates_kappa} show the provably secure optimized key rates
using various values of $k_{\mathtt X}\xspace$ and $k_{\mathtt Z}\xspace$ for the case of fixing
$\epsilon_\text{sec} / \chi$ and $\kappa$, respectively. The following
points can be drawn from these figures and tables.
First, for the unconstrained photon intensity situation, the optimized key
rate increases as $k_{\mathtt X}\xspace$ increases.
For instance, as shown in Table~\ref{T:keyrates_epsilon_sec_per_chi}, the
key rate of $(4,2)_G$ is at least 39\% higher than that of $(3,2)_G$ by
fixing $\epsilon_\text{sec}/\chi$. And from Table~\ref{T:keyrates_kappa},
the corresponding increase in key rate by fixing $\kappa$ is about 18\% in
the distance range from 0~km to 150~km.
(I do not draw these curves in Figs.~\ref{F:Mao_cmp} and~\ref{F:kappa_rates}
because they almost overlap with the $(3,2)_G$ curve using the same plotting
scales.)
Second, for the constrained photon intensity situation, the optimized key
rate increases as $k_{\mathtt X}\xspace = k_{\mathtt Z}\xspace$ increases. These findings can be
understood by the fact that the more decoy intensity used, the closer the
various bounds of yields and error rates are to their actual values.
Third, the constrained key rate is in general several times lower than the
corresponding unconstrained one. So, using the same set of photon
intensities for the two bases is not a good idea, at least for the MR\xspace
channel.
\begin{table}[t]
\centering
\begin{tabular}{||c|c|c||}
\hline\hline
$L$/km & 0 & 50 \\
\hline
$(3,2)_G$ & $7.49\times 10^{-5}$ & $1.50\times 10^{-6}$ \\
\hline
$(3,3)_R$ & $9.65\times 10^{-6}$ & $1.25\times 10^{-7}$ \\
\hline
$(3,3)_G$ & $8.51\times 10^{-5}$ & $1.82\times 10^{-6}$ \\
\hline
$(4,2)_G$ & $1.04\times 10^{-4}$ & $2.22\times 10^{-6}$ \\
\hline
$(4,3)_G$ & $1.04\times 10^{-4}$ & $2.24\times 10^{-6}$ \\
\hline
$(4,4)_R$ & $3.10\times 10^{-5}$ & $3.75\times 10^{-7}$ \\
\hline
$(4,4)_G$ & $1.04\times 10^{-4}$ & $2.23\times 10^{-6}$ \\
\hline\hline
\end{tabular}
\caption{Optimized secure key rates for $N_t = 10^{10}$ and
$\epsilon_\text{sec} / \chi = \epsilon_\text{cor} = 10^{-10}$.
\label{T:keyrates_epsilon_sec_per_chi}}
\end{table}
There is an interesting observation that requires in-depth explanation.
From Table~\ref{T:keyrates_kappa}, for the case of fixing $k_{\mathtt X}\xspace$ and
$\kappa$, the increase in $R$ due to increase in $k_{\mathtt Z}\xspace$ is
insignificant.
Moreover, Table~\ref{T:keyrates_epsilon_sec_per_chi} shows that for the case
of fixing $\epsilon_\text{sec} / \chi$ and $k_{\mathtt X}\xspace$, significant increase in
key rate occurs only when $k_{\mathtt X}\xspace = 3$.
The reason is that for the MR\xspace channel~\cite{MR12}, it turns out that the key
rate computed by Inequality~\eqref{E:key_rate_finite-size_alt} is greater
than that computed by Inequality~\eqref{E:key_rate_finite-size}.
That is to say, the lower bound of $Y_{{\mathtt X}\xspace,1,1}$ is a better estimate of
the single photon pair yield than the lower bound of $Y_{{\mathtt Z}\xspace,1,1}$.
Thus, increasing $k_{\mathtt Z}\xspace$ only gives a better estimate of
$Y_{{\mathtt Z}\xspace,0,\star}$. Since I fix the lowest photon intensity to $10^{-6}$,
which is very close to the vacuum state, the major error in estimating
$Y_{{\mathtt Z}\xspace,0,\star}$ comes from finite-size statistical fluctuation.
Consequently, by fixing a large enough raw key length $\ell_\text{raw}$, the
use of more than two photon intensities for the ${\mathtt Z}\xspace$ basis does not improve
the provably secure key rate in practice. In other words, the improvement on
the provably secure key rate by increasing $k_{\mathtt Z}\xspace$ alone for the MR\xspace
channel occurs only when $k_{\mathtt Z}\xspace$ is small, say, about 2 to 3 and when
$\ell_\text{raw}$ is small.
There is a systematic trend that is worth reporting.
For the case of using unconstrained photon intensities, Method~D plus the
use of $Y_{{\mathtt X}\xspace,1,1}$ to bound the single photon-pair yield gives the
highest key rate over almost the whole range of distance $L$. Only when
close to the maximum transmission distance that the best rate is computed
using Method~C and $Y_{{\mathtt X}\xspace,1,1}$.
Whereas for the case of constrained photon intensities, for short transmission
distance, the best rate is computed using Method~D plus $Y_{{\mathtt Z}\xspace,1,1}$.
For longer transmission distance, the best rate is due to Method~B and
$Y_{{\mathtt X}\xspace,1,1}$.
\begin{table}[t]
\centering
\begin{tabular}{||c|c|c|c|c||}
\hline\hline
$L$/km & 0 & 50 & 100 & 150 \\
\hline
$(3,2)_G$ & $3.23\times 10^{-4}$ & $2.85\times 10^{-5}$ &
$2.44\times 10^{-6}$ & $1.51\times 10^{-7}$ \\
\hline
$(3,3)_R$ & $8.37\times 10^{-5}$ & $6.67\times 10^{-6}$ &
$4.33\times 10^{-7}$ & $1.27\times 10^{-8}$ \\
\hline
$(3,3)_G$ & $3.23\times 10^{-4}$ & $2.85\times 10^{-5}$ &
$2.44\times 10^{-6}$ & $1.51\times 10^{-7}$ \\
\hline
$(4,2)_G$ & $3.82\times 10^{-4}$ & $3.39\times 10^{-5}$ &
$2.89\times 10^{-6}$ & $1.78\times 10^{-7}$ \\
\hline
$(4,3)_G$ & $3.82\times 10^{-4}$ & $3.39\times 10^{-5}$ &
$2.89\times 10^{-6}$ & $1.78\times 10^{-7}$ \\
\hline
$(4,4)_R$ & $1.70\times 10^{-4}$ & $1.32\times 10^{-5}$ &
$8.27\times 10^{-7}$ & $2.64\times 10^{-8}$ \\
\hline
$(4,4)_G$ & $3.82\times 10^{-4}$ & $3.39\times 10^{-5}$ &
$2.89\times 10^{-6}$ & $1.78\times 10^{-7}$ \\
\hline\hline
\end{tabular}
\caption{Optimized secure key rates for $\ell_\text{raw} = 10^{10}$,
$\kappa = 10^{-15}$ and $\epsilon_\text{cor} = 10^{-10}$.
\label{T:keyrates_kappa}}
\end{table}
\section{Summary}
\label{Sec:Summary}
In summary, using the BB84 scheme in the MR\xspace channel as an example, I have
reported a key rate formula for MDI\xspace-QKD\xspace using Poissonian photon sources
through repeated use of the inversion of the Vandermonde matrix and a McDiarmid-type
inequality.
This method gives a provably secure key rate that is at least 2.25~times that
of the current state-of-the-art result. It also shows that using five photon
intensities, more precisely the $(4,2)$-method, gives an additional 18\%
increase in the key rate for the MR\xspace channel.
This demonstrates once again the effectiveness of using McDiarmid-type
inequality in statistical data analysis in physical problems.
All of the above holds provided that the photon source is sufficiently close to Poissonian.
Note that the Vandermonde matrix inversion technique is rather general. As
pointed out in Remark~\ref{Rem:C_property} in the Appendix, by modifying the
proof of Lemma~\ref{Lem:C_property}, one can show that $C_{3i}
\ge 0$ if $k$ is even and $C_{3i} < 0$ if $k$ is odd for all $i \ge k$.
Thus, I can find the lower bound of $Y_{{\mathtt B}\xspace,0,2}$ and $Y_{{\mathtt B}\xspace,2,0}$.
In other words, I can extend the key rate calculation to the case of
twin-field~\cite{LYDS18} or phase-matching MDI\xspace-QKD\xspace~\cite{MZZ18}.
Note further that Inequalities~\eqref{E:various_Y_and_e_bounds} are still
valid by replacing the ${\mathcal A}_{{\mathtt B}\xspace,j,i}^{\text{e}}$'s and
${\mathcal A}_{{\mathtt B}\xspace,j,i}^{\text{o}}$'s by their perturbed expressions through
standard matrix inversion perturbation as long as the photon sources are
sufficiently close to Poissonian.
In this regard, the theory developed here also applies to these sources.
Interested readers are invited to fill in the details.
Last but not least, it is instructive to extend this work to cover other
MDI\xspace-QKD\xspace protocols as well as more realistic quantum channels that take dead
time and imperfect beam splitters into account.
\appendix
\section{Auxiliary Results On Bounds Of Yields And Error Rates}
\label{Sec:results_on_Y_and_e_bounds}
I begin with the following lemma.
\begin{Lem}
Let $\mu_1,\mu_2,\cdots,\mu_k$ be $k \ge 2$ distinct real numbers. Then
\begin{equation}
\sum_{i=1}^k \frac{\mu_i^\ell}{\prod_{t\ne i} (\mu_i - \mu_t)} = 0
\label{E:auxiliary_sum}
\end{equation}
for $\ell = 0,1,\cdots,k-2$.
\label{Lem:sum}
\end{Lem}
\begin{proof}
Note that the L.H.S. of Eq.~\eqref{E:auxiliary_sum} is a symmetric function
of $\mu_i$'s.
Moreover, only its first two terms involve the factor $\mu_1-\mu_2$ in the
denominator. In fact, the sum of these two terms equals
\begin{displaymath}
\frac{\mu_1^\ell \prod_{t>2} (\mu_2 - \mu_t) - \mu_2^\ell \prod_{t>2} (\mu_1
- \mu_t)}{(\mu_1 - \mu_2) \prod_{t>2} [(\mu_1 - \mu_t) (\mu_2 - \mu_t)]} .
\end{displaymath}
By applying the remainder theorem, I know that the numerator of the above
expression is divisible by $\mu_1 - \mu_2$.
Consequently, the L.H.S. of Eq.~\eqref{E:auxiliary_sum} is a homogeneous
polynomial of degree $\le \ell - k + 1$. But as $\ell \le k-2$, this means
the L.H.S. of Eq.~\eqref{E:auxiliary_sum} must be a constant. By putting
$\mu_i = t^i$ for all $i$ and by taking the limit $t\to +\infty$, I conclude
that this constant is $0$. This completes the proof.
\end{proof}
Following the notation in Ref.~\cite{Chau18}, I define
\begin{equation}
C_{a+1,i} = \frac{(-1)^{k-a} a!}{i!} \sum_{t=1}^k \frac{\mu_t^i
S_{ta}}{\prod_{\ell\ne t} (\mu_t - \mu_\ell)} ,
\label{E:C_def}
\end{equation}
where
\begin{equation}
S_{ta} = \sideset{}{^{'}}{\sum} \mu_{t_1} \mu_{t_2} \cdots \mu_{t_{k-a-1}}
\label{E:S_in_def}
\end{equation}
with the primed sum being over all $t_j$'s $\ne t$ obeying $1 \le t_1 < t_2 <
\dots < t_{k-a-1} \le k$.
The following lemma is an extension of a result in Ref.~\cite{Chau18}.
\begin{Lem}
Let $\mu_1 > \mu_2 > \cdots > \mu_k \ge 0$.
Suppose $0\le i < k$. Then
\begin{equation}
C_{a+1,i} =
\begin{cases}
-1 & \text{if } a = i , \\
0 & \text{otherwise} .
\end{cases}
\label{E:C_value1}
\end{equation}
Whereas if $i \ge k$, then
\begin{equation}
\begin{cases}
C_{1i} \ge 0 \text{ and } C_{2i} < 0 & \text{if } k \text{ is even,} \\
C_{1i} \le 0 \text{ and } C_{2i} > 0 & \text{if } k \text{ is odd.}
\end{cases}
\label{E:C_value2}
\end{equation}
\label{Lem:C_property}
\end{Lem}
\begin{proof}
Using the same argument as in the proof of Lemma~\ref{Lem:sum}, I conclude
that $C_{a+1,i}$ is a homogeneous polynomial of degree $\le i-a$.
Consider the case of $i\le a$ so that $C_{a+1,i}$ is a constant. By putting
$\mu_t = \delta^t$ for all $t$ and then taking the limit $\delta\to 0^+$, it
is straightforward to check that $C_{a+1,i} = 0$ if $i < a$ and $C_{a+1,i} =
-1$ if $i = a$.
It remains to consider the case of $a < i < k$. I first consider the subcase
of $a = 0$. Here $C_{1i}$ contains a common factor of $\prod_{t=1}^k
\mu_t$, which is of degree $k > i$. Therefore, I could write $\prod_{t=1}^k
\mu_t = C_{1i} F$ where $F$ is a homogeneous polynomial of degree $\ge i$.
As a consequence, either $C_{1i}$ or $F$ contains $\mu_1$ and hence all
$\mu_t$'s. Thus, $C_{1i}$ must be a constant for $i < k$. By setting $\mu_k
= 0$, I know that $C_{1i} = 0$.
Next, I consider the subcase of $a = 1$. Since $i > 1$, from the findings of
the first subcase, I arrive at
\begin{align}
C_{2i} &= \frac{(-1)^{k-1}}{i!} \sum_{t=1}^k \frac{T_1 \mu_t^{i-1} - \left(
\prod_{\ell=1}^k \mu_\ell \right) \mu_t^{i-2}}{\prod_{\ell\ne t} (\mu_t -
\mu_\ell)} \nonumber \\
&= \frac{(-1)^{k-1} T_1}{i!} \sum_{t=1}^k \frac{\mu_t^{i-1}}{\prod_{\ell\ne
t} (\mu_t - \mu_\ell)} - C_{1,i-1} \nonumber \\
&= \frac{(-1)^{k-1} T_1}{i!} \sum_{t=1}^k \frac{\mu_t^{i-1}}{\prod_{\ell\ne
t} (\mu_t - \mu_\ell)} ,
\label{E:C_a=1}
\end{align}
where $T_1$ is the symmetric polynomial
\begin{equation}
T_1 = \sum_{t=1}^k \mu_1 \mu_2 \cdots \mu_{t-1} \mu_{t+1} \cdots \mu_k .
\label{E:T1_def}
\end{equation}
By Lemma~\ref{Lem:sum}, I find that $C_{2i} = 0$ as $i < k$.
The third subcase I consider is $a = 2$. As $i > 2$,
\begin{equation}
C_{3i} = \frac{2 (-1)^k}{i!} \sum_{t=1}^k \frac{T_2 \mu_t^{i-1} - T_1
\mu_t^{i-2} + \left( \prod_{\ell=1}^k \mu_\ell \right)
\mu_t^{i-3}}{\prod_{\ell\ne t} (\mu_t - \mu_\ell)} ,
\label{E:C_a=2}
\end{equation}
where
\begin{equation}
T_2 = \sideset{}{^{'}}{\sum} \mu_{t_1} \mu_{t_2} \cdots \mu_{t_{k-2}}
\label{E:T2_def}
\end{equation}
with the primed sum over all $t_j$'s with $1\le t_1 < t_2 < \dots < t_{k-2}
\le k$. By Lemma~\ref{Lem:sum}, I get $C_{3i} = 0$ as $i < k$.
By induction, the proof of the subcase $a=2$ can be extended to show the
validity for all $a \le k$ and $a < i < k$. This shows the validity of
Eq.~\eqref{E:C_value1}.
The proof of Eq.~\eqref{E:C_value2} can be found in Ref.~\cite{Chau18}. I
reproduce it here for easy reference.
By expanding the $1/(\mu_1 - \mu_t)$'s in $C_{ai}$ as a power series of
$\mu_1$ with all the other $\mu_t$'s fixed, I obtain
\begin{align}
C_{1i} &= \frac{(-1)^k}{i!} \left( \prod_{t=1}^k \mu_t \right) \left[
\mu_1^{i-k} \prod_{r=2}^k \left( 1 + \frac{\mu_r}{\mu_1} +
\frac{\mu_r^2}{\mu_1^2} + \cdots \right) \right. \nonumber \\
&\qquad \left. \vphantom{\prod_{r=2}^k \left( 1 + \frac{\mu_r}{\mu_1} +
\frac{\mu_r^2}{\mu_1^2} \right)}
+ f(\mu_2,\mu_3,\cdots,\mu_k) \right] + \BigOh(\frac{1}{\mu_1})
\label{E:C_1i_intermediate}
\end{align}
for some function $f$ independent of $\mu_1$.
As $C_{1i}$ is a homogeneous polynomial of degree $\le i$, by equating terms
in powers of $\mu_1$, I get
\begin{equation}
C_{1i} = \frac{(-1)^k}{i!} \left( \prod_{t=1}^k \mu_t \right)
\sum_{\substack{t_1+t_2+\dots+t_k = i-k, \\ t_1,t_2,\cdots,t_k \ge 0}}
\mu_1^{t_1} \mu_2^{t_2} \cdots \mu_k^{t_k}
\label{E:C_1i_explicit}
\end{equation}
for all $i\ge k$. As all $\mu_t$'s are non-negative, $C_{1i} \ge 0$ if
$k$ is even and $C_{1i} \le 0$ if $k$ is odd.
By the same argument, I expand all the $1/(\mu_1 - \mu_t)$ terms in $C_{2i}$
in powers of $\mu_1$ to get
\begin{align}
C_{2i} &= \frac{(-1)^{k-1} T_1}{i!}
\!\!\sum_{\substack{t_1+t_2+\cdots+t_k = i-k
\\ t_1 > 0, t_2,\cdots,t_k \ge 0}}
\mu_1^{t_1} \mu_2^{t_2} \cdots \mu_k^{t_k} \nonumber \\
& \quad + f'(\mu_2,\cdots,\mu_k)
\label{E:C_2j_intermediate}
\end{align}
for some function $f'$ independent of $\mu_1$. By recursively expanding
Eq.~\eqref{E:C_def} in powers of $\mu_2$ but with $\mu_1$ set to $0$, and
then in powers of $\mu_3$ with $\mu_1,\mu_2$ set to $0$ and so on, I conclude
that whenever $i \ge k$, then $C_{2i} < 0$ if $k$ is even and $C_{2i} > 0$ if
$k$ is odd.
This completes the proof.
\end{proof}
\begin{Rem}
By the same technique of expanding each factor of $1/(\mu_1 - \mu_t)$
in $C_{a+1,i}$ in powers of $\mu_1$, it is straightforward to show that if
$i \ge k$ and $j\ge 1$, then $C_{2j+1,i} \ge 0$ and $C_{2j,i} \le 0$ provided
that $k$ is even. And $C_{2j+1,i} \le 0$ and $C_{2j,i} \ge 0$ provided that
$k$ is odd.
\label{Rem:C_property}
\end{Rem}
The following theorem is an extension of a similar result reported in
Ref.~\cite{Chau18} by means of an explicit expression of the inverse of a
Vandermonde matrix.
\begin{Thrm}
Let $\mu_1 > \mu_2 > \cdots > \mu_k \ge 0$ and $\tilde{\mu}_1 > \tilde{\mu}_2 >
\cdots > \tilde{\mu}_{\tilde{k}} \ge 0$. Suppose
\begin{equation}
\sum_{a,b=0}^{+\infty} \frac{\mu_i^a}{a!} \frac{\tilde{\mu}_j^b}{b!} A_{ab}
\equiv \sum_{a,b=0}^{+\infty} M_{a+1,i} \tilde{M}_{b+1,j} A_{ab} =
B_{ij}
\label{E:A_B_relation}
\end{equation}
for all $i = 1,2,\cdots,k$ and $j = 1,2,\cdots,\tilde{k}$.
Then,
\begin{align}
A_{ab} &= \sum_{i=1}^k \sum_{j=1}^{\tilde{k}} \left( M^{-1} \right)_{a+1,i}
\left( \tilde{M}^{-1} \right)_{b+1,j} B_{ij} \nonumber \\
& \quad + \sum_{I=k}^{+\infty} C_{a+1,I} A_{Ib} +
\sum_{J=\tilde{k}}^{+\infty} \tilde{C}_{b+1,J} A_{aJ} \nonumber \\
& \quad - \sum_{I=k}^{+\infty} \sum_{J=\tilde{k}}^{+\infty} C_{a+1,I}
\tilde{C}_{b+1,J} A_{IJ}
\label{E:explicit_A_relation}
\end{align}
for all $a = 0,1,\cdots,k-1$ and $b = 0,1,\cdots,\tilde{k}-1$.
Here
\begin{equation}
\left( M^{-1} \right)_{a+1,i} = \frac{(-1)^{k-a-1} a! S_{ia}}{\prod_{t\ne i}
(\mu_i - \mu_t)}
\label{E:M_inverse}
\end{equation}
and similarly for $\left( \tilde{M}^{-1}
\right)_{b+1,j}$.
\label{Thrm:double_inversion}
\end{Thrm}
\begin{proof}
Note that for any fixed $a = 0,1,\cdots,k-1$ and any $j = 1,2,\cdots,
\tilde{k}$,
\begin{equation}
\sum_{b=0}^{+\infty} \frac{\tilde{\mu}_{j}^b}{b!} A_{ab} = \sum_{i=1}^k
\left( M^{-1} \right)_{a+1,i} \left( B_{ij} - \sum_{b=0}^{+\infty}
\sum_{I=k}^{+\infty} \frac{\mu_i^I}{I!} \frac{\tilde{\mu}_j^b}{b!} A_{Ib}
\right) .
\label{E:inversion_intermediate1}
\end{equation}
Here $M^{-1}$ is the inverse of the $k\times k$ matrix
$(M_{a+1,i})_{a+1,i=1}^k$. From Ref.~\cite{Chau18}, the matrix elements of
$M^{-1}$ are related to inverse of certain Vandermonde matrix and are given
by the expression immediately after Eq.~\eqref{E:explicit_A_relation}. From
Lemma~\ref{Lem:C_property}, Eq.~\eqref{E:inversion_intermediate1} can be
rewritten as
\begin{equation}
\sum_{b=0}^{+\infty} \frac{\tilde{\mu}_j^b}{b!} A_{ab} = \sum_{i=1}^k \left(
M^{-1} \right)_{a+1,i} B_{ij} + \sum_{b=0}^{+\infty} \sum_{I=k}^{+\infty}
\frac{\tilde{\mu}_j^b}{b!} C_{a+1,I} A_{Ib} .
\label{E:inversion_intermediate2}
\end{equation}
By repeating the above procedure again, I find that for any fixed $a=0,1,
\cdots,k-1$ and $b = 0,1,\cdots,\tilde{k}-1$,
\begin{align}
A_{ab} &= \sum_{i=1}^k \sum_{j=1}^{\tilde{k}} \left( M^{-1} \right)_{a+1,i}
\left( \tilde{M}^{-1} \right)_{b+1,j} B_{ij} \nonumber \\
&\quad - \sum_{I=k}^{+\infty} \sum_{\tilde{t}=0}^{+\infty} C_{a+1,I}
\tilde{C}_{b+1,\tilde{t}} A_{I\tilde{t}} + \sum_{J=\tilde{k}}^{+\infty}
\tilde{C}_{b+1,J} A_{aJ} .
\label{E:inversion_intermediate3}
\end{align}
Here the $\tilde{k}\times\tilde{k}$ matrix $\tilde{C}$ is defined in
exactly the same way as the $k\times k$ matrix $C$ except that the variable
$k$ and the $\mu_t$'s are replaced by $\tilde{k}$ and the corresponding
$\tilde{\mu}_t$'s.
Substituting Eq.~\eqref{E:C_value1} into the above equation gives
Eq.~\eqref{E:explicit_A_relation}.
\end{proof}
Applying Lemma~\ref{Lem:C_property} and Theorem~\ref{Thrm:double_inversion},
in particular Inequality~\eqref{E:C_value2}, I arrive at the following
two Corollaries.
\begin{Cor}
Suppose the conditions stated in Theorem~\ref{Thrm:double_inversion} are
satisfied. Suppose further that $A_{ab} = 0$ for all $b > 0$ and $a\ge 0$;
and $A_{a0} \in [0,1]$ for all $a$. Then
\begin{equation}
A_{00} \ge \sum_{i=1}^k \left( M^{-1} \right)_{1i} B_{i0} .
\label{E:A_0*_lower_bound}
\end{equation}
\label{Cor:Y0*}
\end{Cor}
\begin{Cor}
Suppose the conditions stated in Theorem~\ref{Thrm:double_inversion} are
satisfied. Suppose further that $A_{ab} \in [0,1]$ for all $a,b$. Then
\begin{subequations}
\begin{equation}
A_{00} \ge \sum_{i=1}^k \sum_{j=1}^{\tilde{k}} \left( M^{-1} \right)_{1i}
\left( \tilde{M}^{-1} \right)_{1j} B_{ij} - \sum_{I=k}^{+\infty}
\sum_{J=\tilde{k}}^{+\infty} C_{1I} \tilde{C}_{1J}
\label{E:A00_lower_bound}
\end{equation}
and
\begin{equation}
A_{11} \le \sum_{i=1}^k \sum_{j=1}^{\tilde{k}} \left( M^{-1} \right)_{2i}
\left( \tilde{M}^{-1} \right)_{2j} B_{ij}
\label{E:A11_upper_bound}
\end{equation}
provided both $k$ and $\tilde{k}$ are even. Furthermore,
\begin{equation}
A_{11} \ge \sum_{i=1}^k \sum_{j=1}^{\tilde{k}} \left( M^{-1} \right)_{2i}
\left( \tilde{M}^{-1} \right)_{2j} B_{ij} - \sum_{I=k}^{+\infty}
\sum_{J=\tilde{k}}^{+\infty} C_{2I} \tilde{C}_{2J}
\label{E:A11_lower_bound}
\end{equation}
\end{subequations}
if both $k$ and $\tilde{k}$ are odd.
\label{Cor:bounds_on_Yxx}
\end{Cor}
\begin{Rem}
Clearly, each of the bounds in the above Corollary is tight. Although the
conditions for attaining the bound in Inequality~\eqref{E:A11_upper_bound}
are not compatible with those for attaining the bounds in
Inequalities~\eqref{E:A00_lower_bound} and~\eqref{E:A11_lower_bound}, the
way I use these inequalities in Secs.~\ref{Sec:Rate}
and~\ref{Sec:Finite_Size} ensures that it is possible to attain all
these bounds in the key rate formula.
\label{Rem:tightness}
\end{Rem}
\begin{acknowledgments}
This work is supported by the RGC grant 17302019 of the Hong Kong SAR
Government.
\end{acknowledgments}
\end{document} |
\begin{document}
\centerline{\bf \large Boundary non-crossing probabilities for fractional Brownian motion with trend}
\centerline{Enkelejd Hashorva\footnote{Department of Actuarial Science, University of Lausanne, UNIL-Dorigny, 1015 Lausanne, Switzerland,
email:enkelejd.hashorva@unil.ch},
Yuliya Mishura\footnote{Department of Probability, Statistics and Actuarial Mathematics, National Taras Shevchenko University of Kyiv, 01601 Volodymyrska 64, Kyiv, Ukraine, email: myus@univ.kiev.ua },
and Oleg Seleznjev\footnote{
Department of Mathematics and Mathematical Statistics,
Ume{\aa} University, SE-901 87 Ume\aa, Sweden, email:oleg.seleznjev@matstat.umu.se}}
{\bf Abstract}: In this paper we investigate the boundary non-crossing probabilities of a fractional Brownian motion considering some general deterministic trend function. We derive bounds for non-crossing probabilities and discuss the case of a large trend function. As a by-product we solve a minimization problem related to the norm of the trend function.
{\bf Key Words}: boundary crossings; Cameron-Martin-Girsanov theorem; reproducing kernel Hilbert space; large deviation principle; Molchan martingale; fractional Brownian motion.
\section{Introduction}
Calculation of boundary crossing (or non-crossing) probabilities of Gaussian processes with trend is a long-established
and interesting topic of applied probability, see, e.g.,
\cite{Durb92,Wang97,Nov99,MR1787122, Wang2001,MR2009980,BNovikov2005,MR2028621,1103.60040,1137.60023,1079.62047,MR2443083,MR2576883,Janssen08} and references therein. Numerous applications concerned with the evaluation of boundary non-crossing probabilities relate to mathematical finance, risk theory, queueing theory, statistics, physics, biology among many other fields.
In the literature,
most contributions treat the case where the Gaussian process $X(t),t \ge 0$ is a Brownian motion, which allows one to calculate the boundary non-crossing probability $\pk{X(t)+ f(t) < u, t\in [0,T]}$,
for some trend function $f$ and two given constants $T,u>0$
by various methods (see, e.g., \cite{Alil2010,MR2752890}).
For particular $f$ including the case of a piecewise
constant function,
explicit calculations are possible, see, e.g., \cite{MR2065480}.
Those explicit calculations allow then to approximate the non-crossing probabilities for general $f$ and for $f$ being large, see \cite{MR2065480,MR2175400,MR2028621}.\\
In this paper the centered Gaussian process $X=B^H$ is a fractional Brownian motion (fBm) with Hurst index $H\in (0,1)$ for which no explicit calculations of the boundary non-crossing probability
are possible for most of the trend functions.\\
Therefore, our interest in this paper is on the derivation of upper and lower bounds for
$$P_f:=\pk{B^H(t) + f(t)\le u(t), t \in \mathbb{R}_+}$$
for some admissible trend functions $f$ and measurable functions $u: \mathbb{R}_+ \to \mathbb{R}$ such that $u(0)\ge0$.
In the following we shall consider \cEE{$f\not=0$} to belong to the reproducing kernel Hilbert space (RKHS) of $B^H$ which is denoted by $\rE{\mathcal{H}}$ defined by the covariance kernel of $B^H$ given as
\begin{eqnarray} \label{Rh}
\bE{R_{H}(s,t)}:=\E{B^H(s)B^H(t)}=\frac{1}{2}(t^{2H}+s^{2H}-|t-s|^{2H}),\quad t,s\geq0.
\end{eqnarray}
A precise description of $ \rE{{\mathcal{H}}} $ is given in Section 2,
where also the norm $\norm{f}_{ \rE{{\mathcal{H}}} }$ for $f\in \rE{{\mathcal{H}}} $ is defined; \rE{for notational simplicity we suppress the Hurst index $H$ and the specification of $\mathbb{R}_+$ avoiding the more common notation $\mathcal{H}_H(\mathbb{R}_+)$}.
The lack of explicit formulas (apart from $H=1/2$ case) for trend functions $f$ and given $u$ poses problems for judging the
accuracy of our bounds for $P_f$. A remedy for that is to consider the asymptotic performance of the bounds for trend functions $\gamma f$
with $\gamma \to 0$ and $\gamma \to \infty$.
The latter case is more tractable since \cEE{if for some $x_0$ we have $f(x_0)>0$, then (see Corollary \ref{corLDP} below)}
\begin{eqnarray}\label{LD}
\cEE{\ln P_{\gamma f} \ge -(1+o(1))\frac{\gamma^2}{2}\norm{{\wwF}}^2_{ \rE{{\mathcal{H}}} }, \quad \gamma \to \infty},
\end{eqnarray}
where ${\wwF} \in { \rE{{\mathcal{H}}} }, {\wwF}\ge f$ is such that it solves the following minimization problem
\begin{eqnarray} \label{OP}
\cEE{\text{ find the unique ${\wwF}\in { \rE{{\mathcal{H}}} }$ so that } \inf_{ g,f \in { \rE{{\mathcal{H}}} }, g \ge f}
\norm{g}_{ \rE{{\mathcal{H}}} }= \norm{{\wwF}}_{ \rE{{\mathcal{H}}} }.}
\end{eqnarray}
Clearly, \eqref{LD} does not show how to find ${\wwF}$, however it is very helpful for the derivation of upper and lower bounds for $P_f$
since it can be used to check their validity (at least asymptotically), and moreover, it gives further ideas how to proceed. \\
In this paper, for $f\in \rE{\mathcal{H}}$ with $f(x_0)>0$ for some $x_0>0$, we find explicitly for $H>1/2$ the unique solution ${\wwF}\in \rE{\mathcal{H}}$ of the minimization problem \eqref{OP};
for $H=1/2$ this has already been done in \cite{MR2016767}.
\rE{For the case $H \in (0,1/2)$, we determine again ${\wwF}$ under the assumption that ${\wwF}> f$}.
By making use of the Girsanov formula for fBm,
we derive in the main result presented in Theorem \ref{ThA} upper and lower bounds for $P_f$.
The paper is organized as follows: Section 2 briefly reviews some results from fractional calculus and related Hilbert spaces. We introduce weighted fractional integral operators, fractional kernels and briefly discuss the corresponding reproducing kernel Hilbert spaces.
The main result is presented in Section 3. Specific properties of fBm that are used in the proof of the main result are displayed in Section 4 followed then by two examples of the drifts, for $H>1/2$ and for $H<1/2$, when the main result holds.
Proofs are relegated to Section 5. A short Appendix concludes the article.
\section{Preliminaries}
This section reviews basic Riemann-Liouville fractional calculus; a classical reference on this topic is \cite{SamkoKM}.
We use also the notation and results from \cite{nualart},
\cite{bioks}, and \cite{Jost}. We proceed then with the RKHS of fBm.
\begin{defn}
Let $\alpha>0, T>0$. The (left-sided) Riemann-Liouville fractional integral operator of order $\alpha$ over interval $[0,T]$ (or over $\mathbb{R}_+$) is defined by
$$\left(I_{0+}^{\alpha}f\right)(t)=\frac{1}{\Gamma(\alpha)}\int_0^t (t-u)^{\alpha-1}f(u)du,\quad t\in[0,T] \quad (t\in \mathbb{R}_+), $$
where $\Gamma(\cdot)$ is the Euler gamma function. The corresponding right-sided integral operator on $[0,T]$ is defined by
$$\left(I_{T-}^{\alpha}f\right)(t)=\frac{1}{\Gamma(\alpha)}\int_t^T (u-t)^{\alpha-1}f(u)du, \quad t\in[0,T],
$$
and the right-sided integral operator
on $\mathbb{R}_+$ (also known as the Weyl fractional integral operator)
is defined by
$$\left(I_{\infty-}^{\alpha}f\right)(t)=\frac{1}{\Gamma(\alpha)}\int_t^\infty (u-t)^{\alpha-1}f(u)du,\quad t\in \mathbb{R}_+. $$
\end{defn}
Throughout the paper,
we suppose that $(I_{T-}^{\alpha}f)(t)=0,$ for $t>T$. Note that in the case $u^\alpha f(u)\in L_1(\mathbb{R}_+)$,
the integral $(I_{\infty-}^{\alpha}f)$
exists and belongs to $L_1(\mathbb{R}_+)$.
Next, for $p\geq1$, denote
\begin{equation*}
I_+^{\alpha}(L_p[0,T])=\{f: f=I_{0+}^{\alpha}\varphi \text{ for some } \varphi\in L_p[0,T]\},
\end{equation*}
\begin{equation*}\label{Eqn:2}
I_-^{\alpha}(L_p[0,T])=\{f: f=I_{T-}^{\alpha}\varphi \text{ for some } \varphi\in L_p[0,T]\},
\end{equation*}
and define similarly $I_-^{\alpha}(L_p(\mathbb{R}_+))$.
\cE{If} $0<\alpha<1$, then the function $\varphi$ used in the above definitions
(it is determined uniquely) coincides for almost all (a.a.)
$t\in[0,T]$ $(t\in \mathbb{R})$ with the left- (right-) sided Riemann-Liouville fractional derivative of $f$ of order $\alpha$. The derivatives are denoted by
$$(I_{0+}^{-\alpha}f)(t)=(\mathcal{D}_{0+}^{\alpha}f)(t)=\frac{1}{\Gamma(1-\alpha)}\frac{d}{dt}\left( \int_0^t(t-u)^{-\alpha}f(u) du \right),$$
$$(I_{\infty-}^{-\alpha}f)(t)=(\mathcal{D}_{\infty-}^{\alpha}f)(t)=-\frac{1}{\Gamma(1-\alpha)}\frac{d}{dt}\left( \int_t^{\infty}(u-t)^{-\alpha}f(u) du \right),$$
and
$$(I_{T-}^{-\alpha}f)(t)=(\mathcal{D}_{T-}^{\alpha}f)(t)=(\mathcal{D}_{\infty-}^{\alpha}f1_{[0,T]})(t),$$
respectively. Let $f\in I_-^{\alpha}(L_p(\mathbb{R}))$ or $I_{\pm}^{\alpha}(L_p[0,T]), p\geq1, 0<\alpha<1$. Then for the corresponding indices $0,T$, and $\infty$,
we have
$$I_{\pm}^{\alpha}\mathcal{D}_{\pm}^{\alpha}f=f.$$ In the case when $f\in L_1(\mathbb{R}_+)$,
we have $\mathcal{D}_{\pm}^{\alpha}I_{\pm}^{\alpha}f=f$ (\cite{SamkoKM}).
In the following we introduce weighted fractional integral operators, fractional kernels and briefly discuss the corresponding reproducing kernel Hilbert spaces.
\def\rE{C_1}{\rE{C_1}}
\def\rE{C_1}I{\bE{C_1^{-1}}}
Introduce weighted fractional integral operators by
$$(K_{0+}^H f)(t)= \rE{C_1} t^{H-1/2}(I_{0+}^{H-1/2}u^{1/2-H}f(u))(t),$$
$$(K_{0+}^{H,*} f)(t)= \rE{C_1}^{-1} t^{H-1/2}(I_{0+}^{1/2-H}u^{1/2-H}f(u))(t),$$
$$(K_{\infty-}^H f)(t)= \rE{C_1} t^{1/2-H}(I_{\infty-}^{H-1/2}u^{H-1/2}f(u))(t),$$
and
$$(K_{\infty-}^{H,*} f)(t)= \rE{C_1}^{-1} t^{1/2-H}(I_{\infty-}^{1/2- H}u^{H-1/2}f(u))(t),$$
where $ \rE{C_1} =\left(\frac{2H\Gamma(H+1/2)\Gamma(3/2-H)}{\Gamma(2-2H)}\right)^{1/2}$.
For $H=\frac{1}{2}$ we put $K_{0+}^H=K_{0+}^{H,*}=K_{\infty-}^{1/2}=K_{\infty-}^{1/2,*}=\mathbf{I}$, where $\mathbf{I}$ is the identity operator.
Let $H>\frac12$. If $u^{H-\frac12}f(u)\in L_1(\mathbb{R}_+)$,
then $K_{\infty-}^{H,*}K_{\infty-}^{H}f=f$. Furthermore, for $H<\frac{1}{2} $ and for such $f$ that $u^{\frac{1}{2}-H}f(u)\in L_1(\mathbb{R}_+) $,
we have that $K_{\infty-}^{H}K_{\infty-}^{H,*}f=f$. For ${H>\frac{1}{2}}$ and for such $f$ that $u^{H-\frac{1}{2}}f(u)\in I_{-}^{H-\frac{1}{2}}(L_p(\mathbb{R}_+)) $ for some $p\geq 1,$ we have that $K_{\infty-}^{H}K_{\infty-}^{H,*}f=f$. For $f\in L_2(\mathbb{R}_+)$ and $H\in(0,1)$,
$K_{0+}^{H}K_{0+}^{H,*}f=f$.
Denote $K_T^H f=K_{\infty-}^H(f 1_{[0,T]})$ and $K_T^{H,*}f=K_{\infty-}^{H,*}(f 1_{[0,T]})$. For $H\in(0,1)$ and $t > s$, define the
fractional kernel
$$K_H(t, s) := \rE{\frac{ \rE{C_1} }{\Gamma\Big(H+1/2\Big)}}\Big(\Big(\frac ts\Big)^{H-\frac12}(t-s)^{H-\frac12}-(H-\frac12)s^{\frac12-H}\int_s^t(u - s)^{H-\frac12}u^{H-\frac32}du\Big).
$$
For $H>\frac12$, the kernel $K_H(t, s)$
is simplified to $$K_H(t, s)
=\frac{ \rE{C_1} }{\Gamma\Big(H-\frac12\Big)}s^{\frac12-H}\int_s^t(u - s)^{H-\frac32}u^{H-\frac12}du.$$
In turn, introduce the fractional kernel
$$K_H^*(t, s) = \frac{1}{\rE{\rE{C_1} \Gamma(H+1/2)}}\Big(\Big(\frac ts\Big)^{H-\frac12}(t-s)^{\frac12-H}-(H-\frac12)s^{\frac12-H}\int_s^t(u - s)^{\frac12-H}u^{H-\frac32}du\Big).
$$
For $H<\frac12$, the kernel $K_H^*(t, s)$
is simplified to $$K_H^*(t, s)= \frac{s^{\frac12-H}}{\rE{ \rE{C_1} \Gamma(1/2-H)} }\int_s^t(u - s)^{-H-\frac12}u^{H-\frac12}du .$$
\rE{By direct calculations we obtain}
\begin{equation*}
(K_{\infty-}^H 1_{[0,t]})(s)=(K_t^H 1_{[0,t]})(s)=K_H(t, s)
\end{equation*}
and
\begin{equation*}
(K_{\infty-}^{H,*} 1_{[0,t]})(s)=(K_{t}^{H,*} 1_{[0,t]})(s)=K_H^*(t, s).
\end{equation*}
From the integration-by-parts formula for fractional integrals
$$\int_a^b g(x)I_{a+}^\alpha f(x)dx=\int_a^b f(x)I_{b-}^\alpha g(x)dx$$
for $f\in L_p[a,b]$, $g\in L_q[a,b]$ with $\frac1p+\frac1q\leq 1+\alpha$, we get that for $H>\frac12$ and $f\in L_p[0,t]$ with $p>1$
\begin{equation*}\int_0^t (K_{\infty-}^{H} 1_{[0,t]})(s)f(s)ds=\int_0^t (K_{0+}^{H} f)(s)ds
\end{equation*}
and for $H<\frac12$ and $f\in L_p[0,t]$ with $p>1$
\begin{equation*}
\int_0^t (K_{\infty-}^{H,*} 1_{[0,t]})(s)f(s)ds=\int_0^t (K_{0+}^{H,*} f)(s)ds.
\end{equation*}
\rE{Next,} we introduce the RKHS of fractional Brownian motion (corresponding results for finite interval are
described in detail in \cite{decreusefond},
\cite{nualart}, and \cite{bioks}).
Let \rE{$H \in (0,1)$ be fixed and} {recall that $R_H$ defined in (\ref{Rh}) can be defined also as follows}
$$R_H(t, s) =\int_0^{t\wedge s}K_H(t, u)K_H(s, u) du.$$
{ \begin{defn}
(\cite{bioks}) The reproducing kernel Hilbert space (RKHS) of the fractional Brownian motion \rE{on $[0,T]$}, denoted
by $\rE{\mathcal{H}}[0,T]$ is defined as the closure
of the vector space spanned by the set of functions ${R_H(t, \cdot), t\in [0,T]}$ with
respect to the scalar product
$\langle R_H(t, \cdot),R_H(s, \cdot)\rangle = R_H(t, s)$, $t, s \in[0,T].$
\end{defn}
\rE{In \cite{decreusefond} it is shown that}
$\rE{\mathcal{H}}[0,T]$ is the set of functions $f$ which
can be written as
$f(t) =\int_0^tK_H(t, s) {\phi} (s) ds$
for some $ {\phi} \in L_2([0, T]).$ By definition,
$\|f\|_{\rE{\mathcal{H}}[0,T]}=\| {\phi} \|_{L_2[0, T]}.$
Extending this definition to $\mathbb{R}_+$, we get the following definition of the RKHS $\rE{\mathcal{H}}:=\rE{\mathcal{H}}_H(\mathbb{R}_+)$.}
For any $H\in(0,1)$,
$ \rE{{\mathcal{H}}} $ is the set of functions $f$ which
can be written as
\begin{eqnarray}
f(t) =\int_0^t K_H(t, s) {\phi} (s) ds=\int_0^t(K_{\infty-}^H 1_{[0,t]})(s) {\phi} (s) ds
=\int_0^t (K_{0+}^{H} {\phi} )(s)ds
\end{eqnarray}
for some $\phi \in L_2(\mathbb{R}_+).$
Note that $f'(t)=(K_{0+}^{H} {\phi} )(t)$ \rE{and} $ {\phi} (t)=(K_{0+}^{H,*} {f'})(t)$,
therefore
$$\|f\|_{ \rE{{\mathcal{H}}} }=\| {\phi} \|_{L_2(\mathbb{R}_+)}=\|K_{0+}^{H,*} {f'}\|_{L_2(\mathbb{R}_+)}.$$
Next,
define the spaces $L_2^H(\mathbb{R}_+)$ in the following way:
$$L_2^H(\mathbb{R}_+)=\{f: K_{\infty-}^H|f|\in L_2(\mathbb{R}_+)\}.$$
If $H\in (0, \frac{1}{2})$,
we define further
$$\widetilde{L}_2^H(\mathbb{R}_+)=L_2^H(\mathbb{R}_+)\cap \Biggl\{f:\mathbb{R}_+\to\mathbb{R}:\int_0^T t^{1-2H}
\left(\int_T^{\infty}u^{H-1/2}f(u)(u-t)^{H-3/2}du \right)^2 dt \rightarrow 0 \text{ as } T\rightarrow\infty \Biggr\}.$$
For function $g$ that admits the representation $g(t)=\int_0^t g'(s)ds$ introduce the norm
\begin{eqnarray}\norm{g}=\norm{g'}_{L_2(\mathbb{R}_+)}.
\end{eqnarray}
\newcommand{\Abs}[1]{\Bigl\lvert #1 \Bigr\rvert}
\section{Main result}
In this section we study the boundary non-crossing probability
$P_f=\pk{B^H(t) + f(t) \le u(t),t\in \mathbb{R}_+}$ for $f\in \rE{{\mathcal{H}}} $ and a measurable function $u:\mathbb{R}_+ \to \mathbb{R}$ with $u(0)\ge 0$. Throughout this paper,
we assume that
$P_0=\pk{B^H(t)\le u(t), t\in \mathbb{R}_+} \in (0,1)$. In applications, see, e.g., \cite{1103.60040,1079.62047}
it is of interest to calculate the rate of decrease to 0 of $P_{\gamma f}$ as $\gamma \to \infty$
for some $f\in \rE{{\mathcal{H}}} $. On the other side, if $\norm{f}_{ \rE{{\mathcal{H}}} }$ is small, we expect that $P_f$ is close to $P_0$. Set below
$\alpha= \Phi^{-1}(P_0)$ where $\Phi$ is the distribution function of a $N(0,1)$ random variable. Our first result derives upper and lower bounds of $P_f$
for any $f\in \rE{{\mathcal{H}}} $.\\
\def\bE{g}{\bE{g}}
\begin{lem} \label{LemUL} For any $f\in \rE{{\mathcal{H}}} $
we have
\begin{eqnarray}\label{eq:00:2b}
\Abs{P_f - P_0} &\le \frac {1 }{\sqrt{2 \pi}} \norm{f}_{ \rE{{\mathcal{H}}} }.
\end{eqnarray}
If further $\bE{g}\in \rE{{\mathcal{H}}} $ is such that $\bE{g} \ge f$, then
\begin{eqnarray}\label{eq:WL}
\Phi(\alpha - \norm{\bE{g}}_{ \rE{{\mathcal{H}}} }) \le P_{\bE{g}}\le P_f \le \Phi(\alpha+ \norm{f}_{ \rE{{\mathcal{H}}} }).
\end{eqnarray}
\end{lem}
Clearly, \eqref{eq:00:2b} is useful only if $\norm{f}_{ \rE{{\mathcal{H}}} }$ is small. On the contrary, \rE{the lower bound of} \eqref{eq:WL} is important
for $f$ such that $\norm{f}_{ \rE{{\mathcal{H}}} }$ is large and
$\norm{\bE{g}}_{ \rE{{\mathcal{H}}} }>0$.
{Taking $g=\widehat f$, with $\widehat{f}$ being the solution of \eqref{OP}} {and noting that for any $\gamma >0$ we have $\widehat{\gamma f}= \gamma \widehat{f}$ for any $f\in \rE{\mathcal{H}}$, then the lower bound in \eqref{eq:WL} implies the following result:}
\begin{cor} \label{corLDP} For any $f\in \rE{{\mathcal{H}}} $ such that $f(x_0)>0$ for some $x_0\in (0,\infty)$ the claims in \eqref{LD}
and \eqref{OP} hold.
\end{cor}
The main result of this section is Theorem \ref{ThA} below which presents upper and lower bounds for $P_f$ under some restriction on $f$ and a general
measurable $u$ as above. Let the function $f$ {be} differentiable with derivative $f'\in L_2(\mathbb{R}_+)$. Then the operator $(K_{0+}^{H,*} f')$ is \rE{well-defined}. Consider the following assumptions on $f$:
\begin{itemize}
\item[(i)] $(K_{0+}^{H,*} f')\in L_2(\mathbb{R}_+)$, i.e., $f\in \rE{{\mathcal{H}}} .$
\item[(ii)] Let $h(t):= \int_0^t (K_{0+}^{H,*} f')(s)ds$. We assume that the smallest concave nondecreasing majorant $\widetilde{h}$ of the function $h$ has the right-hand derivative $\widetilde{h}'$ such that $\widetilde{h}'\in L_2(\mathbb{R}_+)$ and moreover the function $$K(t):=(K^{H,*}_{\infty-}\widetilde{h}')(t)$$ is nonincreasing, $K\in L_2^H(\mathbb{R}_+)$ for $H>\frac{1}{2}$ and $K\in \widetilde{L}_2^H(\mathbb{R}_+)$ for $H<\frac{1}{2}$, $$K(t)=o(t^{-H})\;\text{as}\; t\to\infty.$$
\item[(iii)] The function $\widetilde{h}'$ can be presented as $\widetilde{h}'(t)=(K_{0+}^{H,*} \hat{f}' )(t),\;t\in\mathbb{R}_+$,
for some $ \hat{f}' \in L_2(\mathbb{R}_+)$. Evidently, in this case the function $\widetilde{h}$
admits the representation $\widetilde{h}(t) = \int_0^t (K_{0+}^{H,*} \hat{f}' )(s)ds$.
Denote $\widehat{f}(t)=\int_0^t \hat{f}' (s)ds=\int_0^t(K_{0+}^{H} \widetilde{h}')(s)ds$.
\end{itemize}
\begin{thm} \label{ThA} 1. Under assumptions $(i)$--$(iii)$ {we have $\widehat{f}\in \rE{{\mathcal{H}}} $ and}
\begin{eqnarray}\label{thA:1}
P_f \le P_{f- \cE{\widehat f} }\exp\left(\int_0^\infty u(s)d(-K(s))-\frac{1}{2}\norm{\widetilde{h}}^2 \right).
\end{eqnarray}
\cE{2. Suppose that $u_{-}: \mathbb{R}_+ \to \mathbb{R}$ is such that $u_{-}(t)< u(t), t\in \mathbb{R}_+$.
If $H< 1/2$,
assume additionally that $ \cE{\widehat f} \ge f$.
Then for any $H\in (0,1)\setminus \{1/2\}$,
\begin{eqnarray}\label{thA:2}
P_f \ge P_{ \cE{\widehat f} } \geq \pk{ u_{-}(t) \le B^H(t)\leq u(t),t\in\mathbb{R}_+}
\exp\left(\int_0^\infty u_{-}(s)d(-K(s))-\frac{1}{2}\norm{\widetilde{h}}^2 \right)
\end{eqnarray}
holds, provided that $\int_0^\infty u_{-}(s)d(-K(s))$ is finite.}
\end{thm}
As we show below,
the upper and lower bounds above become (in the log scale) precise when ${f}$ is large.
\cEE{\begin{cor} \label{crA} Under the assumptions
and notation of Theorem \ref{ThA}, if further $f(x_0)>0$ for some $x_0 \in (0,\infty)$, then
\begin{eqnarray}\label{crA:1}
- \ln P_{\gamma f} \sim \frac{\gamma^2}{2} \norm{\widetilde{h}}^2, \quad \gamma \to \infty.
\end{eqnarray}
\end{cor}
}
As a by-product, we solve the minimization problem \eqref{OP}, namely we have
\cEE{\begin{cor} \label{crB} Under the assumptions
and notation of Theorem \ref{ThA}
\begin{eqnarray}\label{crB:1}
\cEE{\inf_{f,g\in { \rE{{\mathcal{H}}} },g \ge f} \norm{g}_{ \rE{{\mathcal{H}}} } = \norm{\cE{\widehat f}}_{ \rE{{\mathcal{H}}} }= \norm{\widetilde{h}}.}
\end{eqnarray}
\end{cor}
}
{\bf Remarks}: a) If $H\in (1/2, 1)$, then under conditions $(i)$--$(iii)$,
we find that $\cE{\widehat f}$ is the explicit solution of the minimization problem \eqref{OP}.\\
b) The case $H=1/2$ is discussed in \cite{BiHa1}, see also \cite{MR2016767}.\\
c) It follows from Lemma \ref{AppE} that for $H>\frac12$,
$ \cE{\widehat f} \ge f$ because it immediately follows from this lemma and inequality $\tilde{h}\geq h$ that $\hat{f}'\geq f'$.
\section{Auxiliary results}
For the proof of our main result,
we need to discuss several properties of fBm. This section discusses first
the relation between fBm, Molchan martingale and the underlying Wiener process. Then we consider the Girsanov theorem which is crucial for our analysis.
\subsection{Fractional Brownian motion, Molchan martingale and ``underlying'' Wiener process}
In what follows we consider continuous modification of fBm that exists due to well-known Kolmogorov's theorem.
Denote by $\mathcal{F}^{B^H}=\{\mathcal{F}_t^{B^H},t\in \mathbb{R}_+\}$ with $\mathcal{F}_t^{B^H}=\sigma\{B^H(s),0\leq s\leq t\}$ the filtration generated by $B^H$.
\rE{Below} we establish the following relation. According to \cite{nualart}, \cite{bioks}, \cite{Jost}, and \cite{NVV}, $B^H$ can be presented as
\begin{equation}\label{Eqn:4}
B^H(t)=\int_0^t(K_{\infty-}^H 1_{[0,t]})(s)dW(s)=\int_0^t(K_t^H 1_{[0,t]})(s)dW(s)=\int_0^t K_H(t,s)dW(s),
\end{equation}
where $W=\{W(t),t\in \mathbb{R}_+\}$ is an ``underlying'' Wiener process whose filtration coincides with $\mathcal{F}^{B^H}$. Evidently, \begin{equation}\label{Eqn:5}
W_t=\int_0^t(K_{\infty-}^{H,*}1_{[0,t]})(s)dB^H(s)=\int_0^t(K_t^{H,*}1_{[0,t]})(s)dB^H(s)=\int_0^tK_{H}^*(t,s)dB^H(s).
\end{equation}
Another form of relations (\ref{Eqn:4}) and (\ref{Eqn:5}) can be obtained in the following way. According to \cite{NVV}, we can introduce the kernel
\begin{eqnarray}\label{LH}
l_H (t,s)=\rE{\left(\frac{\Gamma(3-2H)}{2H\Gamma(3/2-H)^3\Gamma(H+1/2)}\right)^{1/2}}s^{1/2-H}(t-s)^{1/2-H} 1_{[0,t]}(s) ,\quad s, t\in\mathbb{R}_+
\end{eqnarray}
and consider the process
\begin{equation}\label{Eqn:7}
M^H(t)=\int_0^t l_H(t,s)dB^H(s),\quad t \in\mathbb{R}_+,\quad H\in(0,1).
\end{equation}
The process $M^H$ from (\ref{Eqn:7}) defines a Gaussian square-integrable martingale with square characteristics $\langle M^H\rangle(t)=t^{2-2H}$, $t\in\mathbb{R}_+$,
and with filtration $\mathcal{F}^{M^H}\equiv\mathcal{F}^H$. Then
the process $\widetilde{W}(t)=(2-2H)^{-1/2}\int_0^t s^{H-1/2} dM^H(s)$ is a Wiener process with the same filtration.
\begin{lem} \label{Lem1} The processes $\widetilde{W}$ and $W$ coincide.
\end{lem}
\begin{defn} (\cite{bioks}, \cite{Jost}, \cite{nualart}) Wiener integral w.r.t. fBm is defined for any $T\in \mathbb{R}_+$ and $H\in(0,1)$ as
\begin{eqnarray*}
\int_0^Tf(s)dB^H(s)&=&\int_0^T(K_{\infty-}^Hf 1_{[0,T]})(s)dW(s)
=\int_0^{\infty}(K_{\infty-}^Hf 1_{[0,T]})(s)dW(s)\\
&=&\int_0^{\infty}(K_T^Hf)(s)dW(s)=\int_0^T(K_T^Hf)(s)dW(s)
\end{eqnarray*}
and the integral $\int_0^Tf(s)dB^H(s)$ exists for $f\in L_2^H (\mathbb{R}_+).$
\end{defn}
Now we extend the notion of integration w.r.t.\ fBm on the $\mathbb{R}_+$ from $[0,T]$ by the following definition.
\begin{defn}\begin{equation}\label{Eqn:8}\int_0^\infty f(s)dB^H(s)=L_2\text{-}\lim\limits_{T\to\infty}\int_0^T f(s)dB^H(s),\end{equation}
if this limit exists.
\end{defn}
\begin{lem}\label{Lemma 2.2} Let function $f\in L_2^H(\mathbb{R}_+)$ for $H>\frac{1}{2}$ and $f\in\widetilde{L}_2^H(\mathbb{R}_+)$ for $H<\frac{1}{2}$.
Then the limit in the right-hand side of (\ref{Eqn:8}) exists \bE{and}
\begin{eqnarray}
\int_0^\infty f(s)dB^H(s)=\int_0^\infty (K^H_{\infty-} f)(s)dW(s).
\end{eqnarray}
\end{lem}
\begin{lem}\label{Lemma2.33} Let $h=h(t),t\in \mathbb{R}_+$,
be a nonrandom measurable function \bE{such that}
\begin{enumerate} \item $h\in L_2^H(\mathbb{R}_+)$ for $H>\frac{1}{2}$ and $h\in \widetilde{L}_2^H(\mathbb{R}_+)$ for $H<\frac{1}{2}$; \item $h$ is nonincreasing; \item $s^H h(s)\to 0 $ as $s\to\infty.$
\end{enumerate}
Then the integral $\int_0^\infty h(s)dB^H(s)$ exists in the sense of Lemma \ref{Lemma 2.2} and \bE{moreover}
\begin{eqnarray}
\int_0^\infty h(s)dB^H(s)=\int_0^\infty B^H(s)d(-h(s)),
\end{eqnarray}
where the integral in the right-hand side is a Riemann-Stieltjes integral with continuous integrand and nondecreasing integrator.
\end{lem}
\COM{{Next, we introduce few elements from convex analysis.}
Let $h:\mathbb{R}_+\to\mathbb{R}$ be a measurable function from the space $\mathcal{H}_{\frac{1}{2}}.$ Denote $\widetilde{h}$ the smallest nondecreasing concave majorant of $h$.
According to \cite{BiHa1}, $\widetilde{h}\in\mathcal{H}_{\frac{1}{2}},$ $\widetilde{h}\geq0$ and $\widetilde{h}$ for a.a.\
$t\in\mathbb{R}_+$ has a derivative $\widetilde{h}'$.
Furthermore, we can assume that $\widetilde{h}'$ is the right-hand derivative. As it was mentioned in \cite{BiHa1}, $\inf_{g\geq h,g\in\mathcal{H}_{\frac{1}{2}}}\norm{g}=\norm{\widetilde{h}}$ and $\norm{h}^2=\norm{\widetilde{h}}^2+\norm{h-\widetilde{h}}^2.$
}
\subsection{Girsanov theorem for fBm} Let $H\in(0,1)$. Consider a fBm with absolutely continuous drift $f$ that admits a following representation: $ B^H(t)+f(t)=B^H(t)+\int_0^t f'(s)ds.$
To annihilate the drift, there are two equivalent approaches. The first one is to assume that $K_{H}^*(t,\cdot)f'(\cdot)=(K_{0+}^{H,*}f')(\cdot)\in L_1[0,t]$ for any $t\in \mathbb{R}_+$, to equate \begin{equation}\label{Eqn:11}B^H(t)+f(t)=\widehat{B}^H(t),\end{equation}
where $\widehat{B}^H$ is the fBm with respect to the new probability measure,
and accordingly to \eqref{Eqn:5},
to transform (\ref{Eqn:11}) as
$$\int_0^t(K_{\infty-}^{H,*}1_{[0,t]})(s)dB^H(s)+\int_0^t(K_{\infty-}^{H,*}1_{[0,t]})f'(s)ds
=\int_0^t(K_{\infty-}^{H,*}1_{[0,t]})(s)d\widehat{B}^H(s),$$
or,
$$\int_0^tK_{H}^*(t,s)dB^H(s)+\int_0^tK_{H}^*(t,s)f'(s)ds
=\int_0^tK_{H}^*(t,s)d\widehat{B}^H(s),$$
or, at last,
$$W(t)+\int_0^t(K_{\infty-}^{H,*}1_{[0,t]})(s)f'(s)ds=W(t)+\int_0^t(K_{0+}^{H,*}f')(s)ds=\widehat{W}(t),$$
where $\widehat{W}=\{\widehat{W}_t,t\in\mathbb{R}_+\}$ is a Wiener process with respect to a new probability measure $Q$, say. The second one is to apply Girsanov's
theorem from \cite{Yu08LNotes}. We start with (\ref{Eqn:11}); suppose that $s^{\frac12-H}f'(s)\in L_1[0,t]$ for any $t\in\mathbb{R}_+$ and transform (\ref{Eqn:11}) as follows {(recall $l_H$ is defined in \eqref{LH})}:
$$M^H(t)+\int_0^t l_H(t,s)f'(s)ds=\widehat{M}^H(t).$$
Further, suppose that the function $q(t)=\int_0^t l_H(t,s)f'(s)ds$ admits the representation
\begin{equation}\label{equ.2.23}
q(t)=\int_0^t q'(s)ds.
\end{equation} Then
$$(2-2H)^{\frac{1}{2}}\int_0^t s^{\frac{1}{2}-H}dW(s)+\int_0^t q'(s)ds=(2-2H)^{\frac{1}{2}}\int_0^t s^{\frac{1}{2}-H}d\widehat{W}(s),$$
whence $ W(t)+{(2-2H)^{-1/2}}\int_0^tq'(s)s^{H-\frac{1}{2}}ds=\widehat{W}(t).$ Evidently, if the representation \eqref{equ.2.23} holds, then \begin{equation}\label{equ.2.24}{(2-2H)^{-1/2}}
\int_0^tq'(s)s^{H-\frac{1}{2}}ds=\int_0^tK_{H}^*(t,s)f'(s)ds=\int_0^t(K_{0+}^{H,*}f')(s)ds.\end{equation}
Now we give simple sufficient conditions of existence of $q'$ and $\int_0^tq'(s)s^{H-\frac{1}{2}}ds$. {The proof} consists in differentiation and integration by parts therefore \bE{it} is omitted.
\begin{lem} \label{lemdrif} (i) Let $H<\frac12 $. Suppose that {the} drift $f$ is absolutely continuous and for any $t>0$, the derivative
$|f'(s)|\leq C(t)s^{H-\frac32+\varepsilon}$, $s\leq t$, for some $\varepsilon>0$ and some nondecreasing function $C(t):\mathbb{R}_+\rightarrow\mathbb{R}_+$.
Then for any $t>0$,
$$q'(t)={\left(\frac{\Gamma(3-2H)}{2H\Gamma(3/2-H)^3\Gamma(H+1/2)}\right)^{1/2}}\int_0^t s^{1/2-H}(t-s)^{-1/2-H}f'(s)ds $$ and \eqref{equ.2.24} holds.
(ii) Let $H>\frac12 $. Suppose that the drift $f$
is absolutely continuous. Also, suppose that
there exists the continuous derivative $(s^{\frac12-H}f'(s))'$ and
$ (s^{\frac12-H}f'(s))'\rightarrow 0$ as $s\rightarrow 0$.
Then for any $t>0$
$$q'(t)={\left(\frac{\Gamma(3-2H)}{2H\Gamma(3/2-H)^3\Gamma(H+1/2)}\right)^{1/2}}\int_0^t (t-s)^{1/2-H}(s^{\frac12-H}f'(s))'ds $$ and \eqref{equ.2.24} holds.
\end{lem}
{For a drift $f$ as in Lemma \ref{lemdrif},}
$B^H(t)+\int_0^t f'(s)ds$ is fBm $\widehat{B}^H(t),$ $t\in\mathbb{R}_+$, say,
under such measure $Q$ that
\begin{equation}\begin{gathered}
\label{Eqn:12}\frac{dQ}{dP}=\exp\Big(-\int_0^\infty (K_{0+}^{H,*}f')(s)dW(s)-\frac{1}{2}\int_0^\infty|(K_{0+}^{H,*}f')(s)|^2 ds\Big)\\=\exp\Big(-\int_0^\infty (K_{0+}^{H,*}f')(s)dW(s)-\frac{1}{2}\|f\|^2_{ \rE{{\mathcal{H}}} }\Big)\end{gathered}\end{equation}
if (\ref{Eqn:12}) defines a new probability measure.
So, we get the following result.
\begin{thm}\label{thm4.1} If $f\in \rE{{\mathcal{H}}} $, then $B^H(t)+\int_0^t f'(s)ds=\widehat{B}^H(t), $ where $\widehat{B}^H(t)$ is a fBm under \bE{a} measure $Q$ that satisfies relation \eqref{Eqn:12}.
\end{thm}
\section{Examples of admissible drifts}
We present next two examples of drifts satisfying conditions $(i)$-$(iii)$.
\begin{exm}\label{exa4.1} In order to construct the drift, we start with $h$ and $\widetilde{h}$. Let $H>\frac{1}{2}$, $h(t)=\widetilde{h}(t)=\int_0^ t s^{1/2-H}e^{-s}ds.$ Note that $\widetilde{h}'(t)=t^{1/2-H}e^{-t},$ $t>0$, $\widetilde{h}'\in L_2(\mathbb{R}_+),$ $\widetilde{h}'>0$ and decreases on $\mathbb{R}_+$, therefore $\widetilde{h}$ is a concave function as well as $h$, and evidently, $\widetilde{h}$ is a smallest concave nondecreasing majorant of $h$. Further,
\begin{eqnarray*}
(K_{\infty-}^{H,*}\widetilde{h}')(t)&=&- \rE{C_1}I t^{1/2-H}\frac{d}{dt}\left(\int_t^\infty(\bE{z}-t)^{1/2-H}e^{-\bE{z}}d\bE{z}\right)\\
&=&- \rE{C_1}I t^{1/2-H} \frac{d}{dt}\left(\int_0^\infty {\bE{z}}^{1/2-H}e^{-\bE{z}-t}d\bE{z}\right)\\
&=& \rE{C_1}I \Gamma\Big(\frac{3}{2}-H\Big)t^{\frac{1}{2}-H}e^{-t} = \rE{C_1}I \Gamma\Big(\frac{3}{2}-H\Big)\widetilde{h}'(t).
\end{eqnarray*}
Consequently, the function $K(t):=(K_{\infty-}^{H,*}\widetilde{h}')(t)$ is nonincreasing, $$K_{\infty-}^H(K_{\infty-}^{H,*}\widetilde{h}')(t)=\widetilde{h}'(t)\in L_2(\mathbb{R}_+)$$ implying thus $K\in L_2^H(\mathbb{R}_+)$ and moreover, $K(t)t^H\to 0$ as $t\to \infty$. It means that condition $(ii)$ holds.
Denote $f$, yet the unknown drift, and let $q(t)=C_2\int_0^t s^{1/2-H}(t-s)^{1/2-H}f'(s)ds,$ \rE{with $C_2:=\rE{\frac{ \rE{C_1} }{\Gamma\Big(H+1/2\Big)}}$.}
Then $s^{H-1/2}q'(s)=h'(s)=s^{1/2-H}e^{-s},$ $q'(s)=s^{1-2H}e^{-s}$ and
\begin{eqnarray*} C_2\int_0^t(t-s)^{1/2-H}s^{1/2-H}f'(s)ds=\int_0^t s^{1-2H}e^{-s}ds
\end{eqnarray*}
and hence with $C_3=C_2B(\frac{3}{2}-H,H-\frac{1}{2})$ \rE{where $B(a,b)=\Gamma(a)\Gamma(b)/\Gamma(a+b)$}, we obtain
$$(H-\frac{1}{2})C_3\int_0^t s^{1/2-H}f'(s)ds=\int_0^t (t-s)^{H-1/2}s^{1-2H}e^{-s}ds$$
implying that
$$f(t)={\left(\frac{\Gamma\Big(\frac32-H\Big)}{2H\Gamma(2-2H)\Gamma\Big(H+\frac12\Big)}\right)^{-\frac12}}
\int_0^t s^{H-\frac{1}{2}}\int_0^s(s-\bE{z})^{H-\frac{3}{2}}{\bE{z}}^{1-2H}e^{-\bE{z}}d\bE{z}ds.$$
{Since} $(K_{0+}^{H,*}f')(t)= \rE{C_1} t^{H-1/2}q'(t)=t^{1/2-H}e^{-t}\in L_2(\mathbb{R}_+)$ condition $(i)$ holds. Condition $(iii)$ is clearly satisfied since
we can put $\cE{\widehat f}=f$. \cE{Note in particular that the assumption $\cE{\widehat f}\ge f$ if $H \in (0,1/2)$ also holds.}
\end{exm}
\begin{exm}
Let $H<\frac{1}{2}$ and put $h(t)=\widetilde{h}(t)=\int_0^t s^\gamma e^{-s}ds$ with some $0>\gamma>-\frac{1}{2}$ to have $h'$ and $\widetilde{h}'$ in $L_2(\mathbb{R}_+)$. Then, as before, $\widetilde{h}$ is a smallest nondecreasing concave majorant of $h$. Further, we may write
\begin{eqnarray*}
(K_{\infty-}^{H,*}\widetilde{h}')(t)&=& \rE{C_1}I t^{1/2-H}\int_t^\infty(\bE{z}-t)^{-H-1/2}{\bE{z}}^{H-1/2+\gamma}e^{-\bE{z}}d\bE{z}\\
&=&- \rE{C_1}I t^{1/2-H+\gamma}\int_1^\infty(\bE{z}-1)^{-H-1/2}{\bE{z}}^{H-1/2+\gamma}e^{-\bE{z}t}d\bE{z}
\end{eqnarray*}
and $K(t):=(K_{\infty-}^{H,*}\widetilde{h}')(t)$ is nonincreasing for $\frac{1}{2}-H+\gamma\leq0$, or $-\frac{1}{2}<\gamma\leq H-\frac{1}{2}.$ Moreover, for $\gamma=H-\frac{1}{2}$
$$|(K_{\infty-}^{H,*}\widetilde{h}')(t)|\leq \rE{C_1}I t^{1/2-H}e^{-t/2}\int_1^\infty {\bE{z}}^{H-3/2}d\bE{z},$$
$$K_{\infty-}^H(|K_{\infty-}^{H,*}\widetilde{h}'|)(t)=\widetilde{h}'(t)\in L_2(\mathbb{R}_+)$$
implying $K\in L_2^H (\mathbb{R}_+)$ and ${\lim_{t\to \infty}}K(t)t^H= 0$. Consequently, condition $(ii)$ holds. Similarly to Example \ref{exa4.1}, $$s^{H-1/2}q'(s)=h'(s)=s^{H-1/2}e^{-s},\;q'(s)=e^{-s}$$ and $C_2\int_0^t(t-s)^{1/2-H}s^{1/2-H}f'(s)ds=\int_0^t e^{-s}ds,$ whence
\begin{equation}\label{Eqn:21}\Big(\frac{1}{2}-H\Big)C_2\int_0^t (t-s)^{-1/2-H}s^{1/2-H}f'(s)ds=1-e^{-t}.\end{equation}
It follows from (\ref{Eqn:21}) that
$$\Big(\frac{1}{2}-H\Big)C_2B(H+\frac{1}{2},\frac{1}{2}-H)\int_0^t s^{1/2-H}f'(s)ds=\int_0^t(t-s)^{H-1/2}(1-e^{-s})ds.$$
Denote $C_4:=(\frac{1}{2}-H)B(H+\frac{1}{2},\frac{1}{2}-H).$
Then
$$\int_0^t s^{1/2-H}f'(s)ds=\rE{\frac{1}{C_4}}\int_0^t\frac{(t-s)^{H+1/2}}{H+1/2}e^{-s}ds,$$
and
$$t^{1/2-H}f'(t)=\rE{\frac{1}{C_4}}\int_0^t(t-s)^{H-1/2}e^{-s}ds,$$
whence
$$f'(t)=\rE{\frac{1}{C_4}}t^{H-1/2}\int_0^t(t-s)^{H-1/2}e^{-s}ds.$$
Consequently,
\begin{eqnarray*}
f(t)&=&\rE{\frac{1}{C_4}}\int_0^t s^{H-1/2}\int_0^s(s- {z})^{H-1/2}e^{- {z}}d {z}ds\\
&=&\rE{\frac{1}{C_4}}\int_0^t e^{- {z}}\int_{ {z}}^t s^{H-1/2}(s- {z})^{H-1/2}dsd\bE{z}.
\end{eqnarray*}
Clearly, $(K_{0+}^{H,*}f')(t)= \rE{C_1} t^{H-1/2}q'(t)=t^{H-1/2}e^{-t}\in L_2(\mathbb{R}_+),$ and condition $(i)$ holds. Condition $(iii)$ is evident.
\end{exm}
\section{Proofs}
\subsection{Proofs of auxiliary results}
{\bf Proof of Lemma \ref{Lem1}}: It was established in \cite{NVV} that fBm $B^H$ can be ``restored'' from $\widetilde{W}$ by the following formula
$B^H(t)=\int_0^t K_H(t,s)d\widetilde{W}(s)$,
but it means
$$\widetilde{W}(t)=\int_0^t(K_{\infty-}^{H,*} 1_{[0,t]})(s)dB^H(s)=W(t),$$
hence the proof follows. \qed
{\bf Proof of Lemma \ref{Lemma 2.2}}:
On one hand, we have that $\int_0^\infty(K^H_{\infty-} f)(s)dW(s)$ exists. On the other hand, we have the equality $\int_0^T f(s)dB^H(s)=\int_0^T(K_{\infty-}^H f 1_{[0,T]})(s)dW(s)$. At last,
$$\E{\Biggl(\int_0^\infty(K^H_{\infty-} f)(s)dW(s)-\int_0^T(K_{\infty-}^H f 1_{[0,T]})(s)dW(s) \Biggr)^2}$$
\begin{equation}\label{Eqn:9}=\int_T^\infty((K_{\infty-}^H f)(s))^2 ds+\int_0^T((K_{\infty-}^H f)(s)-(K_T^H f)(s))^2 ds.\end{equation}
Since $f\in L_2^H(\mathbb{R}_+),$ we have that $\int_T^\infty((K_{\infty-}^H f)(s))^2 ds\to 0,$ $T\to\infty.$ Further, let $H>\frac{1}{2}.$ Then
\begin{equation}\label{Eqn:10}\int_0^T((K_{\infty-}^H f-K_T^H f)(s))^2 ds=\rE{C_1}\int_0^T s^{1-2H}\left(\int_T^\infty f(t)t^{H-\frac{1}{2}}(t-s)^{H-\frac{3}{2}}dt\right)^2 ds.\end{equation}
Since $|f|\in L_2^H (\mathbb{R}_+)$ together with $f$, we have that for any $s\leq T$
$$\int_T^\infty|f(t)|t^{H-\frac{1}{2}}(t-s)^{H-\frac{3}{2}}dt\to 0\text{ as }T\to\infty$$
and is dominated by $\int_s^\infty |f(t)|t^{H-1/2}(t-s)^{H-3/2}dt.$ Therefore, the right-hand side of (\ref{Eqn:10}) tends to $0$ due to the Lebesgue dominated convergence theorem.
Next, for $0<H<\frac{1}{2}$ and by the definition of $\widetilde{L}_2^H(\mathbb{R}_+)$, we have
\begin{eqnarray*}
\int_0^T((K_{\infty-}^H f)(s)-(K_T^H f)(s))^2 ds&=&C_1^2\int_0^T s^{1-2H}\Big(\frac{d}{ds}\Big(\int_s^\infty u^{H-\frac{1}{2}} f(u)(u-s)^{H-\frac{1}{2}}du\Big)\\
&&-\frac{d}{ds}\Big(\int_s^T u^{H-\frac{1}{2}}f(u)(u-s)^{H-\frac{1}{2}}du\Big)\Big)^2ds\\
&=&C_1^2\int_0^T s^{1-2H}\Big(\int_T^\infty u^{H-\frac{1}{2}} f(u)(u-s)^{H-\frac{3}{2}}du\Big)^2 ds\\
& =& C_1^2\int_0^T s^{1-2H}\Big(\int_T^\infty u^{H-\frac{1}{2}}f(u)(u-s)^{H-\frac{3}{2}}du\Big)^2 ds\to 0
\end{eqnarray*}
as $T\to\infty$ implying that the right-hand side of (\ref{Eqn:9}) vanishes as $T\to\infty$, hence the claim follows. $
\Box$\\
{\bf Proof of Lemma \ref{Lemma2.33}}: According to Lemma \ref{Lemma 2.2}, under condition 1) the integral $\int_0^\infty h(s)dB^H(s)$ exists,
\begin{equation}\label{Eqn:16}\int_0^\infty h(s) dB^H(s)=\int_0^\infty(K^H_{\infty-} f)(s)dW(s)=L_2\text{-}\lim_{T\to\infty}\int_0^T f(s)dB^H(s).\end{equation}
Further, it was mentioned in \cite{Jost} that $\int_0^T h(s)dB^H(s)$ is an $L_2$-limit of the corresponding integrals for the elementary functions:
\begin{eqnarray} \label{Eqn:17}
\int_0^T h(s) dB^H(s)&=&L_2\text{-}\lim_{|\pi|\to 0}\sum_{i=1}^{N} h(s_{i-1})(B^H(s_i)-B^H(s_{i-1}))\notag\\
&=&L_2\text{-}\lim_{|\pi|\to 0}\Big(\sum_{i=1}^{N}B^H(s_i)(h(s_{i-1})-h(s_i))+B^H(T)h(T)\Big)\notag\\
&=&\int_0^T B^H(s)d(-h(s))+B^H(T)h(T).
\end{eqnarray}
{In view of} (\ref{Eqn:16}), the limit
in the right-hand side of (\ref{Eqn:17}) exists and due to condition 3),
it equals $\int_0^\infty B^H(s)d(-h(s)),$ whence the proof follows. $
\Box$\\
\subsection{{Proofs} of the main results}
{\it Proof of Lemma \ref{LemUL}}:
If $f=0$, then $\norm{f}_{ \rE{{\mathcal{H}}} }=0$, hence the first claim follows. Assume therefore that $\norm{f}_{ \rE{{\mathcal{H}}} }>0$.
In view of \cite{Mandjes07} (see page 47 and 48 therein),
a standard {fBm} $B_H(t),t \ge 0$ can be
realized in the separable Banach space
$$E=\biggl\{\omega: \mathbb{R} \to \mathbb{R}, \text{ continuous}, \quad \omega(0)=0, \quad \lim_{t \to \infty} \frac{\lvert \omega(t) \rvert }{1+ t}=0\biggr\}$$
equipped with the norm $\norm{\omega}_E= \sup_{t\ge 0} \frac{\lvert \omega(t) \rvert}{1+ t}$.
Consequently, Theorem 1' in \cite{LiKuelbs} can be applied, hence
\begin{eqnarray}\label{eq:WL0}
\Phi(\alpha - \norm{f}_{ \rE{{\mathcal{H}}} }) \le P_f \le \Phi(\alpha+ \norm{f}_{ \rE{{\mathcal{H}}} }).
\end{eqnarray}
Since for any $\bE{g} \ge f$ we have $P_{\bE{g}}\le P_f$,
then the claim in \eqref{eq:WL} follows.
Next, in view of \eqref{eq:WL0},
we have by the mean value theorem (see also Lemma 5 in \cite{Janssen08})
\begin{eqnarray*}
P_f- P_0 &\le & \Phi(\alpha + \norm{f}_{ \rE{{\mathcal{H}}} })- \Phi(\alpha)
= \norm{f}_{ \rE{{\mathcal{H}}} } \Phi'(c)\le \frac{\norm{f}_{ \rE{{\mathcal{H}}} }}{\sqrt{2 \pi}}
\end{eqnarray*}
for some real $c$ and similarly using again \eqref{eq:WL0},
\begin{eqnarray*}
P_f- P_0 &\ge & \Phi(\alpha - \norm{f}_{ \rE{{\mathcal{H}}} })- \Phi(\alpha)
\ge -\frac{\norm{f}_{ \rE{{\mathcal{H}}} }}{\sqrt{2 \pi}},
\end{eqnarray*}
hence the proof is complete.
$
\Box$\\
\cEE{{\it Proof of Corollary \ref{corLDP}:} In view of \eqref{eq:WL} we have for any $\gamma >0$ and any $g\in { \rE{{\mathcal{H}}} }, g\ge f$
\begin{eqnarray*}
P_{\gamma f} \ge P_{\gamma g} \ge \Phi(\alpha - \gamma \norm{g}_{ \rE{{\mathcal{H}}} }).
\end{eqnarray*}
Since $g(x_0)>0$ follows from $f(x_0)>0$, then $\norm{g}_ { \rE{{\mathcal{H}}} }>0$, hence for all $\gamma$ large
$$ \ln P_{\gamma f} \ge - (1+o(1))\frac{\gamma^2}{2} \inf_{ g\in { \rE{{\mathcal{H}}} }, g\ge f} \norm{g}_{ \rE{{\mathcal{H}}} }^2.$$
Since the norm is a convex function and the set $A_f:=\{g\in { \rE{{\mathcal{H}}} }, g\ge f\}$ is convex, then the minimization problem \eqref{OP}
has a unique solution $\cE{\widehat f}$, and thus the proof is complete. $
\Box$\\}
{\bf Proof of Theorem \ref{ThA}:} Define the function $h(t)=\int_0^t h'(s) ds$ with $$h'(s)=f_H(s):=(K_{0+}^{H,*}f')(s)$$
and introduce its smallest concave nondecreasing majorant $\widetilde{h}$.
As shown in \cite{BiHa1} $\widetilde{h}(t)=\int_0^t \widetilde{h}'(s) ds$
and
$$\norm{h}^2:=\int_0^\infty (h'(s))^2ds=\int_0^\infty (f_H(s))^2 ds=\|f\|^2_{ \rE{{\mathcal{H}}} }
=\norm{\widetilde{h}}^2 +\norm{h-\widetilde{h}}^2 .$$
Next, let the probability measure $Q$ be defined by the relation
\begin{eqnarray}\label{Eqn:18}
\frac{dQ}{dP}&=&\exp\left(-\int_0^\infty f_H(s) dW(s)-\frac{1}{2}\norm{h}^2\right)
= \exp\left(-\int_0^\infty f_H(s) d \widehat{W}(s)+\frac{1}{2}\norm{h}^2\right),
\end{eqnarray}
where $W$ is the ``underlying'' Wiener process, $d\widehat{W}=dW+f_H(s)ds,$ $\widehat{W}$ is a Wiener process w.r.t. the measure $Q$. Note that (\ref{Eqn:18}) defines {a} probability measure since $ f_H\in L_2(\mathbb{R}_+)$, due to $(i)$ and Theorem \ref{thm4.1}. Then
\begin{eqnarray*}
P_f&=&\mathbb{E}_Q \left\{\mathbb{I}\{B^H(t)+f(t)\leq u(t),t\in\mathbb{R}_+\}\frac{dP}{dQ} \right\}\\
&=&\mathbb{E}_Q \left\{\mathbb{I}\{\widehat{B}^H(t)\leq u(t),t\in\mathbb{R}_+\} \exp\left(\int_0^\infty f_H(s)
d \widehat{W}(s)-\frac{1}{2}\norm{h}^2\right) \right\}
\\
&=&\mathbb{E}\left\{ \mathbb{I}\{B^H(t)\leq u(t),t\in\mathbb{R}_+\}\exp\left(\int_0^\infty f_H(s)
d \widehat{W}(s)-\frac{1}{2}\norm{h}^2\right)\right\} .
\end{eqnarray*}
Furthermore,
\begin{eqnarray*}
\int_0^\infty f_H(s)dW(s)=\int_0^\infty( f_H(s)-\widetilde{h}'(s))dW(s)+\int_0^\infty \widetilde{h}'{(s)}dW(s)\\=\int_0^\infty(h'(s)-\widetilde{h}'(s))dW(s)+\int_0^\infty \widetilde{h}'{(s)}dW(s).
\end{eqnarray*}
Next setting $K(t):=(K^{H,*}_{\infty-}\widetilde{h}')(t)$, we have
$$\int_0^\infty \widetilde{h}'(s)dW(s)=\int_0^\infty(K^{H,*}_{\infty-}\widetilde{h}')(s)dB^H(s)=
\int_0^\infty {K(s)} dB^H(s) $$ and both integrals are correctly defined.
Indeed, $\widetilde{h}'\in L_2(\mathbb{R}_+)$ implying that $\int_0^\infty \widetilde{h}'(s)dW(s)$ exists.
Moreover,
{in view of $(ii)$},
$$ {K}\in L_2^H(\mathbb{R}_+)$$ for $H>\frac{1}{2}$ and $ {K}\in \widetilde{L}_2^H(\mathbb{R}_+)$ for $H<\frac{1}{2}$,
therefore $\int_0^\infty {K}(s)dB^H(s)$ exists, according to Lemma \ref{Lemma 2.2} and, furthermore,
equality (\ref{Eqn:17}) holds. {In the light of} Lemma \ref{Lemma2.33},
we get $$\int_0^\infty K(s)d {B^H(s)}=\int_0^\infty B^H(s) d(-K(s)).$$
Consequently, condition $(iii)$ implies (set $I_{K,u}:= \int_0^\infty u(s)d(-K(s))$)
\begin{eqnarray*}
P_f&=&\mathbb{E}\Biggl\{\mathbb{I}\{B^H(t)\leq u(t),t\in\mathbb{R}_+\}
\exp\Biggl(\int_0^\infty(h'(s)-\widetilde{h}'(s))dW(s) -\frac{1}{2}\norm{h-\widetilde{h}}^2
+ {\int_0^\infty B^H(s)d(-K(s))}-\frac{1}{2}\norm{\widetilde{h}}^2\Biggr)\Biggr\}\\
&\cE{\le}&\mathbb{E}\Biggl\{\mathbb{I}\{B^H(t)\leq u(t),t\in\mathbb{R}_+\}
\exp\Biggl(\int_0^\infty(h'(s)-\widetilde{h}'(s))dW(s) -\frac{1}{2}\norm{h-\widetilde{h}}^2 +I_{K,u}-\frac{1}{2}\norm{\widetilde{h}}^2\Biggl)\Biggr\}\\
&=&\exp\Bigg(I_{K,u}-\frac{1}{2}\norm{\widetilde{h}}^2\Bigg) \mathbb{E}\Bigg(\mathbb{I}\{B^H(t)\leq u(t),t\in\mathbb{R}_+\}\\
&& \times \exp\Bigg(\int_0^\infty ((K_{0+}^{H,*}f')(s)-(K_{0+}^{H,*}\widehat{h})(s))dW(s) -\frac{1}{2}\int_0^\infty ((K_{0+}^{H,*}f')(s)-(K_{0+}^{H,*}\widehat{h})(s))^2ds\Bigg)\Bigg)\\
&=&\exp \Bigg(I_{K,u}-\frac{1}{2}\norm{\widetilde{h}}^2\Bigg)P_{f-\cE{\widehat f}}.
\end{eqnarray*}
So, the upper bound \eqref{thA:1} is proved. In order to prove \eqref{thA:2}, \cE{note that in view of Lemma \ref{AppE} for $H\in (1/2, 1)$
\begin{eqnarray*}
\widehat f \ge f,
\end{eqnarray*}
which is also assumed to hold if $H \in (0,1/2)$. Clearly the above inequality
implies that $P_f \ge P_{\widehat f}$.
As above, we have for some function $u_{-}(t) < u(t),t\in \mathbb{R}_+$
\begin{eqnarray*}
P_{\widehat f}&=& \mathbb{E}\Bigg\{\mathbb{I}\{B^H(t)\leq u(t),t\in\mathbb{R}_+\}
\exp\Bigg(\cE{\int_0^\infty B^H(s)d(-K(s))}-\frac{1}{2}\norm{\widetilde{h}}^2\Bigg)\Bigg\}\\
&\ge & \mathbb{E}\Bigg\{\mathbb{I}\{u_{-}(t) \le B^H(t)\leq u(t),t\in\mathbb{R}_+\}
\exp\Bigg(\cE{\int_0^\infty B^H(s)d(-K(s))}-\frac{1}{2}\norm{\widetilde{h}}^2\Bigg)\Bigg\}\\
&\ge & \mathbb{E}\Bigg\{\mathbb{I}\{u_{-}(t) \le B^H(t)\leq u(t),t\in\mathbb{R}_+\}
\exp\Bigg(\cE{\int_0^\infty u_{-}(s)d(-K(s))}-\frac{1}{2}\norm{\widetilde{h}}^2\Bigg)\Bigg\}\\
&\ge & P( u_{-}(t) \le B^H(t)\leq u(t),t\in\mathbb{R}_+)
\exp\Bigg(\cE{\int_0^\infty u_{-}(s)d(-K(s))}-\frac{1}{2}\norm{\widetilde{h}}^2\Bigg),
\end{eqnarray*}
hence the proof is complete.} $
\Box$\\
{{\bf Proof of Corollary \ref{crA}:} \cE{Since ${\wwF} \ge f$ and $f(x_0)>0$, then $\norm{{\wwF}}>0$ \rE{and further for any measurable function $u: \mathbb{R}_+ \to \mathbb{R}$ with $u(0)>0$}
\begin{eqnarray*}
\lim_{\gamma \to \infty} P_{\gamma f - \widehat{\gamma f}}&=& \lim_{\gamma \to \infty} P_{\gamma f - \gamma \cE{\widehat f}}\\
&=& \lim_{\gamma \to \infty} P( B^H(t) + \gamma (f(t)- \cE{\widehat f}(t)) \le u(t), t\in \mathbb{R}_+) \\
&=& P( B^H(t) \le u(t), t\in \mathbb{R}_+: f(t)= \cE{\widehat f}(t)) >0.
\end{eqnarray*}
By Theorem \ref{ThA} for all $\gamma$ large,
\begin{eqnarray*}
P_{\gamma f} &\le & P_{\gamma f - \gamma {\wwF}} \exp\Biggl( - \frac{1}{2} \gamma^2 \norm{\widetilde{h}}^2 +
\gamma \int_0^\infty u(s) \, d (- K(s))\Biggr)\\
&=& P_{\gamma f - \gamma {\wwF}} \exp\Biggl( - \frac{1}{2} \gamma^2 \norm{\widetilde{h}}^2 (1+ o(1))\Biggr),
\end{eqnarray*}
hence as $\gamma \to \infty$,
\begin{eqnarray*}
\ln P_{\gamma f} &\le & - \frac{1}{2} \gamma^2 \norm{\widetilde{h}}^2 (1+ o(1)) +\ln P_{\gamma f - \gamma {\wwF}}
= - \frac{1}{2} \gamma^2 \norm{\widetilde{h}}^2 (1+ o(1)).
\end{eqnarray*}
It is clear that we can find $u_{-}$ with $u_{-}(t)< u(t)$, $t\in (0,\infty)$, such that $\int_{0}^\infty u_{-}(t) d (-K(t))$ is finite and
$\pk{u_{-}(t) < B_H(t) \le u(t), t\in R_{+}}>0$. Applying again Theorem \ref{ThA} for such $ u_{-}$ we have
\begin{eqnarray*}
\ln P_{\gamma f} & \ge & -\frac{1}{2} \gamma^2 \norm{\widetilde{h}}^2 (1+o(1))
\end{eqnarray*}
as $\gamma\to \infty$, and thus the claim follows.} $
\Box$\\
{\bf Proof of Corollary \ref{crB}:} In view of \eqref{LD} and the result of Corollary \ref{crA}, we have
$$ \frac{1}{2} \gamma^2 \inf_{g\in \rE{\mathcal{H}}, g\ge f} \norm{g}^2_{ \rE{{\mathcal{H}}} } \sim \frac{1}{2} \gamma^2 \norm{\widetilde{h}}^2 $$
as $\gamma \to \infty$. Since further $\norm{{\wwF}}_{H}= \norm{\widetilde{h}}$ and the solution of the minimization problem is unique, then
$\cE{\widehat f}$ is its solution, thus the claim follows. $
\Box$\\
\section{Appendix}
We present next one technical result.
\begin{lem} \label{AppE} Let $H\in (1/2,1)$ {and suppose that the function} $g:\mathbb{R}_+\rightarrow \mathbb{R}_+$
{is such that} $g(t)=\int_0^t \rE{(K_{0+}^{H,*} f')}(s) ds$
for some $f$ such that $\rE{(K_{0+}^{H,*} f')}\in L_2(\mathbb{R}_+)$ and $f(0)=0$. Then $f(t)\geq 0, t\in \mathbb{R}_+$, holds.
\end{lem}
{\bf Proof}: We have that $$g(t)=\int_0^t\mathcal{D}_{0+}^{H-\frac12}(f'(u)u^{\frac12-H})(s)s^{H-\frac12}ds, \quad
\text{ with }f'(u)u^{\frac12-H}=I_{0+}^{H-\frac12}(g'(t)t^{\frac12-H})(u)$$
and
\begin{equation}\label{eq7.1}\begin{gathered}
f(u)=\int_0^us^{H-\frac12}I_{0+}^{H-\frac12}(g'(t)t^{\frac12-H})(s)ds\\
=\Big(\Gamma \Big(H-\frac12\Big)\Big)^{-1}\int_0^u\Big(\int_s^uz^{H-\frac12}(z-s)^{H-\frac32}dz\Big) g'(s)s^{\frac12-H}ds.
\end{gathered}
\end{equation}
Setting $r(s)=s^{\frac12-H}\int_s^uz^{H-\frac12}(z-s)^{H-\frac32}dz$, we may further write
\begin{eqnarray*}
f(u)&=&-\Big(\Gamma \Big(H-\frac12\Big)\Big)^{-1}\int_0^ug(s)r'(s)ds
\end{eqnarray*}
and
\begin{eqnarray*}
-r'(s)&=&-\Bigl(s^{\frac12-H}\int_s^uz^{H-\frac12}(z-s)^{H-\frac32}dz \Bigr)'_s =-(s^{\frac12-H}\int^{u-s}_0(z+s)^{H-\frac12}z^{H-\frac32}dz)'_s
\\
&=&(H-\frac12)s^{-\frac12-H}\int^{u-s}_0(z+s)^{H-\frac12}z^{H-\frac32}dz +s^{\frac12-H}u^{H-\frac12}(u-s)^{H-\frac32}
\\
&&-(H-\frac12)s^{\frac12-H} \int^{u-s}_0(z+s)^{H-\frac32}z^{H-\frac32}dz\\
&=&s^{\frac12-H}u^{H-\frac12}(u-s)^{H-\frac32}
+\cE{\int^{u-s}_0}(H-\frac12)s^{-\frac12-H}(z+s)^{H-\frac32}z^{H-\frac12}dz>0,
\end{eqnarray*}
whence the claim follows. $
\Box$\\
\textbf{Acknowledgments.} E.\ Hashorva and Y. Mishura acknowledge support from
the Swiss National Science Foundation Grant 200021-1401633/1. E.\ Hashorva kindly acknowledges partial support from the project RARE -318984, a Marie Curie IRSES Fellowship within the 7th European Community Framework.
\end{document}
\begin{document}
\title{Existence of nonparametric solutions for a capillary problem in warped products}
\author{Jorge H. Lira, Gabriela A. Wanderley}
\maketitle
\begin{abstract}
We prove that there exist solutions for a non-parametric capillary problem in a wide class of Riemannian manifolds endowed with a Killing vector field. In other terms, we prove the existence of Killing graphs with prescribed mean curvature and prescribed contact angle along its boundary. These results may be useful for modelling stationary hypersurfaces under the influence of a non-homogeneous gravitational field defined over an arbitrary Riemannian manifold.
\noindent {\bf MSC:} 53C42, 53C21.
\noindent {\bf Keywords:} capillary, mean curvature, Killing graphs.
\end{abstract}
\section{Introduction}
Let $M$ be a $(n+1)$-dimensional Riemannian manifold endowed with a Killing vector field $Y$. Suppose that the distribution orthogonal to $Y$ is of constant rank and integrable. Given an integral leaf $P$ of that distribution, let $\Omega\subset P$ be a bounded domain with regular boundary $\Gamma =\partial\Omega$. We suppose for simplicity that $Y$ is complete. In this case, let $\vartheta: \mathbb{R}\times \bar\Omega \to M$ be the flow generated by $Y$ with initial values in $M$. In geometric terms, the ambient manifold is a warped product $M = P\times_{1/\sqrt{\gamma}} \mathbb{R}$ where $\gamma = 1/|Y|^2$.
The Killing graph of a differentiable function $u:\bar\Omega\to \mathbb{R}$ is the hypersurface $\Sigma \subset M$ parametrized by the map
\[
X(x)=\vartheta(u(x),x), \quad x\in\bar\Omega.
\]
The Killing cylinder $K$ over $\Gamma$ is by its turn defined by
\begin{equation}
K=\{\vartheta(s,x): s\in \mathbb{R}, \, x \in \Gamma\}.
\end{equation}
The height function with respect to the leaf $P$ is measured by the arc length parameter $\varsigma$ of the flow lines of $Y$, that is,
\[
\varsigma=\frac{1}{\sqrt\gamma}s.
\]
Fixed these notations, we are able to formulate a capillary problem in this geometric context which model stationary graphs under a gravity force whose intensity depends on the point in the space. More precisely, given a \emph{gravitational potential} $\Psi \in C^{1,\alpha}(\bar\Omega \times \mathbb{R})$ we define the functional
\begin{equation}
\mathcal{A}[u] = \int_\Sigma \bigg(1+\int_0^{u/\sqrt\gamma}\Psi(x, s(\varsigma)) \,\textrm{d}\varsigma\bigg)\textrm{d}\Sigma.
\end{equation}
The volume element $\textrm{d}\Sigma$ of $\Sigma$ is given by
\[
\frac{1}{\sqrt\gamma}\sqrt{\gamma+|\nabla u|^2}\,\textrm{d}\sigma,
\]
where $\textrm{d}\sigma$ is the volume element in $P$.
The first variation formula of this functional may be deduced as follows. Given an arbitrary function $v\in C^\infty_c(\Omega)$ we compute
\begin{eqnarray*}
& & \frac{d}{d\tau}\Big|_{\tau=0}\mathcal{A}[u+\tau v] =\int_\Omega \bigg(\frac{1}{\sqrt\gamma}\frac{\langle \nabla u, \nabla v\rangle}{\sqrt{\gamma+|\nabla u|^2}} + \frac{1}{\sqrt\gamma}\Psi (x, u(x)) v\bigg) \sqrt{\sigma}\textrm{d}x\\
& & \,\, = \int_\Omega \bigg(\textrm{div}\Big(\frac{1}{\sqrt\gamma}\frac{\nabla u}{W}v\Big) - \textrm{div}\Big(\frac{1}{\sqrt\gamma}\frac{\nabla u}{W}\Big) v + \frac{1}{\sqrt\gamma}\Psi (x, u(x)) v\bigg) \sqrt{\sigma}\textrm{d}x
\\
& & \,\,\,\, -\int_\Omega \bigg(\frac{1}{\sqrt\gamma}\textrm{div}\Big(\frac{\nabla u}{W}\Big) - \frac{1}{\sqrt\gamma}\langle \frac{\nabla \gamma}{2\gamma}, \frac{\nabla u}{W}\rangle -\frac{1}{\sqrt\gamma}\Psi (x, u(x)) \bigg) v \sqrt{\sigma}\textrm{d}x,
\end{eqnarray*}
where $\sqrt\sigma \textrm{d}x$ is the volume element $\textrm{d}\sigma$ expressed in terms of local coordinates in $P$. The differential operators $\textrm{div}$ and $\nabla$ are respectively the divergence and gradient in $P$ with respect to the metric induced from $M$.
We conclude that stationary functions satisfy the capillary-type equation
\begin{equation}
\label{capillary}
\textrm{div}\Big(\frac{\nabla u}{W}\Big) - \langle \frac{\nabla \gamma}{2\gamma}, \frac{\nabla u}{W}\rangle = \Psi.
\end{equation}
Notice that a Neumann boundary condition arises naturally from this variational setting: given a $C^{2,\alpha}$ function $\Phi:K \to (-1,1)$, we impose the following prescribed angle condition
\begin{equation}
\label{neumann-condition}
\langle N, \nu\rangle = \Phi
\end{equation}
along $\partial\Sigma$, where
\begin{equation}
N = \frac{1}{W}\big(\gamma Y - \vartheta_* \nabla u\big)
\end{equation}
is the unit normal vector field along $\Sigma$ satisfying $\langle N, Y\rangle >0$ and $\nu$ is the unit normal vector field along $K$ pointing inwards the Killing cylinder over $\Omega$.
Equation (\ref{capillary}) is the prescribed mean curvature equation for Killing graphs. A general existence result for solutions of the Dirichlet problem for this equation may be found in \cite{DHL}. There the authors used local perturbations of the Killing cylinders as barriers for obtaining height and gradient estimates. However this kind of barrier is not suitable to obtain \emph{a priori} estimates for solutions of Neumann problems. For that reason we consider now local perturbations of the graph itself adapted from the original Korevaar's approach in \cite{korevaar} and its extension by M. Calle and L. Shahriyari \cite{calle}.
Following \cite{calle} and \cite{korevaar} we suppose that the data $\Psi$ and $\Phi$ satisfy
\begin{itemize}
\item[i.] $|\Psi|+|\bar\nabla\Psi|\le C_\Psi$ in $\bar\Omega\times \mathbb{R}$,
\item[ii.] $\langle \bar\nabla \Psi, Y\rangle \ge \beta>0$ in $\bar\Omega\times \mathbb{R}$,
\item[iii.] $\langle \bar\nabla\Phi, Y\rangle \le 0$,
\item[iv.] $(1-\Phi^2)\ge \beta'$,
\item[v.] $|\Phi|_2\le C_\Phi$ in $K$,
\end{itemize}
for some positive constants $C_\Psi, C_\Phi, \beta$ and $\beta'$, where $\bar\nabla$ denotes the Riemannian connection in $M$. Assumption ($ii$) is classically referred to as the \emph{positive gravity} condition. Even in the Euclidean space, it seems to be an essential assumption in order to obtain \emph{a priori} height estimates. A very geometric discussion about this issue may be found at \cite{concus-finn}. Condition ($iii$) is the same as in \cite{calle} and \cite{korevaar} since at those references $N$ is chosen in such a way that $\langle N, Y\rangle >0$.
The main result in this paper is the following one
\begin{theorem}
\label{main} Let $\Omega$ be a bounded $C^{3,\alpha}$ domain in $P$.
Suppose that the $\Psi\in C^{1,\alpha}(\bar\Omega\times\mathbb{R})$ and $\Phi\in C^{2,\alpha}(K)$ with $|\Phi|\le 1$ satisfy conditions {\rm (i)-(v)} above. Then there exists a unique solution $u\in C^{3,\alpha}(\bar\Omega)$ of the capillary problem {\rm (\ref{capillary})-(\ref{neumann-condition})}.
\end{theorem}
We observe that $\Psi=nH$, where $H$ is the mean curvature of $\Sigma$ calculated with respect to $N$. Therefore Theorem \ref{main} establishes the existence of Killing graphs with prescribed mean curvature $\Psi$ and prescribed contact angle with $K$ along the boundary. Since the Riemannian product $P\times \mathbb{R}$ corresponds to the particular case where $\gamma=1$, our result extends the main existence theorem in \cite{calle}. Space forms constitute other important examples of the kind of warped products we are considering. In particular, we encompass the case of Killing graphs over totally geodesic hypersurfaces in the hyperbolic space $\mathbb{H}^{n+1}$.
In Section \ref{section-height}, we prove \emph{a priori} height estimates for solutions of (\ref{capillary})-(\ref{neumann-condition}) based on Uraltseva's method as presented in \cite{uraltseva}. These height estimates are one of the main steps for using the well-known Continuity Method in order to prove Theorem \ref{main}. At this respect, we refer the reader to the classical references \cite{concus-finn}, \cite{gerhardt} and \cite{spruck-simon}.
Section \ref{section-gradient} contains the proof of interior and boundary gradient estimates. There we follow closely a method due to N. Korevaar \cite{korevaar} for graphs in the Euclidean spaces and extended by M. Calle and L. Shahriyari \cite{calle} for Riemannian products. Finally the classical Continuity Method is applied to (\ref{capillary})-(\ref{neumann-condition}) in Section \ref{section-proof} for proving the existence result.
\section{Height estimates}
\label{section-height}
In this section, we use a technique developed by N. Uraltseva \cite{uraltseva} (see also \cite{uraltseva-book} and \cite{GT} for classical references on the subject) in order to obtain a height estimate for solutions of the capillary problem (\ref{capillary})-(\ref{neumann-condition}). This estimate requires the \emph{positive gravity} assumption ($ii$) stated in the Introduction.
\begin{proposition} Denote
\begin{equation}
\beta = \inf_{\Omega\times \mathbb{R}}\langle \bar\nabla \Psi, Y\rangle
\end{equation}
and
\begin{equation}
\mu = \sup_\Omega \Psi(x,0).
\end{equation}
Suppose that $\beta >0$. Then any solution $u$ of (\ref{capillary})-(\ref{neumann-condition}) satisfies
\begin{equation}
|u(x)|\le \frac{\sup_\Omega |Y|}{\inf_\Omega |Y|}\frac{\mu}{\beta}
\end{equation}
for all $x\in \bar\Omega$.
\end{proposition}
\noindent \emph{Proof.}
Fix an arbitrary real number $k$ with
\begin{equation*}
k > \frac{\sup_\Omega |Y|}{\inf_\Omega |Y|}\frac{\mu}{\beta}.
\end{equation*}
Suppose that the superlevel set
\begin{equation*}
\Omega_k = \{x\in \Omega: u(x)>k\}
\end{equation*}
has a nonzero Lebesgue measure. Define $u_k:\Omega \to \mathbb{R}$ as
\begin{equation*}
u_k(x) = \max\{u(x)-k,0\}.
\end{equation*}
From the variational formulation we have
\begin{eqnarray*}
0 &=&\int_{\Omega_k} \bigg(\frac{1}{\sqrt\gamma}\frac{\langle \nabla u, \nabla u_k\rangle}{\sqrt{\gamma+|\nabla u|^2}} + \frac{1}{\sqrt\gamma}\Psi (x, u(x)) u_k\bigg) \sqrt{\sigma}\textrm{d}x\\
&=& \int_{\Omega_k} \bigg(\frac{1}{\sqrt\gamma}\frac{|\nabla u|^2}{W} +\frac{1}{\sqrt\gamma} \Psi (x, u(x)) (u-k)\bigg) \sqrt{\sigma}\textrm{d}x\\
& = & \int_{\Omega_k} \bigg(\frac{1}{\sqrt\gamma}\frac{W^2-\gamma}{W} +\frac{1}{\sqrt\gamma} \Psi (x, u(x)) (u-k)\bigg) \sqrt{\sigma}\textrm{d}x \\
&= &
\int_{\Omega_k} \bigg(\frac{W}{\sqrt\gamma}-\frac{\sqrt\gamma}{W} + \frac{1}{\sqrt\gamma}\Psi (x, u(x)) (u-k)\bigg) \sqrt{\sigma}\textrm{d}x .
\end{eqnarray*}
However
\begin{equation*}
\Psi(x,u(x)) = \Psi(x,0) +\int_0^{u(x)} \frac{\partial \Psi}{\partial s}\textrm{d}s \ge -\mu +\beta u(x).
\end{equation*}
Since $\frac{\sqrt{\gamma}}{W}\leq 1$ we conclude that
\begin{eqnarray*}
|\Omega_k|-|\Omega_k|-\mu\int_{\Omega_k}\frac{1}{\sqrt{\gamma}}(u-k)+\beta\int_{\Omega_k}\frac{1}{\sqrt{\gamma}}u(u-k)\le 0.
\end{eqnarray*}
Hence we have
\begin{eqnarray*}
\beta\int_{\Omega_k}\frac{1}{\sqrt{\gamma}}u(u-k) \le \mu\int_{\Omega_k}\frac{1}{\sqrt{\gamma}}(u-k).\nonumber
\end{eqnarray*}
It follows that
\begin{eqnarray*}
\beta k \inf_\Omega |Y| \int_{\Omega_k}(u-k) \le \mu\sup_\Omega |Y|\int_{\Omega_k}(u-k)\nonumber
\end{eqnarray*}
Since $|\Omega_k|\neq 0$ we have
\[
k \le \frac{\sup_\Omega |Y|}{\inf_\Omega |Y|}\frac{\mu}{\beta},
\]
which contradicts the choice of $k$. We conclude that $|\Omega_k|=0$ for all $k \ge \frac{\sup_\Omega |Y|}{\inf_\Omega |Y|}\frac{\mu}{\beta}$. This implies that
\[
u(x)\le \frac{\sup_\Omega |Y|}{\inf_\Omega |Y|}\frac{\mu}{\beta},
\]
for all $x\in \bar\Omega$. A lower estimate may be deduced in a similar way. This finishes the proof of the Proposition. $
\square$
\begin{remark}
The construction of geometric barriers similar to those ones in \cite{concus-finn} is also possible at least in the case where $P$ is endowed with a rotationally invariant metric and $\Omega$ is contained in a normal neighborhood of a pole of $P$.
\end{remark}
\section{Gradient estimates}
\label{section-gradient}
Let $\Omega'$ be a subset of $\Omega$ and define
\begin{equation}
\Sigma'= \{\vartheta(u(x),x): x\in \Omega'\}\subset \Sigma
\end{equation}
be the graph of $u|_{\Omega'}$. Let $\mathcal{O}$ be an open subset in $M$ containing $\Sigma'$. We consider a vector field $Z\in \Gamma(TM)$ with bounded $C^2$ norm and supported in $\mathcal{O}$. Hence there exists $\varepsilon>0$ such that the local flow
$\Xi:(-\varepsilon, \varepsilon)\times \mathcal{O}\to M$ generated by $Z$ is well-defined. We also suppose that
\begin{equation}
\label{Zboundary}
\langle Z(y), \nu (y)\rangle = 0,
\end{equation}
for any $y\in K\cap\mathcal{O}$. This implies that the flow line of $Z$ passing through a point $y\in K\cap\mathcal{O}$ is entirely contained in $K$.
We define a variation of $\Sigma$ by a one-parameter family of hypersurfaces $\Sigma_\tau$, $\tau \in (-\varepsilon, \varepsilon)$, parameterized by $X_\tau:\bar\Omega\to M$ where
\begin{equation}
\label{perturbation}
X_\tau (x) = \Xi(\tau, \vartheta(u(x),x)), \quad x\in \bar\Omega.
\end{equation}
It follows from the Implicit Function Theorem that there exists $\Omega_\tau \subset P$ and $u_\tau:\bar\Omega_\tau\to \mathbb{R}$ such that $\Sigma_\tau$ is the graph of $u_\tau$. Moreover,
(\ref{Zboundary}) implies that the $\Omega_\tau\subset\Omega$.
Hence given a point $y\in \Sigma$, denote $y_\tau = \Xi(\tau, y)\in \Sigma_\tau$. It follows that there exists $x_\tau\in \Omega_\tau$ such that $y_\tau= \vartheta(u_\tau(x_\tau), x_\tau)$. Then we denote
by $\hat y_\tau = \vartheta(u(x_\tau), x_\tau)$ the point in $\Sigma$ in the flow line of $Y$ passing through $y_\tau$. The vertical separation between $y_\tau$ and $\hat y_\tau$ is by definition the function $s(y,\tau)=u_\tau(x_\tau)- u(x_\tau)$.
\begin{lemma}\label{lema1} For any $\tau\in (-\varepsilon, \varepsilon)$, let $A_\tau$ and $H_\tau$ be, respectively, the Weingarten map and the mean curvature of the hypersurface $\Sigma_\tau$ calculated with respect to the unit normal vector field $N_\tau$ along $\Sigma_\tau$ which satisfies $\langle N_\tau, Y\rangle >0$. Denote $H=H_0$ and $A=A_0$. If
$\zeta\in C^\infty(\mathcal{O})$ and $T\in \Gamma(T\mathcal{O})$ are defined by
\begin{equation}
Z = \zeta N_\tau + T
\end{equation}
with $\langle T, N_\tau\rangle=0$ then
\begin{itemize}
\item[i.]
$\frac{\partial s}{\partial\tau}\big|_{\tau=0} = \langle Z, N\rangle W.$
\item[ii.]
$\bar{\nabla}_Z N\big|_{\tau=0} = -AT-\nabla^{\Sigma}\zeta$
\item[iii.]
$\frac{\partial H}{\partial\tau}\big|_{\tau=0}=\Delta_\Sigma\zeta+(|A|^2+{\rm Ric}_M(N,N))\zeta+\langle\bar\nabla \Psi, Z\rangle,$
\end{itemize}
where $W=\langle Y, N_\tau\rangle^{-1}=(\gamma+|\nabla u_\tau|^2)^{-1/2}$. The operators $\nabla^\Sigma$ and $\Delta_\Sigma$ are, respectively, the intrinsic gradient operator and the Laplace-Beltrami operator in $\Sigma$ with respect to the induced metric. Moreover, $\bar\nabla$ and ${\rm Ric}_M$ denote, respectively, the Riemannian covariant derivative and the Ricci tensor in $M$.
\end{lemma}
\noindent \textit{Proof.} (i) Let $(x^i)_{i=1}^n$ be a set of local coordinates in $\Omega\subset P$. Differentiating (\ref{perturbation}) with respect to $\tau$ we obtain
\begin{eqnarray*}
X_{\tau*}\frac{\partial}{\partial\tau} = Z|_{X_\tau} = \zeta N_\tau + T
\end{eqnarray*}
On the other hand differentiating both sides of
\[
X_\tau(x) =\vartheta(u_\tau(x_\tau), x_\tau)
\]
with respect to $\tau$ we have
\begin{eqnarray*}
X_{\tau*}\frac{\partial}{\partial\tau} &=&\Big( \frac{\partial u_\tau}{\partial \tau}+\frac{\partial u_\tau}{\partial x^i}\frac{\partial x_\tau^i}{\partial \tau}\Big)\vartheta_* Y +\frac{\partial x_\tau^i}{\partial \tau}
\vartheta_* \frac{\partial}{\partial x^i}\\
& = & \frac{\partial u_\tau}{\partial \tau}\vartheta_* Y+\frac{\partial x_\tau^i}{\partial \tau}\Big(\vartheta_* \frac{\partial}{\partial x^i}+\frac{\partial u_\tau}{\partial x^i}\vartheta_* Y\Big)
\end{eqnarray*}
Since the term between parenthesis after the second equality is a tangent vector field in $\Sigma_\tau$ we conclude that
\begin{eqnarray*}
\frac{\partial u_\tau}{\partial \tau}\langle Y, N_\tau\rangle = \langle X_{\tau*}\frac{\partial}{\partial\tau}, N_\tau\rangle = \zeta
\end{eqnarray*}
from which it follows that
\[
\frac{\partial u_\tau}{\partial \tau} = \zeta W
\]
and
\begin{eqnarray}
\frac{\partial s}{\partial\tau} = \frac{\partial }{\partial\tau} (u_{\tau}-u) = \frac{\partial u_{\tau}}{\partial\tau} = \zeta W.\nonumber
\end{eqnarray}
\noindent (ii) Now we have
\begin{eqnarray}
& & \langle\bar{\nabla}_{Z}N_\tau,X_*\partial_i\rangle = -\langle N_\tau,\bar{\nabla}_{Z}X_*\partial_i\rangle= -\langle N_\tau,\bar{\nabla}_{X_*\partial_i} Z\rangle= -\langle N_\tau,\bar{\nabla}_{X_*\partial_i} (\zeta N+T)\rangle\nonumber\\
& & \,\, = -\langle N_\tau,\bar{\nabla}_{X_*\partial_i} T\rangle-\langle N_\tau,\bar{\nabla}_{X_*\partial_i} \zeta N_\tau\rangle= -\langle A_\tau T, X_*\partial_i\rangle- \langle\nabla^{\Sigma}\zeta, X_*\partial_i\rangle,\nonumber
\end{eqnarray}
for any $1\le i\le n$. It follows that
\[
\bar{\nabla}_Z N = -AT-\nabla^{\Sigma}\zeta.
\]
\noindent (iii) This is a well-known formula whose proof may be found at a number of references (see, for instance, \cite{gerhardt-book}).
$\square$
For further reference, we point out that the Comparison Principle \cite{GT} when applied to (\ref{capillary})-(\ref{neumann-condition}) may be stated in geometric terms as follows. Fixed $\tau$, let $x\in \bar\Omega'$ be a point of maximal vertical separation $s(\cdot, \tau)$. If $x$ is an interior point we have
\[
\nabla u_\tau (x,\tau) -\nabla u(x) = \nabla s (x,\tau) = 0,
\]
which implies that the graphs of the functions $u_\tau$ and $u+s(x,\tau)$ are tangent at their common point $y_\tau =\vartheta(u_\tau(x), x)$. Since the graph of $u+s(x, \tau)$ is obtained from $\Sigma$ only by a translation along the flow lines of $Y$ we conclude that the mean curvatures of these two graphs are the same at corresponding points. Since the graph of $u+s(x,\tau)$ is locally above the graph of $u_\tau$ we conclude that
\begin{equation}
\label{comparison-int}
H(\hat y_\tau)\ge H_\tau (y_\tau).
\end{equation}
If $x\in \partial\Omega\subset \partial\Omega'$ we have
\[
\langle \nabla u_\tau, \nu\rangle|_{x} - \langle \nabla u, \nu\rangle|_x = \langle \nabla s, \nu\rangle \le 0
\]
since $\nu$ points toward $\Omega$. This implies that
\begin{equation}
\label{comparison-bdry}
\langle N, \nu\rangle|_{y_\tau} \ge \langle N, \nu\rangle|_{\hat y_\tau}
\end{equation}
\subsection{Interior gradient estimate}
\label{section-int}
\begin{proposition}\label{interior}
Let $B_R(x_0)\subset \Omega$ where $R<{\rm inj}P$. Then there exists a constant $C>0$ depending on $\beta, C_\Psi, \Omega$ and $K$ such that
\begin{equation}
|\nabla u(x)|\le C\frac{R^2}{R^2 -d^2(x)},
\end{equation}
where $d={\rm dist}(x_0, x)$ in $P$.
\end{proposition}
\noindent \emph{Proof.} Fix $\Omega'= B_R(x_0)\subset \Omega$. We consider the vector field $Z$ given by
\begin{equation}
\label{Zint}
Z=\zeta N,
\end{equation}
where $\zeta$ is a function to be defined later. Fixed $\tau\in [0, \varepsilon)$, let $x\in B_R(x_0)$ be a point where the vertical separation $s(\cdot, \tau)$ attains a maximum value.
If $y=\vartheta(u(x), x)$ it follows that
\begin{equation}
H_\tau (y_\tau) - H_0(y) = \frac{\partial H_\tau}{\partial\tau}\Big|_{\tau=0}\tau + o(\tau).
\end{equation}
However the Comparison Principle implies that $H_0(\hat y_\tau)\ge H_\tau (y_\tau)$. Using Lemma \ref{lema1} ($iii$) we conclude that
\begin{eqnarray*}
H_0(\hat y_\tau)- H_0(y) \ge \frac{\partial H_\tau}{\partial\tau}\Big|_{\tau=0}\tau + o(\tau)= (\Delta_\Sigma\zeta+ |A|^2\zeta + \textrm{Ric}_M(N,N)\zeta)\tau + o(\tau).
\end{eqnarray*}
Since $\hat y_\tau = \vartheta (-s(y,\tau), y_\tau)$ we have
\begin{eqnarray}
\label{dd}
\frac{d\hat y_\tau}{d\tau}\Big|_{\tau=0} =-\frac{ds}{d\tau}\vartheta_{*}\frac{\partial}{\partial s}+\frac{\partial y_\tau^i}{\partial\tau}\vartheta_{*}\frac{\partial}{\partial x^{i}}
=- \frac{ds}{d\tau} Y + \frac{d y_\tau}{d\tau}\Big|_{\tau=0}=-\frac{ds}{d\tau} Y + Z(y).
\end{eqnarray}
Hence using Lemma \ref{lema1} ($i$) and (\ref{Zint}) we have
\begin{equation}
\label{dtau}
\frac{d\hat y_\tau}{d\tau}\Big|_{\tau=0}=-\zeta WY+\zeta N.
\end{equation}
On the other hand for each $\tau\in (-\varepsilon, \varepsilon)$ there exists a smooth $\xi: (-\varepsilon, \varepsilon)\to TM$ such that
\[
\hat y_\tau = \exp_y \xi(\tau).
\]
Hence we have
\begin{eqnarray}
\frac{d\hat y_\tau}{d\tau}\Big|_{\tau=0} =\xi'(0).\nonumber
\end{eqnarray}
With a slight abuse of notation we denote $\Psi(s,x)$ by $\Psi(y)$ where $y=\vartheta(s,x)$.
It results that
\begin{equation*}
H_0(\hat y_\tau)- H_0(y) = \Psi(x_\tau, u(x_\tau)) - \Psi(x, u(x)) = \Psi(\exp_y \xi_\tau)-\Psi(y)= \langle \bar\nabla\Psi|_y, \xi'(0)\rangle \tau + o(\tau).
\end{equation*}
However
\begin{eqnarray}
\langle\bar\nabla\Psi, \xi'(0)\rangle =\zeta \langle \bar\nabla\Psi, N-WY \rangle= -\zeta W\frac{\partial\Psi}{\partial s}+\zeta\langle \bar\nabla\Psi, N\rangle.
\end{eqnarray}
We conclude that
\begin{equation*}
-\zeta W\frac{\partial\Psi}{\partial s}\tau+\zeta\langle \bar\nabla\Psi, N\rangle \tau + o(\tau) \ge (\Delta_\Sigma\zeta+ |A|^2\zeta + \textrm{Ric}_M(N,N)\zeta)\tau + o(\tau).
\end{equation*}
Suppose that
\begin{equation}
W(x) > \frac{C+|\bar\nabla\Psi|}{\beta}
\end{equation}
for a constant $C>0$ to be chosen later. Hence we have
\begin{equation*}
(\Delta_\Sigma\zeta+ \textrm{Ric}_M(N,N)\zeta)\tau + C\zeta \tau \le o(\tau).
\end{equation*}
Following \cite{calle} and \cite{korevaar} we choose
\[
\zeta = 1-\frac{d^2}{R^2},
\]
where $d=\textrm{dist}(x_0, \cdot)$. It follows that
\begin{eqnarray*}
\nabla^\Sigma\zeta = -\frac{2d}{R^2}\nabla^\Sigma d
\end{eqnarray*}
and
\begin{eqnarray*}
\Delta_\Sigma \zeta = -\frac{2d}{R^2}\Delta_\Sigma d -\frac{2}{R^2}|\nabla^\Sigma d|^2
\end{eqnarray*}
However using the fact that $P$ is totally geodesic and that $[Y,\bar\nabla d]=0$ we have
\begin{eqnarray}
& & \Delta_{\Sigma}d=\Delta_M d-\langle\bar\nabla_{N}\bar\nabla d,N\rangle + nH\langle\bar\nabla d,N\rangle\nonumber\\
& &\,\, = \Delta_P d -\langle \nabla_{\frac{\nabla u}{W}} \nabla d, \frac{\nabla u}{W}\rangle -\gamma^2\langle Y, N\rangle^2 \langle \bar\nabla_Y \bar\nabla d, Y\rangle+ nH\langle\bar\nabla d,N\rangle.\nonumber
\end{eqnarray}
Let $\pi:M\to P$ the projection defined by $\pi(\vartheta(s,x))=x$. Then
\[
\pi_* N = -\frac{\nabla u}{W}.
\]
We denote
\[
\pi_*N^\perp = \pi_* N -\langle \pi_* N, \nabla d\rangle\nabla d.
\]
If $\mathcal{A}_d$ and $\mathcal{H}_d$ denote, respectively, the Weingarten map and the mean curvature of the geodesic ball $B_d(x_0)$ in $P$ we conclude that
\begin{eqnarray}
& & \Delta_{\Sigma}d= n\mathcal{H}_d -\langle \mathcal{A}_d(\pi_* N^\perp), \pi_*N^\perp\rangle +\gamma\langle Y, N\rangle^2 \kappa+ nH\langle\bar\nabla d,N\rangle, \nonumber
\end{eqnarray}
where
\[
\kappa = -\gamma\langle \bar\nabla_Y \bar\nabla d, Y\rangle
\]
is the principal curvature of the Killing cylinder over $B_d(x_0)$ relative to the principal direction $Y$. Therefore we have
\[
|\Delta_\Sigma d|\le C_1(C_\Psi, \sup_{B_R(x_0)}(\mathcal{H}_d+\kappa), \sup_{B_R(x_0)}\gamma )
\]
in $B_R(x_0)$. Hence setting
\[
C_2 = \sup_{B_R(x_0)}\textrm{Ric}_M
\]
we fix
\begin{equation}
\label{C}
C =\max\{2(C_1+C_2), \sup_{\mathbb{R}\times\Omega} |\bar\nabla \Psi|\}.
\end{equation}
With this choice we conclude that
\[
C\zeta \le \frac{o(\tau)}{\tau},
\]
a contradiction. This implies that
\begin{equation}
W(x) \le \frac{C+|\bar\nabla \Psi|}{\beta}.
\end{equation}
However
\[
\zeta(z) W(z) + o(\tau) = s(X(z), \tau) \le s(X(x), \tau) = \zeta(x) W(x)+o(\tau),
\]
for any $z\in B_R(x_0)$. It follows that
\[
W(z) \le \frac{R^2-d^2(z)}{R^2-d^2(x)} W(x) + o(\tau) \le \frac{R^2}{R^2-d^2(x)} \frac{C+|\bar\nabla \Psi|}{\beta}+o(\tau) \le \widetilde C \frac{R^2}{R^2-d^2(x)},
\]
for very small $\varepsilon>0$. This finishes the proof of the proposition.
$\square$
\begin{remark}
\label{sphere}
If $\Omega$ satisfies the interior sphere condition for a uniform radius $R>0$ we conclude that
\begin{equation}
W(x)\le \frac{C}{d_\Gamma(x)},
\end{equation}
for $x\in \Omega$, where $d_\Gamma(x) ={\rm dist}(x, \Gamma)$.
\end{remark}
\subsection{Boundary gradient estimates}
Now we establish boundary gradient estimates using another local perturbation of the graph, which this time also has tangential components.
\begin{proposition}\label{boundary} Let $x_0\in P$ and $R>0$ such that $3R <{\rm inj}P$. Denote by $\Omega'$ the subdomain $\Omega \cap B_{2R}(x_0)$. Then there exists a positive constant $C=C(R, \beta, \beta', C_\Psi, C_\Phi, \Omega, K)$ such that
\begin{equation}
W(x) \le C,
\end{equation}
for all $x\in \overline\Omega'$.
\end{proposition}
\noindent \emph{Proof.} Now we consider the subdomain $\Omega'=\Omega\cap B_{R}(x_0)$. We define
\begin{equation}
Z = \eta N + X,
\end{equation}
where
\[
\eta = \alpha_0 v + \alpha_1 d_\Gamma
\]
and $\alpha_0$ and $\alpha_1$ are positive constants to be chosen and $d_\Gamma$ is a smooth extension of the distance function $\textrm{dist}(\,\cdot\, , \Gamma)$ to $\Omega'$ with $|\nabla d_\Gamma|\le 1$
and
\[
v =4R^2-d^2,
\]
where $d=\textrm{dist}(x_0, \cdot)$.
Moreover
\[
X = \alpha_0\Phi (v\nu-d_\Gamma\nabla v).
\]
In this case we have
\begin{eqnarray*}
\zeta = \eta +\langle X, N\rangle = \alpha_0 v + \alpha_1 d_\Gamma + \alpha_0\Phi (v\langle N, \nu\rangle-d_\Gamma\langle N, \nabla v\rangle).
\end{eqnarray*}
Fixed $\tau\in [0,\varepsilon)$, let $x\in\bar\Omega'$ be a point where the maximal vertical separation between $\Sigma$ and $\Sigma_\tau$ is attained. We first suppose that $x\in \textrm{int}(\partial\Omega'\cap \partial\Omega)$. In this case denoting $y_\tau =\vartheta (u_\tau(x), x)\in \Sigma_\tau$ and $\hat y_\tau=\vartheta(u(x), x)\in \Sigma$ it follows from the Comparison Principle that
\begin{equation}
\langle N_\tau, \nu\rangle|_{y_\tau}\ge \langle N, \nu\rangle|_{\hat y_\tau}.
\end{equation}
Notice that $\hat y_\tau \in \partial\Sigma$. Moreover since $Z|_{K\cap\mathcal{O}}$ is tangent to $K$ there exists $y\in \partial\Sigma$ such that
\[
y = \Xi (-\tau, y_\tau).
\]
We claim that
\begin{equation}
\label{der-1}
|\langle \bar\nabla \langle N_\tau, \nu\rangle, \frac{dy_\tau}{d\tau}\big|_{\tau=0}\rangle| \le \alpha_1 (1-\Phi^2) +\widetilde C\alpha_0
\end{equation}
for some positive constant $\widetilde C=C(C_\Phi, K, \Omega, R)$.
Hence (\ref{neumann-condition}) implies that
\begin{eqnarray*}
\langle N, \nu\rangle|_{\hat y_\tau} - \langle N, \nu\rangle|_{y} = \Phi(\hat y_\tau) - \Phi(y) = \tau \langle \bar\nabla \Phi, \frac{d\hat y_\tau}{d\tau}\big|_{\tau=0}\rangle+ o(\tau).
\end{eqnarray*}
Therefore
\begin{eqnarray*}
\langle N, \nu\rangle|_{y_\tau} - \langle N, \nu\rangle|_{y} \ge \tau \langle \bar\nabla \Phi, \frac{d\hat y_\tau}{d\tau}\big|_{\tau=0}\rangle+ o(\tau).
\end{eqnarray*}
On the other hand we have
\begin{eqnarray*}
\langle N, \nu\rangle|_{y_\tau} - \langle N, \nu\rangle|_{y} = \tau \langle \bar\nabla \langle N, \nu\rangle, \frac{dy_\tau}{d\tau}\big|_{\tau=0}\rangle+ o(\tau).
\end{eqnarray*}
We conclude that
\begin{equation*}
\label{ineq-fund}
\tau \langle \bar\nabla \langle N, \nu\rangle, \frac{dy_\tau}{d\tau}\big|_{\tau=0}\rangle \ge \tau \langle \bar\nabla \Phi, \frac{d\hat y_\tau}{d\tau}\big|_{\tau=0}\rangle+ o(\tau).
\end{equation*}
Hence we have
\begin{equation*}
\alpha_1 (1-\Phi^2)\tau +\widetilde C\alpha_0\tau \ge \tau \langle \bar\nabla \Phi, \frac{d\hat y_\tau}{d\tau}\big|_{\tau=0}\rangle+ o(\tau).
\end{equation*}
It follows from (\ref{dd}) that
\begin{equation*}
\label{ineq-fund2}
\alpha_1 (1-\Phi^2) +\widetilde C\alpha_0 \ge -\zeta W \langle \bar\nabla \Phi,Y\rangle+ \zeta\langle \bar\nabla \Phi, N\rangle+ o(\tau)/\tau.
\end{equation*}
Since
\[
\langle \bar\nabla\Phi, Y\rangle =\frac{\partial\Phi}{\partial s}\le 0
\]
we conclude that
\begin{equation}
\label{ineq-fund3}
W(x) \le C(C_\Phi, \beta', K, \Omega, R).
\end{equation}
We now prove the claim. For that, observe that Lemma \ref{lema1} ($ii$) implies that
\begin{eqnarray*}
& &\langle N, \nu\rangle|_{y_\tau} - \langle N, \nu\rangle|_{y} = \tau \frac{\partial}{\partial\tau}\Big|_{\tau=0}\langle N_\tau, \nu\rangle|_{y_\tau} + o(\tau) \\
& & \,\, = \tau (\langle N, \bar\nabla_Z \nu\rangle|_y-\langle AT+\nabla^\Sigma \zeta, \nu\rangle|_y)+o(\tau).
\end{eqnarray*}
Since $Z|_y\in T_y K$ it follows that
\begin{eqnarray*}
\langle N, \nu\rangle|_{y_\tau} - \langle N, \nu\rangle|_{y} = -\tau (\langle A_K Z, N\rangle|_y+\langle AT+\nabla^\Sigma \zeta, \nu\rangle|_y)+o(\tau),
\end{eqnarray*}
where $A_K$ is the Weingarten map of $K$ with respect to $\nu$. We conclude that
\begin{equation}
\label{ineq222-2}
-\tau (\langle A_K Z, N\rangle|_y+\langle AT+\nabla^\Sigma \zeta, \nu\rangle|_y) \ge \tau \langle \bar\nabla \Phi, \frac{d\hat y_\tau}{d\tau}\big|_{\tau=0}\rangle+ o(\tau)
\end{equation}
where
\[
\nu^T = \nu-\langle N, \nu\rangle N.
\]
We have
\begin{eqnarray*}
\langle \nabla^{\Sigma}\zeta+AT, \nu^T\rangle = \alpha_0 \langle \nabla v, \nu^T\rangle+\alpha_1\langle\nabla^{\Sigma}d_\Gamma, \nu^T\rangle+\langle\nabla^{\Sigma}\langle X,N\rangle, \nu^T\rangle + \langle AT, \nu^T\rangle.
\end{eqnarray*}
We compute
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\langle X,N\rangle,\nu^T\rangle =\alpha_0 (v\langle N, \nu\rangle -d_\Gamma\langle N, \nabla v\rangle) \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, + \alpha_0 \Phi \big(\langle \nabla v, \nu^T\rangle \langle N, \nu\rangle + v (\langle\bar\nabla_{\nu^T} N, \nu\rangle+\langle N, \bar\nabla_{\nu^T}\nu\rangle) - \langle \nabla d_\Gamma, \nu^T\rangle \langle N, \nabla v\rangle\\
& & \,\,\,\, - d_\Gamma
(\langle \bar\nabla_{\nu^T} N, \nabla v\rangle+\langle N, \bar\nabla_{\nu^T}\nabla v\rangle)\big).
\end{eqnarray*}
Hence we have at $y$ that
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\langle X,N\rangle,\nu^T\rangle =\alpha_0 (v\Phi -d_\Gamma\langle N, \nabla v\rangle) \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, + \alpha_0 \Phi \big(\langle \nabla v, \nu^T\rangle \Phi + v (-\langle A\nu^T, \nu^T\rangle+\langle N, \bar\nabla_{\nu}\nu\rangle-\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nu\rangle) \\
& & \,\,\,\,-\langle \nu, \nu^T\rangle \langle N, \nabla v\rangle - d_\Gamma
(-\langle A\nu^T, \nabla v\rangle+\langle N, \bar\nabla_{\nu}\nabla v\rangle-\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nabla v\rangle)\big).
\end{eqnarray*}
Therefore we have
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\langle X,N\rangle,\nu^T\rangle =\alpha_0 (v\Phi -d_\Gamma\langle N, \nabla v\rangle) \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, +\alpha_0 \Phi \big(\langle \nabla v, \nu^T\rangle \Phi - v (\langle A\nu^T, \nu^T\rangle+\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nu\rangle) \\
& & \,\,\,\,-\langle \nu, \nu^T\rangle \langle N, \nabla v\rangle + d_\Gamma
(\langle A\nu^T, \nabla v\rangle-\langle N, \bar\nabla_{\nu}\nabla v\rangle+\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nabla v\rangle)\big).
\end{eqnarray*}
It follows that
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\zeta + AT, \nu^T\rangle =\langle AT, \nu^T\rangle+\alpha_0 \langle \nabla v, \nu^T\rangle+\alpha_1\langle\nu, \nu^T\rangle \\
& & \,\,\,\, +\alpha_0 (v\Phi -d_\Gamma\langle N, \nabla v\rangle) \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, +\alpha_0 \Phi \big(\langle \nabla v, \nu^T\rangle \Phi - v (\langle A\nu^T, \nu^T\rangle+\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nu\rangle) \\
& & \,\,\,\,-\langle \nu, \nu^T\rangle \langle N, \nabla v\rangle + d_\Gamma
(\langle A\nu^T, \nabla v\rangle-\langle N, \bar\nabla_{\nu}\nabla v\rangle+\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nabla v\rangle)\big).
\end{eqnarray*}
However
\[
\langle AT, \nu^T\rangle= \langle A\nu^T, X\rangle =\alpha_0 \Phi v\langle A\nu^T, \nu^T\rangle -\alpha_0 \Phi d_\Gamma\langle A\nu^T, \nabla v\rangle.
\]
Hence we have
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\zeta + AT, \nu^T\rangle =\alpha_0 \langle \nabla v, \nu^T\rangle+\alpha_1\langle\nu, \nu^T\rangle +\alpha_0 (v\Phi -d_\Gamma\langle N, \nabla v\rangle) \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, +\alpha_0 \Phi \big(\langle \nabla v, \nu^T\rangle \Phi - v \Phi\langle N, \bar\nabla_{N}\nu\rangle -\langle \nu, \nu^T\rangle \langle N, \nabla v\rangle\\
& & \,\,\,\, - d_\Gamma
(\langle N, \bar\nabla_{\nu}\nabla v\rangle-\langle N,\nu\rangle\langle N, \bar\nabla_{N}\nabla v\rangle)\big).
\end{eqnarray*}
Since $d_\Gamma(y)=0$ we have
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\zeta + AT, \nu^T\rangle =\alpha_0 \langle \nabla v, \nu^T\rangle+\alpha_1\langle\nu, \nu^T\rangle +\alpha_0 v\Phi \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, +\alpha_0 \Phi \big(\langle \nabla v, \nu^T\rangle \Phi - v \Phi\langle N, \bar\nabla_{N}\nu\rangle -\langle \nu, \nu^T\rangle \langle N, \nabla v\rangle\big).
\end{eqnarray*}
Rearranging terms we obtain
\begin{eqnarray*}
& & \langle \nabla^{\Sigma}\zeta + AT, \nu^T\rangle =\alpha_1(1-\langle N, \nu\rangle^2)+\alpha_0 \langle \nabla v, \nu^T\rangle (1+\Phi^2) +\alpha_0 v\Phi \langle \bar\nabla\Phi, \nu^T\rangle \\
& & \,\,\,\, -\alpha_0 \Phi \big(v \Phi\langle N, \bar\nabla_{N}\nu\rangle +(1-\langle N, \nu\rangle^2) \langle N, \nabla v\rangle\big).
\end{eqnarray*}
Therefore there exists a constant $C=C(\Phi, K, \Omega, R)$ such that
\begin{equation}
\label{est-1}
|\langle \nabla^{\Sigma}\zeta + AT, \nu^T\rangle|\le \alpha_1 (1-\Phi^2) +C\alpha_0.
\end{equation}
Since $d_\Gamma(y)=0$ it holds that
\[
|\langle A_K Z, N\rangle| = |A_K| |Z|\le |A_K|(\eta +|X|) \le 4R^2\alpha_0|A_K|(1+\Phi),
\]
from which we conclude that
\begin{equation}
\label{der-1-proof}
|\langle \bar\nabla\langle N_\tau, \nu\rangle, \frac{dy_\tau}{d\tau}\big|_{\tau=0}\rangle| \le \alpha_1 (1-\Phi^2) +\widetilde C\alpha_0
\end{equation}
for some constant $\widetilde C(C_\Phi, K, \Omega, R)>0$.
Now we suppose that $x\in \overline{\partial\Omega'\cap \Omega}$. In this case, we have $v(x)=0$. Then $\eta=\alpha_1 d_\Gamma$ and
\[
X=-\alpha_0 \Phi d_\Gamma\nabla v
\]
at $x$. Thus
\[
\zeta = \eta + \langle X,N\rangle = \alpha_1 d_\Gamma +2\alpha_0 \Phi dd_\Gamma \langle \nabla d, N\rangle.
\]
Moreover we have
\[
W(x) \le \frac{C}{d_\Gamma(x)}
\]
(see Remark \ref{sphere}). It follows that
\begin{eqnarray}
\zeta W \le C(\alpha_1 +2\alpha_0 \Phi d\langle \nabla d, N\rangle)\le C(\alpha_1 +4R\alpha_0 \Phi).
\end{eqnarray}
We conclude that
\begin{eqnarray}
W(x) \le C(C_\Phi, K, \Omega, R).
\end{eqnarray}
Now we consider the case when $x\in \Omega \cap \Omega'$. In this case we have
\begin{eqnarray*}
& &\Delta_\Sigma\zeta =\alpha_0 \Delta_\Sigma v + \alpha_1 \Delta_\Sigma d_\Gamma + \alpha_0\Delta_\Sigma\Phi (v\langle N, \nu\rangle-d_\Gamma\langle N, \nabla v\rangle)\\
& & \,\, +\alpha_0\Phi (\Delta_\Sigma v\langle N, \nu\rangle + v\Delta_\Sigma\langle N, \nu\rangle+2\langle \nabla^\Sigma v, \nabla^\Sigma\langle N, \nu\rangle\rangle-\Delta_\Sigma d_\Gamma\langle N, \nabla v\rangle-d_\Gamma\Delta_\Sigma\langle N, \nabla v\rangle\\
& &\,\,\,\, -2\langle \nabla^\Sigma d_\Gamma, \nabla^\Sigma \langle N, \nabla v\rangle\rangle)\\
& & \,\,\,\, +2\alpha_0 \langle \nabla^\Sigma \Phi, \nabla^\Sigma v \langle N, \nu\rangle+v \nabla^\Sigma\langle N, \nu\rangle-\nabla^\Sigma d_\Gamma\langle N, \nabla v\rangle-d_\Gamma\nabla^\Sigma\langle N, \nabla v\rangle\rangle
\end{eqnarray*}
Notice that given an arbitrary vector field $U$ along $\Sigma$ we have
\begin{equation*}
\langle \nabla^\Sigma \langle N, U\rangle, V\rangle = -\langle AU^T, V\rangle +\langle N, \bar\nabla_V U\rangle,
\end{equation*}
for any $V\in \Gamma(T\Sigma)$. Here, $U^T$ denotes the tangential component of $U$.
Hence using Codazzi's equation we obtain
\begin{eqnarray*}
\Delta_\Sigma\langle N, U\rangle \le \langle \bar\nabla (nH), U^T\rangle +\textrm{Ric}_M (U^T, N) + C|A|
\end{eqnarray*}
for a constant $C$ depending on $\bar\nabla U$ and $\bar\nabla^2 U$. Hence using (\ref{capillary}) we conclude that
\begin{eqnarray}
\Delta_\Sigma\langle N, U\rangle \le \langle \bar\nabla \Psi, U^T\rangle +\widetilde C|A|
\end{eqnarray}
where $\widetilde C$ is a positive constant depending on $\bar\nabla U, \bar\nabla^2 U$ and $\textrm{Ric}_M$.
We also have
\begin{eqnarray*}
\Delta_\Sigma d_\Gamma & = & \Delta_P d_\Gamma +\gamma \langle\bar\nabla_Y\bar\nabla d, Y\rangle -\langle \bar\nabla_N \bar\nabla d_\Gamma, N \rangle +nH\langle \bar\nabla d_\Gamma, N\rangle \\
& \le & C_0\Psi + C_1,
\end{eqnarray*}
where $C_0 $ and $C_1$ are positive constants depending on the second fundamental form of the Killing cylinders over the equidistant sets $d_\Gamma = \delta$ for small values of $\delta$. Similar estimates also hold for $\Delta_\Sigma d$ and then for $\Delta_\Sigma v$.
We conclude that
\begin{equation}
\Delta_\Sigma \zeta \ge -\widetilde C_0 - \widetilde C_1 |A|,
\end{equation}
where $\widetilde C_0$ and $\widetilde C_1$ are positive constants depending on $\Omega$, $K$, $\textrm{Ric}_M$, $|\Phi|_2$.
Now proceeding similarly as in the proof of Proposition \ref{interior}, we observe that Lemma \ref{lema1} ($iii$) and the Comparison Principle yield
\begin{eqnarray*}
H_0(\hat y_\tau)- H_0(y) \ge \frac{\partial H_\tau}{\partial\tau}\Big|_{\tau=0}\tau + o(\tau)= (\Delta_\Sigma\zeta+ |A|^2\zeta + \textrm{Ric}_M(N,N)\zeta)\tau +\tau\langle \bar\nabla\Psi, T\rangle+ o(\tau).
\end{eqnarray*}
However
\begin{equation*}
H_0(\hat y_\tau)- H_0(y) = \langle \bar\nabla\Psi|_y, \xi'(0)\rangle \tau + o(\tau).
\end{equation*}
Using (\ref{dd}) we have
\begin{eqnarray*}
\langle\bar\nabla\Psi, \xi'(0)\rangle =\langle \bar\nabla\Psi, Z-\zeta WY \rangle= \langle \bar\nabla\Psi, Z\rangle-\zeta W\frac{\partial\Psi}{\partial s}.
\end{eqnarray*}
We conclude that
\begin{equation*}
-\zeta W\frac{\partial\Psi}{\partial s}\tau+\zeta\langle \bar\nabla\Psi, N\rangle \tau + o(\tau) \ge (\Delta_\Sigma\zeta+ |A|^2\zeta + \textrm{Ric}_M(N,N)\zeta)\tau + o(\tau).
\end{equation*}
Suppose that
\begin{equation}
W > \frac{C+|\bar\nabla\Psi|}{\beta}
\end{equation}
for a constant $C>0$ as in (\ref{C}). Hence we have
\begin{equation*}
(\Delta_\Sigma\zeta+|A|^2\zeta+ \textrm{Ric}_M(N,N)\zeta)\tau + C\zeta \tau \le o(\tau)
\end{equation*}
We conclude that
\begin{equation*}
- C_0 - C_1 |A| + C_2 |A|^2 +C\le \frac{o(\tau)}{\tau},
\end{equation*}
a contradiction. It follows from this contradiction that
\begin{equation}
W(x) \le \frac{C+|\bar\nabla\Psi|}{\beta}.
\end{equation}
Now, proceeding as in the end of the proof of Proposition \ref{interior}, we use the estimate for $W(x)$ in each one of the three cases for obtaining an estimate for $W$ in $\Omega'$. This finishes the proof of the Proposition. $
\square$
\section{Proof of the Theorem \ref{main}}
\label{section-proof}
We use the classical Continuity Method for proving Theorem \ref{main}. For details, we refer the reader to \cite{gerhardt} and \cite{uraltseva-book}. For any $\tau\in [0,1]$ we consider the Neumann boundary problem $\mathcal{N}_\tau$ of finding $u\in C^{3,\alpha}(\bar\Omega)$ such that
\begin{eqnarray}
& & \mathcal{F}[\tau, x,u,\nabla u, \nabla^2 u] = 0,\\
& & \langle \frac{\nabla u}{W}, \nu\rangle + \tau \Phi=0,
\end{eqnarray}
where $\mathcal{F}$ is the quasilinear elliptic operator defined by
\begin{eqnarray}
\mathcal{F}[\tau, x,u,\nabla u, \nabla^2 u]= \textrm{div}\bigg(\frac{\nabla u}{W}\bigg) - \langle \frac{\nabla \gamma}{2\gamma}, \frac{\nabla u}{W}\rangle -\tau\Psi.
\end{eqnarray}
Since the coefficients of the first and second order terms do not depend on $u$ it follows that
\begin{equation}
\label{implicit}
\frac{\partial\mathcal{F}}{\partial u}= -\tau\frac{\partial\Psi}{\partial u} \le -\tau\beta <0.
\end{equation}
We define $\mathcal{I}\subset [0,1]$ as the subset of values of $\tau\in [0,1]$ for which the Neumann boundary problem $\mathcal{N}_\tau$ has a solution. Since $u=0$ is a solution for $\mathcal{N}_0$, it follows that $\mathcal{I}\neq \emptyset$. Moroever, the Implicit Function Theorem (see \cite{GT}, Chapter 17) implies that $\mathcal{I}$ is open in view of (\ref{implicit}). Finally, the height and gradient \emph{a priori} estimates we obtained in Sections \ref{section-height} and \ref{section-gradient} are independent of $\tau\in [0,1]$. This implies that (\ref{capillary}) is uniformly elliptic. Moreover, we may assure the existence of some $\alpha_0 \in (0,1)$ for which there there exists a constant $C>0$ independent of $\tau$ such that
\[
|u_\tau|_{1,\alpha_0,\bar\Omega}\le C.
\]
Redefine $\alpha = \alpha_0$. Thus, combining this fact, Schauder elliptic estimates and the compactness of $C^{3,\alpha_0}(\bar\Omega)$ into $C^3(\bar\Omega)$ imply that $\mathcal{I}$ is closed. It follows that $\mathcal{I}=[0,1]$.
The uniqueness follows from the Comparison Principle for elliptic PDEs. We point out that a more general uniqueness statement - comparing a nonparametric solution with a general hypersurface with the same mean curvature and contact angle at corresponding points - is also valid. It is a consequence of a flux formula coming from the existence of a Killing vector field in $M$. We refer the reader to \cite{DHL} for further details.
This finishes the proof of the Theorem \ref{main}.
\noindent
Jorge H. Lira\\
Gabriela A. Wanderley\\
Departamento de Matem\'atica \\ Universidade Federal do Cear\'a\\
Campus do Pici, Bloco 914\\ Fortaleza, Cear\'a\\ Brazil\\ 60455-760
\end{document} |
\begin{document}
\authortitle{Anders Bj\"orn and Daniel Hansevi}
{Semiregular and strongly irregular boundary points on unbounded sets}
{Semiregular and strongly irregular boundary points for {$p\mspace{1mu}$}-harmonic functions
on unbounded sets \\ in metric spaces}
\author
{Anders Bj\"orn \\
\it\small Department of Mathematics, Link\"oping University, \\
\it\small SE-581 83 Link\"oping, Sweden\/{\rm ;}
\it \small anders.bjorn@liu.se
\\
\\
Daniel Hansevi \\
\it\small Department of Mathematics, Link\"oping University, \\
\it\small SE-581 83 Link\"oping, Sweden\/{\rm ;}
\it \small daniel.hansevi@liu.se
}
\date{Preliminary version, \today}
\date{}
\title{Semiregular and strongly irregular boundary points for {$p\mspace{1mu}$}-harmonic functions on unbounded sets \\ in metric spaces}
\noindent{\small
{\bf Abstract}.
The trichotomy between regular, semiregular,
and strongly irregular boundary points for {$p\mspace{1mu}$}-harmonic functions
is obtained for
unbounded open sets in
complete metric spaces with a doubling measure
supporting a {$p\mspace{1mu}$}-Poincar\'e inequality, $1<p<\infty$.
We show that these are local properties.
We also deduce
several characterizations of semiregular points
and strongly irregular points. In particular,
semiregular points
are characterized by means of
capacity, {$p\mspace{1mu}$}-harmonic measures, removability, and semibarriers.
\noindent {\small \emph{Key words and phrases}:
barrier,
boundary regularity,
Dirichlet problem,
doubling measure,
metric space,
nonlinear potential theory,
Perron solution,
{$p\mspace{1mu}$}-harmonic function,
Poincar\'e inequality,
semibarrier,
semiregular boundary point,
strongly irregular boundary point.
}
\noindent {\small Mathematics Subject Classification (2010):
Primary: 31E05; Secondary: 30L99, 35J66, 35J92, 49Q20.
}
}
\section{Introduction}
\label{sec:intro}
Let $\Omega\subset\mathbb{R}^n$ be a nonempty bounded open set
and let $f\in C(\partial\Omega)$.
The Perron method
provides us with a unique function $P f$
that is harmonic in $\Omega$ and takes the boundary values $f$
in a weak sense, i.e.,
$P f$ is a solution of the Dirichlet problem for the Laplace equation
$\Delta u=0$.
It was introduced on $\mathbb{R}^2$ in 1923 by Perron~\cite{Perron23}
and independently by Remak~\cite{remak}.
A point $x_0\in\partial\Omega$ is
\emph{regular} if
$\lim_{\Omega\ni y\to x_0}P f(y)=f(x_0)$ for every $f\in C(\partial\Omega)$.
Wiener~\cite{Wiener} characterized
regular boundary points by means of the
\emph{Wiener criterion}
in 1924.
In the same year Lebesgue~\cite{Lebesgue}
gave a different characterization using barriers.
This definition of boundary regularity can be paraphrased in the following way:
The point $x_0\in\partial\Omega$ is regular if the following two conditions hold:
\begin{enumerate}
\renewcommand{\labelenumi}{\textup{(\roman{enumi})}}
\item For all $f\in C(\partial\Omega)$ the limit
$\lim_{\Omega\ni y\to x_0}P f(y)$ exists.
\item For all $f\in C(\partial\Omega)$ there is a sequence
$\Omega\ni y_j\to x_0$ such that $\lim_{j\to\infty}P f(y_j)=f(x_0)$.
\end{enumerate}
Perhaps surprisingly,
it is the case that for irregular boundary points
\emph{exactly one} of these two properties fails;
one might have guessed that
both can fail at the same time
but this can in fact never happen.
A boundary point $x_0\in\partial\Omega$ is
\emph{semiregular}
if the first condition holds but not the second;
and \emph{strongly irregular}
if the second condition holds but not the first.
For the Laplace equation it is well known that all boundary
points are either regular, semiregular, or strongly irregular,
and this trichotomy (in an abstract linear setting) was developed
in detail in Luke\v{s}--Mal\'y~\cite{lukesmaly}.
Key examples of semiregular and strongly irregular points
are Zaremba's punctured ball and
the Lebesgue spine, respectively,
see Examples~13.3 and~13.4 in \cite{BBbook}.
A nonlinear analogue is to consider the
Dirichlet problem for {$p\mspace{1mu}$}-harmonic functions,
which are solutions of the {$p\mspace{1mu}$}-Laplace equation
$\Delta_p u:=\Div(|\nabla u|^{p-2}\,\nabla u)=0$, $1<p<\infty$.
This leads to a nonlinear potential theory
that has been studied since the 1960s.
Initially, it was developed for $\mathbb{R}^n$,
but it has also been extended to weighted $\mathbb{R}^n$,
Riemannian manifolds,
and other settings.
In more recent years, it has been generalized to metric spaces,
see, e.g., the monograph
Bj\"orn--Bj\"orn~\cite{BBbook} and the references therein.
The Perron method was extended to such metric spaces by
Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS2}
for bounded open sets and Hansevi~\cite{hansevi2} for
unbounded open sets.
Boundary regularity for {$p\mspace{1mu}$}-harmonic functions on metric spaces
was first studied by Bj\"orn~\cite{BjIll} and
Bj\"orn--MacManus--Shan\-mu\-ga\-lin\-gam~\cite{BMS},
and a rather extensive study was undertaken
by Bj\"orn--Bj\"orn~\cite{BB} on bounded open sets.
Recently this theory was generalized to
unbounded open sets by Bj\"orn--Hansevi~\cite{BHan1};
see also Bj\"orn--Bj\"orn--Li~\cite{BBLi}.
For further references and
a historical discussion on regularity for {$p\mspace{1mu}$}-harmonic functions
we refer the interested reader to the introduction
in \cite{BHan1}.
For {$p\mspace{1mu}$}-harmonic functions on $\mathbb{R}^n$ and metric spaces
the trichotomy was obtained by Bj\"orn~\cite{ABclass} for bounded open sets.
It was also obtained for unbounded sets in certain Ahlfors regular metric spaces
by Bj\"orn--Bj\"orn--Li~\cite{BBLi}.
Adamowicz--Bj\"orn--Bj\"orn~\cite{ABB} obtained
the trichotomy for $p(\cdot)$-harmonic functions on
bounded open sets in $\mathbb{R}^n$.
In this paper we obtain the trichotomy in the following form,
where regularity is defined using upper Perron solutions
(Definition~\ref{def:reg}).
(We use upper Perron solutions as
it is not known whether
continuous functions are resolutive
with respect to unbounded {$p\mspace{1mu}$}-hyperbolic sets.)
\begin{theorem}\label{thm:trichotomy}
\textup{(Trichotomy)}
Assume that $X$ is a complete metric space equipped with a doubling measure
supporting a {$p\mspace{1mu}$}-Poincar\'e inequality\textup{,} $1<p<\infty$.
Let\/ $\Omega\subset X$ be a nonempty\/
\textup{(}possibly unbounded\/\textup{)} open set with
the capacity ${C_p}(X\setminus\Omega)>0$.
Let $x_0\in\partialOmegaX$.
Then $x_0$ is either
regular\textup{,} semiregular\textup{,} or strongly irregular
for functions that are {$p\mspace{1mu}$}-harmonic in $\Omega$.
Moreover,
\begin{itemize}
\item $x_0$ is strongly irregular
if and only if
$x_0\in\itoverline{R}\setminus R$,
where
\[
R
:= \{x\in\partialOmegaX:x\text{ is regular}\}.
\]
\item The relatively open set
\begin{equation} \label{eq-S}
S
:= \{x\in\partialOmegaX:
\text{there is $r>0$ such that ${C_p}(B(x,r)\cap\partial\Omega)=0$}\}
\end{equation}
consists exactly of all semiregular boundary points of $\partialOmegaX$.
\end{itemize}
\end{theorem}
The importance of the distinction between semiregular and
strongly irregular boundary points is perhaps best illustrated by the
equivalent characterizations
given in Theorems~\ref{thm:rem-irr-char}
and~\ref{thm:ess-irr-char}.
Semiregular points are in some ways not seen by Perron solutions.
Our contribution here is to extend the results in \cite{ABclass}
to unbounded open sets.
In order to do so there are extra complications,
most notably the fact that it is not known whether
continuous functions are resolutive
with respect to unbounded {$p\mspace{1mu}$}-hyperbolic sets.
We will also rely on the recent results by Bj\"orn--Hansevi~\cite{BHan1}
on regularity for {$p\mspace{1mu}$}-harmonic functions on unbounded sets
in metric spaces.
Most of our results are new also on unweighted $\mathbb{R}^n$.
\section{Notation and preliminaries}
\label{sec:prel}
We assume that $(X,d,\mu)$
is a metric measure space (which we simply refer to as $X$)
equipped with a metric $d$ and a
positive complete Borel measure $\mu$ such that
$0<\mu(B)<\infty$
for every ball $B\subset X$.
It follows that $X$ is second countable.
For balls
$B(x_0,r):=\{x\in X:d(x,x_0)<r\}$ and $\lambda>0$,
we let $\lambda B=\lambda B(x_0,r):=B(x_0,\lambda r)$.
The $\sigma$-algebra
on which $\mu$ is defined
is the completion of the Borel $\sigma$-algebra.
We also assume that $1<p<\infty$.
Later we will impose further requirements
on the space and on the measure.
We will keep the discussion short,
see the monographs
Bj\"orn--Bj\"orn~\cite{BBbook} and
Heinonen--Koskela--Shanmugalingam--Tyson~\cite{HKSTbook}
for proofs,
further discussion,
and references on the topics in this section.
The measure $\mu$ is
\emph{doubling} if there exists
a constant $C\geq 1$ such that
\[
0
< \mu(2B)
\leq C\mu(B)
< \infty
\]
for every ball $B\subset X$.
A metric space is
\emph{proper}
if all bounded closed subsets are compact,
and this is in particular true if the metric space
is complete and the measure is doubling.
We say that a property holds for \emph{{$p\mspace{1mu}$}-almost every curve}
if it fails only for
a curve family $\Gamma$ with zero {$p\mspace{1mu}$}-modulus,
i.e.,
there exists a nonnegative $\rho\in L^p(X)$ such that
$\int_\gamma\rho\,ds=\infty$ for every curve $\gamma\in\Gamma$.
For us, a curve in $X$ is a rectifiable nonconstant continuous mapping
from a compact interval into $X$,
and it can thus be parametrized
by its arc length $ds$.
Following Koskela--MacManus~\cite{KoMac98}
we make the following definition,
see also Heinonen--Koskela~\cite{HeKo98}.
\begin{definition}\label{def:upper-gradients}
A measurable function $g\colon X\to[0,\infty]$ is a
\emph{{$p\mspace{1mu}$}-weak upper gradient}
of the function $u\colon X\to{\overline{\R\kern-0.08em}\kern 0.08em}:=[-\infty,\infty]$
if
\[
|u(\gamma(0)) - u(\gamma(l_{\gamma}))|
\leq \int_{\gamma}g\,ds
\]
for {$p\mspace{1mu}$}-almost every curve
$\gamma\colon[0,l_{\gamma}]\to X$,
where we use the convention that the left-hand side is $\infty$
whenever at least one of the terms on the left-hand side is infinite.
\end{definition}
One way of controlling functions by their
{$p\mspace{1mu}$}-weak upper gradients is to require
a Poincar\'e inequality to hold.
\begin{definition}\label{def:Poincare-inequality}
We say that $X$ supports a {$p\mspace{1mu}$}-\emph{Poincar\'e inequality}
if there exist constants,
$C>0$ and $\lambda\geq 1$ (the dilation constant),
such that for all balls $B\subset X$,
all integrable functions $u$ on $X$,
and all {$p\mspace{1mu}$}-weak upper gradients $g$ of $u$,
\begin{equation}\label{def:Poincare-inequality-ineq}
\vint_B|u-u_B|\,d\mu
\leq C\diam(B)\biggl(\vint_{\lambda B}g^p\,d\mu\biggr)^{1/p},
\end{equation}
where $u_B:=\vint_B u\,d\mu:=\frac{1}{\mu(B)}\int_B u\,d\mu$.
\end{definition}
Shanmugalingam~\cite{Shanmugalingam00}
used {$p\mspace{1mu}$}-weak upper gradients to
define so-called Newtonian spaces.
\begin{definition}\label{def:Newtonian-space}
The \emph{Newtonian space} on $X$,
denoted
$N^{1,p}(X)$,
is the space of all
extended real-valued functions $u\in L^p(X)$
such that
\[
\|u\|_{N^{1,p}(X)}
:= \biggl(\int_X|u|^p\,d\mu + \inf_g\int_X g^p\,d\mu\biggr)^{1/p}<\infty,
\]
where the infimum is taken over all {$p\mspace{1mu}$}-weak upper gradients $g$ of $u$.
\end{definition}
The quotient space $N^{1,p}(X)/\sim$,
where $u\sim v$ if and only if $\|u-v\|_{N^{1,p}(X)}=0$,
is a Banach space, see
Shanmugalingam~\cite{Shanmugalingam00}.
\begin{definition}\label{def:Dirichlet-space}
The \emph{Dirichlet space} on $X$,
denoted
$D^p(X)$,
is the space of all
measurable extended real-valued functions on $X$
that have a {$p\mspace{1mu}$}-weak upper gradient
in $L^p(X)$.
\end{definition}
In this paper we assume that functions in $N^{1,p}(X)$ and $D^p(X)$
are defined everywhere (with values in ${\overline{\R\kern-0.08em}\kern 0.08em}$),
not just up to an equivalence class.
This is important, in particular for the definition of
{$p\mspace{1mu}$}-weak upper gradients to make sense.
A measurable set $A\subset X$ can itself be
considered to be a metric space
(with the restriction of $d$ and $\mu$ to $A$) with
the Newtonian space $N^{1,p}(A)$ and the Dirichlet space $D^p(A)$
given by
Definitions~\ref{def:Newtonian-space}~and~\ref{def:Dirichlet-space},
respectively.
If $X$ is proper and $\Omega\subset X$ is open,
then $u\in N^{1,p}_{\mathrm{loc}}(\Omega)$
if and only if
$u\in N^{1,p}(V)$
for every open $V$ such that $\overline{V}$
is a compact subset of $\Omega$,
and similarly for $D^p_{\mathrm{loc}}(\Omega)$.
If $u\in D^p_{\mathrm{loc}}(X)$,
then there exists
a \emph{minimal {$p\mspace{1mu}$}-weak upper gradient} $g_u\in L^p_{\mathrm{loc}}(X)$ of $u$ such
that $g_u\leq g$ a.e.\ for
all {$p\mspace{1mu}$}-weak upper gradients $g\in L^p_{\mathrm{loc}}(X)$ of $u$.
\begin{definition}\label{def:capacity}
The (\emph{Sobolev}) \emph{capacity} of a set $E\subset X$ is the number
\[
{C_p}(E)
:= \inf_u\|u\|_{N^{1,p}(X)}^p,
\]
where the infimum is taken over all
$u\in N^{1,p}(X)$ such that $u\geq 1$ on $E$.
A property that holds for all points
except for those in a set of capacity zero
is said to hold \emph{quasieverywhere} (\emph{q.e.}).
\end{definition}
The capacity is countably subadditive,
and it is the correct gauge
for distinguishing between two Newtonian functions:
If $u\in N^{1,p}(X)$, then $u\sim v$ if and only if $u=v$ q.e.
Moreover,
if $u,v\in N^{1,p}_{\mathrm{loc}}(X)$ and $u=v$ a.e., then $u=v$ q.e.
Continuous functions will be assumed to be real-valued
unless otherwise stated,
whereas semicontinuous functions are allowed to take values in ${\overline{\R\kern-0.08em}\kern 0.08em}$.
We use the common notation
$u_\limplus=\max\{u,0\}$,
let $\chi_E$ denote the characteristic function of the set $E$,
and consider all neighbourhoods to be open.
\section{The obstacle problem and \texorpdfstring{\boldmath$p\mspace{1mu}$}{p}-harmonic functions}
\label{sec:p-harmonic}
\emph{We assume from now on
that\/ $1<p<\infty$\textup{,}
that $X$ is a complete metric measure space
supporting a {$p\mspace{1mu}$}-Poincar\'e inequality\textup{,}
that $\mu$ is doubling\textup{,}
and that\/ $\Omega\subset X$
is a nonempty \textup{(}possibly unbounded\textup{)}
open subset with ${C_p}(X\setminus\Omega)>0$.}
\begin{definition}\label{def:min}
A function $u\in N^{1,p}_{\mathrm{loc}}(\Omega)$
is a \emph{minimizer}
in $\Omega$ if
\[
\int_{\phi\neq 0}g_u^p\,d\mu
\leq \int_{\phi\neq 0}g_{u+\phi}^p\,d\mu
\quad\text{for all }\phi\in N^{1,p}_0(\Omega),
\]
where
$N^{1,p}_0(\Omega)=\{u|_\Omega:u\in N^{1,p}(X)\text{ and }u=0\text{ in }X\setminus\Omega\}$.
Moreover,
a function is \emph{{$p\mspace{1mu}$}-harmonic}
if it is a continuous minimizer.
\end{definition}
Kinnunen--Shanmugalingam~\cite[Proposition~3.3 and Theorem~5.2]{KiSh01}
used De Giorgi's method to
show that every minimizer
$u$ has a H\"older continuous representative
$\tilde{u}$ such that $\tilde{u}=u$ q.e.
Bj\"orn--Marola~\cite[p.\ 362]{BMarola} obtained the same conclusions
using Moser iterations.
See alternatively Theorems~8.13 and 8.14 in \cite{BBbook}.
Note that $N^{1,p}_{\mathrm{loc}}(\Omega)=D^p_{\mathrm{loc}}(\Omega)$, by
Proposition~4.14 in \cite{BBbook}.
The following obstacle problem
is an important tool.
In this generality, it was
considered
by Hansevi~\cite{hansevi1}.
\begin{definition}\label{def:obst}
Let $V\subset X$ be a nonempty open subset with ${C_p}(X\setminus V)>0$.
For $\psi\colon V\to{\overline{\R\kern-0.08em}\kern 0.08em}$ and $f\in D^p(V)$,
let
\[
{\mathscr{K}}_{\psi,f}(V)
= \{v\in D^p(V):v-f\in D^p_0(V)\textup{ and }v\geq\psi\text{ q.e.\ in }V\},
\]
where $D^p_0(V)=\{u|_V:u\in D^p(X)\text{ and }u=0\text{ in }X\setminus V\}$.
We say that $u\in{\mathscr{K}}_{\psi,f}(V)$ is a
\emph{solution of the }${\mathscr{K}}_{\psi,f}(V)$-\emph{obstacle problem
\textup{(}with obstacle $\psi$ and boundary values $f$\,\textup{)}}
if
\[
\int_V g_u^p\,d\mu
\leq \int_V g_v^p\,d\mu
\quad\textup{for all }v\in{\mathscr{K}}_{\psi,f}(V).
\]
\]
When $V=\Omega$,
we usually denote ${\mathscr{K}}_{\psi,f}(\Omega)$ by ${\mathscr{K}}_{\psi,f}$.
\end{definition}
The ${\mathscr{K}}_{\psi,f}$-obstacle problem has a unique
(up to sets of capacity zero) solution
whenever ${\mathscr{K}}_{\psi,f}\neq\varnothing$, see
Hansevi~\cite[Theorem~3.4]{hansevi1}.
Furthermore,
there is
a unique lsc-regularized
solution of the ${\mathscr{K}}_{\psi,f}$-obstacle problem,
by Theorem~4.1 in~\cite{hansevi1}.
A function $u$ is \emph{lsc-regularized} if $u=u^*$,
where the \emph{lsc-regularization} $u^*$ of $u$ is defined by
\[
u^*(x)
= \essliminf_{y\to x}u(y)
:= \lim_{r\to 0}\essinf_{B(x,r)}u.
\]
If $\psi\colon\Omega\to[-\infty,\infty)$
is continuous as an extended real-valued function,
and ${\mathscr{K}}_{\psi,f}\neq\varnothing$,
then the lsc-regularized solution of the ${\mathscr{K}}_{\psi,f}$-obstacle problem
is continuous,
by Theorem~4.4 in
\cite{hansevi1}.
Hence the following generalization
of Definition~3.3 in
Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS}
(and Definition~8.31 in \cite{BBbook})
to Dirichlet functions and to unbounded sets makes sense.
It was first used by
Hansevi~\cite[Definition~4.6]{hansevi1}.
\begin{definition}\label{def:ext}
Let $V\subset X$ be a nonempty open set with ${C_p}(X\setminus V)>0$.
The \emph{{$p\mspace{1mu}$}-harmonic extension}
$H_V f$ of $f\in D^p(V)$ to $V$ is the continuous solution
of the ${\mathscr{K}}_{-\infty,f}(V)$-obstacle problem.
When $V=\Omega$, we usually write $H f$ instead of $H_\Omega f$.
\end{definition}
\begin{definition}\label{def:superharm}
A function $u\colon\Omega\to(-\infty,\infty]$
is \emph{superharmonic} in $\Omega$ if
\begin{enumerate}
\renewcommand{\theenumi}{\textup{(\roman{enumi})}}\renewcommand{\labelenumi}{\theenumi}
\item $u$ is lower semicontinuous;
\item $u$ is not identically $\infty$ in any component of $\Omega$;
\item for every nonempty open set
$V$ such that $\overline{V}$ is a compact subset of $\Omega$
and all $v\in\Lip(\overline{V})$,
we have $H_{V}v\leq u$ in $V$
whenever $v\leq u$ on $\partial V$.
\end{enumerate}
A function $u\colon\Omega\to[-\infty,\infty)$ is
\emph{subharmonic} if $-u$ is superharmonic.
\end{definition}
There are several other equivalent definitions of superharmonic functions,
see, e.g.,
Theorem~6.1 in Bj\"orn~\cite{ABsuper}
(or Theorem~9.24 and Propositions~9.25 and~9.26 in \cite{BBbook}).
An lsc-regularized
solution of the obstacle problem is always superharmonic,
by Proposition~3.9 in \cite{hansevi1}
together with Proposition~7.4 in Kinnunen--Martio~\cite{KiMa02}
(or Proposition~9.4 in \cite{BBbook}).
On the other hand,
superharmonic functions are always lsc-regularized,
by Theorem~7.14 in Kinnunen--Martio~\cite{KiMa02}
(or Theorem~9.12 in \cite{BBbook}).
\section{Perron solutions}
\label{sec:perron}
\emph{In addition to the assumptions given at the
beginning of Section~\ref{sec:p-harmonic}\textup{,}
from now on we make the convention that if\/ $\Omega$ is unbounded\textup{,}
then the point at infinity\textup{,} $\infty$\textup{,}
belongs to the boundary $\partial\Omega$.
Topological notions should therefore be understood with respect to the
one-point compactification $X^*:=X\cup\{\infty\}$.}
Note that this convention does not affect any of the definitions in Sections~\ref{sec:prel} or~\ref{sec:p-harmonic}, as $\infty$ is \emph{not} added to $X$ (it is added solely to $\partial\Omega$).
Since continuous functions are assumed to be real-valued,
every function in $C(\partial\Omega)$ is bounded
even if $\Omega$ is unbounded.
Note that since $X$ is second countable so is $X^*$,
and hence $X^*$ is metrizable by Urysohn's metrization theorem,
see, e.g., Munkres~\cite[Theorems~32.3 and~34.1]{Munkres00}.
We will only consider Perron solutions
and {$p\mspace{1mu}$}-harmonic measures with respect to $\Omega$
and therefore omit $\Omega$ from the notation below.
\begin{definition}\label{def:Perron}
Given a function $f\colon\partial\Omega\to{\overline{\R\kern-0.08em}\kern 0.08em}$,
let $\mathscr{U}_f$ be the collection of all
functions
$u$ that are superharmonic in $\Omega$,
bounded from below, and such that
\[
\liminf_{\Omega\ni y\to x}u(y)
\geq f(x)
\quad\textup{for all }x\in\partial\Omega.
\]
The \emph{upper Perron solution} of $f$ is defined by
\[
\itoverline{P} f(x)
= \inf_{u\in \mathscr{U}_f }u(x),
\quad x\in\Omega.
\]
The \emph{lower Perron solution}
can be defined similarly using subharmonic functions,
or by letting $\itunderline{P} f=-\itoverline{P}(-f)$.
If $\itoverline{P} f=\itunderline{P} f$,
then we denote the common value by $P f$.
Moreover, if $P f$ is real-valued,
then $f$ is said to be
\emph{resolutive} (with respect to $\Omega$).
\end{definition}
An immediate consequence of the definition is that
$\itoverline{P} f\leq\itoverline{P} h$ whenever $f\leq h$ on $\partial\Omega$.
Moreover, if $\alpha\in\mathbb{R}$ and $\beta\geq 0$,
then $\itoverline{P}(\alpha + \beta f)=\alpha+\beta\itoverline{P} f$.
Corollary~6.3 in Hansevi~\cite{hansevi2} shows that $\itunderline{P} f\leq\itoverline{P} f$.
In each component of $\Omega$, $\itoverline{P} f$ is either {$p\mspace{1mu}$}-harmonic or
identically $\pm\infty$,
by Theorem~4.1 in Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS2}
(or Theorem~10.10 in \cite{BBbook});
the proof is local and
applies also to unbounded $\Omega$.
\begin{definition}\label{def:p-para}
Assume that $\Omega$ is unbounded.
Then
$\Omega$ is \emph{{$p\mspace{1mu}$}-parabolic} if
for every compact $K\subset\Omega$,
there exist functions $u_j\in N^{1,p}(\Omega)$ such that
$u_j\geq 1$ on $K$ for all $j=1,2,\ldots$\,, and
\[
\int_\Omega g_{u_j}^p\,d\mu
\to 0
\quad\text{as }j\to\infty.
\]
Otherwise, $\Omega$ is
\emph{{$p\mspace{1mu}$}-hyperbolic}.
\end{definition}
For examples of {$p\mspace{1mu}$}-parabolic sets, see, e.g.,
Hansevi~\cite{hansevi2}.
The main reason for introducing {$p\mspace{1mu}$}-parabolic sets
in \cite{hansevi2}
was to be able to obtain resolutivity results,
and in particular,
establishing the following resolutivity and invariance result
for {$p\mspace{1mu}$}-parabolic unbounded sets.
The first such invariance result for {$p\mspace{1mu}$}-harmonic functions
was obtained, for bounded sets, by Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS2}.
\begin{theorem}\label{thm-hansevi2-main}
\textup{(\cite[Theorem~6.1]{BBS2}
and~\cite[Theorem~7.8]{hansevi2})}
Assume that\/ $\Omega$ is bounded or {$p\mspace{1mu}$}-parabolic.
Let $h\colon\partial\Omega\to{\overline{\R\kern-0.08em}\kern 0.08em}$
be $0$ q.e.\ on $\partial\Omega\setminus\{\infty\}$ and
$f\in C(\partial\Omega)$.
Then $f$ and $f+h$ are resolutive and $P(f+h)=P f$.
\end{theorem}
Resolutivity of
continuous functions is not known for unbounded {$p\mspace{1mu}$}-hyperbolic sets, but it is rather
trivial to show that constant functions are resolutive.
We shall
show that a similar invariance result as in
Theorem~\ref{thm-hansevi2-main} can be obtained
for constant functions on unbounded {$p\mspace{1mu}$}-hyperbolic sets.
This fact will be an important tool when
characterizing semiregular boundary points.
We first need to define {$p\mspace{1mu}$}-harmonic measures,
which despite the name are (usually)
not
measures,
but
nonlinear generalizations of the harmonic measure.
\begin{definition}\label{def:p-harmonic-measure}
The
\emph{upper and lower {$p\mspace{1mu}$}-harmonic measures}
of $E\subset\partial\Omega$ are
\[
{\overline{\omega}}(E)
:= \itoverline{P}\chi_E
\quad\text{and}\quad
{\itunderline{\omega}}(E)
:= \itunderline{P}\chi_E,
\]
respectively.
\end{definition}
\begin{proposition}\label{prop-inv-pharm}
Let $E\subset\partial\Omega\setminus\{\infty\}$\textup{,}
$a\in\mathbb{R}$\textup{,} and $f\colon\partial\Omega\to{\overline{\R\kern-0.08em}\kern 0.08em}$ be such that
${C_p}(E)=0$ and $f(x)=a$ for all $x\in\partial\Omega\setminus E$.
Then $P f\equiv a$.
In particular\textup{,} ${\overline{\omega}}(E)={\itunderline{\omega}}(E)\equiv 0$.
\end{proposition}
\begin{proof}
Without loss of generality we may assume that $a=0$.
As the capacity ${C_p}$ is an outer capacity,
by Corollary~1.3 in Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS5}
(or \cite[Theorem~5.31]{BBbook}),
we can find open sets $G'_j\supset E$ such that ${C_p}(G'_j)<2^{-j-1}$,
$j=1,2,\ldots$\,.
From the decreasing sequence
$\{\bigcup_{k=j}^\infty G'_k\}_{j=1}^\infty$, we can choose
a decreasing subsequence of open sets
$G_k$ with ${C_p}(G_k)<2^{-kp}$, $k=1,2,\ldots$\,.
By Lemma~5.3 in Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS2}
(or \cite[Lemma~10.17]{BBbook}),
there is a decreasing sequence
$\{\psi_j\}_{j=1}^\infty$ of nonnegative functions
such that $\lim_{j\to\infty}\|\psi_j\|_{N^{1,p}(X)}=0$ and
$\psi_j\geq k-j$ in $G_k$ whenever $k>j$.
In particular, $\psi_j=\infty$ on $E$ for each $j=1,2,\ldots$\,.
Let $u_j$ be the lsc-regularized
solution of the ${\mathscr{K}}_{\psi_j,0}(\Omega)$-obstacle
problem, $j=1,2,\ldots$\,.
As $u_j$ is lsc-regularized
and $u_j\geq\psi_j$ q.e.,
we see that $u_j\geq k-j$ everywhere in $G_k$ whenever $k>j$,
and also that $u_j\geq 0$ everywhere in $\Omega$.
In particular,
$\liminf_{\Omega\ni y\to x}u_j(y)=\infty$ for $x\in E$,
which shows that $u_j\in\mathscr{U}_f(\Omega)$ and thus $u_j\geq\itoverline{P} f$.
On the other hand, Theorem~3.2 in Hansevi~\cite{hansevi2}
shows that the sequence $u_j$ decreases q.e.\ to $0$,
and hence $\itoverline{P} f\leq 0$ q.e.\ in $\Omega$.
Since $\itoverline{P} f$ is continuous, we get that $\itoverline{P} f\leq 0$
everywhere in $\Omega$.
Applying this to $-f$ shows that
$\itunderline{P} f=-\itoverline{P}(-f)\geq 0$ everywhere in $\Omega$,
which together with the inequality $\itunderline{P} f\leq\itoverline{P} f$ shows that
$\itunderline{P} f=\itoverline{P} f\equiv 0$.
In particular, ${\itunderline{\omega}}(E)=\itunderline{P}\chi_E\equiv 0$
and ${\overline{\omega}}(E)=\itoverline{P}\chi_E\equiv 0$.
\end{proof}
We will also need the following result.
\begin{proposition}\label{prop:Perron-semicont}
If $f\colon{$p\mspace{1mu}$}artial\Omega\to[-\infty,\infty)$
is an upper semicontinuous function\textup{,}
then
\[
\itoverline{P} f
= \inf_{C({$p\mspace{1mu}$}artial\Omega)\ni{$p\mspace{1mu}$}hi\geq f}\itoverline{P}{$p\mspace{1mu}$}hi.
\]
\end{proposition}
\begin{proof}
Let $\mathscr{F}=\{\phi\in C(\partial\Omega):\phi\geq f\}$.
Then $\mathscr{F}$ is downward directed,
i.e., for each pair of functions $u,v \in \mathscr{F}$
there is a function $w\in\mathscr{F}$ such that $w\leq\min\{u,v\}$.
Because $f$ is upper semicontinuous, $\partial\Omega$ is compact,
and $X^*$ is metrizable,
it follows from Proposition~1.12 in \cite{BBbook}
that $f=\inf_{\phi\in\mathscr{F}}\phi$.
Hence by Lemma~10.31 in~\cite{BBbook}
(whose proof is valid also for unbounded $\Omega$)
$\itoverline{P} f=\inf_{\phi\in\mathscr{F}}\itoverline{P}\phi$.
\end{proof}
\section{Boundary regularity}
\label{sec:bdy-regularity}
It is not known whether
continuous functions are resolutive also
with respect to unbounded {$p\mspace{1mu}$}-hyperbolic sets.
We therefore define regular boundary points in the following way.
\begin{definition}\label{def:reg}
We say that a boundary point $x_0\in\partial\Omega$ is \emph{regular} if
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
= f(x_0)
\quad\text{for all }f\in C(\partial\Omega).
\]
This can be paraphrased in the following way:
A point $x_0\in\partial\Omega$ is regular if the following two conditions hold:
\begin{enumerate}
\renewcommand{\theenumi}{\textup{(\Roman{enumi})}}\renewcommand{\labelenumi}{\theenumi}
\item\label{semi}
For all $f\in C(\partial\Omega)$ the limit
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
\quad\text{exists}.
\]
\item\label{strong}
For all $f\in C(\partial\Omega)$ there is a sequence
$\{y_j\}_{j=1}^\infty$ in $\Omega$ such that
\[
\lim_{j\to\infty}y_j
= x_0
\quad\text{and}\quad
\lim_{j\to\infty}\itoverline{P} f(y_j)
= f(x_0).
\]
\end{enumerate}
Furthermore, we say that a boundary point
$x_0\in\partial\Omega$ is \emph{semiregular}
if \ref{semi} holds but not \ref{strong};
and \emph{strongly irregular}
if \ref{strong} holds but not \ref{semi}.
\end{definition}
We do not require $\Omega$ to be bounded in this definition,
but if it is,
then it follows from
Theorem~6.1 in Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS2}
(or Theorem~10.22 in \cite{BBbook}) that our
definition coincides with the definitions of regularity in
Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS}, \cite{BBS2},
and
Bj\"orn--Bj\"orn~\cite{BB}, \cite{BBbook},
where
regularity is defined using
$P f$ or
$H f$.
Thus we can use the boundary regularity results
from these papers
when considering bounded sets.
Since $\itoverline{P} f=-\itunderline{P}(-f)$,
the same concept of regularity is obtained if we replace the
upper Perron solution by the lower Perron solution in
Definition~\ref{def:reg}.
Boundary regularity for {$p\mspace{1mu}$}-harmonic functions
on unbounded sets in metric spaces was
recently studied by Bj\"orn--Hansevi~\cite{BHan1}.
We will need some of the characterizations
obtained therein.
For the reader's convenience we state these results here.
We will not discuss regularity of the point $\infty$
in this paper.
One of the important results we will need from \cite{BHan1}
is the Kellogg property.
\begin{theorem}\label{thm:kellogg}
\textup{(The Kellogg property)}
If $I$ is the set of irregular points in $\partial\Omega\setminus\{\infty\}$\textup{,}
then ${C_p}(I)=0$.
\end{theorem}
\begin{definition}\label{def:barrier}
A function $u$ is a \emph{barrier} (with respect to $\Omega$)
at $x_0\in\partial\Omega$ if
\begin{enumerate}
\renewcommand{\theenumi}{\textup{(\roman{enumi})}}\renewcommand{\labelenumi}{\theenumi}
\item\label{barrier-i}
$u$ is superharmonic in $\Omega$;
\item\label{barrier-ii}
$\lim_{\Omega\ni y\to x_0}u(y)=0$;
\item\label{barrier-iii}
$\liminf_{\Omega\ni y\to x}u(y)>0$ for every $x\in\partial\Omega\setminus\{x_0\}$.
\end{enumerate}
\end{definition}
Superharmonic functions satisfy the
strong minimum principle, i.e.,
if $u$ is superharmonic and attains its minimum in some component
$G$ of $\Omega$, then $u|_G$ is constant
(see Theorem~9.13 in \cite{BBbook}).
This implies that a barrier is always nonnegative,
and furthermore, that a barrier is positive if
$\partial G\setminus\{x_0\}\neq\varnothing$ for
every component $G\subset\Omega$.
The following result is a collection
of the key facts we will need from
Bj\"orn--Hansevi~\cite[Theorems~5.2, 5.3, 6.2, and 9.1]{BHan1}.
\begin{theorem}\label{thm:reg}
Let $x_0\in\partial\Omega\setminus\{\infty\}$
and $\delta>0$.
Also define $d_{x_0}\colon X^*\to[0,1]$
by
\begin{equation}\label{eq-dx0}
d_{x_0}(x)
= \begin{cases}
\min\{d(x,x_0),1\}
& \text{if }x\neq\infty, \\
1
& \text{if }x=\infty.
\end{cases}
\end{equation}
Then the following are equivalent\/\textup{:}
\begin{enumerate}
\item\label{reg-reg}
The point $x_0$ is regular.
\item\label{barrier-bar-Om}
There is a barrier at $x_0$.
\item\label{barrier-bar-pos-Om}
There is a positive continuous barrier at $x_0$.
\item\label{barrier-reg-B}
The point $x_0$ is regular with respect to $\Omega\cap B(x_0,\delta)$.
\item\label{reg-cont-x0}
It is true that
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
= f(x_0)
\]
for all $f\colon\partial\Omega\to\mathbb{R}$ that are
bounded on $\partial\Omega$ and continuous at $x_0$.
\item \label{reg-Pd}
It is true that
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
= 0.
\]
\item\label{reg-2-obst-dist}
The continuous solution $u$ of the ${\mathscr{K}}_{d_{x_0},d_{x_0}}$-obstacle
problem\textup{,}
satisfies
\[
\lim_{\Omega\ni y\to x_0}u(y)
= 0.
\]
\item\label{reg-2-obst-cont}
If $f\in C(\overline{\Omega})\cap D^p(\Omega)$\textup{,}
then the continuous solution $u$ of the ${\mathscr{K}}_{f,f}$-obstacle
problem\textup{,}
satisfies
\[
\lim_{\Omega\ni y\to x_0}u(y)
= f(x_0).
\]
\end{enumerate}
\end{theorem}
\section{Semiregular and strongly irregular points}
\label{sec:trichotomy}
We are now ready to start our discussion of semiregular
and strongly irregular boundary points.
We begin by proving Theorem~\ref{thm:trichotomy}.
\begin{proof}[Proof of Theorem~\ref{thm:trichotomy}]
We consider two complementary cases.
\emph{Case} 1:
\emph{There exists $r>0$ such that ${C_p}(B\cap\partial\Omega)=0$\textup{,}
where $B:=B(x_0,r)$.}
Let $G$ be the component of $B$ containing $x_0$.
Since $X$ is quasiconvex, by, e.g., Theorem~4.32 in \cite{BBbook},
and thus locally connected, it follows that $G$ is open.
Let $F=G\setminus\Omega$.
Then
\[
{C_p}(G\cap\partial F)
= {C_p}(G\cap\partial\Omega)
\leq {C_p}(B\cap\partial\Omega)
= 0,
\]
and hence
${C_p}(F)=0$,
by
Lemma~8.6
in Bj\"orn--Bj\"orn--Shanmugalingam~\cite{BBS2}
(or Lemma~4.5 in \cite{BBbook}).
Let $f\in C(\partial\Omega)$.
Then the Perron solution $\itoverline{P} f$ is bounded (as $f$ is bounded),
and thus $\itoverline{P} f$ has a {$p\mspace{1mu}$}-harmonic extension $U$
to $\Omega\cup G$,
by
Theorem~6.2 in Bj\"orn~\cite{ABremove}
(or Theorem~12.2 in \cite{BBbook}).
Since $U$ is continuous,
it follows that
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
= \lim_{\Omega\ni y\to x_0}U(y)
= U(x_0),
\]
i.e.,
condition~\ref{semi} in Definition~\ref{def:reg}
holds,
and hence $x_0$ is either regular or semiregular.
To show that $x_0$ must be semiregular,
we let $f(x)=(1-d_{x_0}(x)/{\min\{r,1\}})_\limplus$
on $\partial\Omega$,
where $d_{x_0}$ is defined by \eqref{eq-dx0}.
Then $f=0$ q.e.\ on $\partial\Omega$,
and Proposition~\ref{prop-inv-pharm}
shows that $P f\equiv 0$.
Since
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
= 0
\neq 1
= f(x_0),
\]
$x_0$ is not regular,
and hence must be semiregular.
\emph{Case} 2:
\emph{For all $r>0$\textup{,} ${C_p}(B(x_0,r)\cap\partial\Omega)>0$.}
For every $j=1,2,\ldots$\,,
${C_p}(B(x_0,1/j)\cap\partial\Omega)>0$,
and by the Kellogg property (Theorem~\ref{thm:kellogg})
there exists a regular boundary point
$x_j\in B(x_0,1/j)\cap\partial\Omega$.
(We do not require the $x_j$ to be distinct.)
Let $f\in C(\partial\Omega)$.
Because $x_j$ is regular,
there is $y_j\in B(x_j,1/j)\cap\Omega$
so that $|\itoverline{P} f(y_j)-f(x_j)|<1/j$.
It follows that $y_j\to x_0$ and $\itoverline{P} f(y_j)\to f(x_0)$
as $j\to\infty$,
i.e.,
condition~\ref{strong} in Definition~\ref{def:reg}
holds,
and hence $x_0$ must be either regular or strongly irregular.
As there are no strongly irregular points in case~1,
it follows that $x_0\in\partial\Omega\setminus\{\infty\}$
is strongly irregular if and only if $x_0\in\itoverline{R}\setminus R$,
where $R:=\{x\in\partial\Omega\setminus\{\infty\}:x\text{ is regular}\}$.
And since there are no semiregular points in case~2,
the set $S$ in \eqref{eq-S}
consists exactly of all semiregular boundary points of $\partial\Omega\setminus\{\infty\}$.
\end{proof}
In fact, in case~2
it is possible to improve upon the result above.
The sequence
$\{y_j\}_{j=1}^\infty$
can be chosen independently of $f$,
see the characterization \ref{not-reg-one-seq}
in Theorem~\ref{thm:rem-irr-char}.
We will characterize
semiregular points by
a number of equivalent conditions in Theorem~\ref{thm:rem-irr-char}.
But first we obtain
the following characterizations
of relatively open sets of semiregular points.
\begin{theorem}\label{thm:irr-char-V}
Let $V\subset\partial\Omega\setminus\{\infty\}$ be relatively open.
Then the following statements are equivalent\/\textup{:}
\begin{enumerate}
\item\label{V-semireg}
The set $V$ consists entirely of semiregular points.
\item\label{V-R}
The set $V$ does not contain any regular point.
\item\label{V-Cp-V-bdy}
The capacity ${C_p}(V)=0$.
\item\label{V-upharm}
The upper {$p\mspace{1mu}$}-harmonic measure
${\overline{\omega}}(V)\equiv 0$.
\item\label{V-lpharm}
The lower {$p\mspace{1mu}$}-harmonic measure
${\itunderline{\omega}}(V)\equiv 0$.
\item\label{V-alt-def-irr-super}
The set\/ $\Omega\cup V$ is open in $X$\textup{,}
${C_p}(X\setminus(\Omega\cup V))>0$\textup{,}
$\mu(V)=0$\textup{,}
and every function that is bounded and superharmonic in $\Omega$
has a superharmonic extension to $\Omega\cup V$.
\item\label{V-alt-def-irr}
\setcounter{saveenumi}{\value{enumi}}
The set\/ $\Omega\cup V$ is open in $X$\textup{,}
${C_p}(X\setminus(\Omega\cup V))>0$\textup{,}
and every function that is bounded and {$p\mspace{1mu}$}-harmonic in $\Omega$
has a {$p\mspace{1mu}$}-harmonic extension to $\Omega\cup V$.
\end{enumerate}
If moreover
$\Omega$ is bounded or {$p\mspace{1mu}$}-parabolic\textup{,}
then also the following statement
is equivalent to the statements above.
\begin{enumerate}
\setcounter{enumi}{\value{saveenumi}}
\item\label{V-rem-motiv}
For every $f\in C(\partial\Omega)$\textup{,}
the Perron solution $P f$ depends only on $f|_{\partial\Omega\setminus V}$
\textup{(}i.e., if $f,h\in C(\partial\Omega)$ and
$f=h$ on $\partial\Omega\setminus V$\textup{,}
then $P f\equiv P h$\textup{)}.
\end{enumerate}
\end{theorem}
Note that there are examples of sets
with positive capacity and even positive measure
which are removable for bounded {$p\mspace{1mu}$}-harmonic functions,
see Section~9 in Bj\"orn~\cite{ABremove} (or \cite[Section~12.3]{BBbook}).
For superharmonic functions it is not known whether
such examples exist.
This motivates the formulations of
\ref{V-alt-def-irr-super}
and~\ref{V-alt-def-irr}.
The following example shows that the condition
${C_p}(X\setminus(\Omega\cup V))>0$
cannot be dropped from \ref{V-alt-def-irr},
nor from \ref{alt-def-irr} in
Theorem~\ref{thm:rem-irr-char} below.
We do not know whether the conditions
${C_p}(X\setminus(\Omega\cup V))>0$ and $\mu(V)=0$
can be dropped from \ref{V-alt-def-irr-super},
but they are needed for our proof.
Similarly they are needed in
\ref{alt-def-irr-super} in
Theorem~\ref{thm:rem-irr-char} below.
The condition ${C_p}(X\setminus(\Omega\cup V))>0$
was unfortunately overlooked in Bj\"orn~\cite{ABclass}
and in Bj\"orn--Bj\"orn~\cite{BBbook}:
It should be added to
conditions (d$'$) and (e$'$) in
\cite[Theorem~3.1]{ABclass},
to (h) and (i) in
\cite[Theorem~3.3]{ABclass},
to (f$'$) and (g$'$) in
\cite[Theorem~13.5]{BBbook},
and to (j) and (l) in
\cite[Theorem~13.10]{BBbook}.
\begin{example}
Let $X=[0,1]$ be equipped with the Lebesgue measure,
and let $1<p<\infty$, $\Omega=(0,1]$ and $V=\{0\}$.
Then ${C_p}(V)>0$.
In this case the {$p\mspace{1mu}$}-harmonic functions on $\Omega$ are just
the constant functions, and these trivially
have {$p\mspace{1mu}$}-harmonic extensions to $X$.
Thus the condition
${C_p}(X\setminus(\Omega\cup V))>0$
cannot be dropped from \ref{V-alt-def-irr}.
On the other hand, the set $V$ is not removable
for bounded superharmonic functions on $\Omega$,
see Example~9.1 in Bj\"orn~\cite{ABremove}
or Example~12.17 in \cite{BBbook}.
\end{example}
\begin{proof}[Proof of Theorem~\ref{thm:irr-char-V}]
\ref{V-R} $\ensuremath{\Rightarrow} $ \ref{V-Cp-V-bdy}
This follows from the Kellogg property (Theorem~\ref{thm:kellogg}).
\ref{V-Cp-V-bdy} $\ensuremath{\Rightarrow} $ \ref{V-upharm}
This follows directly from Proposition~\ref{prop-inv-pharm}.
\ref{V-upharm} $\ensuremath{\Rightarrow} $ \ref{V-lpharm}
This is trivial.
\ref{V-lpharm} $\ensuremath{\Rightarrow} $ \ref{V-R}
Suppose that $x\in V$ is regular.
Because $\chi_V$ is continuous at $x$,
this yields a contradiction,
as it follows from Theorem~\ref{thm:reg} that
\[
0
= \lim_{\Omega\ni y\to x}{\itunderline{\omega}}(V)(y)
= \lim_{\Omega\ni y\to x}\itunderline{P}\chi_V(y)
= -\lim_{\Omega\ni y\to x}\itoverline{P}(-\chi_V)(y)
= \chi_V(x)
= 1.
\]
Thus $V$ does not contain any regular point.
\ref{V-Cp-V-bdy} $\ensuremath{\Rightarrow} $ \ref{V-alt-def-irr-super}
Suppose that ${C_p}(V)=0$.
Then ${C_p}(X\setminus(\Omega\cup V))={C_p}(X\setminus\Omega)>0$
and $\mu(V)=0$.
Let $x\in V$ and
let $G$ be a connected neighbourhood of $x$ such that
$G\cap\partial\Omega\subset V$.
Sets of capacity zero cannot separate space,
by Lemma~4.6 in Bj\"orn--Bj\"orn~\cite{BBbook},
and hence $G\setminus\partial\Omega$ must be connected, i.e.,
$G\subset\overline\Omega$,
from which it follows that $\Omega\cup V$ is open in $X$.
The superharmonic extension is now provided by
Theorem~6.3 in Bj\"orn~\cite{ABremove}
(or Theorem~12.3 in \cite{BBbook}).
\ref{V-alt-def-irr-super} $\ensuremath{\Rightarrow} $ \ref{V-alt-def-irr}
Let $u$ be a bounded {$p\mspace{1mu}$}-harmonic function on $\Omega$.
Then, by assumption, $u$ has a superharmonic extension $U$ to $\Omega\cup V$.
Moreover, as $-u$ is also bounded and {$p\mspace{1mu}$}-harmonic,
there is a superharmonic extension $W$ of $-u$ to $\Omega\cup V$.
Now, as $-W$ is clearly a subharmonic extension of $u$ to $\Omega\cup V$,
Proposition~6.5 in Bj\"orn~\cite{ABremove}
(or Proposition~12.5 in \cite{BBbook}) asserts that
$U=-W$ is {$p\mspace{1mu}$}-harmonic
(it is here that we use that $\mu(V)=0$).
\ref{V-alt-def-irr} $\ensuremath{\Rightarrow} $ \ref{V-semireg}
Let $x_0\in V$.
Since $\Omega\cup V$ is open in $X$, we see that $V\cap\partial(\Omega\cup V)=\varnothing$,
and hence $x_0\notin\partial(\Omega\cup V)$.
Let
\[
h(x)
= \biggl(1-\frac{d_{x_0}(x)}{\min\{
\dist(x_0,\partial(\Omega\cup V)),1\}}\biggr)_\limplus,
\quad x\in\partial\Omega,
\]
where $d_{x_0}$ is defined by \eqref{eq-dx0}.
Then $\itoverline{P} h$ is bounded and
has a {$p\mspace{1mu}$}-harmonic extension $U$ to $\Omega\cup V$,
and hence the
Kellogg property (Theorem~\ref{thm:kellogg}) implies that
\begin{equation}\label{eq-U=0}
\lim_{\Omega\cup V\ni y\to x}U(y)
= \lim_{\Omega\ni y\to x}\itoverline{P} h(y)
= h(x)
= 0
\quad\text{for q.e. }x\in\partial(\Omega\cup V)\setminus\{\infty\}.
\end{equation}
Let $G$ be the component of $\Omega\cup V$ containing $x_0$.
Then
\[
{C_p}(X\setminus G)
\geq {C_p}(X\setminus(\Omega\cup V))
>0.
\]
It then follows from
Lemma~4.3 in Bj\"orn--Bj\"orn~\cite{BB}
(or Lemma~4.5 in \cite{BBbook})
that ${C_p}(\partial G)>0$.
In particular, it follows from \eqref{eq-U=0} that $U\not\equiv 1$ in $G$,
and thus, by the strong maximum principle
(see Corollary~6.4 in Kinnunen--Shanmugalingam~\cite{KiSh01}
or \cite[Theorem~8.13]{BBbook}),
that
$U(x_0)<1$.
Therefore
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} h(y)
= U(x_0)
< 1
= h(x_0),
\]
and hence $x_0$ must be irregular.
However, if $f\in C(\partial\Omega)$,
then $\itoverline{P} f$ has a {$p\mspace{1mu}$}-harmonic extension $W$ to $\Omega\cup V$.
Since $W$ is continuous in $\Omega\cup V$,
it follows that
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
= W(x_0),
\]
and hence the limit on the left-hand side always exists.
Thus $x_0$ is semiregular.
\ref{V-semireg} $\ensuremath{\Rightarrow} $ \ref{V-R}
This is trivial.
We now assume that $\Omega$ is bounded or {$p\mspace{1mu}$}-parabolic.
\ref{V-Cp-V-bdy} $\ensuremath{\Rightarrow} $ \ref{V-rem-motiv}
This implication follows from
Theorem~\ref{thm-hansevi2-main}.
\ref{V-rem-motiv} $\ensuremath{\Rightarrow} $ \ref{V-lpharm}
As $-\chi_V\colon\partial\Omega\to\mathbb{R}$ is upper semicontinuous,
it follows from Proposition~\ref{prop:Perron-semicont},
and \ref{V-rem-motiv},
that
\[
0
\leq {\itunderline{\omega}}(V)
= -\itoverline{P}(-\chi_V)
= -\inf_{\substack{\phi\in C(\partial\Omega)\\-\chi_V\leq\phi\leq 0}}\itoverline{P}\phi
= 0,
\]
and hence ${\itunderline{\omega}}(V)=0$.
\end{proof}
\begin{definition}\label{def:semibarrier}
A function $u$ is a \emph{semibarrier} (with respect to $\Omega$)
at $x_0\in\partial\Omega$ if
\begin{enumerate}
\renewcommand{\theenumi}{\textup{(\roman{enumi})}}
\item\label{semibarrier-i}
$u$ is superharmonic in $\Omega$;
\item\label{semibarrier-ii}
$\liminf_{\Omega\ni y\to x_0}u(y)=0$;
\item\label{semibarrier-iii}
$\liminf_{\Omega\ni y\to x}u(y)>0$ for every $x\in\partial\Omega\setminus\{x_0\}$.
\end{enumerate}
Moreover, we say that $u$ is a
\emph{weak semibarrier}
(with respect to $\Omega$) at $x_0\in\partial\Omega$
if $u$ is a positive superharmonic function such that
\ref{semibarrier-ii} holds.
\end{definition}
Now we are ready to characterize
the semiregular points by means
of capacity, {$p\mspace{1mu}$}-harmonic measures, removable singularities,
and semibarriers.
In particular, we show that semiregularity is a local property.
\begin{theorem}\label{thm:rem-irr-char}
Let $x_0\in\partial\Omega$\textup{,} $\delta>0$\textup{,} and
$d_{x_0}\colon X^*\to[0,1]$ be defined by \eqref{eq-dx0}.
Then the following statements are equivalent\/\textup{:}
\begin{enumerate}
\item\label{semireg}
The point $x_0$ is semiregular.
\item\label{semireg-local}
The point $x_0$ is semiregular with respect to $G:=\Omega\cap B(x_0,\delta)$.
\item\label{not-reg-one-seq}
There is no sequence $\{y_j\}_{j=1}^\infty$ in $\Omega$ such that
$y_j\to x_0$ as $j\to\infty$ and
\[
\lim_{j\to\infty}\itoverline{P} f(y_j)
= f(x_0)
\quad\text{for all }f\in C(\partial\Omega).
\]
\item\label{not-reg}
The point $x_0$ is neither regular nor strongly irregular.
\item\label{R}
It is true that
$x_0\notin\overline{\{x\in\partial\Omega:x\text{ is regular}\}}$.
\item\label{Cp-V-bdy}
There is a neighbourhood $V$ of $x_0$ such that ${C_p}(V\cap\partial\Omega)=0$.
\item\label{Cp-V}
There is a neighbourhood $V$ of $x_0$
such that ${C_p}(V\setminus\Omega)=0$.
\item\label{upharm}
There is a neighbourhood $V$ of $x_0$
such that ${\overline{\omega}}(V\cap\partial\Omega)\equiv 0$.
\item\label{lpharm}
There is a neighbourhood $V$ of $x_0$
such that ${\itunderline{\omega}}(V\cap\partial\Omega)\equiv 0$.
\item\label{alt-def-irr}
There is a neighbourhood $V\subset\overline\Omega$ of $x_0$\textup{,}
with ${C_p}(X\setminus(\Omega\cup V))>0$\textup{,}
such that every function that is bounded and
{$p\mspace{1mu}$}-harmonic in $\Omega$ has a {$p\mspace{1mu}$}-harmonic extension to
$\Omega\cup V$.
\item\label{rem-irr}
There is a neighbourhood $V$ of $x_0$
such that every function that is bounded and
{$p\mspace{1mu}$}-harmonic in $\Omega$ has a {$p\mspace{1mu}$}-harmonic extension to
$\Omega\cup V$\textup{,} and moreover $x_0$ is irregular.
\item\label{alt-def-irr-super}
There is a neighbourhood $V$ of $x_0$\textup{,}
with ${C_p}(X\setminus(\Omega\cup V))>0$
and $\mu(V\setminus\nobreak\Omega)=0$\textup{,}
such that every function that is bounded and
superharmonic in $\Omega$ has a superharmonic extension to
$\Omega\cup V$.
\item\label{d-lim}
It is true that
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
> 0.
\]
\item\label{d-liminf}
It is true that
\[
\liminf_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
> 0.
\]
\item\label{weaksemibarrier}
There is no weak semibarrier at $x_0$.
\item\label{semibarrier}
There is no semibarrier at $x_0$.
\item\label{obst-dist-semibarrier}
\setcounter{saveenumi}{\value{enumi}}
The continuous solution of the ${\mathscr{K}}_{d_{x_0},d_{x_0}}$-obstacle problem
is not a semibarrier at $x_0$.
\end{enumerate}
If moreover
$\Omega$ is bounded or {$p\mspace{1mu}$}-parabolic\textup{,}
then also the following statement
is equivalent to the statements above.
\begin{enumerate}
\setcounter{enumi}{\value{saveenumi}}
\item \label{rem-motiv}
There is a neighbourhood $V$ of $x_0$ such that
for every $f\in C(\partial\Omega)$\textup{,}
the Perron solution $P f$ depends only on $f|_{\partial\Omega\setminus V}$
\textup{(}i.e., if $f,h\in C(\partial\Omega)$ and
$f=h$ on $\partial\Omega\setminus V$\textup{,}
then $P f\equiv P h$\textup{)}.
\end{enumerate}
\end{theorem}
\begin{proof}
\ref{R}
$\eqv$ \ref{Cp-V-bdy}
$\eqv$ \ref{upharm}
$\eqv$ \ref{lpharm}
$\ensuremath{\Rightarrow} $ \ref{semireg}
This follows directly from Theorem~\ref{thm:irr-char-V},
with $V$ therein
corresponding to $V\cap\partial\Omega$ here.
\ref{semireg} $\ensuremath{\Rightarrow} $ \ref{d-lim}
Since $x_0$ is semiregular, the limit
\[
\alpha
:= \lim_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
\]
exists.
If $\alpha=0$, then $x_0$ must be regular by Theorem~\ref{thm:reg},
which is a contradiction.
Hence $\alpha>0$.
\ref{d-lim} $\ensuremath{\Rightarrow} $ \ref{d-liminf} $\ensuremath{\Rightarrow} $
\ref{not-reg} $\ensuremath{\Rightarrow} $ \ref{not-reg-one-seq}
These implications are trivial.
$\neg$\ref{R} $\ensuremath{\Rightarrow} $ $\neg$\ref{not-reg-one-seq}
Suppose that $x_0\in\overline{\{x\in\partial\Omega:x\textup{ is regular}\}}$.
For each integer $j\geq 2$,
there exists a regular point $x_j\in B(x_0,1/j)\cap\partial\Omega$.
Define $f_j\in C(\partial\Omega)$
by letting
\[
f_j(x)
= (jd_{x_0}(x)-1)_\limplus,
\quad j=2,3,\ldots.
\]
Because $x_j$ is regular,
there is $y_j\in B(x_j,1/j)\cap\Omega$ such that
\[
|\itoverline{P} f_j(y_j)|
= |f_j(x_j)-\itoverline{P} f_j(y_j)|
< 1/j.
\]
Hence $y_j\to x_0$ and $\itoverline{P} f_j(y_j)\to 0$ as $j\to\infty$.
Let $f\in C(\partial\Omega)$ and $\alpha:=f(x_0)$.
Let $\varepsilon>0$.
Then we can find an integer $k\geq 2$ such that
$|f-\alpha|\leq\varepsilon$ on $B(x_0,2/k)\cap\partial\Omega$.
Choose $m$ such that $|f-\alpha|\leq m$.
It follows that $f-\alpha\leq mf_j+\varepsilon$ for every $j\geq k$, and thus
\[
\limsup_{j\to\infty}\itoverline{P} f(y_j)
\leq \limsup_{j\to\infty}\itoverline{P}(mf_j+\alpha+\varepsilon)(y_j)
= m\lim_{j\to\infty}\itoverline{P} f_j(y_j)+\alpha+\varepsilon
= \alpha+\varepsilon.
\]
Letting $\varepsilon\to 0$ shows that
$\limsup_{j\to\infty}\itoverline{P} f(y_j)\leq\alpha$.
Applying this to $\tilde{f}=-f$ yields
$\limsup_{j\to\infty}\itoverline{P} \tilde{f}(y_j)\leq-\alpha$.
It follows that
\[
\liminf_{j\to\infty}\itoverline{P} f(y_j)
\geq \liminf_{j\to\infty}\itunderline{P} f(y_j)
= -\limsup_{j\to\infty}\itoverline{P} \tilde{f}(y_j)
\geq \alpha,
\]
and hence
$\lim_{j\to\infty}\itoverline{P} f(y_j)=f(x_0)$.
\ref{Cp-V-bdy} $\eqv$ \ref{semireg-local}
Observe that \ref{Cp-V-bdy} is equivalent to
the existence of a neighbourhood $U$ of $x_0$
with ${C_p}(U\cap\partial G)=0$,
which is equivalent to \ref{semireg-local},
by the already proved equivalence \ref{Cp-V-bdy} $\eqv$ \ref{semireg}
applied to $G$ instead of $\Omega$.
\ref{Cp-V-bdy} $\ensuremath{\Rightarrow} $ \ref{Cp-V}
Let $V$ be a neighbourhood of $x_0$ such that ${C_p}(V\cap\partial\Omega)=0$.
By Theorem~\ref{thm:irr-char-V},
\ref{V-Cp-V-bdy} $\ensuremath{\Rightarrow} $ \ref{V-alt-def-irr-super},
the set $U:=\Omega\cup(V\cap\partial\Omega)$ is open and
${C_p}(U\setminus\Omega)=0$.
\ref{Cp-V} $\ensuremath{\Rightarrow} $ \ref{Cp-V-bdy}
This is trivial.
\ref{Cp-V} $\eqv$ \ref{alt-def-irr} $\eqv$
\ref{alt-def-irr-super}
In all three statements
it follows directly that $V\subset\overline\Omega$.
Thus their equivalence follows directly from Theorem~\ref{thm:irr-char-V},
with $V$ in Theorem~\ref{thm:irr-char-V} corresponding to
$V\cap\partial\Omega$ here.
\ref{alt-def-irr} $\ensuremath{\Rightarrow} $ \ref{rem-irr}
We only have to show the last part, i.e.,
that $x_0$ is irregular,
but this follows from the already proved implication
\ref{alt-def-irr} $\ensuremath{\Rightarrow} $ \ref{semireg}.
\ref{rem-irr} $\ensuremath{\Rightarrow} $ \ref{semireg}
Let $f\in C(\partial\Omega)$.
Then $\itoverline{P} f$ has a {$p\mspace{1mu}$}-harmonic extension $U$ to $\Omega\cup V$
for some neighbourhood $V$ of $x_0$,
and hence
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
= U(x_0).
\]
Since $x_0$ is irregular it follows that $x_0$ must be semiregular.
\ref{alt-def-irr-super} $\ensuremath{\Rightarrow} $ \ref{weaksemibarrier}
Let $u$ be a positive superharmonic function on $\Omega$.
Then $\min\{u,1\}$ is superharmonic
by Lemma~9.3 in Bj\"orn--Bj\"orn~\cite{BBbook},
and hence has a superharmonic extension $U$ to $\Omega\cup V$.
As $U$ is lsc-regularized
(see Section~\ref{sec:p-harmonic}) and
$\mu(V\setminus\Omega)=0$,
it follows that $U\geq 0$ in $\Omega\cup V$.
Suppose that $U(x_0)=0$.
Then the strong minimum principle
\cite[Theorem~9.13]{BBbook}
implies that $U\equiv 0$
in the component of $\Omega\cup V$ that contains $x_0$.
But this is in contradiction with $u$ being positive in $\Omega$,
and thus
\[
\liminf_{\Omega\ni y\to x_0}u(y)
\geq U(x_0)
> 0.
\]
Thus there is no weak semibarrier at $x_0$.
$\neg$\ref{semibarrier} $\ensuremath{\Rightarrow} $ $\neg$\ref{weaksemibarrier}
Let $u$ be a semibarrier at $x_0$.
If $u>0$ in all of $\Omega$,
then $u$ is a weak semibarrier at $x_0$.
On the other hand,
assume that there exists $x\in\Omega$
such that $u(x)=0$
(in this case $u$ is not a weak semibarrier).
Then the strong minimum principle
\cite[Theorem~9.13]{BBbook}
implies that
$u\equiv 0$ in the component $G\subset\Omega$
that contains $x$,
and hence $x_0$ must be the only boundary point of $G$,
because $u$ is a semibarrier.
As ${C_p}(X\setminus G)\geq{C_p}(X\setminus\Omega)>0$,
Lemma~4.3 in Bj\"orn--Bj\"orn~\cite{BB}
(or Lemma~4.5 in \cite{BBbook})
implies that
${C_p}(\{x_0\})={C_p}(\partial G)>0$.
By the Kellogg property (Theorem~\ref{thm:kellogg}),
$x_0$ is regular,
and hence
Theorem~\ref{thm:reg}
asserts that there is a positive
barrier $v$ at $x_0$,
and thus $v$ is a weak semibarrier.
\ref{semibarrier} $\ensuremath{\Rightarrow} $
\ref{obst-dist-semibarrier}
This is trivial.
$\neg$\ref{R} $\ensuremath{\Rightarrow} $ $\neg$\ref{obst-dist-semibarrier}
Let $u$ be the continuous solution
of the ${\mathscr{K}}_{d_{x_0},d_{x_0}}$-obstacle problem,
which is superharmonic
(see Section~\ref{sec:p-harmonic}).
Moreover, it is clear that
\[
\liminf_{\Omega\ni y\to x}u(y)
> 0
\quad\text{whenever }x\in\partial\Omega\setminus\{x_0\},
\]
and thus $u$ satisfies \ref{semibarrier-i} and \ref{semibarrier-iii}
in Definition~\ref{def:semibarrier}.
Let $\{x_j\}_{j=1}^\infty$ be a sequence of regular boundary points
such that $d_{x_0}(x_j)<1/j$.
By Theorem~\ref{thm:reg},
$\lim_{\Omega\ni y\to x_j}u(y)=d_{x_0}(x_j)$.
Hence we can find $y_j\in B(x_j,1/j)\cap\Omega$ so that
$u(y_j)<2/j$.
Thus $u$ satisfies
\ref{semibarrier-ii} in Definition~\ref{def:semibarrier} as
\[
0
\leq \liminf_{\Omega\ni y\to x_0}u(y)
\leq \liminf_{j\to\infty}u(y_j)
= 0.
\]
We now assume that $\Omega$ is bounded or {$p\mspace{1mu}$}-parabolic.
\ref{R} $\eqv$ \ref{rem-motiv}
This follows directly from Theorem~\ref{thm:irr-char-V},
with $V$ therein
corresponding to $V\cap\partial\Omega$ here.
\end{proof}
We conclude our description of boundary points with some
characterizations
of strongly irregular points.
As for regular and semiregular points,
strong irregularity is a local property.
\begin{theorem}\label{thm:ess-irr-char}
Let $x_0\in\partial\Omega$\textup{,} $\delta>0$\textup{,} and
$d_{x_0}\colon X^*\to[0,1]$ be defined by \eqref{eq-dx0}.
Then the following are equivalent\/\textup{:}
\begin{enumerate}
\item\label{ess-irr}
The point $x_0$ is strongly irregular.
\item\label{ess-local}
The point $x_0$ is strongly irregular with respect to
$G:=\Omega\cap B(x_0,\delta)$.
\item\label{ess-one-seq}
The point $x_0$ is irregular and there exists a sequence
$\{y_j\}_{j=1}^\infty$ in $\Omega$ such that
$y_j\to x_0$ as $j\to\infty$\textup{,}
and
\[
\lim_{j\to\infty}\itoverline{P} f(y_j)
= f(x_0)
\quad\text{for all }f\in C(\partial\Omega).
\]
\item\label{ess-R}
It is true that $x_0\in\itoverline{R}\setminus R$\textup{,}
where $R:=\{x\in\partial\Omega:x\text{ is regular}\}$.
\item\label{ess-d-liminf}
It is true that
\[
\liminf_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
= 0
< \limsup_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y).
\]
\item\label{ess-f-nolim}
There exists $f\in C(\partial\Omega)$ such that
\[
\lim_{\Omega\ni y\to x_0}\itoverline{P} f(y)
\]
does not exist.
\item\label{obst-dist-irr}
The continuous solution $u$ of the ${\mathscr{K}}_{d_{x_0},d_{x_0}}$-obstacle problem satisfies
\[
\liminf_{\Omega\ni y\to x_0}u(y)
= 0
< \limsup_{\Omega\ni y\to x_0}u(y).
\]
\item\label{barrier-irr}
There is a semibarrier \textup{(}or equivalently there is
a weak semibarrier\/\textup{)} but no barrier at $x_0$.
\end{enumerate}
\end{theorem}
The trichotomy property (Theorem~\ref{thm:trichotomy}) shows that
a boundary point
is either regular, semiregular, or strongly
irregular.
We will use this in the following proof.
\begin{proof}
\ref{ess-irr} $\eqv$ \ref{ess-local}
By Theorems~\ref{thm:reg} and~\ref{thm:rem-irr-char},
regularity and semiregularity are local properties,
and hence this must be true also for strong irregularity.
\ref{ess-irr} $\eqv$ \ref{ess-one-seq} $\eqv$ \ref{ess-R}
This follows from
Theorem~\ref{thm:rem-irr-char}
\ref{semireg} $\eqv$ \ref{not-reg-one-seq} $\eqv$ \ref{R}.
\ref{ess-irr} $\ensuremath{\Rightarrow} $ \ref{ess-d-liminf}
Since $x_0$ is strongly irregular and $\itoverline{P} d_{x_0}$ is nonnegative,
it follows that
\[
\liminf_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
= 0
\leq \limsup_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y).
\]
If $\limsup_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)=0$,
then $x_0$ must be regular by Theorem~\ref{thm:reg},
which is a contradiction.
Thus
\[
\limsup_{\Omega\ni y\to x_0}\itoverline{P} d_{x_0}(y)
> 0.
\]
\ref{ess-d-liminf} $\ensuremath{\Rightarrow} $ \ref{ess-f-nolim}
This is trivial.
\ref{ess-f-nolim} $\ensuremath{\Rightarrow} $ \ref{ess-irr}
By definition, $x_0$ is neither regular nor semiregular,
and hence must be strongly irregular.
\ref{ess-irr} $\eqv$ \ref{obst-dist-irr}
Theorem~\ref{thm:reg}
shows that $x_0$ is regular if and only if
$\lim_{\Omega\ni y\to x_0}u(y)=0$.
On the other hand,
Theorem~\ref{thm:rem-irr-char} implies that $x_0$ is semiregular
if and only if
$\liminf_{\Omega\ni y\to x_0}u(y)>0$.
The equivalence follows by combining these two facts.
\ref{ess-irr} $\eqv$ \ref{barrier-irr}
By Theorem~\ref{thm:rem-irr-char},
$x_0$ is semiregular if and only if there is no (weak)
semibarrier at $x_0$.
On the other hand,
by Theorem~\ref{thm:reg},
there is a barrier at $x_0$ if and only if $x_0$ is regular.
Combining these two facts gives the equivalence.
\end{proof}
\end{document} |
\begin{document}
\title{An Important Corollary for the Fast Solution of Dynamic Maximal Clique Enumeration Problems}
\begin{abstract}
In this paper we modify an algorithm for updating a maximal clique enumeration after an edge insertion to provide an algorithm that runs in linear time with respect to the number of cliques containing one of the edge's endpoints, whereas existing algorithms take quadratic time.
\end{abstract}
\section{Introduction}
\subsection{Terminology and Notation}
For a graph $G(V,E)$, a \textit{clique} $C$ in $G$ is a subset of vertices that are all adjacent to each other in $G$, i.e., $C$ is a complete subgraph of $G$. A clique $C$ in $G$ is \textit{maximal} if no other clique in $G$ contains $C$. An edge with endpoints $u, v \in V$ is denoted $uv$. For convenience, when we refer to the \textit{neighborhood} of a vertex $u \in V$, we actually mean the closed neighborhood, i.e., the set $\{v : v = u \text{ or } uv \in E \}$, and denote this $N(u)$.
\subsection{Problem Statement}
The solution to a Maximal Clique Enumeration (MCE) Problem for a graph $G$ is a list of all maximal cliques in $G$. In dynamic MCE Problems, our task is to update the list of maximal cliques after inserting or deleting an edge. In the case of inserting an edge, our task is to list all maximal cliques in $G(V, E \cup \{uv\})$ given the list of all maximal cliques in $G(V,E)$.
\subsection{Motivation}
The MCE problem for a static graph has many important applications, such as in solving graph coloring problems: since every pair of vertices in a clique must be assigned different colors, a list of maximal cliques allows us to prune the search tree immensely. Graph coloring, in turn, is used in many scheduling problems. For example, given a list of time intervals during which various flights will need to use a gate, we can construct a graph whose vertices represent flights and whose edges connect vertices whose corresponding flights require a gate at the same time. The chromatic number of this graph is the number of gates required to service all the flights. However, unexpected flight delays induce changes to the graph throughout the day, requiring the solution of another slightly different graph coloring problem and thus another slightly different maximal clique enumeration problem.
The dynamic MCE problem also finds application in computational topology. Given a point cloud $V$ in a metric space and a radius $\epsilon$, we can construct a graph analogous to the Vietoris-Rips Complex $R_{\epsilon}(V)$, where vertices represent points in the point cloud and where two vertices are connected by an edge if the corresponding points in the point cloud lie within distance $\epsilon$ of each other. For $\epsilon = 1$ and $V \subset \mathbb{R}^2$, the graph is a unit disk graph. In computational topology, we can compute the homology for a single graph by enumerating its maximal cliques and using the algorithm proposed by \cite{zomorodian}. Characterizing the topology of the point cloud by the homology of the VR Complex for a single $\epsilon$ is unreliable, since noise can produce false topological features. What we really want to know is which topological features persist over a wide range of $\epsilon$. Calculating and sorting all pairwise distances of points in $V$ induces a sequence of graphs $\{G_n\}$ where the edges of the $n$th graph are the first $n$ edges of the sorted list of distances. Listing all maximal cliques for each of these graphs is a dynamic MCE Problem, the solution of which is used to construct a \textit{persistence barcode} that illustrates the emergence and disappearance of topological features of the VR Complex as $\epsilon$ varies.
\section{Existing Method}
For completeness, we'll begin by proving the correctness of the existing method for updating the list of maximal cliques after an edge insertion.
\begin{lemma}
\label{Symmetry}
For a graph $G(V,E)$, if $C$ is a clique in $G(V,E \cup \{uv\})$, then $C \setminus u$ (and, by symmetry, $C \setminus v$) is a clique in $G(V,E)$.
\end{lemma}
\begin{proof}
$C \setminus u \subset C$, so $C \setminus u$ is a clique in $G(V, E \cup \{uv\})$, i.e., $s,t \in C \setminus u \implies st \in E \cup \{uv\}$. Since $u \not \in C \setminus u$, we must have $st \in E$, i.e., $C$ is a clique in $G(V,E)$.
\end{proof}
The following lemma says that if $C$ is a maximal clique in $G(V,E)$ but not in $G(V, E \cup \{uv\})$, then $C \subset N(u) \cup N(v)$.
\begin{lemma}
\label{Disappear}
For a graph $G(V,E)$ and $C \subset V$, if $u,v \not \in C$, then $C$ is a maximal clique in $G(V,E)$ $\implies$ $C$ is a maximal clique in $G(V,E\cup \{uv\})$.
\end{lemma}
\begin{proof}
Since $C$ is a clique in $G(V,E)$, $s,t \in C \implies st \in E \implies st \in E \cup \{uv\}$, so $C$ is a clique in $G(V,E \cup \{uv\})$. Let $B \subset V$ be a clique in $G(V, E \cup \{uv\})$ such that $C \subset B$. Since $B$ is a clique in $G(V, E\cup \{uv\})$, $s,t \in B \implies st \in E \cup \{uv\}$. However, $u,v \not \in C \implies u \not \in B$ or $v \not \in B$, since $u,v \in B \implies B \setminus u \supset C$ strictly and $B \setminus u$ is a clique in $G(V,E)$, which contradicts the maximality of $C$ in $G(V,E)$. So $s,t \in B \implies st \in E$, i.e., $B$ is a clique in $G(V,E)$. Since $C$ is maximal in $G(V,E)$, we must have $B = C$. Since $B$ was arbitrary, $C$ is maximal in $G(V, E \cup \{uv\})$.
\end{proof}
The following lemma says that if $C$ is a maximal clique in $G(V, E \cup \{uv\})$ but not in $G(V, E)$, then $C \subset N(u) \cap N(v)$.
\begin{lemma}
\label{Emerge}
For a graph $G(V,E)$ and $C \subset V$, if $u \not \in C$ or $v \not \in C$, then $C$ is a maximal clique in $G(V,E \cup \{uv\})$ $\implies$ $C$ is a maximal clique in $G(V,E)$.
\end{lemma}
\begin{proof}
Without loss of generality, $u \not \in C$. Since $C$ is a clique in $G(V, E \cup \{uv\})$, $s,t \in C \implies st \in E \cup \{uv\}$. Since $u \not \in C$, we must have $st \in E$, i.e., $C$ is a clique in $G(V,E)$. Let $B \subset V$ be a clique in $G(V,E)$ such that $C \subset B$. Then $s,t \in B \implies st \in E \implies st \in E \cup \{uv\}$, so B is a clique in $G(V, E \cup \{uv\})$. Since $C$ is maximal in $G(V, E \cup \{uv\})$, we must have $B = C$. Since $B$ was arbitrary, $C$ is maximal in $G(V,E)$.
\end{proof}
The following is the main theorem that justifies the existing method. It's also the source of the bottleneck in the existing method and the target of our improvement.
\begin{theorem}
\label{ExistingMethod}
For a graph $G(V,E)$ and $C \subset V$, if $u,v \in C$ and $C$ is a maximal clique in $G(V, E \cup \{uv\})$, then there exist maximal cliques $C_u, C_v \subset V$ in $G(V,E)$ such that $u \in C_u$, $v \in C_v$, and $C$ = $(C_u \cap C_v) \cup \{u,v\}$.
\end{theorem}
\begin{proof}
$C \setminus v$ and $C \setminus u$ are cliques in $G(V,E)$, and thus each lie within some maximal clique in $G(V,E)$, say $C_u$ and $C_v$, respectively. $u \in C \setminus v \implies u \in C_u$, and $v \in C \setminus u \implies v \in C_v$. Since $C \setminus v \subset C_u$ and $C \setminus u \subset C_v$, we have $(C \setminus v) \cap (C \setminus u) \subset C_u \cap C_v$. So $C = ((C \setminus v) \cap (C \setminus u)) \cup \{u, v\} \subset (C_u \cap C_v) \cup \{u,v\}$. However, this inclusion cannot be strict, since $(C_u \cap C_v) \cup \{u,v\}$ is a clique in $G(V, E \cup \{u,v\})$ and $C$ is maximal. Thus $C = (C_u \cap C_v) \cup \{u,v\}$.
\end{proof}
The previous theorems are enough to justify the existing method. Lemma ~\ref{Emerge} tells us that new maximal cliques emerge only in $N(u) \cap N(v)$, and Lemma ~\ref{Disappear} says that cliques lose their maximality only if they lie in $N(u) \cup N(v)$. Together, they say that everything outside $N(u) \cup N(v)$ remains the same. Theorem ~\ref{ExistingMethod} gives us a method for finding the new maximal cliques: for each maximal clique $C_u$ in $G$ containing $u$ and each maximal clique $C_v$ in $G$ containing $v$, generate $(C_u \cap C_v) \cup \{u,v\}$ and mark it as a candidate for maximality. Then we test each candidate for maximality and add the candidate to the enumeration only if it passes the test. Theorem 5.3 in \cite{hendrix} gives a method for testing a candidate's maximality in $O(|N(u) \cap N(v)|^2)$ time. If there are $|\{C_u\}|$ maximal cliques containing $u$ and $|\{C_v\}|$ maximal cliques containing $v$, then checking each candidate against the others takes $O(|\{C_u\}| |\{C_v\}| |N(u) \cap N(v)|^2)$ time.
\section{Main Result}
The following Corollary justifies our improvement to the existing method.
\begin{corollary}
\label{ProposedMethod}
For a graph $G(V,E)$ and $C \subset V$, if $u,v \in C$ and $C$ is a maximal clique in $G(V,E \cup \{u,v\})$, then there exist maximal cliques $C_u, C_v \subset V$ in $G(V,E)$ such that $u \in C_u$, $v \in C_v$, and $C = (C_u \cap N(v)) \cup \{u,v\} = (N(u) \cap C_v) \cup \{u,v\}$.
\end{corollary}
\begin{proof}
Let $C_u, C_v$ be the maximal cliques in $G(V,E)$ guaranteed by Theorem ~\ref{ExistingMethod}. $C = (C_u \cap C_v) \cup \{u,v\} \subset (C_u \cap N(v)) \cup \{u,v\}, (N(u) \cap C_v) \cup \{u,v\}$. Since every subset of a clique is a clique, $(C_u \cap N(v)) \cup \{u,v\}$ and $(N(u) \cap C_v) \cup \{u,v\}$ are cliques in $G(V, E \cup \{u,v\})$. Since $C$ is a maximal clique in $G(V, E \cup \{u,v\})$, the inclusion must actually be equality.
\end{proof}
Now that we no longer need to generate pairwise intersections, our list of candidate cliques has size $|\{C_u\}|$ (or $|\{C_v\}|$ if we generate candidates from $\{C_v\}$) and checking the candidates for maximality takes $O(|\{C_u\}| |N(u) \cap N(v)|^2)$ time. This is significant since $|\{C_v\}|$ can grow exponentially with the number of vertices in the graph $|V|$. Note that generating candidates from $\{C_u\}$ and $\{C_v\}$ is redundant, since in the proof $(C_u \cap N(v)) \cup \{u,v\} = (N(u) \cap C_v) \cup \{u,v\}$. This means we now only require either $\{C_u\}$ or $\{C_v\}$. If we have both lists, we could save time by choosing to generate candidates using the shorter list only.
\section{Maximal k-Cliques}
A \textit{maximal $k$-clique} $C \subset V$ is a subset of vertices that is either a clique in $G$ of size $k$ or a maximal clique in $G$ of size $< k$. The number of $k$-cliques in a graph grows in polynomial time: there are at most $\sum_{i=1}^k \binom{|V|}{i} \in O(|V|^k)$ maximal $k$-cliques in a graph. The task of the maximal $k$-clique enumeration problem is to enumerate all maximal $k$-cliques in a graph, and is much easier than the classic MCE problem. Similarly, the task of the dynamic maximal $k$-clique enumeration problem is to update the maximal $k$-clique enumeration after inserting or deleting an edge. Researchers are sometimes willing to sacrifice information about larger cliques in exchange for better runtimes. Returning to computational topology, surface reconstruction from a point cloud only requires knowledge about 3-cliques. Fortunately, Corollary ~\ref{ProposedMethod} can also be used to accelerate the dynamic maximal $k$-clique enumeration problem in the case of inserting an edge.
The proofs of Lemma ~\ref{Symmetry}, Lemma ~\ref{Disappear}, and Lemma ~\ref{Emerge} are identical to those used to prove equivalent statements for maximal $k$-cliques. However, adjusting Theorem ~\ref{ExistingMethod} for $k$-cliques requires some attention to detail:
\begin{theorem}
\label{ExistingKClique}
For a graph $G(V,E)$ and $C \subset V$, if $u,v \in C$ and $C$ is a maximal k-clique in $G(V, E \cup \{uv\})$, then either $|C| < k$ and there exist maximal k-cliques $C_u, C_v \subset V$ in $G(V,E)$ such that $u \in C_u$, $v \in C_v$, and $C$ = $(C_u \cap C_v) \cup \{u,v\}$, or $|C| = k$ and $C \subset (C_u \cap C_v) \cup \{u,v\}$ and $|(C_u \cap C_v) \cup \{u,v\}| \leq k+1$.
\end{theorem}
\begin{proof}
$C \setminus v$ and $C \setminus u$ are cliques in $G(V,E)$ of size $\leq k$, and thus each lie within some maximal $k$-clique in $G(V,E)$, say $C_u$ and $C_v$, respectively. $u \in C \setminus v \implies u \in C_u$, and $v \in C \setminus u \implies v \in C_v$. Since $C \setminus v \subset C_u$ and $C \setminus u \subset C_v$, we have $(C \setminus v) \cap (C \setminus u) \subset C_u \cap C_v$. So $C = ((C \setminus v) \cap (C \setminus u)) \cup \{u, v\} \subset (C_u \cap C_v) \cup \{u,v\}$. If $|C| < k$, then $C$ is maximal in $G$ and this inclusion cannot be strict, since $(C_u \cap C_v) \cup \{u,v\}$ is a clique in $G(V, E \cup \{u,v\})$ and $C$ is maximal; in this case, $C = (C_u \cap C_v) \cup \{u,v\}$. Else, $|C| = k$. $|C_u|, |C_v| \leq k$, so $|C_u \cap C_v| \leq k$. If $C_u = C_v$, as may be the case when $uv \in E$, then $(C_u \cap C_v) \cup \{u,v\} = C_u \cap C_v$ so $|(C_u \cap C_v) \cup \{u,v\}| \leq k$. If $C_u \neq C_v$, then $|C_u \cap C_v| \leq k-1$ so $|(C_u \cap C_v) \cup \{u,v\}| \leq |(C_u \cap C_v)| + |\{u,v\}| = k+1$.
\end{proof}
So updating the list of maximal $k$-cliques is similar to updating the list of maximal cliques, except that now in the case where $k \leq |(C_u \cap C_v) \cup \{u,v\}|$, we don't have to run any maximality test; every size-$k$ subset of the candidate becomes a maximal $k$-clique.
Corollary ~\ref{ProposedMethod} lets us generate a linear number of candidates for $k$-cliques, too.
\begin{corollary}
\label{ProposedKClique}
For a graph $G(V,E)$ and $C \subset V$, if $u,v \in C$ and $C$ is a maximal $k$-clique in $G(V,E \cup \{u,v\})$, then there exist maximal $k$-cliques $C_u, C_v \subset V$ in $G(V,E)$ such that $u \in C_u$, $v \in C_v$, and $C = (C_u \cap N(v)) \cup \{u,v\} = (N(u) \cap C_v) \cup \{u,v\}$ or $|C| = k$ and $C \subset (C_u \cap N(v)) \cup \{u,v\}, (N(u) \cap C_v) \cup \{u,v\}$.
\end{corollary}
\begin{proof}
Let $C_u, C_v$ be the maximal $k$-cliques in $G(V,E)$ guaranteed by Theorem ~\ref{ExistingKClique}. $C \subset (C_u \cap C_v) \cup \{u,v\} \subset (C_u \cap N(v)) \cup \{u,v\}, (N(u) \cap C_v) \cup \{u,v\}$. Since every subset of a clique is a clique, $(C_u \cap N(v)) \cup \{u,v\}$ and $(N(u) \cap C_v) \cup \{u,v\}$ are cliques in $G(V, E \cup \{u,v\})$. If $|C| < k$, then $C$ is a maximal clique in $G(V, E \cup \{u,v\})$, so the inclusion must actually be equality.
\end{proof}
We follow the same procedure for testing candidates as before: if $|(C_u \cap N(v)) \cup \{u,v\}| < k$, use the maximality test from \cite{hendrix}; else, every size-$k$ subset becomes a maximal $k$-clique. As in the application of Corollary~\ref{ProposedMethod} to the traditional MCE setting, we only need either $\{C_u\}$ or $\{C_v\}$ to correctly generate the list of $k$-cliques for the updated graph since $C \subset (C_u \cap N(v)) \cup \{u,v\}$ and $C \subset (N(u) \cap C_v) \cup \{u,v\}$.
\section{Parallelism}
Lemmas~\ref{Disappear} and~\ref{Emerge} tell us that the list of maximal cliques doesn't change outside of $N(u) \cup N(v)$. If $E_n \subset E_{n+1} = E_n \cup \{u_1 v_1\} \subset E_{n+2} = E_{n+1} \cup \{u_2 v_2\}$, and $(N(u_2) \cup N(v_2)) \cap \{u_1, v_1\} = \emptyset$, then inserting $u_1 v_1$ affects neither $\{C_{u_2}\}$ nor $\{C_{v_2}\}$, so the updates from $G(V,E_n)$ to $G(V,E_{n+1})$ and from $G(V,E_{n+1})$ to $G(V,E_{n+2})$ are independent and can run in parallel, even with the existing method. Since we only need either $\{C_u\}$ or $\{C_v\}$ to use the proposed method, we have more opportunities for parallelism: if only $N(u_2) \cap \{u_1, v_1\} = \emptyset$, then inserting $u_1 v_1$ doesn't affect $\{C_{u_2}\}$, so the updates can still run in parallel if we use the proposed method.
\end{document} |
\begin{document}
\title[Lightlike surfaces]{Lightlike surfaces with planar normal sections in
Minkowski $3$-space}
\author{Feyza Esra Erdo\u{g}an}
\address{Faculty of Arts and Science, Department of Mathematics, Ad\i yaman
University, 02040 Ad\i yaman, TURKEY}
\email{ferdogan@adiyaman.edu.tr}
\author{Bayram \c{S}ah\.{I}n$^*$}
\address{Department of Mathematics, \.{I}n\"{o}n\"{u} University, 44280\ \
Malatya, TURKEY}
\email{bayram.sahin@inonu.edu.tr}
\author{R\i fat G\"{u}ne\c{s}}
\address{Department of Mathematics, \.{I}n\"{o}n\"{u} University, 44280\ \
Malatya, TURKEY}
\email{rifat.gunes@inonu.edu.tr}
\begin{abstract}
In this paper we study lightlike surfaces of Minkowski $3$-space such that
they have degenerate or non-degenerate planar normal sections. We first show
that every lightlike surface of Minkowski $3$-space has degenerate planar
normal sections. Then we study lightlike surfaces with non-degenerate planar
normal sections and obtain a characterization for such lightlike surfaces
\end{abstract}
\maketitle
\section{Introduction}
Surfaces with planar normal sections in Euclidean spaces were
first studied by Bang-Yen Chen \cite{Bang-yen Chen}. Later such surfaces or
submanifolds have been studied by many authors \cite{Bang-yen Chen}, \cite
{Young Ho Kim1}, \cite{Young Ho Kim2}, \cite{Shi-jie Li},\cite{Young Ho Kim3}
. In \cite{Young Ho Kim2}, Y. H. Kim initiated the study of semi-Riemannian
setting of such surfaces. But as far as we know, lightlike surfaces with
planar normal sections have not been studied so far. Therefore, in this
paper we study lightlike surfaces with planar normal sections of $\mathbb{R}
^3_1$. \newline
We first define the notion of surfaces with planar normal sections as
follows. Let $M$ be a lightlike surface of $\mathbb{R}^3_1$. For a point $p$
in $M$ and a lightlike vector $\xi $ which spans the radical distribution of
a lightlike surface, the vector $\xi $ and transversal space $tr(TM)$ to $M$
at $p$ determine a 2- dimensional subspace $E(p,\xi )$ in $\mathbb{R}^3_1$
through $p$. The intersection of $M$ and $E(p,\xi )$ gives a lightlike curve
$\gamma $ in a neighborhood of $p,$ which is called the normal section of $M$
at the point $p$ in the direction of $\xi $.\newline
For non-degenerate planar normal sections, we present the following notion.
Let $w$ be a spacelike vector tangent to $M$ at $p$ which spans the chosen
screen distribution of $M$. Then the vector $w$ and transversal space $
tr(TM) $ to $M$ at $p$ determine a 2- dimensional subspace $E(p,w)$ in $
\mathbb{R}^3_1$ through $p$. The intersection of $M$ and $E(p,w)$ gives a
spacelike curve $\gamma $ in a neighborhood of $p$ which is called the
normal section of $M$ at $p$ in the direction of $w$. According to both
identifications above, $M$ is said to have degenerate pointwise and
spacelike pointwise planar normal sections, respectively if each normal
section $\gamma $ at $p$ satisfies $\gamma ^{\prime }\wedge \gamma ^{\prime
\prime }\wedge \gamma ^{\prime \prime \prime }=0$ for each $p$ in $M$.
For a lightlike surface with degenerate planar normal sections, in fact, we
show that every lightlike surface of Minkowski $3$-space has degenerate
planar normal sections. Then for a lightlike surface with non-degenerate
planar normal sections, we obtain two characterizations.
\noindent \line(1,0){100}\newline
{\footnotesize \textit{\small * Corresponding author}}
We first show that a lightlike surface $M$ in $\mathbb{R}^3_1$ is a
lightlike surface with non-degenerate planar sections if and only if $M$ is
either screen conformal and totally umbilical or $M$ is totally geodesic. We
also obtain a characterization for non-umbilical screen conformal lightlike
surface with non-degenerate planar normal sections.
\section{Preliminaries}
Let $(\bar{M},\bar{g})$ be an $(m+2)$-dimensional semi-Riemannian manifold
with the indefinite metric $\bar{g}$ of index $q\in \{1,...,m+1\}$ and $M$
be a hypersurface of $\bar{M}$. We denote the tangent space at $x \in M$ by $
T_x M$. Then
\begin{equation*}
T_x M^{\perp}=\{V_x \in T_x \bar{M}|\bar{g}_x(V_x,W_x)=0, \forall W_x \in
T_x M\}
\end{equation*}
and
\begin{equation*}
RadT_x M=T_x M \cap T_x M^{\perp}.
\end{equation*}
Then, $M$ is called a lightlike hypersurface of $\bar{M}$ if $RadT_x M \neq
\{0\}$ for any $x \in M$. Thus $TM^{\perp}=\bigcap_{x\in M} T_x M^{\perp}$
becomes a one- dimensional distribution $Rad TM$ on $M$. Then there exists a
vector field $\xi \neq 0$ on $M$ such that
\begin{equation*}
g\left( \xi ,X\right) =0,\quad \forall X\in \Gamma \left( TM\right),
\end{equation*}
where $g$ is the induced degenerate metric tensor on $M$. We denote $F(M)$
the algebra of differential functions on $M$ and by $\Gamma(E)$ the $F(M)$-
module of differentiable sections of a vector bundle $E$ over $M$.\newline
A complementary vector bundle $S\left( TM\right) $ of $TM^{\perp }=RadTM$ in
$TM$, i.e.,
\begin{equation}
TM=RadTM\oplus _{orth}S(TM) \label{1.1}
\end{equation}
is called a screen distribution on $M$. It follows from the equation above
that $S(TM)$ is a non-degenerate distribution. Moreover, since we assume
that $M$ is paracompact, there always exists a screen $S(TM)$. Thus, along $M
$ we have the decomposition
\begin{equation}
T\bar{M}_{|M} = S(TM) \oplus_{orth} S(TM)^{\perp}, \quad S(TM) \cap
S(TM)^{\perp} \neq \{0\}, \label{1.2}
\end{equation}
that is, $S(TM)^{\perp }$ is the orthogonal complement to $S(TM)$ in $T\bar{M
}\mid _{M}$. Note that $S(TM)^{\perp }$ is also a non-degenerate vector
bundle of rank 2. However, it includes $TM^{\perp }=RadTM$ as its sub-bundle.
\newline
Let $\left( M,g,S(TM)\right) $ be a lightlike hypersurface of a
semi-Riemannian manifold $\left( \bar{M},\bar{g}\right) $. Then there exists
a unique vector bundle $tr(TM)$ of rank 1 over $M$, such that for any
non-zero section $\xi $ of $TM^{\perp }$ on a coordinate neighborhood $
U\subset M$, there exists a unique section $N$ of $tr(TM)$ on $U$
satisfying (\ref{1.3}). Indeed, consider the complementary vector bundle $F$ of $TM^{\perp }$ in $S(TM)^{\perp }$ and take $V\in \Gamma \left(
F\mid _{U}\right) ,V\neq 0$. Then $\bar{g}\left( \xi ,V\right) \neq 0$ on $U$
, otherwise $S(TM)^{\perp }$ would be degenerate at a point of $U$ \cite
{Krishan L. Duggal and Bayram Sahin}. Define a vector field
\begin{equation*}
N=\frac{1}{\bar{g}\left( V,\xi \right) }\left\{ V-\frac{\bar{g}\left(
V,V\right) }{2\bar{g}\left( V,\xi \right) }\xi \right\}
\end{equation*}
on $U$ where $V\in $ $\Gamma \left( F\mid _{U}\right) $ such that $\ \bar{g}
\left( \xi ,V\right) \neq 0$. Then we have
\begin{equation}
\bar{g}\left( N,\xi \right) =1,\ \bar{g}\left( N,N\right) =0,\ \bar{g}\left(
N,W\right) =0,\ \forall W\in \Gamma \left( S(TM)\mid _{U}\right) \label{1.3}
\end{equation}
Moreover, from (\ref{1.1}) and (\ref{1.2}) we have the following
decompositions:\
\begin{equation}
T\bar{M}\mid _{M}=S(TM)\oplus _{orth}\left( TM^{\perp }\oplus tr\left(
TM\right) \right) =TM\oplus tr\left( TM\right) \label{1.4}
\end{equation}
Locally, suppose $\left\{ \xi ,N\right\} $ is a pair of sections on $
U\subset M$ satisfying (\ref{1.3}). Define a symmetric $\digamma \left(
U\right) $-bilinear from $B$ and a 1-form $\tau $ on $U$ . Hence on $U$, for
$X,Y\in \Gamma \left( TM\mid _{U}\right) $
\begin{eqnarray}
\bar{\nabla}_{X}Y &=&\nabla _{X}Y+B\left( X,Y\right) N \label{1.5} \\
\bar{\nabla}_{X}N &=&-A_{N}X+\tau \left( X\right) N, \label{1.6}
\end{eqnarray}
equations (\ref{1.5}) and (\ref{1.6}) are local Gauss and Weingarten
formulae. Since $\bar{\nabla}$ is a metric connection on $\bar{M},$ it is
easy to see that
\begin{equation}
B\left( X,\xi \right) =0,\forall X\in \Gamma \left( TM\mid _{U}\right) .
\label{1.7}
\end{equation}
Consequently, the second fundamental form of $M$ is degenerate \cite{Krishan
L. Duggal and Bayram Sahin}. Define a local 1-form $\eta $ by
\begin{equation}
\eta \left( X\right) =\bar{g}\left( X,N\right) ,\forall X\in \Gamma (TM\mid
_{U}). \label{1.8}
\end{equation}
Let $P$ denote the projection morphism of $\Gamma \left( TM\right) $ on $
\Gamma \left( S(TM)\right) $ with respect to the decomposition (\ref{1.1}).
We obtain
\begin{eqnarray}
\nabla_{X}PY &=&\nabla_{X}^{\ast }PY+C\left( X,PY\right) \xi \label{1.9} \\
\nabla_{X}\xi &=&-A_{\xi }^{\ast }X+\varepsilon \left( X\right) \xi \notag
\\
&=&-A_{\xi }^{\ast }X-\tau \left( X\right) \xi \label{1.10}
\end{eqnarray}
where $\nabla _{X}^{\ast }Y$ and $A_{\xi }^{\ast }X$ belong to $\Gamma
\left( S\left( TM\right) \right) ,\nabla $ and $\nabla ^{\ast t}$ are linear
connections on $\Gamma \left( S(TM)\right) $ and $TM^{\perp }$ respectively,
$h^{\ast }$ is a $\Gamma \left( TM^{\perp }\right) $-valued $\digamma \left(
M\right) $-bilinear form on $\Gamma \left( TM\right) \times $ $\Gamma \left(
S(TM)\right) $ and $A_{\xi }^{\ast }$ is $\Gamma \left( S(TM)\right) $
-valued $\digamma \left( M\right) $-linear operator on $\Gamma \left(
TM\right) $. We called them the screen fundamental form and screen shape
operator of $S\left( TM\right) ,$ respectively. Define
\begin{eqnarray}
C\left( X,PY\right) &=&\bar{g}\left( h^{\ast }(X,PY\right) ,N)\
\label{1.11} \\
\varepsilon \left( X\right) &=&\bar{g}\left( \nabla _{X}^{\ast t}\xi
,N\right) ,\forall X,Y\in \Gamma \left( TM\right) ,\ \label{1.12}
\end{eqnarray}
one can show that $\varepsilon \left( X\right) =-\tau \left( X\right) $.
Here $C\left( X,PY\right) $ is called the local screen fundamental form of $
S(TM)$. Precisely, the two local second fundamental forms of $M$ and $S(TM)$
are related to their shape operators by
\begin{eqnarray}
B\left( X,Y\right) &=&\bar{g}\left( Y,A_{\xi }^{\ast }X\right) ,\
\label{1.13} \\
A_{\xi }^{\ast }\xi &=&0\ , \label{1.14} \\
\bar{g}\left( A_{\xi }^{\ast }PY,N\right) &=&0\ , \label{1.15} \\
C\left( X,PY\right) &=&\bar{g}\left( PY,A_{N}X\right) , \label{1.16} \\
\bar{g}\left( N,A_{N}X\right) &=&0.\ \label{1.17}
\end{eqnarray}
A lightlike hypersurface $\left(M,g,S(TM)\right) $ of a semi-Riemannian manifold is called totally umbilical\cite{Krishan L. Duggal and Bayram Sahin}
if there is a smooth function $\varrho ,$ such that
\begin{equation}
B\left( X,Y\right) =\varrho g\left( X,Y\right) ,\forall X,Y\in \Gamma \left(
TM\right) \label{1.18}
\end{equation}
where $\varrho $ is non-vanishing smooth function on a neighborhood $U$ in $M
$.
\qquad A lightlike hypersurface $\left( M,g,S(TM)\right) $ of a
semi-Riemannian manifold is called screen locally conformal if the shape
operators $A_{N}$ and $A_{\xi }^{\ast }$ of $M$ and $S(TM)$, respectively,
are related by
\begin{equation}
A_{N}=\varphi A_{\xi }^{\ast } \label{1.19}
\end{equation}
where $\varphi $ is non-vanishing smooth function on a neighborhood $U$ in $M
$. Therefore, it follows that $\forall X,Y\in \Gamma \left( S\left(
TM\right) \right) ,$ $\xi \in RadTM$
\begin{equation}
C\left( X,\xi \right) =0, \label{1.20}
\end{equation}
For details about screen conformal lightlike hypersurfaces, see: \cite{A-D} and \cite
{Krishan L. Duggal and Bayram Sahin} .
\section{Planar normal sections \ of lightlike surfaces in $\mathbb{R}^3_1$}
Let $M$ be a lightlike surface of $\mathbb{R}^3_1$. Now we
investigate lightlike surfaces with degenerate planar normal sections. If $
\gamma $ is a null curve, for a point $p$ in $M,$ we have
\begin{eqnarray}
\gamma ^{\prime }\left( s\right) &=&\xi \ \label{2.1} \\
\gamma ^{\prime \prime }\left( s\right) &=&\bar{\nabla }_{\xi }\xi =-\tau
\left( \xi \right) \xi \label{2.2} \\
\gamma ^{\prime \prime \prime }\left( s\right) &=&\left[ \xi \left( \tau
\left( \xi \right) \right) +\tau ^{2}\left( \xi \right) \right] \xi
\label{2.3}
\end{eqnarray}
Then, $\gamma ^{\prime \prime \prime }\left( 0\right) $ is a linear
combination of $\gamma ^{\prime }\left( 0\right) $ and $\gamma ^{\prime
\prime }\left( 0\right) $. Thus (\ref{2.1}), (\ref{2.2}) and (\ref{2.3})
give $\gamma ^{\prime \prime \prime }\left( 0\right) \wedge \gamma ^{\prime
\prime }\left( 0\right) \wedge \gamma ^{\prime }\left( 0\right) =0$. Thus
lightlike surfaces always have planar normal sections.
\begin{corollary}
Every lightlike surface of $\mathbb{R}^3_1$ has degenerate planar
normal sections.
\end{corollary}
In fact Corollary 3.1 tells us that the above situation is not
interesting. Now, we will check lightlike surfaces with non-degenerate
planar normal sections. Let $M$ be a lightlike hypersurface of $
\mathbb{R}^3_1$. For a point $p$ in $M$ and a spacelike vector $w\in S(TM)$
tangent to $M$ at $p$ , the vector $w$ and transversal space $tr(TM)$ to $M$
at $p$ determine a 2-dimensional subspace $E(p,w)$ in $\mathbb{R}^3_1$
through $p $. The intersection of $M$ and $E(p,w)$ give a spacelike curve $
\gamma $ in a neighborhood of $p,$ which is called the normal section of $M$
at $p$ in the direction of $w$. Now, we research the conditions for a
lightlike surface of $\mathbb{R}^3_1$ to have non-degenerate planar normal
sections.
Let $\left( M,g,S(TM)\right) $ be a totally umbilical and screen
conformal lightlike surface of $\left( \mathbb{R}^3_1,\bar{g}\right) $. In
this case $S(TM)$ is integrable\cite{A-D}. We denote integral submanifold of $S(TM)$
by $M^{\prime }$. Then, using (\ref{1.6})$,$ (\ref{1.10}) and (\ref{1.19} )
we find
\begin{equation}
C\left( w,w\right) \xi +B\left( w,w\right) N=\bar{g}\left( w,w\right)
\left\{ \alpha \xi +\beta N\right\} =\left\{ \alpha \xi +\beta N\right\} ,
\label{2.4}
\end{equation}
where $t,$ $\alpha ,\beta \in \mathbb{R}$. In this case, we obtain
\begin{eqnarray}
\gamma ^{\prime }\left( s\right) &=&w \label{2.5} \\
\gamma ^{\prime \prime }\left( s\right) &=&\bar{\nabla}_{w}w=\nabla
_{w}^{\ast }w+C\left( w,w\right) \xi +B\left( w,w\right) N \label{2.6} \\
\gamma ^{\prime \prime }\left( s\right) &=&\nabla _{w}^{\ast }w+\alpha \xi
+\beta N\ \label{2.7} \\
\gamma ^{\prime \prime \prime }\left( s\right) &=&\nabla _{w}^{\ast }\nabla
_{w}^{\ast }w+C\left( w,\nabla _{w}^{\ast }w\right) \xi +w\left( C\left(
w,w\right) \right) \xi -C\left( w,w\right) A_{\xi }^{\ast }w \label{2.8} \\
&&+w\left( B\left( w,w\right) \right) N-B\left( w,w\right) A_{N}w+B\left(
w,\nabla _{w}^{\ast }w\right) N \notag \\
\gamma ^{\prime \prime \prime }\left( s\right) &=&\nabla _{w}^{\ast }\nabla
_{w}^{\ast }w+t\left\{ \alpha \xi +\beta N\right\} -\alpha A_{\xi }^{\ast
}w-\beta A_{N}w. \label{2.9}
\end{eqnarray}
Where $\nabla ^{\ast }$ and $\nabla $ are linear connections on $S(TM)$ and $
\Gamma \left( TM\right) $, respectively and $\gamma ^{\prime }\left(
s\right) =w$. From the definition of planar normal section and that $
S(TM)=Sp\left\{ w\right\} ,$ we have
\begin{equation}
w\wedge \nabla _{w}^{\ast }w=0 \label{2.10}
\end{equation}
and
\begin{equation}
w\wedge \nabla _{w}^{\ast }\nabla _{w}^{\ast }w=0. \label{2.11}
\end{equation}
Then, from (\ref{2.4}), (\ref{2.6}), (\ref{2.8}) and (\ref{2.10}), (\ref
{2.11}) we obtain $\gamma ^{\prime \prime \prime }\left( s\right) \wedge
\gamma ^{\prime \prime }\left( s\right) \wedge \gamma ^{\prime }\left(
s\right) =0$. Thus, $M$ has planar non-degenerate normal sections.\newline
If $M$ is totally geodesic lightlike surface of $\mathbb{R}^3_1$. Then, we
have $B=0$, $A_{\xi }^{\ast }=0$. Hence (\ref{2.5})-(\ref{2.8}) become
\begin{eqnarray*}
\gamma ^{\prime }\left( s\right) &=&w \\
\gamma ^{\prime \prime }\left( s\right) &=&\nabla _{w}^{\ast }w+\alpha \xi \\
\gamma ^{\prime \prime \prime }\left( s\right) &=&\nabla _{w}^{\ast }\nabla
_{w}^{\ast }w+t\alpha \xi -\beta A_{N}w.
\end{eqnarray*}
Since $A_{N}w\in \Gamma \left( TM\right) ,$ we have $\gamma ^{\prime \prime
\prime }\left( s\right) \wedge \gamma ^{\prime \prime }\left( s\right)
\wedge \gamma ^{\prime }\left( s\right) =0$.
Conversely, we assume that $M$ has planar non-degenerate normal
sections. Then, from (\ref{2.5}), (\ref{2.6}), (\ref{2.8}) and (\ref{2.10}),
(\ref{2.11}) we obtain
\begin{equation*}
(C\left( w,w\right) \xi +B\left( w,w\right) N)\wedge \left( C\left(
w,w\right) A_{\xi }^{\ast }w+B\left( w,w\right) A_{N}w\right) =0,
\end{equation*}
thus $\left( C\left( w,w\right) A_{\xi }^{\ast }w+B\left( w,w\right)
A_{N}w\right) =0$ or $C\left( w,w\right) \xi +B\left( w,w\right) N=0$. If
\newline
$C\left( w,w\right) A_{\xi }^{\ast }w+B\left( w,w\right) A_{N}w=0,$ then,
from
\begin{equation*}
A_{\xi }^{\ast }w=-\frac{B\left( w,w\right) }{C\left( w,w\right) }A_{N}w
\end{equation*}
at $p\in M,$ $M$ is a screen conformal lightlike surface with $C\left(
w,w\right) \neq 0$. If $C\left( w,w\right) \xi +B\left( w,w\right) N=0$,
then $RadTM$ is parallel and $M$ is totally geodesic.
Consequently, we have the following,
\begin{theorem}
Let $M$ be a lightlike surface of $\mathbb{R}^3_1$. Then $M$ has
non-degenerate planar normal sections if and only if \ either $M$ is
umbilical and screen conformal or $M$ is totally geodesic.
\end{theorem}
\begin{theorem}
Let\textbf{\ }$\left( M,g,S(TM)\right) $ be a screen conformal non-umbilical
lightlike surface of $\mathbb{R}^3_1$. Then, for $T\left( w,w\right)
=C\left( w,w\right) \xi +B\left( w,w\right) N$ the following statements are
equivalent
\begin{enumerate}
\item $\left( \bar{\nabla}_{w}T\right) \left( w,w\right) =0$, every
spacelike vector $w\in S(TM)$
\item $\bar{\nabla}T=0$
\item $M$ has non-degenerate planar normal sections and each normal section
at $p$ has one of its vertices at $p$
\end{enumerate}
By the vertex of curve $\gamma \left( s\right) $ we mean a point $p$ on $
\gamma $ such that its curvature $\kappa $ satisfies $\frac{d\kappa
^{2}\left( p\right) }{ds}=0,$ $\kappa ^{2}=\left\langle \gamma ^{\prime
\prime }\left( s\right) ,\gamma ^{\prime \prime }\left( s\right)
\right\rangle $.
\end{theorem}
\begin{proof}
From (\ref{2.5}), (\ref{2.6}) and that a screen conformal $M$, we have
\begin{equation*}
\left( \bar{\nabla}_{w}T\right) \left( w,w\right) =\bar{\nabla}_{w}T\left(
w,w\right)
\end{equation*}
which shows $\left( a\right) \Leftrightarrow \left( b\right) $. $\left(
b\right) $ $\Rightarrow \left( c\right) $ Assume that $\bar{\nabla}T=0$ . If
$\bar{\nabla}T=0$ then $M$ is totally geodesic and Theorem 3.1 implies that $
M$ has (pointwise) planar normal sections. Let the $\gamma \left( s\right) $
be a normal section of $M$ at $p$ in a given direction $w\in S(TM)$. Then (
\ref{2.5}) shows that the curvature $\kappa \left( s\right) $ of $\gamma
\left( s\right) $ satisfies
\begin{eqnarray}
\kappa ^{2}\left( s\right) &=&\left\langle \gamma ^{\prime \prime }\left(
s\right) ,\gamma ^{\prime \prime }\left( s\right) \right\rangle \ \notag \\
&=&2C(w,w)B(w,w) \notag \\
&=&\left\langle T\left( w,w\right) ,T\left( w,w\right) \right\rangle \
\label{2.12}
\end{eqnarray}
where $w=\gamma ^{\prime }\left( s\right) $. Therefore we find
\begin{equation}
\frac{d\kappa ^{2}\left( p\right) }{ds}=\left\langle \bar{\nabla}_{w}T\left(
w,w\right) ,T\left( w,w\right) \right\rangle =\left\langle \left( \bar{\nabla
}_{w}T\right) \left( w,w\right) ,T\left( w,w\right) \right\rangle \
\label{2.13}
\end{equation}
Since $\bar{\nabla}_{w}T\left( w,w\right) =0,$ this implies
\begin{equation*}
\frac{d\kappa ^{2}\left( 0\right) }{ds}=0
\end{equation*}
at $p=\gamma \left( 0\right) $. Thus $p$ is a vertex of the normal section $
\gamma \left( s\right) $. $\left( c\right) \Rightarrow \left( a\right) :$ If
$M$ has planar normal sections, then Theorem 3.1 gives
\begin{equation}
T\left( w,w\right) \wedge \left( \bar{\nabla}_{w}T\right) \left( w,w\right)
=0. \label{2.14}
\end{equation}
If $p$ is a vertex of $\gamma \left( s\right) $, then we have
\begin{equation*}
\frac{d\kappa ^{2}\left( 0\right) }{ds}=0.
\end{equation*}
Thus, since $M$ has planar normal sections using (\ref{2.13}) we find
\begin{eqnarray*}
\gamma ^{\prime }\left( s\right) \wedge \gamma ^{\prime \prime }\left(
s\right) \wedge \gamma ^{\prime \prime \prime }\left( s\right) &=&w\wedge
\left( \nabla _{w}^{\ast }w+T\left( w,w\right) \right) \\
&&\wedge \left( \nabla _{w}^{\ast }\nabla _{w}^{\ast }w+tT\left( w,w\right)
+\left( \bar{\nabla}_{w}T\right) \left( w,w\right) \right) =0 \\
\gamma ^{\prime }\left( s\right) \wedge \gamma ^{\prime \prime }\left(
s\right) \wedge \gamma ^{\prime \prime \prime }\left( s\right) &=&T\left(
w,w\right) \wedge \left( \bar{\nabla}_{w}T\right) \left( w,w\right) =0
\end{eqnarray*}
and
\begin{equation}
\left\langle \left( \bar{\nabla}_{w}T\right) \left( w,w\right) ,T\left(
w,w\right) \right\rangle =0. \label{2.15}
\end{equation}
Combining (\ref{2.14}) and (\ref{2.15}) we obtain $\left( \bar{\nabla}
_{w}T\right) \left( w,w\right) =0$ or $T\left( w,w\right) =0$. Let us define
$U=\left\{ w\in S(TM)\mid T(w,w)=0\right\} $. If $int(U)\neq \varnothing ,$
we obtain $\left( \bar{\nabla}_{w}T\right) \left( w,w\right) =0$ on $int(U)$
. Thus, by continuity we have $\bar{\nabla}T=0$.
\end{proof}
\begin{example}
Consider the null cone of $\mathbb{R}^3_1$ given by
\begin{equation*}
\wedge =\left\{ \left( x_{1},x_{2},x_{3}\right) \mid
-x_{1}^{2}+x_{2}^{2}+x_{3}^{2}=0,\ x_{1},x_{2},x_{3}\in \mathbb{R}\right\} .
\end{equation*}
The radical bundle of null cone is
\begin{equation*}
\xi =x_{1}\frac{\partial }{\partial x_{1}}+x_{2}\frac{\partial }{\partial
x_{2}}+x_{3}\frac{\partial }{\partial x_{3}}
\end{equation*}
and screen distribution is spanned by
\begin{equation*}
Z_{1}=x_{2}\frac{\partial }{\partial x_{1}}+x_{3}\frac{\partial }{\partial
x_{2}}
\end{equation*}
Then the lightlike transversal vector bundle is given by
\begin{equation*}
Itr(TM)=Span\{N=\frac{1}{2(-x_{1}^{2}+x_{2}^{2})}\left( x_{1}\frac{\partial
}{\partial x_{1}}+x_{2}\frac{\partial }{\partial x_{2}}-x_{3}\frac{\partial
}{\partial x_{3}}\right) \}.
\end{equation*}
It follows that the corresponding screen distribution $S(TM)$ is spanned by $
Z_{1}$. Thus
\begin{eqnarray*}
\nabla _{\xi }\xi &=&x_{1}\frac{\partial }{\partial x_{1}}+x_{2}\frac{
\partial }{\partial x_{2}}+x_{3}\frac{\partial }{\partial x_{3}} \\
\bar{\nabla}_{\xi }\nabla _{\xi }\xi &=&x_{1}\frac{\partial }{\partial x_{1}}
+x_{2}\frac{\partial }{\partial x_{2}}+x_{3}\frac{\partial }{\partial x_{3}}.
\end{eqnarray*}
Then, we obtain
\begin{equation*}
\gamma ^{\prime \prime \prime }\left( s\right) \wedge \gamma ^{\prime \prime
}\left( s\right) \wedge \gamma ^{\prime }\left( s\right) =0
\end{equation*}
which shows that null cone has degenerate planar normal sections.
\end{example}
\begin{example}
Let $\mathbb{R}^3_1$ be the space $\mathbb{R}^{3}$ endowed with the semi
Euclidean metric
\begin{equation*}
\bar{g}(x,y)=-x_{0}y_{0}+x_{1}y_{1}+x_{2}y_{2},(x=(x_{0},x_{1},x_{2})).
\end{equation*}
The lightlike cone $\wedge _{0}^{2}$ is given by the equation $
-(x_{0})^{2}+(x_{1})^{2}+\left( x_{2}\right) ^{2}=0$, $x\neq 0$. It is known
that $\wedge _{0}^{2}$ is a lightlike surface of $\mathbb{R}^3_1$ and the
radical distribution is spanned by a global vector field
\begin{equation}
\xi =x_{0}\frac{\partial }{\partial x_{0}}+x_{1}\frac{\partial }{\partial
x_{1}}+x_{2}\frac{\partial }{\partial x_{2}} \label{2.16}
\end{equation}
on $\wedge _{0}^{2}$. The unique section $N$ is given by
\begin{equation}
N=\frac{1}{2(x_{0})^{2}}\left( -x_{0}\frac{\partial }{\partial x_{0}}+x_{1}
\frac{\partial }{\partial x_{1}}+x_{2}\frac{\partial }{\partial x_{2}}\right)
\label{2.17}
\end{equation}
and is also defined. As $\xi $ is the position vector field we get
\begin{equation}
\bar{\nabla}_{X}\xi =\nabla _{X}\xi =X,\text{ \ \ }\forall X\in \Gamma
\left( TM\right) . \label{2.18}
\end{equation}
Then, $A_{\xi }^{\ast }X+\tau \left( X\right) \xi +X=0$. As $A_{\xi }^{\ast
} $ is $\Gamma \left( S(TM)\right) $-valued we obtain
\begin{equation}
A_{\xi }^{\ast }X=-PX,\text{ \ \ }\forall X\in \Gamma \left( TM\right)
\label{2.19}
\end{equation}
Next, any $X\in \Gamma \left( S(T\wedge _{0}^{2})\right) $ is expressed by $
X=X_{1}\frac{\partial }{\partial x_{1}}+X_{2}\frac{\partial }{\partial x_{2}}
$ \ where $(X_{1},X_{2})$ satisfy
\begin{equation}
x_{1}X_{1}+x_{2}X_{2}=0 \label{2.20}
\end{equation}
and then
\begin{eqnarray}
\nabla _{\xi }X &=&\bar{\nabla}_{\xi }X=\overset{2}{\sum\limits_{A=0}}
\sum\limits_{a=1}^{2}x_{A}\frac{\partial X_{a}}{\partial x_{A}}\frac{
\partial }{\partial x_{a}}, \label{2.21} \\
\bar{g}\left( \nabla _{\xi }X,\xi \right) &=&\overset{2}{\sum\limits_{A=0}}
\sum\limits_{a=1}^{2}x_{a}x_{A}\frac{\partial X^{a}}{\partial x_{A}}=-\left(
x_{1}X_{1}+x_{2}X_{2}\right) =0 \label{2.22}
\end{eqnarray}
where (\ref{2.20}) is derived with respect to $x_{0},x_{1},x_{2}$. It is
known that $\wedge _{0}^{2}$ is a screen conformal lightlike surface with
conformal function $\varphi =\frac{1}{2(x_{0})^{2}}$. We also know that $
A_{N}\xi =0$. By direct computation we find
\begin{equation*}
A_{N}X=\frac{1}{2(x_{0})^{2}}A_{\xi }^{\ast }X.
\end{equation*}
Now we evaluate $\gamma ^{\prime },\gamma ^{\prime \prime }$ and $\gamma
^{\prime \prime \prime }$
\begin{eqnarray*}
\gamma ^{\prime } &=&X=\left( 0,-x_{2},x_{1}\right) \\
\gamma ^{\prime \prime } &=&\nabla _{X}X+B\left( X,X\right) N \\
&=&\frac{1}{2}x_{0}\frac{\partial }{\partial x_{0}}-\frac{3}{2}x_{1}\frac{
\partial }{\partial x_{1}}+x_{2}\frac{\partial }{\partial x_{2}} \\
\gamma ^{\prime \prime \prime } &=&\bar{\nabla}_{X}\nabla _{X}X+X\left(
B\left( X,X\right) \right) N+B\left( X,X\right) \bar{\nabla}_{X}N \\
&=&\nabla _{X}\nabla _{X}X+B\left( X,\nabla _{X}X\right) N+X\left( B\left(
X,X\right) \right) N-B\left( X,X\right) A_{N}X
\end{eqnarray*}
using $A_{N}X$ in $\gamma ^{\prime \prime \prime }$ we get
\begin{equation*}
\gamma ^{\prime \prime \prime }=\frac{1}{2}x_{2}\frac{\partial }{\partial
x_{1}}-\frac{1}{2}x_{1}\frac{\partial }{\partial x_{2}}.
\end{equation*}
Therefore $\gamma ^{\prime \prime \prime }$ and $\gamma ^{\prime }$ are
linearly dependent at every $p\in \wedge _{0}^{2}$ and we have
\begin{equation*}
\gamma ^{\prime }\wedge \gamma ^{\prime \prime }\wedge \gamma ^{\prime
\prime \prime }=0.
\end{equation*}
Namely, $\wedge _{0}^{2}$ has non-degenerate planar normal sections.
\end{example}
\end{document}
\begin{document}
\numberwithin{equation}{section}
\numberwithin{table}{section}
\numberwithin{figure}{section}
\title{Wavelet Shrinkage in Nonparametric Regression Models with Positive Noise}
\begin{abstract}
Wavelet shrinkage estimators are widely applied in several fields of science for denoising data in wavelet domain by reducing the magnitudes of empirical coefficients. In the nonparametric regression problem, most of the shrinkage rules are derived from models composed by an unknown function with additive gaussian noise. Although the gaussian noise assumption is reasonable in several real data analysis, mainly for large sample sizes, it is not general. Contaminated data with positive noise can occur in practice and nonparametric regression models with positive noise bring challenges from the wavelet shrinkage point of view. This work develops bayesian shrinkage rules to estimate wavelet coefficients from a nonparametric regression framework with additive and strictly positive noise under exponential and lognormal distributions. Computational aspects are discussed and simulation studies to analyse the performances of the proposed shrinkage rules and compare them with standard techniques are done. An application to the winning times of the Boston Marathon dataset is also provided.
\end{abstract}
\section{Introduction}
\indent Wavelet based methods have been applied in several fields of statistics such as time series modelling, functional data analysis, computational methods and nonparametric regression for example. Their success can be justified by several mathematical and computational reasons. In nonparametric regression, the application of this work, it is possible to expand an unknown squared integrable function in orthogonal wavelet basis, which are composed by dilations and translations of a specified function usually called wavelet function or mother wavelet $\psi$. Examples of wavelet functions are Daubechies wavelets, which are usually indexed by their number of null moments and shown in Figure \ref{fig:wave} for one (Haar or Daub1), two (Daub2), four (Daub4) and ten (Daub10) null moments. This wavelet representation allows the visualization of the data that are obtained from the unknown function by resolution levels and performs a multiresolution analysis by the application of discrete wavelet transform on them. Further, the wavelet representation of a function is typically sparse, i.e, the coefficients of the expansion are mostly equal to zero or very close to zero at smooth regions of the represented function domain. This property is important because, since wavelets are well localized in space and frequency domains, the sparsity representation feature provides the identification of the main properties of the unknown function, such as peaks, discontinuities, maximum and minimum by a few amount of nonzero coefficients. For a review of wavelet methods in statistics, see Vidakovic (1999) and Nason (2008). For a general overview about wavelets and their mathematical properties, see Daubechies (1992) and Mallat (1998).
\begin{figure}
\caption{Daubechies wavelet functions with $N=1$ (Haar wavelet), $2$ (Daub2), $4$ (Daub4) and $10$ (Daub10) null moments.}
\label{fig:wave}
\end{figure}
Wavelet coefficients are essentially sparse at smooth locations of the unknown function, but in practice, after the application of the discrete wavelet transformation on the data, one observes contaminated wavelet coefficients with random noise, called empirical wavelet coefficients, which are not sparse due to the noise effect. For denoising the empirical coefficients and estimating the wavelet coefficients of the function representation, thresholding and shrinkage methods are usually applied on the empirical coefficients by reducing their magnitudes. There are several nonlinear thresholding and shrinkage methods available in the literature, most of them based on the seminal works of Donoho (1993a,b), Donoho (1995a,b), Donoho and Johnstone (1994a,b) and Donoho and Johnstone (1995), with the proposition of the so called soft and hard thresholding rules. Bayesian shrinkage procedures have also been successfully proposed for denoising empirical wavelet coefficients. These methods allow the incorporation of prior information regarding the coefficients, such as their sparsity, support, dispersion and extreme values by means of a prior probabilistic distribution. In this context, the proposed priors are usually composed by a mixture of a high concentrated distribution around zero to assign sparsity and a symmetric distribution around zero. Prior distributions already proposed to the wavelet coefficients include mixtures of normals by Chipman et al. (1997), mixtures of a point mass function at zero and double exponential distribution by Vidakovic and Ruggeri (2001), Bickel prior by Angelini and Vidakovic (2004), double Weibull by Reményi and Vidakovic (2015), Dirichlet-Laplace priors by Bhattacharya el al. (2015) and, recently, logistic and beta priors by Sousa (2020) and Sousa et al. (2020) respectively. For a general overview about wavelet shrinkage and thresholding techniques, see Jansen (2001).
Although the well succeeded performance of the proposed thresholding and bayesian shrinkage methods for denoising wavelet coefficients, most of them suppose that the data points from the underlying function are contaminated with additive normal random noise. Although this assumption can hold in practice and implies several good estimation properties, it is not general, mainly under small sample sizes, where the central limit theorem can not be applied. Little attention is given to wavelet denoising problems in nonparametric regression models under non-normal random noise or, specifically, additive strictly positive random noise. Neumann and von Sachs (1995) discuss normal approximations to the wavelet empirical coefficients for thresholding without the normality supposition of the noises and independent and identically distributed (iid) assumption of them. Leporini and Pesquet (2001) proposed the use of Besov priors on the wavelet coefficients to derive a bayesian thresholding rule under a possible resolution level dependent generalized normal distributed noise in the wavelet domain. Antoniadis et al. (2002) provided explicit bayesian thresholding rules based on Maximum a Posteriori (MAP) estimation procedure under exponential power distribution prior on the wavelet coefficients and supposing exponential power and Cauchy distributions to the noise in the wavelet domain. Averkamp and Houdré (2003) analyzed the ideal denoising in the sense of Donoho and Johnstone (1995) considering some classes of noise, including identically distributed symmetric around zero noises in the wavelet domain. Thresholding under compactly support noises in wavelet domain is also discussed. Thus, the above cited works dealt with non-gaussian noise but none of them assumes positive noise in the original model. Further, the noise distribution assumptions occur directly in the wavelet domain, after the discrete wavelet transform application on the original data.
In this sense, this paper proposes bayesian shrinkage procedures for denoising empirical wavelet coefficients in nonparametric regression models with strictly positive random noise contamination in the original data, assuming additive noises to be independent and identically distributed exponential and lognormal. The adopted priors are the mixture of a point mass function at zero and the logistic prior proposed by Sousa (2020) and beta prior proposed by Sousa et al. (2020), both works under the classical gaussian noise structure. Assuming additive and positive random noise in the original nonparametric model brings several challenges in estimation point of view. First, the independence property of the noises is lost after wavelet transformation, i.e, noises in the wavelet domain are possibly correlated. The consequence of this fact is that the wavelet coefficient estimation can not be performed individually as usually is done under gaussian noise assumption, but jointly by a joint posterior distribution of the wavelet coefficients vector, which requires computational methods, such as Markov Chain Monte Carlo (MCMC) methods to sample from the joint posterior distribution. Further, noises in the wavelet domain are not necessarily positive, but only linear combinations of them. Finally, several statistical models with multiplicative positive noise were proposed and dealt with by logarithmic transformations, but models with additive positive noise are not so common in the literature, although additive positive noise can be observed in a wide variety of real measurements. For example, arrival times of radio or waves measures typically contain positive errors, possibly due to delays in equipment detection. See Radnosrati et al. (2020) for an interesting study of classical estimation theory of models with additive positive noise and a nice application involving global navigation satellite systems (GNSS) with positive noise arrival times.
Thus, the main novelty of this work is to perform wavelet shrinkage under additive positive noise in the original nonparametric model. To do so, logistic and beta priors are put on the wavelet coefficients. The logistic prior is suitable for coefficients with support on the real line. Its scale hyperparameter has easy and direct interpretation in terms of shrinkage, as can be seen in Sousa (2020). The beta prior (Sousa et al., 2020) is a good choice for bounded coefficients and its well known shape flexibility brings advantages in modelling. This paper is organized as follows: the considered statistical models are defined in Section 2 and their associated shrinkage rules with computational aspects described in Section 3. Parameters and hyperparameters choices are discussed in Section 4. Simulation studies to obtain the performance of the shrinkage rules and to compare with standard shrinkage/thresholding techniques are analysed in Section 5. A real data application involving winning times of the Boston Marathon is done in Section 6. The paper is concluded with final considerations in Section 7.
\section{Statistical models}
We consider $n=2^J$, $J \in \mathbb{N}$, points $(x_1,y_1),\cdots,(x_n,y_n)$ from the nonparametric regression model
\begin{equation}\label{regmodel}
y_i = f(x_i) + e_i, \hspace{0.5cm} i=1,\cdots,n
\end{equation}
where $f \in \mathrm{L}^2(\mathbb{R}) = \{f:\int f^2 < \infty\}$ is an unknown function and $e_i$ are independent and identically distributed (iid) random noises such that $e_i>0$, $i=1,\cdots,n$. The goal is to estimate $f$ without assumptions about its functional structure, i.e, the estimation procedure will take only the data points into account. In this work, we consider random noise with exponential and lognormal distributions, given by
\begin{itemize}
\item Exponential distributed noise: $e_i \sim \mathrm{Exp}(\lambda)$
\begin{equation}\label{exp}
h(e_i;\lambda) = \lambda \exp\{-\lambda e_i\}\mathbb{I}_{(0,\infty)}(e_i), \hspace{0.5cm} \lambda > 0,
\end{equation}
\item Lognormal distributed noise: $e_i \sim \mathrm{LN}(0,\sigma)$
\begin{equation}\label{lognor}
h(e_i;\sigma) = \frac{1}{e_i\sigma \sqrt{2\pi}}\exp \Big\{-\frac{\log^2(e_i)}{2\sigma^2}\Big\}\mathbb{I}_{(0,\infty)}(e_i), \hspace{0.5cm} \sigma > 0,
\end{equation}
\end{itemize}
\noindent where $\mathbb{I}_{A}(\cdot)$ is the usual indicator function on the set $A$ and $\log(\cdot)$ is the natural logarithm. We suppose both the noise distribution parameters $\lambda$ and $\sigma$ as known, although a brief discussion for the unknown case is provided in Section 4.
The unknown function $f$ can be represented by
\begin{equation} \label{expan}
f(x) = \sum_{j,k \in \mathbb{Z}}\theta_{j,k} \psi_{j,k}(x),
\end{equation}
where $\{\psi_{j,k}(x) = 2^{j/2} \psi(2^j x - k),j,k \in \mathbb{Z} \}$ is an orthonormal wavelet basis for $L^2(\mathbb{R})$ constructed by dilations $j$ and translations $k$ of a function $\psi$ called wavelet or mother wavelet and $\theta_{j,k}$ are wavelet coefficients that describe features of $f$ at spatial location $2^{-j}k$ and scale $2^j$ or resolution level $j$. In this context, the data points $(x_1,y_1),\cdots,(x_n,y_n)$ can be viewed as an approximation of $f$ at the finest resolution level $J$ with additive and positive noise contamination. As an example, Figure \ref{fig:ex} displays a Donoho-Johnstone (D-J) test function called Blocks, that will be defined in Section 5, and $1024 = 2^{10}$ data points generated from this function with additive exponential distributed random noises.
\begin{figure}
\caption{Blocks function and 1024 data points with additive exponential noises.}
\label{fig:ex}
\end{figure}
The estimation process of $f$ is done by estimation of the wavelet coefficients. In vector notation, model \eqref{regmodel} can be written as
\begin{equation}\label{regvec}
\boldsymbol{y} = \boldsymbol{f} + \boldsymbol{e},
\end{equation}
where $\boldsymbol{y} = [y_1,\cdots,y_n]'$, $\boldsymbol{f} = [f(x_1),\cdots,f(x_n)]'$ and $\boldsymbol{e}= [e_1,\cdots,e_n]'$. A discrete wavelet transform (DWT), which is typically represented by an orthonormal transformation matrix $\boldsymbol{W}_{n \times n} = (w_{ij})_{1 \leq i,j \leq n}$, is applied on both sides of \eqref{regvec}, obtaining the following model in wavelet domain
\begin{equation}\label{wavmodel}
\boldsymbol{d} = \boldsymbol{\theta} + \boldsymbol{\varepsilon},
\end{equation}
where $\boldsymbol{d} = \boldsymbol{Wy}$ is called empirical coefficients vector, $\boldsymbol{\theta} = \boldsymbol{Wf}$ is the wavelet coefficients vector and $\boldsymbol{\varepsilon} = \boldsymbol{We}$ is the random noise vector. Although $\boldsymbol{W}$ is used as DWT representation, fast algorithms are applied to perform DWT in practice, which are more computationally efficient, see Mallat (1998). When $e_i$ is assumed to be iid normal distributed in model \eqref{regmodel}, as occurs in most of the studied nonparametric models in wavelet shrinkage methods research, the distribution of the noise in wavelet domain remains normal, $\varepsilon_i$ is iid normal with the same scale parameter as in the time domain model noise. This property brings several estimation advantages, once the problem of estimating $\theta$ in this context is equivalent to estimating a location parameter of a normal distribution. Moreover, as the noises in wavelet domain remain independent, $\theta$-estimation could be done individually. When $e_i$'s are positive, most of these advantages are lost. Actually, $\varepsilon_i$'s are correlated and not necessarily positive. Also, their distribution is not the same as their counterparts in time domain. The main impact of these facts is that the estimation of $\theta$ can not be performed individually, but according to a joint posterior distribution of $\boldsymbol{\theta}$.
The wavelet coefficients vector $\boldsymbol{\theta}$ could be estimated by application of a shrinkage rule $\boldsymbol{\delta(d)}$ on the empirical coefficients vector $\boldsymbol{d}$. This procedure essentially performs denoising on the observed coefficients by reducing their magnitudes in order to estimate the wavelet coefficients. After the estimation $\boldsymbol{\hat{\theta}} = \boldsymbol{\delta(d)}$, $f$ is estimated by the inverse discrete wavelet transform (IDWT), $\boldsymbol{\hat{f}} = \boldsymbol{W^{t}\hat{\theta}}$.
In this work, we apply a bayesian shrinkage procedure assuming prior distributions to a single wavelet coefficient $\theta$ (the subindices are dropped by simplicity). The priors have the general structure
\begin{equation}\label{prior}
\pi(\theta;\alpha,\boldsymbol{\eta}) = \alpha \delta_{0}(\theta) + (1-\alpha)g(\theta;\boldsymbol{\eta}),
\end{equation}
for $\alpha \in (0,1)$, $\delta_{0}(\cdot)$ is the point mass function at zero and $g(\cdot;\boldsymbol{\eta})$ is a probability distribution defined according to a hyperparameters vector $\boldsymbol{\eta}$. The choice of $g(\cdot;\boldsymbol{\eta})$ can be made according to the support of $\theta$. We consider in this work two quite flexible distributions $g(\cdot;\boldsymbol{\eta})$, the symmetric around zero logistic distribution proposed by Sousa (2020) given by
\begin{equation}\label{log}
g(\theta;\tau) = \frac{\exp\Big\{-\frac{\theta}{\tau}\Big\}}{\tau \left(1+\exp\Big\{-\frac{\theta}{\tau}\Big\}\right)^2}\mathbb{I}_{\mathbb{R}}(\theta), \hspace{0.5cm} \tau > 0,
\end{equation}
and the beta distribution on the interval $[-m,m]$ proposed by Sousa et al. (2020) given by
\begin{equation}\label{beta}
g(\theta;a,b,m) = \frac{(\theta+m)^{a-1}(m-\theta)^{b-1}}{(2m)^{a+b-1}B(a,b)}\mathbb{I}_{[-m,m]}(\theta), \hspace{0.5cm} a,b,m>0,
\end{equation}
where $B(\cdot,\cdot)$ is the beta function. Sousa (2020) and Sousa et al. (2020) developed shrinkage rules under logistic and beta priors respectively under the standard gaussian noise framework. Figures \ref{fig:densities} (a) and (b) show logistic and beta densities for several hyperparameters values respectively. The beta densities are considered on interval $[-3,3]$.
\begin{figure}
\caption{Logistic and beta densities for several hyperparameters values $\tau$ and $(a,b)$ respectively.}
\label{lognormal}
\label{blocls}
\label{fig:densities}
\end{figure}
The logistic prior centered at zero is suitable in bayesian wavelet shrinkage for real valued wavelet coefficients, i.e, when $\theta \in \mathbb{R}$. Further, its hyperparameter $\tau$ has an important role in determining the degree of shrinkage to be applied on the empirical coefficients, as described in Sousa (2020). The beta prior offers great flexibility in modelling bounded wavelet coefficients, i.e, when $\theta \in [-m,m]$, once it allows symmetric ($a=b$) and asymmetric ($a \neq b$) distributions around zero. As the logistic prior, its hyperparameters $a$ and $b$ control the amount of shrinkage of the associated bayesian rule. For $b=a$, bigger values of $a$ imply the increase of the shrinkage level imposed on the shrinkage rule, i.e, the associated rule tends to a severe reduction of the empirical coefficients' magnitudes. More details about beta priors on wavelet coefficients can be found in Sousa et al. (2020). Thus, logistic and beta priors are convenient choices for $g$ in \eqref{prior} for modelling several prior information about the wavelet coefficients to be estimated, such as their support, symmetry and sparsity.
\section{Shrinkage rules and computational aspects}
The general shrinkage rules $\delta$ associated to the models \eqref{regmodel}, \eqref{exp}, \eqref{lognor}, \eqref{wavmodel} and \eqref{prior} under quadratic loss function are obtained by the posterior expected value, i.e, $\delta(\boldsymbol{d}) = \mathbb{E}_{\pi}\left(\boldsymbol{\theta}|\boldsymbol{d} \right)$. Since it is infeasible to obtain the posterior expected value analytically, we use an adaptive Markov Chain Monte Carlo (MCMC) method to be described later to generate $L$ samples $\boldsymbol{\theta_1},\boldsymbol{\theta_2},\cdots,\boldsymbol{\theta_L}$ from the joint posterior distribution $\pi(\cdot|\boldsymbol{d})$ of $\boldsymbol{\theta}|\boldsymbol{d}$ and estimate a particular wavelet coefficient $\theta_i$ by the sample mean,
\begin{equation}\label{rule}
\hat{\theta}_i = \delta_i(\boldsymbol{d}) \approx \frac{1}{L}\sum_{l=1}^{L}\theta_{li},
\end{equation}
where $\theta_{li}$ is the $i$-th element of the generated sample $\boldsymbol{\theta_l}$, $l=1,\cdots,L$ and $i=1,\cdots,n$.
The posterior sample generation process is performed using the robust adaptive Metropolis (RAM) algorithm proposed by Vihola (2012) and implemented computationally in the \textit{adaptMCMC} R package by Scheidegger (2021). The algorithm estimates the shape of the target distribution $\pi(\cdot|\boldsymbol{d})$ and simultaneously coerces the mean acceptance rate of the process. For each iteration of the chain generation, a single shape matrix $\boldsymbol{S}$ is adaptively updated. Let $\boldsymbol{S_1} \in \mathbb{R}^{n \times n}$ be a lower-diagonal matrix with positive diagonal elements, $\{\eta_k\}_{k \geq 1} \subset (0,1]$ be a sequence decaying to zero, $\gamma \in (0,1)$ be the target mean acceptance rate and $\boldsymbol{\theta_1}$ such that $\pi(\boldsymbol{\theta_1|\boldsymbol{d}})>0$, the RAM algorithm works as follows for $k \geq 2$,
\begin{enumerate}
\item Generate $\boldsymbol{\theta_{k}^{*}} = \boldsymbol{\theta_{k-1}} + \boldsymbol{S_{k-1}}\boldsymbol{U_k}$,
where $\boldsymbol{U_k} \sim N_n(\boldsymbol{0},\boldsymbol{I_{n}})$ and $\boldsymbol{I_n}$ is the identity matrix of dimension $n \times n$.
\item Do $\boldsymbol{\theta_k} = \boldsymbol{\theta_{k}^{*}}$ with probability
$$\gamma_k = \min\left(1,\frac{\pi(\boldsymbol{\theta_{k}^{*}}|\boldsymbol{d})}{\pi(\boldsymbol{\theta_{k-1}}|\boldsymbol{d})}\right),$$
or $\boldsymbol{\theta_k} = \boldsymbol{\theta_{k-1}}$ else.
\item Compute the lower diagonal matrix $\boldsymbol{S_{k}}$ with positive diagonal elements satisfying the equation
$$ \boldsymbol{S_{k}}\boldsymbol{S_{k}^{t}} = \boldsymbol{S_{k-1}}\left(\boldsymbol{I}+\eta_k(\gamma_k - \gamma)\frac{\boldsymbol{U_k}\boldsymbol{U_{k}^{t}}}{||\boldsymbol{U_k}||^2}\right)\boldsymbol{S_{k-1}^{t}}.$$
\end{enumerate}
We applied $\eta_k = \min\{1,nk^{-2/3}\}$ and $\gamma = 0.234$ as suggested by Vihola (2012) along the simulation studies and application to obtain the posterior distributions samples of the wavelet coefficients. The next subsections provide the posterior distributions that are considered as target distributions in RAM algorithm.
\subsection{Posterior distributions under exponential noise}
Considering the model under exponential noise \eqref{regmodel}, \eqref{exp} and the model after DWT application \eqref{wavmodel}, it is straightforward to obtain the likelihood function of the empirical coefficients $\mathcal{L}(\boldsymbol{d} | \boldsymbol{\theta})$ by the application of the Jacobian method to the transformation $\boldsymbol{d} = \boldsymbol{\theta} + \boldsymbol{W}\boldsymbol{e}$. The likelihood function is given by
\begin{equation} \label{likexp}
\mathcal{L}(\boldsymbol{d}|\boldsymbol{\theta}) = |\boldsymbol{W}|\lambda^n\exp\bigg\{-\lambda \sum_i \sum_j w_{ji}(d_j - \theta_j)\bigg\}\prod_i \mathbb{I}_{(0,\infty)}\left(\sum_j w_{ji}(d_j - \theta_j)\right).
\end{equation}
The posterior distribution of $\boldsymbol{\theta}|\boldsymbol{d}$ can be obtained by the well known relationship
\begin{equation}\label{bayes}
\pi(\boldsymbol{\theta}|\boldsymbol{d}) \propto \pi(\boldsymbol{\theta}) \mathcal{L}(\boldsymbol{d} | \boldsymbol{\theta}).
\end{equation}
Thus, applying \eqref{bayes} for \eqref{likexp} and the logistic prior model \eqref{prior} and \eqref{log}, we have the following posterior distribution to the wavelet coefficients given the empirical ones under logistic prior model and exponential noise on the original data,
\begin{align}\label{1postlog}
\pi(\boldsymbol{\theta}|\boldsymbol{d}) &\propto \prod_i \left[\alpha \delta_{0}(\theta_i) + (1-\alpha)\frac{\exp\Big\{-\frac{\theta_i}{\tau}\Big\}}{\tau \left(1+\exp\Big\{-\frac{\theta_i}{\tau}\Big\}\right)^2} \right] \times \exp\bigg\{-\lambda \sum_i \sum_j w_{ji}(d_j - \theta_j)\bigg\} \times \nonumber \\
& \times \prod_i \mathbb{I}_{(0,\infty)}\left(\sum_j w_{ji}(d_j - \theta_j)\right).
\end{align}
Analogously, we can have the posterior distribution of $\boldsymbol{\theta}|\boldsymbol{d}$ under beta prior model and exponential noise on the original data by considering now \eqref{beta} instead of \eqref{log}, given by
\begin{align}\label{1postbeta}
\pi(\boldsymbol{\theta}|\boldsymbol{d}) &\propto \prod_i \left[\alpha \delta_{0}(\theta_i) + (1-\alpha)\frac{(\theta_i+m)^{a-1}(m-\theta_i)^{b-1}}{(2m)^{a+b-1}B(a,b)} \right] \times \exp\bigg\{-\lambda \sum_i \sum_j w_{ji}(d_j - \theta_j)\bigg\} \times \nonumber \\
& \times \prod_i \mathbb{I}_{[-m,m]}(\theta_i) \times \prod_i \mathbb{I}_{(0,\infty)}\left(\sum_j w_{ji}(d_j - \theta_j)\right).
\end{align}
\subsection{Posterior distributions under lognormal noise}
The likelihood function of the empirical coefficients for the model under lognormal noise \eqref{regmodel}, \eqref{lognor} and the model after DWT application \eqref{wavmodel} is obtained as described in Subsection 3.1 and given by
\begin{align}\label{liklog}
\mathcal{L}(\boldsymbol{d}|\boldsymbol{\theta}) &= \frac{|\boldsymbol{W}|}{(\sigma \sqrt{2 \pi})^{n} \prod_{i} \left[\sum_{j} w_{ji}(d_j - \theta_j) \right]} \times \exp \Bigg \{ -\frac{1}{2\sigma^2} \sum_{i} \log^2\left(\sum_{j} w_{ji}(d_j - \theta_j) \right) \Bigg \} \times \nonumber \\
& \times \prod_i \mathbb{I}_{(0,\infty)}\left(\sum_j w_{ji}(d_j - \theta_j)\right).
\end{align}
Thus, the posterior distribution of $\boldsymbol{\theta}|\boldsymbol{d}$ under lognormal noise in the original data and logistic prior model \eqref{prior} and \eqref{log} is obtained by application of \eqref{bayes} for the likelihood function \eqref{liklog} and given by
\begin{align}\label{2postlog}
\pi(\boldsymbol{\theta}|\boldsymbol{d}) &\propto \prod_i \left[\alpha \delta_{0}(\theta_i) + (1-\alpha)\frac{\exp\Big\{-\frac{\theta_i}{\tau}\Big\}}{\tau \left(1+\exp\Big\{-\frac{\theta_i}{\tau}\Big\}\right)^2} \right] \times \frac{\exp \Bigg \{ -\frac{1}{2\sigma^2} \sum_{i} \log^2\left(\sum_{j} w_{ji}(d_j - \theta_j) \right) \Bigg \}}{\prod_{i} \left[\sum_{j} w_{ji}(d_j - \theta_j) \right]} \times \nonumber \\
& \times \prod_i \mathbb{I}_{(0,\infty)}\left(\sum_j w_{ji}(d_j - \theta_j)\right),
\end{align}
and the posterior distribution of $\boldsymbol{\theta}|\boldsymbol{d}$ under beta prior model \eqref{prior} and \eqref{beta} is
\begin{align}\label{2postbeta}
\pi(\boldsymbol{\theta}|\boldsymbol{d}) &\propto \prod_i \left[\alpha \delta_{0}(\theta_i) + (1-\alpha)\frac{(\theta_i+m)^{a-1}(m-\theta_i)^{b-1}}{(2m)^{a+b-1}B(a,b)} \right]\times \frac{\exp \Bigg \{ -\frac{1}{2\sigma^2} \sum_{i} \log^2\left(\sum_{j} w_{ji}(d_j - \theta_j) \right) \Bigg \}}{\prod_{i} \left[\sum_{j} w_{ji}(d_j - \theta_j) \right]} \times \nonumber \\
& \times \prod_i \mathbb{I}_{[-m,m]}(\theta_i) \times \prod_i \mathbb{I}_{(0,\infty)}\left(\sum_j w_{ji}(d_j - \theta_j)\right).
\end{align}
Therefore, the posterior distributions \eqref{1postlog} and \eqref{1postbeta} of $\boldsymbol{\theta}|\boldsymbol{d}$ are the considered target distributions under logistic and beta prior models respectively in RAM algorithm to be sampled and estimate the wavelet coefficients by the shrinkage rule \eqref{rule} for original data contaminated by exponential noise. Similarly, the posterior distributions \eqref{2postlog} and \eqref{2postbeta} are the target ones under logistic and beta priors respectively for lognormal noise contaminated observations.
\section{Parameters elicitation}
The performance of the bayesian procedure is closely related to a good choice or estimation of the involved parameters and hyperparameters of the models. The proposed shrinkage rules depend on the parameters $\lambda$ and $\sigma$ of the noise exponential and lognormal distributions respectively, which were considered as known throughout the paper, the weight $\alpha$ of the point mass function of the prior models and the hyperparameters $\tau$ and $(a,b,m)$ of the logistic and beta priors respectively.
Angelini and Vidakovic (2004) proposed the hyperparameters $\alpha$ and $m$ be dependent on the resolution level $j$ according to the expressions
\begin{equation}\label{eq:alpha}
\alpha = \alpha(j) = 1 - \frac{1}{(j-J_{0}+1)^r},
\end{equation}
\begin{equation}\label{eq:m}
m = m(j) = \max_{k}\{|d_{jk}|\},
\end{equation}
where $J_{0} \leq j \leq J-1$, $J_0$ is the primary resolution level, $J$ is the number of resolution levels, $J=\log_{2}(n)$ and $r > 0$. They also suggest that in the absence of additional information, $r = 2$ can be adopted.
The choices of the hyperparameters $\tau$ and $(a,b)$ are discussed respectively by Sousa (2020) and Sousa et al. (2020). In fact, their values have a direct impact on the shrinkage level of the associated rule. Higher denoising level on empirical coefficients requires higher values of $\tau$ and $(a,b)$. Moreover, these hyperparameters can be resolution level dependent, such as $\alpha$ and $m$. As default values, $\tau = a = b = 5$ can be used. Further discussion about how to choose $(a,b)$ of a beta prior distribution can also be seen in Chaloner and Duncan (1983) and Duran and Booker (1988).
The noise distribution parameters $\lambda$ and $\sigma$ of exponential and lognormal respectively, although considered as known, can be included in the bayesian framework, independently of the wavelet coefficients, by attributing suitable priors to them, such as an inverse gamma prior, for example. In this case, the general prior model \eqref{prior} under exponential noise could be updated by
\begin{equation}
\pi(\theta,\lambda;\alpha,\boldsymbol{\eta},\boldsymbol{\zeta}) = \pi(\theta;\alpha,\boldsymbol{\eta}) \times \pi(\lambda;\boldsymbol{\zeta}), \nonumber
\end{equation}
where $\pi(\lambda;\boldsymbol{\zeta})$ is the prior distribution of $\lambda$ and $\boldsymbol{\zeta}$ is its hyperparameter vector. Analogous procedure can be done for the lognormal noise case.
\section{Simulation studies}
The performances of the proposed shrinkage rules were obtained in simulation studies and compared against standard shrinkage/thresholding techniques. The so called Donoho-Johnstone (D-J) test functions (Donoho and Johnstone, 1995) were considered as underlying functions to be estimated, which are composed by four test functions called Bumps, Blocks, Doppler and Heavisine defined on $[0,1]$ by,
\begin{itemize}
\item \textbf{Bumps}
$$ f(x) = \sum_{l=1}^{11} h_l K\left(\frac{x - x_l}{w_l} \right), \nonumber $$
where
$K(x) = (1 + |x|)^{-4}$;
$(x_l)_{l=1}^{11} = (0.1, 0.13, 0.15, 0.23, 0.25, 0.40, 0.44, 0.65, 0.76, 0.78, 0.81)$;
$(h_l)_{l=1}^{11} = (4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 5.1, 4.2)$ and
$(w_l)_{l=1}^{11} = (0.005, 0.005, 0.006, 0.01, 0.01, 0.03, 0.01, 0.01, 0.005, 0.008, 0.005)$.
\item \textbf{Blocks}
$$ f(x) = \sum_{l=1}^{11} h_l K(x - x_l), \nonumber $$
where
$K(x) = (1 + sgn(x))/2$;
$(x_l)_{l=1}^{11} = (0.1, 0.13, 0.15, 0.23, 0.25, 0.40, 0.44, 0.65, 0.76, 0.78, 0.81)$ and
$(h_l)_{l=1}^{11} = (4, -5, 3, -4, 5, -4.2, 2.1, 4.3, -3.1, 2.1, -4.2)$.
\item \textbf{Doppler}
$$ f(x) = \sqrt{x(1-x)}\sin\left(\frac{2.1 \pi}{x + 0.05} \right). \nonumber$$
\item \textbf{Heavisine}
$$ f(x) = 4\sin(4 \pi x) - sgn(x - 0.3) - sgn(0.72 - x). \nonumber $$
\end{itemize}
The functions are presented in Figure \ref{fig:dj}. In fact, the D-J functions have important features such as peaks, discontinuities, constant parts and oscillations to be captured by denoising data, representing most of the signals that occur in practice.
\begin{figure}
\caption{Donoho-Johnstone test functions used as underlying signals in the simulation studies.}
\label{fig:dj}
\end{figure}
For a particular test function, data were generated by adding exponential and lognormal noises to the function points according to two signal to noise ratio (SNR) values, SNR = $3$ and $9$ and two sample sizes, $n = 32$ and $64$. Each scenario of underlying function, SNR and sample size data generation was replicated $M = 100$ times and the averaged mean square error (AMSE) was calculated as performance measure, given by
$$\mathrm{AMSE} = \frac{1}{Mn}\sum_{m=1}^{M} \sum_{i=1}^{n}[{\hat f^{(m)}(x_i)} - f(x_i)]^2, \nonumber$$
where $\hat f^{(m)}(\cdot)$ is the estimate of the function at a particular point in the $m$-th replication, $m = 1, \dots, M = 100$. For each replication, $L = 10,000$ samples of the posterior distributions \eqref{1postlog}, \eqref{1postbeta}, \eqref{2postlog} and \eqref{2postbeta} were obtained by the RAM algorithm and the associated shrinkage rules were calculated by \eqref{rule}. The performances of the shrinkage rules under logistic (LOGISTIC) and beta (BETA) priors were compared against four extensively used shrinkage and thresholding methods: Universal thresholding (UNIV) proposed by Donoho and Johnstone (1994), Cross Validation (CV) proposed by Nason (1996), False Discovery Rate (FDR) proposed by Abramovich and Benjamini (1996) and Stein Unbiased Risk Estimator (SURE) proposed by Donoho and Johnstone (1995).
\subsection{Simulation under exponential noise}
Table \ref{tab:amse1} shows the AMSEs of the shrinkage and thresholding rules under exponential noise simulated data. In fact, the proposed shrinkage rules had great performances in terms of AMSE in almost all the scenarios. The shrinkage rule under logistic prior was the best estimator for all the scenarios with sample size $n=32$ and for most of the times when $n=64$, being the best estimator in general. The shrinkage rule under beta prior was the best for the Bumps function, SNR=3 and $n=64$ and for Blocks, SNR=9 and also $n=64$. Even when the beta shrinkage rule was not the best one, its performance was close to the logistic rule in general, being the second best estimator. Moreover, the proposed rules worked much better than the standard rules in some of the cases; for example, for the Bumps function, SNR = 9 and $n=32$, the AMSEs of the logistic and beta rules were respectively 0.787 and 1.140. The third best estimator in those scenarios was SURE, with AMSE = 6.287, almost 8 times the AMSE of the logistic rule. Only for the Heavisine function and $n=64$ were the proposed rules not the best ones, losing to the UNIV and CV methods, but even in these cases their performances were close to those. Finally, the good behavior of the rules for low signal to noise ratio, i.e., for SNR=3, should be noted, as it is evidence of good performance for high noise datasets.
Figure \ref{fig:expest} presents the estimates obtained by the shrinkage rule under logistic prior for $n=64$ and SNR=9. The main features of each test function were captured by the estimates, such as the spikes of Bumps, the piecewise constant regions of Blocks, the oscillations of Doppler and the discontinuity point of the Heavisine function. Boxplots of the estimators' MSEs are also provided in Figure \ref{fig:expbp} and showed low variation for the proposed shrinkage rules' MSEs.
\begin{table}[H]
\scalefont{0.5}
\centering
% placeholder label removed: the table is labeled as tab:amse1 at its caption
\begin{tabular}{|c|c|c|c|c|||c|c|c|c|c|}
\hline
Signal & n & Method & SNR = 3 & SNR = 9 & Signal & n & Method & SNR = 3 & SNR = 9 \\ \hline
Bumps& 32 & UNIV& 18.721 & 2.882 & Blocks & 32 & UNIV &17.631 & 3.292 \\
& & CV &38.439 & 23.175 & & & CV &21.504 & 15.457 \\
& & FDR &31.603 & 12.530 & & & FDR &21.684 & 16.227 \\
& & SURE & 30.872 & 6.287 & & & SURE &21.841 & 16.211 \\
& & LOGISTIC & \textbf{7.069} & \textbf{0.787} & & & LOGISTIC & \textbf{5.960} & \textbf{0.748} \\
& & BETA &7.081 & 1.140 & & & BETA & 6.542 & 0.769 \\ \hline
& 64 & UNIV & 17.052 & 2.615 & & 64 & UNIV &18.002 & 3.211 \\
& & CV &28.317 & 9.140 & & & CV & 24.277 & 16.021 \\
& & FDR &20.496 & 4.562 & & & FDR & 21.586 & 7.864 \\
& & SURE & 12.325 & 1.718 & & & SURE & 24.728 & 8.419 \\
& & LOGISTIC &8.449 & \textbf{1.028} & & & LOGISTIC & \textbf{8.303} & 1.033 \\
& & BETA & \textbf{8.408} & 1.110 & & & BETA & 8.903 & \textbf{1.022} \\ \hline \hline
Doppler&32 & UNIV & 11.977 & 1.881 & Heavisine &32 & UNIV & 7.374 & 1.146 \\
& & CV & 12.795 & 3.573 & & & CV & 7.429 & 1.150 \\
& & FDR & 17.121 & 4.993 & & & FDR & 7.564 & 1.161 \\
& & SURE & 11.207 & 1.312 & & & SURE & 7.526 & 1.148 \\
& & LOGISTIC & \textbf{6.422} & \textbf{0.834} & & & LOGISTIC & \textbf{6.373} & \textbf{0.779} \\
& & BETA & 8.488 & 1.109 & & & BETA & 8.410 & 0.995 \\ \hline
&64 & UNIV & 11.845 & 2.098 & & 64& UNIV & \textbf{6.425} & 1.054 \\
& & CV & 12.566 & 3.556 & & & CV & 6.436 & \textbf{1.004} \\
& & FDR & 13.281 & 2.517 & & & FDR & 6.460 & 1.019 \\
& & SURE & 10.735 & 1.235 & & & SURE & 6.439 & 1.046 \\
& & LOGISTIC & \textbf{8.230} & \textbf{1.031} & & & LOGISTIC & 8.194 & 1.045 \\
& & BETA & 9.780 & 1.124 & & & BETA & 9.736 & 1.145 \\ \hline
\end{tabular}
\caption{AMSE of the shrinkage/thresholding rules in the simulation study for DJ-test functions under exponential noise.}\label{tab:amse1}
\end{table}
\begin{figure}
\caption{Estimates of the D-J test functions by the shrinkage rule under logistic prior in the simulation study for $n=64$, SNR = 9 and for simulated points under exponential noise.}
\label{fig:expest}
\end{figure}
\begin{figure}
\caption{Boxplots of the mean square errors (MSE) of the shrinkage and thresholding rules in the simulation study for $n=64$, SNR = 9 and for simulated points under exponential noise. The associated rules are: 1-UNIV, 2-CV, 3-FDR, 4-SURE, 5-LOGISTIC and 6-BETA.}
\label{fig:expbp}
\end{figure}
\subsection{Simulation under lognormal noise}
The obtained results for simulated data under lognormal noise are available in Table \ref{tab:amse2}. In general, the shrinkage rule under logistic prior had the best performance in terms of AMSE, beating the other estimators in practically all scenarios with SNR=9. The rule under beta prior also presented good performance, with AMSEs close to the logistic rule ones and being the best for Blocks function, $n=64$ and SNR=9. Further, the beta rule worked better than logistic one in scenarios with low signal to noise ratio, SNR=3.
Although logistic rule was the best in general, it should be observed that the behaviors of the standard rules under lognormal noise were better in general than the respective ones under exponential noise. For example, considering data with SNR=3, SURE was the best for Bumps and Doppler underlying functions, while UNIV was the best one for Blocks and Heavisine. Under exponential noise, these rules were dominated by the proposed estimators for these same functions and scenarios.
Figure \ref{fig:logest} shows the estimates of the D-J functions by the shrinkage rule under logistic prior, for $n=64$ and SNR=9. As occurred in the exponential noise context, the estimates captured well the main characteristics of the test functions. Boxplots of the MSEs are shown in Figure \ref{fig:logbp}, where it is possible to note low MSE variation for the proposed shrinkage rules.
\begin{table}[H]
\scalefont{0.5}
\centering
% placeholder label removed: the table is labeled as tab:amse2 at its caption
\begin{tabular}{|c|c|c|c|c|||c|c|c|c|c|}
\hline
Signal & n & Method & SNR = 3 & SNR = 9 & Signal & n & Method & SNR = 3 & SNR = 9 \\ \hline
Bumps&32 & UNIV & \textbf{16.940} & 3.787 & Blocks &32 & UNIV & \textbf{16.718} & 4.106 \\
& & CV & 33.612 & 23.744 & & & CV & 19.909 & 16.154 \\
& & FDR & 25.158 & 13.599 & & & FDR & 20.177 & 17.023 \\
& & SURE & 24.201 & 6.772 & & & SURE & 20.306 & 17.049 \\
& & LOGISTIC & 47.405 & \textbf{2.447} & & & LOGISTIC & 45.090 & \textbf{2.304} \\
& & BETA & 39.527 & 7.735 & & & BETA & 50.059 & 2.440 \\ \hline
&64 & UNIV & 14.688 & 3.535 & & 64& UNIV & \textbf{15.721} & 4.026 \\
& & CV & 20.985 & 9.886 & & & CV & 20.917 & 16.077 \\
& & FDR & 13.639 & 5.711 & & & FDR & 15.816 & 8.375 \\
& & SURE & \textbf{8.441} & 2.555 & & & SURE & 20.266 & 7.314 \\
& & LOGISTIC & 27.249 & \textbf{2.053} & & & LOGISTIC & 28.904 & 2.004 \\
& & BETA & 23.539 & 2.958 & & & BETA & 37.280 & \textbf{1.970} \\ \hline \hline
Doppler& 32 & UNIV & 9.562 & 2.596 &Heavisine &32 & UNIV & 5.926 & 2.006 \\
& & CV & 9.662 & 3.977 & & & CV & \textbf{6.071} & 2.001 \\
& & FDR & 14.961 & 4.901 & & & FDR & 6.175 & 2.032 \\
& & SURE & \textbf{7.603} & \textbf{1.984} & & & SURE & 7.037 & 2.020 \\
& & LOGISTIC & 30.643 & 2.064 & & & LOGISTIC & 29.421 & \textbf{1.903} \\
& & BETA & 15.774 & 3.358 & & & BETA & 10.235 & 1.926 \\ \hline
&64 & UNIV & 9.833 & 2.912 & &64 & UNIV & \textbf{4.520} & 1.935 \\
& & CV & 9.749 & 4.517 & & & CV & 4.647 & 1.895 \\
& & FDR & 9.180 & 3.508 & & & FDR & 4.888 & 1.926 \\
& & SURE & \textbf{7.641} & 2.113 & & & SURE & 5.463 & 1.940 \\
& & LOGISTIC & 22.200 & \textbf{1.818} & & & LOGISTIC & 20.448 & \textbf{1.687} \\
& & BETA & 18.927 & 2.140 & & & BETA & 14.719 & 2.076 \\ \hline
\end{tabular}
\caption{AMSE of the shrinkage/thresholding rules in the simulation study for DJ-test functions under lognormal noise.}\label{tab:amse2}
\end{table}
\begin{figure}
\caption{Estimates of the D-J test functions by the shrinkage rule under logistic prior in the simulation study for $n=64$, SNR = 9 and for simulated points under lognormal noise.}
\label{fig:logest}
\end{figure}
\begin{figure}
\caption{Boxplots of the mean square errors (MSE) of the shrinkage and thresholding rules in the simulation study for $n=64$, SNR = 9 and for simulated points under lognormal noise. The associated rules are: 1-UNIV, 2-CV, 3-FDR, 4-SURE, 5-LOGISTIC and 6-BETA.}
\label{fig:logbp}
\end{figure}
\section{Real data application}
The Boston Marathon is one of the most important marathons in the world. It has occurred yearly since 1897, with a trajectory of 42.195 km between the cities of Hopkinton and Boston, in the US state of Massachusetts. As mentioned in the introduction, arrival times are classical examples of measurements contaminated by positive noise due to possible delays of detection by instruments.
In this sense, we applied the proposed shrinkage rule with logistic prior under exponential noise assumption for denoising $n=64$ winning times (in minutes) of Boston Marathon Men's Open Division from 1953 to 2016. The data is publicly available at Boston Athletic Association (BAA) webpage \textit{https://www.baa.org/races/boston-marathon/results/champions}. We used a DWT with Daub10 basis and the prior hyperparameters were adopted according to \eqref{eq:alpha} and $\tau = 5$.
Figure \ref{fig:app1} shows original and denoised data by the shrinkage rule with logistic prior under exponential noise. As expected, the denoised winning times are less than or equal to the measured ones, depending on the shrinkage level. Given the good precision of the measured times for this competition, the application of a high shrinkage level rule was not necessary. The empirical wavelet coefficients (represented by vertical bars) by resolution level and the differences between them and the estimated coefficients, $d - \hat{\theta}$, are shown in Figures \ref{fig:app2} (a) and (b) respectively. It is possible to note that, although the residuals in the original data are positive, which can be seen in Figure \ref{fig:app3} (a), their counterparts in the wavelet domain are not necessarily positive, i.e., there are estimated coefficients bigger than their respective empirical ones.
Finally, Figure \ref{fig:app3} (b) presents the histogram (with area equal to 1) of the residuals in the time domain, i.e., $y - \hat{y}$, with a superposed exponential density curve, for $\hat{\lambda} = n/\sum_i (y_i - \hat{y}_i)=3.987$, the maximum likelihood estimate. In fact, the one sample Kolmogorov-Smirnov test for the exponential distribution with $\lambda = 3.987$ applied to the residuals provided a p-value = 0.7057, not rejecting the null hypothesis under a $5\%$ significance level. Thus, the exponential noise assumption for this dataset seems to be reasonable.
\begin{figure}
\caption{Original and denoised winning times of Boston Marathon Men's Open Division between 1953-2016. Denoising was performed by the proposed shrinkage rule with logistic prior under exponential noise model.}
\label{fig:app1}
\end{figure}
\begin{figure}
\caption{Empirical coefficients by resolution level (a) and differences between empirical and estimated wavelet coefficients (b) of winning times of Boston Marathons dataset. Denoising obtained by application of the shrinkage rule with logistic prior under exponential noise.}
\label{fig:app2}
\end{figure}
\begin{figure}
\caption{Differences between observed and denoised data (residuals) (a) and histogram of residuals with exponential density curve ($\hat{\lambda} = 3.987$) (b).}
\label{fig:app3}
\end{figure}
\section{Final considerations}
We proposed Bayesian wavelet shrinkage rules to estimate wavelet coefficients under nonparametric models with exponential and lognormal additive noise. The adopted priors for the wavelet coefficients were mixtures of a point mass function at zero with logistic and beta distributions. Under the standard Gaussian noise assumption, the distribution is preserved in the wavelet domain, i.e., the noises after discrete wavelet transform application on the original data remain iid Gaussian, which allows the estimation process to proceed coefficient by coefficient. Under a positive noise model, this feature is lost. Noises in the wavelet domain are not necessarily positive and are correlated. The main impact is that shrinkage is performed on the empirical coefficients vector, which required the application of a robust adaptive MCMC algorithm to calculate posterior expectations, since these are the shrinkage rules under the quadratic loss assumption.
The performances of the proposed shrinkage rules in terms of averaged mean square error (AMSE) were better than standard shrinkage and thresholding techniques in most of the scenarios of the simulation studies. Although the rules are computationally more expensive than the classical methods, their performances in the simulation studies indicate them as promising shrinkage rules for denoising data contaminated with positive noise.
The behaviour of the shrinkage rules for other noises with positively supported distributions and the impact of the wavelet basis choice for performing the DWT are suggested as important questions to be studied in future works.
\end{document} |
\begin{document}
\renewcommand{\theenumi}{\roman{enumi}}
\renewcommand{\labelenumi}{(\roman{enumi})}
\title{Asymptotic counting of minimal surfaces in hyperbolic $3$-manifolds}
\begin{flushleft}
\advance\leftskip .3\textwidth
\it To Srishti Dhar Chatterji, \\
who attended séminaire Bourbaki for most of half a century.
\end{flushleft}
\tableofcontents
\section*{Introduction}
The study of the geodesic flow in closed negatively curved manifolds is a beautiful mix of topology, Riemannian geometry, geometric group theory and ergodic theory. We know in this situation that closed geodesics are in one-to-one correspondence with conjugacy classes of elements of the fundamental group, or equivalently, with the set of homotopy classes of maps of circles in the manifold. Even though closed geodesics are infinite in number, we have a good grasp ---thanks to the notion of {\em topological entropy}--- of how the number of these geodesics grows with respect to the length. We also have a computation of this topological entropy in hyperbolic spaces by \textcite{Bowen:1972} and \textcite{Margulis:1969ve} and rigidity results for this entropy by \textcite{Besson:1995um} and \textcite{Hamenstadt:1990tx}.
While the statements of this first series of results seem to deal only with closed geodesics, the foliation of the unit tangent bundle by orbits of the geodesic flow plays a fundamental role. The study of invariant measures by the geodesic flow is a crucial tool, and the equidistribution of closed geodesics by \textcite{Bowen:1972} and \textcite{Margulis:1969ve} for hyperbolic manifolds a central result. We refer to section~\ref{sec:par} for more precise definitions, results and references.
For many reasons ---as we discuss in section~\ref{sec:tg}--- closed totally geodesic submanifolds of dimension at least $2$ are quite rare. However, in constant curvature, the foliation of the Grassmannian of $k$-planes coming from totally geodesic planes is a natural generalization of the geodesic flow and several crucial results of \textcite{Ratner:1991wl,Ratner:1991tu,Shah:1991wr} as well as \textcite{McMullen:2017ub} describe closed invariant sets and invariant measures. This foliation no longer makes sense in variable curvature, at least far away from the constant curvature situation, although for metrics close to hyperbolic ones, a result by \textcite{Gromov:1991uv} ---see also \cite{Lowe:2020vu}--- shows that the foliation of the Grassmann bundle persists when one replaces totally geodesic submanifolds by minimal ones.
If we move in the topological direction, going from circles to surfaces, Kahn--Markovi\'c Surface Subgroup Theorem (\cite{Kahn:2009wh}) provides the existence of many surface subgroups in the fundamental group of a hyperbolizable $3$-manifold $M$. A subsequent result of \textcite{Kahn:2010uo} gives an asymptotic of the number of these surface groups with respect to the genus ---see Theorem~\ref{theo:KMcount}.
However this asymptotic counting does not involve the underlying Riemannian geometry as opposed to the topological entropy that we discussed in the first paragraph. The next step is to use fundamental results of \textcite{Schoen:1979} and \textcite{Sacks:1982}, which tell us that every such surface group can be realized by a minimal surface ---although not necessarily uniquely.
In \textcite{Calegari:2020uo}, the authors propose a novel idea: count these minimal surfaces asymptotically with respect to the area, but when the boundary at infinity of those minimal surfaces becomes more and more circular, or more precisely is a $K$-quasicircle, with $K$ approaching~$1$. The precise definition of this counting requires the description of quasi-Fuchsian groups and their boundary at infinity, done in section~\ref{par:qf}, and their main result (Theorem~\ref{theo:main}) is presented in section~\ref{sec:last}. These results define an entropy-like constant $E(M,h)$ for minimal surfaces in a Riemannian manifold $(M,h)$ of curvature less than $-1$. The main result of \textcite{Calegari:2020uo} computes it for hyperbolic manifolds, gives bounds in the general case and most notably proves a rigidity result: $E(M,h)=2$ if and only if $h$ is hyperbolic. Altogether, these results mirror those for closed geodesics.
When one moves to studying solutions of elliptic partial differential equations, for instance minimal surfaces or pseudo-holomorphic curves, the situation is different from the chaotic behavior of the geodesic flow. While there is a huge literature about moduli spaces of solutions when one imposes constraints such as homology classes, we do not have that many results describing a moduli space of all solutions: possibly immersed with dense images, in other words to continue the process for minimal surfaces described in the introduction of \textcite{Gromov:1991uv} for geodesics: {\em if one wishes to understand closed geodesics not as individuals but as members of a community one has to look at all (not only closed) geodesics in X which form an $1$-dimensional foliation of the projectivized tangent bundle.}
The presentation of these notes shifts around the ideas used in \textcite{Calegari:2020uo} and follows more directly the philosophy introduced in
\textcite{Gromov:1991uv}. We focus on the construction of such a moduli space
---that we call the {\em phase space of stable minimal surfaces}--- and its
topological properties ---see section~\ref{par:phasespace} and
Theorem~\ref{theo:phasespace}. These properties are a rephrasing of
Theorem~\ref{coro:KK} about quasi-isometric properties of stable minimal
surfaces, relying on results of \textcite{Seppi:2016ut} and a ``Morse type Lemma'' argument by \textcite[Theorem 3.1]{Calegari:2020uo}. This space is the analogue, in our situation, of the geodesic flow and the $\mathbb R$-action is replaced by an $\mathsf{SL}_2(\mathbb R)$-action.
Then we move to studying $\mathsf{SL}_2(\mathbb R)$-invariant measures on this phase space and show they are related to what we call {\em laminar currents} which are the analogues in our situation of geodesic currents ---see \textcite{Bonahon:1997tl}. The main result is now an equidistribution result in this situation: Theorem~\ref{theo:equi}. This theorem follows from the techniques of the proof of Surface Subgroup Theorem using the presentation given in \textcite{Kahn:2018wx}.
This Equidistribution Theorem and the construction of the phase space allows, by comparing the counting with respect to area and the genus ---as in \cite{Kahn:2010uo}--- to proceed quickly to the proof of the results of \textcite{Calegari:2020uo} when, for the rigidity result, we assume that $h$ is close enough to a hyperbolic metric.
The whole article of \textcite{Calegari:2020uo} mixes beautiful ideas from many subjects, adding to the mix of topology, Riemannian geometry, geometric group theory and ergodic theory used in the study of the geodesic flow, a pinch of geometric analysis. The approach given in these notes is not just to present the proof but also to take the opportunity to tour some of the fundamental results in these various mathematics\footnote{The introduction of \textcite{Calegari:2020uo} also addresses minimal hypersurfaces in higher dimension that we do not discuss here}. We take some leisurely approach and explain some of the main results and take the time to give a few simple proofs and elementary discussions: the clever proof of Thurston showing that there are only finitely many surface groups of a given genus in the fundamental group of a hyperbolic manifold, the discussion of stable minimal surfaces, the geometric analysis trick that derives from a rigidity result (here the characterization of the plane as the unique stable minimal surface in $\mathbb R^3$) some compactness results (proposition~\ref{pro:F-Sch}).
During the preparation of these notes, I benefited from the help of many colleagues, as well as the insight of the authors. I want to thank them here for their crucial input: Dick Canary, Thomas Delzant, Olivier Guichard, Fanny Kassel, Shahar Mozes, Hee Oh, Pierre Pansu, Andrea Seppi, Jérémy Toulisse and Mike Wolf.
\section{Counting geodesics and equidistribution}\label{sec:par}
When $(M,h)$ is a negatively curved manifold, there is a one-to-one correspondence between conjugacy classes of elements of $\pi_1(M)$ and closed geodesics. Even though there are infinitely many closed geodesics, we can count them ``asymptotically''. Equivalently, this will give an asymptotic count of the conjugacy classes of elements of $\pi_1(M)$, or to start a point of view that we shall pursue later, the set of free homotopy classes of maps of $S^1$ in $M$.
We review here some important results that will be useful in our discussion and serve as a motivation.
\subsection{Entropy and asymptotic counting of geodesics} Let $(M,h)$ be a closed manifold of negative curvature. Fixing a positive constant $T$, there are only finitely many closed geodesics of length less than $T$. Let us define
$$
\Gamma_h(T)\coloneqq\{\hbox{geodesic }\gamma\mid \operatorname{length}(\gamma)\leqslant T\}\ .
$$
The following limit, when it is defined,
$$
{\rm h}_{top}(M,h)\coloneqq \lim_{T\to\infty}\frac{1}{T}\log\left(\sharp \Gamma_h(T) \right)\ ,
$$
is called the {\em topological entropy of $M$}. We will see it is always defined in negative curvature. It measures the exponential growth of the number of geodesics with respect to the length. The topological entropy is related to the {\em volume entropy of $M$} defined by
$$
{\rm h}_{vol}(M,h)=\liminf_{R\to\infty}\frac{1}{R}\log\bigl(\operatorname{Vol}(B(x,R)) \bigr)\ ,
$$
where $B(x,R)$ is the ball of radius $R$ in the universal cover $\tilde M$ of $M$, $x$ any point in $\tilde M$. The volume entropy does not depend on the choice of the point $x$ and we have
\begin{theo}\label{theo:entropy} Let $(M,h)$ be a closed negatively curved manifold.
\begin{enumerate}
\item The topological entropy ${\rm h}_{top}(M,h)$ is well-defined. When $h_0$ is hyperbolic\footnote{that is when the curvature is constant and equal to $-1$}, $${\rm h}_{top}(M,h_0)=\dim(M)-1\ .$$
\item We have
$$
{\rm h}_{top}(M,h)={\rm h}_{vol}(M,h)=\lim_{R\to\infty}\frac{\log\left(\sharp\{\gamma\in\pi_1(M)\mid d_M(\gamma.x,x)\leqslant R\}\right)}{R}\ .
$$
\end{enumerate}
\end{theo}
The first item is a celebrated result by \textcite{Bowen:1972} and \textcite{Margulis:1969ve}. The second item is due to \textcite{Manning:1979vk}.
\subsubsection{Rigidity of the entropy}
We have several rigidity theorems for the entropy.
First, in the presence of an upper bound on the curvature: if a metric $h$ on a closed manifold $M$ has curvature less than $-1$, then $$
{\rm h}_{vol}(M,h)\geqslant \dim(M)-1\ ,
$$
with equality if and only if $h$ is hyperbolic.
For deeper results in the presence of upper bounds on the curvature, see \textcite{Pansu:1989ug} and \textcite{Hamenstadt:1990tx}.
As a special case of \textcite{Besson:1995um}, we have, when we drop the condition on the curvature
\begin{theo}
Let $(M,h_0)$ be a hyperbolic manifold of dimension $m$ and $h$ another metric on $M$, then
$$
{\rm h}_{vol}(M,h)^m\operatorname{Vol}(M,h)\geqslant {\rm h}_{vol}(M,h_0)^m\operatorname{Vol}(M,h_0)\ .
$$
The equality implies that $h$ has constant curvature.
\end{theo}
In this expos\'e, we will only use the case of $m=2$, which is due to \textcite{Katok:1982uv}.
\subsection{Equidistribution} This asymptotic counting has a counterpart
called {\em equidistribution}. Let us first recall that geodesics are
solutions of some second order differential equation, and we may as well
consider non closed geodesics in the Riemannian manifold $M$. Let us consider the {\em phase space} $\mathcal G$ of this equation as the space of maps $\gamma$ from $\mathbb R$ to $M$, where $\gamma$ is an arc length parametrized solution of the equation. The precomposition by translation gives a right action by $\mathbb R$, and thus $\mathcal G$ is partitioned into {\em leaves} which are orbits of the right action of $\mathbb R$. The space $\mathcal G$ canonically identifies with the unit tangent bundle~$\mathsf U M$ by the map $\gamma\mapsto(\gamma(0),{\rm d}ot\gamma(0))$, and the above $\mathbb R$-action corresponds to the action of the {\em geodesic flow}.
We may thus associate to each closed orbit~$\gamma$ of length~$\ell$ a unique probability measure~$\delta_\gamma$ on $\mathcal G=\mathsf U M$ supported on $\gamma$, $\mathbb R$-invariant and so that for any function on~$\mathsf U M$
$$
\int_{\mathsf U M} f \,\mathrm{d}\delta_\gamma\coloneqq\frac{1}{\ell}\int_0^\ell f(\gamma(s))\,\mathrm{d}s\ .
$$
When $M$ is hyperbolic, another natural and $\mathbb R$-invariant probability measure comes from the left invariant $\mu_{Leb}$ measure (under the group of isometries) in the universal cover.
The next result is intimately related to Theorem~\ref{theo:entropy} and also due to \textcite{Bowen:1972} and \textcite{Margulis:1969ve}.
\begin{theo}\label{theo:equi} Assume $(M,h_0)$ is hyperbolic, then
$$ \lim_{T\to\infty} \frac{1}{\sharp \Gamma_{h_0}(T)}\sum_{\gamma\in \Gamma_{h_0}(T)}\delta_\gamma=\mu_{Leb}\ .$$
\end{theo}
\section{Totally geodesic submanifolds of higher dimension}\label{sec:tg}
As a first attempt of generalization, it is quite tempting to understand what happens to {\em totally geodesic} submanifolds of higher dimension, where by totally geodesic we mean complete and such that any geodesic in the submanifold is a geodesic for the ambient manifold.
\subsection{Closed totally geodesic submanifolds are rare}
One easily constructs by arithmetic means hyperbolic manifolds with infinitely many closed totally geodesic submanifolds, however this situation is exceptional and we have, as a special case of a beautiful recent theorem by \textcite{Margulis:2020wp} -- generalized in \textcite{Bader:2019wu}:
\begin{theo}
If a closed hyperbolic 3-manifold $M$ contains infinitely many closed totally geodesic subspaces of dimension at least $2$, then $M$ is arithmetic.
\end{theo}
Thus, having infinitely many closed totally geodesic submanifolds is quite rare for hyperbolic $3$-manifolds, and an asymptotic counting as we defined for geodesics does not yield interesting results in general -- see however \textcite{Jung:2019tq} for a result in the arithmetic case and a general upper bound in \textcite[Corollary 1.12]{Mohammadi:2020wi}.
\subsection{The set of pointed totally geodesic spaces and Shah's Theorem}\label{par:Ratner}
Let $(M,h)$ be an oriented Riemannian $3$-manifold and $G(M)$ be the bundle over $M$ whose fiber at a point $x$ is the set of oriented $2$-planes in the tangent space at $x$. Every surface $S$ in $M$ then has a {\em Gauß lift} $G(S)$ in $G(M)$ which consists of the set of tangent spaces to~$S$.
\subsubsection{Totally geodesic hyperbolic planes, the frame bundle and the $\mathsf{PSL}_2(\mathbb R)$-action}
When $h$ is hyperbolic, the space $G(M)$ has a natural foliation $\mathcal F$ whose leaves are Gauß lifts of immersed totally geodesic hyperbolic planes. We can thus interpret $G(M)$ as the space of pointed totally geodesic planes, or equivalently as the set of (local) totally geodesic embeddings of ${\bf H}^2$ into $M$, equipped with the right action by precomposition of~$\mathsf{PSL}_2(\mathbb R)$.
Let $F_{h_0}(M)$ be the {\em frame bundle} over $M$ whose fiber at $x$ is the set of oriented orthonormal frames in the tangent space of $x$. We have a natural fibration
$$
S^1\to F_{h_0}(M)\to G(M)\ .
$$
The choice of a frame at a point $x$ in ${\bf H}^3$ identifies $\operatorname{Isom}({\bf H}^3)$, the group of orientation preserving isometries of ${\bf H}^3$, with $\mathsf{PSL}(2,\mathbb C)$. Thus $F_{h_0}({\bf H}^3)$ is interpreted as the space of isomorphisms of $\operatorname{Isom}({\bf H}^3)$ with $\mathsf{PSL}(2,\mathbb C)$ and as such carries commuting actions of $\operatorname{Isom}({\bf H}^3)$ on the left by postcomposition and $\mathsf{PSL}(2,\mathbb C)$ on the right by precomposition. Then the foliation of $F_{h_0}({\bf H}^3)$ by the orbits of the right action of the subgroup $\mathsf{PSL}_2(\mathbb R)$ of $\mathsf{PSL}(2,\mathbb C)$, projects to the foliation $\mathcal F$ of $G({\bf H}^3)$ that we just described, with the corresponding action of $\mathsf{PSL}_2(\mathbb R)$.
\subsubsection{Shah and Ratner's Theorem}
From this interpretation of the $\mathsf{PSL}_2(\mathbb R)$-action on $G(M)$, a theorem by \textcite{Shah:1991wr}, which is now also a consequence of the celebrated theorem by \textcite{Ratner:1991wl}, gives:
\begin{theo}\label{theo:Ratner}
Let $(M,h_0)$ be a closed hyperbolic $3$-manifold. Then any orbit of the $\mathsf{PSL}_2(\mathbb R)$-action on $F_{h_0}(M)$ is either dense or closed. Moreover any closed set invariant by $\mathsf{PSL}_2(\mathbb R)$ in $F_{h_0}(M)$ is either everything or a finite union of closed orbits.
\end{theo}
Recent work by \textcite[Theorem 11.1]{McMullen:2017ub} gives results in the non-closed case, see also~\textcite{Tholozan:2019uw}.
We state this result as part of our promenade in the subject and will not use it in the proof, as opposed to \textcite{Calegari:2020uo}. However, we will use a special case of the measure classification theorem of \textcite{Ratner:1991tu}.
\begin{theo}\label{theo:Ratner2}
Let $(M,h_0)$ be a closed hyperbolic $3$-manifold. Then any ergodic $\mathsf{SL}_2(\mathbb R)$-invariant measure on $F_{h_0}(M)$ is $\mu_{Leb}$ or is supported on a closed leaf in $F_{h_0}(M)$.
\end{theo}
In particular, there are only countably many ergodic $\mathsf{SL}_2(\mathbb R)$-invariant measures. For a short and accessible proof of Ratner's theorem in the context of $\mathsf{SL}_2(\mathbb R)$-action see \textcite{Einsiedler:2006wz}.
We only need the following corollary that may have a direct proof.
\begin{coro}\label{coro:Ratner3}
Let $\mu$ be an $\mathsf{SL}_2(\mathbb R)$-invariant probability measure on $F_{h_0}(M)$. Let~$p$ be the projection of $F_{h_0}(M)$ to $M$. Assume that $p_*\mu$ is \textup{(}up to a constant\textup{)} the volume form on $M$, then $\mu=\mu_{Leb}$.
\end{coro}
\subsection{In variable curvature} For a generic metric, there are no totally geodesic surfaces, not even locally. Thus, even non closed totally geodesic surfaces are rare in variable curvature. We actually explain now a more precise result of \textcite{Calegari:2020uo} which implies that if a negatively curved manifold has too many hyperbolic planes, then it is hyperbolic.
We start by discussing briefly the boundary at infinity of negatively curved manifolds.
\subsubsection{The boundary at infinity and circles}\label{par:circ}
In the Poincaré ball model, ${\bf H}^3$ is the interior of a ball. The boundary of the ball is denoted $\partial_\infty {\bf H}^3$ and carries an action of the isometry group $\operatorname{Isom}({\bf H}^3)$ of ${\bf H}^3$, which is isomorphic to $\mathsf{PSL}(2,\mathbb C)$. Under this isomorphism, $\partial_\infty{\bf H}^3$ identifies as a homogeneous space with $\mathbf{CP}^1$.
Observe that the choice of a real plane $P$ in $\mathbb C^2$ defines a {\em circle} in $\mathbf{CP}^1$ which is the set of complex lines intersecting non trivially the plane $P$. In the ball model of ${\bf H}^3$, these circles are boundaries of hyperbolic planes totally geodesically embedded in ${\bf H}^3$.
The boundary at infinity $\partial_\infty{\bf H}^3$ has an intrinsic definition as the set of equivalence classes of oriented geodesics which are {\em parallel at infinity}, where the equivalence is defined as
$$
\gamma_1\sim\gamma_2\ \ \hbox{ if and only if }\ \ \ \limsup_{t\to\infty}d(\gamma_1(t),\gamma_2(t))<\infty\ .$$
This notion also makes sense in the case of a nonpositively curved simply connected manifold $\tilde M$ and allows us to define the notion of
the boundary at infinity $\partial_\infty\tilde M$.
We recall here briefly that $\tilde M\sqcup\partial_\infty\tilde M$ admits a topology and thus becomes a compactification of $\tilde M$ as a closed ball. Moreover, if $\tilde M$ is the universal cover of a closed manifold that admits a hyperbolic metric, we have an identification of $\partial_\infty\tilde M$ with $\partial_\infty{\bf H}^3$.
If $M$ has dimension at least 3, by Mostow rigidity, the above identification of $\partial_\infty\tilde M$ with $\mathbf{CP}^1$ is unique up to the action of $\mathsf{PSL}(2,\mathbb C)$ and thus circles make perfect sense in~$\partial_\infty \tilde M$.
\subsubsection{A characterization of hyperbolic metrics}
\begin{prop}\label{pro:manyhyp}
Assume that the curvature of the closed $3$-manifold $M$ is less than $-1$. Assume that every circle at infinity bounds a totally geodesic hyperbolic surface. Then $M$ is hyperbolic.
\end{prop}
\begin{proof}[Sketch of a proof] Since the curvature is negative, a circle at
infinity cannot bound more than one totally geodesic surface. Let $\mathcal
T$ be the space of
triples of pairwise distinct elements
in $\partial_\infty \mathbf{H}^3$. Every such triple $\tau$ defines a unique circle $\gamma$ and thus a unique totally geodesic oriented hyperbolic plane $\mathbf H$ in $\tilde M$, the universal cover of $M$. Since $\gamma$ is the boundary at infinity of $\mathbf H$, a triple of points $\tau=(a,b,c)$ defines a frame $(x,u,v)$ where $x$ is a point in $\mathbf H$ and $(u,v)$ an orthonormal basis of $\mathsf T_x\mathbf H$ by the following procedure: $x$ is on the geodesic joining $a$ to $c$, $u$ is the vector tangent at $x$ in the direction of $b$ and $v$ the tangent vector in the direction of $c$. Let us define
$$
\beta(\tau)=(x,u,v,n)\ ,\hbox{ such that $(u,v,n)$ is an oriented orthonormal basis of $\mathsf T_x\tilde M$}\ .
$$
Then $\beta$ is $\pi_1(M)$-equivariant from $F_{h_0}({\bf H}^3)$ to $F_{h}(M)$. One can show that $\beta$~has degree~$1$ and is surjective. It follows in particular that there is a totally geodesic hyperbolic plane through any tangent plane in~$M$. Thus $M$~has constant curvature~$-1$.
\end{proof}
\section{Surface subgroups in hyperbolic $3$-manifolds}\label{par:qf}
Let us move to topological questions. The natural question is, after we have spent some time in the first sections studying homotopy classes of circles in negatively curved manifolds, to understand conjugacy classes of fundamental groups of surfaces in $3$-manifolds. We will spend some time recalling classical facts about quasi-Fuchsian surface groups, the surface subgroup theorem by Kahn and Markovi\'c, and finally asymptotic counting of those surface subgroups by the genus.
To be explicit, a {\em surface group} is the fundamental group of a compact
connected orientable surface of genus greater than or equal to~$2$.
Any such group can be represented as a {\em Fuchsian subgroup}, that is a discrete cocompact subgroup of the isometry group $\mathsf{PSL}_2(\mathbb R)$ of the hyperbolic plane ${\bf H}^2$.
We concentrate our discussion first on discrete surface subgroups of $\operatorname{Isom}({\bf H}^3)$.
\subsection{Quasi-Fuchsian groups and quasicircles} Let $S$ be a closed
connected oriented surface of genus at least $2$. Let $\rho_0$ be a faithful representation of the fundamental group $\pi_1(S)$ in $\operatorname{Isom}({\bf H}^2)$ whose image is a cocompact lattice $\Gamma$. Seeing ${\bf H}^2$ sitting as a geodesic plane in ${\bf H}^3$ gives rise to an embedding of $\operatorname{Isom}({\bf H}^2)$ in $\operatorname{Isom}({\bf H}^3)$. The corresponding morphism of $\Gamma<\operatorname{Isom}({\bf H}^2)$ in $\operatorname{Isom}({\bf H}^3)$ is called a {\em Fuchsian representation} and its image a {\em Fuchsian group}\footnote{We warn the reader that our definitions are slightly non-standard here.}.
We saw in paragraph~\ref{par:circ} that such a Fuchsian group preserves a circle in $\partial_\infty{\bf H}^2h$. This motivates the following definitions.
\begin{defi}
\begin{enumerate}
\item A {\em quasi-Fuchsian representation} is a morphism $\rho$ from a cocompact lattice $\Gamma$ of $\operatorname{Isom}({\bf H}^2)$ in $\operatorname{Isom}({\bf H}^3)$, such that there exists a continuous injective map $\Lambda$ from $\partial_\infty{\bf H}^2$ to $\partial_\infty{\bf H}^3$ which is $\rho$-equivariant.
\item The map $\Lambda$ is called the {\em limit map} of the quasi-Fuchsian morphism.
\item A {\em quasi-Fuchsian group} is the image of a quasi-Fuchsian representation.
\item A {\em quasi-Fuchsian manifold} is the quotient of the hyperbolic space by a quasi-Fuchsian group.
\item The {\em limit set $\partial_\infty\Gamma$} of a quasi-Fuchsian\ group $\Gamma$ is the image of its limit map.
\end{enumerate}
\end{defi}
In this definition, observe that the choice of a quasi-Fuchsian representation
depends on the choice of a realization of the surface group as a lattice in $\mathsf{PSL}_2(\mathbb R)$. Similarly, the limit map of a quasi-Fuchsian\ representation depends on the choice of a lattice in $\mathsf{PSL}_2(\mathbb R)$, while $\partial_\infty\Gamma$ only depends on the quasi-Fuchsian\ group $\Gamma$.
Quasi-Fuchsian manifolds are not compact: they are homeomorphic to $S\times \mathbb R$, if the quasi-Fuchsian group is isomorphic to $\pi_1(S)$. However, they keep some cocompactness feature:
\begin{prop}
Let $\partial_\infty\Gamma$ be the limit set of a quasi-Fuchsian surface group $\Gamma$ and $\operatorname{Env}(\partial_\infty\Gamma)$ be the convex hull \textup{(}for instance in the projective Klein model\textup{)} of $\partial_\infty\Gamma$ in ${\bf H}^3$. Then
\begin{enumerate}
\item the distance to $\operatorname{Env}(\partial_\infty\Gamma)$ is convex\footnote{A function is {\em convex} if its restriction to any geodesic is convex. In nonpositive curvature, any distance to a convex set is convex.},
\item the group $\Gamma$ acts cocompactly on $\operatorname{Env}(\partial_\infty\Gamma)$.
\end{enumerate}
The quotient $\operatorname{Env}(\partial_\infty\Gamma)/\Gamma$ is called the {\em convex core} of the quasi-Fuchsian manifold associated to $\Gamma$.
\end{prop}
Quasi-Fuchsian groups are plentiful, and in particular
\begin{prop}
Any small deformation of a Fuchsian group is quasi-Fuchsian.
\end{prop}
The limit map of a quasi-Fuchsian group has many remarkable properties:
\begin{prop}
Given the limit map $\Lambda$ of a quasi-Fuchsian group, there exists a constant $K$, such that for any quadruple of pairwise distinct points $(x,y,z,w)$
in $\partial_\infty{\bf H}^2\simeq\bf{RP}^1$, then
\begin{equation}
\bigl\vert [\Lambda(x),\Lambda(y),\Lambda(z),\Lambda(w)]\bigr\vert\leqslant K \bigl\vert [x,y,z,w]\bigr\vert\ , \label{def:Kquasi}
\end{equation}
where $[a,b,c,d]$ denotes the cross-ratio of the quadruple $(a,b,c,d)$ in either~$\bf{RP}^1$ or~$\bf{CP}^1$.
\end{prop}
More generally a map $\Lambda$ from
$\bf{RP}^1$ to $\bf{CP}^1$ is called {\em $K$-quasisymmetric} if it satisfies inequality~\eqref{def:Kquasi}. The image of a $K$-quasisymmetric map is called a {\em $K$-quasicircle}.
The above proposition can be strengthened as
\begin{prop}
The limit map of any quasi-Fuchsian representation is $K$-quasisymmetric for some $K$. If $K=1$, then the group is actually Fuchsian.
\end{prop}
Accordingly, a surface group is {\em $K$-quasi-Fuchsian} if it admits a $K$-quasisymmetric limit map. The constant $K$ gives a feeling of how far a quasi-Fuchsian\ group is from being Fuchsian.
Not all discrete surface groups in $\operatorname{Isom}({\bf H}^2h)$ are quasi-Fuchsian. We shall see an example of that in the next paragraph.
\subsection{Surface subgroups in fundamental groups of closed hyperbolic $3$-manifolds}
Solving a crucial conjecture
of Thurston, Kahn and Markovi\'c proved that fundamental groups of closed hyperbolic $3$-manifolds contain surface groups. The amazing proof, in \textcite{Kahn:2009wh}, uses mixing and equidistribution of the geodesic flow and we shall have to extract further information from it.
Kahn--Markovi{\'c} surface subgroup theorem states the existence of many surface groups which are ``more and more'' Fuchsian in some precise way:
\begin{theo}{\sc[Kahn--Markovi{\'c} surface subgroup theorem]} Let $M$ be a closed hyperbolic $3$-manifold. Let $\varepsilon$ be any positive constant. Then there exists a quasi-Fuchsian subgroup in $\pi_1(M)$ whose limit map is a $(1+\varepsilon)$-quasicircle.
\end{theo}
This result was explained in a Bourbaki exposé by \textcite{Bergeron2013}. The quantitative part of the result plays a crucial role in the proof of Agol's Virtual Haken Theorem by \textcite{Agol:2013ts} stating that any hyperbolic $3$-manifold has a finite covering which is a surface bundle over the circle. Quite interestingly, in those manifolds fibering over the circle the fundamental group of the fiber is not quasi-Fuchsian.
To add a little perspective that will come up later, recall that surface groups and fundamental groups of hyperbolic manifolds are prototypes of {\em Gromov-hyperbolic groups}. Gromov has broadened Thurston's conjecture in the following question.
\begin{question}
Does any one-ended Gromov-hyperbolic group contain a surface group?
\end{question}
\subsection{Counting surface subgroups}
A classical theorem in geometric group theory says
\begin{theo}\label{theo:finitely}
A Gromov-hyperbolic group contains only finitely many conjugacy classes of surface groups of a given genus.
\end{theo}
This result is suggested in \textcite{Gromov:1987tk}, a proof and a generalization is given in \textcite{Delzant:1995un}.
The special case of the fundamental group of a hyperbolic $3$-manifold is due to \textcite{Thurston:1997ux} and his beautifully simple proof works in general for fundamental groups of negatively curved manifolds. We prove it in theorem~\ref{theo:thurs} after our discussion of minimal surfaces.
For a hyperbolic manifold $M$, let $S(M,g)$ be the number of conjugacy classes of surface subgroups in $\pi_1(M)$ of genus $g$. Thurston already gave a crude estimate of an upper bound for $S(M,g)$, later on improved by \textcite{Masters:2005vi} and \textcite{Soma:1991tr}. A crucial improvement of this count is made in \textcite{Kahn:2010uo}.
\begin{theo}\label{theo:KMcount}
Let $M$ be a hyperbolic $3$-manifold, then there exist constants~$c_1$ and~$c_2$ so that for~$g$ large enough
\begin{equation}
(c_1 g)^{2g}\leqslant S(M,g)\leqslant (c_2g)^{2g}\ ,
\end{equation}
where $c_2$ only depends on the injectivity radius of $M$.
\end{theo}
The previous upper bound by Masters was of the form $g^{c_2g}$. To get the lower bound, it is actually enough ---and distressing--- to have the existence of one surface group, and counts its covers ---see proposition~\ref{pro:puchta}. However, Kahn and Markovi\'c also have the same estimates in the harder case when one counts commensurability classes.
We deduce,
\begin{coro}\label{cor:KMcount}
\begin{equation}
\lim_{g\to\infty}\frac{\log(S(M,g))}{2g\log(g)}=1\ .
\end{equation}
\end{coro}
Kahn and Markovi\'c conjecture a more precise asymptotic\footnote{also for commensurability classes}:
\begin{conj}
Let $M$ be a hyperbolic $3$-manifold, then there exists a constant $c(M)$ only depending on $M$, so that
$$
\lim_{g\to\infty}\frac{1}{g}\left(S(M,g)\right)^{\frac{1}{2g}}=c(M)\ .
$$
\end{conj}
Observe that this counting is purely topological, the results do not make any reference to the underlying Riemannian structure of the manifold.
\vskip 0.2 truecm
To summarize this discussion, according to \textcite{Kahn:2009wh}, there are many surface groups in $\pi_1(M)$ and we have a purely topological asymptotic of the growth of the numbers of those when the genus goes to infinity. Calegari--Marques--Neves article also addresses the question of counting those subgroups but with a geometric twist. Before explaining their result, let us review some fundamental results on minimal surfaces.
\section{Minimal surfaces in $3$-manifolds}
In order to recover the flexibility that we lost when considering closed totally geodesic submanifolds, let us now introduce {\em minimal immersions} which will allow us to extend our discussion about geodesics. We first spend some time recalling some basic properties and definition of minimal immersions, before actually addressing the question of counting surface subgroups.
\subsection{Minimal immersions}
Let $(M,g)$ be a Riemannian manifold. We denote in general by ${\rm d}\operatorname{vol}(h)$, the volume density of a metric $h$. An immersion $f$ from a compact manifold $N$ into $M$ is a {\em minimal immersion} if $f$ is a critical point for the {\em volume functional}
$$
\operatorname{Vol}(f)\coloneqq \int_N {\rm d}\operatorname{vol}(f^*g)\ .
$$
More precisely, this means that for any family of smooth deformations $\{f_t\}_{t\in]
-1,1[}$ with $f_0=f$ we have
\begin{equation}
\left.\frac{{\rm d}}{{\rm d}t}\right\vert_{t=0} \operatorname{Vol}(f_t)=0\ .\label{eq:defmin}
\end{equation}
To the family of deformations $\{f_t\}_{t\in]-1,1[}$ is associated the {\em infinitesimal deformation vector} $\xi$ which is the section of $f^*(\mathsf T M)$ given by
\begin{equation}
\xi(x)=f^*\Bigl(\Bigl.\frac{{\rm d}}{{\rm d}t}\Bigr\vert_{t=0} f_t(x)\Bigr)\ .\label{eq:definf}
\end{equation}
\subsubsection{The first variation formula} One can now compute effectively
the left hand side of equation~\eqref{eq:defmin} by a classical computation
which is called the {\em first variation formula}. Let us introduce the second fundamental form $\rm{II}$ which is the symmetric tensor with values in the normal bundle given by
$$
\rm{II}(X,Y)=p(\nabla_X Y)\,
$$
where $X$ and $Y$ are tangent vectors to $N$, $\nabla$ is the Levi-Civita connection of $g$ pulled back on $f^*(\mathsf T M)$, and $p$ is the orthogonal projection of $f^*(\mathsf T M)$ on the normal bundle of $N$. Then the first variation formula reads
$$
\left.\frac{{\rm d}}{{\rm d}t}\right\vert_{t=0} \operatorname{Vol}(f_t)= \int_N\braket{\xi\mid H} {\rm d}\operatorname{vol}(f^* g)\ ,
$$
where $H$ is {\em the mean curvature vector} defined as the trace of $\rm{II}$.
Thus being a minimal immersion is equivalent to the fact that the mean curvature vanishes identically. As an important corollary, we have two useful properties
\begin{coro}\label{cor:subh}
\phantomsection
\begin{enumerate}
\item The restriction of a convex function to a minimal submanifold is subharmonic.
\item The curvature of a minimal surface at a point is less than the ambient curvature of its tangent plane.
\end{enumerate}
\end{coro}
One then defines a {\em minimal immersion} from a (possibly non compact, possibly with boundary) manifold as one for which the mean curvature vanishes everywhere. Equivalently, one can show that those are the immersions $f$ for which for any variation $\{f_t\}_{t\in]-1,1[}$ with $f_0=f$, constant on the boundary as well as outside a bounded open set $U$, we have
$$
\Bigl.\frac{{\rm d}}{{\rm d}t}\Bigr\vert_{t=0} \int_U{\rm d}\operatorname{vol}(f_t^* g)=0\ .
$$
When the dimension of~$N$ is~$1$, minimal immersions are exactly parameterizations of geodesics.
\subsubsection{The second variation formula} The misleading terminology ``minimal immersions'' or ``minimal surfaces'' tends to suggest that minimal surfaces are not only critical points of the area functional but actual minima. This is not always the case.
In order to understand whether the immersion is actually a local minimum of the volume functional, we need ---as in the case for geodesics--- to study the {\em second variation formula of the volume}.
Let us assume for simplicity that the source is compact, since our goal is only to present the subject.
Let us denote by $\xi$ an infinitesimal variation of $f$ as in equation~\eqref{eq:definf}. We may as well assume that this infinitesimal variation is normal since tangent deformations do not affect the volume. Then the second variation formula is given by
\begin{equation}
{\rm D}^2_f\operatorname{Vol}(\xi,\xi)\coloneqq\Bigl.\frac{{\rm d^2}}{{\rm d}t^2}\Bigr\vert_{t=0} \int_N{\rm d}\operatorname{vol}(f_t^* g)= 2\int_N (R_\xi+a_\xi -b_\xi){\rm d}\operatorname{vol}(f^* g) \ ,\label{eq:2vf}
\end{equation}
where $R_\xi$, $a_\xi$ and $b_\xi$ are the trace ---with respect to the induced metric--- of the symmetric tensors defined by respectively
\begin{align*}
R_\xi(X,Y)&\coloneqq\braket{R(\xi,X)\xi\mid Y}\ ,\\
a_\xi(X,Y)&\coloneqq\braket{p(\nabla_X\xi)\mid p(\nabla_Y\xi)}\ , \\
b_\xi(X,Y)&\coloneqq\braket{B(X)\xi\mid B(Y)\xi}\ ,
\end{align*}
where $X$ and $Y$ are tangent vectors and $\xi$ is normal, $R$ is the curvature tensor of the Levi-Civita connection $\nabla$ of the ambient manifold, $B$ is the {\em shape operator} defined by
$$
\braket{B(X)\xi\mid Y}=\braket{\rm{II}(X,Y)\mid \xi}\ .
$$
Since our ultimate goal is to understand the sign of ${\rm D}^2\operatorname{Vol}$, we now comment on the sign of these quantities:
\begin{enumerate}
\item $a_\xi$ is nonnegative.
\item $b_\xi$ is nonnegative, but vanishes when the submanifold is {\em totally geodesic}.
\item $R_\xi$ is nonnegative when the ambient curvature is nonpositive. When the ambient manifold is hyperbolic, $R_\xi=2\Vert\xi\Vert^2$.
\end{enumerate}
In particular, when the ambient curvature is nonpositive and the submanifold is totally geodesic then $D_f^2\operatorname{Vol}(\xi,\xi)$ is nonnegative and the minimal immersion is a local minimum. This covers for instance the case of geodesics in nonpositive curvature.
However, in general one cannot expect that just controlling the sign of the curvature would guarantee that the minimal immersion is an actual local minimum. Nevertheless, we shall see that under some other additional assumptions the minimal immersion will be a local minimum. We now introduce the standard terminology:
\begin{defi}
A minimal immersion $f$ is {\em stable}\footnote{The terminology is unstable here: some people call this condition {\em semistable}} if for any compactly supported infinitesimal deformation $\xi$, ${\rm D}^2_f\operatorname{Vol}(\xi,\xi)\geqslant 0$.
\end{defi}
Thus a totally geodesic surface in a nonpositively curved manifold is stable.
A famous result proved independently in
\textcite{Pogorelov:1981uq}, \textcite{Carmo:1979uo}, and \textcite{Fischer-Colbrie:1980vl} states
\begin{theo}\label{theo:CP}
The plane is the only stable embedded complete minimal surface in~$\mathbb R^3$.
\end{theo}
As a standard phenomenon, we will explain later on, that such a rigidity result implies a compactness property for the space of stable minimal surfaces (proposition~\ref{pro:F-Sch}).
\subsection{Minimal surfaces in $3$-manifolds} Let us now focus on minimal immersions of surfaces in $3$-manifolds. The volume is then called the {\em area} of the immersion,
and denoted by $\operatorname{Area}(f)$.
Two important results by \textcite{Schoen:1979} and \textcite{Sacks:1982} guarantee the existence of minimal surfaces from some topological data. Here is a special case of their result.
Say a continuous map between two connected manifolds is {\em incompressible} if it is injective at the level of fundamental groups.
\begin{theo}\label{theo:SY}
Let $f$ be a continuous incompressible map from a closed surface to a compact negatively curved $3$-manifold. Then there exists an immersion, homotopic to $f$, which is minimal and achieves the minimum of the area amongst all possible maps homotopic to~$f$.
\end{theo}
In particular, surface groups in fundamental groups of compact hyperbolic manifolds can be represented by minimal surfaces, albeit not necessarily uniquely as we shall see.
As an application, let us now give a hint of the proof by \textcite{Thurston:1997ux} of theorem~\ref{theo:finitely} in the case of the fundamental groups of a hyperbolic manifold as was explained to us by Delzant. \begin{theo}\label{theo:thurs}
The fundamental group of a hyperbolic manifold only contains finitely many conjugacy classes of surface groups of a given genus.
\end{theo}
\begin{proof} Let $S$ be a minimal surface in $M$ representing a surface group.
The curvature of the minimal surface $S$ is bounded from above by the curvature of $M$, and thus the area $\operatorname{Area}(S)$ of $S$ is bounded from above by $4\pi(g-1)$, where $g$ is the genus of $S$.
Moreover since the surface is incompressible, the injectivity radius $i_S$ of $S$ is bounded from below by the injectivity radius $i_M$ of $M$. Let then $a(i_M)$ be a lower bound of the area of a ball of radius $i_M/2$ in $S$ and observe that by comparison theorems, we can have an explicit formula for $a(i_M)$ in terms of $i_M$.
Thus we can cover $S$ by $\operatorname{Area}(S)/a(i_M)$ balls of radius $i_M/2$. Hence $\pi_1(S)$ is generated by curves of length (in $S$) less than $2i_M\operatorname{Area}(S)/a(i_M)$ and hence less than $8\pi i_M (g-1)/a(i_M)$. The same holds {\it a fortiori} for the length of those curves in $M$. This implies that there are only finitely many possibilities for conjugacy classes of surface groups.
\end{proof}
Observe that Thurston originally used pleated surfaces rather than minimal ones: we only use the fact that the surface representing the surface group has curvature no greater than $-1$.
\subsubsection{Back to quasi-Fuchsian manifolds}
The work of Schoen and Yau carries on immediately in the context of quasi-Fuchsian manifolds, due to the existence of a convex hull which traps minimal surfaces:
\begin{prop}\label{pro:trap} Let $M$ be a $3$-manifold of curvature less than $-1$. Let $\rho$ be a representation of $\pi_1(S)$ in the isometries of $M$. Let $C$ be a convex set in $M$ invariant by the action of $\rho(\pi_1(S))$.
Let $f$ be a minimal immersion of the universal cover $\tilde S$ of $S$, equivariant under $\rho$, then $f(\tilde S)$ is a subset of $C$.
\end{prop}
\begin{proof} The distance function to the convex set $C$ is convex and strictly convex for positive values. Hence by corollary~\ref{cor:subh}, its pullback on $S$ is strictly subharmonic for positive values and $\pi_1(S)$-invariant, hence vanishes identically.
\end{proof}
However, the number of those surfaces is not {\it a priori} bounded, as it follows from results of \textcite{Anderson:1983cm} and \textcite{Huang:2015wl}:
\begin{theo}
For any given positive integer $N$, there exists a quasi-Fuchsian manifold that contains at least $N$ distinct \textup{(}closed, incompressible and embedded\textup{)} minimal surfaces.
\end{theo}
For a survey about the use of minimal surfaces in $3$-manifolds, see \textcite{Hass:2005tr}.
\subsection{Compactness results}
The set of minimal surfaces enjoy compactness properties. In particular we have the standard fact valid in all dimensions.
\begin{prop}\label{pro:compac}
Let $\seqm{(M_m,h_m,x_m)}$ be a sequence of pointed Riemannian manifolds converging to a Riemannian manifold $(M_\infty,h_\infty,x_\infty)$. For each $m$, let $S_m$ be a complete minimal surface without boundary in $M_m$, so that $x_m$ belongs to $S_m$. Assume that, for every $R$, the second fundamental form of $S_m$ is bounded independently of $m$ on every ball in $S_m$ containing $x_m$ and of radius $R$.
Then the sequence of pointed minimal surfaces $\seqm{(S_m,x_m)}$ converges uniformly on every compact to a pointed minimal surface $(S_\infty,x_\infty)$ in $M_\infty$.
\end{prop}
The following is essentially contained in \textcite{Fischer-Colbrie:1980vl} and is a consequence of Theorem~\ref{theo:CP}. Let $\lambda$ be the function on a minimal surface defined as the positive eigenvalue of the shape operator.
\begin{prop}\label{pro:F-Sch}
Let $M$ be a $3$-manifold with a metric $h$ with curvature bounded from above by $-1$. Then there is a positive $K$ only depending on $M$, such that for any stable minimal disk $D$ embedded in the universal cover $\tilde M$, we have $\lambda(D)\leqslant K$.
\end{prop}
We sketch a proof to emphasize the standard philosophy in geometric analysis that a rigidity result yields a compactness result.
\begin{proof}[Sketch of a proof]
Let us give a proof by contradiction and assume that there exists a sequence of stable complete minimal disks $\seq{D}$ and a point $x_m$ in $D_m$ so that $\seqm{\lambda_m(x_m)}$ goes to infinity. Here we denote by $\lambda_m$ the function $\lambda$ on $D_m$. We can assume using the cocompact group $\pi_1(M)$ in $\operatorname{Isom}(\tilde M)$ that $x_m$ lies in a compact fundamental domain for $\pi_1(M)$.
The optimum would be to find a point~$x_m$ in~$D_m$ where $\lambda_m$~achieves its maximum value~$\Lambda_m$, which happens for instance if the disk projects to a closed surface in~$M$. However, since this is not necessarily the case, we use a classical trick in geometric analysis.
Let then $K_m$ be the maximum of $\lambda_m$ on the ball of center $x_m$ and radius $10$.
According to the {\em $\Lambda$-maximum lemma} as in \textcite[Paragraph 1.D]{Gromov:1991uy}, and assuming $\lambda_m(x_m)\geqslant 1$ there exists $y_m$, such that
\begin{equation}
\Lambda_m\coloneqq \lambda_m(y_m)\geqslant \sup\Bigl\{K_m,\frac{1}{2}\lambda_m(z)\mid d(z,y_m)\leqslant \frac{1}{2\sqrt{\lambda_m(y_m)}}\Bigr\}\ .\label{eq:lambda}
\end{equation}
Observe that $\seq{\Lambda}$ also goes to infinity.
We now consider the metric $h_m=\Lambda_mh$ on~$M$,
associated to a distance $d_m$, and observe that $(\tilde M,h_m,y_m)$ converges smoothly on every compact to a Euclidean space. The new eigenvalue function $\tilde\lambda_m$ is now equal to~\mbox{$\lambda_m/\Lambda_m$}.
Thus we obtain from the assertion~\eqref{eq:lambda}, that
$$
\tilde\lambda_m(z)\leqslant 2, \hbox{ if } d_m(z,y_m)\leqslant \frac{\sqrt{\Lambda_m}}{2}\ .
$$
Thus the sequence of minimal surfaces $\seqm{(S_m,y_m)}$ has bounded second
fundamental form on larger and larger balls, and thus, by
proposition~\ref{pro:compac}, the sequence converges (on every compact) to
a minimal surface $(S_\infty,y_\infty)$ in $(M,h_\infty,y_\infty)$. Now
$S_\infty$ is a stable minimal surface in the Euclidean $3$-space $(M,h_\infty)$. By Theorem~\ref{theo:CP}, $S_\infty$ is a plane and thus its second fundamental form is zero. Hence, $\tilde\lambda_m$ converges to zero uniformly on every compact and this contradicts $\tilde\lambda_m(y_m)=1$. This proves by contradiction that $\seq{\Lambda}$ is bounded. It follows that $\seq{K}$ --- and in particular $\seqm{\lambda_m(x_m)}$ --- is bounded. \end{proof}
\subsection{Almost Fuchsian minimal surfaces, Uhlenbeck's result and the asymptotic Plateau problem}
The examples constructed by Anderson, then by Huang and Wang are far from being Fuchsian but the situation improves when we are close to being Fuchsian.
\subsubsection{Almost Fuchsian minimal surfaces}
Let us go back to the second variation formula~\eqref{eq:2vf} for surfaces in the case of hyperbolic $3$-manifolds. We saw that the Hessian of the volume at a minimal immersion $f$ is given by \begin{equation}
{\rm D}^2_f\operatorname{Vol}(\xi,\xi)= 2\int_N (R_\xi+a_\xi -b_\xi){\rm d}\operatorname{vol}(f^* g)\ ,
\end{equation}
where $a_\xi\geqslant 0$, $R_\xi=2\Vert\xi\Vert^2$ and
$$
b_\xi=\operatorname{Trace}\braket{B(X)\xi\mid B(Y)\xi}=2\lambda^2\Vert\xi\Vert^2\ ,
$$
where $\lambda$ is the positive eigenvalue of $B$. Thus if we assume that $\lambda<1$ we can guarantee that $b_\xi\leqslant R_\xi$ and thus that ${\rm D}^2\operatorname{Vol}(\xi,\xi)>0$, for a non vanishing $\xi$, hence that $S$ is stable.
This suggests the following definition, where the term {\em almost Fuchsian} was coined by \textcite{Krasnov:2007wd}.
\begin{defi}
\begin{enumerate}
\item A {\em nearly geodesic minimal surface} is a complete minimal surface $S$ in ${\bf H}^3$ with $\lambda(S)<1$. If the nearly geodesic surface is invariant under a quasi-Fuchsian group, we say the quasi-Fuchsian group is {\em almost Fuchsian} and the nearly geodesic surface almost Fuchsian.
\item An {\em almost Fuchsian manifold} is a quasi-Fuchsian manifold that contains an almost Fuchsian minimal surface.
\end{enumerate}
\end{defi}
Using our freshly minted terminology, we can rephrase the previous discussion as the first part of the proposition
\begin{prop}
A nearly geodesic minimal surface in a hyperbolic $3$-manifold is stable.
\end{prop}
Then, as suggested by this stability result, \textcite{Uhlenbeck:1983wl} proved the following, which is a simple application of the maximum principle.
\begin{theo}\label{theo:uhl}
An almost Fuchsian manifold contains a unique minimal embedded incompressible surface, which is then stable.
\end{theo}
\subsubsection{Asymptotic Plateau problem} Let us quit the realm of equivariant minimal surfaces. As a special case of a theorem of \textcite{Anderson:1982vw} we have
\begin{theo}
Given any embedded circle $C$ in $\partial_\infty \mathbf{H}^3$, there exists a minimal embedded surface in ${\bf H}^3$ bounded by $C$ in the Poincaré ball model.
\end{theo}
Let us then say that
\begin{defi}
An embedded minimal surface $S$ is a solution of the {\em asymptotic Plateau problem} defined by the embedded circle $C$ in $\partial_\infty \mathbf{H}^3$ if in the ball model the closure $\overline{S}$ of $S$ is
$
S\sqcup C.
$
Alternatively we say that $C$ is the {\em boundary at infinity} of $S$, that $S$ is bounded by $C$ and write $C=\partial_\infty S$.
\end{defi}
One naturally hopes there should be a correspondence between $K$-quasicircles and almost Fuchsian minimal surfaces. This is indeed obtained as a consequence of a theorem of \textcite[Theorem A]{Seppi:2016ut}, while the second part follows from an extension of \textcite{Guo:2010wk}, where the result is only stated for almost Fuchsian surfaces.
\begin{theo}\label{theo:Seppi}
There exist constants $K_0$ and $C_0$ such that if $S$ is a complete embedded minimal surface whose boundary at infinity is a $K$-quasicircle, with $K$ less than $K_0$, then
$$
\lambda(S)\leqslant C_0 \log(K)\ .
$$
Conversely, there exists $\lambda_0$, such that if $\lambda(S)\leqslant \lambda\leqslant \lambda_0$, then the surface $S$ is embedded in ${\bf H}^3$ and the boundary at infinity is a $K(\lambda)$-quasicircle, with
$$
\lim_{\lambda\to 0}K(\lambda)=1\ .
$$
\end{theo}
For a survey in the asymptotic Plateau problem for minimal surfaces see \textcite{Coskunuzer:2014uk}, for results when the target is negatively curved see \textcite{Lang:2003we}.
\subsection{In variable curvature} Assume now that the closed hyperbolic manifold $(M,h_0)$ is also equipped with a metric $h$ of curvature less than $-1$.
For any set $\Lambda$ in $\partial_\infty\tilde M$, let $\operatorname{Env}_h(\Lambda)$ be the {\em convex hull of $\Lambda$} that is the intersection of all convex subsets of $\tilde M$ whose closure in $\tilde M\sqcup\partial_\infty\tilde M$ contains $\Lambda$.
Observe again that, thanks to Mostow rigidity, the notion of a $K$-quasicircle in the boundary at infinity of the universal cover of $(M,h)$ is topological. We denote by $\tilde M$ the universal cover of $M$.
For the paper being discussed, the authors need to obtain a control between minimal surfaces for both $(M,h)$ and $(M,h_0)$ given in \cite[Theorem 3.1.]{Calegari:2020uo}. This result
follows from results of \textcite{Bowditch:1995wo}.
\begin{theo}\label{theo:var}{\sc [Morse Lemma for minimal surfaces]}
There exists a positive constant~$R$, such that if $S$ and $S_0$ are incompressible minimal surfaces in $M$, for~$h$ and~$h_0$, having the same fundamental group, then
$$
d_0(S_0,S)\leqslant R\ ,
$$
where $R$ only depends on $h$, $h_0$ and $\lambda(S_0)$, and $d_0$ is the distance with respect to $h_0$.
\end{theo}
Correctly extended this result also makes sense for other minimal surfaces than equivariant ones, and could be understood as a Morse Lemma for minimal surfaces.
Since $S$ and $S_0$ are trapped in $\operatorname{Env}_h(\Lambda)$ and $\operatorname{Env}_{h_0}(\Lambda)$ respectively by proposition~\ref{pro:trap}, it is enough to prove
\begin{prop} For any set $\Lambda$ in $\partial_\infty \tilde M=\partial_\infty {\bf H}^3$, we have
\begin{equation}
d_h(\operatorname{Env}_h(\Lambda),\operatorname{Env}_{h_0}(\Lambda))\leqslant R\ , \label{ineq:CC0}
\end{equation}
for some constant only depending on $h$ and $h_0$.
\end{prop}
\begin{proof}[Indication of the proof]
We prove that by introducing the following set:
for any $p$, let
$$
\operatorname{Clo}^h_p(\Lambda)\coloneqq \{\gamma(t)\mid t>0\ , \gamma \hbox{ geodesic for $h$ with }\gamma(0)=p, \gamma(+\infty)\in\Lambda\}\ .
$$
Then, by a result of \textcite[proposition 2.5.4]{Bowditch:1995wo}, there is some positive constant $R_1(h)$ only depending on $h$, so that for any $p$ in $\operatorname{Env}_h(\Lambda)$,
\begin{equation}
d_h(\operatorname{Env}_{h}(\Lambda),\operatorname{Clo}^h_p(\Lambda))\leqslant R_1(h)\ .\label{ineq;CloC}
\end{equation}
Take now geodesics $\gamma$ and $\gamma_0$ joining two points of $\Lambda$, then by the Morse Lemma for geodesics, we can find points $p$ and $p_0$ in $\gamma$ and $\gamma_0$ respectively so that
\begin{equation}
d_h(p,p_0)\leqslant R_2\ ,\label{ineq;pp0}
\end{equation}
where $R_2$ only depends on $h$ and $h_0$. As a final ingredient observe that for any $p$ and $q$,
\begin{equation}
d_h(\operatorname{Clo}^h_p(\Lambda),\operatorname{Clo}^h_q(\Lambda))\leqslant R_3+d_h(p,q)\ ,\label{ineq;pp3}
\end{equation}
where $R_3$ only depends on $h$. Observing that $p$ belongs to $\operatorname{Env}_h(\Lambda)$ while $p_0$ belongs to $\operatorname{Env}_{h_0}(\Lambda)$, and combining inequalities~\eqref{ineq;CloC}, \eqref{ineq;pp0} and~\eqref{ineq;pp3}, we get the desired inequality~\eqref{ineq:CC0} with
$$
R=R_3+R_2+R_1(h)+R_1(h_0)\ . \qedhere
$$
\end{proof}
\subsubsection{Minimal surfaces and quasi-isometries}
Let again $(M,h_0)$ be a closed hyperbolic $3$-manifold and $h$ another metric on $M$ of curvature less than $-1$.
As a consequence of Theorem~\ref{theo:var}, {\it a priori} bounds on the curvature of minimal surfaces given by proposition~\ref{pro:F-Sch}, Theorem~\ref{theo:Seppi} that gives this result in the hyperbolic case, and classical arguments about quasi-isometries, we have
\begin{theo}\label{coro:KK}
There exist positive constants $\varepsilon_0$ and $K$ so that the following holds. Assume $h$ is close enough to a hyperbolic metric $h_0$.
Let $S$ be an area minimizing minimal incompressible surface in $(M,h)$, such that the boundary at infinity of $\pi_1(S)$ is a $(1+\varepsilon_0)$-quasicircle, then
\begin{enumerate}
\item the conformal minimal parametrization $\phi$ from ${\bf H}^2$ to $S$, is, as a map to the universal cover of $M$, a $K$-quasi-isometric embedding,
\item $\phi$ admits an extension to $\mathbf{RP}^1$ which is a $K$-quasi-symmetric map with values in $\partial_\infty \tilde M=\partial_\infty{\bf H}^3$.
\end{enumerate}
\end{theo}
We recall that a map is a {\em $K$-quasi-isometric embedding} if the image of every geodesic is a $K$-quasi-geodesic.
\subsection{The case of fibered manifolds}\label{par:agol}
To conclude our promenade in minimal surfaces in hyperbolic manifolds, and after discussing almost Fuchsian manifolds, let us say a word about manifolds fibering over the circle, even though none of this will be used further on.
By Agol's Virtual Haken Theorem, any hyperbolic $3$-manifold has a finite cover that fibers over the circle. The fibers of these fibrations are not quasi-Fuchsian, but nevertheless can be represented by minimal surfaces by theorem~\ref{theo:SY}.
This fibration is {\em taut} by a result of \textcite{Sullivan:1979vg}, which means one can realize this foliation by minimal surfaces for some metric.
A long standing question was whether this fibration could be realized by a minimal fibration in the hyperbolic metric. The answer to this question is no: there exists $3$-manifolds fibering over the circle, so that the fibers of this fibration cannot be all minimal surfaces. This is a result of \textcite{Hass:2015we} ---see also \textcite{Huang:2019tp}.
\section{Equidistribution in the phase space of minimal surfaces}
\subsection{A phase space for stable minimal surfaces} \label{par:phasespace}
Dealing with solutions of ordinary differential equations, for instance geodesics, we introduced the phase space of the problem, which can be identified with the space of pairs $(x,L)$\footnote{One has to be careful of what we call ``orbit'' to avoid the space being non-Hausdorff}, where $x$ is a point in the orbit $L$ of the ordinary differential equation. One can generalize this construction to solutions of partial differential equations as was done in \textcite{Gromov:1991uv} for minimal surfaces and harmonic mappings and studied in \textcite{Labourie:2005b} for surfaces with constant Gaußian curvature in negatively curved $3$-manifolds. We will do so for stable minimal surfaces and describe measures on this space.
Let $M$ be a closed manifold equipped with a metric~$h$ of curvature less than~$-1$, and~$\tilde M$ its universal cover.
In this section ${\bf H}^2$ will be the upper half plane model of the hyperbolic plane, which comes with a canonical identification of $\partial_\infty{\bf H}^2$ with $\mathbf{RP}^1$ and $\operatorname{Isom}({\bf H}^2)$ with $\mathsf{PSL}_2(\mathbb R)$. We say a minimal immersion from ${\bf H}^2$ to $M$ is {\em conformal} if the pullback metric is in the conformal class of the hyperbolic metric.
\begin{defi} {\sc[Conformal minimal lamination]}
\phantomsection
Let us fix some small $\varepsilon_0$ and large constant $K$ so that Theorem~\ref{coro:KK} holds.
\begin{enumerate}
\item Let $
\mathcal F_h(\tilde M)
$
be the space of stable minimal conformal immersions of ${\bf H}^2$ in $\tilde M$ which are $K$-quasi-isometric embeddings, equipped with the topology of uniform convergence on every compact, and
$\mathcal F_h(M)\coloneqq\mathcal F_h(\tilde M)/\pi_1(M)$.
\item For $\varepsilon\leqslant\varepsilon_0$, let $
\mathcal F_h(\tilde M,\varepsilon)
$ be the set of those $\phi$ in $\mathcal F_h(\tilde M)$ so that $\phi(\partial_\infty{\bf H}^2)$ is a $(1+\varepsilon)$-quasicircle. Similarly, let $\mathcal F_h(M,\varepsilon)\coloneqq\mathcal F_h(\tilde M,\varepsilon)/\pi_1(M)$.
\end{enumerate}
The space $\mathcal F_h(M)$ together with the action of $\mathsf{PSL}_2(\mathbb R)$ by precomposition is called the {\em conformal minimal lamination} of $M$. \end{defi}
Finally denote by $\mathcal Q(K)$ the space of $K$-quasicircles in $\partial_\infty{\bf H}^3$ equipped with the Gromov--Hausdorff topology. Then
\begin{theo}\phantomsection\label{theo:phasespace}
\begin{enumerate}
\item The map from $\mathcal F_h(\tilde M)$ to $\tilde M$, given by $\phi\mapsto \phi(i)$ is a proper map.
\item The action of $\mathsf{PSL}_2(\mathbb R)$ by precomposition on $\mathcal F_h(M)$ is continuous and proper.
\item Moreover, the map $\partial$ from $\mathcal F_h(\tilde M)$ to $\mathcal Q(1+\varepsilon)$, which maps $\phi$ to $\phi(\mathbf{RP}^1)$ is continuous and $\mathsf{PSL}_2(\mathbb R)$-invariant.
\end{enumerate}
\end{theo}
We may assume that $(1+\varepsilon)\leqslant K$, and we will consider from now on $\mathcal Q(1+\varepsilon)$ as a subset of $\mathcal Q(K)$ to lighten the notation.
\begin{proof}
The first point is a rephrasing of proposition~\ref{pro:F-Sch} and~\ref{pro:compac}. The second point and third point also follow from the first and from the fact that any element of $\mathcal F_h(M)$ is a $K$-quasi-isometric embedding.
\end{proof}
Here is a corollary, using the constants that appear in the previous theorem.
\begin{coro}\label{cor:cont}
The map $\partial$ gives rise to a continuous map ---also denoted $\partial$--- from $\mathcal F_h(M)/\mathsf{PSL}_2(\mathbb R)$ to $\mathcal Q(K)$.
\end{coro}
A recent preprint of \textcite{Lowe:2020vu} states ---in this language--- that upon small deformation $h$ of the hyperbolic metric the projection from $\mathcal F_h(M,0)$ to $G(M)$ is a homeomorphism. This is a special case of a theorem by \textcite{Gromov:1991uv}.
\subsection{Laminar measures and conformal currents}
This paragraph is an extension of the theory of geodesic currents and invariant measures as in \textcite{Bonahon:1997tl}.
\begin{defi}
\begin{enumerate}
\item A {\em laminar measure} on $\mathcal F_h(\tilde M)/\pi_1(M)$ is a $\mathsf{PSL}_2(\mathbb R)$-invariant finite measure.
\item A {\em conformal current} is a $\pi_1(M)$-invariant locally finite measure on $\mathcal Q(K)$.
\end{enumerate}
\end{defi}
Here are two examples which are the analogues of the situation for closed
geodesics. Let $\mathsf\Gamma$ be a Fuchsian group acting on ${\bf H}^2$. Let $U$ be a fundamental domain of the action of $\mathsf\Gamma$ on $\mathsf{PSL}_2(\mathbb R)$. Let $\rho$ be a representation of $\mathsf\Gamma$ into $\pi_1(M)$.
\begin{prop}
For $\varepsilon$ small enough, let $\phi$ be an element of $\mathcal F_h(\tilde M)$, equivariant under a representation $\rho$ from $\mathsf\Gamma$ to $\pi_1(M)$, such that $\rho$ is injective and its boundary at infinity is a $(1+\varepsilon)$-quasicircle $\Lambda_0$.
\begin{enumerate}
\item Let $\delta^h_\phi$ be the measure on $\mathcal F_h(M)$ defined by
$$
\int_{\mathcal F_h(M)} f\ {\rm d} \delta^h_\phi=\frac{1}{\operatorname{Vol}(U)}\int_U f(\phi\circ g)\ {\rm d}\mu(g) ,
$$
where $\mu$ is the bi-invariant measure on $\mathsf{PSL}_2(\mathbb R)$. Then $\delta^h_\phi$ is a $\mathsf{PSL}_2(\mathbb R)$-invariant probability measure.
\item Let $\delta_\rho$ be the measure on $\mathcal Q(K)$ defined by $$
\delta_\rho=\sum_{\gamma\in \pi_1(M)/\rho(\mathsf\Gamma)}\gamma_*\delta_{\Lambda_0}\ ,
$$
where $\delta_{\Lambda_0}$ is the Dirac measure supported on $\Lambda_0$. Then $\delta_\rho$ is a locally finite $\pi_1(M)$-invariant measure on $\mathcal Q(K)$.
\end{enumerate}
\end{prop}
The only non-trivial point is the fact that $\delta_\rho$ is a locally finite measure. This is checked at the end of the proof of the next proposition.
The next proposition is crucial.
\begin{prop}\label{pro:defpi} There exist some positive constant $\varepsilon$ and a continuous map $\pi_h$ from the space of laminar measures \textup{(}up to multiplication by a constant\textup{)} to the space of conformal currents \textup{(}up to multiplication by a positive constant\textup{)} so that
$$
\pi_h(\delta_\phi^h)=\delta_\rho\ ,
$$
if $\phi$ is an element of $\mathcal F_h(M,\varepsilon)$ equivariant under a representation $\rho$.
Moreover the support of $\pi_h(\mu)$ is the image by $\partial$ of the support of $\mu$.
\end{prop}
\begin{proof} Let us fix a nonnegative function $\Xi$ supported on a bounded neighborhood of the identity in $\mathsf{PSL}_2(\mathbb R)$.
Let $\mu$ be a laminar measure on $\mathcal F_h(M)$. Let us lift $\mu$ to a locally finite $\pi_1(M)$-invariant measure $\tilde\mu$ on $\mathcal F_h(\tilde M)$. Let $\Lambda$ be a $(1+\varepsilon)$-quasicircle. Let $\mathcal T$ be the space of triples of pairwise distinct points of $\partial_\infty\tilde M$. Let $U$ be a small neighborhood of $\Lambda$ in $\mathcal Q(K)$. Let ${\bf F}$ be a continuous map from $U$ to $\mathcal T$ so that if $(a_0,a_1,a_\infty)={\bf F}(\Lambda)$, then $a_0$, $a_1$, $a_\infty$ belong to~$\Lambda$.
For any $\phi$ in $\mathcal F_h(M)$ so that $\partial\phi=\Lambda$ is in $U$, let $g_\phi$ be the unique element of $\mathsf{PSL}_2(\mathbb R)$ so that $\phi\circ g_\phi(0,1,\infty)={\bf F}(\Lambda)$. Let then $\xi_{\bf F}$ be the function defined on $\partial^{-1}U$, by
$$
\xi_{\bf F}(\phi)=\Xi(g_\phi)\ .
$$
Finally let us define, when $f$ is supported in $U$,
\begin{equation}
\int_{\Lambda} f \ {\rm d}\pi_h(\mu) \coloneqq \int_{\mathcal F_h(\tilde M)} \xi_{\bf F}\cdot (f\circ\partial)\ {\rm d}\tilde\mu .\label{def:pi}
\end{equation}
Since $\xi_{\bf F}\cdot (f\circ\partial)$ is compactly supported, the right-hand side is a well-defined finite real number.
Then, one sees that the right-hand side does not depend on the choice of ${\bf F}$ since $\tilde\mu$ is $\mathsf{PSL}_2(\mathbb R)$-invariant. By construction $\pi_h(\mu)$ is locally finite since $\tilde\mu$ is, similarly $\pi_h(\mu)$ is invariant under the action of $\pi_1(M)$ since $\tilde\mu$ is.
Finally the formula shows that $\pi_h$ is continuous in the weak topology.
We leave it to the reader to check the equality $
\pi_h(\delta_\phi^h)=\delta_\rho$.
\end{proof}
\subsection{Equidistribution}
We can now explain the equidistribution result that follows from the techniques in \textcite{Kahn:2009wh}.
\begin{theo}{\sc [Equidistribution]}\label{theo:KMseq} Let $M$ be a closed hyperbolic $3$-manifold. There exists a sequence $\seq{\delta^{0}}$ of laminar measures on $\mathcal F_{h_0}(M,1/m)$, such that $\delta^0_m$ is supported on finitely many closed leaves and so that the sequence $\seq{\delta^0}$ converges to $\mu_{Leb}$.
\end{theo}
We will call in the sequel the sequence of measures obtained in this theorem a {\em Kahn--Markovi{\'c} sequence}.
This result is an extended version of \textcite[Theorem 4.2]{Calegari:2020uo}.
\begin{proof}[Sketch of the proof]
We use a different geometric presentation than \textcite{Hamenstadt:2015wa} and \textcite{Kahn:2009wh}, developed in \textcite{Kahn:2018wx}. The following convention will hold through this sketch
\begin{enumerate}
\item All references in this sketch are from \textcite{Kahn:2018wx}.
\item $K_i$ will be constants only depending on the closed hyperbolic manifold $M={{\bf H}^3}/\pi_1(M)$.
\item $o(m)$ will denote a function that converges to $0$ when $m$ goes to infinity.
\item $\alpha^-$ and $\alpha^+$ are the repulsive and attractive fixed points of the element $\alpha$ in $\pi_1(M)$, while $\ell(\alpha)$ is the length of the associated geodesic.
\item $\varepsilon$ will be a (small) positive constant and $R$ be a (large) positive constant.
\end{enumerate}
A {\em tripod} is a triple of pairwise distinct points in $\mathbf{CP}^1\simeq\partial_\infty{\bf H}^2h$.
Let $\mathcal T$ be the space of tripods. The space $\mathcal T$ is canonically identified with the frame bundle $F_{h_0}({\bf H}^2h)$ and carries a canonical metric. Every point $x$ in $\mathcal T$ also defines an ideal triangle $\Delta_x$ in ${\bf H}^2h$ and we denote by $b(x)$ the barycenter of this triangle. We see the {\em barycentric map} $x\mapsto b(x)$ as a projection from $\mathcal T$ to ${\bf H}^2h$
We remark that there is an open subset $\Delta$ in $\mathsf{PSL}_2(\mathbb R)$, invariant by the right action of $S^1$, so that
\begin{equation*}
b(\Delta(x))=\Delta_x\ ,
\end{equation*}
and thus for any function defined on ${\bf H}^2h$, we have
\begin{equation}
\frac{1}{\operatorname{Vol}(\Delta)}\int_\Delta g\circ b\circ u(x)\ {\rm d}\mu(u)=\frac{1}{\pi}\int_{\Delta_x}g\ {\rm d}\operatorname{area}\ ,\label{eq:bD}
\end{equation}
where ${\rm d}\mu$ is the bi-invariant measure in $\mathsf{PSL}_2(\mathbb R)$.
A {\em triconnected pair of tripods} [definition~10.1.1] is a quintuple $(t,s,c_0,c_1,c_2)$ so that $t$ and $s$ are points in $\mathcal T/\pi_1(M)$ and $c_i$ are three homotopy classes of paths from $t$ to $s$. We denote by $\pi$ the projection from $\mathcal T$ to $\mathcal T/\pi_1(M)$.
Let also define $\pi^0$ and $\pi^1$ as the forgetting maps taking values in
the frame bundle $$\pi^0\colon (t,s,c_0,c_1,c_2)\mapsto t\ \ \ ,\ \ \
\pi^1\colon (t,s,c_0,c_1,c_2)\mapsto s\ .$$
The space of triconnected pairs of tripods carries a measure $\mu_{\varepsilon,R}$ [definition~12.2.3] satisfying the following property, which is established by a suitable closing lemma [Theorem~10.3.1][Theorem~9.2.2]:
Let $\varepsilon$ be small enough, and then $R$ large enough. If $(t,s,c_0,c_1,c_2)$ is in the support of $\mu_{\varepsilon,R}$, there exist three elements $\alpha$, $\beta$ and $\gamma$ of $\pi_1(M)$ so that,
\begin{enumerate}
\item let $t_0=(\alpha^-,\beta^-,\gamma^-)$ and $s_0=(\alpha^-,\alpha(\gamma^-),\beta^-)$, then $\pi(t_0)$ and $\pi(s_0)$ are $K_1\frac{\varepsilon}{R}$ close to $t$ and $s$ respectively,
\item $\alpha$, $\beta$ and $\gamma$ are in the homotopy classes of $c_0\cdot c_1^{-1}$, $c_2\cdot c_0^{-1}$ and $c_1\cdot c_2^{-1}$ respectively.
\item The complex cross-ratio of $(\alpha^-, \alpha(\gamma^-),\beta^-,\gamma^-)$ is $K_1\frac{\varepsilon}{R}$ close to $R$.
\item The complex length of $\alpha$, $\beta$, $\gamma$ is $K_2\frac{\varepsilon}{R}$ close to $2R$.
\end{enumerate}
Observe that $\alpha\gamma\beta=1$. Moreover, gluing the two ideal triangles $T_{0}\coloneqq \Delta_{t_0}$ and $S_0=\Delta_{s_0}$, then taking the quotient by $\pi_1(M)$ one gets a pleated pair of pants $P$ in $M$, whose fundamental group is generated by $(\alpha,\beta,\gamma)$.
Conversely, given three elements in $\pi_1(M)$, $\alpha$, $\gamma$, and $\beta$ so that $\alpha\gamma\beta=1$, one gets a unique triconnected pair of tripods (called {\em exact}) by setting $t=(\alpha^-,\beta^-,\gamma^-)$ and $s=(\alpha^-,\alpha(\gamma^-),\beta^-)$ and $c_0$, $c_1$, $c_2$ the obvious paths.
We define a triple $(\alpha,\beta,\gamma)$ satisfying the last two items $(iii)$ and $(iv)$ as an {\em $(\varepsilon,R)$-pair of pants}.
The sequences of measures $\pi^0_*\mu_{\varepsilon,R}$ and $\pi^1_*\mu_{\varepsilon,R}$ converge to $\mu_{Leb}$ by a mixing argument as $R$ goes to infinity, when $\varepsilon$ is fixed.
More precisely, we can choose a sequence $\seq{R}$ going to $\infty$, so that setting $\mu_m=\mu_{\frac{1}{m},R_m}$, for any continuous function $f$ on $F_{h_0}(M)$ we have [proposition~10.2.6][equation~93]:
\begin{equation}
\lim_{m\to\infty}\int (f\circ \pi^0) \ \ {\rm d}\mu_{m}=\lim_{m\to\infty}\int (f\circ \pi^1) \ \ {\rm d}\mu_{m}=\int f {\rm d}\mu_{Leb}\ .
\end{equation}
According to [proposition~18.0.3], and using mixing again, the proof goes by showing that one can approximate $\mu_{m}$ by measures $\nu_{m}$ with finite support $\{P^m_1,\ldots, P^m_{N_m}\}$, on exact triconnected pair of tripods and rational weights, with the following property.
The sequences of measures $\pi^0_*\nu_{m}$ and $\pi^1_*\nu_{m}$ converge to $\mu_{Leb}$ by a mixing argument as $m$ goes to infinity. In other words, for any function $f$,
\begin{equation}
\lim_{m\to\infty}\frac{1}{N_m}\sum_{i=1}^{N_m}f (\pi^0(P^m_i))
=\lim_{m\to\infty}\frac{1}{N_m}\sum_{i=1}^{N_m}f (\pi^1(P^m_i))=\int f {\rm d}\mu_{Leb}\ .\label{eq:fppp}
\end{equation}
Without loss of generality, we assume that the weight of each $P^m_i$ ---that may appear with multiplicity--- is $1/{N_m}$: in other words the $P^m_i$ are counted with multiplicities to have the same weights.
Let us now assume that $f=g\circ b$, where $b$ is the barycentric map from $F_{h_0}(M)$ to $M$ and $g$ is defined on $M$.
Using the invariance of $\mu_{Leb}$ under the left action of $\mathsf{PSL}_2(\mathbb R)$ we get that
\begin{equation}
\lim_{m\to\infty}
\frac{1}{2\pi N_m}\Bigl(\sum_{i=1}^{N_m}\int_{\Delta_0(P^i_m)}g\ {\rm d}\operatorname{area} + \sum_{i=1}^{N_m}\int_{\Delta_1(P^i_m)}g\ {\rm d}\operatorname{area}\Bigr)=\int f {\rm d}\mu_{Leb}\ , \label{eq:intP}
\end{equation}
where $\Delta_0(P)\coloneqq\Delta_{\pi^0(P)}$ and $\Delta_1(P)\coloneqq\Delta_{\pi^1(P)}$ are ideal triangles: indeed we apply equation~\eqref{eq:fppp} to $f\circ u$, for all $u$ in $\Delta$, and apply formula~\eqref{eq:bD}.
Moreover, the measure $\nu_m$ satisfies a matching
condition (see [lemma~14.2.1]) that we now describe. Let $\{P^m_1,\ldots, P^m_{N_m}\}$ be the support of $\nu_{m}$.
The matching condition is that for each $m$, we can find (see [definition~14.1.1][Theorem~16.3.1]) a family of closed surfaces $S_m=(S_1^m,\ldots, S^m_{M_m})$ in $M$ obtained by gluing the pleated pairs of pants $\{P^m_1,\ldots, P^m_{N_m}\}$ in $M$ such that every pair of pants appears exactly once. In particular, we get from equation~\eqref{eq:intP} that
\begin{equation}
\lim_{m\to\infty}\frac{1}{2\pi \chi(S_m)}\int_{S_m} g {\rm d}\operatorname{area} =\int f {\rm d}\mu_{Leb}\ . \label{eq:int S}
\end{equation}
For each $S^i_m$, let $\Sigma^i_m$ be the associated minimal surface and $\Sigma_m$ the union of all $\Sigma^i_m$. As part of the construction, each $S^i_m$ is $(1+o(m))$-almost Fuchsian and thus the projection~$p^i_m$ from the pleated surface~$S^i_m$ to the minimal surface~$\Sigma^i_m$ is $(1+ o(m))$-bi-Lipschitz and satisfies $d(x,p^i_m(x))\leqslant o(m)$.
It follows from equation~\eqref{eq:int S} that
\begin{equation*}
\lim_{m\to\infty}\frac{1}{2\pi \chi(\Sigma_m)}\int_{\Sigma_m} g {\rm d}\operatorname{area} =\int f {\rm d}\mu_{Leb}\ .
\end{equation*}
In other words, setting $\delta^0_m$ to be the measure on $\mathcal F_{h_0}(M)$ supported on $\Sigma_m$, we have
\begin{equation*}
\lim_{m\to\infty}p_*\delta^0_m= p_*\mu_{Leb}\ .
\end{equation*}
We can now conclude using corollary~\ref{coro:Ratner3}.
\end{proof}
\section{Calegari--Marques--Neves asymptotic counting}\label{sec:last}
The article of \textcite{Calegari:2020uo} deals with the counting of surface subgroups. Let $\Sigma$ be the set of conjugacy classes of surface subgroups in $\pi_1(M)$ for a manifold $M$. For $\Pi$ an element of $\Sigma$ and $h$ a Riemannian metric on $M$, we define
$$
\operatorname{MinArea}_h(\Pi)\coloneqq \inf\{\operatorname{Area}(S)\mid S \hbox{ is an incompressible surface in $M$ with $\pi_1(S)\in\Pi$}\}\ .
$$
Assume now that $M$ is a $3$-manifold that admits a hyperbolic metric. For a conjugacy class of surface group $\Pi$, we define $\varepsilon(\Pi)$ as
$$
\varepsilon(\Pi)\leqslant \varepsilon\ ,
$$
if $\Pi$ is quasi-Fuchsian and the limit circle is a $(1+\varepsilon)$-quasicircle. Let then
\begin{align}
S_h(M,T,\varepsilon)&\coloneqq\{\Pi\in\Sigma\mid \operatorname{MinArea}_h(\Pi)\leqslant T\, ,\ \varepsilon(\Pi)\leqslant \varepsilon\}\ , \\
E_\varepsilon(M,h)
&\coloneqq \frac{1}{4\pi}\liminf_{T\to\infty}\frac{\log\left(\sharp S_h(M,T,\varepsilon)\right)}{T\log T}\ ,\\
E(M,h)&\coloneqq \lim_{\varepsilon\to 0}E_\varepsilon(M,h)\ .
\end{align}
The main result of \textcite{Calegari:2020uo} is then
\begin{theo}\label{theo:main}Let $(M,h_0)$ be a hyperbolic $3$-manifold.
\begin{enumerate}
\item for any Riemannian metric $h$ on
$M$, we have $E(M,h)\leqslant 2{\rm h}_{vol}(M,h)^2$,
\item assume furthermore that $h$ has curvature less than $-1$; then $E(M,h)\geqslant 2$, with equality if and only if the metric $h$ is hyperbolic.
\end{enumerate}
\end{theo}
This result is only for counting conjugacy classes of surface groups. Except for the equality case for which the method does not apply, the theorem also holds for counting commensurability classes. We believe the same result holds for commensurability classes after a suitable adaptation of theorem~\ref{theo:equi}.
Thus we have an exact analogue to \textcite{Bowen:1972} and \textcite{Margulis:1969ve} for the first assertion as well as a rigidity result analogue to \textcite{Hamenstadt:1990tx} for the second assertion.
The equality case involves a mix of analytical and dynamical properties.
Let us now explain a proof of their result insisting on the use of the phase space of stable minimal surfaces, which synthesizes some of the proofs in \textcite{Calegari:2020uo}.
\subsection{Counting surfaces: from genus to area}
The next proposition is an easy exercise on counting, inclusion and inequalities and is used several times in the proof.
\begin{prop}\label{pro:denum}
Let $\mathcal S$ be a set of conjugacy classes of surface subgroups. Let $\mathcal S(g)$ be the set of those elements of $\mathcal S$ of genus less than $g$,
\begin{enumerate}
\item assume that we have a positive constant $c$, so that $$\sharp{\mathcal S(g)}\leqslant (cg)^{2g},$$ as well as constants $K_0$ and $K_1$, so that for any $\Pi$ in $\mathcal S$ of genus $g$, we have
\begin{equation*}
g- K_1\leqslant K_0 \operatorname{MinArea}_h(\Pi) \ .\end{equation*}
Then
$$
\limsup_{T\to\infty}\frac{\log\left(\sharp\left\{\Pi\in\mathcal S\mid\operatorname{MinArea}_h(\Pi)\leqslant T \right\}\right)}{T\log(T)}\leqslant 2K_0\ .
$$
\item Assume that we have a constant $c$, so that $$\sharp{\mathcal S(g)}\geqslant (cg)^{2g},$$ as well as constants $K_0$ and $K_1$ so that for any $\Pi$ in $\mathcal S$ of genus $g$, we have
\begin{equation*}
g- K_1\geqslant K_0\operatorname{MinArea}_h(\Pi) \ .
\end{equation*}
Then
$$
\limsup_{T\to\infty}\frac{\log\left(\sharp\left\{\Pi\in\mathcal S\mid\operatorname{MinArea}_h(\Pi)\leqslant T \right\}\right)}{T\log(T)}\geqslant 2 K_0\ .
$$
\end{enumerate}
\end{prop}
\subsection{The upper bound in variable curvature}
Our first proposition is
\begin{prop}
Let $h$ be any Riemannian metric on a manifold $M$ admitting a
hyperbolic metric. Then for all $\varepsilon$, we have
$
E_\varepsilon(M,h)\leqslant 2 {\rm h}_{top}(M)^2
$.
\end{prop}
\begin{proof} Recall that
$$
{\rm h}_{top}(M)={\rm h}_{vol}(M)=\lim_{R\to\infty}\frac{\log\left(\sharp\{\gamma\in\pi_1(M)\mid d_M(\gamma.x,x)\leqslant R\}\right)}{R}\ ,
$$
where $x$ is a point in the universal cover $\tilde M$ of $M$ and $d_M$ the distance in this universal cover. If $S$ is an incompressible surface in $M$ lifting to a disk in $\tilde M$, we have
$$
\{\gamma\in\pi_1(S)\mid d_S(\gamma.x,x)\leqslant R\}\subset \{\gamma\in\pi_1(M)\mid d_M(\gamma.x,x)\leqslant R\}\ ,
$$
and thus
$$
{\rm h}_{vol}(S)\leqslant {\rm h}_{vol}(M)\ .
$$
Combining with Theorem~\ref{theo:entropy} as in \textcite{Katok:1982uv}, we get
$$
{\rm h}_{vol}(M)^2\operatorname{Area}(S)\geqslant {\rm h}_{vol}(S)^2\operatorname{Area}(S)\geqslant 4\pi(g-1)\ .
$$
Proposition~\ref{pro:denum} applied to $\mathcal S=\mathcal S_h(M,\varepsilon)$
---using the upper bound given by theorem~\ref{theo:KMcount}--- yields the result.\end{proof}
\subsection{The upper bound in constant curvature} \label{par:gmg} When $(M,h_0)$ is hyperbolic, the previous upper bound gives $E(M,h_0)\leqslant 8$, we however
have a finer estimate.
\begin{prop}
For the hyperbolic metric $h_0$, we have $E(M,h_0)\leqslant 2$.
\end{prop}
\begin{proof}
For a closed minimal surface in $\mathcal S_{h_0}(M,T,\varepsilon)$, recall that by Theorem~\ref{theo:Seppi}
$$
\lambda(S)\leqslant C_0\log(1+\varepsilon)\eqqcolon \eta(\varepsilon)\ .
$$
Thus by the Gauß--Bonnet formula
$$
4\pi(g-1)=\int_S(1+\lambda(S)^2){\rm d}\operatorname{area}\leqslant (1+\eta(\varepsilon)^2)\operatorname{Area}(S) \ .
$$
From proposition~\ref{pro:denum} applied to $\mathcal{S}=\mathcal S_{h_0}(M,\varepsilon)$, we get that
$$
E_\varepsilon(M,h_0)\leqslant 2(1+\eta(\varepsilon)^2)\ ,
$$
and the result follows.
\end{proof}
\subsection{The lower bound}
\begin{prop}
Let $h$ be any Riemannian metric of curvature less than $-1$ on a $3$-manifold $M$ then $
E(M,h)\geqslant 2
$.
\end{prop}
\begin{proof} Since the curvature of a minimal surface $S$ is less than that of the ambient manifold by Gauß equation, in that case we get that all minimal surfaces have curvature less than $-1$. Hence by Gauß--Bonnet
$$ 4\pi(g-1)\geqslant \operatorname{Area}(S) \ .
$$
Applying proposition~\ref{pro:denum} yields the inequality. \end{proof}
\subsection{The equality case}
The equality case is the rigidity result for the asymptotic counting of \textcite{Calegari:2020uo}.
\begin{theo}
Let $h$ be a metric of curvature less than $-1$. Assume that $E(M,h)= 2$. Then $h$ is hyperbolic.
\end{theo}
We assume in the sequel that $h$ is a metric of curvature less than $-1$ with $E(M,h)= 2$. We give a proof in the simpler case when $h$ is close to a hyperbolic metric, so that Theorem~\ref{coro:KK} holds.
\subsubsection{Finding surfaces which are more and more hyperbolic}
Let $S$ be a closed surface.
Let $G_S(g)$ be the set of connected finite covers of $S$ of genus less than $g$, and $G_S$ the set of all connected finite covers. Then we have
\begin{prop}\label{pro:puchta} There is a constant $c_1$ only depending on $S$, so that
for $g$ large enough
$$
\sharp(G_S(g))\geqslant (c_1g)^{2g}\ .
$$
\end{prop}
\begin{proof}
By \textcite{Muller:2002tt}, the number of index $n$ subgroups of the fundamental group $\pi_1(S)$ of a genus $g_0$ orientable surface grows like $2n(n!)^{2g_0-2}(1+o(1))$. On the other hand, the genus $g$ of a surface whose fundamental group has index $n$ in $\pi_1(S)$ is $g=n(g_0-1)+1$. It follows that
$$
\sharp(G_S(g))\geqslant (c_1g)^{2g}\ .
$$
where $c_1$ only depends on $g_0$.
\end{proof}
\begin{prop}\label{pro:area-lim} For every positive integer $m$,
let $S_m$ be a finite union of stable minimal surfaces $S^1_m,\ldots, S^{p_m}_m$ with $\varepsilon(S^i_m)\leqslant \frac{1}{m}$. Let $\lambda^i_m$ be positive numbers so that
$$
\sum_{i=1}^{p_m}\lambda^i_m=1\ .
$$
Then
\begin{equation}
\lim_{m\to\infty}\sum_{i=1}^{p_m}\lambda^i_m\frac{\operatorname{Area}(S^i_m)}{4\pi(g^i_m-1)}=1\ .\label{eq:mmhh}
\end{equation}
where $g^i_m$ is the genus of $S^i_m$. \end{prop}
\begin{proof} Since Gauß--Bonnet formula gives $4\pi(g^i_m-1)\geqslant \operatorname{Area}(S^i_m)$, we have
\begin{equation*}
\limsup_{m\to\infty}\sum_{i=1}^{p_m}\lambda^i_m\frac{\operatorname{Area}(S^i_m)}{4\pi(g^i_m-1)}\leqslant
1\ .
\end{equation*}
Assume now that the limit in the equation~\eqref{eq:mmhh} is smaller than $k$, with $k<1$,
then for arbitrarily large $m$, we can find $i_m$ in $\{1,\ldots,p_m\}$ so that
\begin{equation}
k4\pi(g^{i_m}_m-1)\geqslant \operatorname{Area}(S^{i_m}_m) \ .\label{ineq:ksm}
\end{equation}
As in the beginning of paragraph~\ref{par:gmg}, let~$G_m$ be the set
of connected finite covers of~$S^{i_m}_m$, and~$G_m(g)$ the set of connected finite covers of genus~$g$. Obviously inequality~\eqref{ineq:ksm} holds for all surfaces in~$G_m$.
Thus we could apply proposition~\ref{pro:denum} ---using proposition~\ref{pro:puchta}--- to get
$$E_\varepsilon(M,h)\geqslant \frac{2}{k}> 2\ ,$$ and our contradiction.
\end{proof}
\subsubsection{From more and more hyperbolic to more and more totally geodesic}
The previous proposition has a consequence for laminar measures:
\begin{prop}\label{pro:muinf}
Let $\seq{\mu}$ be a sequence of laminar probability measures on ${\mathcal F_h(M)}$ converging to a laminar measure $\mu_\infty$. Assume that each $\mu_m$ is supported on finitely many closed leaves of $\mathcal F_h(M,\frac{1}{m})$. Then $\mu_\infty$ is supported on the set of totally geodesic maps from ${\bf H}^2$ to $M$ whose boundaries are circles. \end{prop}
\begin{proof} Let us consider the function $F$ on $\mathcal F_h(M)$ defined by associating to a conformal minimal immersion $\phi$ of ${\bf H}^2$ in $M$, the conformal factor $F(\phi)$ of $\phi$ at $i$.
Assume that $\phi_0$ is equivariant under a representation $\rho$ of a Fuchsian group $\Gamma$ and if $S$~is the image of $\phi_0$ in $M$, we have $$
\frac{\operatorname{Area}(S)}{4\pi (g-1)}=\int_{\mathcal F_h(M)} F\,{\rm d}\delta_{\phi_0}\ .
$$
Then, proposition~\ref{pro:area-lim} tells us that
$$
\lim_{m\to\infty}\Bigl(\int_{\mathcal F_h(M)} F \ {\rm d}\mu_m\Bigr)=1\ ,
\hbox{
hence
}
\int_{\mathcal F_h(M)} F \ {\rm d}\mu_\infty =1\ .
$$
By the Ahlfors--Schwarz--Pick Lemma, since the curvature of $S$ is less than $-1$, we have the inequality $F\leqslant 1$. It follows that $\mu_\infty$ is supported on the set $F=1$. In particular for any $\phi$ in the support of $\mu_\infty$, $\phi$ is an isometric immersion. Thus the curvature of the image of $S$ is $-1$. Since the curvature of $M$ is less than $-1$, this only happens when $S$ is a totally geodesic hyperbolic disk.
Finally, $\mu_\infty$ is supported on the intersection for all $m$ of $\mathcal F_h(M,\frac{1}{m})$. Since the map $\partial$ is continuous (corollary~\ref{cor:cont}), this intersection is $\mathcal F_h(M,0)$.
Thus the support of $\mu_\infty$ is contained in the set of conformal isometries into totally geodesic disks whose boundaries are circles. \end{proof}
\subsubsection{Conclusion}
In this conclusion, we finally use the restricting hypothesis that $h$ is close to a hyperbolic metric so that Theorem~\ref{coro:KK} holds.
Let $\seqm{\delta^0_m}$ be the Kahn--Markovi\'c sequence of laminar measures obtained in Theorem~\ref{theo:equi} for $\mathcal F_{h_0}(M)$. For every $m$, let us write
$$
\delta^0_m=\sum_{i=1}^m \lambda^i_m\delta_{\phi^i_m}\ , \hbox{ with } \sum_{i=1}^m \lambda^i_m=1\ ,
$$
where the $\phi^i_m$ are stable conformal immersions in $(M,h_0)$ equivariant under a cocompact group. Let
$$
\delta_m=\sum_{i=1}^m \lambda^i_m\delta_{\psi^i_m}\ .
$$
where $\psi^i_m$ is a stable conformal immersion in $(M,h)$ equivariant under a cocompact group and homotopic to $\phi^i_m$.
Let us extract a subsequence so that $\seq{\delta}$ converges to $\mu_\infty$, while, by the Equidistribution Theorem~\ref{theo:equi}, $\seqm{\delta^0_m}$ converges to $\mu_{Leb}$.
Let us consider the projections $\pi_h$ and $\pi_{h_0}$ as in proposition~\ref{pro:defpi} and observe that
$$
\pi_h(\delta_m)=\pi_{h_0}(\delta^0_m)\ .
$$
Thus by taking limits and using the continuity of $\pi_h$ and $\pi_{h_0}$, we have
$$
\pi_h(\mu_\infty)=\pi_{h_0}(\mu_{Leb})\ ,
$$
and in particular $\pi_h(\mu_\infty)$ and $\pi_{h_0}(\mu_{Leb})$ have the same support.
We conclude by making two observations:
\begin{enumerate}
\item the support of $\pi_{h_0}(\mu_{Leb})$ is the set of all circles,
\item any circle in the support of $\pi_h(\mu_\infty)$ bounds a totally geodesic hyperbolic plane by proposition~\ref{pro:muinf}.
\end{enumerate}
Thus every circle at infinity bounds a totally geodesic hyperbolic disk in $M$, hence by proposition~\ref{pro:manyhyp}, $h$ is hyperbolic.
\printbibliography
\end{document} |
\begin{document}
\title{Adjacency relationships forced by a degree sequence}
\begin{abstract}
There are typically several nonisomorphic graphs having a given degree sequence, and for any two degree sequence terms it is often possible to find a realization in which the corresponding vertices are adjacent and one in which they are not. We provide necessary and sufficient conditions for two vertices to be adjacent (or nonadjacent) in every realization of the degree sequence. These conditions generalize degree sequence and structural characterizations of the threshold graphs, in which every adjacency relationship is forcibly determined by the degree sequence. We further show that degree sequences for which adjacency relationships are forced form an upward-closed set in the dominance order on graphic partitions of an even integer.
\end{abstract}
\section{Introduction}\label{sec: intro}
A fundamental goal of the study of graph degree sequences is to identify properties that must be shared by all graphs having the same degree sequence. In this paper we address one of the simplest of graph properties: whether two given vertices are adjacent.
Most degree sequences $d$ are shared by multiple distinct graphs. We call these graphs the \emph{realizations} of $d$. In this paper we consider only labeled graphs, that is, we distinguish between realizations having distinct edge sets, even if these realizations are isomorphic. Throughout the paper we will consider a degree sequence $d=(d_1,\dots,d_n)$ and all realizations of $d$ with vertex set $V = \{1,\dots,n\}$ (we denote such a range of natural numbers by $[n]$) that satisfy the condition that each vertex $i$ has the corresponding degree $d_i$. We will assume in each case, unless otherwise stated, that $d_1 \geq \dots \geq d_n$.
For only one type of degree sequence are all the adjacency relationships in a realization completely determined. These are the \emph{threshold sequences}, those sequences having only one realization. \emph{Threshold graphs}, the graphs realizing threshold sequences, were introduced (via an equivalent definition) by Chv\'{a}tal and Hammer in \cite{ChvatalHammer73,ChvatalHammer77}, as well as by many other authors independently. These graphs have a number of remarkable properties; see the monograph~\cite{MahadevPeled95} for a survey and bibliography. We will refer to several of these properties in the course of the paper.
On the other end of the spectrum from the threshold sequences, many degree sequences have the property that \emph{any} fixed pair of vertices may be adjacent in one realization and nonadjacent in another; such is the case, for example, with $(1,1,1,1)$. For still other sequences, some adjacency relationships are determined while others are not; notice that in the two realizations of $(2,2,1,1,0)$ the vertices of degree $2$ must be adjacent, the vertices of degree $1$ must be nonadjacent, and the vertex of degree $0$ cannot be adjacent to anything, while a fixed vertex of degree $1$ may or may not be adjacent to a fixed vertex of degree $2$.
Suppose that $d$ is an arbitrary degree sequence. We classify pairs $\{i,j\}$ of vertices from $V$ as follows. If $i$ and $j$ are adjacent in every realization of $d$, we say that $\{i,j\}$ is a \emph{forced edge}. If $i$ and $j$ are adjacent in no realization of $d$, then $\{i,j\}$ is a \emph{forced non-edge}. Vertices in a forced edge or forced non-edge are \emph{forcibly adjacent} or \emph{forcibly nonadjacent}, respectively. If $\{i,j\}$ is either a forced edge or forced non-edge, we call it a \emph{forced pair}; otherwise, it is \emph{unforced}. By definition, in threshold graphs every pair of vertices is forced.
In this paper we characterize the forced pairs for general degree sequences. We present conditions that allow us to recognize these pairs from the degree sequence and describe the structure that they, as a set, create in any realization of the degree sequence.
As an alternative viewpoint, given a degree sequence $d$, we may define the \emph{intersection envelope graph} $I(d)$ (respectively, \emph{union envelope graph $U(d)$}) to be the graph with vertex set $[n]$ whose edge set is the intersection (resp., union) of the edge sets of all realizations of $d$. The forced edges of $d$ are precisely the edges of $I(d)$, and the forced non-edges of $d$ are precisely the non-edges of $U(d)$. As we will see, $I(d)$ and $U(d)$ are threshold graphs, and our results allow us to describe these graphs.
One particular property of threshold sequences contextualized by a study of forced pairs is the location of these sequences in the dominance (majorization) order on degree sequences having the same sum. Threshold sequences comprise the maximal elements in this order, and we show that as a collection, degree sequences with forced pairs majorize degree sequences having no forced pairs.
The structure of the paper is as follows: In Section 2 we provide necessary and sufficient conditions on a degree sequence for a pair $\{i,j\}$ to be a forced edge or forced non-edge among realizations of a degree sequence $d$. We then give an alternative degree sequence characterization in terms of Erd\H{o}s--Gallai differences, which we introduce. In Section 3 we study the overall structure of forced pairs in a graph, describing the envelope graphs $I(d)$ and $U(d)$. Finally, in Section 4 we present properties of forced pairs in the context of the dominance order on degree sequences.
Throughout the paper all graphs are assumed to be simple and finite. We use $V(H)$ to denote the vertex set of a graph $H$. A list of nonnegative integers is \emph{graphic} if it is the degree sequence of some graph. A \emph{clique} (respectively, \emph{independent set}) is a set of pairwise adjacent (nonadjacent) vertices.
\section{Degree sequence conditions for forced pairs} \label{sec: degree conditions}
We begin with a straightforward test for determining whether a pair of vertices is forced.
\begin{thm} \label{thm: forced iff not graphic}
Given the degree sequence $d=(d_1,\dots,d_n)$ and vertex set $[n]$, let $i,j$ be distinct elements of $[n]$ such that $i<j$. The pair $\{i,j\}$ is a forced edge if and only if the sequence \[d^+(i,j) = (d_1,\dots,d_{i-1},d_i + 1, d_{i+1}, \dots, d_{j-1}, d_j + 1, d_{j+1},\dots, d_n)\] is not graphic. The pair $\{i,j\}$ is a forced non-edge if and only if the sequence \[d^-(i,j) = (d_1,\dots,d_{i-1},d_i - 1, d_{i+1}, \dots, d_{j-1}, d_j - 1, d_{j+1},\dots, d_n)\] is not graphic.
\end{thm}
Before proving this theorem, we introduce some notation. Given a degree sequence $\pi$ of length $n$, let $\overline{\pi}$ denote the degree sequence of the complement of a realization of $\pi$, i.e., $\overline{\pi} = (n-1-d_n, \dots, n-1-d_1)$; we call $\overline{\pi}$ the complementary degree sequence of $\pi$. Note that $\pi$ is also the complementary degree sequence of $\overline{\pi}$.
\begin{proof}
We begin by proving the contrapositives of the statements in the first equivalence. Suppose first that $\{i,j\}$ is not a forced edge for $d$. There must exist a realization $G$ of $d$ in which $i$ and $j$ are not adjacent. The graph $H$ formed by adding edge $ij$ to $G$ has degree sequence $d^+(i,j)$, so $d^+(i,j)$ is graphic.
Suppose now that $d^+(i,j)$ is graphic, and let $H$ be a realization. Suppose also that $G$ is a realization of $d$. If $\{i,j\}$ is not an edge of $G$, then it is not a forced edge for $d$. Furthermore, if $\{i,j\}$ is an edge of $H$, then removing that edge produces a realization of $d$ with no edge between $i$ and $j$, so once again $\{i,j\}$ is not a forced edge. Suppose now that $ij$ is an edge of $G$ but not of $H$. Let $J$ be the symmetric difference of $G$ and $H$, that is, the graph on $[n]$ having as its edges all edges belonging to exactly one of $G$ and $H$. Color each edge in $J$ red if it is an edge of $G$ and blue if it is an edge of $H$. Since the degree of any vertex in $[n]$ other than $i$ and $j$ is the same in both $G$ and $H$, there is an equal number of red and blue edges meeting at such a vertex. For all such vertices, partition the incident edges into pairs that each contain a red and a blue edge. Now vertices $i$ and $j$ each are incident with one more blue edge than red; fix a vertex $v$ such that $iv$ is a blue edge in $J$ and partition the other edges incident with $i$ into pairs containing a red and a blue edge. Do the same thing for the edges incident with $j$ other than a fixed blue edge $jw$. We now find a path from $i$ to $j$ in $J$ whose edges alternate between blue and red. Note that $iv$ is a blue edge, and that this edge is paired with a red edge incident with $v$, which is in turn paired with a blue edge at its other endpoint, and so on. Since each edge of $J$ other than $iv$ and $jw$ is paired with a unique edge of the opposite color at each of its endpoints, the path beginning with $iv$ must continue until it terminates with edge $wj$. Now let $v_0,v_1,\dots,v_\ell$ be the vertices encountered on this path, in order, so that $v_0=i$, $v_1=v$, $v_{\ell-1} = w$, and $v_\ell = j$. The graph $G$ contains edges $v_1v_2, v_3v_4, \dots, v_{\ell-2}v_{\ell-1}$ and $v_\ell v_0$ and non-edges $v_0v_1, v_2v_3, \dots, v_{\ell-1}v_\ell$. 
Deleting these edges and adding the non-edges as new edges creates a realization of $d$ where $i$ and $j$ are not adjacent, so once again $\{i,j\}$ is not a forced edge for $d$.
Since $\{i,j\}$ is an edge in a realization of $\pi$ if and only if it is not an edge in a realization of $\overline{\pi}$, the pair $\{i,j\}$ is a forced non-edge for $d$ if and only if it is a forced edge for $\overline{d}$, which is equivalent by the preceding paragraph to having $\overline{d}^{+}(i,j)$ not be graphic. Since a list $\pi$ of integers is a degree sequence if and only if $\overline{\pi}$ is a degree sequence, and we can easily verify that $d^-(i,j) = \overline{\overline{d}^+(i,j)}$, the pair $\{i,j\}$ is a forced non-edge in $G$ if and only if $d^{-}(i,j)$ is not a graphic sequence.
\end{proof}
By combining Theorem~\ref{thm: forced iff not graphic} with a test for graphicality we may find alternate characterizations of forced pairs. Here we will use the well known Erd\H{o}s--Gallai criteria~\cite{ErdosGallai60} with a simplification due to Hammer, Ibaraki, and Simeone~\cite{HammerIbarakiSimeone78,HammerIbarakiSimeone81}. For any integer sequence $\pi=(\pi_1,\dots,\pi_n)$, define $m(\pi) = \max\{i:\pi_i \geq i-1\}$.
\begin{thm}[\cite{ErdosGallai60,HammerIbarakiSimeone78,HammerIbarakiSimeone81}] \label{thm: ErdosGallai}
A list $\pi=(\pi_1,\dots,\pi_n)$ of nonnegative integers in descending order is graphic if and only if $\sum_k \pi_k$ is even and \[\sum_{\ell \leq k} \pi_\ell \leq k(k-1) + \sum_{\ell>k} \min\{k, \pi_\ell\}\] for each $k \in \{1,\dots,m(\pi)\}$.
\end{thm}
For each $k \in [n]$, let $\operatorname{LHS}_k(\pi) = \sum_{\ell \leq k} \pi_\ell$ and $\operatorname{RHS}_k(\pi) = k(k-1) + \sum_{\ell>k} \min\{k, \pi_\ell\}$. We now define the \emph{$k$th Erd\H{o}s--Gallai difference} $\Delta_k(\pi)$ by \[\Delta_k(\pi) = \operatorname{RHS}_k(\pi) - \operatorname{LHS}_k(\pi).\] Note that an integer sequence with even sum is graphic if and only if these differences are all nonnegative.
\begin{thm} \label{thm: forced via EG diff}
Let $d=(d_1,\dots,d_n)$ be a graphic list, and let $i,j$ be integers such that $1 \leq i<j \leq n$. The pair $\{i,j\}$ is a forced edge for $d$ if and only if there exists $k$ such that $1 \leq k \leq n$ and one of the following is true:
\begin{enumerate}
\item[\textup{(1)}] $\Delta_k(d) \leq 1$ and $j \leq k$.
\item[\textup{(2)}] $\Delta_k(d) = 0$; $i \leq k < j$; and $k \leq d_j$.
\end{enumerate}
The pair $\{i,j\}$ is a forced non-edge for $d$ if and only if there exists $k$ such that $1 \leq k \leq n$ and one of the following is true:
\begin{enumerate}
\item[\textup{(3)}] $\Delta_k(d) \leq 1$ and $d_i < k < i$.
\item[\textup{(4)}] $\Delta_k(d) = 0$; $k<i$; and $d_j \leq k \leq d_i$.
\end{enumerate}
\end{thm}
\begin{proof}
By Theorems~\ref{thm: forced iff not graphic} and~\ref{thm: ErdosGallai}, $\{i,j\}$ is a forced edge if and only if there exists an integer $k$ such that $\Delta_k\left(d^+(i,j)\right) < 0$. We prove that this happens if and only if condition (1) or condition (2) holds. Let $k$ be an arbitrary element of $[n]$.
\emph{Case: $k<i$.} In this case neither condition (1) nor condition (2) holds. Furthermore, $\operatorname{LHS}_k(d^+(i,j)) = \operatorname{LHS}_k(d) \leq \operatorname{RHS}_k(d) \leq \operatorname{RHS}_k(d^+(i,j))$, so $\Delta_k\left(d^+(i,j)\right) \geq 0$.
\emph{Case: $j \leq k$.} Here condition (2) does not hold. Since $\operatorname{RHS}_k(d^+(i,j))=\operatorname{RHS}_k(d)$ and $\operatorname{LHS}_k(d^+(i,j)) \leq \operatorname{LHS}_k(d)+2$, we see that $\Delta_k\left(d^+(i,j)\right) < 0$ if and only if $\Delta_k(d) \leq 1$, which is equivalent to condition (1).
\emph{Case: $i \leq k < j$.} Note that condition (1) cannot hold in this case. Since $i \leq k$, we have $\operatorname{LHS}_k(d^+(i,j))=\operatorname{LHS}_k(d)+1$. If $\Delta_k(d) \geq 1$, then $\Delta_k(d^+(i,j)) \geq 0$. If $d_j<k$, then $\operatorname{RHS}_k(d^+(i,j)) = \operatorname{RHS}_k(d)+1$ and $\Delta_k(d^+(i,j)) \geq 0$. If $\Delta_k(d)=0$ and $d_j \geq k$, then $\operatorname{RHS}_k(d^+(i,j))=\operatorname{RHS}_k(d)$ and hence $\Delta_k\left(d^+(i,j)\right) = -1$. Hence $\Delta_k\left(d^+(i,j)\right) < 0$ is equivalent to condition (2).
We now consider forced non-edges of $d$. By Theorem~\ref{thm: forced iff not graphic}, $\{i,j\}$ is a forced non-edge if and only if there exists an integer $k$ such that $\Delta_k\left(d^-(i,j)\right) < 0$. We show that this happens if and only if condition (3) or condition (4) holds. Let $k$ be an arbitrary element of $[n]$. Note that if $k \geq i$ then neither (3) nor (4) holds, and $\operatorname{LHS}_k(d^-(i,j)) < \operatorname{LHS}_k(d)$, forcing $\Delta_k(d^-(i,j)) \geq 0$. Assume now that $k < i$. This forces $\operatorname{LHS}_k(d^-(i,j))=\operatorname{LHS}_k(d)$.
\emph{Case: $k\leq d_j$.} Neither condition (3) nor condition (4) holds, and $\operatorname{RHS}_k(d^-(i,j)) = \operatorname{RHS}_k(d)$, so $\Delta_k\left(d^-(i,j)\right) \geq 0$.
\emph{Case: $d_i < k$.} Here condition (4) fails. Since $\operatorname{RHS}_k(d^-(i,j))=\operatorname{RHS}_k(d)-2$, we see that $\Delta_k\left(d^-(i,j)\right) < 0$ if and only if $\Delta_k(d) \leq 1$, which is equivalent to condition (3).
\emph{Case: $d_j \leq k \leq d_i$.} Here condition (3) fails. Since $d_j\leq k$, we have $\operatorname{RHS}_k(d^-(i,j))=\operatorname{RHS}_k(d)-1$. If $\Delta_k(d) \geq 1$, then $\Delta_k(d^-(i,j)) \geq 0$. If $\Delta_k(d) = 0$, then $\Delta_k\left(d^-(i,j)\right) = -1$. Hence $\Delta_k\left(d^-(i,j)\right) < 0$ is equivalent to condition (4).
\end{proof}
\section{Structure induced by forced pairs}
Theorems~\ref{thm: forced iff not graphic} and~\ref{thm: forced via EG diff} allow us to determine if a single pair of vertices comprises a forced edge or forced non-edge by examining the degree sequence. In this section we determine the structure of all forcible adjacency relationships by describing the envelope graphs $I(d)$ and $U(d)$ introduced in Section~\ref{sec: intro}.
Recall that the edge set of $I(d)$ is the intersection of all edge sets of realizations of $d$, and $U(d)$ is the union of all edge sets of realizations, and realizations have the property that vertex $i$ has degree $d_i$ for all $i \in [n]$. Given a degree sequence $d$ and a realization $G$ of $d$, observe that $I(d)=U(d)=G$ if and only if $G$ is the unique realization of $d$; by definition this happens if and only if $G$ is a threshold graph. As we will see in Theorem~\ref{thm: envelopes are thresholds}, threshold graphs have a more general connection to envelope graphs of degree sequences.
Before proceeding we need a few basic definitions and results. An \emph{alternating 4-cycle} in a graph $G$ is a configuration involving four vertices $a,b,c,d$ of $G$ such that $ab,cd$ are edges of $G$ and neither $ad$ nor $bc$ is an edge. Observe that if $G$ has such an alternating 4-cycle, then deleting $ab$ and $cd$ from $G$ and adding edges $bc$ and $ad$ creates another graph in which every vertex has the same degree as it previously had in $G$. It follows that none of the pairs $\{a,b\}$, $\{b,c\}$, $\{c,d\}$, $\{a,d\}$ is forced in $G$. By a well known result of Fulkerson, Hoffman, and McAndrew~\cite{FulkersonEtAl65}, a graph $G$ shares its degree sequence with some other realization if and only if $G$ contains an alternating 4-cycle. Thus threshold graphs are precisely those without alternating 4-cycles.
\begin{lem}\label{lem: forcible interval}
Suppose that $d_k \geq d_j$. If $ij$ is a forced edge for $d$, then $ik$ is also a forced edge. If $ik$ is a forced non-edge, then $ij$ is also a forced non-edge.
\end{lem}
\begin{proof}
Suppose that $ij$ is a forced edge. If $ik$ is not a forced edge, then let $G$ be a realization of $d$ where $ik$ is not an edge. Since $d_k \geq d_j$ and $j$ has a neighbor (namely $i$) that $k$ does not, $k$ must be adjacent to a vertex $\ell$ to which $j$ is not. However, then there is an alternating 4-cycle with vertices $i,j,k,\ell$ that contains the edge $ij$, a contradiction, since $ij$ is a forced edge. By considering complementary graphs and sequences, this argument also shows that if $ik$ is a forced non-edge, then $ij$ is a forced non-edge as well.
\end{proof}
\begin{thm} \label{thm: envelopes are thresholds}
For any degree sequence $d$, both $I(d)$ and $U(d)$ are threshold graphs.
\end{thm}
\begin{proof}
If $I(d)$ is not a threshold graph, then it contains an alternating 4-cycle with edges we denote by $pq, rs$ and non-edges $qr, ps$. Without loss of generality we may suppose that $p$ has the smallest among the degrees of these four vertices. Since $q$ is forcibly adjacent to $p$, by Lemma~\ref{lem: forcible interval} it must be forcibly adjacent to $r$, a contradiction, since $qr$ is not an edge in $I(d)$.
Similarly, if $U(d)$ has an alternating 4-cycle on $p,q,r,s$ as above, and if we assume that $p$ has the largest degree of these vertices, then by Lemma~\ref{lem: forcible interval} since $s$ is forcibly nonadjacent to $p$ it must be forcibly nonadjacent to $r$, a contradiction.
\end{proof}
We now turn to a precise description of $I(d)$ and $U(d)$. Examining the four scenarios in Theorem~\ref{thm: forced via EG diff} under which forcible adjacency relationships may occur, we notice that if for some $k$ we have $\Delta_k=0$, then
\begin{itemize}
\item the set $B=\{i:1 \leq i \leq k\}$ is a clique in which all pairs of vertices are forcibly adjacent;
\item the set $A=\{i:i>k \text{ and } d_i<k\}$ is an independent set in which all pairs of vertices are forcibly nonadjacent; and
\item each vertex in $C=\{i:i>k \text{ and } d_i \geq k\}$ belongs to a forced edge with each vertex in $B$ and belongs to a forced non-edge with each vertex in $A$.
\end{itemize}
This structure of adjacencies within and between $A$, $B$, and $C$ has arisen many times in the literature. In particular, R.I.~Tyshkevich and others described a graph decomposition based upon it, which we now briefly recall. Our presentation is adapted from \cite{Tyshkevich00}, which contains a more detailed presentation and references to earlier papers.
A \emph{split graph} is a graph $G$ for which there exist disjoint sets $A,B$ such that $A$ is an independent set and $B$ is a clique in $G$, and $V(G)=A \cup B$. We define an operation $\circ$ with two inputs. The first input is a split graph $F$ with a given partition of its vertex set into an independent set $A$ and a clique $B$ (denote this by $(F,A,B)$), and the second is an arbitrary graph $H$. The \emph{composition $(F,A,B)\circ H$} is defined as the graph resulting from adding to the disjoint union $F+H$ all edges having an endpoint in each of $B$ and $V(H)$. For example, if we take the composition of the 5-vertex split graph with degree sequence $(3,2,1,1,1)$ (with the unique partition of its vertex set into a clique and an independent set) and the graph $2K_2$, then the result is the graph on the right in Figure~\ref{fig: composition}.
\begin{figure}
\caption{The composition of a split graph and a graph.}
\label{fig: composition}
\end{figure}
If $G$ contains nonempty induced subgraphs $H$ and $F$ and a partition $A,B$ of $V(F)$ such that $G = (F,A,B) \circ H$, then G is \emph{(canonically) decomposable}; otherwise $G$ is \emph{indecomposable}. Tyshkevich showed in~\cite{Tyshkevich00} that each graph can be expressed as a composition $(G_k,A_k,B_k) \circ \dots \circ (G_1,A_1,B_1) \circ G_0$ of indecomposable induced subgraphs (note that $\circ$ is associative); indecomposable graphs are those for which $k = 0$. This representation is known as the \emph{canonical decomposition} of the graph and is unique up to isomorphism of the indecomposable (partitioned) subgraphs involved.
As observed by Tyshkevich~\cite{Tyshkevich00}, the canonical decomposition corresponds in a natural way with a decomposition of degree sequences of graphs, and it is possible from the degree sequence to deduce whether a graph is canonically indecomposable. In~\cite{HeredUniII}, the author made explicit some relationships between the canonical decomposition of degree sequences and the Erd\H{o}s--Gallai inequalities recalled in Section~\ref{sec: degree conditions}.
Let $EG(d)$ be the list of nonnegative integers $\ell$ for which $\Delta_\ell=0$, ordered from smallest to largest. We adopt the convention that empty sums have a value of zero in the definitions of $\operatorname{LHS}_\ell(d)$ and $\operatorname{RHS}_\ell(d)$; thus $\Delta_0(d)=0$ for all $d$, and $EG(d)$ always begins with $0$.
\begin{thm}[\cite{HammerSimeone81, Tyshkevich80, TyshkevichEtAl81}] \label{thm: split seqs}
A graph $G$ with degree sequence $d=(d_1,\dots,d_n)$ is split if and only if $m(d)$ is a term of $EG(d)$.
\end{thm}
\begin{thm}[\cite{HeredUniII}, Theorem~5.6] \label{thm: EG and canon decomp}
Let $G$ be a graph with degree sequence $d=(d_1,\dots,d_n)$ and vertex set $[n]$. Suppose that $(G_k,A_k,B_k) \circ \dots \circ (G_1,A_1,B_1) \circ G_0$ is the canonical decomposition of $G$, where $A_0$ and $B_0$ partition $V(G_0)$ into an independent set and a clique, respectively, if $G_0$ is split. A nonempty set $W \subseteq V(G)$ is equal to the clique $B_j$ in the canonical component $G_j$ if and only if $W=\{\ell : t<\ell \leq t'\}$ for a pair $t,t'$ of consecutive terms in $EG(d)$. In this case the corresponding independent set $A_j$ is precisely the set $\{\ell \in [n]: t<d_\ell<t'\}$. Given a term $t$ of $EG(d)$, if $\ell > t$ and $d_\ell = t$ then the canonical component containing $\ell$ consists of only one vertex.
\end{thm}
Thus the condition $\Delta_k(d)=0$ in Theorem~\ref{thm: forced via EG diff} is intimately related to the composition operation $\circ$ and to the canonical decomposition. More generally, we now show that $\Delta_k(d)$ actually measures how far a realization of $d$ is from being a composition of the form described earlier, with slightly relaxed definitions of the sets $A$, $B$, and $C$. Given a subset $S$ of a vertex set of a graph, let $e(S)$ denote the number of edges in the graph having both endpoints in $S$, and let $\overline{e}(S)$ be the number of pairs of nonadjacent vertices in $S$. Given another vertex subset $T$, disjoint from $S$, let $e(S,T)$ denote the number of edges having an endpoint both in $S$ and in $T$, and let $\overline{e}(S,T)$ denote the number of pairs of nonadjacent vertices containing a vertex from each of $S$ and $T$.
\begin{lem}\label{lem: EG diff counts this}
Let $G$ be an arbitrary realization of a degree sequence $d=(d_1,\dots,d_n)$. Given fixed $k \in [n]$, let $B=\{i:1 \leq i \leq k\}$, and let $A$ and $C$ be disjoint sets such that $A \cup C = V(G)-B$, each vertex in $A$ has degree at most $k$, and each vertex in $C$ has degree at least $k$.
The $k$th Erd\H{o}s--Gallai difference is given by \[\Delta_k(d) = 2e(A) + 2\overline{e}(B) + e(A,C) + \overline{e}(B,C).\]
\end{lem}
\begin{proof}
Observe that summing the degrees in $B$ yields $2e(B)+e(A,B)+e(B,C)$, and a similar statement holds for $A$. Then
\begin{align*}
\Delta_k(d) &= k(k-1) + \sum_{\ell > k}\min\{k,d_\ell\} - \sum_{\ell \leq k} d_\ell\\
&= k(k-1) + \sum_{\ell \in A} d_\ell + k|C| - (2e(B)+e(A,B)+e(B,C))\\
&= 2\left(\binom{k}{2} - e(B)\right) + 2e(A) + e(A,C) + (|B||C| - e(B,C)),
\end{align*}
and the claim follows.
\end{proof}
Observe that Lemma~\ref{lem: EG diff counts this}, besides providing the corollary below, gives another illustration of the role that a value of $0$ or $1$ for $\Delta_k (d)$ has in producing forced edges and non-edges (as in Theorem~\ref{thm: forced via EG diff}) and in forcing the canonical decomposition structure (as in Theorem~\ref{thm: EG and canon decomp}).
\begin{cor} \label{cor: EG diff at least 2}
Let $d=(d_1,\dots,d_n)$ be a degree sequence. For all $k > m(d)$, we have $\Delta_k(d) \geq 2$.
\end{cor}
\begin{proof}
Since $k>m(d)$, we know that $d_k<k-1$, so any set $B$ of $k$ vertices of highest degree in a realization of $d$ cannot form a clique; thus $\Delta_k(d) \geq 2$ by Lemma~\ref{lem: EG diff counts this}.
\end{proof}
We now use our results in Section~\ref{sec: degree conditions} to determine $I(d)$ and $U(d)$. We begin with a quick observation and some definitions we will use throughout the theorem and its proof.
\begin{obs}\label{obs: two vtcs}
If an indecomposable canonical component $(G_i,A_i,B_i)$ has more than one vertex, then both $A_i$ and $B_i$ must have at least two vertices.
\end{obs}
Let $G$ be a graph with degree sequence $d=(d_1,\dots,d_n)$ on vertex set $[n]$, and suppose that $G$ has canonical decomposition $(G_k,A_k,B_k)\circ\dots\circ(G_1,A_1,B_1)\circ G_0$.
Let $p$ be the last element of $EG(d)$, and let $q$ be the largest value of $k$ for which $\Delta_k(d) \leq 1$. If $G_0$ is split, let $A_0,B_0$ be a partition of $G_0$ into an independent set and a clique, respectively. If $G_0$ is not split, then define \begin{align*}
B'_0 &= \{i \in [n]: p < i \leq q\};\\
A'_0 &= V(G_0) - B'_0;\\
A''_0 &= \{i \in [n] : i > q \text{ and } p < d_i < q \}; \\
B''_0 &= V(G_0) - A''_0.
\end{align*}
Further let $C_1$ (respectively $C_2$) denote an abstract split canonical component consisting of a single vertex lying in the independent set of the component (in the clique of the component). For $i \in \{1,2\}$ and $j$ a natural number, let $C_i^{j}$ represent the expression $C_i \circ \dots \circ C_i$, where there are $j$ terms $C_i$ in the composition.
\begin{thm}\label{thm: envelope formulas}
Given the graph $G$ with degree sequence $d$, with the canonical components of $G$ and other sets as defined above,
the graph $I(d)$ is isomorphic to
\[C_1^{|A_k|} \circ C_2^{|B_k|}\circ \dots \circ C_1^{|A_{1}|} \circ C_2^{|B_{1}|} \circ C_1^{|A_0|}\circ C_2^{|B_0|}\] if $G$ is split, and to
\[C_1^{|A_k|} \circ C_2^{|B_k|}\circ \dots \circ C_1^{|A_{1}|} \circ C_2^{|B_{1}|} \circ C_1^{|A'_0|}\circ C_2^{|B'_0|}\] otherwise.
Similarly, the graph $U(d)$ is isomorphic to
\[C_2^{|B_k|} \circ C_1^{|A_k|} \circ \dots \circ C_2^{|B_{1}|} \circ C_1^{|A_{1}|} \circ C_2^{|B_0|} \circ C_1^{|A_0|}\] if $G$ is split, and to \[C_2^{|B_k|} \circ C_1^{|A_k|} \circ \dots \circ C_2^{|B_{1}|} \circ C_1^{|A_{1}|} \circ C_2^{|B''_0|}\circ C_1^{|A''_0|}\] otherwise.
\end{thm}
\begin{proof}
By definition, $\Delta_q\leq 1$, and by Theorem~\ref{thm: EG and canon decomp}, it follows that each vertex $i \in [n]$ of $G$ belonging to a set $B_j$ for $j \geq 0$ satisfies $i \leq q$. Theorem~\ref{thm: forced via EG diff}(1) implies that any two vertices in a clique $B_j$ are joined by a forced edge, as are any two vertices in $B'_0$, if $G_0$ is not split.
Consider any two vertices $i,i'$ belonging to the set $A_j$ for $j \geq 0$. By Observation~\ref{obs: two vtcs}, $B_j$ must be nonempty, so it follows from Theorem~\ref{thm: EG and canon decomp} that $d_i<p$ and $d_{i'}<p$. Since $i,i'$ do not belong to $B_\ell$ for any $\ell$, Theorem~\ref{thm: EG and canon decomp} also implies that $i,i' > p$, so by Theorem~\ref{thm: forced via EG diff}(3) the pair $\{i,i'\}$ is a forced non-edge. Similarly, any two vertices in $A''_0$ are forcibly nonadjacent.
Now suppose that vertices $i,i'$ satisfy $i \in B_j$ and $i' \in V(G_\ell)$, with $\ell < j$. From the adjacencies required by the canonical decomposition we see that $d_{i'}$ is at least as large as $|B_k \cup B_{k-1} \cup \dots \cup B_j|$, and it follows from Theorem~\ref{thm: EG and canon decomp} that this latter number equals a term $t'$ of $EG(d)$ for which $i \leq t'$. By Theorem~\ref{thm: forced via EG diff}(2), the pair $\{i,i'\}$ is a forced edge.
Suppose instead that vertices $i,i'$ satisfy $i \in A_j$ and $i' \in V(G_\ell)$, with $\ell < j$. Again letting $t'=|B_k \cup B_{k-1} \cup \dots \cup B_j|$, Theorem~\ref{thm: EG and canon decomp} implies that $\Delta_{t'}(d)=0$ and that $i,i'>t'$. The adjacencies of the canonical decomposition imply that $d_{i'} \geq t'$ and that $d_{i} \leq t'$. Theorem~\ref{thm: forced via EG diff}(4) then implies that $\{i,i'\}$ is a forced non-edge.
We now show that all other pairs of vertices in $G$ are unforced, beginning with those within a split canonical component. Suppose that $i \in B_j$ and $i' \in A_j$ for some $j \geq 0$. Any neighbor of $i'$ in $G$ other than $i$ is a neighbor of $i$; furthermore, since $G_j$ is indecomposable, $i'$ has at least one non-neighbor in $B_j$, which must be a neighbor of $i$, so we conclude that $d_i > d_{i'}$ and $i<i'$. Now by Theorem~\ref{thm: EG and canon decomp}, there exist consecutive terms $t$ and $t'$ of $EG(d)$ such that $t < i \leq t'$ and $t < d_{i'} < t'$.
We verify that none of the conditions in Theorem~\ref{thm: forced via EG diff} are satisfied by the pair $\{i,i'\}$. First, since $G_j$ is indecomposable, vertex $t'$ must have at least one neighbor in $A_j$, so $d_{t'} \geq t'$. Thus $i'>t'$, and since $d_{i'}<t'$, we see that $\{1,\dots,i'\}$ is not a clique, so by Lemma~\ref{lem: EG diff counts this} we see that $\Delta_\ell(d) \geq 2$ for all $\ell \geq i'$. Thus condition (1) of Theorem~\ref{thm: forced via EG diff} does not apply to the pair $\{i,i'\}$.
Condition (2) does not apply, since $t'$ is the smallest term of $EG(d)$ at least as large as $i$, and $d_{i'}<t'$. Condition (3) likewise cannot apply, since $\{1,\dots,t'\}$ is a clique and hence $d_i \geq i-1$. Finally, since $t'$ is the smallest term of $EG(d)$ at least as large as $d_{i'}$, but $i \leq t'$, condition (4) does not apply.
It remains to show that $\{i,i'\}$ is unforced if $G_0$ is not split and vertices $i,i' \in V(G_0)$ don't both belong to $B'_0$ or both belong to $A''_0$. Assume that $i<i'$.
If at least one of $i,i'$ does not belong to $B'_0$, then we claim that $\{i,i'\}$ cannot be a forced edge. Indeed, note that $i'>q$ and $i>p$, so neither of conditions (1) or (2) of Theorem~\ref{thm: forced via EG diff} applies.
If at least one of $i,i'$ does not belong to $A''_0$, then we claim that $\{i,i'\}$ is not a forced non-edge. Indeed, note that $d_i \geq d_{i'}$, and since $G_0$ is indecomposable and has more than one vertex, we have $d_{i'} > p$; this implies that $\{i,i'\}$ fails condition (4). We also see that $i \leq q$ or $d_i \geq q$; in either case condition (3) does not apply.
Having characterized all pairs of vertices as forced or unforced, we can now summarize the structure of $I(d)$ and $U(d)$. If we form a correspondence between each vertex in $A_j$ (respectively, in $A'_0$, in $A''_0$, in $B_j$, in $B'_0$, in $B''_0$) with a vertex of $C_1^{|A_j|}$ (of $C_1^{|A'_0|}$, of $C_1^{|A''_0|}$, of $C_2^{|B_j|}$, of $C_2^{|B'_0|}$, of $C_2^{|B''_0|}$) in the claimed expressions for $I(d)$ and $U(d)$, the correspondence naturally leads to an exact correspondence between the edges in either of the first two expressions and the edges in $I(d)$. Likewise, the edges in the third and fourth expressions in the theorem statement correspond precisely to the edges in $U(d)$.
\end{proof}
A well known and useful characterization of threshold graphs (see~\cite[Theorem 1.2.4]{MahadevPeled95}) states that a graph is threshold if and only if it can be constructed from a single vertex by iteratively adding dominating and/or isolated vertices. The expressions in Theorem~\ref{thm: envelope formulas} describe how the envelope graphs of $d$ can be constructed in this way: because of the requirements of the operation $\circ$, as we read from right to left, a term $C_1^a$ corresponds to adding $a$ isolated vertices in sequence, and a term $C_2^b$ corresponds to adding $b$ dominating vertices.
\begin{exa}
If $d$ is the degree sequence of the graph on the right in Figure~\ref{fig: composition}, then $I(d)$ is formed by starting with a single vertex, adding three more isolated vertices in turn, adding two dominating vertices, and finishing with three more isolated vertices. The graph $U(d)$ is formed by starting with a single vertex, adding three more dominating vertices, three more isolated vertices, and then two more dominating vertices.
\end{exa}
Note that if $d$ is threshold, then $I(d)=U(d)=d$ (as expected), because every canonical component $(G_i,A_i,B_i)$ of a threshold graph contains only a single vertex (we see this from the dominating/isolated vertex construction described above), so for all $i$ either $A_i$ or $B_i$ is empty, and the expressions in Theorem~\ref{thm: envelope formulas} simplify to return the canonical decomposition of the unique realization of $d$.
\section{Threshold graphs and the dominance order}
In this section we compare the forcible adjacency relationships of degree sequences that are comparable under the dominance order.
Given lists $a=(a_1,\dots,a_k)$ and $b=(b_1,\dots,b_\ell)$ of nonnegative integers, with $a_1 \geq \dots \geq a_k$ and $b_1 \geq \dots \geq b_\ell$, we say that $a$ \emph{majorizes} $b$ and write $a \succeq b$ if $\sum_{i=1}^k a_i = \sum_{i=1}^\ell b_i$ and for each $j \in \{1,\dots,\min(k,\ell)\}$ we have $\sum_{i=1}^j a_i \geq \sum_{i=1}^j b_i$. The partial order induced by $\succeq$ on lists of nonnegative integers with a fixed sum $s$ and length $n$ is called the \emph{dominance} (or \emph{majorization}) \emph{order}, and we denote the associated partially ordered set by $\mathcal{P}_{s,n}$.
(We remark that requiring sequences to have the same length and allowing terms to equal 0 are slight departures from how the dominance poset is often described. We do so here for convenience in the statements of results below.)
The dominance order plays an important role in the study of graphic lists. It is known that if for $a,b \in \mathcal{P}_{s,n}$ it is true that $a$ is graphic and $a \succeq b$, then $b$ is also graphic; thus the degree sequences form an ideal in $\mathcal{P}_{s,n}$. The maximal graphic lists are precisely the threshold sequences~\cite{PeledSrinivasan89}.
We define a \emph{unit transformation} on a nonincreasing integer sequence to be the act of decreasing a sequence term by 1 and increasing an earlier term by 1 while maintaining the descending order of terms. This operation is best illustrated by the Ferrers diagram of the sequences, where sequence terms are depicted by left-justified rows of dots. Note that if $a$ results from a unit transformation on $b$, then the Ferrers diagram of $a$ differs from that of $b$ by moving a dot from one row of $b$ to a row higher up in the diagram. In Figure~\ref{fig: elem trans}, the second and third sequences each result from a unit transformation on the first sequence.
\begin{figure}
\caption{Ferrers diagrams depicting elementary transformations}
\label{fig: elem trans}
\end{figure}
A fundamental lemma due to Muirhead~\cite{Muirhead03} says that $a \succeq b$ if and only if $a$ may be obtained by performing a sequence of unit transformations on $b$.
\begin{thm} \label{thm: majorization preserves forced}
Let $d$ and $e$ be graphic elements of $\mathcal{P}_{s,n}$. If $d \succeq e$ and $\{i,j\}$ is a forced pair for $e$, then $\{i,j\}$ is a forced pair for $d$.
\end{thm}
\begin{proof}
We may obtain $d$ from a sequence of unit transformations on the sequence $e$. The intermediate sequences resulting from these individual transformations all majorize $e$, so it suffices to assume that $d$ can be obtained from just one unit transformation. In other words, we assume that there exist indices $s$ and $t$ such that $s<t$ and \[ d_{\ell} = \begin{cases}
e_\ell+1 & \ell = s\\
e_\ell-1 & \ell = t\\
e_\ell & \text{otherwise}
\end{cases}.
\]
Suppose now that $\{i,j\}$ is a forced edge for $e$. By Theorem~\ref{thm: forced iff not graphic} $e^+(i,j)$ is not graphic, so by Theorem~\ref{thm: ErdosGallai} there exists an index $k$ such that $k \geq i$ and \[\sum_{\ell \leq k} e^+(i,j)_\ell > k(k-1) + \sum_{\ell>k} \min\{k, e^+(i,j)_\ell\}.\] Since the actions of increasing two terms of a sequence and performing a unit transformation on a sequence together yield the same result regardless of the order in which they are carried out, we have
\begin{align*}
\sum_{\ell \leq k} d^+(i,j)_\ell &\geq \sum_{\ell \leq k} e^+(i,j)_\ell\\
&> k(k-1) + \sum_{\ell>k} \min\{k, e^+(i,j)_\ell\}\\
&\geq k(k-1) + \sum_{\ell>k} \min\{k, d^+(i,j)_\ell\}.
\end{align*}
Thus $d^+(i,j)$ is not graphic, and by Theorem~\ref{thm: forced iff not graphic} $\{i,j\}$ is a forced edge for $d$.
A similar argument holds if $\{i,j\}$ is a forced non-edge for $e$, making $\{i,j\}$ a forced non-edge for $d$.
\end{proof}
\begin{exa}
The degree sequence $(2,1,1,1,1)$ is majorized by $(2,2,1,1,0)$, which is in turn majorized by $(3,1,1,1,0)$. The first sequence has no forced pair. In the second sequence vertex 5 is forcibly nonadjacent to all other vertices, $\{3,4\}$ is a forced non-edge, and $\{1,2\}$ is a forced edge. These relationships are all preserved in $(3,1,1,1,0)$, and every other pair of vertices is forced as well, since $(3,1,1,1,0)$ is a threshold sequence.
\end{exa}
As illustrated in the previous example, forcible adjacency relationships come into existence as we progress upward in the dominance order, and they persist until the threshold sequences are reached, where every pair of vertices is a forced pair. Thus the proportion of all vertex pairs that are forced may be considered a measure of how close a degree sequence is to being a threshold sequence.
Our results now yield a consequence of Merris~\cite[Lemma 3.3]{Merris03}. We call a degree sequence \emph{split} if it has a realization that is a split graph.
\begin{cor}\label{cor: splits upward closed}
Let $d$ and $e$ be graphic elements of $\mathcal{P}_{s,n}$. If $d \succeq e$ and $e$ is split, then $d$ is split.
\end{cor}
\begin{proof}
As usual, let $G$ be a realization of $e$ with vertex set $[n]$. Since $e$ is split, by Theorem~\ref{thm: split seqs} we know that $\Delta_{m(e)} = 0$. By Theorem~\ref{thm: forced via EG diff} every pair of vertices from $\{1,\dots,m(e)\}$ forms a forced edge. Likewise, any pair of vertices from $\{m(e)+1,\dots,n\}$ forms a forced non-edge. By Theorem~\ref{thm: majorization preserves forced}, these forcible adjacency relationships exist also for $d$, so $\{1,\dots,m(e)\}, \{m(e)+1,\dots,n\}$ is a partition of the vertex set of any realization of $d$ into a clique and an independent set; hence $d$ is also split.
\end{proof}
Note that by Theorems~\ref{thm: forced via EG diff} and~\ref{thm: EG and canon decomp}, adjacencies and non-adjacencies between vertices in distinct canonical components, as well as adjacencies between two clique vertices and non-adjacencies between two independent-set vertices in split canonical components, are all forcible adjacency relationships. Thus every realization of the degree sequence of a canonically decomposable graph is canonically decomposable. It is natural to then, as we did for split sequences, refer to a degree sequence itself as \emph{decomposable} if it has a decomposable realization.
The forcible adjacency relationships between canonical components and inside split components further imply, via an argument similar to that of Corollary~\ref{cor: splits upward closed}, that canonically decomposable graphs have the same majorization property that split graphs do.
\begin{cor}
Let $d$ and $e$ be graphic elements of $\mathcal{P}_{s,n}$. If $d \succeq e$ and $e$ is canonically decomposable, then $d$ is canonically decomposable.
\end{cor}
More generally, all sequences with at least one forced pair form an upward-closed set in $\mathcal{P}_{s,n}$. We now show, in fact, that these sequences lie close in $\mathcal{P}_{s,n}$ to split or decomposable sequences. The key will be the observation that according to Lemma~\ref{lem: EG diff counts this}, having a small Erd\H{o}s--Gallai difference requires a graph to have a vertex partition that closely resembles that of a split or decomposable graph.
Our measurement of ``closeness'' in $\mathcal{P}_{s,n}$ will involve counting covering relationships. A unit transformation on a nonincreasing integer sequence is said to be an \emph{elementary transformation} if there is no longer sequence of unit transformations that produces the same result; in other words, an elementary transformation changes an integer sequence into one that immediately covers it in $\mathcal{P}_{s,n}$. As shown by Brylawski~\cite{Brylawski73}, a unit transformation on a sequence $b=(b_1,\dots,b_\ell)$ is an elementary transformation if and only if, supposing that the $p$th term of $b$ is increased and the $q$th term is decreased, we have either $q=p+1$ or $b_p=b_q$. The rightmost sequence in Figure~\ref{fig: elem trans} shows the result of an elementary transformation on the original sequence, while the middle sequence shows a non-elementary unit transformation.
\begin{thm}\label{thm: three steps}
If $e$ is a graphic sequence in $\mathcal{P}_{s,n}$ that induces any forcible adjacency relationships among the vertices of its realizations, then some sequence $d$ that is split or canonically decomposable is located at most three elementary transformations above $e$ in $\mathcal{P}_{s,n}$.
\end{thm}
\begin{proof}
By Theorem~\ref{thm: forced via EG diff}, $e$ can only force vertices to be adjacent or nonadjacent if $\Delta_k(e) \leq 1$ for some positive $k$. If for such a $k$ we have $\Delta_k(e)=0$, then by Theorems~\ref{thm: split seqs} and~\ref{thm: EG and canon decomp} we may let $d=e$.
Suppose instead that $\Delta_k(e)=1$ for some $k$, and let $G$ be a realization of $e$ on vertex set $[n]$. Partition $V(G)$ into sets $A$, $B$, and $C$ as in the statement of Lemma~\ref{lem: EG diff counts this}, with $B=\{i: 1 \leq i \leq k\}$. Since $\Delta_k(e)=1$, this lemma implies that $A$ is an independent set, $B$ is a clique, and exactly one of the following cases holds:
\begin{enumerate}
\item[(1)] there is a single edge joining a vertex in $A$ with a vertex in $C$, and all edges possible exist joining vertices in $B$ with vertices in $C$;
\item[(2)] there is a single non-edge between a vertex in $B$ and a vertex in $C$, and there are no edges joining vertices in $A$ with vertices in $C$.
\end{enumerate}
We consider each of these cases in turn. We will use the following statement, which is proved using elementary edge-switching arguments:
\noindent \textsc{Fact}~\cite[Lemma~3.2]{BarrusDonovan16}: \emph{Given a vertex $v$ of a graph $G$ and a set $T \subseteq V(G)-\{v\}$, suppose that $v$ has $p$ neighbors in $T$. For any set $S$ of $p$ vertices of $T$ having the highest degrees in $G$, there exists a graph $G'$ with the same vertex set as $G$ in which the neighborhood of $v$, restricted to $T$, is $S$, all neighbors of $v$ outside of $T$ are the same as they are in $G$, and every vertex has the same degree in $G'$ as in $G$.}
In the first case, let $a$ be the vertex of $A$ having a neighbor in $C$. By the fact above we may assume that the neighbor of $a$ in $C$ is a vertex having the highest degree in $G$ among vertices of $C$; call this neighbor $c$. Since the degree of $a$ is at most $k$, there must be some vertex in $B$ to which $a$ is not adjacent; using the fact again, we may assume that this non-neighbor (call it $b$) has the smallest degree in $G$ among vertices of $B$. Now deleting edge $ac$ and adding edge $ab$ produces a graph having a degree sequence $d$ which, using partition $A,B,C$, we see is canonically decomposable.
In the second case, some vertex of $C$ has a non-neighbor in $B$. By the fact above, we may assume that the non-neighbor in $B$ is a vertex $b$ having the lowest degree in $G$ among vertices of $B$. Now since every vertex of $C$ has degree at least $k$ and no neighbors in $A$, but some vertex in $C$ has a non-neighbor in $B$, this vertex in $C$ must have a neighbor in $C$. Using the fact again, we may then assume that the non-neighbor of $b$ in $C$ is a vertex $u$ having smallest degree among the vertices in $C$. Using the fact once again, we may assume that the neighbors of $u$ in $C-\{u\}$ have degrees as high as possible. Now let $c$ be a neighbor of $u$ that has maximum degree among the vertices of $C$. Deleting the edge $uc$ and adding the edge $ub$ produces a graph having degree sequence $d$ which, using partition $A,B,C$, we see is canonically decomposable.
In both cases, the effect of creating degree sequence $d$ from $e$ was to perform a unit transformation which reduced the largest degree of a vertex in $C$ and increased the smallest degree of a vertex in $B$. Since by assumption the degrees of vertices in $B$ are the highest in the graph, and the degrees of vertices in $C$ may be assumed to precede those of vertices in $A$ in the degree sequence, the creation of $d$ from $e$ is equivalent to a unit transformation on $e$. In fact, it is equivalent to an elementary transformation if $b$ and $c$ are the unique vertices with their respective degrees or if $b$ has the same degree as $c$. Otherwise, we may accomplish this unit transformation using two or three elementary transformations, as follows (we use $\operatorname{deg}(v)$ to denote the degree of a vertex $v$):
If both $\operatorname{deg}(b)$ and $\operatorname{deg}(c)$ appear multiple times in $d$ and $\operatorname{deg}(b) > \operatorname{deg}(c)+1$, then decrease the last term equal to $\operatorname{deg}(b)$ while increasing the first term equal to $\operatorname{deg}(b)$; decrease the last term equal to $\operatorname{deg}(c)$ while increasing the first term equal to $\operatorname{deg}(c)$; and decrease the term currently equal to $\operatorname{deg}(c)+1$ while increasing the term currently equal to $\operatorname{deg}(b)-1$.
If $\operatorname{deg}(b)$ appears multiple times in $d$ and either $\operatorname{deg}(c)$ appears only once or $\operatorname{deg}(b)=\operatorname{deg}(c)+1$, then decrease the last term equal to $\operatorname{deg}(b)$ while increasing the first term equal to $\operatorname{deg}(b)$; then decrease the last term equal to $\operatorname{deg}(c)$ while increasing the first term currently equal to $\operatorname{deg}(b)-1$.
If $\operatorname{deg}(c)$ appears multiple times in $d$ and $\operatorname{deg}(b)$ appears only once, then decrease the last term equal to $\operatorname{deg}(c)$ while increasing the first term equal to $\operatorname{deg}(c)$; then decrease the last term currently equal to $\operatorname{deg}(c)+1$ while increasing the first term currently equal to $\operatorname{deg}(b)$.
\end{proof}
We conclude by showing that the bound in Theorem~\ref{thm: three steps} is sharp for infinitely many degree sequences.
\begin{exa} Consider the sequence $s=\left((15+2j)^{(5)}, 6^{(7+2j)}, 3^{(7)}\right)$, where $j$ is any nonnegative integer; note that $\Delta_5(s) = 1$ and that $\Delta_k(s) \neq 0$ for all positive $k$. Let $s'$ and $s''$ denote sequences obtained by performing respectively one and two elementary transformations on $s$.
Observe by inspection that $s''_7 \geq s'_7 \geq s_7 = 6$ and $s''_8 = s'_8 = s_8 < 7$; thus $m(s)=m(s')=m(s'')=7$. To test whether any of $s,s',s''$ has a realization that is decomposable, by Theorem~\ref{thm: EG and canon decomp} and Corollary~\ref{cor: EG diff at least 2}, it suffices to test whether equality holds in any of the first seven Erd\H{o}s--Gallai inequalities for the corresponding sequence.
Recalling our notation from before, we see that since $\operatorname{LHS}_k(s) \leq \operatorname{LHS}_k(s') \leq \operatorname{LHS}_k(s'')$ and $\operatorname{RHS}_k(s) \leq \operatorname{RHS}_k(s') \leq \operatorname{RHS}_k(s'')$, if any of $s,s',s''$ satisfied the $k$th Erd\H{o}s--Gallai inequality with equality, it would follow that $\operatorname{RHS}_k(s) \leq \operatorname{LHS}_k(s'')$. Now consider the table below, which shows the maximum possible value for $\operatorname{LHS}_k(s'')$ and the value of $\operatorname{RHS}_k(s)$ for each $k \in \{1,\dots,7\}$.
\begin{center}
\begin{tabular}{ccc}
\hline
$k$ & max $\operatorname{LHS}_k(s'')$ & $\operatorname{RHS}_k(s)$\\ \hline
$1$ & $16+2j$ & $18+2j$ \\ \hline
$2$ & $32+4j$ & $36+4j$ \\ \hline
$3$ & $47+6j$ & $54+6j$ \\ \hline
$4$ & $61+8j$ & $65+8j$ \\ \hline
$5$ & $75+10j$ & $76+10j$ \\ \hline
$6$ & $82+10j$ & $87+12j$ \\ \hline
$7$ & $89+10j$ & $93+12j$ \\ \hline
\end{tabular}
\end{center}
We see that each of $s,s',s''$ is graphic. Furthermore, since in no case does $\operatorname{RHS}_k(s) \leq \operatorname{LHS}_k(s'')$, we conclude that any canonically decomposable degree sequence that majorizes $s$ must be separated from $s$ by at least three elementary transformations. (As guaranteed above, the sequence $\left(16+2j, (15+2j)^{(4)}, 6^{(6+2j)},5,3^{(7)}\right)$ is located three elementary transformations above $s$ and is the degree sequence of a canonically decomposable graph.)
\end{exa}
\end{document} |
\begin{document}
\title{Improper coloring of graphs on surfaces}
\author{Ilkyoo Choi}
\address{Department of Mathematics, Hankuk University of Foreign Studies, Yongin-si, Gyeonggi-do, Republic of Korea}
\email{ilkyoo@hufs.ac.kr}
\thanks{Ilkyoo Choi is supported by the Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Education (NRF-2018R1D1A1B07043049), and also by Hankuk University of Foreign Studies Research Fund.
}
\author{Louis Esperet}
\address{Laboratoire G-SCOP (CNRS, Universit\'e Grenoble-Alpes), Grenoble, France}
\email{louis.esperet@grenoble-inp.fr}
\thanks{Louis Esperet is partially supported by ANR Projects STINT
(\textsc{anr-13-bs02-0007}) and GATO (\textsc{anr-16-ce40-0009-01}), and LabEx PERSYVAL-Lab (\textsc{anr-11-labx-0025}).}
\date{}
\sloppy
\begin{abstract}
A graph $G$ is $(d_1,\ldots,d_k)$-colorable if its vertex set can be partitioned into $k$ sets $V_1,\ldots,V_k$, such that for each $i\in\{1, \ldots, k\}$, the subgraph of $G$ induced by $V_i$ has maximum degree at most $d_i$.
The Four Color Theorem states that every planar graph is $(0,0,0,0)$-colorable, and a classical result of Cowen, Cowen, and Woodall shows that every planar graph is $(2,2,2)$-colorable.
In this paper, we extend both of these results to graphs on surfaces.
Namely, we show that every graph embeddable on a surface of Euler
genus $g>0$ is $(0,0,0,9g-4)$-colorable and
$(2,2,9g-4)$-colorable. Moreover, these graphs are also $(0,0,O(\sqrt{g}),O(\sqrt{g}))$-colorable and
$(2,O(\sqrt{g}),O(\sqrt{g}))$-colorable.
We also prove that every triangle-free graph that is embeddable on a surface of Euler genus $g$ is $(0, 0, O(g))$-colorable.
This is an extension of Gr\"{o}tzsch's Theorem, which states that triangle-free planar graphs are $(0, 0, 0)$-colorable. Finally, we prove that every graph of girth at least 7 that is embeddable on a surface of Euler genus $g$ is $(0,O(\sqrt{g}))$-colorable. All these results are best possible in several ways as the girth condition is sharp, the constant maximum degrees cannot be improved, and the bounds on the maximum degrees depending on $g$ are tight up to a constant multiplicative factor.
\end{abstract}
\maketitle
\section{Introduction}
For a sequence $(d_1,d_2,\ldots,d_k)$ of $k$ integers, we say that a graph $G$ is \emph{$(d_1,d_2,\ldots,d_k)$-colorable} if each vertex of $G$ can be assigned a color from the set $\{1,2,\ldots,k\}$ in such a way that for each $i\in \{1, \ldots, k\}$, a vertex colored $i$ has at most $d_i$ neighbors colored $i$.
In other words, each color class $i$ induces a subgraph of maximum degree at most $d_i$.
Note that a proper coloring is the same as a $(0,0,\ldots,0)$-coloring.
For an integer $d$, a $(d,d,\ldots,d)$-coloring is sometimes called a \emph{$d$-improper coloring} or \emph{$d$-defective coloring}.
The Four Color Theorem~\cite{AH77a,AH77b} states that every planar graph is $(0,0,0,0)$-colorable, and it was proved by Cowen, Cowen, and Woodall~\cite{CCW86} that every planar graph is also $(2,2,2)$-colorable.
For any integer $k$, it is not difficult to construct a planar graph that is not $(k, k)$-colorable; one can even find such planar graphs that are triangle-free (see \cite{Skr99}).
A natural question to ask is how these results can be extended to graphs embeddable on surfaces with higher (Euler) genus.
Cowen, Cowen, and Woodall~\cite{CCW86} proved that every graph of Euler genus $g$ is $(c_4,c_4,c_4,c_4)$-colorable with $c_4=\max \{14,\tfrac13(4g-11)\}$, and conjectured that the same should hold with three colors instead of four.
This was proved by Archdeacon~\cite{Arc87}, who showed that every graph of Euler genus $g$ is $(c_3,c_3,c_3)$-colorable with
$c_3=\max \{15,\tfrac12(3g-8)\}$.
The value $c_3$ was subsequently improved to $\max\{12,6+\sqrt{6g}\}$ by Cowen, Goddard, and Jesurum~\cite{CGJ97}, and eventually to $\max\{9,2+\sqrt{4g+6}\}$ by Woodall~\cite{Woo11}.
In this paper, we will show that in the original result of Cowen, Cowen, and
Woodall~\cite{CCW86}, it suffices that only one of the four color classes is not a stable set.
Namely, we will prove that every graph that is embeddable on a surface of Euler genus $g>0$ is both $(0,0,0,9g-4)$-colorable and $(2,2,9g-4)$-colorable.
These come as natural extensions of the fact that planar graphs are $(0,0,0,0)$-colorable and $(2,2,2)$-colorable.
Interestingly, there is a constant $c_1>0$ such that the bound $9g-4$ in these results cannot be replaced by $c_1\cdot g$, so there is no hope to obtain a bound of the same order as $c_3$ above.
In other words, the growth rate of the bound $9g-4$ cannot be improved to a sublinear function of $g$ in both results.
However, when two color classes are allowed to have non-constant maximum degrees, we show that the bound $9g-4$ can be improved to a sublinear function of $g$ in both results.
Namely, any graph embeddable on a surface of Euler genus $g$ is both $(0,0,K_1(g),K_1(g))$-colorable and $(2, K_2(g), K_2(g))$-colorable with $K_1(g)=20+\sqrt{48g+481}$ and $K_2(g)=38+\sqrt{84g+1682}$.
We also show that the growth rate of $K_1(g)$ and $K_2(g)$ are tight in terms of $g$.
A famous theorem of Gr\"{o}tzsch~\cite{G59} states that every triangle-free planar graph is 3-colorable. In this paper, we prove that this can be extended to graphs embeddable on surfaces as follows:
every triangle-free graph embeddable on a surface of Euler genus $g$ is $(0, 0, K_3(g))$-colorable where $K_3(g)=\left\lceil\frac{10g+32}{3}\right\rceil$.
We prove that $K_3(g)$ cannot be replaced by a sublinear function of $g$, even for graphs of girth at least 6.
It was proved by \v{S}krekovski~\cite{Skr99} that for any $k$, there exist triangle-free planar graphs that are not $(k,k)$-colorable.
This shows that there does not exist any 2-color analogue of our result on triangle-free graphs on surfaces.
Choi, Choi, Jeong, and Suh~\cite{CCJS14} proved that every graph of girth at least 5 embeddable on a surface of Euler genus $g$ is $(1, K_4(g))$-colorable where $K_4(g)=\max\left\{10,\left\lceil \frac{12g+47}{7}\right\rceil\right\}$.
They also show that $K_4(g)$ cannot be replaced by a sublinear function of $g$.
On the other hand, for each $k$, Borodin, Ivanova, Montassier, Ochem, and Raspaud~\cite{BIMOR10} constructed a planar graph of girth 6 that is not $(0,k)$-colorable.
Finally, we prove that every graph of girth at least 7 embeddable on a surface of Euler genus $g$ is $(0,5+\ceil{\sqrt{14g+22}})$-colorable.
On the other hand, we show that there is a constant $c_2>0$ such that for infinitely many values of $g$, there exist graphs of girth at least 7 embeddable on a surface of Euler genus $g$, with no $(0,\lfloor c_2 \sqrt{g}\rfloor)$-coloring.
The results of this paper together with the aforementioned results completely solve\footnote{up to a constant multiplicative factor for
the maximum degrees $d_i$, depending on $g$.} the following problem: \emph{given
integers $\ell\leqslant 7$, $k$, and $g$, find the smallest $k$-tuple $(d_1,\ldots,d_k)$ in
lexicographic order, such that every
graph of girth at least $\ell$ embeddable on a surface of Euler genus $g$ is $(d_1,\ldots,d_k)$-colorable.}
\section{Preliminaries}
\subsection{Graphs on surfaces}
All graphs in this paper are simple, which means without loops and multiple edges.
In this paper, a {\em surface} is a non-null compact connected 2-manifold without boundary.
We refer the reader to the monograph of Mohar and Thomassen~\cite{MoTh} for background on graphs on surfaces.
A surface is either orientable or non-orientable.
The \emph{orientable surface~$\mathbb{S}_h$ of genus~$h$} is obtained by adding $h\geqslant0$
\emph{handles} to the sphere, and the \emph{non-orientable surface~$\mathbb{N}_k$ of genus~$k$} is formed by adding $k\geqslant1$ \emph{cross-caps} to the sphere.
The {\em Euler genus} $\mathbf{eg}(\Sigma)$ of a surface $\Sigma$ is defined as twice its genus if $\Sigma$ is orientable, and as its genus if $\Sigma$ is non-orientable.
We say that an embedding is \emph{cellular} if every face is homeomorphic to an open disc of~$\mathbb{R}^2$.
Euler's Formula states that if~$G$ is a graph with a cellular embedding in a surface~$\Sigma$, with vertex set~$V$, edge set~$E$, and face set~$F$, then $|V|-|E|+|F|\:=\:2-\mathbf{eg}(\Sigma)$.
If $f$ is a face of a graph~$G$ cellularly embedded in a surface~$\Sigma$, then a \emph{boundary walk of~$f$} is a walk consisting of vertices and edges as they are encountered when walking along the whole boundary of~$f$, starting at some vertex and following some orientation of the face.
The \emph{degree of a face~$f$}, denoted~$d(f)$, is the number of edges on a boundary walk of~$f$.
Note that some edges may be counted more than once.
Let $G$ be a graph embedded in a surface $\Sigma$.
A cycle $C$ of $G$ is said to be {\em non-contractible} if $C$ is non-contractible as a closed curve in $\Sigma$.
Also, $C$ is called {\em separating} if $C$ separates $\Sigma$ in two connected pieces, otherwise $C$ is \emph{non-separating}. It is well known that only three types of non-contractible cycles exist (see~\cite{MoTh}): 2-sided separating cycles, 2-sided non-separating cycles, and 1-sided cycles (the latter only appear in non-orientable surfaces, and are non-separating).
The following fact, which is often called the \emph{3-Path Property}, will be used:
if $P_{1}, P_{2},P_{3}$ are three internally disjoint paths with the same endpoints in $G$, and $P_{1} \cup P_{2}$ is a non-contractible cycle, then at least one of the two cycles $P_{1} \cup P_{3}$, $P_{2} \cup P_{3}$ is also non-contractible; see for instance~\cite[Proposition 4.3.1]{MoTh}.
We will need the following simple observation about shortest non-contractible cycles.
The proof presented here is due to Gwena\"el Joret.
\begin{obs}\label{obs:shortest}
Let $G$ be a graph embedded on some surface.
If $C$ is a shortest non-contractible cycle in $G$, then $C$ is an induced cycle and each vertex of $G$ has at most 3 neighbors in $C$.
\end{obs}
\begin{proof}
It is easy to see that if $C$ has a chord, then by the 3-Path Property, $G$ contains a non-contractible cycle shorter than $C$ (recall that $G$ is simple), a contradiction.
This shows that $C$ is an induced cycle, and in particular, every vertex of $C$ has at most 2 neighbors in $C$.
Assume now that some vertex $v$ not in $C$ has $k\geqslant 4$ neighbors in $C$ (in particular, $C$ contains at least 4 vertices).
Each subpath of $C$ whose end points are neighbors of $v$ and whose internal vertices are not adjacent to $v$ is called a \emph{basic subpath} of $C$.
Note that the edges of $C$ are partitioned into $k$ basic subpaths of $C$.
Since $v$ has at least 4 neighbors in $C$, each basic subpath contains at most $|C|-3$ edges.
A \emph{basic cycle} is obtained from a basic subpath $P$ of $C$ with endpoints $u,w$ by adding the vertex $v$ and the edges $vu$ and $vw$, which are the \emph{rays} of the basic cycle.
The embedding of $G$ gives an order on the edges incident to $v$.
If the rays of some basic cycle are not consecutive (among the rays of basic cycles) in the order around $v$, then this basic cycle cannot bound a region homeomorphic to an open disk, and is thus non-contractible.
Since this basic cycle has length at most $|C|-3+2<|C|$, this contradicts the minimality of $C$.
We can therefore assume the two rays of each basic cycle are consecutive in the order around $v$, and each basic cycle bounds a region homeomorphic to an open disk.
By gluing these $k$ regions together, we obtain that $C$ bounds a region homeomorphic to an open disk, which contradicts the fact that $C$ is non-contractible.
\end{proof}
\subsection{Coloring Lemmas}
Let $K\geqslant 1$, and $k>j\geqslant 1$ be three integers. Let $d_1,d_2,\ldots,d_k$ be such that
$d_1=\cdots=d_j=K$ and $\max\{d_{j+1},\ldots,d_k\}<K$. In this
section, we study the properties of a graph $G_{j, k}$ that is not $(d_1, \ldots,
d_k)$-colorable, while all its induced subgraphs are $(d_1, \ldots,
d_k)$-colorable.
Let $c_1, \ldots, c_k$ be the $k$ colors of a $(d_1, \ldots, d_k)$-coloring $\varphi$ such that the maximum degree of the graph induced by the color $c_i$ is at most $d_i$ for $i\in\{1, \ldots, k\}$.
A vertex $v$ is {\it $c_i$-saturated} if $\varphi(v)=c_i$ and $v$ has $d_i$ neighbors colored $c_i$.
Note that by definition, a $c_i$-saturated vertex has at least $d_i$ neighbors.
For any integer $d$, a \emph{$d$-vertex} is a vertex of degree $d$, a
\emph{$d^+$-vertex} is a vertex with degree at least $d$, and a \emph{$d^-$-vertex} is a vertex with degree at most $d$.
The same notation applies to faces instead of vertices.
\begin{lem}\label{lem:vx-degree}
Every $(K+k-1)^-$-vertex of $G_{j, k}$ has at least $j$ neighbors that are $(K+k)^+$-vertices.
\end{lem}
\begin{proof}
Assume for the sake of contradiction that a $(K+k-1)^-$-vertex $v$ has at most $j-1$ neighbors that are $(K+k)^+$-vertices.
By hypothesis, $G_{j, k}-v$ has a $(d_1, \ldots, d_k)$-coloring
$\varphi$. Observe that the colors $c_1, \ldots, c_k$ must all appear in the neighborhood of $v$, since otherwise we could extend $\varphi$ to $G_{j, k}$ by coloring $v$ with the missing color from $c_1, \ldots, c_k$.
Since $v$ is adjacent to at most $j-1$ vertices of degree at least
$K+k$, there exists a color $c_\ell$ with $\ell\in\{1, \ldots, j\}$
such that no neighbor of $v$ that is a $(K+k)^+$-vertex is colored
with $c_\ell$. Assume that $v$ has a $c_\ell$-saturated neighbor
$u$. Then $u$ has degree at most $K+k-1$, and thus has at most $K+k-2$
neighbors distinct from $v$. Among these neighbors, color $c_\ell$ has
to appear $K$ times, so there exists a color distinct from $c_\ell$
(there are $k-1$ such colors) that does not appear in the neighborhood of $u$.
Therefore, we can extend the coloring $\varphi$ to all of $G_{j, k}$
by recoloring all $c_\ell$-saturated neighbors of $v$ with colors
distinct from $c_\ell$ and then letting $\varphi(v)=c_\ell$.
We obtained a $(d_1, \ldots, d_k)$-coloring of $G_{j, k}$, which is a contradiction.
\end{proof}
\begin{lem}\label{lem:num-high}
There are at least $1+\sum_{i=2}^{k}(d_i+1)$ vertices in $G_{j, k}$ that are $(K+k)^+$-vertices.
\end{lem}
\begin{proof}
Let $H$ be the set of $(K+k)^+$-vertices of $G_{j, k}$, and assume for the sake of contradiction that $|H|\leqslant \sum_{i=2}^{k}(d_i+1)$.
Partition $H$ into $k-1$ sets $S_2, \ldots, S_k$ such that $|S_i|\leqslant d_i+1$ for $i\in\{2, \ldots, k\}$.
Let $\varphi$ be a coloring of the vertices of $H$ obtained by
assigning color $c_i$ to each vertex of $S_i$, for each $i\in\{2, \ldots, k\}$.
Since each $S_i$ contains at most $d_i+1$ vertices, the maximum degree of
the graph induced by $S_i$ cannot be more than $d_i$, so $\varphi$ is
indeed a $(d_1, \ldots, d_k)$-coloring of the subgraph of $G_{j,k}$
induced by $H$.
We now extend $\varphi$ to a $(d_1, \ldots, d_k)$-coloring $\varphi'$
of $G_{j, k}$ in the following greedy fashion: consider a fixed
ordering of the vertices in $V(G_{j, k})-H$ and for each vertex $v$ in
this ordering, we do the following: if the neighborhood of $v$ does
not contain some color $c_i$ with $i\geqslant 2$ then we assign $c_i$ to
$v$, and otherwise we assign $c_1$ to $v$.
To verify that $\varphi'$ is a $(d_1, \ldots, d_k)$-coloring of $G_{j, k}$, we only need to check that the vertices colored with $c_1$ induce a graph of maximum degree at most $d_1$.
Since no vertex in $H$ is colored with $c_1$ we know that every vertex $v$ colored with $c_1$ has degree at most $K+k-1$.
Also, $v$ must have neighbors colored with $c_2, \ldots, c_k$ by the greedy algorithm.
Now, $v$ cannot have $K+1$ neighbors colored with $c_1$ since it has degree at most $K+k-1$.
This shows that $\varphi'$ is a $(d_1, \ldots, d_k)$-coloring of $G_{j, k}$, which is a contradiction.
\end{proof}
\subsection{Discharging procedure}\label{subsec:discharging}
When an embedding of a counterexample $G$ is fixed, we can let $F(G)$ denote the set of faces of this embedding.
We will prove that $G$ cannot exist by assigning an \emph{initial charge} $\mu(z)$ to each $z\in V(G) \cup F(G)$, and then applying a {\it discharging procedure} to end up with {\it final charge} $\mu^*(z)$ at $z$.
The discharging procedure will preserve the sum of the initial charge, yet, we will prove that the final charge sum is greater than the initial charge sum, and hence we find a contradiction to conclude that the counterexample $G$ does not exist.
\section{Graphs on Surfaces}\label{sec:main}
\subsection{One part with large maximum degree}
Given a connected subgraph $H$ of a graph $G$, let $G/H$ denote the
graph obtained from $G$ by contracting the edges of $H$ into a single
vertex (and deleting loops and multiple edges in the resulting graph).
The proof of the next result uses a technique that is similar to a tool introduced in~\cite{KT12}, yet our presentation is quite different.
\begin{thm}\label{thm:deg}
For every $g\geqslant 0$, every connected graph $G$ of Euler genus $g$,
and every vertex $v$ of $G$, the graph $G$ has a connected subgraph $H$ containing $v$, such that $G/H$ is planar and every vertex of $G$ has at most
$\max\{9g-4,1\}$ neighbors in $V(H)$.
\end{thm}
\begin{proof}
We will prove the theorem by induction on $g\geqslant 0$. If $g=0$, then $G$ is
planar and the result directly follows by taking $H$ as the subgraph
of $G$ induced by $\{v\}$. In the
remainder, we may thus assume that $g>0$.
Let $G'$ be a connected graph of Euler genus $g'$ with $0\leqslant g' <g$, and let $P$ be a shortest path between two vertices $u$ and $w$ of $G'$.
Since $P$ is a shortest path, each vertex of $G'$ has at most 3 neighbors in $V(P)$.
Note that the graph $G^*=G'/P$, which is the graph obtained from $G'$ by contracting $P$ into a single vertex $v^*$, has Euler genus at most $g'$.
If $g'=0$, then both $G'$ and $G^*$ are planar.
If $g'>0$, then by the induction hypothesis, $G^*$ has a connected subgraph $H^*$ containing $v^*$, such that $G^*/H^*$ is planar and every vertex of $G^*$ has at most $9g'-4$ neighbors in $V(H^*)$.
Let $H'$ be the subgraph of $G'$ induced by the vertices of $H^*-v^*$ and $P$.
Since $H^*$ contains $v^*$, we know that $H'$ is connected, and thus $G'/H'$ is well-defined.
Note that $G'/H'$ is planar.
For a vertex $x$ of $G'$, if $x$ is on $P$, then $x$ has at most two neighbors in $V(P)$ and at most $9g'-4$ neighbors in $V(H^*)$, and therefore $x$ has at most $9g'-2$ neighbors in $V(H')$.
Otherwise $x \not\in P$, and $x$ has at most three neighbors in $V(P)$ and at most $9g'-4$ neighbors in $V(H^*)$ (including $v^*$ if $x$ has a neighbor in $P$), and therefore $x$ has at most $9g'-2$ neighbors in $V(H')$.
We proved that for any $0\leqslant g' <g$, any connected graph $G'$ of Euler genus $g'$, and any pair $u,w$ of vertices of $G'$, there is a connected subgraph $H'$ of $G'$ containing $u$ and $w$ such that $G'/H'$ is planar and every vertex of $G'$ has at most $\max\{3,9g'-2\}$ neighbors in $V(H')$.
This shall be used repeatedly in the remainder of the proof and we sometimes call it the \emph{refined induction}.
Given a graph $G$ with positive Euler genus $g$ and a specified vertex
$v$, let $C$ be a shortest non-contractible cycle in some minimum
Euler genus embedding of $G$.
Such a cycle exists, since otherwise $G$ would be embeddable in the plane and we would have $g=0$.
Assume first that $C$ is a 2-sided separating cycle. By cutting along $C$ (as described in~\cite[Section 4.2]{MoTh}, for example), we obtain two graphs $G_1$ and $G_2$ embedded in two surfaces $\Sigma_1$ and $\Sigma_2$ of Euler genus $g_1>0$ and $g_2>0$, respectively, such that $g=g_1+g_2$.
By symmetry, we can assume that $v$
lies in $G_1$.
Note that $C$ corresponds to a face $f_1$ and a face $f_2$ in $G_1$ and $G_2$, respectively. For $i=1,2$, let $G_i^*$ be the graph obtained from $G_i$ by contracting all the vertices incident with $f_i$ into a single vertex $v_i$.
Note that $G_1^*$ and $G_2^*$ are embeddable on surfaces of Euler genus $g_1$ and $g_2$, respectively.
By the refined induction hypothesis, there is a connected subgraph $H_1^*$ of $G_1^*$ containing $v$ and $v_1$, such that $G_1^*/H_1^*$ is planar, and every vertex of $G_1^*$ has at most $9g_1-2$ neighbors in $V(H_1^*)$.
By the induction hypothesis, there is also a connected subgraph $H_2^*$ of $G_2^*$ containing $v_2$, such that $G_2^*/H_2^*$ is planar, and every vertex of $G_2^*$ has at most $9g_2-4$ neighbors in $V(H_2^*)$.
Let $H$ be subgraph of $G$ induced by the vertices of $H_1^*-\{v_1\}$, $C$, and $H_2^*-\{v_2\}$.
We know that $H$ is connected and contains $v$.
Moreover, $G/H$ is also planar since it is obtained by identifying $v_1$ and $v_2$ from the two planar graphs $G_1^*/H_1^*$ and $G_2^*/H_2^*$.
Since $C$ is a shortest non-contractible cycle, it follows from Observation~\ref{obs:shortest} that $C$ is an induced cycle and each vertex not in $C$ has at most three neighbors in $C$.
As a consequence, each vertex of $C$ has at most $(9g_1-2)+(9g_2-4)+2=9g-4$ neighbors in $V(H)$, and each vertex not in $C$ has at most $\max\{(9g_1-2)+2,(9g_2-4)+2\}\leqslant 9g-9$ neighbors in $V(H)$.
Thus, we have obtained a connected subgraph $H$ containing $v$ such that $G/H$ is planar and every vertex of $G$ has at most $9g-4$ neighbors in $V(H)$, as desired.
Assume now that $C$ is a 1-sided cycle.
By cutting along $C$ we obtain a graph $G'$ embedded in a surface $\Sigma'$ of Euler genus
$g'\in\{0, \ldots, g-1\}$ in which $C$ corresponds to a face $f$.
Contract all the vertices incident with $f$ into a single vertex $v^*$, and note that the resulting graph $G^*$ can also be embedded in $\Sigma'$.
By the refined induction hypothesis, $G^*$ has a connected subgraph $H^*$ containing $v$ and $v^*$ such that $G^*/H^*$ is planar and every vertex of $G^*$ has at most $\max\{3,9g'-2\}\leqslant 9g'+3\leqslant 9g-6$ neighbors in $V(H^*)$.
Using the same argument as above, the subgraph $H$ of $G$ induced by
the vertices of $H^*-\{v^*\}$ and $C$ is connected, $G/H$ is planar, and every vertex of $G$ has at most $9g-4$ neighbors in $V(H)$.
It remains to consider the case when $C$ is a 2-sided non-separating cycle.
In this case, cutting along $C$ yields a graph $G'$ embeddable on a surface $\Sigma'$ of Euler genus $g'\leqslant g-2$, in which $C$ corresponds to two faces $f_1$ and $f_2$ lying in the same connected component.
We take a shortest path $P$ between $f_1$ and $f_2$ in $G'$, and then
contract all the vertices of $P$ and vertices incident with $f_1$ or $f_2$ into a single vertex $v^*$.
Note that the resulting graph $G^*$ is embeddable on $\Sigma'$.
By the refined induction, $G^*$ has a connected subgraph $H^*$ containing $v$ and $v^*$ such that $G^*/H^*$ is planar and every vertex of $G^*$ has at most $\max\{3,9g'-2\}\leqslant 9g'+3$ neighbors in $V(H^*)$.
Let $H$ be the subgraph of $G$ induced by the vertices of $C$, $P$, and $H^*-\{v^*\}$.
Since $H$ is connected, $G/H$ is well-defined and is therefore planar.
Let $u$ be a vertex of $G$ not in $C\cup P$.
Since $C$ is a shortest non-contractible cycle in $G$, by Observation~\ref{obs:shortest} the vertex $u$ has at most 3 neighbors in $C$.
Since $P$ is a shortest path in $G'$, $u$ has at most 3 neighbors in $V(P)$ and therefore $u$ has at most 6 neighbors in $C\cup P$.
It follows that $u$ has at most $(9g'+3)+6-1=9g'+8\leqslant 9g-10$ neighbors in $V(H)$.
By Observation~\ref{obs:shortest}, a vertex $u$ of $C\cup P$ has at most $3+2=5$ neighbors in $C\cup P$.
It follows that $u$ has at most $(9g'+3)+5\leqslant 9g-10$ neighbors in $V(H)$.
Consequently, $H$ is a connected subgraph containing $v$ such that $G/H$ is planar and each
vertex of $G$ has at most $9g-4$ neighbors in $V(H)$, as desired.
\end{proof}
We are now able to obtain the two following results as simple consequences of Theorem~\ref{thm:deg}.
\begin{thm}\label{thm:000k}
For each $g> 0$, every graph of Euler genus $g$ has a $(0,0,0,9g-4)$-coloring.
\end{thm}
\begin{proof}
Let $G$ be a graph of Euler genus $g>0$.
We may assume that $G$ is connected, since we can color each connected component independently (and each of its connected components has Euler genus at most $g$).
By Theorem~\ref{thm:deg}, $G$ has a connected subgraph $H$ such that $G/H$ is planar and every vertex of $G$ has at most $9g-4$ neighbors in $V(H)$.
By the Four Color Theorem, $G/H$ has a proper 4-coloring.
Assume without loss of generality that the vertex $v$ of $G/H$ resulting from the contraction of $H$ has the fourth color.
We extend this coloring to $G$ by assigning the fourth color to all vertices of $H$.
Since each vertex of $H$ has at most $9g-4$ neighbors in $V(H)$, and each neighbor of a vertex of $H$ outside $H$ does not have the fourth color, the obtained coloring is indeed a $(0,0,0,9g-4)$-coloring of $G$, as desired.
\end{proof}
\begin{thm}\label{thm:22k}
For each $g> 0$, every graph of Euler genus $g$ has a $(2,2,9g-4)$-coloring.
\end{thm}
\begin{proof}
Let $G$ be a graph of Euler genus $g>0$.
As before, we may assume that $G$ is connected, since we can color each connected component independently.
By Theorem~\ref{thm:deg}, $G$ has a connected subgraph $H$ such that $G/H$ is planar and every vertex of $G$ has at most $9g-4$ neighbors in $V(H)$.
Cowen, Cowen, and Woodall~\cite{CCW86} proved that for every planar graph $G'$ and any specified vertex $v'$ in $G'$, the graph $G'$ has a $(2,2,2)$-coloring in which $v'$ has no neighbor of its color.
It follows that $G/H$ has a $(2,2,2)$-coloring in which the vertex $v$ of $G/H$ resulting from the contraction of $H$ has no neighbor of its color;
without loss of generality assume that $v$ has the third color.
We extend this coloring to $G$ by assigning the third color to all vertices of $H$.
Since each vertex of $H$ has at most $9g-4$ neighbors in $V(H)$, and each neighbor of a vertex of $H$ outside $H$ does not have the third color, the obtained coloring is indeed a $(2,2,9g-4)$-coloring of $G$, as desired.
\end{proof}
We now prove that Theorems~\ref{thm:000k} and~\ref{thm:22k} are best
possible, up to the multiplicative constant 9. More precisely, we will
show that $9g-4$ cannot be replaced by a sublinear function of $g$ in
Theorems~\ref{thm:000k} and~\ref{thm:22k}.
Given a graph $H$ and an integer $k$, we construct the graph $S(H,k)$ as
follows. We start with a copy of $H$, which we call the \emph{basic
copy} of $H$.
For each vertex $v$ in the basic copy, we add $k$ new pairwise
disjoint copies of $H$, and add all possible edges between $v$ and
these $k$ copies of $H$.
Consider the graph $G_1=S(K_4,k+1)$, for some integer $k$. Note that the
blocks of $G_1$ consist of one copy
of $K_4$ and $4(k+1)$ copies of $K_5$. Since the Euler genus of a
graph is the sum of the Euler genera of its blocks (see for instance
Theorem 4.4.3 in~\cite{MoTh}), $G_1$ has Euler genus $4(k+1)$.
Assume for the sake of contradiction that $G_1$ has a
$(0,0,0,k)$-coloring, say with colors $1,2,3,4$, where the fourth
color induces a graph with maximum degree at most $k$. Since colors
$1,2,3$ induce stable sets, at least one of the vertices of the basic
copy of $K_4$, call it $v$, is colored 4. Since $v$ has at most $k$
neighbors colored 4, at least one of the copies of $K_4$ joined to $v$
has no vertex colored 4, and hence is properly colored with $1,2,3$, which is a contradiction.
It follows that $G_1$ is a graph of Euler genus $g$
with no $(0,0,0,\tfrac{g}{4}-1)$-coloring (and such a
graph can be constructed for infinitely many values of $g$).
We now consider $G_2=S(K_7,k+1)$, for some integer $k$. The blocks of
the graph
$G_2$ consist of one copy of $K_7$ (of Euler genus 2) and $7(k+1)$
copies of $K_8$ (each of Euler genus 4). Therefore, $G_2$ has Euler
genus $28k+30$. If $G_2$ admits some $(2,2,k)$-coloring with colors $1,2,3$,
where colors $1,2$ induce a graph with maximum degree 2 and color 3
induces a graph with maximum degree $k$, then some vertex $v$ of the
basic copy of $K_7$ in $G_2$ has color $3$. As before, $v$ has to be
joined to a copy of $K_7$ in which all the vertices have color 1 or 2,
which is a contradiction. Therefore, we found, for infinitely many values of $g$, a graph with Euler genus
$g$ with no $(2,2,\lceil \tfrac{g-30}{28}\rceil)$-coloring.
By considering the graph $S(K_n,\ell)$, for large $n$ and $\ell$, it
is not difficult to see that for any $k$, there is a constant
$\epsilon>0$ such that we can construct (for infinitely many values of
$g$) graphs of Euler genus $g$ with no $(k,k,\lceil\epsilon\,
g\rceil)$-coloring.
Note however that if we let the maximum degree of the
second color class be a function of $g$, then the
maximum degree of the third color class can be made sublinear: it can
be derived from the main result of~\cite{Woo11} that every graph of
Euler genus $g$ is $(9,O(\sqrt{g}),O(\sqrt{g}))$-colorable. In the
next subsection, we will prove that every graph of
Euler genus $g$ is $(2,O(\sqrt{g}),O(\sqrt{g}))$-colorable and the
constant 2 there is best possible. It is a folklore result that for any $k$, there exist planar graphs that are not
$(1,k,k)$-colorable. Since we have not been able to find a reference
of this result, we include a construction below for the sake of
completeness. This result implies that Theorems~\ref{thm:000k},
\ref{thm:22k}, \ref{thm:2kk}, and \ref{thm:00kk} cannot be improved by reducing the number of colors,
or the maximum degree of the monochromatic components (except for the
color classes whose degree depends on $g$).
{\bf Construction of planar graphs that are not
$(1,k,k)$-colorable.}
In a $(1, k, k)$-coloring, let $1, k_1,k_2$ be
the three colors where the vertices of color $1, k_1, k_2$ induce a
graph of maximum degree at most $1, k, k$, respectively.
Given a planar graph $G$ and two adjacent vertices $x$ and $y$, by
\emph{thickening} the edge $xy$ we mean adding $2k+1$ pairwise disjoint paths on 3
vertices to $G$, and making all the newly added vertices adjacent to
both $x$ and $y$ (see Figure~\ref{fig-non1kk}, left). Note that this can be done in such a way that the
resulting graph $H$ is also planar. We claim that in any $(1, k,
k)$-coloring $c$ of $H$, we do not have
$\{c(x),c(y)\}=\{k_1,k_2\}$. Otherwise, some path on 3 vertices joined
to $x$ and $y$ would not contain colors $k_1$ and $k_2$, and then some
vertex of color 1 would have two neighbors colored 1, a contradiction.
Now, take a cycle $C$ on $3k+1$
vertices, and add a vertex $z$ adjacent to all the vertices of $C$. The
obtained graph $G_z$ is planar. Now, thicken all the edges of $G_z$
joining $z$ and $C$, and
call the resulting graph $H_z$ (see Figure~\ref{fig-non1kk}, center). We claim that in any $(1, k,k)$-coloring $c$ of $H_z$, $c(z)=1$. Suppose for the sake of
contradiction that $z$ has color $k_1$ or $k_2$ (say $k_1$ by
symmetry). Then at least $2k+1$ of the neighbors of $z$ in $C$ have
color 1 or $k_2$. Since no three consecutive vertices of $C$ have
color 1, at least one vertex $u$ of $C$ has color $k_2$. Since the
edge $uz$ was thickened, this contradicts the previous paragraph.
\begin{figure}
\caption{A construction of a planar graph that is not $(1,k,k)$-colorable.}
\label{fig-non1kk}
\end{figure}
Our construction now proceeds as follows. Start with a triangle $abc$,
and then identify $a$ with the vertex $z$ of some copy of $H_z$, $b$
with the vertex $z$ of a second copy of $H_z$, and $c$ with the vertex $z$ of
a third copy of $H_z$ (see Figure~\ref{fig-non1kk}, right). Note that in any $(1, k, k)$-coloring of this graph,
at least one of $a,b,c$ has a color distinct from 1. It then follows from
the previous paragraph that this graph is not $(1, k, k)$-colorable.
\subsection{Two parts with large maximum degrees}
\begin{thm}\label{thm:2kk}
Every graph embeddable on a surface of Euler genus $g$ is $(2, K, K)$-colorable where $K=K(g)=38+\sqrt{84g+1682}$.
\end{thm}
\begin{proof}
Assume for the sake of contradiction that there is a graph $G$ embeddable on a surface of Euler genus $g$ that is not $(2,K,K)$-colorable. We choose $g$ minimum, and with respect to this, we choose $G$ such that the sum of the number of vertices and the number of edges is minimum. By the minimality of $g$ we may assume that $G$ is cellularly embedded on a surface of Euler genus $g$ (see~\cite[Propositions 3.4.1 and 3.4.2]{MoTh}) and from now on, we fix this embedding.
By the minimality of $G$, we can also assume that $G$ is connected and has minimum degree at least 3.
A \emph{high} and \emph{low} vertex is a vertex of degree at least $K+3$ and at most $K+2$, respectively.
By Lemma~\ref{lem:vx-degree}, every low vertex is adjacent to at least two high vertices.
By Lemma~\ref{lem:num-high}, $G$ contains at least $K+5$ high vertices.
\begin{claim}\label{cl:8}
No two vertices of degree at most 4 are adjacent.
\end{claim}
\begin{proof}
Assume for the sake of contradiction that two vertices $u,v$ of degree at most 4 are adjacent.
By the minimality of $G$, the graph obtained from $G$ by removing the edge $uv$ has a $(2,K,K)$-coloring $c$.
Let the three colors be $2,k_1,k_2$ so that the maximum degree of the graph induced by $2,k_1,k_2$ is at most $2,K,K$, respectively.
Since $G$ itself is not $(2,K,K)$-colorable, both $u$ and $v$ are colored 2 and at least one of $u,v$, say $u$, has two neighbors (distinct from $v$) that are also colored 2.
As a consequence, either $k_1$ or $k_2$ does not appear in the neighborhood of $u$.
We can therefore recolor $u$ with the missing color to get a $(2,K,K)$-coloring of $G$, a contradiction.
\end{proof}
We will use the discharging procedure laid out in Subsection~\ref{subsec:discharging}.
For a vertex $v$ and a face $f$ of $G$, the initial charge is $d(v)-6$ and $2d(f)-6$, respectively.
The initial charge sum is $6g-12$ by Euler's formula.
Here are the discharging rules:
\begin{enumerate}[(R1)]
\item Each face distributes its initial charge (evenly) to its incident vertices of degree 3.
\item Each high vertex sends charge $\tfrac{13}{14}$ to each low neighbor.
\item For each high vertex $v$ and each sequence of three consecutive neighbors $u_1,u_2,u_3$ of $v$ in clockwise order around $v$ such that $u_2$ is high, $v$ sends charge $\tfrac{13}{28}$ to each of $u_1$ and $u_3$.
\item Every low vertex of degree at least 5 sends charge $\tfrac3{14}$ to each neighbor of degree at most 4.
\end{enumerate}
We now analyze the charge of each vertex and each face after the discharging procedure.
Since every face has degree at least 3, every face has nonnegative initial charge and does not send more than its initial charge by (R1).
Therefore, the final charge of each face is nonnegative.
For a $3$-vertex $v$, let $x,y,z$ be the neighbors of $v$.
By Lemma~\ref{lem:vx-degree} and Claim~\ref{cl:8}, we may assume without loss of generality that $x,y$ are high and $z$ has degree at least 5.
First, assume that the face $f$ incident to the edges $vx$ and $vy$ is a triangle, which implies that $x$ and $y$ are adjacent.
Then $v$ receives charge $\tfrac{13}{14}$ (by (R2)) and $\tfrac{13}{28}$ (by (R3)) from $x$, and the same amount from $y$. Note that $v$ also receives $\tfrac3{14}$ from $z$ by (R4).
As a consequence, the final charge of $v$ is at least $-3 +2\cdot \tfrac{13}{14}+2 \cdot \tfrac{13}{28}+\tfrac3{14}=0$.
Now assume that the face $f$ has degree $d\geqslant 4$.
Then, $v$ receives charge $\tfrac{13}{14}$ from each of $x$ and $y$ by (R2), and $\tfrac3{14}$ from $z$ by (R4). But since $f$ contains at least two high vertices, it also sends charge at least $\tfrac{2d-6}{d-2}\geqslant 1$ to $v$ (since $d\geqslant 4$).
As a consequence, the final charge of $v$ is at least $-3 +2\cdot \tfrac{13}{14}+\tfrac3{14}+1=\tfrac1{14}$.
Let $v$ be a vertex of degree 4. Since $v$ has at least two high neighbors, and all the neighbors of $v$ have degree at least 5, $v$ receives charge at least $2\cdot \tfrac{13}{14}$ by (R2) and $2\cdot \tfrac{3}{14}$ by (R4). The final charge of $v$ is therefore at least $-2+2\cdot \tfrac{13}{14}+2\cdot \tfrac{3}{14}=\tfrac27$.
Let $v$ be a low vertex of degree $d\geqslant 5$.
Then $v$ receives charge at least $2\cdot \tfrac{13}{14}$ by (R2) since it has at least two high neighbors, and sends charge at most $(d-2)\tfrac3{14}$ by (R4). Therefore, the final charge of $v$ is at least $d-6+2\cdot \tfrac{13}{14}-(d-2)\tfrac3{14}\geqslant \tfrac{3}{14}$.
Finally, let $v$ be a high vertex of degree $d$ (recall by definition, $d\geqslant K+3$).
Then $v$ sends charge at most $\tfrac{13}{14}d$ by (R2) and (R3) and its final charge is at least $d-6-\tfrac{13}{14}d=\tfrac{d}{14}-6\geqslant \tfrac{K-81}{14}$.
Since there are at least $K+5$ high vertices, the total final charge (which equals $6g-12$) is at least $(K+5)\tfrac{K-81}{14}$. We obtain $K^2-76K-84g-237\leqslant 0$, and this contradicts our choice of $K$ since $K$ satisfies $K^2-76K-84g-237=1$.
\end{proof}
We now prove that the order of magnitude of $K(g)$ in Theorem~\ref{thm:2kk} is best possible. For a given $k\geqslant 0$, we construct the following graph $G_k$. Start with a copy of $K_4$ (which we call the {\it basic copy} of $K_4$), together with $k+1$ other disjoint copies of $K_4$, and add all possible edges between the vertices of the basic copy of $K_4$ and the vertices of the other copies of $K_4$ (but no edge between two non-basic copies of $K_4$).
These edges are called the {\it support edges} of the construction.
Now, for each support edge $uv$, create $2k+1$ new disjoint copies of $K_4$ and join $u$ and $v$ to all the newly created vertices.
Note that the resulting graph $G_k$ has $128k^2+196k+72$ vertices and
$448k^2+694k+252$ edges. It follows from Euler's Formula (and the fact
that any connected graph has a minimum Euler genus embedding that is
cellular) that any connected
graph on $n$ vertices and $m$ edges has Euler genus at most $m-n+1$.
In particular, the graph $G_k$ has Euler genus at most $320k^2+498k+181$.
Consider any $(2,k,k)$-coloring of $G_k$. We adopt the same convention as in the previous proof (the colors are named $2,k_1,k_2$).
Then at least one of the 4 vertices of the basic copy of $K_4$, call it $u$, is colored $k_1$ or $k_2$, say $k_1$.
Since $u$ is adjacent to all the vertices in the $k+1$ non-basic copies of $K_4$, at least one of them contains a vertex $v$ of color $k_2$.
At most $k$ of the $2k+1$ copies of $K_4$ joined to both $u,v$ contain a vertex colored $k_1$, and at most $k$ of them contain color $k_2$.
Therefore, at least one copy of $K_4$ contains vertices only colored with 2, which is a contradiction.
It follows that $G_k$ is not $(2,k,k)$-colorable.
Consequently, there is a constant $c>0$ and infinitely many values of $g$, for which we can construct a graph embeddable on a surface of Euler genus $g$, with no $(2,\lfloor c\sqrt{g}\rfloor,\lfloor c\sqrt{g}\rfloor)$-coloring.
Note that the same analysis shows that $G_k$ is not $(0,0,k,k)$-colorable.
We can even replace each copy of $K_4$ by a triangle, and this property remains true.
Therefore this graph also shows that we can construct, for infinitely many values of $g$, a graph embeddable on a surface of Euler genus $g$, with no $(0,0,\lfloor c\sqrt{g}\rfloor,\lfloor c\sqrt{g}\rfloor)$-coloring. The next result shows that this is also asymptotically best possible.
\begin{thm}\label{thm:00kk}
Every graph embeddable on a surface of Euler genus $g$ is $(0,0,K,K)$-colorable, with $K=K(g)=20+\sqrt{48g+481}$.
\end{thm}
\begin{proof}
Assume for the sake of contradiction that there is a graph $G$ embeddable on a surface of Euler genus $g$ that is not $(0,0,K,K)$-colorable. We choose $g$ minimum, and with respect to this, we choose $G$ such that the number of vertices is minimum. By the minimality of $g$ we may assume that $G$ is cellularly embedded on a surface of Euler genus $g$ (see~\cite[Propositions 3.4.1 and 3.4.2]{MoTh}) and from now on, we fix this embedding.
Moreover, we can assume that $G$ is edge-maximal with respect to this embedding (and such that $G$ is simple), since if a supergraph of $G$ can be $(0,0,K,K)$-colored, then $G$ can also be $(0,0,K,K)$-colored. In particular, it follows that for every vertex $v$, there is a circular ordering on the neighbors of $v$ such that any two consecutive vertices in this ordering are adjacent (note that $G$ does not necessarily triangulate the surface it is embedded in).
By the minimality of $G$, we can also assume that $G$ is connected and has minimum degree at least 4.
A \emph{high} and \emph{low} vertex is a vertex of degree at least $K+4$ and at most $K+3$, respectively.
By Lemma~\ref{lem:vx-degree}, every low vertex is adjacent to at least two high vertices.
By Lemma~\ref{lem:num-high}, $G$ contains at least $K+4$ high vertices.
\begin{claim}\label{cl:4tri}
Let $v$ be a $4$-vertex with neighbors $u_1,u_2,u_3,u_4$.
If $vu_1u_2$, $vu_2u_3$, $vu_3u_4$, and $vu_4u_1$ are triangular faces, then $u_1u_3$ and $u_2u_4$ are edges in $G$.
\end{claim}
\begin{proof}
Without loss of generality, assume that $u_1$ and $u_3$ are not adjacent.
Remove $v$ and identify $u_1$ and $u_3$ into a single vertex.
Note that this can be done in such a way that the resulting graph is still embeddable on the same surface.
By the minimality of $G$, the resulting graph is $(0,0,K,K)$-colorable and any $(0,0,K,K)$-coloring can easily be extended to $v$ since only three colors appear in its neighborhood, a contradiction.
\end{proof}
We will use the discharging procedure laid out in Subsection~\ref{subsec:discharging}.
For a vertex $v$ and a face $f$ of $G$, the initial charge is $d(v)-6$ and $2d(f)-6$, respectively.
The initial charge sum is $6g-12$ by Euler's formula.
Here are the discharging rules:
\begin{enumerate}[(R1)]
\item Each face of degree at least 4 sends charge $\tfrac14$ to each incident vertex of degree 4.
\item Each high vertex sends charge $\tfrac78$ to each low neighbor.
\item For each high vertex $v$ and each sequence of three consecutive neighbors $u_1,u_2,u_3$ of $v$ in clockwise order around $v$ such that $u_2$ is high, $v$ sends charge $\tfrac7{16}$ to each of $u_1$ and $u_3$.
\item Every low vertex of degree at least 5 sends charge $\tfrac14$ to each neighbor of degree 4.
\end{enumerate}
We now analyze the charge of each vertex and each face after the discharging procedure.
Every face of degree 3 has initial charge $0$, and since it is not involved in any discharging rules, the final charge is also $0$.
Every face $f$ of degree $d\geqslant 4$ starts with charge $2d-6$ and sends at most $\tfrac{d}{4}$ by (R1). The final charge of $f$ is therefore at least $2d-6-\tfrac{d}{4}\geqslant 1$.
Let $v$ be a vertex of degree 4. Then $v$ receives charge $\tfrac78$ from each of its (at least) two high neighbors by (R2).
If $v$ either has another neighbor of degree at least 5 or is incident to a face of degree at least 4, then $v$ receives an additional charge of $\tfrac14$ and its final charge is therefore at least $-2+\tfrac78+\tfrac78+\tfrac14=0$.
Otherwise, we can assume that $v$ is adjacent to precisely two high vertices $u_1,u_3$ and two vertices $u_2, u_4$ of degree 4, and is incident to four triangular faces.
Note also that if $vu_1u_3$ is a face of $G$, then by rule (R3) $v$ receives an additional charge of $\tfrac{7}{16}$ and therefore its final charge is $-2+\tfrac78+\tfrac78+\tfrac7{16}\geqslant \tfrac3{16}$.
As a consequence, we can assume without loss of generality that the faces incident with $v$ are $vu_1u_2$, $vu_2u_3$, $vu_3u_4$, and $vu_4u_1$.
It follows from Claim~\ref{cl:4tri} that $u_1$ and $u_3$ are adjacent and $u_2$ and $u_4$ are adjacent.
Recall that the embedding of $G$ is edge-maximal, and thus there is an ordering of the neighbors of $u_1$ such that any two consecutive vertices in the ordering are adjacent.
Since $u_1$ has more than 4 neighbors, it follows that at least one of $v,u_2,u_4$ is adjacent to a vertex not in $\{v,u_1,u_2,u_3,u_4\}$, a contradiction.
Let $v$ be a low vertex of degree $d\geqslant 5$. Then $v$ receives charge $\tfrac78$ from each of its (at least) two high neighbors by (R2), and sends at most $(d-2)\tfrac14$ by (R4).
Its final charge is therefore at least $d-6+\tfrac78+\tfrac78-(d-2)\tfrac14\geqslant 0$.
Finally, let $v$ be a high vertex. Then $v$ sends charge at most $\tfrac78d$ by (R2) and (R3), so its new charge is at least $d-6-\tfrac78d=\tfrac{d}8-6\geqslant \tfrac{K-44}8$.
We proved that each vertex and face has nonnegative charge, and each high vertex has charge at least $\tfrac{K-44}8$.
Since there are at least $K+4$ high vertices, the total charge (which equals $6g-12$) is at least $(K+4)\tfrac{K-44}8$. We obtain $K^2-40K-48g-80\leqslant 0$, and this contradicts our choice of $K$ since $K$ satisfies $K^2-40K-48g-80=1$.
\end{proof}
\section{Triangle-free graphs on surfaces}
\begin{thm}\label{tri-free}
Every triangle-free graph embeddable on a surface of Euler genus $g$ is $(0, 0, K)$-colorable where $K=K(g)=\lceil{10g+32\over 3}\rceil$.
\end{thm}
\begin{proof}
Assume for the sake of contradiction that there is a triangle-free graph $G$ embeddable on a surface of Euler genus $g$ that is not $(0,0,K)$-colorable.
We choose $g$ minimum, and with respect to this, we choose $G$ such that the sum of the number of vertices and the number of edges is minimum.
By the minimality of $g$ we may assume that $G$ is cellularly embedded on a surface of Euler genus $g$ (see~\cite[Propositions 3.4.1 and 3.4.2]{MoTh}) and from now on, we fix this embedding.
By the minimality of $G$, we can also assume that $G$ is connected and has minimum degree at least 3.
A \emph{high} and \emph{low} vertex is a vertex of degree at least $K+3$ and at most $K+2$, respectively.
A $4^+$-vertex that is not high is a {\it medium} vertex.
By Lemma~\ref{lem:vx-degree}, every low vertex has at least one high neighbor.
We will also assume that for a (partial) $(0, 0, K)$-coloring $\varphi$ of $G$, the three colors will be $a, b, k$, and the graphs induced by the colors $a$, $b$, $k$ have maximum degree at most $0, 0, K$, respectively.
\begin{claim}\label{vx-3}
Every $3$-vertex in $G$ that is adjacent to at least two $3$-vertices is incident to a $5^+$-face.
\end{claim}
\begin{proof}
Assume for the sake of contradiction that there is a $3$-vertex $v$ that is adjacent to two $3$-vertices and is incident to only $4$-faces.
Note that $v$ cannot be adjacent to three $3$-vertices since it must be adjacent to a high vertex by Lemma~\ref{lem:vx-degree}.
Let $u_1, u_2, u_3$ be the neighbors of $v$ where $u_2$ and $u_3$ are $3$-vertices.
Also, for $i\in\{1, 2, 3\}$, let $w_i$ be the neighbor of $u_i$ so that $v, u_i, w_i, u_{i+1}$ are the vertices incident with a $4$-face in this order (where $u_4=u_1$).
See Figure~\ref{fig-bad3vx} (where the white vertices do not have incident edges besides the ones drawn, and the black vertices may have other incident edges).
It is easy to check that $v, u_1, u_2, u_3, w_1, w_2, w_3$ must be all
distinct vertices, since $v, u_2, u_3$ are $3$-vertices, $u_1$ has
degree at least 3, $G$ has no $3$-cycles, and all faces incident
to $v$ are 4-faces.
Also, $u_2$ and $w_3$ have no common neighbors, since that would create a $3$-cycle.
Since $u_2$ and $w_3$ have no common neighbor, removing $v$ and adding the edge $u_2w_3$ results in a smaller graph $H$ that has no $3$-cycles and is embeddable on the same surface.
Thus, $H$ has a $(0, 0, K)$-coloring $\varphi$.
We will extend this coloring of $H$ to $G$ to obtain a contradiction.
If $\{\varphi(u_1), \varphi(u_2), \varphi(u_3)\}\ne \{a,b,k\}$, then we can use the missing color on $v$ to extend the coloring.
Moreover, it must be that $\varphi(u_1)=k$, otherwise we could color $v$ with the color $k$ since $K\geqslant 4$.
We know that $\varphi(w_2)=k$ since $\{\varphi(u_2), \varphi(u_3)\}=\{a, b\}$.
Also, $\varphi(w_3)=k$ since $\{\varphi(u_2), \varphi(u_3)\}=\{a, b\}$ and $u_2w_3$, $u_3w_3$ are edges in $H$.
Now we can color $v$ with $\varphi(u_3)$ and recolor $u_3$ with $\varphi(u_2)$.
This is a $(0, 0, K)$-coloring of $G$, which is a contradiction.
\end{proof}
\begin{figure}
\caption{Obtaining $H$ from $G$ in Claim~\ref{vx-3}.}
\label{fig-bad3vx}
\end{figure}
By Lemma~\ref{lem:num-high}, $G$ contains at least three high vertices.
By using the fact that the graph is triangle-free, we can guarantee more high vertices.
\begin{claim}\label{num-high}
There are at least six high vertices in $G$.
\end{claim}
\begin{proof}
Let $S$ be the set of high vertices in $G$, and assume for the sake of
contradiction that $|S|\leqslant 5$. If $S$ induces a bipartite subgraph of
$G$, we color $S$ properly with colors $a$ and $b$. Otherwise, since
$G$ is triangle-free, it follows that $S$ induces a 5-cycle $C$. In this
case, we color the vertices of $C$ using colors $a,b,a,b,k$, in this order, and call
$v$ the unique vertex of $C$ colored $k$. Let $N$ be the set of
neighbors of $v$ not in $C$. Since $G$ is triangle-free, $N$ is a
stable set and each vertex $u\in N$ has
at most one neighbor in $C$ distinct from $v$. It follows that the
coloring of $C$ can be properly extended to $N$ by assigning colors
$a$ and $b$ only.
We now complete the coloring of $G$ greedily (by considering the
uncolored vertices in an arbitrary order) as follows: if $w$ has no
neighbor colored $a$ or $b$, then assign the free color to
$w$. Otherwise, assign color $k$ to $w$. Note that each vertex that
has been colored $k$ during the greedy coloring has degree at most $K+2$, and at least one
neighbor colored $a$ and one neighbor colored $b$. Therefore, it has
at most $K$ neighbors colored $k$. This shows that $G$ is
$(0,0,K)$-colorable, which is a contradiction.
\end{proof}
We will use the discharging procedure laid out in Subsection~\ref{subsec:discharging}.
For a vertex $v$ and a face $f$ of $G$, the initial charge is $d(v)-4$ and $d(f)-4$, respectively.
The initial charge sum is $4g-8$ by Euler's formula.
Here are the discharging rules:
\begin{enumerate}[(R1)]
\item Each high vertex sends charge $4\over 5$ to each adjacent vertex.
\item Each medium vertex sends charge ${1\over 5}$ to each adjacent $3$-vertex.
\item Each $5^+$-face sends charge $1\over 5$ to each incident $3$-vertex.
\end{enumerate}
The discharging rules (R1) and (R2) indicate how the vertices send their charge to adjacent vertices.
Rule (R3) is the only rule where a face is involved.
We now analyze the charge of each vertex and each face after the discharging procedure.
Let $f$ be a face.
Since $G$ has no $3$-cycles, the length of $f$ is at least $4$.
If $f$ is a $4$-face, then no rule applies to $f$, thus $\mu^*(f)=\mu(f)=d(f)-4=0$.
If $f$ is a $5^+$-face, then (R3) is the only rule that applies to $f$, and therefore $\mu^*(f)\geqslant d(f)-4-{d(f)\over 5}={4d(f)\over 5}-4\geqslant 0$.
If $v$ is a $3$-vertex, then $\mu(v)=d(v)-4=-1$.
Also, $v$ receives charge ${4\over 5}$ by (R1), since $v$ is adjacent to a high vertex by Lemma~\ref{lem:vx-degree}.
By Claim~\ref{vx-3}, $v$ is either adjacent to at most one $3$-vertex or incident to a $5^+$-face.
If $v$ is adjacent to at most one $3$-vertex, then $v$ receives either an additional charge of ${4\over 5}$ by (R1) or charge ${1\over 5}$ by (R2).
Thus, $\mu^*(v)\geqslant -1+{4\over 5}+{1\over 5}=0$.
If $v$ is incident to a $5^+$-face, then $v$ receives charge ${1\over 5}$ by (R3).
Thus, $\mu^*(v)\geqslant -1+{4\over 5}+{1\over 5}=0$.
If $v$ is a medium vertex, then $v$ receives charge ${4\over 5}$ by (R1) since $v$ is adjacent to a high vertex by Lemma~\ref{lem:vx-degree}.
Also, by (R2), $v$ sends charge ${1\over 5}$ to each adjacent $3$-vertex.
Thus, $\mu^*(v)\geqslant d(v)-4+{4\over 5}-{d(v)-1\over 5}={4d(v)-15\over 5}> 0$.
If $v$ is a high vertex, then it sends charge ${4\over 5}$ to each neighbor.
Thus,
$\mu^*(v)
=d(v)-4-{4d(v)\over 5}
={d(v)\over 5}-4
\geqslant {K(g)+3\over 5}-4
={\lceil{10g+32\over 3}\rceil+3\over 5}-4
\geqslant {{10g+32\over 3}+3\over 5}-4
={10g-19\over 15}
>{2g-4\over 3}$.
Thus, each high vertex $v$ has final charge greater than ${2g-4\over 3}$.
According to Claim~\ref{num-high}, there are at least six high vertices.
Since each high vertex has final charge greater than ${4g-8\over 6}$ and every other vertex and face has nonnegative final charge, the sum of the final charge is greater than $4g-8$.
This is a contradiction since the initial charge sum was $4g-8$.
Therefore, a counterexample to Theorem~\ref{tri-free} does not exist.
\end{proof}
\subsection{Tightness example}\label{subsection-tight}
In this subsection, we will show that the growth rate of $K(g)$ in Theorem~\ref{tri-free} is tight by constructing, for some constant $\epsilon>0$ and infinitely many values of $g$, a triangle-free graph that is embeddable on a surface of Euler genus $g$ but is not $(0, 0, \lceil\epsilon g\rceil)$-colorable.
We will actually do better and construct a graph with girth $6$ that
is not $(0, 0, \lceil\epsilon g\rceil)$-colorable. Our construction is inspired by a
classical construction of Blanche Descartes~\cite{Des54}.
Given a set $S$ of seven vertices in a graph that are pairwise distance at least $3$ apart from each other, let ``adding $C_7$ to $S$'' mean that you add a disjoint copy of $C_7$ and add a perfect matching between the seven new vertices and vertices in $S$.
Note that this operation does not create $3$-, $4$-, or $5$-cycles.
Now, construct $H_k$ by starting with seven disjoint copies $D_1, \ldots, D_7$ of $C_7$.
For every set $\{v_1, \ldots, v_7\}$ of seven vertices where $v_i\in D_i$ for $i\in\{1, \ldots, 7\}$, do the operation of ``adding $C_7$ to $\{v_1, \ldots, v_7\}$'' $7k+1$ times.
For every $(0, 0, k)$-coloring of $H_k$, there is a vertex $u_i$ colored with the third color in each $D_i$ for $i\in \{1, \ldots, 7\}$, since a $7$-cycle cannot be properly colored with two colors.
Now consider the $7k+1$ copies of $C_7$ added to $\{u_1, \ldots, u_7\}$.
Since each vertex $u_i$ is adjacent to at most $k$ vertices of the third color, there must exist a copy of $C_7$ where none of the vertices are colored with the third color, a contradiction.
Hence, $H_k$ is not $(0, 0, k)$-colorable.
Note that $H_k$ has $7^8 (7k+1)+49$ vertices and $2\cdot 7^8 (7k+1)+49$ edges, and therefore $H_k$ has Euler genus at most $7^8 (7k+1)+1$.
Hence, $H_k$ is a graph with girth $6$ that is embeddable on a surface of Euler genus at most $7^8 (7k+1)+1$ and is not $(0, 0, k)$-colorable. It follows that there is a constant $\epsilon>0$ and infinitely many values of $g$, for which we can construct a graph of girth 6 that is embeddable on a surface of Euler genus $g$ but is not $(0, 0, \lceil\epsilon g\rceil)$-colorable.
\section{Graphs of girth at least 7 on surfaces}
\begin{thm}\label{thm:g7}
Every graph of girth at least 7 embeddable on a surface of Euler genus $g$ is $(0, K)$-colorable where $K=K(g)=5+\lceil\sqrt{14g+22}\rceil$.
\end{thm}
\begin{proof}
Assume for the sake of contradiction that there is a graph $G$ with girth at least $7$ embeddable on a surface of Euler genus $g$ that is not $(0,K)$-colorable. We choose $g$ minimum, and with respect to this, we choose $G$ such that the sum of the number of vertices and the number of edges is minimum. By the minimality of $g$ we may assume that $G$ is cellularly embedded on a surface of Euler genus $g$ (see~\cite[Propositions 3.4.1 and 3.4.2]{MoTh}) and from now on, we fix this embedding.
By the minimality of $G$, we can also assume that $G$ is connected and has minimum degree at least 2.
A \emph{high} and \emph{low} vertex is a vertex of degree at least $K+2$ and at most $K+1$, respectively.
By Lemma~\ref{lem:vx-degree}, every non-high vertex is adjacent to at least one high vertex.
By Lemma~\ref{lem:num-high}, $G$ contains at least two high vertices.
By using the fact that the graph has girth at least $7$, we can guarantee more high vertices.
We will also assume that for a (partial) $(0, K)$-coloring $\varphi$ of $G$, the two colors will be $0$ and $k$, and the graph induced by the color 0 and $k$ has maximum degree at most $0$ and at most $K$, respectively.
\begin{claim}
There are at least $K+2$ high vertices.
\end{claim}
\begin{proof}
Assume for the sake of contradiction that the set $H$ of high vertices has size at most $K+1$.
First color all the vertices of $H$ with the color $k$.
Let $M$ be the set of vertices not in $H$ that have at least one neighbor in $H$, and let $S$ be a maximum independent set in $M$.
Now color all vertices of $S$ with the color $0$ and color all vertices of $M-S$ with the color $k$.
For the remaining vertices, we proceed by a greedy algorithm: if a vertex $v$ has a neighbor colored $0$, then use color $k$ on $v$, otherwise, use color $0$ on $v$.
We now show that this coloring is indeed a $(0, K)$-coloring of $G$.
For a vertex $v$ in $H$, the neighbors of $v$ that are colored with $k$ are partitioned into two sets $T_1$ and $T_2$ where $T_1\subseteq H$ and $T_2\subseteq M$.
Consider a vertex $u\in T_2$. It follows from the definition of $S$ that $u$ is adjacent to a vertex $u_1$ in $M$ that is colored $0$.
This vertex $u_1$ must have a neighbor $u_2$ in $H$, since $u_1$ is in $M$.
Moreover, since $G$ has girth at least $7$, we know that $u_2\not\in T_1$ and for any two vertices $u,w\in T_2$, we have $u_2=w_2$ if and only if $u=w$.
Therefore the number of neighbors of $v$ that are colored with $k$ is at most $|T_1|+|T_2|\leqslant |H|-1\leqslant K$.
A vertex in $V(G)-H$ that is colored with $k$ must be adjacent to a
vertex of color $0$, and thus has at most $K$ neighbors colored with
$k$. It is easy to check that no vertex in $V(G)-H$ that is colored
with $0$ has a neighbor colored with $0$.
Hence, we obtain a $(0,K)$-coloring of $G$, which is a contradiction. It follows that there are at least $K+2$ high vertices.
\end{proof}
We will use the discharging procedure laid out in Subsection~\ref{subsec:discharging}.
For a vertex $v$ and a face $f$ of $G$, the initial charge is $5d(v)-14$ and $2d(f)-14$, respectively.
The initial charge sum is $14g-28$ by Euler's formula.
Here is the unique discharging rule:
\begin{enumerate}[(R1)]
\item Every high vertex $v$ sends charge 4 to each of its neighbors.
\end{enumerate}
We now analyze the charge of each vertex and each face after the discharging procedure.
Observe that the charge of a face remains the same, and since $G$ has girth at least 7, all faces have nonnegative final charge.
A non-high vertex $v$ starts with initial charge $5d(v)-14\geqslant -4$ and receives a charge of 4 from each of its (at least one) high neighbors, and therefore the final charge of $v$ is also nonnegative.
Finally, since a high vertex $v$ sends a charge of 4 to each of its neighbors, its final charge is $5d(v)-14-4d(v)=d(v)-14\geqslant K+2-14=K-12$.
Consequently, the total charge $14(g-2)$ is at least $(K+2)(K-12)$.
This is equivalent to $K^2-10K+4-14g\leqslant 0$,
which contradicts the definition of $K$ ($K$ satisfies $K^2-10K+4-14g> 0$).
\end{proof}
\subsection{Tightness example}
We now prove that the bound on $K(g)$ in the statement of
Theorem~\ref{thm:g7} is best possible, up to a multiplicative constant factor.
We construct, for some constant $c>0$ and infinitely many values of $g$, a graph of girth at least 7 embeddable on a surface of Euler genus $g$, with no $(0,K)$-coloring where $K=K(g)=\lfloor c\sqrt{g}\rfloor$.
In a $(0, K)$-coloring, let $0$ and $k$ be the two colors where the vertices of color $0$ and $k$ induce a graph of maximum degree at most $0$ and $K$, respectively.
A \emph{2-star} is obtained from a star by subdividing every edge once. Take a 2-star with $3K+2$ leaves, and for any two leaves $u$ and $v$, add an edge between $u$ and $v$ and then subdivide this edge exactly twice (in other words, replace it by a path on 3 edges).
Let $S_K$ be the resulting graph. Now, take two copies of $S_K$, and
join their centers by an edge (see Figure~\ref{fig-g7non0k} for the
case $K=1$). The resulting graph has $
(3K+2)(6K+6)+2$ vertices and $(3K+2)(9K+7)+1$ edges, and therefore has Euler genus at most $(3K+2)(3K+1)=9K^2+9K+2$.
At least one of the two centers is colored with $k$.
Consider the corresponding copy of $S_K$. At least $2K+2$ of the neighbors of the center (in the copy of $S_K$) are colored with $0$.
The corresponding $2K+2$ leaves of the 2-star are then colored with $k$.
Let $L$ be the set of these leaves, and let $D$ be the sum, over all vertices $v$ of $L$, of the number of neighbors of $v$ colored $k$.
Observe that in each added path on 3 edges, at least one of the newly added vertices is colored with $k$, so each added path on 3 edges between two vertices of $L$ contributes at least 1 to $D$. Since there are $|L|(|L|-1)/2$ such paths, at least one of the vertices of $L$ has at least $(|L|-1)/2$ vertices colored $k$.
If $(|L|-1)/2>K$, then this is a contradiction.
It follows that there is a constant $c>0$ and infinitely many values of $g$, for which we can construct a graph of girth at least 7 embeddable on a surface of Euler genus $g$, with no $(0,\lfloor c\sqrt{g}\rfloor)$-coloring.
\begin{figure}
\caption{A construction of a graph with girth 7 that is not $(0, K)$-colorable.}
\label{fig-g7non0k}
\end{figure}
\section{Open problems}
A natural question is to find a version of Theorem~\ref{thm:g7} for
graphs of arbitrary large girth. A slight variation of the proof of
Theorem~\ref{thm:g7} easily shows that a graph of girth at least
$\ell$ embeddable on a surface of Euler genus $g$ is $(0,
O(\sqrt{g/\ell}))$-colorable, where the hidden constant depends on
neither $g$ nor $\ell$. In an early version of this manuscript, we conjectured the following stronger statement.
\begin{conj}\label{conj}
There is a function $c=o(1)$ such that any graph of girth at least $\ell$ embeddable on a surface of Euler genus $g$ is $(0, O(g^{c(\ell)}))$-colorable.
\end{conj}
Note that a graph that is $(0,k)$-colorable has a proper coloring with
$k+2$ colors (since a graph with maximum degree $k$ has a proper
$(k+1)$-coloring). As a consequence, the following result of Gimbel
and Thomassen~\cite{GT97} gives a lower bound of the order
$\tfrac1{2\ell+2}$ on such a function $c$.
\begin{thm}[\cite{GT97}]
For any $\ell$, there exists a constant $c>0$ such that for arbitrarily small $\epsilon>0$ and sufficiently large $g$, there are graphs of girth at least $\ell$ embeddable on surfaces of Euler genus $g$ that have no proper coloring with fewer than $c\, g^{\tfrac{1-\epsilon}{2\ell+2}}$ colors.
\end{thm}
It was subsequently observed by Fran\c cois Dross that an argument similar to that
of the proof of Theorem~\ref{thm:g7} shows that if the girth is at
least $6t+1$, then there are $K^t$ vertices of degree at
least $K$; we just have to consider paths of length $3t$ starting from
some vertex $v$. Using a similar computation as in the proof of
Theorem~\ref{thm:g7}, this shows that any graph of girth at least
$\ell$ embeddable on a surface of Euler genus $g$ is $(0,
O(g^{6/(\ell+5)}))$-colorable, which proves Conjecture~\ref{conj}.
\section*{Acknowledgments}
The authors would like to thank Gwena\"el Joret for the interesting
discussions on short non-contractible cycles, Fran\c cois Dross for
allowing us to mention his remark about Conjecture~\ref{conj}, and a
reviewer for the excellent suggestions.
\end{document} |
\begin{document}
\title{Bound entanglement in the Jaynes-Cummings model}
\author{Nicol\'as Quesada}
\address{McLennan Physical Laboratories, Institute for Optical Sciences and Centre for Quantum Information and Quantum Control, University of Toronto, 60 St. George Street, Toronto, Ontario, Canada M5S 1A7}
\ead{nquesada@physics.utoronto.ca}
\author{Anna Sanpera}
\address{ICREA, Instituci\`o Catalana de Recerca i Estudis Avan\c{c}ats, E-08010 Barcelona, Spain.}
\address{Departament de F\'{i}sica, Universitat Aut\`{o}noma de Barcelona, E-08193 Bellaterra, Spain.}
\date{\today}
\begin{abstract}
We study in detail entanglement properties of the Jaynes-Cummings model assuming a two-level atom (qubit) interacting with the first $N$ levels of an electromagnetic field mode (qudit) in a cavity. In the Jaynes-Cummings model, the number operator is the conserved quantity that allows for the exact diagonalization of the Hamiltonian and thus we study states that commute with this conserved quantity and whose structure is preserved under the Jaynes-Cummings dynamics. Contrary to the common belief, we show that there are bound entangled states that satisfy the symmetries imposed by the conservation of the number of excitations when $N>3$. Furthermore we show that \emph{the Jaynes-Cummings interaction can be used to generate bound-entanglement} between the atom and the mode.
\end{abstract}
\maketitle
\section{Introduction}
The Jaynes-Cummings (JC) model is one of the workhorses of quantum optics research\cite{Jaynes63,Knight93}. In this model the interaction of a two level atom with a single electromagnetic field (EMF) mode is studied under the dipole and rotating wave approximations. Its rich dynamics has been used to model and understand phenomenology both in the realm of cavity\cite{Rempe87,Brune96,Haroche01} and circuit\cite{Blais04} QED and in trapped ions\cite{Wineland96,Onofrio97}, where the EMF is replaced by the quantized vibrations of the ions. There is also a significant amount of literature devoted to entanglement in the JC model. While some of the previous results concerned pure states\cite{Knight92,Liu07,Zhang10}, mixed state entanglement has been addressed using either entropic relations\cite{Tannor05,Guo11,Cummings99,Lendi98,Obada04} or detection techniques drawn directly from the field of quantum information. In particular, projecting the Fock space of the EMF mode onto a two dimensional subspace results in a combined Hilbert space that essentially reduces to a two-qubit system whose entanglement properties are well understood\cite{Rendell03,Cai05,Kayhan11,Masanori99,Bose01,Vera09}. Also, the partial transpose criterion (PPT) introduced by Peres \cite{Peres96} has been used to this aim~\cite{Struntz05,Plenio03,Farsi07,Quesada11,Suarez12}. We note, however, that neither the use of entropic inequalities (which are a corollary of the majorization criterion\cite{Nielsen01}) nor the projection of the system onto $2\times2$ subspaces leads to entanglement criteria that are stronger than the PPT one\cite{Hiroshima03,Chen12}. Thus, so far only ``distillable'' (or free) entanglement has been detected in these systems \cite{Horodeckireview}.\\
In this paper we first formalize the problem that most previous studies have analyzed in terms of the conservation of the number of excitations that makes the JC Hamiltonian an exactly solvable problem. We then show that this symmetry gives rise to a super selection rule\cite{Schuch04} that severely constrains the structure of the density matrices describing the states.
With these tools at hand we review entanglement detection techniques previously used and show that they are, in general, weaker than the Peres criterion. Further, we show numerically that other entanglement criteria that are not based on the use of positive maps but not completely positive ones are not able to supplement the Peres criterion. This is the case of the computable cross-norm or realignment (CCNR) criterion\cite{Rudolph03,Guhne09} and some covariance matrices (CM) corollaries\cite{Zhang08,Oleg08}. We construct explicitly states that are PPT but entangled demonstrating that there exist bound entangled states in the JC model that are not detected by any of the previous methods and that some of these states can be generated by the JC interaction. We emphasize that to the best of our knowledge, this is the first study that addresses the possibility of generating bound entanglement using the JC dynamics which is a commonly occurring interaction in different physical systems. We note however that several theoretical\cite{geza07,patane07,ferraro08} and experimental\cite{Amselem09,lavoie10,barreiro10,di11} investigations have been performed seeking bound entanglement in other physical systems.
This paper is organized as follows: In section \ref{sec:moti}, we introduce and define the class of states to be studied here and argue that those are the natural mixed states resulting from the dynamics of the JC model. Further, we introduce the notation that will be used in the rest of the paper in terms of qubit-qudit ($2\times N$) density operators. In section \ref{sec:dete}, we derive under which conditions these states are PPT, \emph{i.e.} have a positive partial transpose.
In section \ref{sec:bound}, we focus on $N=4$ and show that despite the high symmetry of the states considered, bound entangled states exist. Following some ideas presented in \cite{kraus00,tura12}, we demonstrate the existence of bound entanglement by using the range criterion \cite{Horo97}.
Therefore, under the JC dynamics, the PPT criterion is necessary but not sufficient to ensure entanglement. In section \ref{sec:dyn} we show how using the JC interaction it is possible to generate bound entangled states starting from uncorrelated ones. Final remarks are given in section \ref{sec:conc} .
For completeness, in \ref{app:crit} we present two well known entanglement criteria that have been used to detect bound entanglement and numerically show that they are ineffective for the states considered here and in \ref{app:hull} we construct the convex hull for the PPT separable states when $N=2,3$.
\section{Motivation and Definitions}\label{sec:moti}
Assume a cavity with a finite quality factor where a $N-1$ photon state, $\ket{N-1}$, has been prepared. After such initial preparation an atom in its ground state $\ket{{\mathbf 0}}$ enters the cavity and starts to interact with the photons. Typically, three processes will occur in the dynamics of the system:
\begin{itemize}
\item The atom and the cavity will reversibly exchange one excitation which under the assumptions of the JC model will cause the transition $\ket{{\mathbf 0}}\otimes\ket{N-1} \Leftrightarrow \ket{{\mathbf 1}}\otimes \ket{N-2}$ with $\ket{{\mathbf 1}}$ being the excited state of the atom.
\item The cavity might irreversibly lose one of its photons since it has a finite quality factor.
\item Once the atom is excited it can spontaneously and irreversibly emit a photon that escapes the cavity.
\end{itemize}
As time goes by, the dynamics of the system is in general quite complicated and to treat it completely it is necessary to include the modes outside the cavity that give rise to an irreversible behavior. Nevertheless, under very general assumptions such as that the interaction with the outside modes preserves the number of photons (\emph{i.e.} that each photon that disappears from the cavity becomes a photon in one of the outside modes) the state of the system commutes with the number of excitations of the atom plus cavity. Including the first $N$ Fock states in the dynamics one can write the number of excitations operator as:
\begin{eqnarray}\label{def}
\Pi&=&\Pi_2 \otimes \mathbb{I}+ \mathbb{I} \otimes \Pi_N,\\
\Pi_2&=&\sum_{{\mathbf i} = {\mathbf 0}}^{\mathbf 1} {\mathbf i} \ket{{\mathbf i}}\bra{{\mathbf i}}=\ket{{\mathbf 1}}\bra{{\mathbf 1}},\nonumber \quad
\Pi_N=\sum_{n=0}^{N-1}n\ket{n}\bra{n}.\nonumber
\end{eqnarray}
At any time the state of the atom and photons $\rho$ satisfies:
\begin{eqnarray}\label{comm}
\left[\rho, \Pi \right]=0.
\end{eqnarray}
The above symmetry gives rise directly to the following super-selection rule\cite{Schuch04}:
\begin{eqnarray}\label{super}
\bra{{\mathbf i} n} \rho \ket{\mathbf{j} m} \propto \delta_{{\mathbf i}+n,\mathbf{j}+m},
\end{eqnarray}
where as usual $\ket{{\mathbf i} n} \equiv \ket{{\mathbf i}}\otimes\ket{n}$ and $\ket{{\mathbf i}},\ket{\mathbf{j}}$ are two atomic states and $\ket{m},\ket{n}$ are Fock states with $m$ and $n$ photons respectively.
It can be easily shown that if at $t=0$ the state $\rho(t=0)$ satisfies (\ref{comm}) and if it evolves under the von Neumann equation with the JC Hamiltonian,
\begin{eqnarray}\label{neu}
\frac{d}{dt} \rho=i\left[\rho, H_{JC}\right],
\end{eqnarray}
then at later times it also satisfies (\ref{comm}). In (\ref{neu}) the JC Hamiltonian is given by:
\begin{eqnarray}\label{HJC}
H_{JC}&=&\omega_0 \mathbb{I} \otimes a^\dagger a+\left(\omega_0-\Delta\right) \sigma^\dagger \sigma \otimes \mathbb{I}-i g\left(\sigma \otimes a^\dagger-\sigma^\dagger \otimes a \right),
\end{eqnarray}
where $a, (a^\dagger)$ is an annihilation (creation) bosonic operator satisfying $[a,a^\dagger ]=\mathbb{I}$, $\sigma=\ket{{\mathbf 0}}\bra{{\mathbf 1}}$, $g$ is the light-matter coupling constant (the Rabi frequency), $\omega_0$ is the frequency of the mode and $\Delta$ is the detuning between the mode and the transition frequency between the two atomic states.
We note that the JC Hamiltonian also commutes with the number operator when all the states in the Fock ladder are included (\emph{i.e.} $N \to \infty$ in (\ref{def})) and thus it is reasonable to think of the states $\rho$ in (\ref{comm}) as the natural states of the JC dynamics. Even more interesting is the fact that when some phenomenological dissipation terms are added to the RHS of (\ref{neu}) the dynamics still preserves the structure of the density matrix describing the state. This is the case if Lindblad terms of the form:
\begin{eqnarray}
\mathcal{L}_{\sqrt{\gamma_O}O}\{\rho\}=\gamma_O\left(O \rho O^\dagger -\frac{\rho O^\dagger O+O^\dagger O \rho}{2} \right),
\end{eqnarray}
with $O= \mathbb{I} \otimes a$ for photon depopulation, $O=\mathbb{I} \otimes a^\dagger$ for photon re-population, $O=\mathbb{I} \otimes a^\dagger a$ for photon dephasing, $O=\sigma \otimes \mathbb{I}$ for atom depopulation, $O=\sigma^\dagger \otimes \mathbb{I} $ for atom re-population, $O=\sigma^\dagger \sigma \otimes \mathbb{I} $ for atomic dephasing are added.
Even if a microscopic treatment is used and a rigorous derivation of the system operator dynamics is performed by first coupling it to a bosonic reservoir and then tracing it out, if the dynamics of the system plus reservoir as a whole preserves the overall number of excitations the shape of the states satisfying (\ref{comm}) will be preserved \cite{Scala07}.\\
For future convenience we name the non-zero elements of the density matrix that satisfy the super-selection rule (\ref{super}) as follows. The two sets of $N$ nonzero elements along the diagonal (populations) we label:
\begin{eqnarray}\label{not1}
a_n \equiv \bra{{\mathbf 0} n }\rho \ket{{\mathbf 0} n} \quad b_n \equiv \bra{{\mathbf 1} n}\rho \ket{{\mathbf 1} n},
\end{eqnarray}
and the two sets of $N-1$ non-zero off-diagonal elements or coherences we parametrize as:
\begin{eqnarray}\label{not2}
c_n \equiv \bra{{\mathbf 0} n} \rho \ket{{\mathbf 1} n-1} \quad c_n^\star \equiv \bra{{\mathbf 1} n-1}\rho \ket{{\mathbf 0} n}.
\end{eqnarray}
In the above equation it is assumed that $\rho$ is a state and thus it is automatically Hermitian. The state is explicitly given by:
\begin{eqnarray}\label{state}
\rho&=&\sum_{n=0}^{N-1} \left(a_n\ket{{\mathbf 0} n}\bra{{\mathbf 0} n}+b_n\ket{{\mathbf 1} n}\bra{{\mathbf 1} n}\right) \\
&&+ \sum_{n=1}^{N-1} \left(c_n^\star \ket{{\mathbf 1} n-1}\bra{{\mathbf 0} n}+c_n\ket{{\mathbf 0} n}\bra{{\mathbf 1} n-1}\right). \nonumber
\end{eqnarray}
It can be represented as a $2\times 2$ matrix in the atom-field basis whose entries are $N\times N$ sparse matrices:
\begin{eqnarray}
\rho=\left(
\begin{array}{c|c}
A & C \\
\hline
C^{\dagger } & B \\
\end{array}
\right),
\end{eqnarray}
more explicitly in the basis given by the tensor product of $\left\{\ket{{\mathbf i}} \right\}_{{\mathbf i}={\mathbf 0}}^{{\mathbf 1}}$ and $\left\{\ket{n} \right\}_{n=0}^{N-1}$ the matrix has the following structure
\begin{eqnarray}\label{bigmat}
\rho = \left(
\begin{array}{cccc|cccc}
a_0 & & & & & & & \\
& \ddots & & & c_1 & & & \\
& & \ddots & & & \ddots & & \\
& & & a_{N-1} & & & c_{N-1} & \\
\hline
& c_1^\star & & & b_0 & & & \\
& & \ddots & & & \ddots & & \\
& & & c_{N-1}^\star & & & \ddots & \\
& & & & & & & b_{N-1}\\
\end{array}
\right).
\end{eqnarray}
In spite of its simplicity, the separability properties of such a state are intricate. Notice that although the above matrix was constructed to be explicitly Hermitian, to represent a physical state the density operator must also be positive semi-definite, which is equivalent to requiring that:
\begin{eqnarray}\label{pos}
a_n,b_n \geq 0, \quad |c_n|^2 \leq a_n b_{n-1}.
\end{eqnarray}
The positivity conditions (\ref{pos}) guarantee that all the eigenvalues of $\rho$ will be non-negative.
Moreover, unless at least one of the inequalities (\ref{pos}) is saturated $\rho$ is of full rank.
Notice that by applying a local unitary transformation to the density matrix it is possible to make all the coefficients in it non-negative. Such local unitary is given by
\begin{eqnarray}\label{local}
\mathcal{V}=\sum_{n=0}^{N-1} e^{i \theta_n} \ket{n}\bra{n}, \quad \theta_n-\theta_{n-1}=\arg(c_n),
\end{eqnarray}
and since the entanglement properties of the state are invariant under local unitaries it suffices to consider only the absolute values of the $c_n$. In other words, from now on we deal with density operators whose matrix elements are non-negative.
\section{Entanglement detection}\label{sec:dete}
To determine if the state (\ref{bigmat}) is entangled or not \emph{i.e.} whether it can be written as:
\begin{eqnarray}
\rho=\sum_i p_i \ket{\phi_i}\bra{\phi_i} \otimes \ket{\pi_i}\bra{\pi_i},
\end{eqnarray}
is not a trivial question since the state acts in a Hilbert space of dimension $2N$ where only sufficient but not necessary separability criteria are known.
Furthermore, one would like to quantify the extent to which the state cannot be written as a separable mixture. One often used metric is the concurrence $\mathcal{C}\left(\rho \right)$~\cite{Wootters98}. For a pure state $\psi=\ket{\psi}\bra{\psi}$ in a $2 \times N$ system the concurrence is simply:
\begin{eqnarray}
\mathcal{C}(\psi )=\sqrt{2\left(1-\tr\left(\mu^2\right)\right)}\\
\mu=\tr_{A/B}(\psi),
\end{eqnarray}
where $\tr_{A/B}$ denotes the partial trace over subsystem $A$ or $B$. For mixed states the concurrence is extended via the convex roof construction:
\begin{eqnarray}
\mathcal{C}(\rho)=\min_{\sum_i p_i \psi_i =\rho} \sum_i p_i \mathcal{C}(\psi_i),
\end{eqnarray}
with $p_i$ being probabilities and the $\psi_i$ being pure states. This quantity ranges from zero (for separable states) to one for maximally entangled ones.\\
A possible approach to answer whether a state is separable or not is to obtain the partial transpose (PT) with respect to one of its subsystems (it does not matter which one) and see if it has negative eigenvalues\cite{Peres96}: if there is at least one negative eigenvalue then $\rho$ must be entangled.
The PT of $\rho$ with respect to subsystem $B$ reads:
\begin{eqnarray}
\rho^\Gamma=\left(
\begin{array}{c|c}
A & C^T \\
\hline
C^{\star} & B \\
\end{array}
\right).
\end{eqnarray}
We point out that once the local unitary (\ref{local}) has been applied to $\rho$ then $\rho^{T_A}=\rho^{T_B}\equiv \rho^\Gamma$, where $\rho^{T_i}$ is the partial transpose with respect to the $i^{th}$ party.
For $\rho^\Gamma$ to be positive semi-definite one new constraint is needed:
\begin{eqnarray}\label{postrans}
|c_n|^2 \leq a_{n-1} b_n.
\end{eqnarray}
The violation of the constraints above gives a sufficient condition for $\rho$ to be entangled. One reaches the same condition by finding an upper bound to the concurrence of the state obtained by projecting the qudit into the subspace spanned by two consecutive Fock states $\ket{n},\ket{n+1}$ \cite{Rendell03}.
Another way to arrive at the constraints given by (\ref{postrans}) is by using the bound on the concurrence for a $2\times N$ dimensional system derived in \cite{gerjuoy03} in which a set of $N(N-1)/2$ quantities are proposed to bound the concurrence of the system. Of these only $N-1$ turn out to be not trivial and they express the same bounds contained in (\ref{postrans}). Using the results from \cite{gerjuoy03} the following bound for the concurrence is found:
\begin{eqnarray}\label{ger}
\mathcal{C}(\rho)\geq \mathcal{G}(\rho)=\sqrt{\sum_{l=1}^{N-1} \mathcal{G}_l(\rho)^2} \\
\mathcal{G}_l(\rho)=2\max\left\{0,|c_l|-\sqrt{a_{l-1}b_l} \right\}
\end{eqnarray}
It was pointed out in \cite{Chen12} that the above bound cannot have more entanglement detection capabilities than the positive PT criterion; it is interesting to notice that for these states they have \emph{exactly} the same entanglement detection capabilities.
The Negativity\cite{Vidal02} can also be used to quantify the failure of $\rho^\Gamma$ to be positive semi-definite:
\begin{eqnarray}\label{neg}
\mathcal{N}(\rho)&=& -\sum_{n=1}^N \min \left\{0, \lambda^\Gamma_{n-} \right\},\\
\lambda^\Gamma_{n-}&=&a_{n-1}+b_n-\sqrt{(a_{n-1}-b_n)^2+4|c_n|^2}.
\end{eqnarray}
The normalization is such that it takes a value of $1$ when $\rho$ is a maximally entangled pure state $\rho=\ket{\psi}\bra{\psi}$ with $\ket{\psi}=\frac{1}{\sqrt{2}}\left( \ket{{\mathbf 0} n}+\ket{{\mathbf 1} n-1} \right)$. It was pointed out in \cite{Kai05} that the negativity with the normalization used above is another lower bound for the concurrence:
\begin{eqnarray}
\mathcal{C}(\rho) \geq \mathcal{N}(\rho).
\end{eqnarray}
When the Negativity (or for these states the bound (\ref{ger})) is zero and $N>3$ the criterion is inconclusive \cite{Horo96}, \emph{i.e.} it is known that in the general case of $2\times N$ there are entangled states not detected by the negativity criterion~\cite{Horo97}. These PPT states were detected using the so-called Range criterion. It states that if $\rho$ is separable then there exists a separable vector $\ket{\mathbf{e}f}$ such that $\ket{\mathbf{e}f} \in R(\rho)$ and $\ket{\mathbf{e}^\star f} \in R(\rho^{T_A})$, with $\ket{\mathbf{e}^\star f}$ indicating complex conjugation in the first subsystem and $R$ is used to indicate the range of an operator. Another way of expressing this criterion is to say that if there is no separable vector $\ket{\mathbf{e}f}$ such that:
\begin{eqnarray}\label{rangecrit}
\braket{k_i|\mathbf{e}f}=0 \ \forall \ket{k_i}\in K(\rho) \quad \text{and} \quad \braket{\kappa_i|\mathbf{e}^\star f}=0 \ \forall \ket{\kappa_i}\in K(\rho^\Gamma),
\end{eqnarray}
with $K$ representing the kernel of an operator, then $\rho$ must be entangled. Bound entangled states satisfying (\ref{rangecrit}) are called edge states \cite{Lewenstein00}.
\section{Bound entanglement in the JC model}\label{sec:bound}
In this section we show that there are bound entangled states that satisfy (\ref{comm}) and thus from now on, we only consider states that are PPT. We will assume that the local unitary operation in equation (\ref{local}) has been applied and thus $c_n=c_n^\star \geq 0$.
To reduce the complexity of the problem and facilitate the proof it is convenient to apply the following local filtering operation to the state:
\begin{eqnarray}
F &=& \mathbb{I} \otimes F_B\\
F_B&=&\sum_{n=0}^{N-1} \frac{1}{\sqrt{b_n}} \ket{n}\bra{n}. \nonumber
\end{eqnarray}
Notice that if there is some $b_m=0$ then from (\ref{pos}) and (\ref{postrans}) $c_{m+1}=c_m=0$. In such case the density matrix can be split as follows:
\begin{eqnarray}
\rho&=&\rho_{[0:m-1]}+\rho_{[m]}+\rho_{[m+1:N-1]}\\
\rho_{[0:m-1]}&=& \sum_{n=0}^{m-1} \left( a_n \ket{{\mathbf 0} n}\bra{{\mathbf 0} n}+b_n \ket{{\mathbf 1} n}\bra{{\mathbf 1} n} \right)\\
&&+\sum_{n=1}^{m-1} c_n \left(\ket{{\mathbf 1} n-1}\bra{{\mathbf 0} n}+\ket{{\mathbf 0} n}\bra{{\mathbf 1} n-1}\right) \nonumber \\
\rho_{[m]}&=& a_m \ket{{\mathbf 0} m} \bra{{\mathbf 0} m}\\
\rho_{[m+1:N-1]}&=& \sum_{n=m+1}^{N-1} \left( a_n \ket{{\mathbf 0} n}\bra{{\mathbf 0} n}+b_n \ket{{\mathbf 1} n}\bra{{\mathbf 1} n} \right)\\
&&+\sum_{n=m+2}^{N-1} c_n \left(\ket{{\mathbf 1} n-1}\bra{{\mathbf 0} n}+\ket{{\mathbf 0} n}\bra{{\mathbf 1} n-1}\right) \nonumber
\end{eqnarray}
where $\rho_{[0:m-1]}$ is a state supported in $2 \times m$, $\rho_{[m+1:N-1]}$ is a state supported in $2\times (N-m-1)$ and $\rho_{[m]}$ is a separable state. Because of this the problem is reduced to 2 lower rank density matrices with the same structure as the original one plus a trivially separable state. Now, without loss of generality we assume that all the populations of the density matrix are nonzero and after applying the above filtering operation to $\rho$ one obtains:
\begin{eqnarray}\label{filter}
\rho \rightarrow \sigma = F \rho F^\dagger &=& \sum_{n=0}^{N-1}\left( x_n^2 \ket{{\mathbf 0} n}\bra{{\mathbf 0} n}+ \ket{{\mathbf 1} n}\bra{{\mathbf 1} n}\right)\\
&&+ \sum_{n=1}^{N-1} y_n \left(\ket{{\mathbf 1} n-1}\bra{{\mathbf 0} n}+\ket{{\mathbf 0} n}\bra{{\mathbf 1} n-1}\right)\nonumber
\end{eqnarray}
\begin{eqnarray}
x_n^2&=&a_n/b_n
\end{eqnarray}
\begin{eqnarray}\label{yn}
y_n&=&c_n/\sqrt{b_{n-1} b_n}.
\end{eqnarray}
Local filtering operations map entangled states to entangled states and separable states to separable states and thus $\sigma$ is still positive and is PPT. The positivity and PPT conditions equivalent to equations (\ref{pos},\ref{postrans}) are now
\begin{eqnarray}\label{ppt}
y_i \leq \min(x_i,x_{i-1})
\end{eqnarray}
or equivalently\footnote{
We note that there is a slight abuse of notation in writing (\ref{ppts}) since there are only $N-1$ variables $y_i$ with the index $1\leq i \leq N-1$ (see (\ref{yn}) in which the $y_i$ are defined in terms of the $c_i$). Because of this when $i=0$ or $i=N-1$ in (\ref{ppts}) the equation becomes meaningless since $y_0$ or $y_N$ are not defined (they are not part of the problem) and indeed going back to equation (\ref{ppt}), $x_0$ and $x_{N-1}$ only need to satisfy $x_0 \geq y_1$ and $x_{N-1}\geq y_{N-1}$. To solve this inconvenience one could simply define $\max(y_0,y_1)\equiv y_1$ and $\max(y_{N-1},y_N)\equiv y_{N-1}$ or introduce two fictitious or auxiliary (in the sense that they are not part of the problem) quantities $y_0$ and $y_N$ with the values $y_0 \equiv y_1$ and $y_N \equiv y_{N-1}$.
}:
\begin{eqnarray}\label{ppts}
x_i \geq \max(y_i,y_{i+1}).
\end{eqnarray}
Further, notice that the above matrix can be split as a trivially separable part $\sigma_s$ plus a PPT state $\tau$ that satisfies $r(\tau)+r(\tau^\Gamma) \leq 3N$ with $r(\omega)$ being the dimensionality of the range of $\omega$:
\begin{eqnarray}\label{pptness}
\sigma&=&\sigma_s+\tau\\
\sigma_s&=&\sum_{n=0}^{N-1}\left( x_n^2-\max(y_n^2,y_{n+1}^2) \right) \ket{{\mathbf 0} n}\bra{{\mathbf 0} n} \\
\tau &=&\sum_{n=0}^{N-1}\left(\max(y_n^2,y_{n+1}^2)\ket{{\mathbf 0} n}\bra{{\mathbf 0} n} +\ket{{\mathbf 1} n}\bra{{\mathbf 1} n}\right)\\
&&+ \sum_{n=1}^{N-1} y_n \left(\ket{{\mathbf 1} n-1}\bra{{\mathbf 0} n}+\ket{{\mathbf 0} n}\bra{{\mathbf 1} n-1}\right).\nonumber
\end{eqnarray}
From the above it is clearly seen that the separability properties of $\sigma$ are the same as those of $\tau$.\\
Having transformed the original states (\ref{state}) to the form (\ref{pptness}) in what follows we will show that there are bound entangled states that satisfy (\ref{comm}). Since we are only interested in showing the existence of bound entangled states of the type (\ref{state}) we will consider the smallest dimension in which this is possible, \emph{i.e.}, $N=4$. The study of the entanglement properties of these states for arbitrary $N$ will be presented elsewhere \cite{NicoAnna13}, nevertheless we mention that based on the same ideas that will be presented in the following paragraphs it is possible to show that there are bound entangled states for arbitrary $N$. For the sake of concreteness we will only examine the cases in which:
\begin{eqnarray}\label{ineq}
y_{i}\leq y_{i+1},
\end{eqnarray}
thus,
\begin{eqnarray}\label{N4}
\tau=\left(
\begin{array}{cccc|cccc}
y_1^2 & & & & & & & \\
& y_2^2 & & & y_1 & & & \\
& & y_3^2 & & & y_2 & & \\
& & & y_3^2 & & & y_3 & \\
\hline
& y_1 & & & 1 & & & \\
& & y_2 & & & 1 & & \\
& & & y_3 & & & 1 & \\
& & & & & & & 1 \\
\end{array}
\right)
\end{eqnarray}
The dimensions of the ranges of $\tau$, and $\tau^\Gamma$, are given by $r(\tau)=7$ and $r(\tau^\Gamma)=5$ if none of the inequalities (\ref{ineq}) are saturated. It is known \cite{kraus00} that the matrix is separable if $r(\tau)=4$ or $r(\tau^\Gamma)=4$; therefore, one should check whether there are enough product vectors in the range so that the rank of the matrices diminishes appropriately. The $3$ vectors in the Kernel of $\tau^\Gamma$, are given by:
\begin{eqnarray}\label{consn}
\ket{\phi_1}&=&-\ket{{\mathbf 0} 0}+y_1\ket{{\mathbf 1} 1},\\
\ket{\phi_2}&=&-\ket{{\mathbf 0} 1}+y_2\ket{{\mathbf 1} 2},\nonumber\\
\ket{\phi_3}&=&-\ket{{\mathbf 0} 2}+y_3\ket{{\mathbf 1} 3}\nonumber,
\end{eqnarray}
whereas the vector in the kernel of $\tau$, is simply:
\begin{eqnarray}\label{cons1}
\ket{\chi_3}&=&-\ket{{\mathbf 0} 3}+y_3\ket{{\mathbf 1} 2}.
\end{eqnarray}
Now we need to find a separable vector $\ket{\mathbf{e}f}$ such that
\begin{eqnarray}
\braket{\chi_3|\mathbf{e}f}=0 \quad \text{and} \quad \braket{\phi_n|\mathbf{e}^\star f}=0 \ \forall n.
\end{eqnarray}
It is found that the unique (up to a phase and normalization) separable vector that is orthogonal to (\ref{consn}) and that upon complex conjugation in the qubit system is orthogonal to (\ref{cons1}) is:
\begin{eqnarray}\label{states}
\ket{\mathbf{e}f}_{\theta}&=&\left(\ket{{\mathbf 0}}+\frac{e^{i\theta}}{y_3}\ket{{\mathbf 1}} \right)\nonumber\\
&&\otimes \left(\frac{y_1 y_2}{y_3} \ket{0}+y_2 e^{i \theta} \ket{1}+y_3 e^{i 2\theta}\ket{2}+ e^{i 3\theta} y_3\ket{3}\right).
\end{eqnarray}
Now, to show that there are bound entangled states of the type defined by (\ref{state}) we will look at a subset of the states defined by (\ref{N4}). Up to now we assumed that none of the inequalities (\ref{ineq}) was saturated. To construct our bound entangled state we will look at the case when one of them is saturated, to be precise, we fix $y_2=y_1$ and thus our state is simply:
\begin{eqnarray}\label{bound4}
\tau(y_2,y_3)=\left(
\begin{array}{cccc|cccc}
y_2^2 & & & & & & & \\
& y_2^2 & & & y_2 & & & \\
& & y_3^2 & & & y_2 & & \\
& & & y_3^2 & & & y_3 & \\
\hline
& y_2 & & & 1 & & & \\
& & y_2 & & & 1 & & \\
& & & y_3 & & & 1 & \\
& & & & & & & 1 \\
\end{array}
\right).
\end{eqnarray}
With this new constraint the dimension of the range of $\tau$ is decreased by one and a new vector appears in the Kernel of $\tau$:
\begin{eqnarray}\label{ketn}
\ket{\chi_{1}}=-\ket{{\mathbf 0} 1}+y_{1}\ket{{\mathbf 1} 0}.
\end{eqnarray}
As we mentioned before, the vector (\ref{states}) is the \emph{only} separable vector that is orthogonal to (\ref{consn}) and (\ref{cons1}) and such constraints are not modified by assuming $y_2=y_1$. Nevertheless this last assumption also implies that vector (\ref{states}) must also be orthogonal to (\ref{ketn}) for the state $\tau$ to be separable. The inner product between the separable vector (\ref{states}), and (\ref{ketn}) is:
\begin{eqnarray}
\braket{\chi_{1}|\mathbf{e} f}_{\theta}=e^{i \theta} y_2\left(\frac{y_2}{y_3}-1 \right) \neq 0.
\end{eqnarray}
Since we are free to take $y_2 < y_3$ we conclude that in such case there is no separable vector $\ket{\mathbf{e}f}$ that satisfies the hypothesis of the range criterion (\ref{rangecrit}) and hence we conclude that \emph{there exist bound entangled states of the type defined by (\ref{comm}) for $N > 3$}. \\
We point out that the above argument requires at least three $y_i$ and thus not surprisingly will only work for $N > 3$. In \ref{app:hull} we construct the convex hulls for the PPT separable states for $N=2,3$. In \ref{app:crit} we explicitly evaluate two often used criteria that have been shown to detect some bound entangled states and numerically show that they are unable to detect bound entanglement for the states considered here.
\section{Generation of bound entanglement using the JC interaction}\label{sec:dyn}
In the last section it was shown that there are bound entangled states compatible with the JC symmetries. In this section we shall show that the JC interaction can be used to generate bound entanglement from uncorrelated states. We will study the interaction of an atom prepared in the unpolarized state:
\begin{eqnarray}
\rho(0)_A = (1-\lambda) \ket{{\mathbf 0}} \bra{{\mathbf 0}}+\lambda \ket{{\mathbf 1}}\bra{{\mathbf 1}}
\end{eqnarray}
where $\lambda$ represents the probability of having the atom in the excited state and a field prepared in the thermal state:
\begin{eqnarray}
\rho(0)_B=\sum_{n=0}^{\infty} p_n \ket{n}\bra{n}
\end{eqnarray}
with
\begin{eqnarray}
p_n=\frac{m^n}{(1+m)^{n+1}}
\end{eqnarray}
and $m=\braket{ a ^\dagger a}$ is the mean number of photons in the field. The atom-cavity system starts in the product state:
\begin{eqnarray}
\rho(0)= \rho(0)_A \otimes \rho(0)_F
\end{eqnarray}
The dynamics of this type of states under the resonant JC Hamiltonian ($\Delta=0$ in equation (\ref{HJC})) has been studied by Scheel \emph{et al.} in \cite{Plenio03}. They find that in the $(\lambda,m)$ parameter space there are three separate regions:
\begin{enumerate}
\item In the first region the state becomes free entangled immediately after the interaction between the atom and the field starts, \emph{i.e.}, for $t>0$ the state does not have a positive partial transpose.
\item In the second region the state becomes free entangled only after some finite $\bar t$, \emph{i.e.} for some finite $\bar t$ the state has positive partial transpose and then after this time the state becomes free entangled.
\item In the third region they find that the state is PPT for all times.
\end{enumerate}
In this section we will be interested in the cases for which the state remains PPT for a finite time after the interaction starts, \emph{i.e.} regions (ii) and (iii).\\
To understand how bound entanglement is generated we first write explicitly the time evolution generated by $H_{JC}$ in the \emph{resonant} case:
\begin{eqnarray}
\rho(t)&=&\exp\left(-i H_{JC} t \right) \rho(0) \exp\left(i H_{JC} t \right)\\&=&
\alpha_0^- \ket{ {\mathbf 0} 0}\bra{ {\mathbf 0} 0}+\sum_{n=1}^\infty f_n \left(\alpha_{n}^- \pr{{\mathbf 0} n}+\alpha_{n}^+ \pr{{\mathbf 1} n-1} \right.\nonumber\\
&&\quad \quad \quad \quad \quad \quad \quad \left. +\beta_n \left[ \ket{{\mathbf 0} n}\bra{{\mathbf 1} n-1}+ \ket{{\mathbf 1} n-1}\bra{{\mathbf 0} n} \right] \right) \nonumber
\end{eqnarray}
with:
\begin{eqnarray}
f_n&=&\frac{1}{2} m^{n-1} (m+1)^{-n-1}\\
\alpha_{n}^{\pm}&=&m+\lambda \pm (\lambda+m (2 \lambda-1)) \cos \left(2 g \sqrt{n} t\right)\\
\beta_n&=&(m (2 \lambda-1)+\lambda) \sin \left(2 g \sqrt{n} t\right).
\end{eqnarray}
Remembering the notation introduced in equations (\ref{not1}) and (\ref{not2}) it is found that the nonzero elements of $\rho$ are:
\begin{eqnarray}
a_n=f_n \alpha_{n}^-, \quad b_n=f_{n+1} \alpha_{n+1}^+, \quad c_n=|f_n \beta_n|,
\end{eqnarray}
and from them the $y_n$ defined in equation (\ref{yn}) can be found. We will be interested in finite times short enough that the Taylor expansion
\begin{eqnarray}\label{yshort}
y_n&=&\frac{f_n \beta_n}{\sqrt{f_n \alpha_{n}^+ f_{n+1} \alpha_{n+1}^+}} \\
&\approx& \left|\sqrt{n}T +\mathcal{O}\left(T^3\right) \right|
\end{eqnarray}
with
\begin{eqnarray}
T &=& \left|\frac{\lambda+ m (2 \lambda-1)}{\lambda \sqrt{m (m+1)} } \right| g t
\end{eqnarray}
remains valid. Finally, since we are only interested in the existence of bound entanglement we will truncate the photon ladder at 3 photons thus including only the lowest 4 Fock states. This corresponds to a local operation in the photonic system and thus cannot create entanglement. In particular that means that if the truncated density operator is entangled then it can be concluded that the full state is entangled.\\
Applying the truncation and decomposing the state according to equations (\ref{filter}) and (\ref{pptness}) it is found that the bound entanglement properties of the state $\rho(t)$ are equivalent to those of the state (\ref{N4}) with the $y_i$ given in equation (\ref{yshort}). Now to show that such state is entangled we note that it can be written as:
\begin{eqnarray}
\tau=\tau_1+\lambda \ket{\mathbf{e}f}\bra{\mathbf{e}f}_{\theta=0}
\end{eqnarray}
with $\ket{\mathbf{e}f}_{\theta}$ given by equation (\ref{states}) and $\lambda=\frac{27}{43} T^2$ and $\tau_1$ being a PPT state that satisfies:
\begin{eqnarray}
K(\tau_1)&=&\mathrm{span}\left(\ket{\chi_3},\ket{\zeta} \right)\\
K(\tau_1^\Gamma)&=&\mathrm{span}\left(\ket{\phi_1}, \ket{\phi_2},\ket{\phi_3} \right)
\end{eqnarray}
with $\ket{\chi_3}$ and the $\ket{\phi_i}$ given in equations (\ref{cons1}) and (\ref{consn}) and
\begin{eqnarray}
\ket{\zeta}&=&\frac{\sqrt{6}}{ T } \ket{{\mathbf 0} 0}+ \frac{2 \sqrt{2}}{ T } \ket{{\mathbf 0} 1}+\frac{\sqrt{3}}{ T}\ket{{\mathbf 0} 2}+\frac{\sqrt{3}}{ T }\ket{{\mathbf 0} 3} -\sqrt{2}\ket{{\mathbf 1} 0}+3 \ket{{\mathbf 1} 3}.
\end{eqnarray}
To show that $\tau_1$ is entangled it is sufficient to notice that the only separable vector that satisfies being orthogonal to $\ket{\chi_3}$ and that upon conjugation on the first subsystem is orthogonal to $\ket{\phi_1},\ket{\phi_2},\ket{\phi_3}$ is $\ket{\mathbf{e}f}_{\theta}$. Nevertheless, it is easily seen that:
\begin{eqnarray}
\braket{\zeta|\mathbf{e}f}_\theta \neq 0 \quad \forall \theta
\end{eqnarray}
and thus we conclude that $\tau_1$ is an edge entangled state and that $\tau$ is bound entangled since it is a mixture of an entangled state and a pure separable one.\\
This result implies that for any value of the parameters $m$ and $\lambda$ (other than $m=0$ or $\lambda=0$ in which case the Taylor expansion (\ref{yshort}) is meaningless) the state always becomes entangled for short times after the JC interaction between the atom and field starts. If the initial state happens to be sitting in the region (i) mentioned at the beginning of this section it will become free entangled. More interestingly for our purposes, \emph{if it happens to be sitting in region (ii) or (iii) it will become bound entangled}.
\section{Conclusion}\label{sec:conc}
In this paper we have analyzed the entanglement properties in the JC model. To this aim we have first used the conservation of the number operator to formalize the mixed states that naturally occur when a two-level atom interacts with an electromagnetic field in a cavity evolving under the JC dynamics. We have first examined the limitations of the entanglement criteria used so far to study these states. Then, to the best of our knowledge, we have for the first time demonstrated (analytically) that bound entanglement exists in such a model and the failure of some of the criteria used so far to detect such type of entanglement. Finally, we have shown that the JC interaction can be used to generate bound entangled states starting from mixed uncorrelated ones. Our results have implications for all systems whose dynamics can be approximated by a Jaynes-Cummings Hamiltonian, such as ion traps and cavity/circuit QED.
\section*{Acknowledgments}\label{sec:ack}
We are indebted to M. Lewenstein for crucial enlightening discussions. N.Q. gratefully acknowledges valuable discussions with O. Gittsovich, G. T\'oth, D. \v{Z}. Djokovi{\'c}, E. Wolfe, D.F.V. James and financial support from the National Sciences and Engineering Research Council of Canada. A.S. acknowledges support from the Spanish MICINN FIS2008-01236 European Regional Development Fund (FEDER), discussions with R. Quesada and specially the kind hospitality of J. H. Thywissen and the Univ. of Toronto where this work was initiated.
\appendix
\section{Bound entanglement detection criteria}\label{app:crit}
In the entanglement detection criteria to be evaluated in this appendix it is assumed that the states are normalized:
\begin{eqnarray}
\tr(\rho)=\sum_{n=0}^{N-1}\left(a_n+b_n \right)=1.
\end{eqnarray}
It will also be convenient to write the marginals (reduced density matrices) of the qubit and the qudit:
\begin{eqnarray}\label{marginals}
\rho_{A}=\tr_{B}\left(\rho\right)=\sum_{{\mathbf i} = {\mathbf 0}}^{{\mathbf 1}} \alpha_{{\mathbf i}} \ket{{\mathbf i}}\bra{{\mathbf i}}, \quad
\rho_{B}=\tr_{A}\left(\rho\right)=\sum_{n=0}^{N-1} \beta_n \ket{n}\bra{n},
\end{eqnarray}
where the coefficients are given by:
\begin{eqnarray}\label{coeff}
\alpha_{{\mathbf 0}}=\sum_{n=0}^{N-1} a_n, \quad \alpha_{{\mathbf 1}}=\sum_{n=0}^{N-1} b_n, \quad
\beta_n=a_n+b_n.
\end{eqnarray}
They satisfy the normalization $\alpha_{\mathbf 1} +\alpha_{\mathbf 0} =\tr\left(\rho \right)=\sum_{n=0}^{N-1} \beta_n$.\\
The first criterion that we evaluate is the computable cross-norm or realignment (CCNR) criterion. The CCNR criterion states that if $\rho$ is separable, then the following inequality must hold \cite{Rudolph03,Guhne09},
\begin{equation}\label{CCN}
\|\mathcal{R}(\rho)\|\leq1,
\end{equation}
here $\|\cdot\|$ stands for the trace norm (\emph{i.e.} the sum of the singular values). The realignment operation is defined as $\mathcal{R}(A\otimes B)=|A\rangle\langle B^{*}|$, with scalar product $\langle B|A\rangle=\mathrm{tr}(B^{\dag} A)$ in the Hilbert Schmidt space of operators. For the states considered here the singular values are:
\begin{eqnarray}
s(\mathcal{R}(\rho))=\{ |\vec c|, |\vec c|, x_+,x_-\},
\end{eqnarray}
with
\begin{eqnarray*}
x_{\pm}=\sqrt{\frac{|\vec a|^2+|\vec b|^2\pm\sqrt{(|\vec a|^2-|\vec b|^2)^2+4 (\vec a \cdot \vec b)^2}}{2}},
\end{eqnarray*}
and where the following real vectors are defined in terms of the populations and coherences of the density matrix:
\begin{eqnarray}
\vec a& =& \left\{a_0,\ldots, a_{N-1}\right\} \in \mathbb{R}^N\\
\vec b& =& \left\{b_0,\ldots, b_{N-1}\right\} \in \mathbb{R}^N \nonumber\\
\vec c& =& \left\{|c_1|,\ldots,|c_{N-1}|\right\} \in \mathbb{R}^{N-1}. \nonumber
\end{eqnarray}
and $|\vec x|=\sqrt{\vec x \cdot \vec x}$.
Note that the four singular values will be nonzero unless $|\vec c|=0$ \emph{i.e.} $c_n=0 \ \forall n$, or $(\vec a \cdot \vec b)^2=|\vec a|^2 |\vec b|^2$ \emph{i.e.} $a_n= w b_n \ \forall n$ for some constant $w$.
If $\|\mathcal{R}(\rho)\|>1$ the state $\rho$ will be entangled, explicitly one finds that:
\begin{eqnarray}
\|\mathcal{R}(\rho)\|&=&2 |\vec c|+x_+ + x_-=2|\vec c|+\sqrt{|\vec a|^2+|\vec b|^2+2\sqrt{|\vec a|^2 |\vec b|^2-(\vec a \cdot \vec b)^2}}.
\end{eqnarray}
In \cite{Kai05} it is shown that $ \|\mathcal{R}(\rho)\|-1$ will provide a lower bound of the concurrence that is \emph{in principle} independent of whether the state has positive PT or not. We generated more than ten million states for $N=4$ of which a million and a half were PPT. In all cases $\mathcal{N}(\rho)\geq \max\{ \| \mathcal{R}(\rho) \| -1,0 \}$.
Finally, one can improve the entanglement detection capabilities of the CCNR criterion by the following corollary of the CM method \cite{Zhang08,Oleg08}. The corollary states that if $\rho$ is separable then:
\begin{eqnarray}\label{zzzg}
\|\mathcal{R}(\rho-\rho_A \otimes \rho_B)\|^2
\leq \left(1-\tr\left\{\rho_A^2 \right\} \right) \left(1-\tr\left\{\rho_B^2 \right\} \right).
\end{eqnarray}
For the states considered here one can show that:
\begin{eqnarray}
\|\mathcal{R}(\rho-\rho_A \otimes \rho_B)\|=2|\vec c|+\sqrt{2}|\alpha_{{\mathbf 1}} \vec a-\alpha_{{\mathbf 0}} \vec b|.
\end{eqnarray}
The coefficients $\alpha_{{\mathbf i}}$ are given in equations (\ref{coeff}). To test if this criterion could detect any bound entanglement we generated a million and a half PPT states in $2 \times 4$ and none of them violated inequality (\ref{zzzg}). To confirm that indeed the CM corollary (which is always stronger than the CCNR criterion \cite{Oleg08}) does not detect the states (\ref{bound4}) we considered $N=4$ and looked at the two parameter family of normalized states $\rho(y_2,y_3) = \tau(y_2,y_3)/\tr(\tau(y_2,y_3))$ in the range $0 < y_2 <y_3 \leq 10$ and in none of the parameter values the inequality (\ref{zzzg}) was violated.
\section{Convex hulls for $N=2,3$}\label{app:hull}
As it is well known in these cases being PPT \emph{implies} separability. In this appendix we explicitly construct the convex hull of the PPT states.
For future convenience we will introduce the following projector CP-map that acts on states $\omega$:
\begin{eqnarray}
\mathcal{P}(\omega)\equiv \frac{1}{2N}\sum_{k=0}^{2N-1} \exp\left(i \frac{\pi k \Pi}{N}\right)\omega \exp\left(-i \frac{\pi k \Pi}{N}\right).
\end{eqnarray}
The superoperator $\mathcal{P}$ has the property that for any $\omega$:
\begin{eqnarray}
\left[\mathcal{P}(\omega), \Pi \right]=0.
\end{eqnarray}
Thus $\mathcal{P}$ projects onto the subspace defined by equation (\ref{comm}), it satisfies $\mathcal{P}( \mathcal{P}(\omega))=\mathcal{P}(\omega)$, cannot generate entanglement since it can be implemented with local operations and classical communication and it maps density operators to density operators.\\
In the $N=2$ case one has:
\begin{eqnarray}\label{22}
\tau=
\left(
\begin{array}{cc|cc}
y_1^2 & & & \\
& y_1^2 & y_1 & \\
\hline
& y_1 & 1 & \\
& & & 1 \\
\end{array}
\right).
\end{eqnarray}
It is easy to see that there are separable product vectors $\ket{\mathbf{e} f}$ in the range of $\tau$, $R(\tau)$, such that $\ket{\mathbf{e}^\star f}$ is in the range of $\tau^\Gamma$, $R(\tau^\Gamma)$. One of such product vectors is:
\begin{eqnarray}
\ket{\mathbf{g} h}=( \ket{{\mathbf 0}}+\frac{1}{y_1}\ket{{\mathbf 1}})\otimes(y_1 \ket{0}+y_1\ket{1}).
\end{eqnarray}
It is easily shown that $\mathcal{P}(\ket{\mathbf{g}h}\bra{\mathbf{g}h})$ is precisely $\tau$, thus showing that $\tau$ lies in the convex hull of separable states. Not surprisingly, the four vectors that are added to construct $\mathcal{P}(\ket{\mathbf{g}h}\bra{\mathbf{g}h})$ and obtain (\ref{22}) are the same four vectors that one would obtain by using the Wootters formula for a system of 2 qubits\cite{Wootters98}.\\
For $N=3$ one has two possibilities for $\tau$:
\begin{eqnarray}
\tau= \left(
\begin{array}{ccc|ccc}
y_1^2 & & & & & \\
& y_1^2 & & y_1 & & \\
& & y_2^2 & & y_2 & \\
\hline
& y_1 & & 1 & & \\
& & y_2 & & 1 & \\
& & & & & 1 \\
\end{array}
\right) \quad \mbox{or} \quad
\left(
\begin{array}{ccc|ccc}
y_1^2 & & & & & \\
& y_2^2 & & y_1 & & \\
& & y_2^2 & & y_2 & \\
\hline
& y_1 & & 1 & & \\
& & y_2 & & 1 & \\
& & & & & 1 \\
\end{array}
\right)
\end{eqnarray}
depending on whether $y_1 \geq y_2$ or $y_2 \geq y_1$. In the first case a vector that is in $R(\tau)$ and $R(\tau^\Gamma)$ is simply:
\begin{eqnarray}\label{33}
\ket{\mathbf{g}h}= \left( \ket{{\mathbf 0}}+\frac{1}{y_1}\ket{{\mathbf 1}}\right)\otimes(y_1\ket{0}+y_1\ket{1}+y_2\ket{2})
\end{eqnarray}
upon subtracting $\mathcal{P}(\ket{\mathbf{g}h}\bra{\mathbf{g}h})$ one obtains $\left(1-\frac{y_2^2}{y_1^2} \right)\ket{{\mathbf 1}}\bra{{\mathbf 1}}\otimes \ket{2}\bra{2}$. In the case where $y_2 \geq y_1$ one finds a product vector in $R(\tau)$ and $R(\tau^\Gamma)$ by swapping $y_1$ and $y_2$ in (\ref{33}); upon subtracting $\mathcal{P}(\ket{\mathbf{g}h}\bra{\mathbf{g}h})$ one obtains $\left(1-\frac{y_1^2}{y_2^2} \right)\ket{{\mathbf 0}}\bra{{\mathbf 0}}\otimes \ket{2}\bra{2}$, thus completing the construction for $N=2,3$. We note that the technique presented here using the projector $\mathcal{P}$ is equivalent to the uniform mixing/averaging over all phases used to find separability proofs for symmetric mixed states of $N$ qubits\cite{elie13}.
\section*{References}
\end{document} |
\begin{document}
\title{Real zeros of Random Dirichlet series}
\author{Marco Aymone}
\maketitle
\begin{abstract}
Let $F(\sigma)$ be the random Dirichlet series $F(\sigma)=\sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}$, where $\mathcal{P}$ is an increasing sequence of
positive real numbers and $(X_p)_{p\in\mathcal{P}}$ is a sequence of i.i.d. random variables
with $\mathbb{P}(X_1=1)=\mathbb{P}(X_1=-1)=1/2$. We prove that, for certain conditions on $\mathcal{P}$, if $\sum_{p\in\mathcal{P}}\frac{1}{p}<\infty$ then with positive probability
$F(\sigma)$ has no real zeros while if $\sum_{p\in\mathcal{P}}\frac{1}{p}=\infty$, almost surely $F(\sigma)$ has an infinite number of real zeros.
\end{abstract}
\section{Introduction.}
A Dirichlet series is an infinite sum of the form $F(\sigma):=\sum_{p\in\mathcal{P}}\frac{X_p}{p^{\sigma}}$, where $\mathcal{P}$ is an
increasing sequence of positive real numbers and $(X_p)_{p\in\mathcal{P}}$ is any sequence of complex numbers. If $F(\sigma)$ converges then $F(s)$ converges for all
$s\in\mathds{C}$ with real part greater than $\sigma$ (see \cite{montgomerylivro} Theorem 1.1). The abscissa of convergence of a Dirichlet series is the smallest number $\sigma_c$
for which $F(\sigma)$ converges for all $\sigma>\sigma_c$.
The problem of finding the zeros of a Dirichlet series is classical in Analytic Number Theory. For instance, the Riemann hypothesis states that the zeros of the analytic continuation
of the Riemann zeta function $\zeta(\sigma):=\sum_{k=1}^\infty\frac{1}{k^\sigma}$ in the half plane $\{\sigma+it\in\mathds{C}:\sigma>0\}$ all have real part equal to $1/2$.
This analytic continuation can be described in terms of a convergent Dirichlet series -- The Dirichlet $\eta$-function
$\eta(s)=\sum_{k=1}^\infty\frac{(-1)^{k+1}}{k^s}$ satisfies $\eta(s)=(1-2^{1-s})\zeta(s)$, for all complex $s$ with positive real part. Thus, to find zeros of $\eta(s)$ for $0<Re(s)<1$ is the same as finding non-trivial zeros of $\zeta$.
In this paper we are interested in the real zeros of the random Dirichlet series $F(\sigma):=\sum_{p\in\mathcal{P}} \frac{X_p}{p^{\sigma}}$, where
the coefficients $(X_p)_{p\in\mathcal{P}}$ are random and $\mathcal{P}$ satisfies:
\begin{align*}
&(P1)\quad \mathcal{P}\cap[0,1)=\varnothing,\\
&(P2) \quad \sum_{p\in\mathcal{P}}\frac{1}{p^\sigma} \mbox{ has abscissa of convergence }\sigma_c=1.
\end{align*}
For instance, $\mathcal{P}$ can be the set of the natural numbers. The conditions $(P1-P2)$ imply, in particular, that the series $\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}$ converges for each $\sigma>1/2$.
Therefore, if $(X_p)_{p\in\mathcal{P}}$ is a sequence of i.i.d. random variables with $\mathbb{E} X_p =0 $ and $\mathbb{E} X_p^2=1$, then, by the Kolmogorov one-series Theorem,
the series $F(\sigma)=\sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}$ has \textit{a.s.} abscissa of convergence $\sigma_c=1/2$. Moreover, the function of one complex variable
$\sigma+it\mapsto F(\sigma+it)$ is \textit{a.s.} an analytic function in the half plane $\{\sigma+it\in\mathds{C}:\sigma>1/2\}$. In the case $X_p=\pm 1$ with equal probability,
the line $\sigma=\sigma_c$ is a natural boundary for $F(\sigma+it)$, see \cite{kahane} (pg. 44 Theorem 4).
Our main result states:
\begin{theorem}\label{Teorema 1} Assume that $\mathcal{P}$ satisfies $P1$-$P2$ and let $(X_p)_{p\in\mathcal{P}}$ be i.i.d and such that $\mathbb{P}(X_p=1)=\mathbb{P}(X_p=-1)=1/2$. Let
$F(\sigma)=\sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}$.\\
i. If $\sum_{p\in\mathcal{P}} \frac{1}{p}<\infty$, then with positive probability $F$ has no real zeros;\\
ii. If $\sum_{p\in\mathcal{P}} \frac{1}{p}=\infty$, then \textit{a.s.} $F$ has an infinite number of real zeros.
\end{theorem}
It follows as corollary to the proof of item i. that in the case $\sum_{p\in\mathcal{P}}\frac{1}{p}=\infty$, with positive probability $F(\sigma)$ has no zeros in the interval
$[1/2+\delta,\infty)$, for fixed $\delta>0$.
Since a Dirichlet series $F(s)=\sum_{p\in\mathcal{P}}\frac{X_p}{p^s}$ is a random analytic function, it can be viewed as a random Taylor series
$\sum_{k=0}^\infty Y_k (s-a)^k$, where $a>\sigma_c$ and $(Y_k)_{k\in\mathds{N}}$ are random and \textit{dependent} random variables. The case of random Taylor series
and random polynomials where $(Y_k)_{k\in\mathds{N}}$ are i.i.d. has been widely studied in the literature, for an historical background we refer to \cite{krishnapurzeros} and
\cite{Vurandompolynomials} and the references therein.
\section{Preliminaries}
\subsection{Notation.} We employ both $f(x)=O(g(x))$ and Vinogradov's $f(x)\ll g(x)$ to mean that there exists a constant $c>0$
such that $|f(x)|\leq c |g(x)|$ for all sufficiently large $x$, or when $x$ is sufficiently close to a certain real number $y$.
For $\sigma\in\RR$, $\mathds{H}_{\sigma}$ denotes the half plane $\{z\in\mathds{C}:Re(z)>\sigma\}$. The indicator function of a set $S$ is denoted by $\mathds{1}_S(s)$
and it is equal to $1$ if $s\in S$, or equal to $0$ otherwise. We let $\pi(x)$ to denote the counting function of $\mathcal{P}$:
\begin{equation*}
\pi(x):=|\{p\leq x:p\in\mathcal{P}\}|.
\end{equation*}
\subsection{The Mellin transform for Dirichlet series} In what follows $\mathcal{P}=\{p_1<p_2<...\}$ is a set of non-negative real numbers satisfying $P1$-$P2$ above.
A generic element of $\mathcal{P}$ is denoted by $p$, and we employ $\sum_{p\leq x}$ to denote $\sum_{p\in\mathcal{P}; p\leq x}$.
Let $A(x)=\sum_{p \leq x} X_p$ and $F(s)=\sum_{p\in\mathcal{P}}\frac{X_p}{p^s}$. Let $\sigma_c>0$ be the abscissa of convergence of $F(\sigma)$. Then $F$ can be
represented as the Mellin transform of the function $A(x)$ (see, for instance, Theorem 1.3 of \cite{montgomerylivro}):
\begin{equation}\label{equation integral representation for Dirichlet series}
F(s)=s\int_{1}^\infty A(x)\frac{dx}{x^{1+s}},\mbox{ for all } s\in\mathds{H}_{\sigma_c}.
\end{equation}
In particular, we can state:
\begin{lemma}\label{lemma mellin transform application} Let $F(s)=\sum_{p\in\mathcal{P}} \frac{X_p}{p^s}$ be such that $F(1/2)$ is convergent.
Then for each $\sigma\geq1/2$ and all $\epsilon>0$, for all $U>1$:
\begin{equation*}
F(\sigma+\epsilon)=\sum_{p\leq U} \frac{X_p}{p^{\sigma+\epsilon}} + O\bigg{(}U^{-\epsilon}\sup_{x>U}\bigg{|}\sum_{U< p\leq x}\frac{X_p}{p^\sigma} \bigg{|} \bigg{)},
\end{equation*}
where the implied constant in the $O(\cdot)$ term above can be taken to be $1$.
\end{lemma}
\begin{proof}
Put $\displaystyle A(x)=\sum_{p\leq x} \mathds{1}_{(U,\infty)}(p)\frac{X_p}{p^\sigma}$. By (\ref{equation integral representation for Dirichlet series}) it follows that
\begin{align*}
\sum_{p>U} \frac{X_pp^{-\sigma}}{p^{\epsilon}}&=\epsilon\int_{1}^\infty A(x)\frac{dx}{x^{1+\epsilon}}=\epsilon\int_{U}^\infty\bigg{(} \sum_{U<p\leq x} \frac{X_p}{p^\sigma}
\bigg{)}\frac{dx}{x^{1+\epsilon}}\\
&\ll \sup_{x>U}\bigg{|}\sum_{U< p\leq x}\frac{X_p}{p^\sigma} \bigg{|} \int_{U}^\infty \frac{\epsilon}{x^{1+\epsilon}}dx =
U^{-\epsilon}\sup_{x>U}\bigg{|}\sum_{U< p\leq x}\frac{X_p}{p^\sigma} \bigg{|}.
\end{align*}
\end{proof}
\subsection{A few facts about sums of independent random variables} In what follows we use
\textit{Levy's maximal inequality}: Let $X_1,...,X_n$ be independent random variables. Then
\begin{equation}\label{equation Levy maximal}
\mathbb{P}\bigg{(}\max_{1\leq m \leq n}\bigg{|}\sum_{k=1}^m X_k \bigg{|}\geq t \bigg{)}\leq 3 \max_{1\leq m \leq n}\mathbb{P}\bigg{(}\bigg{|}\sum_{k=1}^m X_k \bigg{|}\geq \frac{t}{3} \bigg{)}.
\end{equation}
\textit{Hoeffding's inequality}: Let $X_1,...,X_n$ be i.i.d. with $\mathbb{P}(X_1=1)=\mathbb{P}(X_1=-1)=1/2$. Let $a_1,...,a_n$ be real numbers. Then for any $\lambda>0$
\begin{equation}\label{equation Hoeffding inequality}
\mathbb{P}\bigg{(}\sum_{k=1}^n a_k X_k \geq \lambda \bigg{)}\leq \exp\bigg{(}-\frac{\lambda^2}{2\sum_{k=1}^n a_k^2} \bigg{)}.
\end{equation}
\section{Proof of the main result}
\begin{proof}[Proof of item i] Since $\sum_{p\in\mathcal{P}}\frac{1}{p}<\infty$ we have by the Kolmogorov one series theorem that the series
$\sum_{p\in\mathcal{P}}\frac{X_p}{\sqrt{p}}$ converges almost surely. In what follows $U>0$ is a large fixed number to be chosen later, $A_U$ is the
event in which $X_p=1$ for all $p\leq U$ and $B_U$ is the event in which
\begin{equation*}
\sup_{x>U} \bigg{|}\sum_{U<p\leq x} \frac{X_p}{\sqrt{p}} \bigg{|} < \frac{1}{10}.
\end{equation*}
We claim that for sufficiently large $U$ on the event $A_U\cap B_U$ the function
$F(s)=\sum_{p\in\mathcal{P}}\frac{X_p}{p^s}$ does not vanish for all
$s\geq \frac{1}{2}$. Further for sufficiently large $U$ we will show that $\mathbb{P}(A_U\cap B_U)>0$.
On the event $A_U\cap B_U$ we have by lemma \ref{lemma mellin transform application}
that
\begin{equation}\label{equacao desigualdade sigma + epsilon}
F(1/2+\epsilon)\geq \sum_{p\leq U}\frac{1}{p^{1/2+\epsilon}}-\frac{1}{10U^\epsilon}\geq \frac{\pi(U)}{U^{1/2+\epsilon}}-\frac{1}{10U^\epsilon},
\end{equation}
where $\pi(U)=\#\{p\leq U: p\in\mathcal{P}\}$. We claim that for each $\delta>0$ we have that
\begin{equation*}
\limsup_{U\to\infty}\frac{\pi(U)}{U^{1-\delta}}=\infty.
\end{equation*}
In fact, this is a consequence of P2: for any $\delta>0$, the series $\sum_{p\in\mathcal{P}}\frac{1}{p^{1-\delta}}$ diverges.
To show that this is true we argue by contraposition: Assume that for some fixed $\delta>0$
$\limsup_{U\to\infty}\frac{\pi(U)}{U^{1-\delta}}<\infty$ and hence that there exists a constant $c>0$ such that for all $U>0$,
$\pi(U)\leq c U^{1-\delta}$. In that case we have for $0<\epsilon<\delta$
\begin{align*}
\sum_{p\leq U}\frac{1}{p^{1-\epsilon}}&=\int_1^U \frac{d\pi(x)}{x^{1-\epsilon}}=\frac{\pi(U)}{U^{1-\epsilon}}-\pi(1) +(1-\epsilon)\int_1^U \frac{\pi(x)}{x^{2-\epsilon}} dx \\
& \leq \frac{cU^{1-\delta}}{U^{1-\epsilon}}+1+(1-\epsilon)\int_1^U \frac{cx^{1-\delta}}{x^{2-\epsilon}}dx\ll 1+\int_1^U \frac{1}{x^{1+(\delta-\epsilon)}}dx\ll 1,
\end{align*}
and hence that the series $\sum_{p\in\mathcal{P}}\frac{1}{p^{1-\epsilon}}$ converges. Therefore, we showed that $\limsup_{U\to\infty}\frac{\pi(U)}{U^{1-\delta}}<\infty$
implies that $\sum_{p\in\mathcal{P}}\frac{1}{p^\sigma}$ has abscissa of convergence $\sigma_c\leq 1-\delta$.
Now we may select arbitrarily large values of $U>1$ for which $\pi(U)\geq U^{1-1/4}$ and $\sum_{p\leq U}\frac{1}{\sqrt{p}}>\frac{1}{10}$, and hence,
by (\ref{equacao desigualdade sigma + epsilon}),
for all $\epsilon>0$ we obtain that
\begin{equation*}
F(1/2+\epsilon)\geq \frac{ U^{1-1/4} }{U^{1/2+\epsilon}}-\frac{1}{10U^\epsilon}=\frac{1}{U^\epsilon}\bigg{(}U^{1/4}-\frac{1}{10}\bigg{)}>0.
\end{equation*}
This proves that on the event $A_U\cap B_U$ we have that $F(s)\neq 0$ for all $s\in[1/2,\infty)$.
Observe that $A_U$ and $B_U$ are independent and $A_U$ has probability $\frac{1}{2^{\pi(U)}}>0$. Now we will show that the complementary event $B_U^c$ has small probability.
Indeed, by applying the Levy's maximal inequality and the Hoeffding's inequality, we obtain:
\begin{align*}
\mathbb{P}(B_U^c)&=\lim_{n\to\infty}\mathbb{P}\bigg{(} \max_{U<x\leq n} \bigg{|}\sum_{U<p\leq x} \frac{X_p}{\sqrt{p}} \bigg{|} \geq \frac{1}{10} \bigg{)}\leq 3\lim_{n\to\infty}
\max_{U<x\leq n}\mathbb{P}\bigg{(} \bigg{|}\sum_{U<p\leq x} \frac{X_p}{\sqrt{p}} \bigg{|} \geq \frac{1}{30} \bigg{)}\\
& \leq 6 \lim_{n\to\infty} \max_{U<x\leq n}
\mathbb{P}\bigg{(} \sum_{U<p\leq x} \frac{X_p}{\sqrt{p}} \geq \frac{1}{30} \bigg{)}
\leq 6 \lim_{n\to\infty} \exp\bigg{(}\frac{-1/30^2}{2\sum_{U<p\leq n}\frac{1}{p}} \bigg{)}\\
& \leq 6 \exp\bigg{(}-\frac{1}{2\cdot30^2\sum_{p>U}\frac{1}{p}} \bigg{)}.
\end{align*}
Since $\sum_{p\in\mathcal{P}}\frac{1}{p}$ is convergent, the tail $\sum_{p>U}\frac{1}{p}$
converges to $0$ as $U\to\infty$. Therefore, for sufficiently large $U$ we can make $\mathbb{P}(B_U^c)<1/2$. \end{proof}
Now we are going to prove Theorem \ref{Teorema 1} part $ii$. We present two different proofs. In the first proof we assume that the counting function of $\mathcal{P}$
\begin{equation}\label{equacao pi(x) ll log x}
\pi(x)\ll \frac{x}{\log x}.
\end{equation}
In this case, for instance, $\mathcal{P}$ can be the set of prime numbers. In this proof we show that, for $\sigma$ close to $1/2$, the infinite sum $\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}$ can be approximated
by the partial sum $\sum_{p\leq y}\frac{X_p}{\sqrt{p}}$ for a suitable choice of $y$ (Lemma \ref{lemma aproximando a serie em sigma pela serie em meio }). Then we show that these partial sums change sign for an infinite number of $y$, and hence, $F(\sigma)=\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}$ changes sign for an infinite number of $\sigma\to 1/2^+$.
The case in which $\mathcal{P}$ is the set of natural numbers, the infinite sum $\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}$ can not be approximated by the finite sum $\sum_{p\leq y}\frac{X_p}{\sqrt{p}}$, \textit{i.e}, Lemma \ref{lemma aproximando a serie em sigma pela serie em meio } fails in this case. Thus, our approach is different in the general case. First we show (Lemma \ref{lemma central do limite}) that $\sum_{p\in\mathcal{P}}\frac{1}{p}=\infty$ implies that
\begin{equation}\label{equacao central do limite}
\frac{\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}}{\sqrt{\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}}}\to_d \mathcal{N}(0,1), \mbox{ as } \sigma\to\frac{1}{2}^+,
\end{equation}
and second, for each $L>0$, the event
\begin{equation*}
\limsup_{\sigma\to\frac{1}{2}^+}\frac{\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}}{\sqrt{\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}}}\geq L
\end{equation*}
is a tail event, and by (\ref{equacao central do limite}), it has positive probability. Similarly,
\begin{equation*}
\liminf_{\sigma\to\frac{1}{2}^+}\frac{\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}}{\sqrt{\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}}}\leq -L
\end{equation*}
also is a tail event and has positive probability. Thus, by the Kolmogorov $0-1$ Law, with probability $1$,
$\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}$ changes sign for an infinite number of $\sigma\to 1/2^+$.
\subsection{Proof of Theorem \ref{Teorema 1} (ii) in the case $\pi(x)\ll \frac{x}{\log x}$}
\begin{lemma}\label{lemma aproximando a serie em sigma pela serie em meio } Assume that $\mathcal{P}$ satisfies $P1$-$P2$ and that $\sum_{p\in\mathcal{P}}\frac{1}{p}=\infty$. Further, assume that $\pi(x)\ll\frac{x}{\log x}$. Let $\sigma>1/2$ and $y=\exp((2\sigma-1)^{-1})\geq 10$. Then there is a constant $d>0$ such that for all $\lambda>0$
\begin{equation*}
\mathbb{P}\bigg{(}\bigg{|} \sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}-\sum_{p\leq y} \frac{X_p}{\sqrt{p}} \bigg{|} \geq 2\lambda \bigg{)}\leq 4\exp(-d\lambda^2).
\end{equation*}
\end{lemma}
\begin{proof} If $|a+b|\geq 2\lambda$ then either $|a|\geq\lambda$ or $|b|\geq \lambda$. This fact combined with the Hoeffding's inequality allows us to bound:
\begin{align*}
\mathbb{P}\bigg{(} \bigg{|} \sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}-\sum_{p\leq y} \frac{X_p}{\sqrt{p}} \bigg{|} \geq 2\lambda\bigg{)} \leq & \mathbb{P} \bigg{(}\bigg{|}
\sum_{p\leq y}X_p \bigg{(}\frac{1}{p^\sigma}-\frac{1}{\sqrt{p}}\bigg{)} \bigg{|} \geq \lambda \bigg{)}\\+& \mathbb{P} \bigg{(}\bigg{|}
\sum_{p>y} \frac{X_p}{p^\sigma}\bigg{|} \geq \lambda \bigg{)} \\
&\leq \exp\bigg{(}-\frac{\lambda^2}{2V_y}\bigg{)}+\exp\bigg{(}-\frac{\lambda^2}{2U_y}\bigg{)},
\end{align*}
where $V_y=\sum_{p\leq y}\bigg{(} \frac{1}{p^\sigma}-\frac{1}{\sqrt{p}} \bigg{)}^2$ and $U_y=\sum_{p>y}\frac{1}{p^{2\sigma}}$. To complete the proof we only need to estimate these quantities. By the mean value theorem
\begin{equation*}
\frac{1}{p^\sigma}-\frac{1}{\sqrt{p}}=(\sigma-1/2)\frac{\log p}{p^\theta}, \mbox{ for some }\theta=\theta(p,\sigma)\in [1/2,\sigma].
\end{equation*}
Therefore
\begin{align*}
V_y&\leq (\sigma-1/2)^2\sum_{p\leq y} \frac{\log^2 p}{p}=(\sigma-1/2)^2\int_{1^-}^y \frac{\log^2 t}{t} d\pi(t)\\
&=(\sigma-1/2)^2\bigg{(} \frac{\pi(y)\log^2 y}{y}-\int_{1^-}^y\pi(t)\frac{2\log t-\log^2t}{t^2}dt\bigg{)}\\
&\ll (\sigma-1/2)^2\bigg{(} \log y+\int_{1^{-}}^y\frac{\log t}{t}dt\bigg{)} \ll (\sigma-1/2)^2\log^2 y.\\
U_y &=\int_{y}^{\infty}\frac{d\pi(t)}{t^{2\sigma}}=-\frac{\pi(y)}{y^{2\sigma}}-\int_y^{\infty}\frac{-2\sigma\pi(t)}{t^{2\sigma+1}}dt\\
&\ll \frac{1}{y^{2\sigma-1}\log y}+2\sigma\int_{y}^\infty \frac{1}{t^{2\sigma}\log t}dt\ll\frac{1}{y^{2\sigma-1}\log y}+\frac{2\sigma}{(2\sigma-1)y^{2\sigma-1}\log y}\\
&\ll\frac{1}{(2\sigma-1)y^{2\sigma-1}\log y}.
\end{align*}
In particular, the choice $y=\exp ((2\sigma-1)^{-1})$ implies that both variances $V_y$ and $U_y$ are $O(1)$. \end{proof}
The simple random walk $S_n=\sum_{k=1}^n X_n$ where $(X_n)_{n\in\mathds{N}}$ is i.i.d with $X_1=\pm 1$ with probability $1/2$ each, satisfies \textit{a.s.}
$\limsup_{n\to\infty} S_n=\infty$ and $\liminf_{n\to\infty} S_n=-\infty$. We follow the same line of reasoning as in the proof of this result
(\cite{shiryaev} pg. 381, Theorem 2) to prove:
\begin{lemma}\label{lemma lim sup acima da variancia} Assume that $\sum_{p\in\mathcal{P}}\frac{1}{p}=\infty$.
Let $y_k$ be an increasing sequence of positive real numbers such that $\lim y_k=\infty$. Then it \textit{a.s.} holds that:
\begin{align*}
&\limsup_{k\to\infty} \frac{\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}}{\sqrt{\sum_{p\leq y_k} \frac{1}{p}}}=\infty,\\
&\liminf_{k\to\infty} \frac{\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}}{\sqrt{\sum_{p\leq y_k} \frac{1}{p}}}=-\infty.
\end{align*}
\end{lemma}
\begin{proof}
We begin by observing that $(X_p/\sqrt{p})_{p\in\mathcal{P}}$ is a sequence of independent and symmetric random variables that are uniformly bounded by $1$.
It follows that
\begin{equation*}
\lim_{y\to\infty} \var \sum_{p\leq y} \frac{X_p}{\sqrt{p}}=\lim_{y\to\infty}\sum_{p\leq y}\frac{1}{p}=\infty,
\end{equation*}
and hence this sequence satisfies the Lindeberg condition. By the Central Limit Theorem it follows that for each fixed $L>0$ there exists a $\delta>0$
such that for sufficiently large $y>0$
\begin{equation*}
\mathbb{P}\bigg{(} \sum_{p\leq y} \frac{X_p}{\sqrt{p}} \geq L \sqrt{\sum_{p\leq y} \frac{1}{p}} \bigg{)}=\mathbb{P}\bigg{(} \sum_{p\leq y} \frac{X_p}{\sqrt{p}} \leq - L \sqrt{\sum_{p\leq y} \frac{1}{p}} \bigg{)}\geq \delta.
\end{equation*}
Next observe that the event in which $\limsup_{k\to\infty} \frac{\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}}{\sqrt{\sum_{p\leq y_k} \frac{1}{p}}}\geq L$ is a tail event,
and hence by the Kolmogorov zero or one law it has either probability zero or one. Since
\begin{align*}
&\mathbb{P} \bigg{(} \sum_{p\leq y_k} \frac{X_p}{\sqrt{p}} \geq L \sqrt{\sum_{p\leq y_k} \frac{1}{p}}\mbox{ for infinitely many }k\bigg{)}\\
&=\lim_{n\to\infty}\mathbb{P} \bigg{(} \bigcup_{k=n}^\infty\bigg{[} \sum_{p\leq y_k} \frac{X_p}{\sqrt{p}} \geq L \sqrt{\sum_{p\leq y_k} \frac{1}{p}}\bigg{]}\bigg{)}\geq \delta,
\end{align*}
it follows that for each fixed $L>0$ $\limsup_{k\to\infty} \frac{\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}}{\sqrt{\sum_{p\leq y_k} \frac{1}{p}}}\geq L$, \textit{a.s.}
Similarly, we can conclude that for each fixed $L>0$ $\liminf_{k\to\infty} \frac{\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}}{\sqrt{\sum_{p\leq y_k} \frac{1}{p}}}\leq -L$, \textit{a.s.} \end{proof}
\begin{proof}[Proof of item ii] Take $\lambda=\lambda(y) = \sqrt{\sum_{p\leq y}\frac{1}{p} }$ in Lemma \ref{lemma aproximando a serie em sigma pela serie em meio } and let $y=\exp((2\sigma-1)^{-1})$. Since $\lim_{y\to\infty}\lambda(y)=\infty$, it follows that there is a subsequence $y_k\to\infty$ for which $\sum_{k=1}^\infty \exp(-d\lambda^2(y_k))<\infty$ and hence, by the Borel-Cantelli Lemma, it \textit{a.s.} holds that
\begin{equation*}
\limsup_{k\to\infty} \frac{\bigg{|} \sum_{p\in\mathcal{P}} \frac{X_p}{p^{\sigma_k}}-\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}\bigg{|}}{\sqrt{\sum_{p\leq y_k}\frac{1}{p} }} \leq 2,
\end{equation*}
where $y_k=\exp((2\sigma_k-1)^{-1})$. This combined with Lemma \ref{lemma lim sup acima da variancia} gives \textit{a.s.}
\begin{align*}
\limsup_{\sigma\to1/2^+} \frac{\sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}}{\sqrt{\sum_{p\leq y}\frac{1}{p}}}&\geq \limsup_{k\to\infty} \frac{ \sum_{p\leq y_k} \frac{X_p}{\sqrt{p}} -\bigg{|}
\sum_{p\in\mathcal{P}} \frac{X_p}{p^{\sigma_k}}-\sum_{p\leq y_k} \frac{X_p}{\sqrt{p}} \bigg{|} }{\sqrt{\sum_{p\leq y_k}\frac{1}{p}}}\\
&\geq \limsup_{k\to\infty}\bigg{(} \frac{ \sum_{p\leq y_k} \frac{X_p}{\sqrt{p}}}{\sqrt{\sum_{p\leq y_k}\frac{1}{p}}} -3\bigg{)}\\
&=\infty.
\end{align*}
Similarly, we conclude that $\liminf_{\sigma\to1/2^+} \sum_{p\in\mathcal{P}} \frac{X_p}{p^\sigma}=-\infty$, \textit{a.s.} Since $F(\sigma)$ is \textit{a.s.} analytic,
it follows that there is an infinite number of $\sigma>1/2$ for which $F(\sigma)=0$. \end{proof}
\subsection{Proof of Theorem \ref{Teorema 1} (ii), the general case}
The following Lemma is an adaptation of \cite{boviergeometricseries}, Theorem 1.2:
\begin{lemma}\label{lemma central do limite} Assume that $\mathcal{P}$ satisfies P1-P2 and that $\sum_{p\in\mathcal{P}}\frac{1}{p}=\infty$. Then
\begin{equation}\label{equacao central do limite 2}
\frac{\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}}{\sqrt{\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}}}\to_d \mathcal{N}(0,1), \mbox{ as } \sigma\to\frac{1}{2}^+.
\end{equation}
\end{lemma}
\begin{proof} Let $V(\sigma)=\sqrt{\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}}$. Observe that
$V(\sigma)\to\infty$ as $\sigma\to 1/2^+$: For each fixed $y>0$
\begin{equation*}
\liminf_{\sigma\to 1/2^+}\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}\geq \lim_{\sigma\to 1/2^+}\sum_{p\leq y}\frac{1}{p^{2\sigma}}=\sum_{p\leq y}\frac{1}{p}.
\end{equation*}
Thus, by making $y\to\infty$ in the equation above, we obtain the desired claim.
For each fixed $\sigma>1/2$, by the Kolmogorov one series Theorem, we have that $\sum_{p\leq y}\frac{X_p}{p^\sigma}$ converges almost surely as $y\to\infty$. Since $(X_p)_{p\in\mathcal{P}}$ are independent, by the dominated convergence theorem:
\begin{align*}
\varphi_\sigma(t)&:=\mathbb{E} \exp \bigg{(}\frac{it}{V(\sigma)}\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}\bigg{)}=\lim_{y\to\infty}\mathbb{E} \exp \bigg{(}\frac{it}{V(\sigma)}\sum_{p\leq y}\frac{X_p}{p^\sigma} \bigg{)}\\
&=\prod_{p\in\mathcal{P}}\cos\bigg{(}\frac{t}{V(\sigma)p^\sigma}\bigg{)}.
\end{align*}
We will show that for each fixed $t\in\RR$, $\varphi_\sigma(t)\to \exp(-t^2/2)$ as $\sigma\to 1/2^+$. Observe that $\varphi_\sigma(t)=\varphi_\sigma(-t)$, so we may assume $t\geq 0$. Thus, for each fixed $t\geq 0$
we may choose $\sigma>1/2$ such that $0\leq\frac{t}{V(\sigma)p^\sigma}\leq \frac{1}{100}$ and $0\leq 1-\cos\big{(}\frac{t}{V(\sigma)p^\sigma}\big{)}\leq \frac{1}{100}$, for all $p\in\mathcal{P}$.
For $|x|\leq 1/100$, we have that $\log(1-x)=-x+O(x^2)$ and $\cos(x)=1-\frac{x^2}{2}+O(x^4)$. Further,
$1-\cos(x)=2\sin^2(x/2)\leq \frac{x^2}{2}$. Thus, we have:
\begin{align*}
\log \varphi_\sigma(t)&=\sum_{p\in\mathcal{P}}\log \cos\bigg{(}\frac{t}{V(\sigma)p^\sigma}\bigg{)}\\
&=\sum_{p\in\mathcal{P}}\log\bigg{(}1-\bigg{(}1-\cos\bigg{(}\frac{t}{V(\sigma)p^\sigma}\bigg{)}\bigg{)}\bigg{)}\\
&=-\sum_{p\in\mathcal{P}}\bigg{(}1-\cos\bigg{(}\frac{t}{V(\sigma)p^\sigma}\bigg{)}\bigg{)}+\sum_{p\in\mathcal{P}}O\bigg{(}1-\cos\bigg{(}\frac{t}{V(\sigma)p^\sigma}\bigg{)}\bigg{)}^2 \\
&=-\sum_{p\in\mathcal{P}}\bigg{(}\frac{t^2}{2V^2(\sigma)p^{2\sigma}}+O\bigg{(}\frac{t^4}{V^4(\sigma)p^{4\sigma}} \bigg{)} \bigg{)}+\sum_{p\in\mathcal{P}}O\bigg{(}\frac{t^4}{V^4(\sigma)p^{4\sigma}} \bigg{)}\\
&=-\frac{t^2}{2V^2(\sigma)}\sum_{p\in\mathcal{P}}\frac{1}{p^{2\sigma}}+\sum_{p\in\mathcal{P}}O\bigg{(}\frac{t^4}{V^4(\sigma)p^{2}} \bigg{)}\\
&=-\frac{t^2}{2}+O\bigg{(}\frac{t^4}{V^4(\sigma)}\bigg{)}.
\end{align*}
We conclude that $\varphi_\sigma(t)\to \exp(-t^2/2)$ as $\sigma\to 1/2^+$. \end{proof}
\begin{proof}[Proof of item ii] Let $V(\sigma)$ be as in the proof of Lemma \ref{lemma central do limite}.
Since $V(\sigma)\to\infty$ as $\sigma\to 1/2^+$, we have, for each fixed $y>0$
\begin{equation*}
\limsup_{\sigma\to1/2^+}\frac{1}{V(\sigma)}\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}=\limsup_{\sigma\to1/2^+}\frac{1}{V(\sigma)}\sum_{p>y}\frac{X_p}{p^\sigma}.
\end{equation*}
Thus, for each fixed $L>0$,
\begin{equation*}
\limsup_{\sigma\to1/2^+}\frac{1}{V(\sigma)}\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}\geq L
\end{equation*}
is a tail event. By Lemma \ref{lemma central do limite}, $\frac{1}{V(\sigma)}\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}\to_d\mathcal{N}(0,1)$, as $\sigma\to1/2^+$. Thus, this tail event has positive probability (see the proof of Lemma \ref{lemma lim sup acima da variancia}). By the Kolmogorov zero or one Law, \textit{a.s.}:
\begin{equation*}
\limsup_{\sigma\to1/2^+}\frac{1}{V(\sigma)}\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}= \infty.
\end{equation*}
Similarly, \textit{a.s.}:
\begin{equation*}
\liminf_{\sigma\to1/2^+}\frac{1}{V(\sigma)}\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}=-\infty.
\end{equation*}
Since $F(\sigma)=\sum_{p\in\mathcal{P}}\frac{X_p}{p^\sigma}$ is \textit{a.s.} an analytic function, with probability $1$ we have that $F(\sigma)=0$ for an infinite number of $\sigma\to1/2^+$. \end{proof}
\noindent \textbf{Acknowledgments.} The proof of Theorem \ref{Teorema 1} item ii was initially presented
only in the case $\pi(x)\ll \frac{x}{\log x}$. I would like to thank the anonymous referee for pointing that this should not be a necessary condition for the existence of an infinite number of real zeros, and for pointing the reference \cite{boviergeometricseries}, from which I could adapt their Theorem 1.2 for random Dirichlet series (Lemma \ref{lemma central do limite}).
{\small{\sc Departamento de Matem\'atica, Universidade Federal de Minas Gerais, Av. Ant\^onio Carlos, 6627, CEP 31270-901, Belo Horizonte, MG, Brazil.} \\
\textit{Email address:} marco@mat.ufmg.br}
\end{document} |
\begin{document}
\title{Quantum Privacy Amplification for Quantum Secure Direct Communication}
\author{}
\author{Fu-Guo Deng$^{1,3}$ and Gui Lu Long$^{1,2}$}
\address{$^{1}$Key Laboratory For Quantum Information and Measurements,
and Department of Physics, Tsinghua University, Beijing 100084,
P. R. China\\
$^{2}$Center for Atomic and Molecular
NanoSciences, Tsinghua University, Beijing 100084, P. R. China\\
$^{3}$Key Laboratory of Beam Technology and Materials Modification
of MOE, and Institute of Low Energy Nuclear Physics, Beijing
Normal University, Beijing 100875, P. R. China}
\date{\today }
\begin{abstract}
Using quantum mechanics, secure direct communication between
distant parties can be performed. Over a noisy quantum channel,
quantum privacy amplification is a necessary step to ensure the
security of the message. In this paper, we present a quantum
privacy amplification scheme for quantum secure direct
communication using single photons. The quantum privacy
amplification procedure contains two control-not gates and a
Hadamard gate. After the unitary gate operations, a measurement is
performed and one photon is retained. The retained photon carries
the state information of the discarded photon, and hence reduces
the information leakage. The procedure can be performed
recursively so that the information leakage can be reduced to any
arbitrarily low level.
\end{abstract}
\pacs{03.67.Hk, 03.67.Dd, 03.67.-a}
\maketitle
Quantum secure direct communication (QSDC) allows two distant
parties to directly communicate securely. It has attracted
attention recently
\cite{beige,bf,long1,long2,cai1,yan0,zhangzj,cai2}. In contrast,
quantum key distribution usually generates a string of random keys
\cite{bb84,ekert91}, and the secret message is transmitted later
through a classical channel, usually using the Vernam one-time-pad
cipher-system\cite{vernam}. Recently, a QSDC protocol using batch
of single photons is proposed \cite{long2}. It has two distinct
features. First it uses single photons instead of entangled photon
pairs. Secondly, the transmission is operated in a batch by batch
manner. This feature is also reflected in the QSDC protocols in
Refs. \cite{long1,yan0,zhangzj}. The secret message is encoded and
released to the quantum channel in public only when the channel is
assured secure so that no secret message is leaked even though a
malicious eavesdropper may intercept the encoded qubits. Over a
noiseless quantum channel, the scheme is completely secure. In
practice, channel noise is inevitable, error correction and
privacy amplification must be used in order to reduce the
information leakage below the desired level.
The basic steps in the QSDC protocol in Ref.\cite{long2} contains
4 steps. First, Bob prepares a batch of $N$ single photons
randomly in one of four polarization states: $|0\rangle=|+z\rangle$,
$|1\rangle=|-z\rangle$, $|+x\rangle=(|0\rangle+|1\rangle)/\sqrt{2}$ and
$|-x\rangle=(|0\rangle-|1\rangle)/\sqrt{2}$. Bob sends this batch of
photons to Alice. Alice stores most of the single photons and
selects randomly a subset of single photons and performs
measurement using either the $\sigma_z$ or $\sigma_x$ basis. Alice
publishes the measuring-basis and the measured results of these
single photons. Upon these information, Bob determines the error
rate. If the error rate is higher than the threshold, the process
is aborted, and if the error rate is below the threshold, the
process continues and Alice encodes her message using two unitary
operations on the stored single photons and then sends them back
to Bob. Upon receiving these encoded photons, Bob makes
appropriate measurement and reads out the message.
Classical privacy amplification (CPA) \cite{bennettcpa} has been
used for QKD, for instance the BB84 protocol \cite{bb84}. Quantum
privacy amplification (QPA) \cite{bennettqpa,deutschqpa} has been
used for QKD using entangled quantum system, for instance for the
Ekert91 QKD scheme \cite{ekert91}. Without privacy amplification,
the error threshold in the proposed QSDC scheme in
ref.\cite{long2} will be very small. To allow the scheme to be
operable over a noisy channel, quantum privacy amplification has
to be used. In this paper, we present a quantum privacy
amplification scheme for QSDC (QSDC-QPA). This QSDC-QPA can be
used not only for the QSDC protocol, but also for the BB84 QKD
protocol. It uses simple local gate operations and single particle
measurement, and these operations could be implemented by
technologies that are currently being developed.
The basic scientific problem for the proposed QSDC-QPA scheme can
be expressed as follows. Suppose Bob sends Alice a batch of single
photons, each photon is randomly prepared in one of the four
quantum states $|0\rangle$, $|1\rangle$, $(|0\rangle+|1\rangle)/\sqrt{2}$ and
$(|0\rangle-|1\rangle)/\sqrt{2}$, where $|0\rangle$ and $|1\rangle$ denote the
horizontal and vertical polarization state of a photon. Due to
channel noise and eavesdropping, an error bit rate $r$ is known
for the photon batch ($r$ is four times the error bit rate
detected by Alice and Bob using random sampling, because an
eavesdropper's interception causes an error only 25 percent of the time). The
QSDC-QPA task is to condense a portion of photons from the batch
so that Eve's information about the condensed photons is reduced
to below a desired level.
The basic operation of QSDC-QPA is shown in Fig.~\ref{f1} for two
qubits. It consists of two controlled-NOT (CNOT) gates and a Hadamard
(H) gate. Using the first qubit as control, two CNOT gates are
performed on the second qubit, or target qubit. Between these two
CNOT gates, a Hadamard gate is performed on the first qubit. Then
a measurement in the $\sigma_z$ basis is performed on the second
qubit. Then the first qubit is the condensed qubit. We now explain
why the above procedure can reduce the information leakage.
Without loss of the generality, we assume the quantum states of
single photon 1 and 2 are written as
\begin{eqnarray}
\left\vert \varphi \right\rangle _{1}&=&a_{1}\left\vert
0\right\rangle +b_{1}\left\vert 1\right\rangle, \label{s1}\\
\left\vert \varphi \right\rangle _{2}&=&a_{2}\left\vert
0\right\rangle +b_{2}\left\vert 1\right\rangle, \label{s2}
\end{eqnarray}
where
\begin{equation}
\vert a_{1}\vert ^{2}+\vert b_{1}\vert ^{2}=\vert a_{2}\vert
^{2}+\vert b_{2}\vert ^{2}=1. \label{r1}
\end{equation}
After the two CNOT gates and H-gate operations,
the state of the joint system of single
photon 1 and 2 is changed to
\begin{eqnarray}
\left\vert \psi \right\rangle _{out} &=&\frac{1}{\sqrt{2}}
\{(a_{1}a_{2}+b_{1}b_{2})\left\vert 0\right\rangle
_{1}+(a_{1}b_{2}-b_{1}a_{2})\left\vert 1\right\rangle
_{1}\}\left\vert
0\right\rangle _{2} \nonumber \\
&+&\frac{1}{\sqrt{2}}\{(a_{1}a_{2}-b_{1}b_{2})\left\vert
1\right\rangle _{1}+(a_{1}b_{2}+b_{1}a_{2})\left\vert
0\right\rangle _{1}\}\left\vert 1\right\rangle _{2}.\nonumber\\
\label{s3}
\end{eqnarray}
After measuring the second qubit in the $\sigma_{z}$-basis, no
matter what the result, the state of the control qubit
$\vert\varphi\rangle_{1out}$ will contain the information of the
state of the original target qubit (qubit 2). Tables \ref{Table1}
and \ref{Table2} give the output state of control qubit after
the measurement on the target qubit with result $0$ and 1
respectively. It depends not only on the result of the measurement
on the target qubit, but also on the original states of the two
input single photons (photon 1 and 2).
\begin{table}
\begin{center}
\caption{The state of the output qubit when the result of the
second qubit measurement is $\vert 0\rangle$. $\varphi_1$ and
$\varphi_2$ are the original states of the control and target
qubit, respectively.}
\begin{tabular}{c|cccc}\hline
& \multicolumn{4}{c}{$\varphi_{1}$}\\ \cline{2-5}
$\varphi_{2 }$& $\left\vert +z\right\rangle $ & $\left\vert -z\right\rangle $ & $
\left\vert +x\right\rangle $ & $\left\vert -x\right\rangle$\\
\cline{2-5}
$\left\vert +z\right\rangle $ & $\left\vert 0\right\rangle
$ & $\left\vert 1\right\rangle $ & $|-x\rangle$ & $|+x\rangle$ \\
$\left\vert -z\right\rangle $ & $\left\vert 1\right\rangle $ &
$\left\vert 0\right\rangle $ & $|+x\rangle$ &
$|-x\rangle$ \\
$\left\vert +x\right\rangle $ & $|+x\rangle$ & $|-x\rangle$ &
$\left\vert 0\right\rangle $ & $\left\vert 1\right\rangle $ \\
$\left\vert -x\right\rangle $ & $|-x\rangle$ & $|+x\rangle$ &
$\left\vert 1\right\rangle $ & $\left\vert 0\right\rangle $\\
\hline
\end{tabular}\label{Table1}
\end{center}
\end{table}
\begin{table}
\begin{center}
\caption{The state of the output qubit when the result of the
second qubit measurement is $\vert 1\rangle$. $\varphi_1$ and
$\varphi_2$ are the original states of the control and target
qubit, respectively.}
\begin{tabular}{c|cccc}\hline
& \multicolumn{4}{c}{$\varphi_1$}\\ \cline{2-5}
$\varphi_2$ & $\left\vert +z\right\rangle $ & $\left\vert -z\right\rangle $ & $
\left\vert +x\right\rangle $ & $\left\vert
-x\right\rangle$\\\hline $\left\vert +z\right\rangle $ &
$\left\vert 1\right\rangle $ & $\left\vert 0\right\rangle $ &
$|+x\rangle$ &
$|-x\rangle$ \\
$\left\vert -z\right\rangle $ & $\left\vert 0\right\rangle $ &
$\left\vert 1\right\rangle $ & $|-x\rangle$ &
$|+x\rangle$ \\
$\left\vert +x\right\rangle $ & $|+x\rangle$ & $|-x\rangle$ &
$\left\vert 0\right\rangle $ & $\left\vert 1\right\rangle $ \\
$\left\vert -x\right\rangle $ & $|-x\rangle$ & $|+x\rangle$ &
$\left\vert 1\right\rangle $ & $\left\vert 0\right\rangle $\\
\hline
\end{tabular}\label{Table2}
\end{center}
\end{table}
Suppose that Eve knows the complete information of the first
qubit. If the second photon is unknown to her, because the single
photons prepared by Bob are in one of the four states randomly with
$1/4$ probability each, Eve's knowledge about the output state of
the control qubit after the quantum privacy amplification
operation becomes
\begin{eqnarray}
\rho &=&\frac{1}{4}\left(
\left\vert +z\right\rangle \left\langle +z\right\vert +
\left\vert -z\right\rangle \left\langle -z\right\vert +
\left\vert +x\right\rangle \left\langle +x\right\vert +\left\vert
-x\right\rangle \left\langle
-x\right\vert\right)\nonumber\\
&=&\frac{1}{2}\left(
\begin{array}{cc}
1 & 0 \\
0 & 1
\end{array}
\right).
\end{eqnarray}
That is, Eve has no knowledge at all about the output state. But
for Bob who has prepared the original states of the two qubits, he
will know completely the output state when Alice tells him the
$\sigma_{2,z}$ measurement result.
If it happens that Eve has complete information about both qubits,
she will know the output state exactly just like Bob. However the
probability this can happen is only
\begin{eqnarray}
P_2 ={r^2}.
\end{eqnarray}
We can use the output qubit again as a control qubit and choose a
third qubit from the batch as the target qubit and perform QPA
operation on them. In this way, as more qubits are used in the QPA
process, Eve's information is reduced exponentially
\begin{eqnarray}
P_m={r^m},
\end{eqnarray}
where $m$ is the number of qubits that have been used in the QPA.
In this way, Alice can condense a portion of single photons from a
batch of $N$ photons with negligibly small information leakage.
Then Alice can perform unitary operations to encode her secret
message.
In summary, quantum privacy amplification on a batch of polarized
single photons can be done with quantum mechanics for secure
direct communication.
This work is supported by the National Fundamental Research Program
Grant No. 001CB309308, China National Natural Science Foundation
Grant No. 60073009, 10325521, the Hang-Tian Science Fund, the
SRFDP program of Education Ministry of China.
\begin{figure}
\caption{ Quantum privacy amplification operation for two qubits.
}
\label{f1}
\end{figure}
\end{document} |
\begin{document}
\title{Toy observer in unitary evolution: Histories, consciousness, and state reduction}
\author{Lutz Polley}
\date{\small Institute of Physics, Oldenburg University, 26111 Oldenburg, FRG}
\maketitle
\begin{abstract}
For a toy version of a quantum system with a conscious observer, it is demonstrated that the
many-worlds problem is solved by retreating into the conscious subspace of an entire observer
history.
In every step of a discretised time, the observer tries to ``see'' records of his past and
present in a
coherent temporal sequence, by scanning through a temporal fine-graining cascade. The extremal value
most likely occurs at the end of some branch, thus determining the observer's world line. The
relevant neurons, each with two dimensions, are power-law distributed in number, so order
statistics implies that conscious dimension is located almost entirely in the extremal
branch.
\end{abstract}
\section{Introduction}
Among the various approaches to an interpretation of quantum theory, one is to regard the
superposition principle, state vectors, and the Schr\"odinger equation as universally valid,
and to seek a solution to the ensuing many-worlds problem \cite{Everett1973}. An old
\cite{vNeumann1932} but still relevant \cite{Donald1999,Tegmark2014} conjecture is that the
solution should involve the physical functioning of an observer's consciousness.
The approach taken here falls in this category.
As to the merits of the superposition principle,
most concepts of quantum theory, as presented in textbooks, rely on the formalism of state
vectors. Even thermal systems, traditionally regarded as mixtures, can be
treated as pure state vectors \cite{Tasaki1998}.
The basic law of propagation of a particle takes an almost self-evident form
\cite{Feynman1965,Baym1978,Zee1991,Polley2001} when positions are restricted to a spatial
lattice, and superpositions are regarded as a logical possibility. For electromagnetic fields
to be incorporated, only the complex phases already present in the hopping amplitudes need to
be varied \cite{Wilson1974}. In comparison, Newton's laws are phenomenological.
A fundamental problem of a linear evolution equation emerges in application to a quantum system interacting with an observer. With a system in a superposition of properties $1,\ldots,B$, and an observer trying to determine the ``actual'' property, the Schr\"odinger equation implies a transition of the form
\begin{equation} \label{EqualAmplitudeSplitting}
\left(\sum_{n=1}^B |n\rangle \right)|\mathrm{ready}\rangle \longrightarrow
\sum_{n=1}^B \Big(|n\rangle|\mathrm{observed}\,n\rangle\Big)
\end{equation}
While experience suggests an observer should find himself
in one of the states $|\mathrm{observed}\,n\rangle$ after the measurement, the superposition state produced by the Schr\"odinger equation does not indicate which of the possible results has ``actually'' been obtained. The observer rather seems to have split into $B$ branches of himself. To date, no consensus exists as to whether a superposition of observer states like (\ref{EqualAmplitudeSplitting}) describes something physically real. The problem appears less dramatic when the state vector is converted into a density matrix, as in the theory of decoherence, but the ambiguity about the result of the measurement persists \cite{Schlosshauer2007}.
In Everett's many-worlds interpretation \cite{Everett1973} the superposition \emph{is} observer's real wavefunction. His inability to realise more than one of his branches is inferred from the (undisputed) impossibility of branches interacting with each other. While certain activities like talking in branch 1 about events in branch 2 can be ruled out in this way, a mere simultaneous awareness of branches is not covered by the argument
\cite{Penrose1997}. An attempt at resolving this problem was made by this author in
\cite{Polley2012}; in the present paper, that approach is simplified and generalised. The hypothesis is that an observer's awareness is extremal in one branch, in such a way that the sum of remaining branches can be regarded as a negligible contribution.
In the Copenhagen interpretation, the process of measurement is not described by
a linear equation for amplitudes in a superposition, but is described by state reduction
following Born's rule. Accordingly, superpositions evolve in a stochastic way. Superposition
(\ref{EqualAmplitudeSplitting}) would end up as $|n\rangle|\mathrm{observed}\,n\rangle$ with
probability $1/B$ for each case. The disturbing point here is that
probabilities occur at a fundamental level---no natural law supposedly exists that would
determine, in principle, the outcome of a single quantum measurement.
By contrast, an agreeable role for stochastics would be that of
an approximation to deterministic but uncontrollably complicated dynamics. In the model of this
paper, there will be a constant law of evolution (representing determinism) given by a unitary
operator which is a peculiar kind of random matrix (assumed to approximate some complicated
dynamics). Pseudo-random evolution results if the operator is applied repeatedly to an
initial state of an appropriate class.
Since the early days of quantum mechanics the idea has been pondered that state reduction may involve an observer's consciousness \cite{vNeumann1932}. While the observer remains conscious with every result of the measurement, there may be variations in the degree of consciousness. Of all the details around us that could in principle catch our attention, only a tiny fraction actually does so. If entire histories are considered, that fraction multiplies with every instant of time, so there is lots of room for outstanding extremes.
A formal description of the physical functioning of a \emph{real} observer, including both
Everettian branching and the influence of an irreducible stochastic process in the generation
of the branches, has been given by Donald \cite{Donald1999}. For a model of state reduction,
it may be an unnecessary complication to consider consciousness as versatile as that of a human
being.
Yet, as emphasised in \cite{Donald1999}, a lesson from real brain dynamics is that consciousness cannot be described statically, by assigning labels to mental states as in equation (\ref{EqualAmplitudeSplitting}), but that neural activity is required, like the switching between firing and resting states of a neuron. It makes a great difference for model building! Neurons thus come as subsystems with two dimensions at least, and fluctuations in the number of neurons appear vastly enhanced in the dimensions of Hilbert spaces involved.
The guiding idea of the present paper is that Everett's many worlds are not all
equivalent for an observer, but that his consciousness is \emph{physically} contained
almost exclusively in one world. The technical basis of this approach is a theorem of order
statistics \cite{Embrechts1997,David1981}, relating to statistical ensembles with power-law
distribution. The largest draw,
in that case, exceeds the second-largest by a quantity $\Delta$ of the order of the ensemble
size taken to some power. On an Everettian world tree, the ensemble size is huge, and
concentrated near the end of the branches. Thus the dominance of the extremal
draw is particularly pronounced, and it occurs near the end of a branch, thus singling it out.
If the draws are for numbers of neurons, the exceedance $\Delta$ exponentiates because the
dimensions of subsystems multiply. In fact, to see whether and how this statistical mechanism
might be relevant for state reduction was the main guide for the constructions below.
The model scenario is as follows. At an equidistant sequence of times, a quantum system is
observed, generating $B$ branches of itself, of a number of records, and of a corresponding
number of observer's neurons. The main interest is in the statistics of dimensions;
therefore, system states, records, and neurons are only distinguished by an index and
are not specified any further. The law of evolution, for a step of time, is given by a unitary
operator. A specific form of initial state must be assumed for the scenario to unfold. This
``objective'' part of the model dynamics is constructed in section \ref{secRS}.
As to an observer's consciousness, it should be memory-based \cite{Edelman1989};
``the history of a brain's functioning is an essential part of its nature as an object on which
a mind supervenes'' \cite{Donald1997}. In the simplification of the model,
memories and the history of the brain's functioning are identified with the records.
The supervening mind is represented by neuronal activity drawing on memories, i.e., records.
The implementation of this drawing, by a (pseudo-)random cascade of neuronal activity,
serves two purposes: generating a draw from a power-law distribution, and composing in one
draw a conscious history---by ``recalling'' what happened at a certain time, then what happened
before and after, and before and after that again, and so on. This ``subjective'' part of the
model dynamics is constructed in section \ref{secObserversQuest}.
At several points in the construction of the evolution operator, random draws are made. As
mentioned already, these are supposed to approximate some complex deterministic law of
evolution, and they are made ``once and for all times''.
The evolution operator is the same at all times.
Some conclusions are given in section \ref{Conclusions}, and a technically convenient restriction of the dynamics is defined in appendix \ref{secAvoidingLoops}.
\section{Records and objective dynamics\label{secRS}}
\subsection{A paradigm: Griffiths' models of histories}
An elaborate version of the Copenhagen interpretation is the formalism of consistent histories \cite{Griffiths2002}. The notion of measurement on a quantum system is modified to that of a property (selected from an available spectrum) which the system has, irrespective of whether an observer is in place. Those properties a system has at times $t_0,t_1,\ldots, t_n$, while no property should be imagined for intermediate times. By a generalised Born's rule, probabilities are defined for sequences of the available properties (histories of the system) to occur. There are constraints, involving the time evolution operator, to be imposed on the sort of properties that can form a history. In order to demonstrate the constraints, Griffiths devises a number of toy models by immediately constructing operators of time-evolution, rather than Hamiltonians. For this purpose, the tensor-product structure of the so-called history Hilbert space turns out to be quite convenient. Moreover, models for simultaneously running processes in a time interval can be simply constructed as products of unitaries.
Although the intention in \cite{Griffiths2002} is to keep observers out of the theory, it certainly is a reasonable approximation for an observer model as well to consider awareness only at times $t_0,t_1,\ldots, t_n$ while leaving unspecified observer's state in between. In the model to be constructed, the times will be equidistant. The operator of evolution from an instant to the next will be the product $U_\mathrm{wit\,2}U_\mathrm{con}U_\mathrm{wit\,1}U_\mathrm{orb}U_\mathrm{age}$, their roles being to increase the ages of records, move the system along its (branching) orbit, create first half of witnessing records, run awareness cascade, and create second half of witnessing records. Each of these is a product of a large but finite number of unitary factors.
The tensor product structure of the space of states, which in Griffiths' formalism emerges from the notion of history Hilbert space, is a convenient element of model building independently of that notion, because it facilitates the construction of operators that manifestly commute. Below, the tensor product will describe a reservoir of potential records and their neuronal counterparts, each of which being treated as a subsystem. By construction of the evolution operator, every branch will consist of its own collection of subsystems. This makes the numbers of subsystems very large, raising the question of whether a ``multiverse'' in the classical sense is tacitly assumed for the model. It should therefore be noted that the subsystems can be mathematically identified with degrees of freedom of a conventional Hilbert space. For example, any unitary space of dimension $2^n$ can be rewritten as a tensor product by expressing the index of basis vectors in binary form and identifying
$$
|i_1,\ldots,i_n\rangle \equiv |i_1\rangle \cdots |i_n\rangle \qquad i_k=0,1
$$
For the counting of dimensions, both versions are equivalent.
In order to show that one branch nearly exhausts all dynamical dimension of awareness, what matters is the quantity of records and of neuronal response. The only quality of relevance is the date at which a record is created. For the model it therefore suffices to distinguish records by an unspecified index, and to make their age the only stored content.
\subsection{Structure of state vectors; dating of records\label{secAgeing}}
The subsystems of the model are ``objective'' records and ``subjective'' bits of
mental processing. For the records, two classes are assumed. Those in class ${\cal O}$ (orbits) induce further records in the course of evolution, giving rise to branching world lines. The branches emanating from a record $k\in{\cal O}$ are collected in a set ${\cal B}(k)$ of $B$ elements. They are envisioned as predetermined and (due to complex dynamics of real macroscopic systems) pseudorandom. They are defined here\footnote{A more technical specification, simplifying evaluation, is given in appendix \ref{secAvoidingLoops}.} by random draw, once and for all time:
\begin{equation} \label{DefB(k)}
{\cal B}(k) = \{j(k,1),\ldots,j(k,B)\} \mbox{ where } j(k,s) = \mbox{random draw from }
{\cal O}\backslash\{k\}
\end{equation}
The other class of objective records consists of mere witnesses, holding redundant information about records in ${\cal O}$:
\begin{equation} \label{DefW(k)}
k\in{\cal O}\mbox{ is witnessed by all records } l \in {\cal W}(k)
\end{equation}
All ${\cal W}(k)$ are assumed to have the same macroscopic number $W$ of elements.
Observer's neurons are associated with witnessing records, not immediately with orbital
records. The model neurons are distinguished by an index and are not specified any further.
However, it could make sense to address the same anatomical neuron at different times by
different indices. The multiple degrees of freedom would then be provided by the metabolic
environment.
Denoting by ${\cal R}_k$ and ${\cal N}_k$ the state-vector space of a record and an observer's ``neuron'', respectively, the model Hilbert space is
$$
{\cal H} = \bigotimes_{k\in{\cal O}}\left({\cal R}_k \otimes
\bigotimes_{l\in{\cal W}(k)} \left( {\cal R}_l\otimes {\cal N}_l \right) \right)
$$
The information to be stored in a recording subsystem is whether anything is recorded at all,
and if so, since how many steps of evolution. The basis states of a record of the orbital kind thus are
\begin{equation} \label{DefRecordBasisOrbit}
|\mathrm{blank}\rangle, ~ |\mathrm{age}~m\rangle \quad m\in{\bf Z}
~~~~ \mbox{ for each index in } {\cal O}
\end{equation}
Observer's mental processing of a record is modelled by transitions between the firing and resting state of a ``neuron''. The basis states of a witnessing record and its mental counterpart are
\begin{equation} \label{DefRecordBasisWitness}
\left\{ \begin{array}{l} |\mathrm{blank}\rangle \\
|\mathrm{age}~m\rangle \quad m\in{\bf Z} \end{array}\right\} \otimes
\left\{ \begin{array}{c} |\mathrm{rest}\rangle \\ |\mathrm{fire}\rangle \end{array}\right\}
~~ \mbox{for each index in } \bigcup_{k\in{\cal O}}{\cal W}(k)
\end{equation}
The ages of records could be limited to an observer's lifetime, as it was done in \cite{Polley2012}, but the infinite dimension implicit in (\ref{DefRecordBasisOrbit}) and (\ref{DefRecordBasisWitness}) is harmless with finite products of unitary operators, and avoids an unnecessary degree of subjectivity in the model.
For all recording subsystems, an ageing operation is defined:
\begin{equation} \label{DefUage}
U_\mathrm{age} = \prod_{k\in{\cal O}} U_\mathrm{age}(k)
\prod_{i\in{\cal W}(k)}U_\mathrm{age}(i)
\end{equation}
where for record number $r$
$$
\begin{array}{l}
U_\mathrm{age}(r)|\mathrm{blank}\rangle_r = |\mathrm{blank}\rangle_r \\
U_\mathrm{age}(r)|m\rangle_r = |m+1\rangle_r
\end{array}
$$
\subsection{Preferred initial state\label{secInitialState}}
A real observer's identity derives from a single DNA molecule. For a model of an observer's
history, this is taken here as justification for considering exclusively the evolution from an
initial state in which one orbital record $k_0$ and its witnesses ${\cal W}(k_0)$ are in the
zero-age state while all other records are in their blank states. The choice of a zero-age
record determines observer's entire history.
It is the only ``seed'' for all ensuing pseudo-random processes of evolution.
\subsection{Orbital branching\label{secOrbit}}
The idea is that observer's history branches at every step of evolution, like in a quantum
measurement. A new branch is described by an index of a new record, and is not specified any
further. Imagining a new quantity being measured at every step would seem to be consistent with
this scenario.
Deutsch \cite{Deutsch1999} showed, for systems with sufficiently many degrees of
freedom, that Born's rule for superpositions with coefficients more general than in equation
(\ref{EqualAmplitudeSplitting}) can be reduced to the
equal-amplitude case, provided the unitarity of any physical transformation is taken for
granted. In the model to be constructed, evolution will be unitary. Therefore,
invoking Deutsch's argument, only branching into equal-amplitude superpositions will be
considered.
Under the condition that record $k$ is older than zero, and that all records to
which the orbit possibly continues are blank, the orbit does continue as a superposition of
zero-age states of the records of address set ${\cal B}(k)$. Else, the identity operation is carried out. The corresponding evolution operator, specific to point $k$ on an orbit, is defined using the following basis of the \emph{partial tensor product} relating to the records of the
set ${\cal B}(k)$.
\begin{equation} \label{PartialBasis}
\begin{array}{l}
|\Psi_0\rangle = \prod_{l\in{\cal B}(k)} |\mathrm{blank}\rangle_l \\[3mm]
|\Psi_l\rangle = \Big(|0\rangle\langle\mathrm{blank}|\Big)_l|\Psi_0\rangle
\qquad l\in{\cal B}(k)
\end{array}
\end{equation}
That is, one basis vector has all records of ${\cal B}(k)$ in the blank
state, while the remaining have one record promoted to the zero-age state.
The subspace orthogonal to $|\Psi_0\rangle$, \ldots, $|\Psi_B\rangle$
is spanned by product vectors with more than one record in a zero-age state or with records
in higher-age states.
The idea of equal-amplitude branching from point $k$ is that $|\Psi_0\rangle$ should evolve
into a superposition of $|\Psi_1\rangle$ to $|\Psi_B\rangle$; in the basis (\ref{PartialBasis}),
\begin{equation} \label{SplittingShorthand}
\left(\begin{array}{c} 1 \\ 0 \\ \vdots \\ 0 \end{array} \right) \longrightarrow
\frac1{\sqrt B} \left(\begin{array}{c} 0 \\ 1 \\ \vdots \\ 1 \end{array} \right)
\end{equation}
A convenient way of completing this to define a unitary operator is to use a Fourier basis in
$B$ dimensions,
$$
F_m = \frac1{\sqrt B}\left(\begin{array}{c} \alpha_m^0 \\ \alpha_m^1 \\ \vdots \\
\alpha_m^{B-1} \end{array} \right) \qquad \alpha_m = \exp\frac{2\pi i m}{B}
\qquad m = 0,\ldots, B-1
$$
Relating to the basis (\ref{PartialBasis}), and using the $F_m$ as $B$-dimensional column vectors, a branching operation can be defined using the $(B+1)\times(B+1)$ matrix
$$
S = \left(\begin{array}{ccccc} 0 & 1 & 0 & \cdots & 0 \\
F_0 & 0 & F_1 & \cdots & F_{B-1} \end{array} \right)
$$
whose columns form an orthonormal set.
Conditioning on $\mathrm{age} = 1$ of record $k$, the factor of orbital evolution triggered by
this record is then given by
\begin{equation} \label{DefUorb(k)}
U_\mathrm{orb}(k) = 1 + \left( \sum_{n,n'=0}^B |\Psi_n\rangle
(S_{nn'}-\delta_{nn'}) \langle\Psi_{n'}|\right)_{{\cal B}(k)}
|\mathrm{age}~1\rangle_k \langle \mathrm{age}~1|_k
\end{equation}
The bracket reduces to zero, in particular, when branching from $k$ has occurred previously in the evolution, so that, by subsequent ageing of non-blank records,
any zero-age components of records have been promoted to higher-age components;
cf.\ (\ref{PartialBasis}). In this way, repeated branching from the same point as well as loops of evolution are avoided. Technically, however, an additional means of avoiding loops (appendix \ref{secAvoidingLoops}) facilitates the evaluation of evolution.
The global operator of orbital evolution is
\begin{equation} \label{DefUorb}
U_\mathrm{orb} = \prod_{k\in{\cal O}} U_\mathrm{orb}(k)
\end{equation}
\subsection{Witnessing}
The basis states of the witnessing records are defined in (\ref{DefRecordBasisWitness}).
In order to represent (\ref{DefW(k)}) by an evolution operator, orbital records of zero age
are assumed to induce a change of witnessing records from blank to zero-age. The relevant part of the operation is, in self-explaining notation,
\begin{equation} \label{UwitSimpl}
\left( \prod_{l\in{\cal W}(k)} \Big(|0\rangle \langle\mathrm{blank}|\Big)_l\right)
\Big( |0\rangle \langle 0|\Big)_k
\end{equation}
For the sake of unitarity, however, this needs to be complemented by further operations,
although these will never become effective in the evolution of an initial state as defined in section \ref{secInitialState} and as age-promoted by the operators of section \ref{secAgeing}.
A complemented version of the operator above would be
$$
1 + \left( -1 + \prod_{l\in{\cal W}(k)} \left[|0\rangle \langle\mathrm{blank}|
+ |\mathrm{blank}\rangle \langle 0| + \sum_{m\neq 0}|m\rangle\langle m|\right]_l
\right) \Big( |0\rangle \langle 0|\Big)_k
$$
However, witnessing records just created can be read and processed by an observer within the
same step of evolution. It will be essential for the functioning of a stochastic mechanism below (section \ref{secObserversQuest}) that half of the witnessing records are created before the observer might immediately address them, while the other half is created thereafter. For this purpose, let the addresses of witnessing records be split into subsets of equal size,
$$
{\cal W}(k) = {\cal W}_1(k) \cup {\cal W}_2(k) ~~~~~~
{\cal W}_1(k) \cap {\cal W}_2(k) = \emptyset
$$
The two corresponding witness-generating evolution operators are
\begin{equation} \label{DefUwit}
U_\mathrm{wit\,1} = \prod_{k\in{\cal O}} U_\mathrm{wit\,1}(k) ~~~~~~~~~~~~~~~~~~
U_\mathrm{wit\,2} = \prod_{k\in{\cal O}} U_\mathrm{wit\,2}(k)
\end{equation}
where \vspace*{-4mm}
$$
~~~~~~~~ \begin{array}{l}
U_\mathrm{wit\,1,2}(k) ~ = ~ 1 ~ + \\[1mm] \displaystyle
\left( -1 + \!\!\! \prod_{l\in{\cal W}_{1,2}(k)} \left[|0\rangle \langle\mathrm{blank}|
+ |\mathrm{blank}\rangle \langle 0| + \sum_{m \neq 0}|m\rangle\langle m|\right]_l
\right) \Big( |0\rangle \langle 0|\Big)_k
\end{array}
$$
\section{Observer's mental programme\label{secObserversQuest}}
Consider an observer who is constantly trying to assemble his records into a coherent temporal
sequence. One way of doing this would be to see what happened at the middle $a/2$ of his life
at age $a$, by seeking an appropriate record; then what happened a quarter before and after, at
$a/4$ and $3a/4$, then at multiples of $a/8$, and so forth.
This defines a cascade of increasing temporal resolution. For
``coherence'', connection by a logical AND is required. It would fit in with the spirit (not
with the technical detail) of Tononi's concept of consciousness as Integrated Information
\cite{Tononi2008}:
``Phenomenologically, every experience is an integrated whole, one that means what it means by
virtue of being one, \ldots''. Moreover, memory
becomes a fundamental constituent of consciousness \cite{Edelman1989} in this way.
\subsection{Generating the power-law statistics\label{secGenPowStat}}
The idea for generating power-law statistics, within one step of evolution, is as follows.
While the AND condition is satisfied, the cascade of records addressed grows like $2^l$ where
$l$ is the generation number. By the scanning procedure to be constructed, records of non-zero
ages will be found with probability 1 whenever addressed, but records of zero age (being
created within the same step and representing the ``present'') will only be found with
probability 1/2. The entire cascade is stopped when the quest for a coherent picture of past
and present fails, for which the probability is 1/2 in every generation. This is a standard
mechanism for generating power law statistics \cite{SimkinRoychowdhury2006}, here with exponent
$-1$ for the cumulative distribution function since a number greater than $n=2^{l+1}-1$ (sum of
generations) is obtained with probability $\frac12(n+1)^{-1}$.
\subsection{Recursive construction of awareness cascade}
The unitary operator to be constructed in this section will go through all possible cascades
of records of ages $a/2$, multiples of $a/4$, of $a/8$ and so on, looking for a randomly
chosen witnessing record $i$ in every ${\cal W}(k)$ of the cascade. It should be noted again
that all random draws are made once and for all times. Parameter $a$ will be an eigenvalue of
observer's age operator, defined in section \ref{secObserversAge}.
We begin by constructing the $l$th generation of the cascade. Consider a set $g$ of addresses
given by pairs $(k,i)$ with
\begin{equation} \label{ikjDef}
\left. \begin{array}{rcl}
k(j) & = & \mbox{label of an orbital record} \\
& & \mbox{restricted by } k(j) < k(j') \mbox{ for } j < j'
\\[3mm]
i(j) & = & \mbox{random draw from ${\cal W}(k(j))$}
\end{array} \right\} \mbox{ for }j = 1,\ldots,2^l
\end{equation}
The ordering of the $k$ is for technical convenience; permutations are taken into account when
assigning ages, as below. Denote the collection of all possible sets of the above form by
$$
{\cal G}(l) = \big\{ \mbox{all possible $g$ of the form (\ref{ikjDef})} \big\}
$$
The random draws are understood to be \emph{independent for different} $g$.
The elementary projection on which the scanning operation is based is
\begin{equation} \label{PaikDefinition}
\Big( |m\rangle \langle m|\Big)_i = \mbox{projection on age $m$ of record $i$}
\end{equation}
Below, fractional ages are converted to integers by the ceiling function $\lceil~\rceil$.
To enable the scanning of all combinations of ages and records, let us define
\begin{equation} \label{alphaDef}
\alpha = \mbox{sequence consisting of ages }\lceil 2^{-l}(j-1)a \rceil,
j = 1,\ldots,2^l, \mbox{ reordered}
\end{equation}
This distinguishes permutations of different ages, but not of equal ages.
Denote the collection of all sequences of the form (\ref{alphaDef}) by
$$
{\cal A}(l) = \big\{ \mbox{all possible $\alpha$ of the form (\ref{alphaDef})} \big\}
$$
The projection operator testing whether the records given by $g$ have ages as given by
$\alpha$ is
\begin{equation} \label{DefP(g,a)}
P(g,\alpha) =
\left[\prod_{j=1}^{2^l} \Big( |\alpha(j)\rangle \langle \alpha(j)|\Big)_{i(j)}\right]
\left[\prod_{i \notin g} \Big( |\mbox{blank}\rangle \langle \mbox{blank}|\Big)_i\right]
\end{equation}
where $i(j)$ in the first bracket denotes elements of $g$. These projectors are mutually
orthogonal,
\begin{equation} \label{OrthogonalP(g,a)}
P(g,\alpha) P(g',\alpha') = 0 ~~ \mbox{ if } g\neq g' \mbox{ or } \alpha\neq \alpha'
\end{equation}
To show this, consider $g \neq g'$. Let $i$ be an index in $g$ but not in $g'$. Then in
$P(g,\alpha)$ we have a projector $( |m\rangle \langle m|)_i$ while in $P(g',\alpha')$ we have
$( |\mbox{blank}\rangle \langle \mbox{blank}|)_i$ instead. The product of these two is zero
already. Secondly, consider the case of $g=g'$ and $\alpha\neq\alpha'$.
Let $j_0$ be an index for which $\alpha(j_0)\neq \alpha'(j_0)$. Now the projectors are
orthogonal because they project on different ages for record $i(j_0)$.
The idea for the modelling of observer's neuronal reaction is as follows.
In the subspace where the test by $P(g,\alpha)$ is positive (all $p$ give 1) the observer
notices it by some neural activity, and the next generation of the scanning process takes
place. In the subspace where the test is negative (some $p$ give 0) nothing happens; evolution
reduces to an identity operation. The neural activity is modelled by
2-dimensional rotations $\sigma_i$ in the counterparts ${\cal N}_i$ of the records.
In the subspace where $g$ tests positive, the collective rotations are, with obvious
assignment to the tensorial factors,
$$
\sigma(g) = \prod_{i \in g} \sigma_i
$$
The awareness cascade, running within a step of evolution, is conveniently constructed
recursively, downward from higher to lower resolutions of time. This is enabled by the fact
that after many generations the finite contents of the address sets ${\cal W}(k)$ will be
exhausted. So there is a maximum $L$ for the generation number $l$, determined by the other
parameters of the model. The counting of the generations will be upward here as usual,
beginning with $l=1$ at age $a/2$. The recursion is initialised by
\begin{equation} \label{UawaInitial}
U_\mathrm{awa}(L+1,a) = 1
\end{equation}
and proceeds by
\begin{equation} \label{UawaRecursion}
U_\mathrm{awa}(l,a) = 1 + \sum_{g\in{\cal G}(l)} \sum_{\alpha\in{\cal A}(l)}
\Big( -1 + U_\mathrm{awa}(l+1,a) \sigma(g) \Big) P(g,\alpha)
\end{equation}
The awareness operator for the completed cascade is $ U_\mathrm{awa}(1,a)$. Defining it by
recursion is only a convenient way of representing the algebraic structure. In application
to a state vector, projections of the various generations automatically occur in the natural
order, $l=1,\ldots,L$.
In order to show that (\ref{UawaRecursion}) indeed defines a unitary operator, first note
that $P(g,\alpha)$ only consists of projections on the ages of witnessing records, so that
basis states of the form (\ref{DefRecordBasisWitness}) are eigenstates of the projectors.
All age projectors commute among themselves, and commute with the $\sigma$
operations because these do not act on records. It follows, starting from
(\ref{UawaInitial}) and going through (\ref{UawaRecursion}), that the projectors commute
with the $U_\mathrm{awa}$ of all generations. Using (\ref{OrthogonalP(g,a)}), unitarity in the
form $U_\mathrm{awa}(l,a)^\dag U_\mathrm{awa}(l,a) = 1$ can then be shown by
straightforward algebra.
\subsection{Observer's age and conscious history\label{secObserversAge}}
Parameter $a$ of the preceding section is identified here as an eigenvalue of observer's
age operator. It suffices to assign an age to any tensor product of basis vectors as defined in
(\ref{DefRecordBasisWitness}). Assigning 0 to the ``blank'' state here, any basis state of
record $i$ has an age value $a_i$. The age operator is defined by
$$
A \prod_i |a_i\rangle = ( \max a_i) \prod_i |a_i\rangle
$$
Observer's lifetime can be taken into account by restricting neuronal activity to ages
$ a \leq T$, by including an operator factor $\Theta(T-A)$.
Thus, the final expression for the evolution operator of observer's consciousness is
\begin{equation} \label{DefUconsc}
U_\mathrm{con} = 1 + \Big(-1 + U_\mathrm{awa}(1,A)\Big)\Theta(T-A)
\end{equation}
The complete evolution operator is a product of the factors constructed above.
In a new step of evolution, ages of all records are increased by one unit.
Next, orbital records develop. The creation of witnessing records and their processing by the
observer (operations that do not commute) are assumed to be intertwined in such a way that
unitarity is manifestly preserved. The full evolution operator of the model is
\begin{equation} \label{DefUmodel}
U = U_\mathrm{wit\,2} ~ U_\mathrm{con} ~ U_\mathrm{wit\,1} ~ U_\mathrm{orb} ~
U_\mathrm{age}
\end{equation}
\subsection{Verifying the Scenario\label{ScenarioRecovered}}
\subsubsection{Structure of branches}
We start out from a product state as specified in section \ref{secInitialState} and repeatedly apply the evolution operator (\ref{DefUmodel}). Clearly, since product states form a basis, we can always write the resulting states as superpositions of products; however, we wish to show that only a superposition of special products emerges, which will be regarded as the ``branches'' of the wave function. After $a$ applications of the evolution operator $U$, the properties of those product states are as follows.
\begin{enumerate}
\item In each branch there is precisely one orbital record of age $0$.
\item For each of the ages $0,\ldots,a$, there is one set ${\cal W}(k)$ of
witnessing records in the corresponding eigenstates of age; all other witnesses
are blank.
\item Neuronal states and record states factorise (do not entangle).
\end{enumerate}
The initial state has these properties with $a=0$ by definition. Let us assume then that $a$
applications of $U$ have produced a superposition of product states with properties 1-3. When
$U$ is applied once more, it suffices by linearity to consider the action on any of the product
states. The first action is to increase by 1 the ages of all non-blank records.
There is now for each of the ages $1,\ldots,a+1$ precisely one set ${\cal W}(k)$ of
witnessing records in the corresponding eigenstates of age, while all other witnesses
are blank; witnesses of age $0$ are missing so far.
Also, a single orbital record of age 1 is generated from the previous one of age 0; let its
address be $k_1$. Now applying $U_\mathrm{orb}$, as defined in (\ref{DefUorb}), only the factor
with $k=k_1$ can have an effect since the projection on age 1 gives zero for all other $k$.
Nontrivial action of $U_\mathrm{orb}(k_1)$ requires all records in ${\cal B}(k_1)$ to be blank,
which would not be true if the system had been on any of those points before. Invoking the
loop-avoiding specification of ${\cal B}(k_1)$, as given in appendix \ref{secAvoidingLoops}, we can
regard this condition as satisfied within observer's lifetime. The action of
$U_\mathrm{orb}(k_1)$ then consists in creating a new superposition of products, with a single
zero-age orbital record in each of them, as expressed in (\ref{SplittingShorthand}). Property 1
holds in each of these products. For the remaining operations of $U$ it suffices by linearity
again to apply them only on the product states just created by $U_\mathrm{orb}(k_1)$.
Let $k_0$ be the single zero-age orbital record in one of them. Then, of all
witness-generating factors of (\ref{DefUwit}), only $U_\mathrm{wit\,1}(k_0)$ and
$U_\mathrm{wit\,2}(k_0)$ act nontrivially, due to the conditioning on zero-age.
The records of the sets ${\cal W}_1(k_0)$ and ${\cal W}_2(k_0)$ are in blank states before this
action, because orbital point $k_0$ was not visited before, so the simplified expression
(\ref{UwitSimpl}) applies, and the records of ${\cal W}_1(k_0)$ are transformed from blank to
zero-age states. Thus, $U_\mathrm{wit\,1}(k_0)$ generates the first half of zero-age witnessing
records that were missing so far from the full range of ages.
Next comes the action of $U_\mathrm{con}$, defined in (\ref{DefUconsc}). It is
the only factor of evolution which could affect property 3. It
consists in making certain neuronal factors rotate if certain projections on the ages of
records are nonzero, and no action else. All records are in definite ages or
blank, so the projections of $U_\mathrm{con}$ preserve the product form of the state vector.
The neuronal rotations preserve the product form by construction. Hence, property 3 continues
to hold. Finally, the second half of zero-age witnessing records is generated by
$U_\mathrm{wit\,2}(k_0)$, so property 2 holds as well after $a+1$ applications of the
evolution operator.
\subsubsection{Awareness cascades}
To evaluate the awareness cascades encoded in $U_\mathrm{con}$, defined in
(\ref{DefUconsc}), assume that observer's age is $a<T$ so that $U_\mathrm{awa}(1,a)$
applies. The projection operators $P(g,\alpha)$ of (\ref{UawaRecursion}), using
(\ref{ikjDef}) and (\ref{alphaDef}) for $l=1$, test for randomly chosen records
in ${\cal W}(k(0))$ and ${\cal W}(k(1))$, with ages $0$ and $\lceil a/2\rceil$ or the
permutation of these, while the pair of $k(0)$ and $k(1)$ is ordered. By property 2 above,
the product state (or branch) being considered has records of one set ${\cal W}(k_0)$ at age
$0$ and of one set ${\cal W}(k_1)$ at age $\lceil a/2\rceil$. Hence,
the only successful projection $P(g,\alpha)$ can be for $g=(k_0,k_1)$ and
$\alpha=(0,\lceil a/2\rceil)$ or for $g=(k_1,k_0)$ and $\alpha=(\lceil a/2\rceil,0)$,
depending on which of the addresses $k_0$ or $k_1$ is smaller. Only one term, at most,
contributes to the sum over $g$ and $\alpha$ in (\ref{UawaRecursion}).
The test for $k_1$ with age $\lceil a/2\rceil$ will be positive, since all
witnesses of ages $1$ to $a$ have been created during the preceding steps of evolution.
However, witnesses of age $0$ are created half before the action of $U_\mathrm{con}$ and half
thereafter. If the randomly chosen record from ${\cal W}(k_0)$ is contained in the first half,
${\cal W}_1(k_0)$, it is created by $U_\mathrm{wit\,1}$ before the action of $U_\mathrm{con}$,
so the test will result in $P=1$ in equation (\ref{UawaRecursion}); if it is created by
$U_\mathrm{wit\,2}$ instead, the test will result in $P=0$. In the latter case,
$U_\mathrm{awa}(1,a)$ reduces to the identity operation.
In the case of $P=1$, the neurons associated with the witnesses for $k_0$ or $k_1$
become active through the $\sigma$ factor, and the second generation of the cascade comes into
action through $U_\mathrm{awa}(2,a)$.
The argument repeats: As a consequence of property 2, at most one combination $(g,\alpha)$
contributes to the sum (\ref{UawaRecursion}) for $U_\mathrm{awa}(2,a)$, namely that combination
in which the records collected in $g$ are tested for the ages they actually have on the branch
considered. The zero-age orbital record $k_0$ and its witnesses are the same for all
generations of the cascade, but for each $l$ a new randomly chosen witness is tested.
Projection $P(g,\alpha)$ reduces to $1$ if that witness happens to be created by
$U_\mathrm{wit\,1}$, while it reduces to $0$ if it is created by $U_\mathrm{wit\,2}$. The
probability for the cascade to continue is $1/2$ in every generation. Witnesses of higher age
always test positive, as they have been created in the preceding steps of evolution.
\subsubsection{Statistics of dimensions of conscious subspaces\label{DimensionStatistics}}
If the observer lives to age $T$, the number of orbital points on his world-tree is
$$
N = \frac{B^{T+1}-1}{B-1}
$$
This is also the number of statistically independent awareness cascades, as we now show.
By definition (\ref{ikjDef}), a new series of random draws is made for every
sequence $g$, a selection of orbital addresses. This definition does not a priori relate to a
specific time, but its occurrence in the projector $P(g,\alpha)$ of the evolution operator,
defined in (\ref{DefP(g,a)}), combines it with a sequence of observer's ages.
We intend to show the following: If projections with the same sequence, $g_1=g_2$, give
positive results for two points on observer's world-tree, those points must be equal.
Let $a_1$ and $a_2$ be observer's ages at the two points, and let $k_1$ and $k_2$ be the
orbital points of zero age, the ``present'' points. By (\ref{DefP(g,a)}) and (\ref{alphaDef}),
the present point is always contained in $g$, so $k_1\in g_1$ in particular. This here implies
$k_1\in g_2$. Since $P(g_2,\alpha_2)$ is assumed to test positive, $k_1$ must be an orbital
record on the branch leading to $k_2$, so it either coincides with $k_2$ or is a record of age
greater than zero. In the latter case, it must have been the zero-age record at an earlier
age of the observer. Hence, $a_1<a_2$, unless $k_1=k_2$. Exchanging 1 and 2 in the
argument, we find $a_1>a_2$ unless $k_1=k_2$. This implies $k_1=k_2$ and $a_1=a_2$.
For the number of neurons activated in a cascade (section \ref{secGenPowStat}) the
probability distribution is a power law characterised by exponent $-1$. Hence, by a theorem of
order statistics \cite{Embrechts1997}, the largest number of neurons activated
exceeds the second-largest by a quantity of order $N$. More precisely, using notation of
\cite{Embrechts1997} corollary 4.2.13, given an ensemble of size $N$ of random draws with the
power-law distribution, we have for the difference between the largest draw $X_{1,N}$ and the
second-largest $X_{2,N}$
\begin{equation} \label{FrechetSeparation}
X_{1,N}-X_{2,N} = N \, Y \qquad \mbox{$Y$ = random variable independent of $N$}
\end{equation}
The distribution of $Y$ is non-singular.
The dimension of the active neuronal subspace in the extremal branch is $2^{X_{1,N}}$.
The total dimension of active neuronal subspaces in all other branches is bounded by
$2^{X_{2,N}}N$. For the latter to be larger than the former, the probability is
$$
P(2^{NY}<N) = P(Y<N^{-1}\log_2N) = \mbox{negligible}
$$
By a comfortable margin, an observer can expect to find his world-line well-defined, providing
it is indeed the \emph{dimension} of awareness that matters, rather than the number of neurons.
Two arguments in favour of the dimension are at hand. The simplest is Fermi's Golden Rule,
although it relies on state reduction and so goes beyond the framework of the model; any
transition probabilities into observer's subspace of awareness would be proportional to the
dimension of the subspace.
The other argument uses a change of basis in the union of conscious subspaces of \emph{all}
branches. Let $|1\rangle,\ldots,|N\rangle$ be a basis for the extremal branch, and
$|N+1\rangle,\ldots,|N+M\rangle$ a basis for the remaining branches. We know that $M$ is tiny
in comparison to $N$. Now consider instead a Fourier basis, which consists of superpositions
of all $|1\rangle,\ldots,|N+M\rangle$ with equal amplitudes but different phases. In each of
the new basis vectors, properties of the non-extremal branches only occur in a tiny
component. The situation is now similar to that of an electron bound to a proton on earth.
It resides by $10^{-10^{18}}$ of its wavefunction behind the moon, but we still regard it as
an electron on earth.
\subsection{Analysing states in a time-local basis\label{secLocalBasis}}
In order to analyse the properties of a state vector, it must be represented in a particular basis, such as the eigenbasis of an observable. For some applications, like representing dynamics in the Heisenberg picture, the basis may conveniently be chosen time-dependent.
As to the state vectors of observer's neurons, we have so far used a global basis which applies to all branches, and in which the evolution operator is constant. In this way, observer's entire experience emerges in a single step of evolution near the end of his lifetime. Observer's mental reactions thus appear highly non-local.
However, when analysed in a time-dependent basis adapted to the evolution in one particular branch, observer's reactions occur simultaneously with the creation of the records, while the operator of evolution appears to change in a random way after each observation. This is trivial mathematically, but not physically.
Consider a section of evolution of a neuronal subsystem,
$$
\left(\begin{array}{c} x_1 \\ x_2 \end{array}\right) \stackrel{1}{\longrightarrow}
\left(\begin{array}{c} x_1 \\ x_2 \end{array}\right) \stackrel{\sigma}{\longrightarrow}
\left(\begin{array}{c} y_1 \\ y_2 \end{array}\right)
$$
where entries relate to some initially chosen basis, and where $\sigma$ is a unitary $2\times 2$ matrix. Only in the second step something appears to happen here. Changing the basis for the second state vector such that
$$
\left(\begin{array}{c} x_1 \\ x_2 \end{array}\right)_\mathrm{old~basis}
= \sigma^{-1}\left(\begin{array}{c} y_1 \\ y_2 \end{array}\right)_\mathrm{new~basis}
$$
the section of evolution takes the form
$$
\left(\begin{array}{c} x_1 \\ x_2 \end{array}\right) \stackrel{\sigma}{\longrightarrow}
\left(\begin{array}{c} y_1 \\ y_2 \end{array}\right) \stackrel{1}{\longrightarrow}
\left(\begin{array}{c} y_1 \\ y_2 \end{array}\right)
$$
The step of evolution where change appears can be shifted to any position in the
sequence. Due to the tensor-product structure of the model's branches, the argument applies
separately to all neurons involved. It is thus \emph{possible} to choose a basis in which
observer's neuronal reactions appear local, but it is a different choice in each branch, and
the reason for the choice cannot be found in the state of records at the given time. In this
sense, the choice appears to be intrinsically random.
\section{Conclusions\label{Conclusions}}
It has been demonstrated for a toy model of a quantum system with conscious observer,
that a unitary evolution operator, repeatedly applied to an appropriate initial state,
can accomplish two things: gather information about an observer's world-tree,
and perform a random draw on the world-tree so as to single out a world-line of extreme
awareness. A theorem, known from order statistics, about the dominance of the extreme in a
power-law ensemble plays a central role.
What the model \emph{avoids} to do is giving up fundamental linearity, and
introducing fundamental stochastics. The framework is state vectors and unitary evolution
under a constant law. Yet certain vectors evolve pseudo-stochastically.
The role of time and causality in the model is precarious, inevitably so under
the working hypothesis that a world-line should be determined by an extremal
draw on a world-tree.
As was shown in section \ref{secLocalBasis}, the model reproduces the usual
scenario of alternating Schr\"odinger-type and Born-type evolution when
represented in an appropriate basis. Observer's mental reactions then appear at the same
instant as the generation of records, but the choice of the basis appears indeterminate at
that instant. The model resolves that indeterminacy by omitting any erosion of
``witnessing records'', keeping them readable throughout observer's lifetime.
Can this be true for more realistic ``witnessing records'', or is this the point where
attempts at realistic modifications of the model must fail?
Non-universality of time could be an argument in favour of the optimistic alternative.
From Special Relativity Theory,
time is known to be observer-dependent, but only with negligible effects if
observers move at low speed. On this basis, time is treated as universal in nonrelativistic
quantum mechanics (likewise in the preceding sections of this paper).
But quantum mechanics provides its own path to special relativity, in the sense that it
enables pre-relativistic derivations of the Dirac equation \cite{Zee1991,Polley2001};
it might also provide its own version of observer-dependent time. The existence of two
modes of evolution, Schr\"odinger and Born, might be an indication of it.
Having recovered the stochastic appearance of measurements in section \ref{secLocalBasis}
by referring to a specific basis, we may have some freedom in reinterpreting the
evolution \emph{operator} as something more general. Since logics is always part of a natural
law, and conceptually more general, could the role of the operator be to generate a logical
structure of which time evolution is only a representative in a particular basis?
In elementary cases like those described by a Dirac equation, the law of motion is close to
mere logics of nearest neighbours \cite{Polley2001}, so ``space-time'' might indeed reduce
to ``space-logics''. For quantum systems with great complexity, like an
elementary system coupled to a conscious observer, logical implications might depend on many
conditions, and could be halted as long as some conditions were not met by the state vector.
In different ways, ``halted'' evolutions are also considered in other scenarios
of quantum measurement. Stapp \cite{Stapp1993} proposed an interaction between mind and matter
based on the quantum Zeno effect; it would keep observer's attention focussed on one outcome in
a measurement, but a side effect would be the halting of processes in matter under observation.
With consistent histories \cite{Griffiths2002}, there is a copy of Hilbert space assigned to
each of the times $t_1,\ldots,t_n$ of measurement. In the present model, an analogue of such
``history Hilbert spaces'' may be seen in the subspaces defined by the written states of
records at a time $t_k$.
The modelling of
consciousness by a coherent, logically conjunctive neuronal activity appears to be in
the \emph{spirit} of Integrated Information \cite{Tononi2008}. As a \emph{measure} of
consciousness, however, section \ref{DimensionStatistics} of the present paper
suggests to take the total dimension of the subspace of neuronal activity, which is very
different from the entropy-based measure suggested in \cite{Tononi2008}. By taking logarithms
of dimensions, an elementary relation like that of one subspace covering
the union of many other subspaces becomes almost invisible.
If the model scenario could indeed be extended to more realistic systems and
observers, it would suggest an easier intuitive look on state vectors, and on the persistent
problem of ``the Now'' \cite{Mermin2013}. Intuitively, states of superposition have always been
associated with potentialities for a quantum system, but the need for an actuality
seemed to make it an insufficient characterisation. The model scenario suggests to identify
actuality with that potentiality which involves an extremal degree of awareness.
It is generated in one step of logical evolution, so an observer's impression of his entire
experience as one shifting moment would seem less surprising.
\begin{appendix}
\section{Suspended orbital return\label{secAvoidingLoops}}
Presumably, the probability for an orbital point to be visited twice under the dynamics of
section \ref{secOrbit} is negligible, but in order to enable exact statements on evolution,
any returns of the system should be rigorously excluded for the time span of interest.
Loops of evolution on a branching orbit of points in ${\cal O}$ cannot be avoided entirely if
${\cal O}$ is a finite set, but they can be avoided within an observer's lifetime. To keep
branches apart for $T$ splittings, assume ${\cal O}$ to be decomposable into $B^T$ subsets of
the form ${\cal J}[s]$, mutually disjoint and big enough to serve as an ensemble for a random
draw, with $s$ a register of the form
\begin{equation} \label{Register}
s = [s_1,s_2,\ldots,s_T] ~~ \mbox{ where }~~ s_j \in \{1,2,\ldots,B \}
\end{equation}
Then, starting from $k\in{\cal J}[s_1,s_2,\ldots,s_T]$, define the jumping addresses
$j(k,s)$ for branches $s = 1,\ldots,B$ by
\begin{equation} \label{JumpingRegister}
j(k,s) = \mbox{random draw from }[(s_2\,\mathrm{mod}\,B) + 1,s_3,\ldots,s_T,s]
\end{equation}
The cyclic permutation in the first entry serves to avoid jumping within one subset.
Entry $s$ will remain in the register for $T$ subsequent splittings. Thereafter,
the corresponding information is lost, allowing for inevitable loops to close.
The addresses generated in (\ref{JumpingRegister}) are a loop-avoiding specification of the
elements of ${\cal B}(k)$, previously defined in (\ref{DefB(k)}).
\end{appendix}
\end{document} |
\begin{document}
\quad \vskip1.375truein
\def\mq{\mathfrak{q}}
\def\mp{\mathfrak{p}}
\def\mH{\mathfrak{H}}
\def\mh{\mathfrak{h}}
\def\ma{\mathfrak{a}}
\def\ms{\mathfrak{s}}
\def\mm{\mathfrak{m}}
\def\mn{\mathfrak{n}}
\def\mz{\mathfrak{z}}
\def\mw{\mathfrak{w}}
\def\Hoch{{\tt Hoch}}
\def\mt{\mathfrak{t}}
\def\ml{\mathfrak{l}}
\def\mT{\mathfrak{T}}
\def\mL{\mathfrak{L}}
\def\mg{\mathfrak{g}}
\def\md{\mathfrak{d}}
\def\mr{\mathfrak{r}}
\begin{abstract}
The main purpose of the present paper is a study of orientations of the
moduli spaces of pseudo-holomorphic discs with boundary lying on
a \emph{real} Lagrangian submanifold, i.e., the fixed point set of an
anti-symplectic involution $\tau$ on a symplectic manifold.
We introduce the notion of $\tau$-relative spin structure for an anti-symplectic involution $\tau$,
and study how the orientations on the moduli space behave under the involution $\tau$.
We also apply this to the study of Lagrangian Floer theory of real Lagrangian submanifolds.
In particular, we study unobstructedness of the $\tau$-fixed point set
of symplectic manifolds and in particular prove its unobstructedness in the case of Calabi-Yau manifolds.
And we also do explicit calculation of Floer cohomology of ${\mathbb R} P^{2n+1}$
over $\Lambda_{0,{\rm nov}}^{{\mathbb Z}}$ which provides an example whose Floer cohomology is not
isomorphic to its classical cohomology.
We study Floer cohomology of the diagonal of the square of a symplectic
manifold, which leads to a rigorous construction of the quantum Massey product of
symplectic manifold in complete generality.
\end{abstract}
\date{Nov. 25, 2015}
\keywords{anti-symplectic involution, orientation, Floer cohomology, unobstructed
Lagrangian submanifolds, quantum cohomology}
\maketitle
\tableofcontents
\section{Introduction and statement of results}
\label{sec:introduction}
An {\it anti-symplectic involution} $\tau$ on
a symplectic manifold $(M,\omega)$ is an involution
on $M$ which satisfies $\tau^{\ast} \omega = -\omega$.
Two prototypes of anti-symplectic involutions are the complex
conjugation of a complex projective space with respect to the
Fubini-Study metric and the canonical reflection along the zero
section on the cotangent bundle. (See also \cite{CMS} for a construction of
an interesting class of anti-symplectic involutions on Lagrangian torus
fibrations.) The fixed point set of $\tau$, if it is non-empty, gives
an example of Lagrangian submanifolds. For instance, the set
of real points of a complex projective manifold defined over ${\mathbb R}$ belongs to this class.
In this paper we study Lagrangian intersection Floer theory for
the fixed point set of an anti-symplectic involution.
\par
Let $(M,\omega} \def\O{\Omegamega)$ be a compact, or more generally tame,
$2n$-dimensional symplectic manifold and $L$ an oriented closed Lagrangian submanifold of $M$.
It is well-known by now that the Floer cohomology of a
Lagrangian submanifold $L$ can not be defined in general.
The phenomenon of bubbling-off holomorphic discs is the main source of troubles in defining Floer cohomology of Lagrangian submanifolds.
In our books \chiite{fooobook1} and \chiite{fooobook2}, we developed general theory
of obstructions and deformations of Lagrangian intersection Floer cohomology based on the theory of filtered $A_{\infty}$ algebras
which we associate to each Lagrangian submanifold.
However it is generally very hard to formulate the criterion for unobstructedness to defining Floer cohomology let alone
to calculate Floer cohomology for a given Lagrangian submanifold.
In this regard, Lagrangian torus fibers in toric manifolds
provide good test cases for these problems, which we
studied in \cite{foootoric1}, \cite{foootoric2} in detail.
For this class of Lagrangian submanifolds,
we can do many explicit calculations of various notions and invariants
that are introduced in the books \cite{fooobook1} and \cite{fooobook2}.
Another important class of Lagrangian submanifolds is that of
the fixed point set of an anti-symplectic involution.
Actually, the set of real points in Calabi-Yau manifolds
plays an important role in the homological mirror symmetry conjecture.
(See \cite{Wal}, \cite{PSW} and \cite{fukaya;counting}.
See also \cite{Wel} for related topics on real points.)
The purpose of the present paper is to study Floer cohomology of this class of Lagrangian submanifolds.
For example, we prove unobstructedness for such Lagrangian submanifolds
in Calabi-Yau manifolds and also provide some other examples of explicit calculations of Floer cohomology.
The main ingredient of this paper is a careful study of
orientations of the moduli spaces of pseudo-holomorphic discs.
\par
Take an $\omega$-compatible almost complex structure $J$ on $(M,\omega)$.
We consider the
moduli space ${\mathcal M}(J;\beta)$ of $J$-holomorphic stable maps from a bordered Riemann surface
$(\Sigma, \partial \Sigma)$ of genus $0$ to $(M,L)$
which represent a class $\beta \in \Pi(L)=\pi_2(M,L)/\sim$: here $\beta \sim \beta' \in \pi_2(M,L)$ if and only if
$
\omega(\beta)=\omega(\beta')$
and
$
\mu_L(\beta) =\mu_L(\beta')$.
Here $\mu_L : \pi_2(M,L) \to {\mathbb Z}$ is the Maslov index homomorphism. The values of $\mu_L$ are even integers if $L$ is oriented.
When the domain $\Sigma$ is a $2$-disc $D^2$,
we denote by ${\mathcal M}^{\text{\rm reg}}(J;\beta)$
the subset of ${\mathcal M}(J;\beta)$ consisting of \emph{smooth} maps, that is, pseudo-holomorphic maps from the disc
without disc or sphere bubbles.
The moduli space ${\mathcal M}(J;\beta)$ has a Kuranishi structure, see Proposition 7.1.1 \cite{fooobook2}.
However it is not orientable in the sense of Kuranishi
structure in general.
In Chapter 8 \cite{fooobook2} we introduce the notion of relative spin structure on $L\subset M$ and its stable conjugacy class,
and prove that
if $L$ carries a relative spin structure
$(V,\sigma)$, its stable conjugacy class
$[(V,\sigma)]$ determines an orientation
on the moduli space ${\mathcal M}(J;\beta)$
(see Sections \ref{sec:pre}, \ref{sec:relspin} for the precise definitions and notations.)
We denote it by ${\mathcal M}(J;\beta)^{[(V,\sigma)]}$
when we want to specify the stable conjugacy class of the relative spin structure.
If we have a diffeomorphism $f : M \to M$ satisfying $f(L)=L$, we
can define the pull-back $f^{\ast}[(V,\sigma)]$ of the relative spin structure.
(See also Subsection \ref{subsec:relspin}.)
Now we consider the case that $\tau : M \to M$ is an anti-symplectic involution
and
$$L = {\rm Fix} ~\tau.$$
We assume $L$ is nonempty, oriented and relatively spin.
Take an $\omega$-compatible almost complex structure $J$
satisfying $\tau^{\ast} J =-J$. We call such $J$ {\it $\tau$-anti-invariant}.
Then we find that $\tau$ induces a map
$$\tau_{\ast} : {\mathcal M}^{\text{\rm reg}}(J;\beta)\longrightarrow {\mathcal M}^{\text{\rm reg}}(J;\beta)$$
which satisfies $\tau_{\ast} \circ \tau_{\ast} = {\rm Id}$.
(See Definition \ref{def:inducedtau} and Lemma \ref{Lemma38.6}.)
Here we note that $\tau_{\ast}(\beta)=\beta$ in $\Pi(L)$
(see Remark \ref{rem:tau}).
We pick a conjugacy class of relative spin structure $[(V,\sigma)]$ and consider the pull back
$\tau^{\ast}[(V,\sigma)]$.
Then we have an induced map
$$\tau_{\ast} : {\mathcal M}^{\text{\rm reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\longrightarrow {\mathcal M}^{\text{\rm reg}}(J;\beta)^{[(V,\sigma)]}.$$
We will prove in Proposition \ref{regtau} that $\tau_{\ast}$ is induced by an automorphism of
${\mathcal M}^{\text{\rm reg}}(J;\beta)$ as a space with Kuranishi structure, see Definition \ref{def:auto}.
The definition for an automorphism to be orientation preserving
in the sense of Kuranishi structure is given in Definition \ref{def:oripres}.
The first problem we study is the question whether $\tau_{\ast}$ respects the orientation or not.
The following theorem plays a fundamental role in this paper.
\begin{thm}[Theorem \ref{Proposition38.7}]\label{thm:fund}
Let $L$ be a fixed point set of an anti-symplectic involution $\tau$ on $(M, \omega)$ and
$J$ a $\tau$-anti-invariant almost complex structure
compatible with $\omega$.
Suppose that $L$ is oriented and carries a relative spin structure $(V,\sigma)$.
Then the map
$
\tau_*: {\mathcal M}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]} \to
{\mathcal M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}
$
is orientation preserving if $\mu_L(\beta) \equiv 0
\mod 4$ and is orientation reversing if
$\mu_L(\beta) \equiv 2
\mod 4$.
\end{thm}
\begin{rem}\label{rem;thm1}
If $L$ has a {\it $\tau$-relative spin structure}
(see Definition \ref{Definition44.17}),
then
$${\mathcal M}^{\text{\rm reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
={\mathcal M}^{\text{\rm reg}}(J;\beta)^{[(V,\sigma)]}$$ as spaces with oriented Kuranishi structures.
Corollary \ref{corProposition38.7} is nothing but this case.
If $L$ is spin, then it is automatically $\tau$-relatively spin
(see Example \ref{Remark44.18}).
Later in
Proposition \ref{Proposition44.19} we show that there is an example of a Lagrangian submanifold
$L$ which is relatively spin but not $\tau$-relatively spin.
\end{rem}
Including marked points, we consider the
moduli space ${\mathcal M}_{k+1, m}(J;\beta)$
of $J$-holomorphic stable maps to $(M,L)$ from a bordered Riemann surface
$(\Sigma, \partial \Sigma)$ in class $\beta \in \Pi(L)$ of genus $0$ with
$(k+1)$ boundary marked points
and $m$ interior marked points.
The anti-symplectic involution $\tau$ also induces
a map $\tau_{\ast}$ on the moduli space of $J$-holomorphic maps with
marked points. See Theorem \ref{Proposition38.11}.
Then we have:
\begin{thm}[Theorem \ref{Proposition38.11}]\label{withmarked}
The induced map
$$\tau_* : {\mathcal M}_{k+1,m}(J;\beta)^{\tau^{\ast}[(V,\sigma)]} \to
{\mathcal M}_{k+1,m}(J;\beta)^{[(V,\sigma)]}
$$ is orientation preserving if and only if $\mu_L(\beta)/2 + k + 1 + m$
is even.
\end{thm}
When we construct the filtered $A_{\infty}$ algebra
$(C(L,\Lambda_{0,{\rm nov}}), \mathfrak m)$ associated to a relatively spin Lagrangian submanifold $L$,
we use the component of ${\mathcal M}_{k+1}(J;\beta)$ consisting of the elements
whose boundary marked points lie in counter-clockwise cyclic
order on $\partial \Sigma$. We also involve interior marked points.
For the case of $(k+1)$ boundary marked points
on $\partial \Sigma$ and $m$ interior marked points in
${\rm Int} ~\Sigma$,
we denote the corresponding component by
${\mathcal M}_{k+1, m}^{\rm main}(J;\beta)$ and call it the {\it main component}.
Moreover, we consider the moduli space
${\mathcal M}_{k+1,m}^{\rm main}(J;\beta;P_1, \ldots , P_k)$
which is defined by taking a fiber product of ${\mathcal M}_{k+1,m}^{\rm main}(J;\beta)$
with smooth singular simplices $P_1, \ldots , P_k$ of $L$.
(This is nothing but the main component of (\ref{withP}) with $m=0$.)
A stable conjugacy class of a relative spin structure determines orientations on
these spaces as well.
See Sections \ref{sec:pre} and \ref{sec:relspin} for
the definitions and a precise description of their orientations.
Here we should note that $\tau_{\ast}$ above does {\it not} preserve
the cyclic ordering of boundary marked points and so
it does not preserve the main component.
However, we can define the maps denoted by
$$
\tau_{\ast}^{\rm main} : {\mathcal M}_{k+1,m}^{\rm main}(J;\beta)^{\tau^*[(V,\sigma)]} \to
{\mathcal M}_{k+1,m}^{\rm main}(J;\beta)^{[(V,\sigma)]}
$$
and
\begin{equation}\label{eq:taumain}
\tau_*^{\operatorname{main}} :
{\mathcal M}^{\operatorname{main}}_{k+1,m}(J;\beta;P_1,\dots,P_k)^{\tau^*[(V,\sigma)]}
\to
{\mathcal M}^{\operatorname{main}}_{k+1,m}(J; \beta ; P_k,\dots,P_1)^{[(V,\sigma)]}
\end{equation}
respectively.
See (\ref{38.13}), (\ref{taumain}) and (\ref{38.16})
for the definitions.
We put
$\deg ' P= \deg P -1$ which is the shifted degree of $P$
as a singular cochain of $L$ (i.e., $\deg P= \dim L-\dim P$.)
Then we show the following:
\begin{thm}[Theorem \ref{Lemma38.17}]\label{withsimplex}
Denote
$$
\epsilon = \frac{\mu_L(\beta)}{2} + k + 1 + m + \sum_{1 \le i < j \le k} \deg'P_i\deg'P_j.
$$
Then the map induced by the involution $\tau$
$$
\tau_*^{\operatorname{main}} :
{\mathcal M}^{\operatorname{main}}_{k+1,m}(J;\beta;P_1,\dots,P_k)^{\tau^*[(V,\sigma)]}
\longrightarrow
{\mathcal M}^{\operatorname{main}}_{k+1,m}(J; \beta ; P_k,\dots,P_1)^{[(V,\sigma)]}
$$
is orientation preserving if $\epsilon$ is even, and
orientation reversing if $\epsilon$ is odd.
\end{thm}
See Theorem \ref{Lemma38.17withQ}
for a more general statement involving the fiber product with singular
simplices $Q_j$ $(j=1,\ldots ,m)$ of $M$.
\par\medskip
These results give rise to some non-trivial applications
to Lagrangian intersection Floer theory for the case $L={\rm Fix}~\tau$.
We briefly describe some consequences in the rest of this section.
\par
In the books \cite{fooobook1} and \cite{fooobook2}, using the moduli spaces
${\mathcal M}^{\operatorname{main}}_{k+1}(J;\beta;P_1,\dots,P_k)$,
we construct a filtered $A_{\infty}$ algebra
$(C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}}), {\mathfrak m})$ with $\mathfrak m =\{{\mathfrak m_k}\}_{k=0,1,2,\ldots}$
for any relatively spin closed Lagrangian submanifold $L$
of $(M,\omega)$ (see Theorem \ref{thm:Ainfty}) and
developed the obstruction and deformation theory of
Lagrangian intersection Floer cohomology. Here $\Lambda_{0,{\rm nov}}^{{\mathbb Q}}$ is
the universal Novikov ring over ${\mathbb Q}$ (see (\ref{eq:nov})).
In particular, we formulate the unobstructedness to
defining Floer cohomology of $L$ as the existence of
solutions of the Maurer-Cartan equation
for the filtered $A_{\infty}$ algebra (see Definition \ref{def:boundingcochain}).
We denote the set of such solutions by ${\mathcal M}(L;\Lambda_{0,{\rm nov}}^{\mathbb Q})$.
By definition, when ${\mathcal M}(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}}) \ne \emptyset$,
we can use any element $b \in {\mathcal M}(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$ to deform the Floer's `boundary' map
and define a deformed Floer cohomology $HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$.
See Subsection \ref{subsec:Ainfty} for a short review of this process.
Now for the case $L={\rm Fix}~\tau$,
Theorem \ref{withsimplex} yields
the following particular property of the filtered
$A_{\infty}$ algebra:
\begin{thm}\label{Theorem34.20} Let $M$ be a compact, or tame, symplectic manifold and
$\tau$ an anti-symplectic involution. If $L = \text{\rm Fix}~\tau$
is non-empty, compact, oriented and $\tau$-relatively spin, then the
filtered $A_{\infty}$ algebra $(C(L;\Lambda_{0,{\rm nov}}^{\mathbb
Q}),\mathfrak m)$ can be chosen so that
\begin{equation}\label{34.21}
\mathfrak m_{k,\beta}(P_1,\dots,P_k)
= (-1)^{\epsilon_1} \mathfrak m_{k,\tau_*\beta}(P_k,\dots,P_1)
\end{equation}
where
$$
\epsilon_1 = \frac{\mu_L(\beta)}{2} + k+ 1 + \sum_{1\le i < j \le k}
\deg'P_i\deg'P_j.
$$
\end{thm}
Using the results from \cite{fooobook1} and \cite{fooobook2}, we derive that
Theorem \ref{Theorem34.20} implies
unobstructedness
of $L={\rm Fix}~\tau$ in the following cases:
\begin{cor}\label{Corollary34.22}
Let $\tau$ and $L = \text{\rm Fix}~\tau$ be as in Theorem \ref{Theorem34.20}. In addition,
we assume either
\begin{enumerate}
\item
$c_1(TM)\vert_{\pi_2(M)} \equiv 0 \mod 4$,
or
\item $c_1(TM)\vert_{\pi_2(M)} \equiv 0 \mod 2$ and
$i_{\ast} : \pi_1(L) \to \pi_1(M)$ is injective.
(Here $i_{\ast}$ is the natural map induced by the inclusion $i : L \to M$.)
\end{enumerate}
Then
$L$ is unobstructed over $\Lambda_{0,{\rm nov}}^{\mathbb Q}$
(i.e., ${\mathcal M}(L;\Lambda_{0,{\rm nov}}^{\mathbb Q})\ne \emptyset$)
and so $HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$ is defined
for any $b \in {\mathcal M}(L;\Lambda_{0,{\rm nov}}^{\mathbb Q})$.
Moreover, we may choose $b \in {\mathcal M}(L;\Lambda_{0,{\rm nov}}^{\mathbb Q})$ so that the map
$$
\aligned
(-1)^{k(\ell+1)} {(\mathfrak m_2)}_* & : HF^k((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb Q})
\otimes HF^{\ell}((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb Q}) \\
& \quad \longrightarrow HF^{k+\ell}((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb Q})
\endaligned
$$
induces a graded commutative product.
\end{cor}
\begin{rem} \label{Remark34.24}
By symmetrizing the filtered $A_{\infty}$ structure
$\mathfrak m_k$ of $(C(L,\Lambda_{0,{\rm nov}}^{\mathbb
Q}),\mathfrak m)$,
we obtain a filtered $L_{\infty}$ algebra
$(C(L,\Lambda_{0,{\rm nov}}^{\mathbb Q}),\mathfrak l)
=(C(L,\Lambda_{0,{\rm nov}}^{\mathbb Q}),\{\mathfrak l_k\}_{k=0,1,2,\ldots})$.
See Section A3 \cite{fooobook2} for the definitions of the symmetrization
and of the filtered $L_{\infty}$ structure.
In the situation of Corollary \ref{Corollary34.22},
the same proof shows that we have
$\mathfrak l_k = \overline{\mathfrak l}_k\otimes \Lambda_{0,{\rm nov}}^{\mathbb
Q}$ if $k$ is even. Here $\overline{\mathfrak l}_k$ is the (unfiltered) $L_{\infty}$ structure
obtained by the reduction of the coefficient of
$(C(L,\Lambda_{0,{\rm nov}}^{\mathbb Q}),\mathfrak l)$ to ${\mathbb Q}$.
Note that {\it over} ${\mathbb R}$ we may choose $\overline{\mathfrak l}_k = 0$
for $k\geq 3$ by Theorem X in Chapter 1 \cite{fooobook1}. On the other hand,
Theorem A3.19 \cite{fooobook2} shows that
$\overline{\mathfrak l}_k =0$
for $H(L; {\mathbb Q})$.
\end{rem}
We note that we do not assert that Floer cohomology $HF((L,b),(L,b);
\Lambda_{0,{\rm nov}}^{\mathbb Q})$ is isomorphic to $H^*(L;{\mathbb Q}) \otimes
\Lambda_{0,{\rm nov}}^{\mathbb Q}$, in general.
(Namely, we do not assert $\mathfrak m_1 =
\overline{\mathfrak m}_{1}\otimes \Lambda_{0,{\rm nov}}^{{\mathbb Q}}$.)
Indeed, we show in Subsection \ref{subsec:Appl2} that
for the case $L={\mathbb R} P^{2n+1}$ in ${\mathbb C} P^{2n+1}$ the Floer cohomology group
is {\it not} isomorphic to the classical cohomology group.
(See Theorem \ref{Theorem44.24}.)
Moreover, if we assume $c_1(TM)\vert_{\pi_2(M)} = 0$ in addition,
we can show the following non-vanishing theorem of Floer
cohomology:
\begin{cor}\label{TheoremN}
Let $\tau$, $L = \text{\rm Fix}~\tau$ be as in Theorem \ref{Theorem34.20}.
Assume $c_1(TM)\vert_{\pi_2(M)} = 0$. Then
$L$ is unobstructed over $\Lambda_{0,{\rm nov}}^{\mathbb Q}$
and
$$
HF((L,b),(L,b);\Lambda_{{\rm nov}}^{{\mathbb Q}})\ne 0
$$
for any $b \in {\mathcal M}(L;\Lambda_{0,{\rm nov}}^{\mathbb Q})$.
In particular, we have
$$
\psi (L) \cap L \ne \emptyset
$$
for any Hamiltonian diffeomorphism $\psi : M \to M$.
\end{cor}
Theorem \ref{Theorem34.20} and Corollaries \ref{Corollary34.22},
\ref{TheoremN}
can be applied to
the real point set $L$ of any Calabi-Yau manifold (defined over
${\mathbb R})$ if it is oriented and $\tau$-relatively spin.
\par\medskip
Another application of Theorem \ref{Theorem34.20} and Corollary \ref{Corollary34.22} is a ring isomorphism between
quantum cohomology and Lagrangian Floer cohomology for
the case of the diagonal
of the square of a symplectic manifold. Let $(N,\omega_N)$ be a closed symplectic manifold.
We consider the product
$$
(M,\omega_M) = (N\times N, -{\rm pr}_1^*\omega_N + {\rm pr}_2^* \omega_N),
$$
where ${\rm pr}_i$ is the projection to the $i$-th factor.
The involution $\tau : M \to M$ defined by
$\tau(x,y) = (y,x)$ is
anti-symplectic and its fixed point set $L$ is the diagonal
$$
\Delta_N= \{(x,x) \mid x \in N\} \cong N.
$$
As we will see in the proof of Theorem
\ref{Proposition34.25}, the diagonal set is always unobstructed. Moreover,
we note that the natural map $i_*: H_*(\Delta_N, {\mathbb Q}) \to H_*(N \times N;{\mathbb Q})$
is injective and so the spectral sequence constructed
in Chapter 6 \cite{fooobook1} collapses at the $E_2$-term
by Theorem D (D.3) \cite{fooobook1}, which in turn induces the natural isomorphism
$H(N;{\mathbb Q}) \otimes \Lambda_{0,{\rm nov}} \cong HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$
for any $b \in{\mathcal M}(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$.
We prove in the proof of
Theorem \ref{Proposition34.25} in Subsection \ref{subsec:Appl1} that $\mathfrak m_2$ also
derives a graded commutative product
$$
\cup_Q : HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb Q}) \otimes
HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb Q}) \to HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb Q}).
$$
In fact, we can prove the
following stronger statement.
\begin{thm}\label{Proposition34.25}
Let $(N, \omega_N)$ be a closed symplectic manifold.
\begin{enumerate}
\item
The diagonal set of
$(N\times N, -{\rm pr}_1^* \omega_N + {\rm pr}_2^*\omega_N)$ is unobstructed
over $\Lambda_{0,{\rm nov}}^{{\mathbb Q}}$.
\item
There exists a bounding cochain $b$ such that the product $\cup_Q$ coincides with the quantum
cup product on $(N,\omega_N)$ under the natural isomorphism
$$HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{{\mathbb Q}}) \cong H(N;{\mathbb Q}) \otimes
\Lambda_{0,{\rm nov}}^{{\mathbb Q}}.$$
\end{enumerate}
\end{thm}
\par
If we use Corollary 3.8.43 \cite{fooobook1},
we can easily find that the diagonal set is
{\it weakly unobstructed} in the sense of Definition 3.6.29 \cite{fooobook1}. See also Remark \ref{Remark44.25}.
We also note that for the case of diagonals, $\mathfrak m_k$ $(k\ge 3)$ define
a quantum (higher) Massey product.
It was discussed formally in \cite{fukaya;mhtpy}.
We have made it rigorous here:
\begin{cor}\label{qMassey}
For any closed symplectic manifold $(N, \omega_N)$,
there exists a filtered $A_{\infty}$ structure
$\mathfrak m_k$
on $H(N;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})=H(N;{\mathbb Q}) \otimes
\Lambda_{0,{\rm nov}}^{{\mathbb Q}}$ such that
\begin{enumerate}
\item $\mathfrak m_0 = \mathfrak m_1 =0$;
\item $\cup_Q$ defined by (\ref{qproduct}) using
$\mathfrak m_2$ coincides with the quantum cup product;
\item the ${\mathbb R}$-reduction
$(H(N;{\mathbb Q}), \overline{\mathfrak m})\otimes_{{\mathbb Q}} {\mathbb R}$
of the filtered $A_{\infty}$ algebra is homotopy equivalent to the de Rham complex of $N$ as an $A_{\infty}$ algebra,
where $(H(N;{\mathbb Q}), \overline{\mathfrak m})$ is
the reduction of the coefficient of $(H(N;\Lambda_{0,{\rm nov}}^{{\mathbb Q}}),\mathfrak m)$
to ${\mathbb Q}$.
\end{enumerate}
\end{cor}
\par\smallskip
The paper is organized as follows:
In Section \ref{sec:pre}, we briefly recall some basic material
on the moduli space of stable maps from a bordered Riemann surface
of genus $0$.
In Section \ref{sec:relspin},
we also recall from \cite{fooobook2} the notion of relative spin structure, its stable
conjugacy class
and the orientation of the moduli space of pseudo-holomorphic discs.
We describe how the stable conjugacy class of a relative spin structure determines an orientation on the moduli space.
We introduce here the notion of $\tau$-relative spin structure
for an anti-symplectic involution $\tau : M \to M$, and
also give some examples of Lagrangian submanifolds which are relatively spin but not
$\tau$-relatively spin.
In Section \ref{sec:inducedtau}, we define the map
$\tau_{\ast}$ on the moduli space induced by
$\tau$ and study how the induced map $\tau_{\ast}$
on various moduli spaces
changes or preserves the orientations.
Assuming Theorem \ref{thm:fund} holds, we prove Theorem \ref{withmarked} in this section.
The fundamental theorems Theorem \ref{thm:fund}
and Theorem \ref{withsimplex} are proved in
Section \ref{sec:Proofth}.
Section \ref{sec:Appl} is devoted to
various applications of the results obtained above
to Lagrangian Floer cohomology.
After a short review of the general story of Lagrangian intersection
Floer theory laid out in \cite{fooobook1} and \cite{fooobook2}, we prove
Theorem \ref{Theorem34.20}, Corollary \ref{Corollary34.22} and
Corollary \ref{TheoremN} in Subsection \ref{subsec:Appl1}.
Subsection \ref{proof1.9} is devoted to the proofs of Theorem \ref{Proposition34.25} and Corollary \ref{qMassey}.
In particular, we introduce stable maps with admissible systems of circles and study their moduli spaces in Subsection \ref{6.4}.
In Subsection \ref{subsec:Appl2},
we calculate Floer cohomology of ${\mathbb R} P^{2n+1}$ over $\Lambda_{0,{\rm nov}}^{{\mathbb Z}}$
coefficients by studying orientations in detail.
The calculation shows that the Floer cohomology of ${\mathbb R} P^{2n+1}$
over $\Lambda_{0,{\rm nov}}^{{\mathbb Z}}$ is not isomorphic to
the usual cohomology.
This result contrasts with Oh's earlier calculation \cite{Oh93} of the Floer
cohomology of real projective spaces over ${\mathbb Z}_2$ coefficients,
where the Floer cohomology is isomorphic to the usual cohomology
over ${\mathbb Z}_2$. In the first two subsections of the Appendix, we briefly recall from \cite{fooobook2} the definition of orientation on a space with Kuranishi structure
and the notion of group action on a space with Kuranishi structure.
In the third subsection of the Appendix, we present how to promote filtered $A_{n,K}$-structures keeping the invariance under the involution.
\psiar
\muedskip
Originally, the content of this paper appeared as a part of Chapter 8
in the preprint version \chiite{fooo06} of the books
\chiite{fooobook1}, \chiite{fooobook2}, and
was intended to be published in a part of the book. However, due to the publisher's page
restriction on the AMS/IP Advanced Math Series, we took out two chapters, Chapter 8 and Chapter 10 from
the preprint \chiite{fooo06} and published the book without
those two chapters.
The content of this paper is based on the parts extracted from
Chapter 8 (Floer theory of Lagrangian submanifolds over ${\muathbb Z}$)
and Chapter 9 (Orientation) in the preprint \chiite{fooo06}.
We also note that this is a part of the paper cited as [FOOO09I] in the books \chiite{fooobook1}, \chiite{fooobook2}.
The half of the remaining part of Chapter 8 in \chiite{fooo06} is published as
\chiite{fooointeger}.
\psiar
The authors would like to thank Cheol-Hyun Cho for some helpful discussion
concerning Theorem \rhoef{Proposition34.25}. They also thank
Mohammad Tehrani, Aleksey Zinger and also the referee for pointing out errors
in Lemma \rhoef{oripreversing} and Lemma \rhoef{Lemma44.28}
in the first draft, respectively, and thank the anonymous referees for their careful reading and for their comments
that helped to improve the presentation of the paper.
\psiar
\psiar
Kenji Fukaya is supported partially by JSPS Grant-in-Aid for Scientific Research
No. 23224002 and NSF grant \# 1406423, Yong-Geun
Oh by IBS project \# IBS-R003-D1 in Korea and by US NSF grant \# 0904197,
Hiroshi Ohta by JSPS Grant-in-Aid
for Scientific Research Nos. 19340017, 23340015, 15H02054 and Kaoru Ono by JSPS Grant-in-Aid for
Scientific Research, Nos. 18340014, 21244002, 26247006.
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaection{Preliminaries}
\lambda} \def\La{\Lambdaambdabel{sec:pre}
In this section, we prepare some basic notations we use in this paper.
We refer to Section 2.1 in \chiite{fooobook1} and Section A1 in \chiite{fooobook2} for a more detailed explanation
of moduli spaces and the notion of Kuranishi structure, respectively.
Let $L$ be an oriented compact Lagrangian submanifold
of $(M,\omega} \def\O{\Omegamega)$. Take an $\omega} \def\O{\Omegamega$-compatible almost complex structure $J$ on $M$.
We recall Definition 2.4.17 in \chiite{fooobook1} where we introduce
the relation $\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaim$ in $\psii_2(M,L)$:
We define $\betaegin{equation}ta \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaim \betaegin{equation}ta'$ in $\psii_2(M,L)$ if and only if
$
\omega} \def\O{\Omegamega(\betaegin{equation}ta)=\omega} \def\O{\Omegamega(\betaegin{equation}ta')$
and
$
\muu_L(\betaegin{equation}ta) =\muu_L(\betaegin{equation}ta').
$
We denote the quotient group by
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{eq:Pi}
\ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiii(L) =\psii_2(M,L)/\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaim.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
This is an abelian group.
Let $\betaegin{equation}ta \in \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiii(L)$.
{\it A stable map from a bordered Riemann surface of genus zero with
$(k+1)$ boundary marked points and $m$ interior marked points}
is a pair $((\Sigmama, \vec{z}, {\vec {z}}^+), w)=((\Sigmama, z_0, \delta} \def\muathbb{D}{\muathbb{D}eltaots , z_k, z_1^{+}, \delta} \def\muathbb{D}{\muathbb{D}eltaots, z_{m}^+), w)$
such that $(\Sigmama, \vec{z}, \vec{z}^+)$ is a bordered
semi-stable curve of genus zero with $(k+1)$ boundary
marked points and $m$ interior marked points
and $w:(\Sigmama, \psiartial \Sigmama) \tauo (M,L)$ is
a $J$-holomorphic map such that its automorphism group, i.e.,
the set of biholomorphic maps $\psisi : \Sigmama \tauo \Sigmama$
satisfying $\psisi(z_i)=z_i, \psisi(\vec{z_i}\,^+) =\vec{z_i}\,^+$
and $w\chiirc \psisi =w$ is finite.
We say that $((\Sigmama, \vec{z}, {\vec {z}}\,^+), w)$ is isomorphic to
$((\Sigmama', \vec{z}\,', {\vec {z}}\,^{+\psirime}), w')$,
if there exists a bi-holomorphic map
$\psisi : \Sigmama \tauo \Sigmama'$ satisfying
$\psisi(z_i)=z_i', \psisi(\vec{z_i}\,^{+}) =\vec{z_i}\,^{+\psirime}$
and $w'\chiirc \psisi =w$. We denote by ${\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta)$
the set of the isomorphism classes of stable maps in class $\betaegin{equation}ta \in \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiii(L)$ from a bordered
Riemann surface of genus zero with $(k+1)$ boundary
marked points and $m$ interior marked points.
When the domain curve $\Sigmama$ is a smooth 2-disc $D^2$, we
denote the corresponding subset by
${\muathbb C}M^{\rhom reg}_{k+1,m}(J;\betaegin{equation}ta)$.
We note that ${\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta)$ is a compactification of
${\muathbb C}M^{\rhom reg}_{k+1,m}(J;\betaegin{equation}ta)$.
The virtual real dimension is
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaim_{{\muathbb R}} {\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta) =
n + \muu_L(\betaegin{equation}ta)+k+1 +2m -3,
$$
where $n=\delta} \def\muathbb{D}{\muathbb{D}eltaim L$ and $\muu_L(\betaegin{equation}ta)$ is the Maslov index which
is an even integer for an oriented Lagrangian submanifold $L$.
When we do not consider interior marked points,
we denote them by ${\muathbb C}M_{k+1}(J;\betaegin{equation}ta)$,
${\muathbb C}M_{k+1}^{\rhom reg}(J;\betaegin{equation}ta)$, and
when we do not consider any marked points, we simply denote
them by ${\muathbb C}M(J;\betaegin{equation}ta)$, ${\muathbb C}M^{\rhom reg}(J;\betaegin{equation}ta)$ respectively.
Furthermore, we define a component $ {\muathbb C}M^{\rhom main}_{k+1,m}(J;\betaegin{equation}ta) $ of ${\muathbb C}M_{k+1,m}(J;\betaegin{equation}ta)$
by
$$
\alphaligned
{\muathbb C}M^{\rhom main}_{k+1,m}(J;\betaegin{equation}ta)
=\{
((& \Sigmama, \vec{z}, {\vec {z}}\,^+), w) \in {\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta)
~\vert~ \\
& {\tauext{$(z_0,z_1,\delta} \def\muathbb{D}{\muathbb{D}eltaots , z_k)$
is in counter-clockwise cyclic order on $\psiartial \Sigmama $}}
\},
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonndaligned$$
which we call the {\it main component}.
We define ${\muathbb C}M_{k+1,m}^{\rhom main, reg}(J;\betaegin{equation}ta)$,
${\muathbb C}M_{k+1}^{\rhom main}(J;\betaegin{equation}ta)$ and
${\muathbb C}M_{k+1}^{\rhom main, reg}(J;\betaegin{equation}ta)$
in a similar manner.
\psiar
We have a Kuranishi structure on ${\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta)$
so that the evaluation maps
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{eq:ev}
\alphaligned
ev_i ~ & :~ {\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta) \lambda} \def\La{\Lambdaongrightarrow
L, \quad i=0,1,\delta} \def\muathbb{D}{\muathbb{D}eltaots ,k\\
ev_j^+ ~ & :~{\muathbb C}M_{k+1, m}(J;\betaegin{equation}ta) \lambda} \def\La{\Lambdaongrightarrow
M, \quad j=1,\delta} \def\muathbb{D}{\muathbb{D}eltaots ,m
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonndaligned
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
defined by $ev_i((\Sigmama, \vec{z}, {\vec {z}}\,^+), w))=w(z_i)$
and
$ev_j^+((\Sigmama, \vec{z}, {\vec {z}}\,^+), w))=w(z_j^+)$
are weakly submersive.
(See Section 5 \chiite{FO} and Section A1.1 \chiite{fooobook2} for
the definitions of Kuranishi structure
and weakly submersive map.)
Then for given smooth singular simplices $(f_i : P_i \tauo L)$ of $L$ and
$(g_j : Q_j \tauo M)$ of $M$, we can define the fiber product in the sense of Kuranishi structure:
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{withsimplicies}
\alphaligned
& {\muathbb C}M_{k+1,m}(J;\betaegin{equation}ta; \vec{Q},\vec{P}) \\
:= & {\muathbb C}M_{k+1,m}(J;\betaegin{equation}ta) {}_{(ev_1^+, \delta} \def\muathbb{D}{\muathbb{D}eltaots, ev_m^+,ev_1,\delta} \def\muathbb{D}{\muathbb{D}eltaots,ev_k)} \tauimes _{g_1 \tauimes \delta} \def\muathbb{D}{\muathbb{D}eltaots \tauimes f_k}\lambda} \def\La{\Lambdaeft(\psirod_{j=1}^{m} Q_j\tauimes
\psirod_{i=1} ^{k}P_i \rhoight).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonndaligned
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
See Section A1.2 \chiite{fooobook2} for the definition of fiber product
of Kuranishi structures.
We define ${\muathbb C}M_{k+1,m}^{\rhom main}(J;\betaegin{equation}ta; \vec{Q},\vec{P})$
in a similar way.
When we do not consider the interior marked points,
we denote the corresponding moduli spaces by
${\muathbb C}M_{k+1}(J;\betaegin{equation}ta; \vec{P})$ and
${\muathbb C}M_{k+1}^{\rhom main}(J;\betaegin{equation}ta; \vec{P})$, respectively.
Namely,
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{withP}
{\muathbb C}M_{k+1}(J;\betaegin{equation}ta; \vec{P})
:= {\muathbb C}M_{k+1}(J;\betaegin{equation}ta) {}_{(ev_1,\delta} \def\muathbb{D}{\muathbb{D}eltaots,ev_k)}
\tauimes_{f_1 \tauimes \delta} \def\muathbb{D}{\muathbb{D}eltaots \tauimes f_k}
\lambda} \def\La{\Lambdaeft(\psirod_{i=1}^{k}P_i \rhoight).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
In Subsection \rhoef{subsec:orimain}, we describe
the orientations on these spaces precisely.
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaection{$\tauau$-relative spin structure and orientation}\lambda} \def\La{\Lambdaambdabel{sec:relspin}
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubsection{Review of relative spin structure and orientation}\lambda} \def\La{\Lambdaambdabel{subsec:relspin}
It is known that the moduli space of pseudo-holomorphic discs
with Lagrangian boundary condition is not always orientable.
To discuss orientability and orientation of the moduli space,
we first recall the notion of relative spin structure
and its stable conjugacy class introduced in \chiite{fooobook2}
and also briefly review how the stable conjugacy class of relative
spin structure determines an orientation of the moduli space of
pseudo-holomorphic discs with Lagrangian boundary condition.
See Section 8.1 \chiite{fooobook2} for more details.
See also V. de Silva's work \chiite{Si}.
\betaegin{equation}gin{defn}\lambda} \def\La{\Lambdaambdabel{def:relspin}
An oriented Lagrangian submanifold $L$ of $M$ is
called {\it relatively spin} if there exists a class
$st \in H^2(M;{\muathbb Z}_2)$ such that $st\vert _L = w_2(TL)$.
\psiar
A pair of oriented Lagrangian submanifolds $(L^{(1)},L^{(0)})$ is called
{\it relatively spin}, if there exists a class $st\in H^2(M;{\muathbb Z}_2)$ satisfying
$st |_{L^{(i)}}=w_2(TL^{(i)})$ ($i = 0,1$)
simultaneously.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{defn}
\betaegin{equation}gin{rem}\lambda} \def\La{\Lambdaambdabel{rem:pin}
Using the relative pin structure, J. Solomon \chiite{Sol} generalized our results
about the orientation problem studied in \chiite{fooobook2}
to the case of non-orientable Lagrangian submanifolds.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
Let $L$ be a relatively spin Lagrangian submanifold of $M$.
We fix a triangulation of $M$ such that $L$ is a subcomplex.
A standard obstruction theory argument shows that we can take an oriented real vector bundle $V$ over the $3$-skeleton
$M_{[3]}$ of $M$ which satisfies $w_2(V)=st$.
Then $w_2(TL\vert_{L_{[2]}} \omega} \def\O{\Omegaplus V\vert_{L_{[2]}})=0$ and so
$TL \omega} \def\O{\Omegaplus V$ carries a spin structure on the $2$-skeleton $L_{[2]}$ of $L$.
\betaegin{equation}gin{defn}\lambda} \def\La{\Lambdaambdabel{def:relspinstr}
The choice of an orientation on $L$,
a cohomology class
$st\in H^2(M;{\muathbb Z}_2)$,
an oriented real vector bundle
$V$ over the $3$-skeleton $M_{[3]}$ satisfying
$w_2(V)=st$ and a spin structure $\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma$ on
$(TL \omega} \def\O{\Omegaplus V)\vert_{L_{[2]}}$ is called a {\it relative spin structure} on $L \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset M$.
\psiar
A {\it relative spin structure} on the pair
$(L^{(1)},L^{(0)})$ is the choice of orientations on $L^{(i)}$, a cohomology class
$st\in H^2(M;{\muathbb Z}_2)$,
an oriented real vector bundle $V$ over the $3$-skeleton $M_{[3]}$ satisfying
$w_2(V)=st$
and spin structures on $(TL^{(i)} \omega} \def\O{\Omegaplus V)|_{L^{(i)}_{[2]}}$
$(i=0,1)$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{defn}
In this paper we fix an orientation on $L$.
If $L$ is spin, we have an associated relative spin structure for each spin structure on $L$ as follows:
Take $st=0$ and $V$ is trivial. Then the spin structure
on $L$ naturally induces the spin structure on $TL \omega} \def\O{\Omegaplus V$.
Definition \rhoef{def:relspinstr} depends on the choices of $V$ and the triangulation of $M$.
We introduce an equivalence relation called {\it stable conjugacy}
on the set of relative spin structures so that
the stable conjugacy class is independent of such choices.
\betaegin{equation}gin{defn}\lambda} \def\La{\Lambdaambdabel{def:stablyconj}
We say that two relative spin structures
$(st_i,V_i,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma_i)$ $(i=1,2)$ on $L$ are
{\it stably conjugate}, if there exist integers $k_i$
and an orientation preserving bundle isomorphism
$\varphiphi: V_1 \omega} \def\O{\Omegaplus {\muathbb R}^{k_1} \tauo V_2 \omega} \def\O{\Omegaplus {\muathbb R}^{k_2}$
such that by $1\omega} \def\O{\Omegaplus \varphiphi\vert_{L_{[2]}}: (TL \omega} \def\O{\Omegaplus V_1)_{L_{[2]}} \omega} \def\O{\Omegaplus {\muathbb R}^{k_1}
\tauo (TL \omega} \def\O{\Omegaplus V_2)_{L_{[2]}} \omega} \def\O{\Omegaplus {\muathbb R}^{k_2}$, the spin structure $\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma_1 \omega} \def\O{\Omegaplus 1$
induces the spin structure $\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma_2 \omega} \def\O{\Omegaplus 1$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{defn}
Here ${\muathbb R}^{k_i}$ denote trivial vector bundles of rank $k_i$
($i=1,2$).
We note that
in Definition \rhoef{def:stablyconj}, we still fix a triangulation $\muathfrak T$ of $M$ such that $L$ is a subcomplex.
However, by Proposition 8.1.6 in \chiite{fooobook2},
we find
that the stable conjugacy class of relative spin structure
is actually independent of the choice of a triangulation of
$M$ as follows:
We denote by $\omega} \def\O{\Omegaperatorname{Spin}(M,L;\muathfrak T)$
the set of all the stable conjugacy classes of relative spin structures on $L\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset M$.
\betaegin{equation}gin{prop}[Proposition 8.1.6 in \chiite{fooobook2}]\lambda} \def\La{\Lambdaambdabel{prop:8.1.6}
$(1)$ There is a simply transitive action of $H^2( M, L; {\muathbb Z}_2)$
on $\omega} \def\O{\Omegaperatorname{Spin}(M,L;\muathfrak T)$.
\psiar
$(2)$ For two triangulations $\muathfrak T$ and $\muathfrak T'$ of $M$ such that $L$ is a subcomplex, there exists a
canonical isomorphism
$\omega} \def\O{\Omegaperatorname{Spin}(M,L;\muathfrak T) \chiong \omega} \def\O{\Omegaperatorname{Spin}(M,L;\muathfrak T')$ compatible with the above action.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{prop}
In particular, if a spin structure of $L$ is given, there is a canonical
isomorphism $\omega} \def\O{\Omegaperatorname{Spin}(M,L;\muathfrak T) \chiong H^2(M,L;{\muathbb Z}_2)$.
Thus, hereafter,
we denote by $\omega} \def\O{\Omegaperatorname{Spin}(M,L)$
the set of the stable conjugacy classes of relative spin structures on $L$
without specifying any triangulation of $M$.
Since the class $st$ is determined by $V$, we
simply write the stable conjugacy class of relative spin structure
as $[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]$ where $\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma$ is a spin structure on
$(TL \omega} \def\O{\Omegaplus V)\vert_{L_{[2]}}$.
The following theorem is proved in Section 8.1 \chiite{fooobook2}.
We denote by $\varphiidetilde{{\muathbb C}M}^{\rhom reg}(J;\betaegin{equation}ta)$
the set of all $J$-holomorphic maps from $(D^2,\psiartial D^2)$
to $(M,L)$ representing a class $\betaegin{equation}ta$.
We note that ${\muathbb C}M^{\rhom reg}(J;\betaegin{equation}ta)={\varphiidetilde{{\muathbb C}M}}^{\rhom reg}(J;\betaegin{equation}ta)/PSL(2,{\muathbb R})$.
\betaegin{equation}gin{thm}\lambda} \def\La{\Lambdaambdabel{thm:ori}
If $L$ is a relatively spin Lagrangian submanifold,
$\varphiidetilde{{\muathbb C}M}^{\rhom reg}(J; \betaegin{equation}ta)$ is orientable. Furthermore, the choice
of stable conjugacy class of relative spin structure on $L$ determines an orientation on
$\varphiidetilde{{\muathbb C}M}^{\rhom reg}(J;\betaegin{equation}ta)$ canonically for all $\betaegin{equation}ta \in \psii_2(M,L)$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{thm}
\betaegin{equation}gin{rem}\lambda} \def\La{\Lambdaambdabel{rem:thmori}
(1) Following Convention 8.2.1 in \chiite{fooobook2},
we have an induced orientation on the quotient space.
Thus Theorem \rhoef{thm:ori} holds for the quotient space
${\muathbb C}M^{\rhom reg}(J;\betaegin{equation}ta)={\varphiidetilde{{\muathbb C}M}}^{\rhom reg}(J;\betaegin{equation}ta)/PSL(2,{\muathbb R})$ as well.
Here we use the orientation of $PSL(2,{\muathbb R})$ as in Convention 8.3.1 in \chiite{fooobook2}.
\psiar
(2) Since ${\muathbb C}M^{\rhom reg}(J;\betaegin{equation}ta)$ is the top dimensional
stratum of ${\muathbb C}M(J;\betaegin{equation}ta)$, the orientation on
${\muathbb C}M^{\rhom reg}(J;\betaegin{equation}ta)$ determines one on
${\muathbb C}M(J;\betaegin{equation}ta)$.
In this sense, it is enough to consider ${\muathbb C}M^{\rhom reg}(J;\betaegin{equation}ta)$
when we discuss orientation on ${\muathbb C}M(J;\betaegin{equation}ta)$.
The same remark applies to other moduli spaces including
marked points and fiber products with singular simplices.
\psiar
(3) The moduli space $\muathcal{M}(J,\betaegin{equation}ta)$ may not contain a smooth holomorphic disc, i.e.,
$\muathcal{M}^{\omega} \def\O{\Omegaperatorname{reg}}(J,\betaegin{equation}ta) = \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonmptyset$.
However, the orientation issue
can be discussed as if $\muathcal{M}^{\omega} \def\O{\Omegaperatorname{reg}}(J,\betaegin{equation}ta) \nueq \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonmptyset$.
This is because we consider the orientation of Kuranishi structure, i.e., the orientation of
$\delta} \def\muathbb{D}{\muathbb{D}eltaet E^{\alphast}\omega} \def\O{\Omegatimes \delta} \def\muathbb{D}{\muathbb{D}eltaet TV$, where $V$ is a Kuranishi neighborhood around a point $p=[w:(\Sigmama,\psiartial \Sigmama) \tauo (M,L)]$
in $\muathcal{M}(J,\betaegin{equation}ta)$ and $E \tauo V$ is the obstruction bundle.
Even though $p$ is not represented by a bordered stable map with
an irreducible domain, i.e., a disc, $V$ contains a solution of $\omega} \def\O{\Omegaverline{\psiartial} u \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv 0 \muod E$ for
$u:(D^2, \psiartial D^2) \tauo (M,L)$.
The determinant bundle of the linearized $\omega} \def\O{\Omegaverline{\psiartial}$-operators parametrized by $V$
is trivialized around $p$, hence the orientation of the determinant line at $[u]$ determines the one at $p$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
We recall from Section 8.1 \chiite{fooobook2} how each stable conjugacy class of
relative spin structures determines
an orientation on the moduli space of holomorphic discs.
Once we know the orientability of $\varphiidetilde{{\muathbb C}M}^{\rhom reg}(J;\betaegin{equation}ta)$,
it suffices to give an orientation
on the determinant of the tangent space at a point
$w \in \varphiidetilde{{\muathbb C}M}^{\rhom reg}(J;\betaegin{equation}ta)$
for each stable conjugacy class of
relative spin structures.
We consider
the linearized operator
of the pseudo-holomorphic
curve equation
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{linearizedeq}
\alphaligned
D_w\omega} \def\O{\Omegaverline{\psiartial}:\,\,&W^{1,p}(D^2,\psiartial D^2;w^*TM,\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll^*TL) \tauo
L^p(D^2;w^*TM\omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{D^2}).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonndaligned
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
Here $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll = w\vert_{\psiartial D^2}$
and $p>2$.
Since it has the same symbol as the Dolbeault operator
$$\omega} \def\O{\Omegaverline{\psiartial}_{(w^*TM,\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll^*TL)}:W^{1,p}(D^2,\psiartial
D^2;w^*TM,\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll^*TL)
\tauo L^p(D^2;w^*TM\omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{D^2}),
$$
we may consider the determinant of the index of
this Dolbeault operator $\omega} \def\O{\Omegaverline{\psiartial}_{(w^*TM,\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll^*TL)}$
instead.
We can deform $w:(D^2, \psiartial D^2) \tauo (M,L)$
to $w_0:(D^2, \psiartial D^2) \tauo (M_{[2]},L_{[1]})$
by the simplicial approximation theorem.
We put $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll_0 = w_0 \vert_{\psiartial D^2}$.
Now pick
$[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)] \in \omega} \def\O{\Omegaperatorname{Spin}(M,L)$.
Then it determines the stable homotopy class of
trivialization of $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll_0^{\alphast}(TL \omega} \def\O{\Omegaplus V)$.
The existence of the oriented bundle $V$ on $M_{[3]}$ induces a unique
homotopy class of trivialization of $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll_0^* V$.
Thus, we have a unique homotopy class of
trivialization of $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll_0^*TL$.
Using this trivialization and Proposition \rhoef{prop:8.1.4} below
(applied to the pair of $(E,\lambda} \def\La{\Lambdaambdambda)$ with $E=w_0^{\alphast}TM, \lambda} \def\La{\Lambdaambdambda=\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll_0^{\alphast}TL$),
we can assign
an orientation on the determinant of the index
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaet \omega} \def\O{\Omegaperatorname{Index}~
\omega} \def\O{\Omegaverline{\psiartial}_{(w_0^*TM,{\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll}_0^*TL)}
:=
\delta} \def\muathbb{D}{\muathbb{D}eltaet (\omega} \def\O{\Omegaperatorname{coker}~
\omega} \def\O{\Omegaverline{\psiartial}_{(w_0^*TM,{\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll}_0^*TL)})^* \omega} \def\O{\Omegatimes \delta} \def\muathbb{D}{\muathbb{D}eltaet
\omega} \def\O{\Omegaperatorname{ker}~\omega} \def\O{\Omegaverline{\psiartial}_{(w_0^*TM,{\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll}_0^*TL)}.
$$
This process is invariant under
stably conjugate relation of relative spin structures.
Therefore we obtain an orientation on $\varphiidetilde{{\muathbb C}M}^{\rhom {reg}}(J;\betaegin{equation}ta)$
and so on ${\muathbb C}M^{\rhom {reg}}(J;\betaegin{equation}ta)$ for each stable conjugacy class of relative spin structure $[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]$.
\betaegin{equation}gin{prop}[Proposition 8.1.4 \chiite{fooobook2}]
\lambda} \def\La{\Lambdaambdabel{prop:8.1.4}
Let $E$ be a complex vector bundle over $D^2$
and $\lambda} \def\La{\Lambdaambdambda$ a maximally totally real bundle
over $\psiartial D^2$ with an isomorphism
$$
E\vert_{\psiartial D^2} \chiong
\lambda} \def\La{\Lambdaambdambda \omega} \def\O{\Omegatimes {\muathbb C}.
$$
Suppose that $\lambda} \def\La{\Lambdaambdambda$ is
trivial. Then each trivialization on $\lambda} \def\La{\Lambdaambdambda$ canonically induces
an orientation on
$\omega} \def\O{\Omegaperatorname{Index}\omega} \def\O{\Omegaverline{\psiartial}_{(E,\lambda} \def\La{\Lambdaambdambda)}$.
Here $\omega} \def\O{\Omegaverline{\psiartial}_{(E,\lambda} \def\La{\Lambdaambdambda)}$ is the Dolbeault operator on $(D^2, \psiartial D^2)$
with coefficient $(E,\lambda} \def\La{\Lambdaambdambda)$:
$$
\omega} \def\O{\Omegaverline{\psiartial}_{(E,\lambda} \def\La{\Lambdaambdambda)} :
W^{1,p}(D^2,\psiartial D^2;E,\lambda} \def\La{\Lambdaambdambda) \tauo
L^p(D^2;E\omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{D^2}).
$$
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{prop}
\betaegin{equation}gin{rem}\lambda} \def\La{\Lambdaambdabel{rem:prop8.1.4}
In order to explain some part of the proof of
Theorem \rhoef{thm:fund} given in Section \rhoef{sec:Proofth} in a self-contained way,
we briefly recall the outline of the proof of Proposition
\rhoef{prop:8.1.4}. See Subsection 8.1.1 \chiite{fooobook2} for more
detail.
For $0<\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon < 1$, we put
$A(\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon) = \{ z \in D^2 ~\vert~ 1-\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon \lambda} \def\La{\Lambdae \vert z\vert
\lambda} \def\La{\Lambdae 1 \}$ and
$C_{1-\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon}= \{ z \in D^2 ~\vert~ \vert z\vert =1-\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon
\}$.
By pinching the circle $C_{1-\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon}$ to a point,
we have a union of a $2$-disc $D^2$ and a $2$-sphere ${\muathbb C} P^1$
with the center $O \in D^2$ identified with
a point $p \in {\muathbb C} P^1$.
The resulting space $\Sigmama =D^2 \chiup {\muathbb C} P^1$ has naturally a structure of a nodal curve where $O=p$ is the nodal point.
Under the situation of Proposition \rhoef{prop:8.1.4},
the trivial bundle $\lambda} \def\La{\Lambdaambdambda \tauo \psiartial D^2$ trivially extends
to $A(\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon )$ and the complexification of
each trivialization of $\lambda} \def\La{\Lambdaambdambda \tauo \psiartial D^2$
gives a trivialization on $E\vert_{A(\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon )} \tauo A(\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonpsilon )$.
Thus the bundle $E\tauo D^2$ descends to a bundle over
the nodal curve $\Sigmama$ together with a maximally totally real bundle over $\psiartial \Sigmama = \psiartial D^2$. We denote them by $E' \tauo \Sigmama$
and $\lambda} \def\La{\Lambdaambdambda' \tauo \psiartial \Sigmama$ respectively.
We also denote by $W^{1,p}({\muathbb C} P^1; E'\vert_{{\muathbb C} P^1})$ the space of $W^{1,p}$-sections of $E' \vert_{{\muathbb C} P^1} \tauo {\muathbb C} P^1$ and
by $W^{1,p}
(D^2; E'\vert_{D^2}, \lambda} \def\La{\Lambdaambdambda')$ the space of
$W^{1,p}$-sections $\xii_{D^2}$ of $E' \vert_{D^2} \tauo D^2$ satisfying
$\xii_{D^2}(z)\in \lambda} \def\La{\Lambdaambdambda'_z$, $z \in \psiartial D^2 = \psiartial \Sigmama$.
We consider a map denoted by $\omega} \def\O{\Omegaperatorname{diff}$:
\betaegin{equation}gin{eqnarray*}
&{}& \omega} \def\O{\Omegaperatorname{diff}:
W^{1,p}({\muathbb C} P^1; E'\vert_{{\muathbb C} P^1}) \omega} \def\O{\Omegaplus W^{1,p}
(D^2, \psiartial D^2; E'\vert_{D^2}, \lambda} \def\La{\Lambdaambdambda') \tauo {\muathbb C}^n; \\
&{}& \hskip1in (\xii_{{\muathbb C} P^1}, \xii_{D^2}) \muapsto
\xii_{{\muathbb C} P^1}(p) - \xii _{D^2}(O).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{eqnarray*}
We put $W^{1,p}(E', \lambda} \def\La{\Lambdaambdambda'):=
\omega} \def\O{\Omegaperatorname{diff}^{-1}(0)$ and consider the index of operator
$$
\omega} \def\O{\Omegaverline{\psiartial}_{(E',\lambda} \def\La{\Lambdaambdambda')}~:~
W^{1,p}(E', \lambda} \def\La{\Lambdaambdambda')
\tauo
L^p({\muathbb C} P^1; E'\vert_{{\muathbb C} P^1} \omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{{\muathbb C} P^1})
\omega} \def\O{\Omegaplus
L^p(D^2, \psiartial D^2;E'\vert_{D^2} \omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{D^2}).
$$
Then the orientation problem for $\omega} \def\O{\Omegaperatorname{Index} \omega} \def\O{\Omegaverline{\psiartial}_{(E,\lambda} \def\La{\Lambdaambdambda)}$ on $(D^2, \psiartial D^2)$
is translated into the problem for
$\omega} \def\O{\Omegaperatorname{Index} \omega} \def\O{\Omegaverline{\psiartial}_{(E',\lambda} \def\La{\Lambdaambdambda')}$
on $(\Sigmama, \psiartial \Sigmama)$.
Firstly, we note that the operator
$$
\omega} \def\O{\Omegaverline{\psiartial}_{(E'\vert_{D^2}, \lambda} \def\La{\Lambdaambdambda'\vert_{\psiartial D^2})}
~:~
W^{1,p}(D^2, \psiartial D^2;E'\vert_{D^2},\lambda} \def\La{\Lambdaambdambda') \tauo L^p(D^2;E'\vert_{D^2}\omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{D^2})
$$
is surjective. Each trivialization of $\lambda} \def\La{\Lambdaambdambda \tauo \psiartial D^2$
gives an identification:
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{eq:isokernel}
\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{(E'\vert_{D^2}, \lambda} \def\La{\Lambdaambdambda'\vert_{\psiartial D^2})} \chiong
\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C} ^n, \psiartial D^2 \tauimes {\muathbb R} ^n)} \chiong
{\muathbb R} ^n,
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
where $\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C} ^n, \psiartial D^2 \tauimes {\muathbb R} ^n)}$ is the space of solutions
$\xii : D^2 \tauo {\muathbb C} ^n$ of the Cauchy-Riemann equation with
boundary condition:
$$\omega} \def\O{\Omegaverline{\psiartial} \xii =0, \quad \xii (z) \in \lambda} \def\La{\Lambdaambdambda'_z \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv {\muathbb R}^n, \quad z \in \psiartial D^2.
$$
Thus the solution must be a real constant vector.
This implies that we have a canonical isomorphism in
(\rhoef{eq:isokernel}).
Then the argument in Subsection 8.1.1 \chiite{fooobook2} shows that
the orientation problem can be reduced to the orientation
on $\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{(E'\vert_{D^2}, \lambda} \def\La{\Lambdaambdambda'\vert_{\psiartial D^2})}$ and
$\omega} \def\O{\Omegaperatorname{Index} \omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C} P^1}}$.
The latter one has a complex orientation.
By taking a finite dimensional complex vector space
$W \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset L^p({\muathbb C} P^1;E'\vert_{{\muathbb C} P^1}\omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{{\muathbb C} P^1})$
such that
$$L^p({\muathbb C} P^1;E'\vert_{{\muathbb C} P^1}\omega} \def\O{\Omegatimes \muathbb{L}ambdambda^{0,1}_{{\muathbb C} P^1})
= \omega} \def\O{\Omegaperatorname{Image} \omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C} P^1}}
+ W,
$$
a standard argument (see the paragraphs after Remark 8.1.3
in \chiite{fooobook2}, for example) shows
that the orientation problem on $\omega} \def\O{\Omegaperatorname{Index} \omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C} P^1}}$ is further reduced to
one on $\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C} P^1}}$
which is the space of holomorphic sections of $E'\vert_{{\muathbb C} P^1} \tauo {\muathbb C} P^1$,
denoted by $\omega} \def\O{\Omegaperatorname{Hol} ({\muathbb C} P^1; E'\vert_{{\muathbb C} P^1})$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
\psiar\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamamallskip
We next describe how the orientation behaves
under the change of stable conjugacy classes of
relative spin structures.
Proposition \rhoef{prop:8.1.6} shows that the difference of relative spin structures is measured by
an element $\muathfrak x$ in $H^2(M,L;{{\muathbb Z}}_2)$.
We denote the simply transitive action of $H^2(M,L;{{\muathbb Z}}_2)$ on
$\omega} \def\O{\Omegaperatorname{Spin}(M,L)$ by
$$
(\muathfrak x, [(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]) \muapsto \muathfrak x \chidot [(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)].
$$
When we change the relative spin structure by $\muathfrak x
\in H^2(M,L;{{\muathbb Z}}_2)$,
we find that the orientation on
the index of the operator $D_w\omega} \def\O{\Omegaverline{\psiartial}$
in (\rhoef{linearizedeq}) changes by $(-1)^{{\muathfrak x}[w]}$.
The following result is proved in Proposition 8.1.16 in \chiite{fooobook2} and also obtained
by Cho \chiite{Cho04}, Solomon \chiite{Sol}.
This proposition is used in Subsections
\ref{proof1.9} and \ref{subsec:Appl2}.
\begin{prop}\label{Proposition44.16}
The identity map
$$
\mathcal{M}(J;\beta)^{[(V,\sigma)]} \longrightarrow \mathcal{M}(J;\beta)^{\mathfrak x \cdot[(V,\sigma)]}
$$
is orientation preserving if and only if $\mathfrak x [\beta]=0$.
\end{prop}
For a diffeomorphism $\psi : (M,L) \to (M',L')$
satisfying $\psi(L)=L'$, we define the pull-back map
\begin{equation}\label{pullback}
\psi^{\ast} ~:~
\operatorname{Spin}(M',L') \longrightarrow
\operatorname{Spin}(M,L)
\end{equation}
by $\psi^{\ast}[(V',\sigma')] = [(\psi^{\ast}V', \psi^{\ast} \sigma')]$.
That is, we take a triangulation on $M'$ such that
$L'$ is its subcomplex and $\psi : (M,L) \to (M',L')$ is
a simplicial map.
Then $\psi^{\ast}V'$ is a real vector bundle over $M_{[3]}$
and $\sigma'$ induces a spin structure on
$(TL \oplus \psi^{\ast}V')\vert_{L_{[2]}}$.
Then it is easy to see that
$$
\psi_{\ast} ~:~ \mathcal{M}(\beta;M,L;J)^{\psi^{\ast}[(V',\sigma')]}
\longrightarrow
\mathcal{M}(\psi_{\ast}\beta;M',L';\psi_{\ast}J)^{[(V',\sigma')]}
$$
is orientation preserving.
\subsection{$\tau$-relative spin structure and an example}
\label{subsec:taurelspin}
After these general results are prepared in the previous subsection,
we focus ourselves on the case $L= {\rm Fix}~\tau$, the fixed point set
of an anti-symplectic involution $\tau$ of $M$.
We define the notion of $\tau$-relative spin structure and
discuss its relationship with the orientation of the moduli space.
Note that for $\tau$ such that $L = \text{Fix} ~\tau$ is relative spin, $\tau$
induces an involution $\tau^*$ on the set of relative spin structures $(V,\sigma)$
by pull-back \eqref{pullback}.
\begin{defn}\label{Definition44.17}
A $\tau$-{\it relative spin structure} on $L$ is a relative spin
structure $(V,\sigma)$ on $L$ such that $\tau^*(V,\sigma)$ is stably
conjugate to $(V,\sigma)$, i.e.,
$\tau^*[(V,\sigma)]=[(V,\sigma)]$ in $\operatorname{Spin}(M,L)$.
We say that $L$ is $\tau$-{\it relatively spin} if it carries a
$\tau$-relative spin structure, i.e., if the involution
$\tau^*: \operatorname{Spin}(M,L) \to \operatorname{Spin}(M,L)$ has a
fixed point.
\end{defn}
\begin{exm}\label{Remark44.18}
If $L$ is spin, then it is $\tau$-relatively spin: Obviously $\tau$
preserves the spin structure of $L$ since it is the identity on $L$. And we
may take $V$ needed in the definition of relative spin structure to
be the trivial vector bundle.
\end{exm}
\begin{rem}\label{Remark44.20}
We would like to emphasize that a relative spin structure $(V, \sigma ,st)$ satisfying
$\tau^{\ast} st = st$ is {\it not necessarily} a
$\tau$-relative spin structure in the sense of Definition \ref{Definition44.17}.
See Proposition \ref{Proposition44.19} below.
\end{rem}
Now we give an example of $L = \text{Fix }\tau$ that is relatively spin but {\it
not} $\tau$-relatively spin.
Consider $M = {\mathbb C} P^{n}$ with standard symplectic and complex
structures and $L = {\mathbb R} P^n \subset {\mathbb C} P^n$, the real point set. The real projective space ${\mathbb R} P^n$ is
oriented if and only if $n$ is odd. We take the tautological real
line bundle $\xi$ on ${\mathbb R} P^n$ such that the 1-st Stiefel-Whitney class $w_1(\xi):= x$
is a generator of $H^1({\mathbb R} P^n;{\mathbb Z}_2)$. Then we have
$$
T{\mathbb R} P^n \oplus {\mathbb R} \cong \xi^{\oplus (n+1)}
$$
and so the total Stiefel-Whitney class is given by
$$
w(T{\mathbb R} P^n) = (1+x)^{n+1}.
$$
Therefore we have
\begin{equation}\label{eq:w2}
w_2({\mathbb R} P^{2n+1})
= \begin{cases}
x^2 &\text{if $n$ is even,}
\\
0 &\text{if $n$ is odd.}
\end{cases}
\end{equation}
From this, it follows that ${\mathbb R} P^{4n+3}$ ($n \geq 0$) and ${\mathbb R} P^1$
are spin and hence are $\tau$-relatively spin
by Example \ref{Remark44.18}.
On the other hand we prove:
\begin{prop}\label{Proposition44.19}
The real projective space ${\mathbb R} P^{4n+1} \subset {\mathbb C} P^{4n+1}$ $(n
\geq 1)$ is relatively spin but not $\tau$-relatively spin.
\end{prop}
\begin{proof}
The homomorphism
$$
H^2({\mathbb C} P^{4n+1};{\mathbb Z}_2) \longrightarrow H^2({\mathbb R} P^{4n+1};{\mathbb Z}_2)
$$
is an isomorphism. We can construct an isomorphism explicitly as
follows: Let $\eta$ be the tautological complex line bundle on ${\mathbb C}
P^{4n+1}$ such that
$$
c_1(\eta) = y \in H^2({\mathbb C} P^{4n+1};{\mathbb Z})
$$
is a generator. We can easily see that
$$
\eta\vert_{{\mathbb R} P^{4n+1}} = \xi \oplus \xi
$$
where $\xi$ is the real line bundle over ${\mathbb R} P^{4n+1}$ chosen as above.
Since $c_1(\eta)$ is the Euler class which reduces to the second
Stiefel-Whitney class under ${\mathbb Z}_2$-reduction, $y \mapsto x^2$ under
the above isomorphism. But \eqref{eq:w2} shows that $x^2 = w_2({\mathbb R} P^{4n+1})$.
This proves that ${\mathbb R} P^{4n+1}$ is relatively spin: For $st$, we take $st = y$.
\par
Now we examine the relative spin structures of ${\mathbb R} P^{4n+1}$. It is easy to
check that $H^2({\mathbb C} P^{4n+1}, {\mathbb R} P^{4n+1};{\mathbb Z}_2) \cong {\mathbb Z}_2$ and so there are
$2$ inequivalent relative spin structures by Proposition \ref{prop:8.1.6}.
Let $st = y$ and take
$$
V = \eta^{\oplus (2n+1)} \oplus {\mathbb R}
$$
for the vector bundle $V$ noting $w_2(V) \equiv c_1(V) = (2n+1)y = y = st \mod 2$.
\par
Next we have the isomorphism
$$
\tilde{\sigma} : T{\mathbb R} P^{4n+1} \oplus {\mathbb R}^2 \cong \xi^{\oplus
(4n+2)} \oplus {\mathbb R} \cong \eta^{\oplus (2n+1)}\vert_{{\mathbb R} P^{4n+1}}
\oplus {\mathbb R}
$$
and so it induces a trivialization of
$$
\aligned (T{\mathbb R} P^{4n+1} \oplus V) \oplus {\mathbb R}^2 & \cong T{\mathbb R}
P^{4n+1} \oplus {\mathbb R}^2 \oplus V \\
& \cong (\eta^{\oplus (2n+1)}\vert_{{\mathbb R} P^{4n+1}} \oplus {\mathbb R}) \oplus
(\eta^{\oplus (2n+1)}\vert_{{\mathbb R} P^{4n+1}} \oplus {\mathbb R}).
\endaligned
$$
We note that on a $2$-dimensional CW complex, any stable isomorphism
between two oriented real vector bundles $V_1$, $V_2$ induces a
stable trivialization of $V_1 \oplus V_2$.
In particular,
$(\eta^{\oplus (2n+1)}\vert_{{\mathbb R} P^{4n+1}} \oplus {\mathbb R}) \oplus
(\eta^{\oplus (2n+1)}\vert_{{\mathbb R} P^{4n+1}} \oplus {\mathbb R})$ has a canonical
stable trivialization on the $2$-skeleton of ${\mathbb R} P^{4n+1}$,
which in turn
provides a spin structure on $T{\mathbb R} P^{4n+1} \oplus V$
denoted by $\sigma$. This provides a relative spin structure on
${\mathbb R} P^{4n+1} \subset {\mathbb C} P^{4n+1}$.
\par
Next we study the question on the $\tau$-relatively spin property.
By the definition of the tautological line bundle $\eta$ on ${\mathbb C} P^{4n+1}$,
the involution $\tau$ lifts to an anti-complex linear isomorphism
of $\eta$ which we denote
$$
c ~:~ \eta \longrightarrow \eta.
$$
Then
$$
c^{\oplus (2n+1)} \oplus (-1) ~:~ \eta^{\oplus (2n+1)} \oplus {\mathbb R}
\longrightarrow
\eta^{\oplus (2n+1)} \oplus {\mathbb R}
$$
is an isomorphism which covers $\tau$. Therefore
we may identify
$$
\tau^*V = V = \eta^{\oplus (2n+1)} \oplus {\mathbb R}
$$
on ${\mathbb R} P^{4n+1}$ and also
$$
\tau^* = c^{\oplus (2n+1)} \oplus (-1).
$$
Then we have
$$
\tau^*(V,\sigma,st) = (V,\sigma',st)
$$
where the spin structure $\sigma'$ corresponds to the isomorphism
$$
(c^{\oplus (2n+1)} \oplus (-1)) \circ \tilde{\sigma}.
$$
Therefore to complete the proof of Proposition
\ref{Proposition44.19} it suffices to
show that the restriction of $c^{\oplus (2n+1)} \oplus (-1)$
to $({\mathbb R} P^{4n+1})_{[2]}$ is not stably homotopic to the identity map as
a bundle isomorphism.
\par
Note that the $2$-skeleton $({\mathbb R} P^{4n+1})_{[2]}$ is ${\mathbb R} P^2$.
We have $\pi_1(SO(m)) \cong {\mathbb Z}_2$ and $\pi_2(SO(m)) = 1$ (for $m > 2$).
Hence an oriented isomorphism of real vector bundles on $({\mathbb R} P^{4n+1})_{[2]}$ is
stably homotopic to identity if it is so on the $1$-skeleton $S^1 = ({\mathbb R} P^{4n+1})_{[1]}$.
\par
It is easy to see that $c \oplus c$ is homotopic to identity.
So it remains to consider $c \oplus -1 : \eta \oplus {\mathbb R} \to \eta \oplus {\mathbb R}$ on $S^1$.
Note that $\eta\vert_{S^1} = \xi \oplus \xi$ and this bundle is trivial.
The splitting corresponds to the basis
$(\cos t/2,\sin t/2)$, $(-\sin t/2,\cos t/2)$. (Here $t\in S^1 = {\mathbb R}/2\pi{\mathbb Z}$.)
The map $c$ is given by
$c = (1,-1) : \xi \oplus \xi \to \xi \oplus \xi$.
So when we identify $\eta \oplus {\mathbb R} \cong {\mathbb R}^3$ on $S^1$,
the isomorphism $c \oplus -1$ is represented by the matrix
$$\aligned
&\left(
\begin{matrix}
\cos t/2 & \sin t/2 & 0 \\
- \sin t/2 & \cos t/2 & 0\\
0 & 0 & 1
\end{matrix}
\right)
\left(
\begin{matrix}
1 & 0 & 0 \\
0 & - 1 & 0\\
0 & 0 & -1
\end{matrix}
\right)
\left(
\begin{matrix}
\cos t/2 & -\sin t/2 & 0 \\
\sin t/2 & \cos t/2 & 0\\
0 & 0 & 1
\end{matrix}
\right)
\\
&=
\left(
\begin{matrix}
\cos t & -\sin t & 0 \\
-\sin t & - \cos t & 0\\
0 & 0 & -1
\end{matrix}
\right)
\\
&
=
\left(
\begin{matrix}
\cos t & \sin t & 0 \\
-\sin t & \cos t & 0\\
0 & 0 & 1
\end{matrix}
\right)
\left(
\begin{matrix}
1 & 0 & 0 \\
0 & -1 & 0\\
0 & 0 & -1
\end{matrix}
\right).
\endaligned$$
This loop represents the nontrivial homotopy class in $\pi_1(SO(3)) \cong {\mathbb Z}_2$.
This proves that the involution
$\tau^*: \text{Spin}({\mathbb C} P^{4n+1},{\mathbb R} P^{4n+1}) \to \text{Spin}({\mathbb C} P^{4n+1},{\mathbb R} P^{4n+1})$ is
non-trivial. Since $\text{Spin}({\mathbb C} P^{4n+1},{\mathbb R} P^{4n+1}) \cong {\mathbb Z}_2$,
the proof of Proposition \ref{Proposition44.19} is complete.
\end{proof}
Using the results in this section, we calculate Floer cohomology
of ${\mathbb R} P^{2n+1}$ over $\Lambda_{0,{\rm nov}}^{{\mathbb Z}}$
(see (\ref{eq:nov0}) for the definition of $\Lambda_{0,{\rm nov}}^{{\mathbb Z}}$)
in Subsection \ref{subsec:Appl2}, which
provides an example of Floer cohomology that is {\it not}
isomorphic to the ordinary cohomology.
\subsection{Orientations on
$\mathcal{M}_{k+1}^{\rm {main}}(J;\beta;\vec{P})$
and $\mathcal{M}_{k+1, m}^{\rm {main}}(J; \beta; \vec{Q}, \vec{P})$
}
\label{subsec:orimain}
In this subsection we recall the definitions of the orientations
of $\mathcal{M}_{k+1}^{\rm {main}}(J;\beta;\vec{P})$ and $\mathcal{M}_{k+1, m}^{\rm {main}}(J; \beta; \vec{Q}, \vec{P})$
from Section 8.4 and Subsection 8.10.2 in \cite{fooobook2}.
Here $L$ is not necessarily the fixed point set of an anti-symplectic
involution $\tau$.
When we discuss the orientation problem, it suffices to
consider the regular parts of the moduli spaces.
See Remark \ref{rem:thmori} (2).
By Theorem \ref{thm:ori}, we have an orientation on $\widetilde{\mathcal{M}}^{\rm reg}(J;\beta)$ for
each stable conjugacy class of relative spin structure.
Including marked points,
we define an orientation on $\mathcal{M}^{\rm reg}_{k+1,m}(J;\beta)$ by
$$
\aligned
& \mathcal{M}^{\rm reg}_{k+1,m}(J;\beta) \\
= & \left( \widetilde{\mathcal{M}}^{\rm reg}(J;\beta) \times \partial D^2_0
\times D^2_1\times \cdots \times D^2_m \times \partial D^2_{m+1} \times \cdots \times \partial D^2_{m+k} \right)/
PSL(2:{\mathbb R}).
\endaligned
$$
Here the sub-indices in $\partial D^2_0$ and $\partial D^2_{m+i}$ (resp. $D^2_j$) stand for
the positions of the marked points $z_0$ and $z_i$ (resp. $z_j^+$).
(In Subsection 8.10.2 in \cite{fooobook2} we write the above space
as $\mathcal{M}_{(1,k),m}(\beta)$.)
Strictly speaking, since the marked points are required to be distinct,
the left hand side above is not exactly equal to the right hand side but is
an open subset. However, when we discuss the orientation problem,
we sometimes do not distinguish them when no confusion can occur.
In (\ref{withsimplicies}), (\ref{withP})
we define $\mathcal{M}_{k+1}(J;\beta;\vec{P})$
and $\mathcal{M}_{k+1, m}(J; \beta; \vec{Q}, \vec{P})$ by fiber products.
Now we equip the right hand sides in (\ref{withsimplicies}) and (\ref{withP})
with the fiber product orientations using
Convention 8.2.1 (3) \cite{fooobook2}.
However, we do not use the fiber product orientations themselves as the orientations on
$\mathcal{M}_{k+1}(J;\beta;\vec{P})$
and $\mathcal{M}_{k+1, m}(J; \beta; \vec{Q}, \vec{P})$,
but we use the following orientations twisted from the fiber product orientation:
We put $\deg P_i =n-\dim P_i$, $\deg Q_j =2n-\dim Q_j$
for smooth singular simplices $f_i : P_i \to L$ and
$g_j : Q_j \to M$.
\begin{defn}[Definition 8.4.1 \cite{fooobook2}]\label{Definition8.4.1}
For given smooth singular simplices $f_i : P_i \to L$,
we define an orientation on
$
\mathcal{M} _{k +1}(J;\beta ;P_1 ,\dots ,P_{k})$ by
$$
\mathcal{M} _{k +1}(J;\beta ;P_1 ,\dots ,P_{k})
:=
(-1)^{\epsilon (P) }
\mathcal{M} _{k +1}(J;\beta ) {}_{(ev_1 ,\ldots ,ev_{k})}
\times _{f_1 \times \dots \times f_{k}}
\left(\prod _{i=1}^{k} P_{i}\right),
$$
where
$$
\epsilon (P) =
(n+1)\sum _{j=1}^{k -1}
\sum _{i=1}^{j} \deg P_i .
$$
\end{defn}
\begin{defn}[Definition 8.10.2 \cite{fooobook2}]\label{Definition8.10.2}
For given smooth singular simplices $f_i:P_i \to L$ in $L$ and $g_j:Q_j \to M$ in $M$, we define
$$
\aligned
& {\mathcal{M} }_{k+1,m}(J;\beta ;
Q_1, \dots , Q_{m}; P_1 , \dots ,P_{k}) \\
:= &
(-1)^{\epsilon (P,Q)}
{\mathcal{M} }_{k+1, m}(J;\beta ) {}_{(ev_1^{+} , \dots ,ev_{m}^{+},ev_{1}, \dots ,ev_{k})}
\times _{g_1 \times \dots \times f_k}
\left(\prod _{j=1}^{m} Q_{j} \times \prod_{i=1}^{k} P_i\right),
\endaligned
$$
where
\begin{equation}\label{epsilonPQ}
\epsilon (P,Q) =
(n+1)\sum _{j=1}^{k-1}
\sum _{i=1}^{j} \deg P_i + \left( (k+1)(n+1) + 1 \right) \sum_{j=1}^{m} \deg Q_j.
\end{equation}
\end{defn}
Replacing $\mathcal{M}_{k+1}(J;\beta)$ and
${\mathcal{M} }_{k+1, m}(J;\beta )$ on the right hand sides of the above definitions
by
$\mathcal{M}_{k+1}^{\rm main}(J;\beta)$ and
${\mathcal{M} }_{k+1, m}^{\rm main}(J;\beta )$ respectively,
we define orientations
on the main components
$\mathcal{M} _{k +1}^{\rm main}(J;\beta ;P_1 ,\dots ,P_{k})$ and
${\mathcal{M} }_{k+1,m}^{\rm main}(J;\beta ;
Q_1, \dots , Q_{m}; P_1 , \dots ,P_{k})$
in the same way.
When we do not consider the fiber product with
$g_j : Q_j \to M$,
we drop the second term in $(\ref{epsilonPQ})$.
Thus when $m=0$, the moduli space
in Definition \ref{Definition8.10.2} is nothing but
${\mathcal{M} }_{k+1}(J;\beta ;P_1,\ldots ,P_k)$ equipped with the orientation given by Definition \ref{Definition8.4.1}.
\par
When we study the map $\tau^{\rm main}_{\ast}$
in (\ref{eq:taumain}),
we have to change the ordering of boundary marked points.
Later we use the following lemma which describes the behavior of orientations
under the change of ordering of boundary marked points:
\begin{lem}[Lemma 8.4.3 \cite{fooobook2}]\label{Lemma8.4.3}
Let $\sigma$ be the transposition element $(i,i+1)$ in the $k$-th
symmetric group $\mathfrak S_{k}$
$(i=1,\ldots ,k -1)$.
Then the action of $\sigma$ on
the moduli space
$\mathcal{M}_{k +1}(J;\beta ;P_1 ,\ldots ,P_i ,P_{i+1}, \ldots ,P_{k})$
changing the order of marked points
induces an orientation preserving isomorphism
$$
\aligned
& \sigma ~:~ \mathcal{M}_{k +1}(J;\beta ;P_1 ,\ldots ,P_i ,P_{i+1}, \ldots ,P_{k}) \\
& \longrightarrow (-1)^{(\deg P_i +1)(\deg P_{i+1}+1)}
\mathcal{M}_{k +1}(J;\beta ;P_1 ,\ldots ,P_{i+1},P_i ,\ldots ,P_{k}).
\endaligned
$$
\end{lem}
\section{The induced maps $\tau_{\ast}$ and $\tau_{\ast}^{\rm main}$}
\label{sec:inducedtau}
Let $(M,\omega)$ be a compact, or tame, symplectic manifold and let $\tau: M \to M$
be an anti-symplectic involution, i.e., a map satisfying $\tau^2 = id$ and
$
\tau^*(\omega) = -\omega.
$
We also assume that the fixed point set $L=\text{ Fix }\tau$ is non-empty, oriented and compact.
\par
Let $\mathcal{J}_{\omega}$ be the set of all $\omega$-compatible almost
complex structures
and $\mathcal{J}_\omega^\tau$ its subset consisting of
$\tau$-anti-invariant almost complex structures $J$ satisfying
$
\tau_* J = -J.
$
\begin{lem}[Lemma 11.3 \cite{fooo00}. See also Proposition 1.1 \cite{Wel}.]\label{Lemma38.3}
The space $\mathcal{J}_{\omega}^\tau$ is non-empty and
contractible. It becomes an infinite dimensional
(Fr\'echet) manifold.
\end{lem}
\begin{proof} For given $J\in \mathcal{J}^\tau_\omega$, its tangent space
$T_J\mathcal{J}^\tau_\omega$
consists of sections $Y$ of the bundle $\operatorname{End}(TM)$ whose fiber at $p \in M$ is
the
space of linear maps $Y:T_pM \to T_pM$ such that
$$
Y J + J Y=0, \quad \omega(Y v,w) + \omega(v,Y w)=0,
\quad \tau^*Y= - Y.
$$
Note that the second condition means that $JY$ is a symmetric endomorphism
with respect to the metric $g_J= \omega(\cdot, J\cdot)$.
It immediately follows that $\mathcal{J}^\tau_\omega$ becomes a manifold.
The fact that $\mathcal{J}^\tau_\omega$ is non-empty
(and contractible) follows from the polar decomposition theorem
by choosing a $\tau$-invariant
Riemannian metric on $M$.
\end{proof}
\subsection{The map $\tau_{\ast}$ and orientation}
\label{subsec:tau}
We recall the definition of $\Pi(L)=\pi_2(M,L)/\sim$ where the equivalence relation is defined by
$\beta \sim \beta' \in \pi_2(M,L)$ if and only if
$
\omega(\beta)=\omega(\beta')$
and
$
\mu_L(\beta) =\mu_L(\beta')$.
(See (\ref{eq:Pi}).)
We notice that
for each $\beta \in \Pi (L)$, we defined the moduli space
$\mathcal{M}(J;\beta)$ as the union
\begin{equation}\label{remarkmoduli}
\mathcal{M}(J;\beta)=
\bigcup_{B \in \pi_2(M,L);[B]=\beta \in \Pi(L)} \mathcal{M}(J;B).
\end{equation}
We put $D^2 =\{ z \in {\mathbb C} ~\vert~ \vert z\vert \le 1 \}$
and $\overline{z}$ denotes the complex conjugate.
\begin{defn}\label{def:inducedtau}
Let $J \in \mathcal{J}_{\omega}^\tau$.
For $J$ holomorphic curves $w : (D^2,\partial D^2) \to (M,L)$ and
$u : S^2 \to M$,
we define $\widetilde w$, $\widetilde u$ by
\begin{equation}\label{38.4}
\widetilde w(z) = (\tau\circ w)(\overline z),
\qquad
\widetilde u(z) = (\tau\circ u)(\overline z).
\end{equation}
For $(D^2,w) \in \mathcal{M}^{\text{reg}}(J;{\beta})$,
$((D^2,\vec z,{\vec z}\,^+),w) \in \mathcal{M}^{\text{reg}}_{k+1,m}(J;{\beta})$
we define
\begin{equation}\label{38.5}
\tau_*((D^2,w)) = (D^2,\widetilde w),
\qquad
\tau_*(((D^2,\vec z,\vec z\,^+),w)) = ((D^2,\vec{\overline z},
\vec{\overline z}\,^{+}),\widetilde w),
\end{equation}
where
$$
\vec{\overline z} = (\overline z_0,\dots,\overline z_k),
\qquad
\vec{\overline z}\,^+ = (\overline z^+_0,\dots,\overline z^+_m).
$$
\end{defn}
\begin{rem}\label{rem:tau}
For $\beta = [w]$, we put $\tau_*\beta=[\widetilde{w}]$.
Note if $\tau_{\sharp} : \pi_2(M,L) \to \pi_2(M,L)$ is the
natural homomorphism induced by $\tau$, then
$$\tau_*\beta = -\tau_{\sharp}\beta.$$
This is because $z \mapsto \overline z$ is of degree $-1$.
In fact, we have
$$
\tau_*(\beta) = \beta
$$
in $\Pi(L)$,
since $\tau_*$ preserves both the symplectic area and the Maslov index.
\end{rem}
\begin{lem}\label{Lemma38.6}
The definition
$(\ref{38.5})$ induces the maps
$$
\tau_* : \mathcal{M}^{\operatorname{reg}}(J;\beta) \to
\mathcal{M}^{\operatorname{reg}}(J;\beta),
\quad
\tau_* : \mathcal{M}^{\operatorname{reg}}_{k+1,m}(J;\beta) \to
\mathcal{M}^{\operatorname{reg}}_{k+1,m}(J;\beta),
$$
which satisfy
$\tau_{\ast} \circ \tau_{\ast} = \operatorname{id}$.
\end{lem}
\begin{proof}
If $(w,(z_0,\dots,z_k),(z_1^+,\dots,z_m^+)) \sim (w',(z'_0,\dots,z'_k),
(z_1^{\prime +},\dots,z_m^{\prime +}))$, we have
$\varphi \in PSL(2,\Bbb R) = \text{Aut}(D^2)$
such that $w' = w\circ \varphi^{-1}$, $z'_i = \varphi(z_i)$,
$z^{\prime +}_i = \varphi(z^+_i)$ by definition.
We define $\overline \varphi: D^2 \to D^2$ by
\begin{equation}
\label{eq:barPSL2R}
\overline{\varphi}(z) = \overline{(\varphi(\overline z))}.
\end{equation}
Then $\overline{\varphi} \in PSL(2,\Bbb R)$ and
$\widetilde w' = \widetilde w \circ \overline{\varphi}^{-1}$,
$\overline z'_i = \overline{\varphi}(\overline z_i)$,
$\overline z^{\prime +}_i = \overline\varphi(\overline z^+_i)$.
The property
$\tau_{\ast} \circ \tau_{\ast} = \operatorname{id}$
is straightforward.
\end{proof}
We note that the mapping $\varphi \mapsto \overline\varphi$, $PSL(2,{\mathbb R})
\to PSL(2,{\mathbb R})$ is orientation preserving.
We have the following
\begin{prop}\label{regtau}
The involution $\tau_*$ is induced by an automorphism of
$\mathcal{M}^{\operatorname{reg}}(J;\beta)$ as a space with Kuranishi structure.
\end{prop}
Proposition \ref{regtau} is a special case of Theorem \ref{Proposition38.11} (1).
The proof of Theorem \ref{Proposition38.11} (1)
is similar to the proof of Theorem \ref{Lemma38.14}
which will be proved in Section \ref{sec:Proofth}.
See Definition \ref{def:auto}
for the definition of an automorphism of a space with Kuranishi structure and
Definition \ref{def:oripres} for the definition for an automorphism to be {\it orientation preserving} in the sense of Kuranishi structure.
In this paper, we use the terminology {\it orientation preserving} only in the sense of Kuranishi structure.
We refer Section A1.3 \cite{fooobook2}
for more detailed explanation of the group action on
a space with Kuranishi structure.
In Section \ref{sec:relspin}, we
explained that a choice of stable conjugacy class
$[(V,\sigma)] \in
\operatorname{Spin} (M,L)$ of relative spin structure on $L$ induces an
orientation on
$\mathcal{M}_{k+1,m}(J;\beta)$ for any given $\beta\in \Pi(L)$. Hereafter we equip
$\mathcal{M}_{k+1,m}(J;\beta)$ with this orientation when we regard it as a space
with oriented Kuranishi structure.
We write it as $\mathcal{M}_{k+1,m}(J;\beta)^{[(V,\sigma)]}$ when we specify the
stable conjugacy class of relative spin structure.
\par
For an anti-symplectic involution $\tau$ of $(M,\omega)$,
we have the pull back $\tau^{\ast}[(V,\sigma)]$ of
the stable conjugacy class of relative spin structure $[(V,\sigma)]$.
See (\ref{pullback}).
Then from the definition of the map $\tau_{\ast}$ in Lemma \ref{Lemma38.6}
we obtain the maps
$$
\aligned
\tau_* & : \mathcal{M}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]} \longrightarrow
\mathcal{M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]},
\\
\tau_* & : \mathcal{M}^{\operatorname{reg}}_{k+1,m}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\longrightarrow
\mathcal{M}^{\operatorname{reg}}_{k+1,m}(J;\beta)^{[(V,\sigma)]}.
\endaligned
$$
Here we note that $\tau_{\ast}J =-J$
and we use the same $\tau$-anti-symmetric
almost complex structure $J$ in both the source and the target
spaces of the map $\tau_{\ast}$.
If $[(V,\sigma)]$ is
$\tau$-relatively spin (i.e.,
$\tau^{\ast}[(V,\sigma)]=[(V,\sigma)]$),
$\tau_{\ast}$ defines involutions
of $\mathcal{M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}$ and $\mathcal{M}^{\operatorname{reg}}_{k+1,m}(J;\beta)^{[(V,\sigma)]}$ with Kuranishi structures.
\begin{thm}\label{Proposition38.7}
Let $L$ be a fixed point set of an anti-symplectic involution $\tau$ and
$J \in \mathcal{J}^{\tau}_{\omega}$.
Suppose that $L$ is oriented and carries a relative spin structure $(V,\sigma)$.
Then the map
$
\tau_*: {\mathcal M}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]} \to
{\mathcal M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}
$
is orientation preserving if $\mu_L(\beta) \equiv 0
\mod 4$ and is orientation reversing if
$\mu_L(\beta) \equiv 2
\mod 4$.
\end{thm}
\begin{cor}\label{corProposition38.7}
Let $L$ be as in Theorem \ref{Proposition38.7}.
In addition, if $L$ carries a $\tau$-relative spin structure
$[(V,\sigma)]$, then the map $\tau_{\ast} : {\mathcal M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]} \to
{\mathcal M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}
$ is orientation preserving if $\mu_L(\beta) \equiv 0
\mod 4$ and is orientation reversing if
$\mu_L(\beta) \equiv 2
\mod 4$.
\end{cor}
We prove Theorem \ref{Proposition38.7} in Section \ref{sec:Proofth}.
Here we give a couple of examples.
\begin{exm}\label{Example38.8.}
(1) Consider the case of $M={\mathbb C} P^n$, $L={\mathbb R} P^n$.
In this case, each Maslov
index $\mu_L(\beta)$ has the form
$$
\mu_L(\beta) = \ell_\beta (n+1)
$$
where $\beta = \ell_\beta$ times the generator.
We know that when $n$ is even $L$ is not orientable, and so we
consider only the case where $n$ is odd. On the other hand,
when $n$ is odd, $L$ is relatively spin.
Moreover, we have proved in Proposition \ref{Proposition44.19}
that
${\mathbb R} P^{4n+3}$ $(n\ge 0)$ is $\tau$-relatively spin, (indeed, ${\mathbb R} P^{4n+3}$
is spin), but
${\mathbb R} P^{4n+1}$ $(n\ge 1)$ is {\it not} $\tau$-relatively spin.
Then using the above formula for the Maslov index,
we can conclude from
Theorem \ref{Proposition38.7} that the map
$\tau_*: {\mathcal M}^{\text{reg}}(J;\beta)^{[(V,\sigma)]} \to
{\mathcal M}^{\text{reg}}(J;\beta)^{[(V,\sigma)]}
$ is always an orientation preserving involution
for any $\tau$-relative spin structure $[(V,\sigma)]$ of ${\mathbb R} P^{4n+3}$.
\par
Of course, ${\mathbb R} P^1$ is spin and so $\tau$-relatively spin.
The map $\tau_*$ is an orientation preserving involution if $\ell_{\beta}$
is even, and an orientation reversing involution if $\ell_{\beta}$
is odd.
\par
(2) Let $M$ be a Calabi-Yau 3-fold and let $L \subset M$ be the set of real points
(i.e., the fixed point set of an anti-holomorphic involutive isometry).
In this case, $L$ is orientable (because it is a special
Lagrangian) and spin (because any orientable 3-manifold is spin).
Furthermore $\mu_L(\beta) = 0$ for any $\beta \in \pi_2(M,L)$.
Therefore Theorem \ref{Proposition38.7} implies that the map $
\tau_*: {\mathcal M}^{\text{reg}}(J;\beta)^{[(V,\sigma)]}
\to {\mathcal M}^{\text{reg}}(J;\beta)^{[(V,\sigma)]}
$ is
orientation preserving for any $\tau$-relative spin structure
$[(V,\sigma)]$.
\end{exm}
\par
We next include marked points.
We consider the moduli space $\mathcal{M}^{\text{reg}}_{k+1,m}(J;\beta)$.
\par
\begin{prop}\label{Lemma38.9}
The map
$\tau_* : \mathcal{M}_{k+1,m}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]} \to
\mathcal{M}_{k+1,m}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}
$ is orientation preserving if and only if $\mu_L(\beta)/2 + k + 1 + m$
is even.
\end{prop}
\begin{proof}
Assuming Theorem \ref{Proposition38.7}, we prove
Proposition \ref{Lemma38.9}.
Let us consider the diagram:
$$
\xymatrix{
(S^1)^{k+1} \times (D^2)^m \ar[r]^c & (S^1)^{k+1}\times (D^2)^m\\
((S^1)^{k+1} \times (D^2)^m)_0 \ar[u]^{\text{\footnotesize inclusion}} \ar[r]^c
\ar[d] & ((S^1)^{k+1}\times (D^2)^m)_0 \ar[u]^{\text{\footnotesize inclusion}} \ar[d]
\ar[u]^{\text{\footnotesize inclusion}}\\
\widetilde{\mathcal M}_{k+1,m}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\ar[r]^{\text{Prop \ref{Lemma38.9}}} \ar[d]_{\mathfrak{forget}} &
\widetilde{\mathcal M}_{k+1,m}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}
\ar[d]_{\mathfrak{forget}} \\
\widetilde{\mathcal M}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\ar[r]^{\text{Thm \ref{Proposition38.7}}} & \widetilde{\mathcal M}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}
}
$$
\centerline{\bf Diagram \ref{sec:inducedtau}.1}
\par\medskip
\noindent
Here $c$ is defined by
$$c(z_0, z_1, \dots, z_k,z_1^{+},\dots,
z_m^{+}) =
(\overline z_0, \overline z_1,\dots, \overline z_k,
\overline {z}_1^{+},\dots,
\overline {z}_m^{+})$$
and ${\mathfrak{forget}}$ are the forgetful maps of marked points. Here we denote by
$((S^1)^{k+1} \times (D^2)^m)_0$ the set of all
$c(z_0, z_1, \dots, z_k,z_1^{+},\dots,
z_m^{+})$ such that $z_i \ne z_j$, $z_i^+ \ne z_j^+$ for $i \ne j$.
\par
Proposition \ref{Lemma38.9} then follows from Theorem \ref{Proposition38.7} and the
fact that the ${\mathbb Z}_2$-action $\varphi \mapsto \overline\varphi$
on $PSL(2,{\mathbb R})$ given by \eqref{eq:barPSL2R}
is orientation preserving.
\end{proof}
We next extend $\tau_*$ to the compactification $\mathcal{M}_{k+1,m}(J;\beta)$ of
$\mathcal{M}_{k+1,m}^{\rm reg}(J;\beta)$
and define
a continuous map
$$
\tau_* : \mathcal{M}_{k+1,m}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\longrightarrow
\mathcal{M}_{k+1,m}(J;\beta)^{[(V,\sigma)]}.
$$
\begin{thm}\label{Proposition38.11}
\begin{enumerate}
\item The map
$\tau_* : \mathcal{M}_{k+1,m}^{\operatorname{reg}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\longrightarrow
\mathcal{M}_{k+1,m}^{\operatorname{reg}}(J;\beta)^{[(V,\sigma)]}$
extends to an automorphism
$\tau_*$, denoted by the same symbol:
\begin{equation}\label{38.10}
\tau_* : \mathcal{M}_{k+1,m}(J;\beta)^{\tau^{\ast}[(V,\sigma)]} \longrightarrow
\mathcal{M}_{k+1,m}(J;\beta)^{[(V,\sigma)]}
\end{equation}
between spaces with Kuranishi structures.
\item
It preserves orientation if and only if
$\mu_L(\beta)/2 + k + 1 + m$ is even.
In particular, if $[(V,\sigma)]$ is a $\tau$-relative spin structure, it can be
regarded as an involution on the space
$\mathcal{M}_{k+1,m}(J;\beta)^{[(V,\sigma)]}$ with Kuranishi structure.
\end{enumerate}
\end{thm}
\begin{proof}
(1) The proof of (1) is given right after the proof of Theorem \ref{Lemma38.14}
in Section \ref{sec:Proofth}.
\par
(2) The statement follows from the corresponding
statement on $\mathcal{M}_{k+1,m}^{\text{reg}}(J;\beta)$ in Proposition \ref{Lemma38.9}.
For $((\Sigma, \vec{z}, \vec{z}\,^+), w) \in \mathcal{M}_{k+1,m}(J,\beta)$, we denote by
$$((\Sigma_i,\vec{z}\,^{(i)}, \vec{z}\,^{+(i)}),w_{(i)}) \in
\mathcal{M}^{\operatorname{reg}}_{k_i+1, m_i}(J,\beta_{(i)})$$ the irreducible disc components and by
$((\Sigma_j^S, \vec{z}\,^{+(j)_S}),u_{(j)}) \in \mathcal{M}^{\operatorname{sph}}_{\ell_j}(\alpha)$ the irreducible sphere components.
\begin{itemize}
\item
By Proposition \ref{Lemma38.9}, we find that $\tau_*$ respects the orientation of $\mathcal{M}^{\operatorname{reg}}_{k_i+1, m_i}(J,\beta_{(i)})$
if and only if $\mu(\beta_{i})/2 + k_i + 1 + m_i$ is even.
\item
In the same way, we find that $\tau_*$ respects the orientation of $ \mathcal{M}^{\operatorname{sph}}_{\ell_j}(\alpha)$ if and only if
$n + c_1(M)[\alpha ] + \ell_j - 3$ is even.
\item
$m \equiv \sum_i m_i + \sum_j \ell_j \mod 2$ and $k+1 \equiv \sum_i (k_i + 1) \mod 2$.
\item The number of interior nodes is equal to the number of sphere components,
since $\Sigma$ is a bordered stable curve of genus $0$ such that $\partial \Sigma$ is connected.
\item The involution $\tau_*$ acts on the space of parameters for smoothing interior nodes with orientation preserving if and only if
the number of interior nodes is even.
\item
The fiber product is taken over either $L$ or $M$. The involution $\tau$ respects the orientation of $M$ if and only if $n$ is even.
\end{itemize}
\par
Combining these with Lemma 8.2.3 (4) in \cite{fooobook2}, we obtain that
$\tau_*$ respects the orientation on $\mathcal{M}_{k+1,m}(J,\beta)$ if and only if $\mu(\beta)/2 + k+1 + m$ is even.
Hence we obtain the second statement of the theorem.
\end{proof}
\subsection{The map $\tau_{\ast}^{\rm main}$ and orientation}
\label{subsec:taumain}
We next restrict our maps to the main component of $\mathcal{M}_{k+1,m}(J;\beta)$.
As we mentioned before,
we observe that the induced map $\tau_* : \mathcal{M}_{k+1,m}(J;\beta)
\to \mathcal{M}_{k+1,m}(J;\beta)$ does {\it not} preserve the main component
for $k > 1$. On the other hand the assignment given by
\begin{equation}\label{38.13}
\aligned
& \quad (w, \vec{z}, \vec{z}\,^+)=(w,(z_0, z_1, z_2,\dots, z_{k-1},z_k),(z_1^+, \dots,z_m^+)) \\
& \longmapsto
(\widetilde {w}, \vec{\overline z}^{\rm ~ rev}, \vec{\overline z}\,^+)=(\widetilde{w}, (\overline{z}_0, \overline{z}_k, \overline{z}_{k-1},\dots,
\overline{z}_2,\overline{z}_1),(\overline{z}_1^+,
\dots,\overline{z}_m^+))
\endaligned
\end{equation}
respects the counter-clockwise cyclic order of $S^1 = \partial D^2$ and so
preserves the main component, where $\widetilde{w}$ is as in
(\ref{38.4}).
Therefore we consider this map instead which we denote by
\begin{equation}\label{taumain}
\tau_*^{\operatorname{main}} :
\mathcal{M}_{k+1,m}^{\operatorname{main}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\longrightarrow
\mathcal{M}_{k+1,m}^{\operatorname{main}}(J;\beta)^{[(V,\sigma)]}.
\end{equation}
We note that for $k=0,1$ we have
\begin{equation}\label{taumaink=0,1}
\tau^{\rm main}_{\ast} = \tau_{\ast}.
\end{equation}
\begin{thm}\label{Lemma38.14}
The map $\tau_*^{\operatorname{main}}$ is induced by an automorphism
between the spaces with Kuranishi structures and satisfies $\tau_*^{\operatorname{main}}\circ
\tau_*^{\operatorname{main}} = \operatorname{id}$. In particular, if $[(V,\sigma)]$ is
$\tau$-relatively spin, it defines an involution of the space
$
\mathcal{M}_{k+1,m}^{\operatorname{main}}(J;\beta)^{[(V,\sigma)]}
$ with Kuranishi
structure.
\end{thm}
The proof will be given in Section \ref{sec:Proofth}.
\par
We now have the following commutative diagram:
$$
\xymatrix{
(S^1)^{k+1}\times (D^2)^m \ar[r]^{c'} & (S^1)^{k+1}\times (D^2)^m\\
((S^1)^{k+1}\times (D^2)^m)_{00} \ar[u]^{\text{\footnotesize inclusion}}\ar[r]^{c'} \ar[d]
& ((S^1)^{k+1}\times (D^2)^m)_{00} \ar[u]^{\text{\footnotesize inclusion}} \ar[d]\\
\widetilde{\mathcal M}_{k+1,m}^{\operatorname{main}}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\ar[r]^{\tau_*^{\operatorname{main}}} \ar[d]_{\mathfrak{forget}} &
\widetilde{\mathcal M}_{k+1,m}^{\operatorname{main}}(J;\beta)^{[(V,\sigma)]}
\ar[d]_{\mathfrak{forget}} \\
\widetilde{\mathcal M}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
\ar[r]^{\tau_*} & \widetilde{\mathcal M}(J;\beta)^{[(V,\sigma)]}
}
$$
\centerline{\bf Diagram \ref{sec:inducedtau}.2}
\par\medskip
\noindent
Here $c'$ is defined by
$$c'(z_0, z_1, \dots, z_k,
z_1^{+},\dots,
z_m^{+}) =
(\overline z_0, \overline z_k,\dots, \overline z_1,
\overline{z}_1^{+},\dots,
\overline{z}_m^{+})$$
and ${\mathfrak{forget}}$ are the forgetful maps of marked points.
In the diagram, $((S^1)^{k+1} \times (D^2)^m)_{00}$ is the open subset of
$(S^1)^{k+1} \times (D^2)^m$ consisting of the points
such that all $z_i$'s and $z_j^+$'s are distinct respectively.
Let $\text{\rm Rev}_k : L^{k+1} \to L^{k+1}$ be the map defined by
$$
\text{\rm Rev}_k(x_0, x_1,\dots,x_k) = (x_0,x_k,\dots,x_1).
$$
It is easy to see that
\begin{equation}\label{38.15}
ev\circ \tau_*^{\text{main}} = \text{\rm Rev}_k \circ ev.
\end{equation}
We note again that $\text{\rm Rev}_k = \operatorname{id}$
and $\tau_*^{\operatorname{main}}= \tau_*$ for $k=0,1$.
\par
Let $P_1,\dots,P_k$
be smooth singular simplices on $L$. By taking the fiber product and using
(\ref{38.13}), we obtain a map
\begin{equation}\label{38.16}
\tau_*^{\operatorname{main}} : \mathcal{M}^{\operatorname{main}}_{k+1,m}(J;\beta ;
P_1,\dots,P_k)^{\tau^{\ast}[(V,\sigma)]}
\to \mathcal{M}^{\operatorname{main}}_{k+1,m}(J; \beta ;
P_k,\dots,P_1)^{[(V,\sigma)]}
\end{equation}
which satisfies
$\tau_*^{\operatorname{main}} \circ \tau_*^{\operatorname{main}}
= \operatorname{id}$.
We put
\begin{equation}\label{epsilonmain}
\epsilon = \frac{\mu_L(\beta)}{2} + k + 1 + m + \sum_{1 \le i < j \le k}
\deg'P_i\deg'P_j.
\end{equation}
\begin{thm}\label{Lemma38.17}
The map $(\ref{38.16})$ preserves orientation if $\epsilon$ is even, and
reverses orientation if $\epsilon$ is odd.
\end{thm}
The proof of Theorem \ref{Lemma38.17} is given in Section \ref{sec:Proofth}.
\section{Proofs of Theorems \ref{Proposition38.7},
\ref{Proposition38.11} (1), \ref{Lemma38.14} and \ref{Lemma38.17}}
\label{sec:Proofth}
In this section we prove Theorems \ref{Proposition38.7}
(= Theorem \ref{thm:fund}), \ref{Proposition38.11} (1), \ref{Lemma38.14} and
\ref{Lemma38.17}
(= Theorem \ref{withsimplex}) stated in the previous sections.
\begin{proof}[Proof of Theorem \ref{Proposition38.7}.]
Pick
$J \in \mathcal{J}_\omega^\tau$, a $\tau$-anti-invariant almost complex structure compatible with $\omega$.
For a $J$ holomorphic curve $w :
(D^2,\partial D^2) \to (M,L)$, we recall that we define $\widetilde w$ by
$$
\widetilde w(z) = (\tau\circ w)(\overline z).
$$
Moreover
for $(D^2,w) \in \mathcal{M}^{\text{reg}}(J;{\beta})$,
$((D^2,\vec z,{\vec z}\,^+),w) \in \mathcal{M}^{\text{reg}}_{k+1,m}(J;{\beta})$
we define
$$
\tau_*((D^2,w)) = (D^2,\widetilde w),
\qquad
\tau_*(((D^2,\vec z,\vec z\,^+),w)) = ((D^2,\vec{\overline z},
\vec{\overline z}\,^{+}),\widetilde w),
$$
where
$$
\vec{\overline z} = (\overline z_0,\dots,\overline z_k),
\qquad
\vec{\overline z}\,^+ = (\overline z^+_0,\dots,\overline z^+_m).
$$
Let $[D^2,w]\in \mathcal{M}^{\rm reg}(J;\beta)$. We consider the
deformation complex
\begin{equation}\label{47.6.1}
D_w\overline\partial: \Gamma(D^2,\partial D^2: w^*TM,
w\vert_{\partial D^2}^*TL) \longrightarrow \Gamma(D^2;\Lambda^{0,1}\otimes w^*TM)
\end{equation}
and
\begin{equation}\label{47.6.2}
D_{\widetilde w}\overline\partial: \Gamma(D^2,\partial D^2: {\widetilde w}^*TM,
{\widetilde w}\vert_{\partial D^2}^*TL) \longrightarrow \Gamma(D^2;\Lambda^{0,1}\otimes {\widetilde w}^*TM),
\end{equation}
where $D_{w}\overline{\partial}$ is the linearized operator
of pseudo-holomorphic curve equation as (\ref{linearizedeq}).
(Here and hereafter, $\Lambda^1 = \Lambda^{1,0} \oplus \Lambda^{0,1}$ is the decomposition of the
complexified cotangent bundle of the {\it domain} of
pseudo-holomorphic curves.)
\psiar
We have the commutative diagram
\vskip0.2cm
$$
\xiymatrix{
(w^*TM, w\vert_{\psiartial D^2}^*TL) \alphar[r]^{T\tauau} \alphar[d] &
(\varphiidetilde w^*TM, \varphiidetilde w\vert_{\psiartial D^2}^*TL) \alphar[d]\\
(D^2, \psiartial D^2) \alphar[r]^c & (D^2, \psiartial D^2)
}
$$
\chienterline{\betaf Diagram \rhoef{sec:Proofth}.1}
\psiar\betaigskip
\nuoindent
where $c(z) = \omega} \def\O{\Omegaverline z$ and we denote by $T\tauau$ the differential of $\tauau$.
It induces a bundle map
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{47.7}
\omega} \def\O{\Omegaperatorname{Hom}_{{\muathbb R}}(TD^2,w^*TM) \lambda} \def\La{\Lambdaongrightarrow
\omega} \def\O{\Omegaperatorname{Hom}_{{\muathbb R}}(TD^2,{\varphiidetilde w}^*TM),
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
which covers $z \muapsto \omega} \def\O{\Omegaverline z$. The bundle map
(\rhoef{47.7}) is fiberwise anti-complex linear i.e.,
$$
\omega} \def\O{\Omegaperatorname{Hom}_{{\muathbb R}}(T_zD^2,T_{w(z)}M) \lambda} \def\La{\Lambdaongrightarrow
\omega} \def\O{\Omegaperatorname{Hom}_{{\muathbb R}}(T_{\omega} \def\O{\Omegaverline z} D^2,T_{\tauau(w(z))}M),
$$
is anti-complex linear at each $z \in D^2$
with respect to both of the complex structures
$a \muapsto J \chiirc a$
and
$a \muapsto a\chiirc j$.
Therefore it preserves the decomposition
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{47.8}
\omega} \def\O{\Omegaperatorname{Hom}_{{\muathbb R}}(TD^2,w^*TM) \omega} \def\O{\Omegatimes {\muathbb C} = (\muathbb{L}ambdambda^{1,0} \omega} \def\O{\Omegatimes w^*TM)
\omega} \def\O{\Omegaplus (\muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes w^*TM),
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
since (\rhoef{47.8}) is the decomposition to the complex and anti-complex
linear parts. Hence we obtain a map
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{Ttau1}
(T_{w,1}\tauau)_* : \Gamma(D^2;\muathbb{L}ambdambda^{0,1}\omega} \def\O{\Omegatimes w^*TM)
\lambda} \def\La{\Lambdaongrightarrow \Gamma(D^2;\muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes {\varphiidetilde w}^*TM)
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
which is anti-complex linear. In a similar way, we obtain an anti-complex linear map:
$$
(T_{w,0}\tauau)_* : \Gamma(D^2,\psiartial D^2: w^*TM,
w\vert_{\psiartial D^2}^*TL) \lambda} \def\La{\Lambdaongrightarrow \Gamma(D^2,\psiartial D^2: {\varphiidetilde w}^*TM,
{\varphiidetilde w}\vert_{\psiartial D^2}^*TL).
$$
Since $\tauau$ is an isometry, it commutes with the
covariant derivative. This gives rise to the following commutative diagram.
$$
\xiymatrix{
\Gamma(D^2,\psiartial D^2: w^*TM,
w\vert_{\psiartial D^2}^*TL) \alphar[d]_{(T_{w,0}\tauau)_*} \alphar[r]^-{D_w\omega} \def\O{\Omegaverline\psiartial}
& \Gamma(D^2;\muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes w^*TM) \alphar[d]_{(T_{w,1}\tauau)_*}\\
\Gamma(D^2,\psiartial D^2: {\varphiidetilde w}^*TM,
{\varphiidetilde w}\vert_{\psiartial D^2}^*TL) \alphar[r]^-{D_{\varphiidetilde w}\omega} \def\O{\Omegaverline\psiartial}
& \Gamma(D^2;\muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes {\varphiidetilde w}^*TM)
}
$$
\vskip0.2cm
\chienterline{\betaf Diagram \rhoef{sec:Proofth}.2}
\psiar\muedskip
We study the orientation. Let $w \in \varphiidetilde{\muathbb C}M^{\rhom reg} (J;\betaegin{equation}ta)$ and
consider $\varphiidetilde w \in \varphiidetilde{\muathbb C}M^{\rhom reg} (J;\betaegin{equation}ta)$.
We consider the commutative Diagram \rhoef{sec:Proofth}.1.
A trivialization
$$
\ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiihi: (w^*TM, w\vert_{\psiartial D^2}^*TL) \lambda} \def\La{\Lambdaongrightarrow (D^2, \psiartial D^2; {\muathbb C}^n, \muathbb{L}ambdambda)
$$
naturally induces a trivialization
$$
\varphiidetilde \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiihi: (\varphiidetilde w^*TM, \varphiidetilde w\vert_{\psiartial D^2}^*TL)
\lambda} \def\La{\Lambdaongrightarrow (D^2, \psiartial D^2; {{\muathbb C}}^n, \varphiidetilde \muathbb{L}ambdambda),
$$
where $\muathbb{L}ambdambda: S^1 \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaimeq \psiartial D^2 \tauo \muathbb{L}ambdambda ({\muathbb C}^n)$
is a loop of Lagrangian subspaces given by
$\muathbb{L}ambdambda(z) : = T_{w(z)}L$ in the trivialization
and $\varphiidetilde \muathbb{L}ambdambda$ is defined by
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{widetildeLambda}
\varphiidetilde \muathbb{L}ambdambda(z) = \omega} \def\O{\Omegaverline{\muathbb{L}ambdambda(\omega} \def\O{\Omegaverline z)}.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
With respect to these trivializations, we have the commutative diagram
$$
\xiymatrix{
(D^2,\psiartial D^2; {\muathbb C}^n, \muathbb{L}ambdambda) \quad
\alphar[r]^{\varphiidetilde \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiihi\chiirc T\tauau
\chiirc \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiihi^{-1}} \alphar[d] & \quad
(D^2,\psiartial D^2; {{\muathbb C}}^n, \varphiidetilde\muathbb{L}ambdambda) \alphar[d] \\
(D^2, \psiartial D^2) \alphar[r]^c & (D^2, \psiartial D^2)
}
$$
\vskip0.2cm
\chienterline{\betaf Diagram \rhoef{sec:Proofth}.3}
\psiar\betaigskip
\nuoindent and the elliptic complex (\rhoef{47.6.1}) and (\rhoef{47.6.2}) are identified with
$\omega} \def\O{\Omegaverline{\psiartial}_{(D^2,\psiartial D^2; {\muathbb C}^n, \muathbb{L}ambdambda)}$ and
$\omega} \def\O{\Omegaverline{\psiartial}_{(D^2,\psiartial D^2; {{\muathbb C}}^n, \varphiidetilde{\muathbb{L}ambdambda})}$, respectively.
The relative spin structure $\tauau^*[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]$ (resp. $[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]$) determines a trivialization $\muathbb{L}ambdambda \chiong \psiartial D^2 \tauimes {\muathbb R}^n$
(resp. $\varphiidetilde{\muathbb{L}ambdambda} \chiong \psiartial D^2 \tauimes {\muathbb R}^n$) unique up to homotopy.
These trivializations are compatible with $\varphiidetilde \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiihi \chiirc T\tauau \chiirc \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiihi^{-1}$ in Diagram \rhoef{sec:Proofth}.3.
We recall the argument explained in Remark \rhoef{rem:prop8.1.4}.
We have the complex vector bundle $E'$ over the nodal curve
$\Sigmama = D^2 \chiup {\muathbb C} P^1$ with a nodal point $D^2 \nui O=p \in {\muathbb C} P^1$.
The topology of the bundle $E'\vert_{{\muathbb C} P^1} \tauo {\muathbb C} P^1$ is determined by
the loop $\muathbb{L}ambdambda$ of Lagrangian subspaces
defined by $\muathbb{L}ambdambda(z) = T_{w(z)}L$ in the trivialization.
The Cauchy-Riemann operator
$\omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C} ^n, \psiartial D^2 \tauimes {\muathbb R} ^n)}$
is surjective and
the Cauchy-Riemann operator
$$
\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C} P^1}}: \Gamma ({{\muathbb C} P^1}; E'\vert _{{\muathbb C} P^1}) \tauo \Gamma ({{\muathbb C} P^1}; \muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes E'\vert_{{\muathbb C} P^1})
$$
is
approximated by a finite dimensional model which is $ 0{\tauext{-map}} : H^0({{\muathbb C} P^1}; E'\vert_{{\muathbb C} P^1}) \tauo H^1({{\muathbb C} P^1}; E'\vert_{{\muathbb C} P^1})$,
where $H^1({{\muathbb C} P^1}; E'\vert_{{\muathbb C} P^1})$
is regarded as the obstruction bundle.
For a later purpose, we take a {\it stabilization} of this finite dimensional model so that the evaluation at $p$ is surjective to $E'_p$.
Namely, we take a finite dimensional complex linear subspace
${\muathbb V}^+ \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset \Gamma ({\muathbb C}P^1; \muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes E'\vert_{{\muathbb C}P^1})$ such that
the Cauchy-Riemann operator $\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}}$ is surjective modulo ${\muathbb V}^+$ and
the evaluation at $p$ $ev_p:(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V}^+) \tauo E'\vert_p$ is surjective.
Set $\muathbb V = {\muathbb V}^+ \chiap \tauext{\rhom Im~} \omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset \Gamma ({\muathbb C}P^1; \muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes E'\vert_{{\muathbb C}P^1})$.
Then we have an isomorphism ${\muathbb V}^+ \chiong {\muathbb V} \omega} \def\O{\Omegaplus H^1({{\muathbb C} P^1}; E'\vert_{{\muathbb C} P^1})$ and
$(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V}^+) =(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})$.
The Cauchy-Riemann operator $\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}}$ has a finite dimensional approximation by
$$s:(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V}) \tauo {\muathbb V} \omega} \def\O{\Omegaplus H^1({{\muathbb C} P^1}; E'\vert_{{\muathbb C} P^1}),
\ \ s(\xii)=(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}}(\xii), 0).$$
We use the notation in Convention 8.2.1 (3) (4) in \chiite{fooobook2}
to describe
the kernel of the operator $\omega} \def\O{\Omegaverline{\psiartial}_{(E', \lambda} \def\La{\Lambdaambdambda')}$ as the zero set of $s$ in
the fiber product of the kernel of $\omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C} ^n, \psiartial D^2 \tauimes {\muathbb R} ^n)}$ and
$(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})$.
We decompose it as follows.
$$
(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})= E'_p \tauimes ~^{\chiirc}
(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V}).
$$
Here $~^{\chiirc} (\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})$ is the space of sections
in $(\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})$, which
vanish at $p$. (See (8.2.1.6) in \chiite{fooobook2} for the notation used here.)
Since $\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C} ^n, \psiartial D^2 \tauimes {\muathbb R} ^n)}
\chiong {\muathbb R} ^{n}$ by (\rhoef{eq:isokernel}), the complex conjugate induces
the trivial action on
$\omega} \def\O{\Omegaperatorname{ker} \omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C} ^n, \psiartial D^2 \tauimes {\muathbb R} ^n)}$.
Therefore $(T\tauau)_*:\delta} \def\muathbb{D}{\muathbb{D}eltaet D_w \omega} \def\O{\Omegaverline{\psiartial} \tauo \delta} \def\muathbb{D}{\muathbb{D}eltaet D_{\varphiidetilde w} \omega} \def\O{\Omegaverline{\psiartial}$ is orientation preserving if and only if so is the complex conjugation action on
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaet({\muathbb V} \omega} \def\O{\Omegaplus H^1({{\muathbb C} P^1};E'\vert_{{\muathbb C} P^1}))^* \omega} \def\O{\Omegatimes \delta} \def\muathbb{D}{\muathbb{D}eltaet ~^{\chiirc}\betaigl((\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})\betaigr),
$$
which is isomorphic to
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaet({\muathbb V} \omega} \def\O{\Omegaplus H^1({{\muathbb C} P^1};E'\vert_{{\muathbb C} P^1}))^* \omega} \def\O{\Omegatimes \delta} \def\muathbb{D}{\muathbb{D}eltaet (E'\vert_p^*) \omega} \def\O{\Omegatimes
\delta} \def\muathbb{D}{\muathbb{D}eltaet \betaigl((\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})\betaigr).
$$
On the other hand, we observe that
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaet({\muathbb V} \omega} \def\O{\Omegaplus H^1({{\muathbb C} P^1};E'\vert_{{\muathbb C} P^1}))^* \omega} \def\O{\Omegatimes
\delta} \def\muathbb{D}{\muathbb{D}eltaet \betaigl((\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})\betaigr)
$$
is isomorphic to
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaet (H^1({{\muathbb C} P^1};E'\vert_{{\muathbb C} P^1}))^* \omega} \def\O{\Omegatimes \delta} \def\muathbb{D}{\muathbb{D}eltaet H^0({\muathbb C} P^1;E'\vert_{{\muathbb C} P^1}),
$$
on which the complex conjugation acts by
the multiplication by $(-1)^{\muu(\muathbb{L}ambdambda)/2 + n}$,
since
$$\delta} \def\muathbb{D}{\muathbb{D}eltaim_{\muathbb C} H^0({\muathbb C} P^1;E'\vert_{{\muathbb C} P^1}) - \delta} \def\muathbb{D}{\muathbb{D}eltaim_{\muathbb C} H^1({{\muathbb C} P^1};E'\vert_{{\muathbb C} P^1}) = \psihi} \def\F{\Phirac12 \muu (\muathbb{L}ambdambda)+ n .$$
Here $n$ is the rank of $E'$ as a complex vector bundle.
Note also that the complex conjugation acts on $E'\vert_p$ by the multiplication by $(-1)^n$.
Combining these, we find that the complex conjugation acts on
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaet({\muathbb V} \omega} \def\O{\Omegaplus H^1({{\muathbb C} P^1};E'\vert_{{\muathbb C} P^1}))^* \omega} \def\O{\Omegatimes \delta} \def\muathbb{D}{\muathbb{D}eltaet ~^{\chiirc}\betaigl((\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})\betaigr),
$$
by the multiplication by $(-1)^{\muu(\muathbb{L}ambdambda)/2 }$.
Note that the action of $T_{w} \tauau$ on the determinant bundle of $D_w\omega} \def\O{\Omegaverline{\psiartial}$ is isomorphic to
the conjugation action explained above.
Therefore this map is orientation preserving if and only
if $\psihi} \def\F{\Phirac1{2}\muu(\muathbb{L}ambdambda) \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv 0 \muod 2$, i.e., $\muu(\muathbb{L}ambdambda) \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv 0
\muod 4$. We note, by definition,
that $\muu_L(\betaegin{equation}ta)=\muu (\muathbb{L}ambdambda)$.
This finishes the proof of Theorem \rhoef{Proposition38.7}.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
The argument above is an adaptation of Lemma 8.3.2 (4) in \chiite{fooobook2}
with $X_1=\tauext{\rhom index}~ \omega} \def\O{\Omegaverline{\psiartial}_{(D^2 \tauimes {\muathbb C}^n, \psiartial D^2 \tauimes {\muathbb R}^n)}$,
$X_2=\tauext{\rhom index}~ (\omega} \def\O{\Omegaverline{\psiartial}_{E'\vert_{{\muathbb C}P^1}})^{-1}({\muathbb V})$ and $Y=E'\vert_p$.
The complex conjugation action on $X_1$ (resp. $X_2$, $Y$) is a $+1$ (resp. $(-1)^{\muu(\muathbb{L}ambdambda)/2+n}$, $(-1)^n$)-oriented
isomorphism. Hence the action on $X_1 \tauimes_Y X_2$ is a $(-1)^{\muu(\muathbb{L}ambdambda)/2}$-oriented isomorphism.
\psiar\muedskip
\betaegin{equation}gin{proof}[Proof of Theorem \rhoef{Lemma38.14}]
We will extend the map $\tauau_{\alphast}^{\rhom main}$
(see (\rhoef{taumain})) to
an automorphism of Kuranishi structure by a triple induction over
$\omega} \def\O{\Omegamega(\betaegin{equation}ta)= \int_{\betaegin{equation}ta}\omega} \def\O{\Omegamega$, $k$ and $m$.
Namely we define an order on the set of triples
$(\betaegin{equation}ta, k, m)$ by the relation
\betaegin{equation}gin{subequations}\lambda} \def\La{\Lambdaambdabel{tripleorder}
\betaegin{equation}gin{equation}
\omega} \def\O{\Omegamega(\betaegin{equation}ta') < \omega} \def\O{\Omegamega(\betaegin{equation}ta).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
\betaegin{equation}gin{equation}
\omega} \def\O{\Omegamega(\betaegin{equation}ta') = \omega} \def\O{\Omegamega(\betaegin{equation}ta), \quad k' < k.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
\betaegin{equation}gin{equation}
\omega} \def\O{\Omegamega(\betaegin{equation}ta') = \omega} \def\O{\Omegamega(\betaegin{equation}ta), \quad k' = k, \quad m' <m.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{subequations}
We will define the extension of (\rhoef{taumain}) for $(\betaegin{equation}ta, k, m)$ to
an automorphism of Kuranishi structure
under the assumption that such extension is already defined for
all $(\betaegin{equation}ta',k',m')$ smaller than $(\betaegin{equation}ta, k, m)$ with respect to this order.
Firstly, we consider the case that the domain is irreducible, i.e.,
a pseudo-holomorphic map $w:(D^2, \psiartial D^2) \tauo (M,L)$.
Let $((D^2,\vec z,\vec z\,^+),w)$ be an element of
${\muathbb C}M^{\omega} \def\O{\Omegaperatorname{main}}_{k+1,m}(J;\betaegin{equation}ta)^{[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]}$.
We consider $((D^2,\vec z\,',\vec z\,^{+\psirime}),w')$ where
$(D^2,\vec z\,',\vec z\,^{+\psirime})$ is close to $(D^2,\vec z,\vec z\,^+)$ in the
moduli space of discs with $k+1$ boundary and $m$ interior marked points.
We use local trivialization of this moduli space
to take a diffeomorphism $(D^2,\vec z) \chiong (D^2,\vec z\,')$.
(In case $2m + k <2$ we take additional interior marked points
to stabilize the domain. See \chiite{FO} appendix and
\chiite{foootech} Definition 18.9.)
We assume $w' : (D^2,\psiartial D^2) \tauo (M,L)$ is
$C^1$-close to $w$, using this identification.
To define a Kuranishi chart in a neighborhood of $[((D^2,\vec z,\vec z\,^+),w)]$ we
take a family of finite dimensional subspaces
$E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')$ of $\Gamma(D^2;\muathbb{L}ambdambda^{0,1} \omega} \def\O{\Omegatimes (w')^*TM)$
such that
$$
\omega} \def\O{\Omegaperatorname{Im}D_{w'}\omega} \def\O{\Omegaverline\psiartial + E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')
= \Gamma(D^2;\muathbb{L}ambdambda^{0,1}\omega} \def\O{\Omegatimes (w')^*TM).
$$
The Kuranishi neighborhood $V_{[((D^2,\vec z,\vec z\,^+),w)]}$ is
constructed in Section 7.1 in \chiite{fooobook2} and
is given by the set of solutions of the
equation
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{4-16}
\omega} \def\O{\Omegaverline\psiartial w' \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv 0 \muod E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w').
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
Moreover we will take it so that the evaluation maps
$$
V_{[((D^2,\vec z,\vec z\,^+),w)]} \tauo L^{k+1}
$$
defined by $((D^2,\vec z\,',\vec z\,^{+\psirime}),w')
\muapsto (w'(z'_1),\delta} \def\muathbb{D}{\muathbb{D}eltaots,w'(z'_k),w'(z'_0))$ is a submersion.
\psiar
We choose $E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')$ so that it is invariant under $\tauau^{\rhom main}$,
in the following sense.
We define $\varphiidetilde w$, $\varphiidetilde w'$, and $\vec{\omega} \def\O{\Omegaverline z}\,^+$,
$\vec{\omega} \def\O{\Omegaverline z}\,^{+\psirime}$ as in
(\rhoef{38.5}).
We also define $\vec{\omega} \def\O{\Omegaverline z}^{\rhom ~rev}$,
$\vec{\omega} \def\O{\Omegaverline z}\,^{\psirime {\rhom ~rev}}$ as in (\rhoef{38.13}).
(So $\tauau^{\rhom main}_*([((D^2,\vec z,\vec z\,^+),w)])
= [((D^2,\vec{\omega} \def\O{\Omegaverline z}^{\rhom ~rev},\vec{\omega} \def\O{\Omegaverline z}\,^+),\varphiidetilde w)]$.)
Then we require:
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{4-17}
\alphaligned
&E_{[((D^2,\vec{\omega} \def\O{\Omegaverline z}^{\rhom ~rev},\vec{\omega} \def\O{\Omegaverline z}\,^+),\varphiidetilde w)]}
([((D^2,\vec{\omega} \def\O{\Omegaverline z}\,^{\psirime \rhom rev},\vec{\omega} \def\O{\Omegaverline z}\,^{+\psirime}),\varphiidetilde w')])
\\
&= (T_{w',1}\tauau)_*(E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonndaligned
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
Here $(T_{w',1}\tauau)_*$ is as in (\rhoef{Ttau1}).
If (\rhoef{4-17}) is satisfied then
it is easy to see the following.
\betaegin{equation}gin{enumerate}
\item[(*)]
If $w'$ satisfies
(\rhoef{4-16}) then
$
\omega} \def\O{\Omegaverline\psiartial \varphiidetilde w' \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv 0 \muod E_{[((D^2,\vec{\omega} \def\O{\Omegaverline z}^{\rhom ~rev},\vec{\omega} \def\O{\Omegaverline z}\,^+),\varphiidetilde w)]}.
$
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{enumerate}
$(*)$ implies that the map $[((D^2,\vec z\,',\vec z\,^{+\psirime}),w')] \muapsto
[((D^2,\vec{\omega} \def\O{\Omegaverline z}\,^{\psirime \rhom ~rev},\vec{\omega} \def\O{\Omegaverline z}\,^{+\psirime}),\varphiidetilde w')]$ defines a diffeomorphism
between Kuranishi neighborhoods of
$[((D^2,\vec z,\vec z\,^+),w)]$ and of
$[((D^2,\vec{\omega} \def\O{\Omegaverline z}^{\rhom ~rev},\vec{\omega} \def\O{\Omegaverline z}\,^+),\varphiidetilde w)]$.
Moreover the Kuranishi map
$
[((D^2,\vec z\,',\vec z\,^{+\psirime}),w')] \muapsto s([((D^2,\vec z\,',\vec z\,^{+\psirime}),w')]) = \omega} \def\O{\Omegaverline\psiartial w'
$
commutes with $\tauau^{\rhom main}_*$.
Therefore, $\tauau^{\rhom main}_*$ induces an isomorphism of
our Kuranishi structure on
${\muathbb C}M_{k+1,m}^{\omega} \def\O{\Omegaperatorname{main}}(J;\betaegin{equation}ta)$.
\psiar\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamamallskip
In order to find $E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')$
satisfying (\rhoef{4-17}) in addition, we proceed as follows.
We first recall briefly the way how we defined it in \chiite{FO} appendix, \chiite{fooobook2} pages 423-424,
\chiite{foootech} Section 18.
We take a sufficiently dense subset $\{((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a}) \muid \psihi} \def\F{\Phirak a \in \psihi} \def\F{\Phirak A\}
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset {\muathbb C}M_{k+1,m}^{\omega} \def\O{\Omegaperatorname{main}}(J;\betaegin{equation}ta)$.
We choose
$$
E_{\psihi} \def\F{\Phirak a}
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset
\Gamma(D^2;\muathbb{L}ambdambda^{0,1}\omega} \def\O{\Omegatimes (w_{\psihi} \def\F{\Phirak a})^*TM)
$$
that is a finite dimensional vector space of smooth sections
whose supports are away from node or marked points and
satisfy
$$
\omega} \def\O{\Omegaperatorname{Im}D_{w_{\psihi} \def\F{\Phirak a}}\omega} \def\O{\Omegaverline\psiartial + E_{\psihi} \def\F{\Phirak a}
= \Gamma(D^2;\muathbb{L}ambdambda^{0,1}\omega} \def\O{\Omegatimes (w_{\psihi} \def\F{\Phirak a})^*TM).
$$
Moreover we assume
$$
\betaigoplus dev_{z_i} :
(\omega} \def\O{\Omegaperatorname{Im}D_{w_{\psihi} \def\F{\Phirak a}}\omega} \def\O{\Omegaverline\psiartial)^{-1}(E_{\psihi} \def\F{\Phirak a})
\tauo \betaigoplus_{i=0,\delta} \def\muathbb{D}{\muathbb{D}eltaots,k}T_{w_{\psihi} \def\F{\Phirak a}(z_i)}L
$$
is surjective. See \chiite{fooobook2} Lemma 7.1.18.
\psiar
We take a sufficiently small neighborhood $W_{\psihi} \def\F{\Phirak a}$ of $w_{\psihi} \def\F{\Phirak a}$
such that
$$
\betaigcup_{\psihi} \def\F{\Phirak a \in \psihi} \def\F{\Phirak A} W_{\psihi} \def\F{\Phirak a}
=
{\muathbb C}M_{k+1,m}^{\omega} \def\O{\Omegaperatorname{main}}(J;\betaegin{equation}ta).
$$
Let $[((D^2,\vec z,\vec z\,^+),w)] \in W_{\psihi} \def\F{\Phirak a}$
and
$((D^2,\vec z\,',\vec z\,^{+\psirime}),w')$ be as above.
We take an isomorphism
\betaegin{equation}gin{equation}
I_{w',\psihi} \def\F{\Phirak a} :
\Gamma(D^2,\psiartial D^2: w_{\psihi} \def\F{\Phirak a}^*TM,
w_{\psihi} \def\F{\Phirak a}\vert_{\psiartial D^2}^*TL)
\chiong
\Gamma(D^2,\psiartial D^2: (w')^*TM,
w'\vert_{\psiartial D^2}^*TL).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
Then we define
\betaegin{equation}gin{equation}
E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')
=
\betaigoplus_{\psihi} \def\F{\Phirak a \in \psihi} \def\F{\Phirak A, \alphatop w' \in W_{\psihi} \def\F{\Phirak a}}
I_{w',\psihi} \def\F{\Phirak a}(E_{\psihi} \def\F{\Phirak a}).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
(It is easy to see that we can perturb $E_{\muathfrak a}, {\muathfrak a} \in {\muathfrak A}$, a bit so that the right hand side is a
direct sum. See \chiite{foootech} Section 27 for example.)
\psiar
We now explain the way how we take
$E_{[((D^2,\vec z,\vec z\,^+),w)]}((D^2,\vec z\,',\vec z\,^{+\psirime}),w')$
so that (\rhoef{4-17}) is satisfied.
\psiar
We first require that
\betaegin{equation}gin{enumerate}
\item[(i)]
The set
$\{((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a}) \muid \psihi} \def\F{\Phirak a \in \psihi} \def\F{\Phirak A\}$
is invariant under the $\tauau^{\rhom main}_*$ action.
\item[(ii)]
If
$((D^2_{\psihi} \def\F{\Phirak b},\vec z_{\psihi} \def\F{\Phirak b},\vec z\,^+_{\psihi} \def\F{\Phirak b}),w_{\psihi} \def\F{\Phirak b})
= \tauau^{\rhom main}_*((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a})$,
$\psihi} \def\F{\Phirak a \nue \psihi} \def\F{\Phirak b$ then
$
E_{\psihi} \def\F{\Phirak b} =
(T_{w_{\psihi} \def\F{\Phirak a},1}\tauau)_*(E_{\psihi} \def\F{\Phirak a}).
$
Moreover $\tauau^{\rhom main}_*(W_{\psihi} \def\F{\Phirak a}) = W_{\psihi} \def\F{\Phirak b}$.
\item[(iii)]
If
$((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a})
= \tauau^{\rhom main}_*((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a})$
then
$
E_{\psihi} \def\F{\Phirak a} =
(T_{w_{\psihi} \def\F{\Phirak a},1}\tauau)_*(E_{\psihi} \def\F{\Phirak a}).
$
Moreover $\tauau^{\rhom main}_*(W_{\psihi} \def\F{\Phirak a}) = W_{\psihi} \def\F{\Phirak a}$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{enumerate}
It is easy to see that such a choice exists.
\psiar
We next choose $I_{w',\psihi} \def\F{\Phirak a}$ such that
the following holds.
\betaegin{equation}gin{enumerate}
\item[(I)]
If $((D^2_{\psihi} \def\F{\Phirak b},\vec z_{\psihi} \def\F{\Phirak b},\vec z\,^+_{\psihi} \def\F{\Phirak b}),w_{\psihi} \def\F{\Phirak b})
= \tauau^{\rhom main}_*((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a})$,
$\psihi} \def\F{\Phirak a \nue \psihi} \def\F{\Phirak b$ then
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{47.13}
(T_{w',1}\tauau)_* \chiirc I_{w',\psihi} \def\F{\Phirak a} = I_{\varphiidetilde w',\psihi} \def\F{\Phirak b} \chiirc (T_{w_{\psihi} \def\F{\Phirak a},1}\tauau)_*.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
\item[(II)]
If
$((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a})
= \tauau^{\rhom main}_*((D^2_{\psihi} \def\F{\Phirak a},\vec z_{\psihi} \def\F{\Phirak a},\vec z\,^+_{\psihi} \def\F{\Phirak a}),w_{\psihi} \def\F{\Phirak a})$
then
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{47.132}
(T_{w',1}\tauau)_* \chiirc I_{w',\psihi} \def\F{\Phirak a} = I_{\varphiidetilde w',\psihi} \def\F{\Phirak a} \chiirc (T_{w_{\psihi} \def\F{\Phirak a},1}\tauau)_*.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{enumerate}
We can find such $I_{w',\psihi} \def\F{\Phirak a}$ by taking various data
to define this isomorphism (See \chiite{foootech} Definition 17.7) to be invariant under the $\tauau$ action.
\psiar
It is easy to see that (i)(ii)(iii) and (I)(II) imply (\rhoef{4-17}).
\psiar
We have thus constructed the Kuranishi neighborhood
of the point $ {\muathbb C}M^{\omega} \def\O{\Omegaperatorname{main}}_{k+1,m}(J;\betaegin{equation}ta)$ corresponding to a pseudoholomorphic map from
a disc (without disc or sphere bubbles).
\psiar
Let us consider an element
$((\Sigmama,\vec z,\vec z\,^{ +}),w) \in {\muathbb C}M^{\omega} \def\O{\Omegaperatorname{main}}_{k+1,m}(J;\betaegin{equation}ta)$,
such that $\Sigmama$ is not irreducible. (Namely $\Sigmama$ is not $D^2$.)
\betaegin{equation}gin{lem-def}\lambda} \def\La{\Lambdaambdabel{lem51}
$((\Sigmama,\vec z,\vec z\,^{ +}),w)$
is obtained by gluing elements of
${\muathbb C}M^{\omega} \def\O{\Omegaperatorname{main}}_{k'+1,m'}(J;\betaegin{equation}ta')$ with
$(\betaegin{equation}ta',k',m') < (\betaegin{equation}ta,k,m)$ and
sphere bubbles.
$\tauau^{\tauext{\rhom main}}_* ((\Sigmama,\vec z,\vec z\,^{ +}),w)$ is defined using
$\tauau^{\tauext{\rhom main}}_*$ on ${\muathbb C}M^{\omega} \def\O{\Omegaperatorname{main}}_{k'+1,m'}(J;\betaegin{equation}ta')$ with
$(\betaegin{equation}ta',k',m') < (\betaegin{equation}ta,k,m)$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{lem-def}
\betaegin{equation}gin{proof}
First we suppose that $\Sigmama$ has a sphere bubble $S^2 \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset \Sigmama$.
We remove it from $\Sigmama$ to obtain $\Sigmama_0$.
We add one more marked point to $\Sigmama_0$ at the location
where the sphere bubble used to be attached.
Then we obtain an element
$$
((\Sigmama_0,\vec z,\vec z\,^{(0)}),w_0) \in {\muathbb C}M^{\omega} \def\O{\Omegaperatorname{main}}_{k+1,m+1-\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll}(J;\betaegin{equation}ta').
$$
Here $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll$ is the number of marked points on $S^2$.
By the induction hypothesis, $\tauau^{\tauext{\rhom main}}_*$ is already defined on
${\muathbb C}M_{k+1,m+1-\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll}^{\tauext{\rhom main}}(J;\betaegin{equation}ta')$, since
\omega} \def\O{\Omegamega(\betaegin{equation}ta) \gamma} \def\muathbb{D}{\muathbb{D}eltaef\G{\Gammaeq \omega} \def\O{\Omegamega(\betaegin{equation}ta') and if $\omega} \def\O{\Omegamega(\betaegin{equation}ta) = \omega} \def\O{\Omegamega(\betaegin{equation}ta')$, $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll \gamma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\G{\Gammaeq 2$.
We denote
$$
((\Sigmama'_0,\vec z\,',\vec z\,^{(0) \psirime}),w_0^\psirime)
:=
\tauau_*(((\Sigmama_0,\vec z,\vec z\,^{(0)}),w_0)).
$$
We define $v: S^2 \tauo M$ by
$$
v(z) = \tauau \chiirc w\vert_{S^2}(\omega} \def\O{\Omegaverline z).
$$
We assume that the nodal point
in $\Sigmama_0 \chiap S^2$ corresponds to $0 \in {\muathbb C} \chiup \{\infty\} \chiong
S^2$. We also map $\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonll$ marked points on $S^2$ by $z \muapsto \omega} \def\O{\Omegaverline z$
whose images we denote by $\vec z\,^{(1)} \in S^2$.
We then glue $((S^2,\vec z\,^{(1)}), v)$ to
$((\Sigmama'_0,\vec z\,',\vec z\,^{(0) \psirime}), w_0')$ at the
point $0 \in S^2$ and at the
last marked point of $(\Sigmama'_0,\vec z\,',\vec z\,^{(0) \psirime})$ and obtain
a curve which is to be the definition of
$\tauau^{\tauext{\rhom main}}_*(((\Sigmama,\vec z,\vec z\,^+),w))$.
\psiar
Next suppose that there is no sphere bubble on
$\Sigmama$.
Let $\Sigmama_0$ be the component containing the 0-th marked point.
If there is only one irreducible component
of $\Sigmama$, then $\tauau^{\tauext{\rhom main}}_*$ is already defined there.
So we assume that there is at least one other disc component. Then $\Sigmama$ is a union of $\Sigmama_0$ and
$\Sigmama_i$ for $i = 1,\delta} \def\muathbb{D}{\muathbb{D}eltaots,m$ ($m \gamma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\G{\Gammae 1$).
We regard the unique point in $\Sigmama_0 \chiap \Sigmama_i$
as a marked point of $\Sigmama_0$ for $i=1,\delta} \def\muathbb{D}{\muathbb{D}eltaots,m$.
Here each $\Sigmama_i$ itself is a union of disc components and is connected.
We also regard the point in $\Sigmama_0 \chiap \Sigmama_i$ as
$1 \in D^2 \chiong \ifmmode{\muathbb H}\else{$\muathbb H$}\psihi} \def\F{\Phii \chiup \{\infty\}$ where $D^2$ is the irreducible component
of $\Sigmama_i$ joined to $\Sigmama_0$, and also as
one of the marked points of $\Sigmama_i$. This defines an element
$((\Sigmama_i,\vec z\,^{(i)},\vec z\,^{(i) +}),w_{(i)})$
for each $i = 0,\delta} \def\muathbb{D}{\muathbb{D}eltaots, m$.
By easy combinatorics and the induction hypothesis, we can show that
$\tauau^{\tauext{\rhom main}}_*$ is already constructed on them.
Now we define $\tauau^{\tauext{\rhom main}}_*(((\Sigmama,\vec z,\vec z\,^+),w))$
by gluing $\tauau^{\tauext{\rhom main}}_*(((\Sigmama_i,\vec z\,^{(i)},\vec z\,^{(i) +}),w_{(i)}))$ to
$\tauau^{\tauext{\rhom main}}_*((\Sigmama_0,\vec{z}\,^{(0)},{\vec z}\,^{(0) +}),w_{(0)})$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\psiar
Thus we proved that,
if $\Sigmama$ is not irreducible, then $((\Sigmama,\vec z,\vec z\,^{ +}),w)$
is obtained by gluing some elements corresponding to
$(\betaegin{equation}ta',k',m') < (\betaegin{equation}ta,k,m)$ and
sphere bubbles.
\psiar
We define
the map $u \muapsto \varphiidetilde u$
by the same formula as (\rhoef{38.4}) on the moduli space of {\it spheres}.
Then we can regard this map as an involution
on the space with Kuranishi structure in the same way
as the case of discs.
(In other words, we construct $\tauau$ invariant Kuranishi structures
on the moduli space of spheres before starting the construction of the Kuranishi structures
on the moduli space of discs.)
\psiar
Let us consider an element
$((\Sigma,\vec z,\vec z\,^{ +}),w) \in {\mathcal M}^{\operatorname{main}}_{k+1,m}(J;\beta)$,
such that $\Sigma$ is not irreducible.
By Lemma \ref{lem51} and the above remark, the
involution of its Kuranishi neighborhood is constructed
by the induction hypothesis, on each irreducible component (which is either a disc or a sphere).
A Kuranishi neighborhood of
$((\Sigma,\vec z,\vec z\,^{ +}),w)$
is a fiber product of the Kuranishi neighborhoods of
the gluing pieces and the space of the
smoothing parameters of the singular points.
By definition, our involution obviously commutes with the process to take
the fiber product. For the parameter space of smoothing the interior singularities,
the action of the involution is the complex
conjugation. For the parameter space of smoothing the boundary
singularities, the action of involution is trivial.
The fiber product corresponding to a boundary node is taken over the Lagrangian submanifold $L$ and
the fiber product corresponding to an interior node is taken over the ambient symplectic manifold $M$.
Hence the fiber product construction can be carried out in a $\tau$-invariant way.
It is easy to see that the analysis we worked out in Section 7.1 \cite{fooobook2} (see also \cite{foootech}
Parts 2 and 3 for more detail) of the gluing is
compatible with the involution.
Thus $\tau^{\text{\rm main}}_*$ defines an involution on ${\mathcal M}^{\operatorname{main}}_{k+1,m}(J;\beta)$
with Kuranishi structure.
The proof of Theorem \ref{Lemma38.14} is complete.
\end{proof}
\begin{proof}[Proof of Theorem \ref{Proposition38.11} (1).]
The proof is the same as the proof of Theorem \ref{Lemma38.14}
except the following point.
Instead of ${\mathcal M}^{\operatorname{main}}_{k+1,m}(J;\beta)$ we
will construct Kuranishi structure on ${\mathcal M}_{k+1,m}(J;\beta)$.
We want it invariant under $\tau_*$ instead of $\tau^{\rm main}_*$.
(Note ${\mathcal M}^{\operatorname{main}}_{k+1,m}(J;\beta)$
is not invariant under $\tau_*$.)
Taking this point into account the proof of Theorem \ref{Proposition38.11} (1)
goes in the same way as the proof of Theorem \ref{Lemma38.14}.
\end{proof}
Note we actually do not use Theorem \ref{Proposition38.11} (1)
to prove our main results.
(It is Theorem \ref{Lemma38.14} that we actually use.)
So we do not discuss its proof in more detail.
\begin{proof}[Proof of Theorem \ref{Lemma38.17}.]
To prove the assertion on orientation,
it is enough to consider the orientation on
the regular part
${\mathcal M}^{\text{\rm main, reg}}_{k+1,m}(J;\beta;P_1, \dots , P_k)$.
See Remark \ref{rem:thmori} (2).
By Theorem \ref{Proposition38.7},
$\tau_*:{\mathcal M}^{\text{\rm reg}}(J;\beta)^{\tau^*[(V,\sigma)]}
\to {\mathcal M}^{\text{\rm reg}}(J;\beta)^{[(V,\sigma)]}$
is orientation preserving if and only if $\mu_L(\beta )/2$ is even.
Recall when we consider the main component
${\mathcal M}^{\rm main, reg}_{k+1, m}(J;\beta)$, the boundary marked points are in counter-clockwise
cyclic ordering. However,
by the involution $\tau_*$ in Theorem \ref{Lemma38.17}, each boundary marked point $z_i$ is mapped to
$\overline{z}_i$ and each interior marked point $z^+_j$ is mapped to
$\overline{z^+_j}$.
Thus the order of the boundary marked points changes to
clockwise ordering.
Denote by
${\mathcal M}^{\text{\rm clock, reg}}_{k+1,m}(J;\beta)^{[(V,\sigma)]}$
the moduli space with the boundary
marked points $(z_0,z_1, \dots ,z_k)$ respecting the {\it clock-wise} orientation
and interior marked points $z^+_1, \dots, z^+_m$.
Since $z \mapsto \overline{z}$ reverses the orientation on the boundary
and $z^+ \mapsto \overline{z^+}$ reverses the orientation on the interior, the argument in the proof of Proposition \ref{Lemma38.9}
shows that
$\tau_*:{\mathcal M}^{\text{\rm main, reg}}_{k+1, m}(J;\beta)^{\tau^*[(V,\sigma)]}
\to {\mathcal M}^{\text{\rm clock, reg}}_{k+1,m}(J;\beta)^{[(V,\sigma)]}$
respects the orientation if and only if $\mu_L(\beta)/2 + k+1 + m$ is even.
Thus we have
$$
\aligned
& {\mathcal M}^{\text{\rm main, reg}}_{k+1,m}(J;\beta;P_1, \dots , P_k)^{\tau^{\ast}[(V,\sigma)]} \\
= & (-1)^{\mu_L(\beta)/2 + k+1+m}
{\mathcal M}^{\text{\rm clock, reg}}_{k+1,m}(J;\beta;P_1, \dots ,P_k)^{[(V,\sigma)]}.
\endaligned
$$
Recall that Lemma \ref{Lemma8.4.3} describes how the orientation of ${\mathcal M}_{k+1,m}(J;\beta;P_1,\dots ,P_k)$
changes by changing ordering of boundary marked points.
Thus, using Lemma \ref{Lemma8.4.3}, we obtain Theorem \ref{Lemma38.17} immediately.
\end{proof}
Since the map $\tau_{\ast}^{\rm main}$ preserves the
ordering of interior marked points, we also obtain the following.
When we study bulk deformations (\cite{fooo06}, \cite{fooobook1}) of the $A_{\infty}$ algebra
for $L=\text{Fix } \tau$,
which we do not discuss in this article, we need the next theorem.
\begin{thm}\label{Lemma38.17withQ}
Let $Q_1,\dots ,Q_m$ be smooth singular simplicies of
$M$. Then the map
$$
\aligned
\tau_{\ast}^{\rm main}~:~
& ~{\mathcal M}^{\text{\rm main}}_{k+1,m}(J;\beta;Q_1,\dots ,
Q_m;P_1, \dots , P_k)^{\tau^{\ast}[(V,\sigma)]} \\
\longrightarrow
& ~{\mathcal M}^{\text{\rm main}}_{k+1,m}(J;\beta;\tau (Q_1),\dots ,\tau (Q_m);P_k, \dots ,P_1)^{[(V,\sigma)]}
\endaligned
$$
preserves orientation if and only if
$$
\epsilon = \frac{\mu_L(\beta)}{2} + k + 1 + m + nm + \sum_{1 \le i < j \le k} \deg'P_i\deg'P_j
$$
is even.
\end{thm}
\begin{proof}
Note that $\tau$ preserves the orientation on $M$ if and only if
$\frac{1}{2}\dim_{{\mathbb R}}M=n$ is even. Taking this into account,
Theorem \ref{Lemma38.17withQ} follows from Theorem \ref{Lemma38.17}.
See also Lemma \ref{oripreversing}.
\end{proof}
\section{Applications}
\label{sec:Appl}
Using the results obtained in the previous sections, we prove Theorem \ref{Theorem34.20},
Corollary \ref{Corollary34.22},
Corollary \ref{TheoremN} in Subsection \ref{subsec:Appl1},
Theorem \ref{Proposition34.25}
and Corollary \ref{qMassey} in Subsection \ref{proof1.9} and
calculate Floer cohomology of ${\mathbb R} P^{2n+1}$
over $\Lambda_{0,{\rm nov}}^{{\mathbb Z}}$ in Subsection
\ref{subsec:Appl2}.
\subsection{Filtered $A_{\infty}$ algebra
and Lagrangian Floer cohomology}
\label{subsec:Ainfty}
In order to explain how our study of
orientations can be used for the applications to Lagrangian Floer theory,
we briefly recall the construction of the filtered $A_\infty$ algebra for the
relatively spin Lagrangian submanifold and its obstruction/deformation theory developed in our books
\cite{fooobook1}, \cite{fooobook2}.
See a survey paper \cite{Oht} for a more detailed review.
Let $R$ be a commutative ring with unit.
Let $e$ and $T$ be formal variables of degree $2$ and $0$, respectively.
We use the {\it universal Novikov ring} over $R$
as our coefficient ring:
\begin{align}\label{eq:nov}
\Lambda^{R}_{{\rm nov}} & = \left.\left\{ \sum_{i=0}^{\infty} a_i T^{\lambda_i}e^{\mu_i} \
\right\vert \ a_i \in R,
\ \mu_i \in {\mathbb Z}, \ \lambda_i \in {\mathbb R}, \
\lim_{i \to \infty}\lambda_i = +\infty \right\},
\\
\Lambda^{R}_{0,{\rm nov}} & =
\left.\left\{ \sum_{i=0}^{\infty} a_i T^{\lambda_i}e^{\mu_i} \in \Lambda_{{\rm nov}}
\ \right\vert \ \lambda_i \geq 0 \right\}.
\label{eq:nov0}
\end{align}
We define a filtration $F^{\lambda}\Lambda_{0,{\rm nov}}^R=
T^{\lambda}\Lambda_{0,{\rm nov}}^R$ ($\lambda \in {\mathbb R}_{\ge 0}$) on $\Lambda_{0,{\rm nov}}^R$
which induces a filtration $F^{\lambda}\Lambda_{{\rm nov}}^R$
($\lambda \in {\mathbb R}$) on $\Lambda_{{\rm nov}}^R$.
We call this filtration the {\it energy filtration}.
Given these filtrations, both $\Lambda_{0,{\rm nov}}^R$ and $\Lambda_{{\rm nov}}^R$
become graded filtered commutative rings.
In the rest of this subsection and the next, we take $R={\mathbb Q}$.
We use the case $R={\mathbb Z}$ in Subsection \ref{subsec:Appl2}.
In Section \ref{sec:pre} we define
${\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)$
for smooth singular simplicies $(P_i,f_i)$ of $L$.
By the result of Section 7.1 \cite{fooobook2} it has a Kuranishi structure.
Here we use the same notations for the Kuranishi structure as the ones used in Appendix
of the present paper.
The space ${\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)$
is locally described by
$s^{-1}_p(0)/\Gamma_p$.
If the Kuranishi map $s_p$ is transverse to the zero
section, it is locally an orbifold.
However, if $\Gamma_p$ is non trivial, we can not perturb $s_p$ to a
$\Gamma_p$-equivariant section transverse to the zero
section, in general. Instead of single valued sections,
we take a $\Gamma_p$-equivariant {\it multi-valued} section
(multi-section)
$\mathfrak s_p$ of $E_p \to V_p$
so that each branch of the multi-section is transverse to the zero section
and ${\mathfrak s}_p^{-1}(0)/\Gamma_p$ is sufficiently close
to $s_p^{-1}(0)/\Gamma_p$.
(See Sections 7.1 and 7.2 in \cite{fooobook2} for the precise
statement.)
We denote the perturbed zero locus (divided by $\Gamma_p$) by
${\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)^{\mathfrak s}$.
We have the evaluation map at the zero-th marked point
for the perturbed moduli space:
$$
ev_0 :
{\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)^{\mathfrak s}
\longrightarrow L.
$$
Then
such a system $\mathfrak s =\{ \mathfrak s_p \}$ of multi-valued sections gives
rise to the virtual fundamental chain over ${\mathbb Q}$ as follows:
By Lemma 6.9 in \cite{FO} and Lemma A1.26 in \cite{fooobook2},
${\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)^{\mathfrak s}$ carries a smooth triangulation. We take a smooth triangulation on it.
Each simplex $\Delta^d_a$ of dimension $d=\dim \mathfrak s_p^{-1}(0)$ in the triangulation comes with multiplicity
$\text{mul}_{\Delta^d_a} \in {\mathbb Q}$.
(See Definition A1.27 in \cite{fooobook2} for the definition of multiplicity.)
Restricting $ev_0$ to $\Delta_a^d$, we have a
singular simplex of $L$ denoted by $(\Delta^d_a, ev_0)$.
Then the virtual fundamental chain over ${\mathbb Q}$ which we denote by
$({\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)^{\mathfrak s},ev_0)$
is defined by
$$
({\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)^{\mathfrak s},ev_0)
=
\sum_a \text{mul}_{\Delta_a^d} \cdot (\Delta^d_a, ev_0).
$$
When the virtual dimension is zero, i.e. when $d=0$, we denote
$$
\#{\mathcal M}^{\rm main}_{k+1}(J;\beta;P_1,\dots ,P_k)^{\mathfrak s}
= \sum_a \text{mul}_{\Delta_a^0} \in {\mathbb Q}.
$$
Now
put $\Pi(L)_0 = \{ (\omega(\beta), \mu_L(\beta)) ~\vert~\beta \in \Pi(L),
~{\mathcal M}(J;\beta) \ne \emptyset \}$, see (\ref{eq:Pi})
for $\Pi(L)$.
Let $G(L)$ be a submonoid of
${\mathbb R}_{\ge 0} \times 2{\mathbb Z}$ generated by $\Pi(L)_0$.
We put $\beta_0 =(0,0) \in G(L)$.
\begin{defn}\label{def:mk}
For smooth singular simplicies $P_i$ of $L$ and $\beta \in G(L)$,
we define a series of maps
$
\mathfrak m_{k, \beta}
$
by
$$
\aligned
\mathfrak m_{0,\beta}(1) & =
\begin{cases}
({\mathcal M}_1(J;\beta)^{\mathfrak s},ev_0) & \text {for $\beta \ne \beta _0$} \\
0 & \text {for $\beta =\beta _0$,}
\end{cases} \\
\mathfrak m_{1,\beta}(P) & =
\begin{cases}
({{\mathcal M}}_{2}^{\text {main}}(J;\beta;P)^{\mathfrak s},ev_0) & {\text {for $\beta \ne
\beta_0$}}\\
(-1)^{n}\partial P & {\text {for $\beta = \beta_0$}},
\end{cases}
\endaligned
$$
and
$$
\mathfrak m_{k,\beta}(P_1, \ldots ,P_k) =
({\mathcal M}_{k+1}^{\text {main}}(J;\beta;P_1,\ldots, P_k)^{\mathfrak s},ev_0), \quad k\ge 2.
$$
\end{defn}
Here $\partial$ is the usual boundary operator and
$n=\dim L$.
Then,
one of main results proved in \cite{fooobook1} and \cite{fooobook2} is
as follows:
For a smooth singular chain $P$ on $L$
we put the cohomological grading
$\deg P = n -\dim P$ and regard
a smooth singular chain complex $S_{\ast}(L;{\mathbb Q})$
as a smooth singular cochain complex $S^{n-\ast}(L;{\mathbb Q})$.
For a subcomplex $C(L;{\mathbb Q})$ of $S(L;{\mathbb Q})$
we denote by
$C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$
a completion of $C(L;{\mathbb Q}) \otimes \Lambda_{0,{\rm nov}}^{{\mathbb Q}}$
with respect to the
filtration induced from one on $\Lambda_{0,{\rm nov}}^{{\mathbb Q}}$
introduced above. We shift the degree by 1, i.e., define
$$
C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})[1]^{\bullet}=C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})^{\bullet +1},
$$
where we define $\deg (P T^{\lambda}e^{\mu})=\deg P +2\mu$ for
$P T^{\lambda}e^{\mu} \in C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$.
We put
\begin{equation}\label{eq:defmk}
\mathfrak m_k =
\sum_{\beta \in G(L)}
\mathfrak m_{k,\beta} \otimes T^{\omega (\beta )}
e^{{\mu_L (\beta)}/{2}}, \quad
k=0,1,\ldots .
\end{equation}
To simplify notation we write $C=C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$. Put
\begin{equation}\label{Bk}
B_k(C[1]) = \underbrace{C[1]\otimes \cdots \otimes C[1]}_{k}
\end{equation}
and take its completion with respect to the energy filtration.
By an abuse of notation, we denote the completion by the same symbol.
We define the {\it bar complex} $B(C[1])= \bigoplus_{k=0}^\infty B_k(C[1])$ and extend $\mathfrak m_k$ to the graded coderivation
$\widehat{\mathfrak m}_k$ on $B(C[1])$ by
\begin{equation}\label{eq:hatmk}
\widehat{\mathfrak m}_k(x_1 \otimes \cdots \otimes x_n) =
\sum_{i=1}^{n-k+1} (-1)^{*}
x_1 \otimes \cdots \otimes \mathfrak m_k(x_i,\dots,x_{i+k-1})
\otimes \cdots \otimes x_n
\end{equation}
where $* = \deg x_1+\cdots+\deg x_{i-1}+i-1$.
We put
\begin{equation}\label{eq:d}
\widehat{d}=\sum _{k=0}^{\infty} \widehat{\mathfrak m}_k : B(C[1]) \longrightarrow B(C[1]).
\end{equation}
\begin{thm}[Theorem 3.5.11 in \cite{fooobook1}]\label{thm:Ainfty}
For any closed relatively spin Lagrangian submanifold $L$ of $M$,
there exist a countably generated subcomplex $C(L;{\mathbb Q})$
of smooth singular cochain complex of $L$ whose cohomology
is isomorphic to $H(L;{\mathbb Q})$ and
a system of multi-sections $\mathfrak s$
of ${\mathcal M}_{k+1}^{{\text {\rm main}}}(J;\beta;P_1,\ldots ,P_k)$
($\mathfrak s$ are chosen depending on $P_i \in C(L;{\mathbb Q})$)
such that
$$
\mathfrak m_k
:
\underbrace{C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})[1] \otimes \dots \otimes
C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})[1]}_{k} \to
C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})[1], \quad k=0,1,\ldots
$$
are defined and satisfy
$\widehat{d} \circ \widehat{d} =0$.
\end{thm}
The equation $\widehat{d}\circ \widehat{d} = 0$ is equivalent to
$$
\sum_{k_1+k_2=k+1}
\sum_i
(-1)^{\deg' x_1 + \cdots + \deg' x_{i-1}}
\mathfrak m_{k_1}(x_1,\ldots,
\mathfrak m_{k_2}
(x_i,\ldots,
x_{i+k_2-1}),\ldots,x_k)=0
$$
which we call the {\it $A_{\infty}$ formulas} or {\it relations}.
Here $\deg'x_i = \deg x_i -1$, the shifted degree.
In particular, the $A_{\infty}$ formulas imply
an equality
$$\mathfrak m_2 (\mathfrak m_0(1), x) +(-1)^{\deg' x}\mathfrak m_2(x,\mathfrak m_0(1)) + \mathfrak m_1 \mathfrak m_1(x)=0,
$$
which shows $\mathfrak m_1 \circ \mathfrak m_1 = 0$ may not hold unless
$\mathfrak m_0(1) = 0$, in general.
So $\mathfrak m_0$ gives an obstruction to define
$\mathfrak m_1$-cohomology.
\begin{defn}\label{def:boundingcochain}
An element $b \in C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})[1]^0$
with $b \equiv 0 \mod \Lambda_{0,{\rm nov}}^+$
is called a {\it solution of the Maurer-Cartan equation}
or {\it bounding cochain}, if it satisfies
the Maurer-Cartan equation:
$$
\mathfrak m_0 (1) + \mathfrak m_{1}(b) + \mathfrak m_2(b,b)
+ \mathfrak m_3(b,b,b) + \dots =0.
$$
Here $\Lambda_{0,{\rm nov}}^+ =
\{ \sum a_i T^{\lambda_i}e^{\mu_i} \in \Lambda_{0,{\rm nov}} ~\vert ~
\lambda_i >0 \}$.
We denote by ${\mathcal M} (L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$
the set of bounding cochains.
We say $L$ is {\it unobstructed} if ${\mathcal M} (L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})
\ne \emptyset$.
\end{defn}
\begin{rem}\label{rem:boundingcochain}
We do not introduce the notion of
gauge equivalence of bounding cochains
(Definition 4.3.1 in \cite{fooobook1}), because we do not use it in this paper.
\end{rem}
If ${\mathcal M} (L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})
\ne \emptyset$, then by using any $b \in {\mathcal M} (L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}})$ we can deform the $A_{\infty}$ structure
$\mathfrak m$ to $\mathfrak m^b$
by
$$
\mathfrak m^b_k(x_1,\dots,x_k)
= \sum_{\ell_0,\dots,\ell_k}
\mathfrak m_{k+\sum \ell_i}(\underbrace{b,\dots,b}_{\ell_0},x_1,
\underbrace{b,\dots,b}_{\ell_1},\dots,
\underbrace{b,\dots,b}_{\ell_{k-1}},x_k,
\underbrace{b,\dots,b}_{\ell_k})
$$
so that $\mathfrak m_1^b \circ \mathfrak m_1^b =0$
(Proposition 3.6.10 in \cite{fooobook1}).
Then we can define
$$
HF((L,b);\Lambda_{0,{\rm nov}}^{{\mathbb Q}}) :=
H(C(L;\Lambda_{0,{\rm nov}}^{{\mathbb Q}}), \mathfrak m_1^b)
$$
which we call {\it Floer cohomology of $L$} (deformed by $b$).
\par\smallskip
In the actual proof of Theorem \ref{thm:Ainfty} given in
Section 7.2 \cite{fooobook2},
we do not construct the filtered $A_{\infty}$ structure at once,
but we first construct a {\it filtered $A_{n,K}$ structure} for any non negative integers $n,K$ and
promote it to a filtered $A_{\infty}$ structure by developing certain
obstruction theory (Subsections 7.2.6--7.2.10 \cite{fooobook2}).
Here we recall the notion of filtered $A_{n,K}$ structure
from Subsection 7.2.6 \cite{fooobook2}, which is mentioned later in the proof of Theorem \ref{Theorem34.20}.
See also Subsection 2.6 and Section 4 \cite{foooYash} for a quick review.
We briefly summarize the obstruction theory in Subsection \ref{subsec:promotion}.
Let $G\subset {\mathbb R}_{\ge 0}\times 2{\mathbb Z}$ be a monoid such that
$\text {pr}_1^{-1}([0,c])$ is finite for any $c\ge 0$ and
$\text {pr}_1^{-1}(0)=\{\beta_0 =(0,0)\}$, where $\text {pr}_i$ denotes
the projection to the $i$-th factor.
We note that in the geometric situation we take $G=G(L)$ introduced above.
For $\betaegin{equation}ta \in G$, we define
\begin{equation}\label{def:betanorm}
\Vert \beta \Vert = \left\{
\begin{array}{ll}
\sup \{ n ~\vert~ \exists \beta_i \in G \setminus \{\beta_0\}, \
\sum_{i=1}^n \beta_i = \beta \} + [{\rm pr}_1(\beta)] -1 &
\text{ if } \beta \neq \beta_0 \\
-1 & \text{ if } \beta = \beta_0,
\end{array}
\right.
\end{equation}
where $[{\rm pr}_1(\beta)]$ stands for the largest integer less than
or equal to ${\rm pr}_1(\beta)$.
Using this, we introduce a partial order on $(G \times {\mathbb Z}_{\geq 0})
\setminus \{(\beta_0,0)\}$ denoted by
$(\beta_1,k_1) < (\beta_2,k_2)$ if and only if
either
$$
\Vert \beta_1 \Vert + k_1 <
\Vert \beta_2 \Vert + k_2
$$ or
$$
\Vert \beta_1 \Vert + k_1 =
\Vert \beta_2 \Vert + k_2 \text{ and }
\Vert \beta_1 \Vert < \Vert \beta_2 \Vert.
$$
We write $(\beta_1,k_1) \sim (\beta_2,k_2)$, when
$$
\Vert \beta_1 \Vert + k_1 =
\Vert \beta_2 \Vert + k_2 \text{ and }
\Vert \beta_1 \Vert = \Vert \beta_2 \Vert.
$$
We denote $(\beta_1,k_1) \lesssim (\beta_2,k_2)$ if
either $(\beta_1,k_1) < (\beta_2,k_2)$ or $(\beta_1,k_1) \sim
(\beta_2,k_2)$.
For non-negative integers $n, n', k, k'$, we also use
the notation $(\beta,k)<(n,k)$, $(n,k)<(n',k')$,
$(\beta,k)\lesssim (n,k)$, $(n,k)\lesssim (n',k')$ etc. in a similar way.
\par
Let $\overline{C}$ be a cochain complex over $R$ and
$C=\overline{C}\otimes \Lambda_{0,\text{nov}}^R$.
Suppose that there is a sequence of $R$ linear maps
$$
{\mathfrak m}_{k,\beta}:B_k(\overline{C}[1]) \to \overline{C}[1]
$$
for $(\beta,k) \in (G \times {\mathbb Z}) \setminus \{(\beta_0,0)\}$
with $(\beta,k) < (n,K)$.
We also suppose that ${\mathfrak m}_{1,\beta_0}$ is
the boundary operator of the cochain complex $\overline{C}$.
\begin{defn}\label{def:AnK}
We call $(C,\{{\mathfrak m}_{k,\beta}\})$
a ($G$-gapped) {\it filtered $A_{n,K}$ algebra}, if the identity
\begin{equation}\label{AnKformula}
\sum_{\beta_1+\beta_2=\beta, k_1+k_2=k+1} \sum_i
(-1)^{\deg'{\mathbf x}_i^{(1)}} {\mathfrak m}_{k_2,\beta_2}
\bigl({\mathbf x}_i^{(1)},
{\mathfrak m}_{k_1,\beta_1}({\mathbf x}_i^{(2)}),
{\mathbf x}_i^{(3)} \bigr) = 0
\end{equation}
holds for all $(\beta,k) < (n,K)$,
where
$$
\Delta^2({\mathbf x})=\sum_i{\mathbf x}_i^{(1)} \otimes
{\mathbf x}_i^{(2)} \otimes {\mathbf x}_i^{(3)}.
$$
Here $\Delta$ is the coproduct of the tensor coalgebra.
\end{defn}
Let $C$, $C'$ be filtered $A_{n,K}$ algebras. We
consider a sequence of $R$ linear maps of degree zero
$$
\mathfrak f_{k,\beta}: B_k(\overline C[1]) \to \overline C'[1]
$$
satisfying $\mathfrak f_{0,\beta_0} = 0$.
\begin{defn}\label{def:AnKhom}
We call $\{\mathfrak f_{k,\beta}\}$ a {\it filtered
$A_{n,K}$ homomorphism}, if the identity
$$\aligned
&\sum_{m,i}\sum_{\beta'+\beta_1+\cdots+\beta_m = \beta}
\sum_{k_1+\cdots+k_m = k}
\mathfrak m_{m,\beta'}\left(
\mathfrak f_{k_1,\beta_1}({\mathbf x}_i^{(1)}),
\cdots,
\mathfrak f_{k_m,\beta_m}({\mathbf x}_i^{(m)})
\right)\\
& =\sum_{\beta_1+\beta_2 = \beta, k_1+k_2 = k+1} \sum_i
(-1)^{\deg'{\mathbf x}_i^{(1)}
}\mathfrak f_{k_2,\beta_2}\left({\mathbf x}_i^{(1)} ,
\mathfrak m_{k_1,\beta_1}({\mathbf x}_i^{(2)}),
{\mathbf x}_i^{(3)}\right)
\endaligned$$
holds for $(\beta,k) \lesssim (n,K)$.
\end{defn}
We also have the notion of
filtered $A_{n,K}$ homotopy equivalences in a natural way.
(See Subsection 7.2.6 \cite{fooobook2}.)
In \cite{fooobook2}, we proved the following:
\begin{thm}[Theorem 7.2.72 in \cite{fooobook2}]\label{ext(n,K)}
Let $C_1$ be a filtered $A_{n,K}$ algebra and
$C_2$ a filtered $A_{n',K'}$ algebra such that
$(n,K) < (n',K')$.
Let ${\mathfrak h}:C_1 \to C_2$
be a filtered $A_{n,K}$ homomorphism.
Suppose that ${\mathfrak h}$ is a filtered $A_{n,K}$ homotopy
equivalence.
Then there exist a filtered $A_{n',K'}$ algebra structure on
$C_1$
extending the given filtered $A_{n,K}$ algebra structure
and a filtered $A_{n',K'}$ homotopy equivalence
$C_1 \to C_2$ extending the given
filtered $A_{n,K}$ homotopy equivalence ${\mathfrak h}$.
\end{thm}
\psiar\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamamallskip
Next, let $(L^{(1)}, L^{(0)})$ be a relatively spin pair of closed Lagrangian submanifolds.
We first assume that $L^{(0)}$ is transverse to $L^{(1)}$.
Let $C(L^{(1)},L^{(0)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii})$ be the free $\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii}$ module
generated by the intersection points $L^{(1)} \chiap L^{(0)}$.
Then we can construct a filtered $A_{\infty}$ bimodule
structure $\{\muathfrak n_{k_1, k_0}\}_{k_1, k_0 =0, 1, \delta} \def\muathbb{D}{\muathbb{D}eltaots}$
on $C(L^{(1)},L^{(0)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii})$ over the pair
$(C(L^{(1)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii}), C(L^{(0)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii}))$
of $A_\infty$ algebras as follows. Here we briefly describe
the map
$$
\alphaligned
\muathfrak n_{k_1,k_0} :
B_{k_1} (C(L^{(1)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii})[1])
\omega} \def\O{\Omegatimes C(L^{(1)},L^{(0)};& \muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii})
\omega} \def\O{\Omegatimes B_{k_0}(C(L^{(0)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii})[1]) \\
& \lambda} \def\La{\Lambdaongrightarrow
C(L^{(1)},L^{(0)};\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii}).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonndaligned
$$
A typical element of the tensor product above
is written as
$$
\left(P_{1}^{(1)}T^{\lambda_{1}^{(1)}}e^{\mu_{1}^{(1)}} \otimes \dots
\otimes P_{k_1}^{(1)}T^{\lambda_{k_1}^{(1)}}e^{\mu_{k_1}^{(1)}}\right)
\otimes T^{\lambda}e^{\mu}\langle p \rangle \otimes
\left(P_{1}^{(0)}T^{\lambda_{1}^{(0)}}e^{\mu_{1}^{(0)}} \otimes \dots
\otimes P_{k_0}^{(0)}T^{\lambda_{k_0}^{(0)}}e^{\mu_{k_0}^{(0)}}\right)
$$
for $p \in L^{(1)}\cap L^{(0)}$. Then $\mathfrak n_{k_1,k_0}$ maps
it to
$$
\sum_{q, B}
\# \left(\mathcal{M}(p,q;B;P_{1}^{(1)},\dots,P_{k_1}^{(1)};
P_{1}^{(0)},\dots,P_{k_0}^{(0)})\right)T^{\lambda'}e^{\mu'} \langle q \rangle
$$
with $\lambda'= \omega (B) +\sum \lambda_i^{(1)} + \lambda + \sum \lambda_i^{(0)}$ and
$\mu' = \mu_{L}(B) +\sum \mu_i^{(1)} + \mu + \sum \mu_i^{(0)}$.
Here $B$ is the homotopy class of Floer trajectories connecting
$p$ and $q$, and the sum is taken over all $(q,B)$ such that
the virtual dimension of the moduli space
$\mathcal{M}(p,q;B;P_{1}^{(1)},\dots,P_{k_1}^{(1)};
P_{1}^{(0)},\dots,P_{k_0}^{(0)})$ of Floer trajectories is zero.
See Subsection 3.7.4 of \cite{fooobook1} for the precise definition of
$$
\mathcal{M}(p,q;B;P_{1}^{(1)},\dots,P_{k_1}^{(1)};
P_{1}^{(0)},\dots,P_{k_0}^{(0)}).
$$
Strictly speaking, we also need to take a suitable system of multi-sections on this
moduli space to obtain the virtual fundamental chain that enters in the construction of
the operators $\mathfrak n_{k_1,k_0}$ defining the desired $A_{\infty}$ bimodule structure.
Because of the usage of multi-sections, the counting number with sign
$$
\# \left(\mathcal{M}(p,q;B;P_{1}^{(1)},\dots,P_{k_1}^{(1)};
P_{1}^{(0)},\dots,P_{k_0}^{(0)})\right)
$$ is
a rational number, in general.
Now let $B(C(L^{(1)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})[1])
\otimes C(L^{(1)},L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})
\otimes B(C(L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})[1])$ be
the completion of
$$\bigoplus_{k_0 \ge 0, k_1 \ge 0} B_{k_1} (C(L^{(1)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})[1])
\otimes C(L^{(1)},L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})
\otimes B_{k_0}(C(L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})[1])
$$
with respect to the induced energy filtration.
We extend $\mathfrak n_{k_1,k_0}$ to a bi-coderivation
on $B(C(L^{(1)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})[1])
\otimes C(L^{(1)},L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})
\otimes B(C(L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})[1])$ which is given by
the formula
\begin{equation}\label{eq:dbimodule}
\aligned
&
\widehat d(x_{1}^{(1)} \otimes \cdots \otimes x_{k_1}^{(1)} \otimes y \otimes
x_{1}^{(0)}
\otimes \cdots \otimes x_{k_0}^{(0)}) \\
= & \sum_{k^{\prime}_1\le k_1,k^{\prime}_0\le k_0}
(-1)^{\deg' x_{1}^{(1)}+\cdots+\deg' x_{k_1-k'_1}^{(1)}}\\
& \quad x_{1}^{(1)} \otimes \cdots \otimes
x_{k_1-k'_1}^{(1)}\otimes
\mathfrak n_{k'_1,k'_0}(x_{k_1-k'_1+1}^{(1)},\cdots,
y,\cdots,x_{k'_0}^{(0)})\otimes
x_{k'_0+1}^{(0)} \otimes \cdots \otimes x_{k_0}^{(0)} \\
& + \widehat {d}^{(1)} (x_{1}^{(1)} \otimes \cdots \otimes x_{k_1}^{(1)}) \otimes
y \otimes x_{1}^{(0)} \otimes \cdots \otimes x_{k_0}^{(0)} \\
& + (-1)^{\Sigma \deg' x_{i}^{(1)} + \deg' y}
x_{1}^{(1)} \otimes \cdots \otimes x_{k_1}^{(1)}
\otimes y \otimes \widehat {d}^{(0)} (x_{1}^{(0)}
\otimes \cdots \otimes x_{k_0}^{(0)}).
\endaligned
\end{equation}
Here $\widehat {d}^{(i)}$ is defined by
(\ref{eq:hatmk}) and (\ref{eq:d}), using
the filtered $A_{\infty}$
structure $\mathfrak {m}^{(i)}$ of $(C(L^{(i)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}}), \mathfrak m^{(i)})$ ($i=0,1$).
\begin{thm}[Theorem 3.7.21 in \cite{fooobook1}]
\label{thm:Ainftybimodule}
For any relatively spin pair $(L^{(1)}, L^{(0)})$
of closed Lagrangian submanifolds, the family of maps $\{ \mathfrak n_{k_1,k_0}\}_{k_1,k_0}$ defines
a filtered $A_{\infty}$ bimodule structure on $C(L^{(1)},L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$
over $(C(L^{(1)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}}),C(L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}}))$.
Namely, $\widehat{d}$ in (\ref{eq:dbimodule}) satisfies
$\widehat{d} \circ \widehat{d} =0$.
\end{thm}
Since the equation $\widehat{d} \circ \widehat{d} =0$ implies, in particular,
$$
\mathfrak n_{0,0}\circ \mathfrak n_{0,0} (y) + \mathfrak n_{1,0}(\mathfrak m_{0}^{(1)}(1),y) + (-1)^{\deg'y}\mathfrak {n}_{0,1}(y,\mathfrak m_{0}^{(0)}(1))
=0,
$$
we have $\mathfrak n_{0,0} \circ \mathfrak n_{0,0} \ne 0$, in general.
However,
if both of $L^{(0)}$ and $L^{(1)}$ are unobstructed
in the sense of Definition \ref{def:boundingcochain},
we can deform the filtered $A_{\infty}$ bimodule
structure $\mathfrak n$ by $b_i \in \mathcal{M} (L^{(i)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$
so that
$$
{}^{b_1}\mathfrak n_{0,0}^{b_0} (y):=
\sum_{k_1, k_0} \mathfrak n_{k_1,k_0}
(\underbrace{b_1,\ldots , b_1}_{k_1}, y,
\underbrace{b_0, \ldots , b_0}_{k_0})
$$
satisfies ${}^{b_1}\mathfrak n_{0,0}^{b_0} \circ {}^{b_1}\mathfrak n_{0,0}^{b_0}=0$ (Lemma 3.7.14 in \cite{fooobook1}).
Then we can define
$$
HF((L^{(1)},b_1),(L^{(0)},b_0);\Lambda_{0,{\rm nov}}^{\mathbb{Q}})
:= H(C(L^{(1)},L^{(0)};\Lambda_{0,{\rm nov}}^{\mathbb{Q}}), {}^{b_1}\mathfrak n_{0,0}^{b_0})
$$
which we call {\it Floer cohomology of a pair $(L^{(1)},L^{(0)})$}
(deformed by $b_1,b_0$).
\par
So far we assume that $L^{(0)}$ is transverse to $L^{(1)}$.
But we can generalize the story to the Bott-Morse case,
that is, each component of $L^{(0)}\cap L^{(1)}$ is a smooth
manifold.
Especially, for the case $L^{(1)}=L^{(0)}$, we have
$\mathfrak n_{k_1,k_0}=\mathfrak m_{k_1+k_0+1}$ (see Example 3.7.6
in \cite{fooobook1}) and an isomorphism
\begin{equation}\label{isoFloer}
HF((L,b),(L,b);\Lambda_{0,{\rm nov}}^{\mathbb{Q}}) \cong HF((L,b);\Lambda_{0,{\rm nov}}^{\mathbb{Q}})
\end{equation}
for any $b \in \mathcal{M}(L;\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$ by Theorem G (G.1) in \cite{fooobook1}.
Moreover, if we extend the coefficient ring $\Lambda_{0,{\rm nov}}^{\mathbb{Q}}$
to $\Lambda_{{\rm nov}}^{\mathbb{Q}}$, we can find that
Hamiltonian isotopies $\psi_i^s : M \to M$ ($i=0,1, s\in [0,1]$) with $\psi_i^0={\rm id}$ and $\psi_i^1=\psi_i$ induce an isomorphism
\begin{equation}\label{invariance}
\aligned
& HF((L^{(1)},b_1),(L^{(0)},b_0);\Lambda_{{\rm nov}}^{\mathbb{Q}}) \\
\cong~
& HF((\psi_1(L^{(1)}),\psi_{1\ast}b_1),(\psi_0(L^{(0)}),\psi_{0\ast}b_0);\Lambda_{{\rm nov}}^{\mathbb{Q}})
\endaligned
\end{equation}
by Theorem G (G.4) in \cite{fooobook1}.
This shows invariance of Floer cohomology
of a pair $(L^{(1)},L^{(0)})$ over $\Lambda_{{\rm nov}}^{\mathbb{Q}}$ under Hamiltonian isotopies.
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubsection{Proofs of Theorem \rhoef{Theorem34.20}, Corollary \rhoef{Corollary34.22}
and Corollary \rhoef{TheoremN} }
\lambda} \def\La{\Lambdaambdabel{subsec:Appl1}
\betaegin{equation}gin{proof}[Proof of Theorem \rhoef{Theorem34.20}]
We consider
the map (\rhoef{38.16}) for the case $m=0$.
It is an automorphism of order $2$.
We first take its quotient by Lemma \rhoef{lem:quot}
(Lemma A1.49 \chiite{fooobook2}) in the sense
of Kuranishi structure, and
take a perturbed multi-section of the quotient space,
which is transverse to zero section.
After that we lift the perturbed multi-section.
Then
we can obtain a system of multi-sections
$\muathfrak s$ on the moduli space
${\muathbb C}M^{{\tauext{\rhom {main}}}}_{k+1}(J;\betaegin{equation}ta ;P_1,\delta} \def\muathbb{D}{\muathbb{D}eltaots,P_k)$
which is preserved by (\rhoef{38.16}).
Then Definition \rhoef{def:mk} yields
the operators $\{\muathfrak m_{k,\betaegin{equation}ta}\}_{k,\betaegin{equation}ta}$
which satisfy the filtered $A_{n,K}$ relations \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonqref{AnKformula}
together with
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonqref{34.21}.
The sign in \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonqref{34.21} follows from Theorem \rhoef{Lemma38.17}.
To complete the proof
we need to promote the filtered $A_{n,K}$ structure to a filtered $A_{\infty}$ structure keeping the symmetry \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonqref{34.21}.
This follows from an involution invariant version
(see Theorem \rhoef{invariantext(n,K)}) of
Theorem \rhoef{ext(n,K)} (=Theorem 7.2.72 \chiite{fooobook2}).
Although the proof of the invariant version is a straightforward modification of
that of Theorem 7.2.72 \chiite{fooobook2},
we give the outline of the argument in Appendix, Subsection
\rhoef{subsec:promotion}, for readers' convenience.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\par
\begin{rem}\label{remonmult}
In general, it is not possible to perturb a section of the obstruction bundle transversal to the zero section by a single-valued
perturbation.
Using multi-valued perturbation, we can take the perturbation, which is transversal to the zero section
and invariant under the action of the stabilizer as well as other finite group actions coming from the symmetry of the problem.
In our case, there may be a fixed point of (\ref{38.16}).
But this does not cause any problem as far as we work with {\it multi}-sections and
study virtual fundamental chains over $\mathbb{Q}$
as in the proof of Theorem \ref{Theorem34.20}.
\end{rem}
\begin{proof}[Proof of Corollary \ref{Corollary34.22}]
For $w : (D^2,\partial D^2) \to (M,L)=(M, \text{\rm Fix}~\tau)$ we define its double
$v : S^2 \to M$ by
$$
v(z) = \begin{cases} w(z) & \quad \text{for $z \in \mathbb{H}$} \\
\tau \circ w(\overline{z}) & \quad \text{for $z\in {\mathbb C}
\setminus \mathbb{H}$,}
\end{cases}
$$
where $(D^2, (-1,1,\sqrt{-1}))$ is identified with
the upper half plane $(\mathbb{H} ,(0,1,\infty))$ and
$S^2 = {\mathbb C} \cup \{ \infty \}$.
Then it is easy to see that
$c_1(TM)[v]=\mu_{L}([w])$.
(See Example \ref{Example38.8.} (1).)
Then the assumption (1) implies that $\mu_L \equiv 0 \mod 4$.
Next
we note the following general lemma.
\begin{lem}\label{c1maslov}
Let $L$ be an oriented Lagrangian submanifold of $M$.
Then the composition
$$
\pi_2 (M) \longrightarrow \pi_2(M,L)
\overset{\mu_L}\longrightarrow {\mathbb Z}
$$
is equal to $2c_1(TM)[\alpha]$ for $[\alpha]\in \pi_2(M)$.
\end{lem}
The proof is easy and so it is omitted.
Then by this lemma the assumption (2) also implies that
the Maslov index of $L$ modulo $4$ is trivial.
Therefore in either case of (1) and (2),
Theorem \ref{Theorem34.20} implies $\mathfrak m_{0,\tau_*\beta}(1) = - \mathfrak m_{0,\beta}(1)$.
On the other hand we have
$$
\mathfrak m_0 (1)=\sum _{\beta \in \pi_2(M,L)} \mathfrak m_{0,\beta}(1)
T^{\omega(\beta)}e^{\mu(\beta)/2}
$$
by Definition \ref{def:mk} and (\ref{eq:defmk}) which is also the same as
$$
\sum_{\beta \in \pi_2(M,L)}
\mathfrak m_{0,\tau_*\beta}(1)T^{\omega(\tau_*\beta)}e^{\mu(\tau_*\beta)/2}
$$
because $\tau_*^2 = {\rm id}$ and $\tau_*:\pi_2(M,L) \to \pi_2(M,L)$ is a one-one correspondence.
Therefore since $\omega(\beta) = \omega(\tau_*\beta)$ and $\mu(\beta) = \mu(\tau_*\beta)$, we can rewrite
$\mathfrak m_0 (1)$ into
$$
\mathfrak m_0 (1) = \frac{1}{2} \sum_{\beta}\left(\mathfrak m_{0,\beta}(1) + \mathfrak m_{0,\tau_*\beta}(1)\right)T^{\omega(\beta)}e^{\mu(\beta)/2}
$$
which becomes 0 by the above parity consideration. Hence $L$ is unobstructed.
Actually, we find that $0$ is a bounding cochain; $0 \in \mathcal{M}(L;\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$.
Furthermore, (\ref{34.21}) implies
\begin{equation}\label{34.23}
\mathfrak m_{2,\beta}(P_1,P_2) = (-1)^{1+\deg'P_1\deg'P_2}
\mathfrak m_{2,\tau_*\beta}(P_2,P_1).
\end{equation}
We denote
\begin{equation}\label{qproduct}
P_1 \cup_Q P_2 : = (-1)^{\deg P_1(\deg P_2+1)} \sum_{\beta}
\mathfrak m_{2,\beta}(P_1,P_2)T^{\omega(\beta)}e^{\mu(\beta)/2}.
\end{equation}
Then a simple calculation shows that (\ref{34.23}) gives
rise to
$$
P_1 \cup_Q P_2 = (-1)^{\deg P_1\deg P_2} P_2 \cup_Q P_1.
$$
Hence $\cup_Q$ is graded commutative.
\end{proof}
\begin{proof}[Proof of Corollary \ref{TheoremN}]
Let $L$ be as in Corollary \ref{TheoremN}.
By Corollary \ref{Corollary34.22}, $L$ is unobstructed.
Since $L = \text{Fix } \tau$, we find that $c_1(TM)\vert_{\pi_2(M)} = 0$ implies $\mu_L = 0$.
Then Theorem E and Theorem 6.1.9 in \cite{fooobook1} show that the Floer cohomology of $L$ over $\Lambda_{{\rm nov}}^{\mathbb{Q}}$
does not vanish for any $b \in \mathcal{M} (L;\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$:
$$
HF((L,b);\Lambda_{{\rm nov}}^{\mathbb{Q}}) \ne 0.
$$
(Note that Theorem E holds not only over
$\Lambda_{0,{\rm nov}}^{\mathbb{Q}}$ but also over $\Lambda_{{\rm nov}}^{\mathbb{Q}}$. See
Theorem 6.1.9.)
By extending the isomorphism
(\ref{isoFloer}) to $\Lambda_{{\rm nov}}^{\mathbb{Q}}$ coefficients
(by taking the tensor product with $\Lambda_{{\rm nov}}^{\mathbb{Q}}$ over
$\Lambda_{0,{\rm nov}}^{\mathbb{Q}}$),
we also have $HF((L,b),(L,b);\Lambda_{{\rm nov}}^{\mathbb{Q}}) \ne 0$.
Therefore by (\ref{invariance}) we obtain
$$
HF((\psi(L),\psi_{\ast}b),(L,b);\Lambda_{{\rm nov}}^{\mathbb{Q}}) \ne 0
$$
which implies $\psi(L) \cap L \ne \emptyset$.
\end{proof}
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubsection{Proofs of Theorem \rhoef{Proposition34.25} and Corollary \rhoef{qMassey}}\lambda} \def\La{\Lambdaambdabel{proof1.9}
In this subsection, we prove Theorem \rhoef{Proposition34.25} and Corollary \rhoef{qMassey}.
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubsubsection{Proof of Theorem \rhoef{Proposition34.25} (1)}\lambda} \def\La{\Lambdaambdabel{1.9(1)}
\begin{proof}[Proof of Theorem \ref{Proposition34.25} (1)]
Let $(N,\omega)$ be a symplectic manifold, $M = N\times N$, and
$\omega_M = -{\rm pr}_1^*\omega_N + {\rm pr}_2^*\omega_N$.
We consider an anti-symplectic involution $\tau : M \to M$ defined by $\tau(x,y) = (y,x)$.
Then $L = \text{\rm Fix}\,\tau \cong N$. Let $J_N$ be a compatible almost complex structure on
$N$, and $J_M = -J_N \otimes 1 +1 \otimes J_N$. The almost
complex structure $J_M$ is compatible with $\omega_M$.
Note that $w_2(T(N\times N))={\rm pr}_1^{\ast}w_2(TN) + {\rm pr}_2^{\ast}w_2(TN)$.
\par
If $N$ is spin, then $L= \text{\rm Fix}\,\tau \cong N$ is $\tau$-relatively spin
by Example \ref{Remark44.18} and $c_1(T(N\times N)) \equiv
w_2(T(N\times N)) \equiv 0 \mod 2$.
Since $\pi_1(L) \to \pi_1(N \times N)$ is injective,
Corollary \ref{Corollary34.22} shows that $L$ is unobstructed
and $\mathfrak m_2$ defines a graded commutative product structure $\cup_Q$ by (\ref{qproduct}).
\par
Suppose that $N$ is not spin. We take a relative spin structure $(V,\sigma)$ on $L= \text{\rm Fix}\,\tau \cong N$ such that
$V={\rm pr}_1^{\ast}(TN)$ and $\sigma$ is the following spin structure on
$(TL \oplus V)\vert_{L} \cong (TL \oplus TL)\vert_{L}$.
Since the composition of the diagonal embedding $SO(n) \to SO(n) \times SO(n)$
and the inclusion $SO(n) \times SO(n) \to SO(2n)$ admits a unique lifting
$SO(n) \to Spin(2n)$, we can equip the bundle $TL \oplus TL$
with a canonical spin structure.
It determines the spin structure $\sigma$ on $(TL \oplus V)\vert_L$.
(In this case, we have $st={\rm pr}_1^{\ast}w_2(TN)$.)
Then clearly we find that $\tau^{\ast} V={\rm pr}_2^{\ast}TN$.
Note that
${\rm pr}_1^*TN$ and ${\rm pr}_2^*TN$ are canonically isomorphic to $TL$ by
the differentials of the projections ${\rm pr}_1$ and ${\rm pr}_2$, respectively.
On the other hand, since $(TL \oplus \tau^{\ast}V)\vert_{L} \cong (TL \oplus TL)\vert_{L} \cong
(TL \oplus V)\vert_{L}$, the spin structure
$\sigma$ is preserved by $\tau$.
Therefore the difference of the conjugacy classes of two relative spin structures
$[(V,\sigma)]$ and $\tau^{\ast}[(V,\sigma)]$ is measured
by
$w_2(V\oplus \tau^{\ast}V)= w_2({\rm pr}_1^{\ast}TN \oplus
{\rm pr}_2^{\ast}TN)$.
Using the canonical spin structure on $TL\oplus TL$
mentioned above,
we can give a trivialization of $V \oplus \tau^*V$
over the 2-skeleton of $L$.
Hence $w_2(V \oplus \tau^*V)$ is regarded as a class in
$H^2(N \times N,L;{\mathbb Z}_2)$.
Since $w_2({\rm pr}_1^{\ast}TN \oplus
{\rm pr}_2^{\ast}TN)=w_2(T(N\times N))\equiv c_1(T(N\times N))
\mod 2$ and $\pi_2(N\times N) \to \pi_2(N\times N, L)$ is surjective, Lemma \ref{c1maslov}
shows that the class is equal to $\mu_L /2$.
Hence by Proposition \ref{Proposition44.16} we obtain the following:
\begin{lem}\label{changespin}
In the above situation,
the identity map
$$
{\mathcal M}(J;\beta)^{[(V,\sigma)]} \longrightarrow
{\mathcal M}(J;\beta)^{\tau^{\ast}[(V,\sigma)]}
$$
is orientation preserving if and only if
$\mu_L(\beta)/2$ is even.
\end{lem}
Combining Theorem \ref{withsimplex}, we find that
the composition
$$
\begin{aligned}
{\mathcal M}^{\rm main}_{k+1} (J;\beta;P_1,\ldots, P_k)^{[(V,\sigma)]}
& \longrightarrow
{\mathcal M}^{\rm main}_{k+1} (J;\beta;P_1,\ldots, P_k)^{\tau^{\ast}[(V,\sigma)]} \\
& \longrightarrow
{\mathcal M}^{\rm main}_{k+1} (J;\beta;P_k,\ldots, P_1)^{[(V,\sigma)]}
\end{aligned}
$$
is orientation preserving if and only if
$$
k+1 +\sum_{1 \le i < j \le k} \deg'P_i\deg'P_j
$$
is even.
It follows that $\mathfrak m_{0,\tau_{\ast}\beta}(1)=-\mathfrak m_{0,\beta}(1)$ and hence we find that
$L= \text{\rm Fix}~\tau \cong N$ is unobstructed.
This finishes the proof of the assertion (1).
Moreover, we also find that $\mathfrak m_2$ satisfies Theorem \ref{Proposition34.25} (\ref{34.23}), which
induces the graded commutative product $\cup_Q$ as well for the non-spin case.
\end{proof}
\subsubsection{Proof of Theorem \ref{Proposition34.25} (2), I: preliminaries}
Before starting the proof of Theorem \ref{Proposition34.25} (2)
we clarify the choice of the bounding cochain $b$ for which this
statement holds.
Note we constructed a filtered $A_{\infty}$ structure on
$C(L;\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$
using $\tau$-invariant Kuranishi structure and $\tau$-invariant perturbation.
As we proved in Subsection \ref{1.9(1)}, $b=0$ is the bounding cochain of this
filtered $A_{\infty}$ structure.
In fact $\mathfrak m_0$ becomes $0$ in the chain level by the cancelation.
This choice $b=0$ is one for which the conclusion of Theorem \ref{Proposition34.25} (2) holds.
\begin{rem}\label{b}
This particular choice $b=0$ does not make sense unless we specify the
particular way to construct our filtered $A_{\infty}$ structure.
Suppose we define a filtered $A_{\infty}$ structure on $C(L;\Lambda_{0,{\rm nov}}^{\mathbb{Q}})$
using a different perturbation.
The set of the gauge equivalence classes of the bounding cochains are independent of
the choice up to isomorphism, so there exists certain
bounding cochain which corresponds to the $0$ of the filtered $A_{\infty}$ structure
defined by the $\tau$-invariant perturbation.
The conclusion of Theorem \ref{Proposition34.25} (2) holds for that $b$ and that
filtered $A_{\infty}$ structure. However $b = 0$ may not hold in this different filtered $A_{\infty}$ structure.
\end{rem}
We now start the proof of Theorem \ref{Proposition34.25} (2).
\par
Firstly we explain the proof of Theorem \ref{Proposition34.25} (2) under the hypothesis that there do not appear
holomorphic disc bubbles.
\par
Let $v : S^2 \to N$ be a $J_N$-holomorphic map. We fix 3 marked points
$0,1,\infty \in S^2 = {\mathbb C} \cup \{\infty\}$. Then we consider the
upper half plane ${\mathbb H} \cup\{\infty\} \subset {\mathbb C}\cup \{\infty\}$ and
define a map $I(v) : {\mathbb H} \to M$ by
$$
I(v)(z) = (v(\overline z),v(z)).
$$
Identifying $({\mathbb H},(0,1,\infty))$ with $(D^2,(-1,1,\sqrt{-1}))$
where $(-1,1,\sqrt{-1}) \in \partial D^2$,
we obtain a map from $(D^2,\partial D^2)$ to $(M,L)$
which we also denote by $I(v)$. One can easily check the converse:
For any given $J_M$-holomorphic map
$w: (D^2,\partial D^2) \cong
({\mathbb H}, {\mathbb R}\cup \{\infty \}) \to (M,L) = (N \times N, \Delta_N)$,
the assignment
$$
v(z) = \begin{cases} {\rm pr}_2 \circ w(z) & \quad \text{for $z \in {\mathbb H}$} \\
{\rm pr}_1 \circ w(\overline z) & \quad \text{for $z\in {\mathbb C}
\setminus {\mathbb H}$}
\end{cases}
$$
defines a $J_N$-holomorphic sphere on $N$.
Therefore the map
$v \mapsto I(v)$ gives an isomorphism between the moduli spaces of
$J_N$-holomorphic spheres and $J_M$-holomorphic discs with boundary in
$N$.
We can easily check that this map is induced by the isomorphism of
Kuranishi structures.
\par
We remark however that this construction works only at the interior of the
moduli spaces of pseudo-holomorphic discs and of pseudo-holomorphic spheres,
that is the moduli spaces of those without bubble.
To study the relationship between compactifications of them
we need some extra argument, which will be explained later in Subsection \ref{proof1.9}.
\par
We next compare the orientations on these moduli spaces.
The moduli spaces of holomorphic spheres have canonical orientation, see, e.g.,
Section 16 in \cite{FO}.
In Chapter 8 \cite{fooobook2}, we proved that a relative spin structure determines
a system of orientations on the moduli spaces of bordered stable maps of genus $0$.
We briefly review a crucial step for comparing orientations in our setting. See p. 677 of \cite{fooobook2}.
\par
Let $w:(D^2, \partial D^2) \to (M,L)$ be a $J_M$-holomorphic map. Denote by $\ell$
the restriction of $w$ to $\partial D^2$.
Consider the Dolbeault operator
\par
$$\overline{\partial}_{(w^*TM, \ell^*TL)}:
W^{1,p}(D^2,\partial D^2;w^*TM, \ell^*TL) \to L^p(D^2;w^*TM \otimes \Lambda_{D^2}^{0,1})
$$
with $p>2$.
We deform this operator to an operator on the union $\Sigma$ of $D^2$ and ${\mathbb C} P^1$ with
the origin $O$ of $D^2$ and the ``south pole'' $S$ of ${\mathbb C} P^1$ identified.
The spin structure $\sigma$ on $TL \oplus V\vert_L$ gives a trivialization of
$\ell^*(TL \oplus V\vert_L)$.
Since $w^*V$ is a vector bundle on the disc, it has a unique trivialization up to homotopy.
Hence $\ell^* V$ inherits a trivialization, which is again unique up to homotopy.
Using this trivialization, we can descend the vector bundle
$E=w^*TM$ to $E'$ on $\Sigma$.
The index problem is reduced to the one for the Dolbeault operator on $\Sigma$.
Namely, the restriction of the direct sum of the following two operators to the fiber product
of the domains with respect to the evaluation maps at $O$ and $S$.
On $D^2$, we have the Dolbeault operator for the trivial vector bundle pair $(\underline{{\mathbb C}^n},
\underline{{\mathbb R}^n})$.
On ${\mathbb C} P^1$, we have the Dolbeault operator for the vector bundle $E'\vert_{{\mathbb C} P^1}$.
The former operator is surjective and its kernel is the space of constant sections in
$\underline{{\mathbb R}^n}$.
The latter has a natural orientation, since it is Dolbeault operator twisted
by $E'\vert_{{\mathbb C} P^1}$ on a closed Riemann surface.
Since the fiber product of kernels is taken on a complex vector space,
the orientation of the index is determined by the orientations of the two operators.
\par
Now we go back to our situation.
Pick a $1$-parameter family $\{\phi_t\}$ of dilations on ${\mathbb C} P^1={\mathbb C} \cup \{ \infty\}$ such that
$\lim_{t \to +\infty} \phi_t(z) = -\sqrt{-1}$ for $z \in {\mathbb C} \cup \{\infty\} \setminus \{\sqrt{-1}\}$.
Here $\sqrt{-1}$ in the upper half plane and $-\sqrt{-1}$ in the lower half plane correspond to
the north pole and the south pole of ${\mathbb C} P^1$, respectively.
As $t \to + \infty$, the boundary of the second factor of the disc
$I(v \circ \phi_t)$ contracts to the point $v(-\sqrt{-1})$ and its image
exhausts the whole image of the sphere $v$, while the whole image of the first factor
contracts to $v(-\sqrt{-1})$.
Therefore as $t \to \infty$ the images of the map $z \mapsto I(v \circ \phi_t)(z)$ converge
to the constant disc at $(v(-\sqrt{-1}),v(-\sqrt{-1}))$ with a sphere
$$
z \in S^2 \mapsto (v(-\sqrt{-1}), v(z))
$$
attached to the point. If we denote $w_t = I(v\circ \phi_t)$, it follows from our choice
$V = {\rm pr}_1^*TN$ that the trivialization of $\ell_t^*V$, which
is obtained by restricting the trivialization of $w_t^*V = ({\rm pr}_1\circ I(v\circ \phi_t))^*TN$,
coincides with the one induced by the frame of the fiber $V$ at $v(-\sqrt{-1})$ for
a sufficiently large $t$. Therefore considering the linearized index of the family
$w_t$ for a large $t$, it follows from the explanation given in the above
paragraph that the map $v \mapsto I(v\circ \phi_t)=w_t$ induces an isomorphism
$$
\det (\text{Index}\, D\overline{\partial}_{J_N}(v)) \cong \det(\text{Index}\, D\overline{\partial}_{J_M}(w_t))
$$
as an oriented vector space.
By flowing the orientation
to $t=0$ by the deformation $\phi_t$, we have proven that the map
$v \mapsto I(v)$ respects the orientations of the moduli spaces.
\par
Now we compare the product $\cup_Q$ in (\ref{qproduct}) and the product on the quantum
cohomology, presuming, for a while, that
they can be calculated by the contribution from the interior of the moduli spaces only.
\par
\begin{defn}\label{equivonpi2}
We define the equivalence relation $\sim$ on $\pi_2(N)$ by
$\alpha \sim \alpha'$ if and only if $c_1(N)[\alpha] = c_1(N) [\alpha']$ and
$\omega (\alpha ) = \omega (\alpha')$.
\end{defn}
For $\beta=[w:(D^2, \partial D^2) \to (N \times N, \Delta_N)] \in \Pi (\Delta_N)$,
we set
$\widetilde{\beta}=[({\rm pr}_2 \circ w) \# ({\rm pr}_1 \circ w): D^2 \cup \overline{D}^2 \to N] \in \pi_2(N)/\sim$,
where $\overline D^2$ is the unit disc with the complex structure reversed and
$D^2 \cup \overline{D}^2$ is the union of discs glued along boundaries.
This defines a homomorphism
\begin{equation}\label{picorresp}
\psi : \Pi(\Delta_N) \to \pi_2(N)/\sim.
\end{equation}
For $\alpha \in \pi_2(N)$ let $\mathcal M_3^{\text{\rm sph,reg}}(J_{N};\alpha)$
be the moduli space of pseudo-holomorphic map
$v : S^2 \to N$ of homotopy class $\alpha$ with three marked points,
(without bubble). For $\rho \in \pi_2(N)/\sim$, we put
$$
\mathcal M_3^{\text{\rm sph,reg}}(J_{N};\rho)
= \bigcup_{\alpha \in \rho}
\mathcal M_3^{\text{\rm sph,reg}}(J_{N};\alpha).
$$
For $\beta \in \Pi(\Delta_N)$, let $\mathcal M_3^{\text{\rm reg}}(J_{N\times N};\beta)$
be the moduli space of pseudo-holomorphic map
$u : (D^2,\partial D^2) \to (N\times N,\Delta_N)$ of class $\beta$ with
three boundary marked points (without disc or sphere bubble).
We denote by $\mathcal M_3^{\text{\rm main,reg}}(\beta)$
the subset that consists of elements in the main component.
We put
$$
\mathcal M_3^{\text{\rm main,reg}}(J_{N\times N};\rho)
= \bigcup_{\psi(\beta) = \rho}
\mathcal M_3^{\text{\rm main,reg}}(J_{N\times N};\beta).
$$
Summing up the above construction, we have the following proposition.
For a later purpose, we define the map
$\overset{\circ}{\mathfrak I}$ by the inverse of $I$.
\begin{prop}\label{regoripres}
\begin{equation}\label{mapI}
\overset{\circ}{\mathfrak I} : \mathcal M_3^{\text{\rm main,reg}}(J_{N\times N};\rho) \to \mathcal M_3^{\text{\rm sph,reg}}(J_{N};\rho)
\end{equation}
is an isomorphism as spaces with Kuranishi structure. Moreover, $\overset{\circ}{\mathfrak I}$ respects the orientations in the sense of
Kuranishi structure.
\end{prop}
Denote by $*$ the quantum cup product on the quantum cohomology
$QH^*(N; \Lambda_{0,{\rm nov}}^{\mathbb{Q}})$.
\par
For cycles $P_0, P_1, P_2$ in $N$ such that
$\dim {\mathcal M}_3^{\text{\rm sph}}(J_N;\rho) = \deg P_0 + \deg P_1 + \deg P_2$,
we can take a multi-valued perturbation, {\it multi-section},
of ${\mathcal M}_3^{\text{\rm sph}}(J_N;\rho)$ such that the intersection of its zero set and
$(ev_0 \times ev_1 \times ev_2 )^{-1}(P_0 \times P_1 \times P_2)$ is a finite subset in
${\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho)$, i.e.,
it does not contain elements with domains with at least two irreducible components.
Counting these zeros with weights, we obtain the intersection number
$$(P_0 \times P_1 \times P_2) \cdot [{\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho)].$$
In other words,
for homology classes $[P_0], [P_1], [P_2] \in H_*(N)$, we take cocycles $a_0, a_1, a_2$
which represent the Poincar\'e dual of $[P_0], [P_1], [P_2]$, respectively.
We can take a multi-section of $\mathcal M_3^{\text{\rm sph}}(J_{N};\rho)$ such that
the intersection of
its zero set and the support of $ev_0^* a_0 \cup ev_1^* a_1 \cup ev_2^* a_2$
is contained in ${\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho)$.
Since the zero set of the multi-valued perturbation of ${\mathcal M}_3^{\text{\rm sph}}(J_N;\rho)$
is compact, $ev_0^* a_0 \cup ev_1^* a_1 \cup ev_2^* a_2$ is regarded as
a cocycle with a compact support.
Using such a multi-valued perturbation, we obtain
$[{\mathcal M}^{\text{\rm sph,reg}}_3(J_{N};\rho)]$, which is a locally finite fundamental cycle.
Thus we find that $(ev_0^*a_0 \cup ev_1^* a_1\cup ev_2^* a_2) [{\mathcal M}^{\text{\rm sph,reg}}(J_N;\rho)]$
makes sense.
\par
The Poincar\'e pairing on cohomology is given by
\begin{equation}\label{cohompairing}
\langle a, b \rangle = (a \cup b) [N].
\end{equation}
By definition we have
\begin{align*}
\langle a_0, a_1*a_2 \rangle
& = \sum_{\rho \in \pi_2(N)/\sim}
( ev_0^* a_0 \cup ev_1^* a_1 \cup ev_2^* a_2)
[{\mathcal M}^{\text{\rm sph}}_3(J_{N};\rho)]T^{\omega (\rho)}e^{c_1(N)[\rho]} \\
& = \sum_{\rho \in \pi_2(N)/\sim}
( ev_0^* a_0 \cup ev_1^* a_1 \cup ev_2^* a_2)
[{\mathcal M}^{\text{\rm sph,reg}}_3(J_{N};\rho)]T^{\omega (\rho)}e^{c_1(N)[\rho]}.
\end{align*}
From the assumption we made at the beginning of this subsection, the map $\mathfrak{m}_2$ is given by
\begin{equation}\label{comefrommain}
\sum_{\beta; \psi(\beta) = \rho}
\mathfrak{m}_{2,\beta} (P_1,P_2)T^{\omega(\beta)}e^{\mu(\beta)/2}
=({\mathcal M}^{\text{main,reg}}_3(J_{N\times N};\rho;P_1,P_2),ev_0)T^{\omega(\beta)}e^{\mu(\beta)/2}.
\end{equation}
Here $P_1$ and $P_2$ are cycles,
and
$$
{\mathcal M}^{\text{main,reg}}_3(J_{N\times N};\rho;P_1,P_2)
= (-1)^{\epsilon} {\mathcal M}^{\text{main,reg}}_3(J_{N\times N};\rho) \times_{N^2} (P_1\times P_2),
$$
where $\epsilon = (\dim \Delta_N +1)\deg P_1= \deg P_1$, see \eqref{epsilonPQ}.
(We also assume that
the right hand side becomes a cycle.)
These assumptions are removed later in Subsections \ref{6.3}, \ref{6.4} and \ref{6.5}.
\par
Taking a homological intersection number with another cycle $P_0$, we have
$$
\begin{aligned}
& P_0 \cdot ({\mathcal M}^{\text{main,reg}}_3(J_{N\times N};\rho;P_1,P_2),ev_0) \\
= & (-1)^{\epsilon} P_0 \cdot \left( {\mathcal M}^{\text{main,reg}}_3(J_{N\times N};\rho) \times _
{(ev_1,ev_2)}(P_1\times P_2), ev_0 \right) \\
= & (-1)^{\epsilon} (ev_0^* PD[P_0] \cup
(ev_1,ev_2)^* PD[P_1 \times P_2] )[{\mathcal M}^{\text{main,reg}}_3(J_{N\times N};\rho)],
\end{aligned}
$$
where $PD[P_i]$, resp. $PD[P_j \times P_k]$ is the Poincar\'e dual of
$P_i$ in $N$, resp. $P_j \times P_k$ in $N \times N$.
We adopt the convention that
\begin{equation}\label{PDconvention}
(PD [P] \cup a) [N] = a [P] \ \ \text{~for~} a \in H^*(N).
\end{equation}
Since $\dim \Delta_N$ is even, \eqref{PDconvention} implies that
$$
ev_1^*PD[P_1] \cup ev_2^*PD[P_2]=(-1)^{\deg P_1 \cdot \deg P_2} (ev_1,ev_2)^*PD[P_1 \times P_2].$$
By identifying ${\mathcal M}_{3}^{\text{\rm sph,reg}}(J_{N};\rho)$ and
${\mathcal M}^{\text{main,reg}}_{3}(J_{N\times N};\rho)$ as spaces with oriented Kuranishi structures, we find that
$$
\langle PD[P_0], PD[P_1] * PD[P_2] \rangle
= (-1)^{\deg P_1 (\deg P_2 +1)} \langle P_0, \mathfrak{m}_2(P_1, P_2) \rangle,
$$
or equivalently,
\begin{equation}
\langle PD[P_1] * PD[P_2], PD[P_0] \rangle
= (-1)^{\deg P_1 (\deg P_2 +1)} \langle \mathfrak{m}_2(P_1, P_2), P_0 \rangle. \label{comparisonprod}
\end{equation}
Here the right hand side is the intersection product of $P_0$ and $\mathfrak{m}_2(P_1, P_2)$.
Namely, we put
\begin{equation}\label{signpairing}
\langle P, Q \rangle = P \cdot Q = (-1)^{\deg P \deg Q} \# (P \times_N Q) = \# (Q \times_N P).
\end{equation}
Note that we use a different convention of the pairing on cycles from \cite{fooobook1}, \cite{fooobook2},
cf. Definition 8.4.6 in \cite{fooobook2}, but the same as one in Definition 3.10.4, Subsection 3.10.1 in \cite{fooomirror1}.
Therefore we observe the following consistency between pairings on homology \eqref{signpairing} and cohomology \eqref{cohompairing}
$$\langle P, Q \rangle = \langle PD [P], PD [Q] \rangle.$$
\subsubsection{Proof of Theorem \ref{Proposition34.25} (2), II: the isomorphism as modules}\label{6.3}
To complete the proof of Theorem \ref{Proposition34.25} (2), we need to
remove the assumption (\ref{comefrommain}), that is, the product ${\mathfrak m}_2(P_1,P_2)$ is
determined only on the part of the moduli space
where there is no bubble.
We study
how our identification of the moduli spaces of pseudo-holomorphic discs (attached to the diagonal
$\Delta_N$) and of pseudo-holomorphic spheres (in $N$) extends to their compactifications
for this purpose.
To study this point, we define the isomorphism in Theorem \ref{Proposition34.25} (2)
as {\it $\Lambda_{0,{\rm nov}}$ modules} more explicitly.
\par
As discussed in the introduction, this isomorphism follows
from the degeneration at $E_2$-level of the spectral sequence of
Theorem D \cite{fooobook1}. The proof of this degeneration is based on the
fact that the image of the differential is contained in the
Poincar\'e dual to the kernel of the inclusion induced homomorphism
$H(\Delta_N;\Lambda_{0,{\rm nov}}) \to H(N \times N;\Lambda_{0,{\rm nov}})$, which
is actually injective in our case.
This fact (Theorem D (D.3) \cite{fooobook1}) is proved by using
the operator $\mathfrak p$ introduced in \cite{fooobook1} Section 3.8.
Therefore to describe this isomorphism we recall
a part of the construction of this operator below.
\par
Let $\beta \in \Pi(\Delta_N) =\pi_2(N\times N, \Delta_N) /\sim$.
We consider $\mathcal M_{1;1}(J_{N\times N};\beta)$,
the moduli space of bordered stable maps of genus zero
with one interior and one boundary marked point in homotopy class $\beta$.
Let $ev_0 : \mathcal M_{1;1}(J_{N\times N};\beta) \to \Delta_N$ be the
evaluation map at the boundary marked point and
$ev_{\text{\rm int}} : \mathcal M_{1;1}(J_{N\times N};\beta) \to N \times N$ be
the evaluation map at the interior marked point.
Let $(P,f)$ be a smooth singular chain in $\Delta_N$. We put
$$
\mathcal M_{1;1}(J_{N\times N};\beta;P)
= \mathcal M_{1;1}(J_{N\times N};\beta) \, {}_{ev_0}\times_f
P.
$$
It has a Kuranishi structure. We take its multisection $\mathfrak s$ and
a triangulation of its zero set $\mathcal M_{1;1}(J_{N\times N};\beta;P)^{\mathfrak s}$.
Then $(\mathcal M_{1;1}(J_{N\times N};\beta;P)^{\mathfrak s},ev_{\text{\rm int}})$ is a singular
chain in $N \times N$, which is by definition
$\mathfrak p_{1,\beta}(P)$.
(See \cite{fooobook1} Definition 3.8.23.)
In our situation, where $\mathfrak m_{0}(1) = 0$ for
$\Delta_N$ in the chain level by Theorem \ref{Proposition34.25} (1), we have:
\par
\begin{lem}\label{pmainformula} We identify $N$ with $\Delta_N$. Then
for any singular chain $P \subset N$ we have
\begin{equation}\label{frakpfrakm}
(-1)^{n+1}\partial_{N \times N} (\mathfrak p_{1,\beta}(P) )
+ \sum_{\beta_1+\beta_2=\beta}
\mathfrak p_{1,\beta_1}(\mathfrak m_{1,\beta_2}(P)) = 0.
\end{equation}
Here $\partial_{N \times N}$ is the boundary operator in the singular chain complex of $N\times N$.
\end{lem}
\begin{rem}
When $\beta = 0$, ${\mathfrak p}_{1,0}$ is the identity map and
the second term in \eqref{frakpfrakm} is equal to ${\mathfrak m}_{1,0}(P)$.
Recalling ${\mathfrak m}_{1,0}(P)=(-1)^n \partial_{\Delta_N}(P)$
in Definition \ref{def:mk}, \eqref{frakpfrakm} turns out to be
\begin{equation}\label{frakpfrakmclasical}
(-1)^{n+1}\partial_{N \times N} (\mathfrak p_{1,0}(P) )
+ (-1)^n
\mathfrak p_{1,0}(\partial_{\Delta_N}(P)) = 0.
\end{equation}
When $\beta \neq 0$, \eqref{frakpfrakm} is equal to
$$
(-1)^{n+1}\partial_{N \times N} (\mathfrak p_{1,\beta}(P) )
+\mathfrak p_{1,\beta}(\mathfrak m_{1,0} (P))
+ \mathfrak p_{1,0}(\mathfrak m_{1,\beta}(P))
+ \sum_{\genfrac{}{}{0pt}{}{\beta_1+\beta_2=\beta,}
{\beta_1, \beta_2 \ne 0}}
\mathfrak p_{1,\beta_1}(\mathfrak m_{1,\beta_2}(P)) = 0.
$$
\end{rem}
Lemma \ref{pmainformula} is a particular case of \cite{fooobook1}
Theorem 3.8.9 (3.8.10.2).
See Remark \ref{rem:signNN} for the sign.
We also note that $\mathfrak p_{1,0}(P) = P$.
(\cite{fooobook1} (3.8.10.1).)
We remark that even in the case when $P$ is a singular cycle $\mathfrak m_1(P)$
may not be zero. In other words the identity map
\begin{equation}\label{notchainmap}
(C(\Delta_N;\Lambda_{0,{\rm nov}}),\partial) \to (C(\Delta_N;\Lambda_{0,{\rm nov}}),\mathfrak m_1)
\end{equation}
is {\it not} a chain map. We use the operator
$$
\mathfrak p_{1,\beta}: C(\Delta_N;\Lambda_{0,{\rm nov}}) \to C(N \times N; \Lambda_{0,{\rm nov}})
$$
to modify the identity map to obtain a chain map (\ref{notchainmap}).
Using the projection to the second factor, we define
$p_2 : N \times N \ni (x,y) \mapsto (y,y) \in \Delta_N$.
We put
$$
\overline{\mathfrak p}_{1,\beta} = p_{2 *} \circ \mathfrak p_{1,\beta}.
$$
Then by applying $p_{2 *}$ to the equation \eqref{frakpfrakm},
we obtain, for $\beta \neq 0$,
\begin{equation}
\label{pbarformula}
-\mathfrak m_{1,0} (\overline{\mathfrak p}_{1,\beta}(P) )
+\overline{\mathfrak p}_{1,\beta}(\mathfrak m_{1,0} (P))
+ \mathfrak m_{1,\beta}(P)
+ \sum_{\genfrac{}{}{0pt}{}{\beta_1+\beta_2=\beta,}
{\beta_1, \beta_2 \ne 0}}
\overline{\mathfrak p}_{1,\beta_1}(\mathfrak m_{1,\beta_2}(P)) = 0.
\end{equation}
\begin{rem}\label{rem:signNN}
The sign in Formula (\ref{frakpfrakm})
looks slightly different from one in
Formula (3.8.10.2) in \cite{fooobook1}.
The sign for $\delta_M$ in \cite{fooobook1}, \cite{fooobook2} was not
specified, since it was not necessary there.
Here we specify it as $\delta_M=(-1)^{n+1}\partial_M$.
This sign is determined by considering the case
when $\beta=0$, which is nothing but (\ref{frakpfrakmclasical}).
Another way to determine this sign is as follows. In the proof of
(3.8.10.2) given in Subsection 3.8.3 \cite{fooobook1}, we use the same argument in the proof of Theorem 3.5.11 (=Theorem \ref{thm:Ainfty} in this article)
where we define $\mathfrak m_{1,0}=(-1)^n\partial_L$
to get the $A_{\infty}$ formula.
The proof of Theorem 3.5.11 uses
Proposition 8.5.1 \cite{fooobook2} which is the case $\ell = \ell_1 =\ell_2 = 0$ in the formulas
in Proposition 8.10.5 \cite{fooobook2}.
On the other hand, in the case of (3.8.10.2)
we use the $0$-th {\it interior} marked point as the output evaluation point instead of the $0$-th {\it boundary} marked point.
Then the proof of (3.8.10.2) uses
Proposition 8.10.4 instead of Proposition 8.10.5 \cite{fooobook2}.
We can see that the difference of every corresponding sign appearing
in Proposition 8.10.4 and Proposition 8.10.5 \cite{fooobook2} is exactly $-1$.
Thus we find that
$\delta_{M}=(-1)^{n+1}\partial_M$ in
the formula (3.8.10.2) (and also (3.8.10.3)) of \cite{fooobook1}.
This difference arises from
the positions of the factors corresponding to the $0$-th {\it boundary} marked point and
the $0$-th {\it interior} marked point
in the definitions of
orientations on $\mathcal{M}_{(1,k),\ell}(\beta)$ and
$\mathcal{M}_{k,(1,\ell)}(\beta)$ respectively. See the formulas given just before Definition 8.10.1 and Definition 8.10.2 \cite{fooobook2} for the
notations $\mathcal{M}_{(1,k),\ell}(\beta)$,
$\mathcal{M}_{k,(1,\ell)}(\beta)$ respectively.
\end{rem}
\begin{defn}\label{P_beta}
For each given singular chain $P$ in $N$,
we put
$$
P(\beta) = \sum_{k=1}^{\infty} \sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}}
(-1)^{k}\left( \overline{\mathfrak p}_{1,\beta_1} \circ \cdots \circ
\overline{\mathfrak p}_{1,\beta_k} \right)(P)
$$
regarding $P$ as a chain in $\Delta_N$. Then we define
a chain $\mathcal I(P) \in C(N;\Lambda_{0,{\rm nov}})$ by
$$
\mathcal I(P) = P + \sum_{\beta\ne 0} P(\beta)T^{\omega(\beta)}e^{\mu(\beta)/2}.
$$
\end{defn}
\begin{lem}
$$
\mathcal I : (C(\Delta_N;\Lambda_{0,{\rm nov}}),\mathfrak m_{1,0}) \to (C(\Delta_N;\Lambda_{0,{\rm nov}}),\mathfrak m_1)
$$
is a chain homotopy equivalence.
\end{lem}
\begin{proof}
We can use \eqref{pbarformula} to show that $\mathcal I$ is a chain map
as follows.
We prove that $\mathfrak m_1 \circ \mathcal I - \mathcal I \circ\mathfrak m_{1,0} \equiv 0
\mod T^{\omega(\beta)}$ by induction on $\omega(\beta)$.
We assume that it holds modulo $T^{\omega(\beta)}$ and
will study the terms of order $T^{\omega(\beta)}$.
Those terms are sum of
\begin{equation}\label{ordbetaterms1}
\mathfrak m_{1,0}(P(\beta)) + {\mathfrak m}_{1, \beta} (P)+
\sum_{\genfrac{}{}{0pt}{}{\beta_1+\beta_2=\beta}{\beta_i \ne 0}}
\mathfrak m_{1,\beta_1}(P(\beta_2)) - ({\mathfrak m}_{1,0} P)(\beta),
\end{equation}
for given $\omega(\beta)$'s.
By definition, (\ref{ordbetaterms1}) becomes:
\begin{equation}\label{ordbetaterms2}
\aligned
&\sum_{k=1,2,\dots}\sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}} (-1)^k \left(\mathfrak m_{1,0} \circ
\overline{\mathfrak p}_{1,\beta_1} \circ \cdots \circ
\overline{\mathfrak p}_{1,\beta_k}\right)(P) \\
&+\sum_{k=1,2,\dots}
\sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}} (-1)^{k-1} \left(\mathfrak m_{1,\beta_1} \circ
\overline{\mathfrak p}_{1,\beta_2} \circ \cdots \circ
\overline{\mathfrak p}_{1,\beta_k}\right)(P) \\
& - ({\mathfrak m}_{1,0} P)(\beta).
\endaligned
\end{equation}
Using \eqref{pbarformula}
we can show that \eqref{ordbetaterms2} is equal to
\begin{equation}\label{ordbetaterms3}
\aligned
&\sum_{k=1,2,\dots}
\sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}} (-1)^k \left( \overline{\mathfrak p}_{1,\beta_1}\circ
\mathfrak m_{1,0} \circ \overline{\mathfrak p}_{1,\beta_2}\circ\cdots \circ
\overline{\mathfrak p}_{1,\beta_k}\right)(P) \\
&+\sum_{k=1,2,\dots}
\sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}} (-1)^k \left( \overline{\mathfrak p}_{1,\beta_1} \circ
\mathfrak m_{1,\beta_2} \circ \overline{\mathfrak p}_{1,\beta_3} \circ \cdots \circ
\overline{\mathfrak p}_{1,\beta_k}\right)(P) \\
& - \sum_{k=1,2,\dots}
\sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}} (-1)^k \left( \overline{\mathfrak p}_{1,\beta_1} \circ
\overline{\mathfrak p}_{1,\beta_2} \circ \cdots \circ \overline{\mathfrak p}_{1,\beta_{k}} \circ {\mathfrak m}_{1,0} \right)(P) \\
= & \sum_{k=1,2,\dots}
\sum_{\beta_1 \neq 0} \overline{\mathfrak p}_{1,\beta_1} \circ
\sum_{\genfrac{}{}{0pt}{}{\beta_1+\dots+\beta_k = \beta}
{\beta_i \ne 0}} (-1)^k \bigl(
\mathfrak m_{1,0} \circ \overline{\mathfrak p}_{1,\beta_2}\circ\cdots \circ
\overline{\mathfrak p}_{1,\beta_k}(P) \\
& + \mathfrak m_{1,\beta_2} \circ \overline{\mathfrak p}_{1,\beta_3} \circ \cdots \circ
\overline{\mathfrak p}_{1,\beta_k}(P)
- \overline{\mathfrak p}_{1,\beta_2} \circ \cdots \circ \overline{\mathfrak p}_{1,\beta_{k}} \circ {\mathfrak m}_{1,0}(P)
\bigr).
\endaligned
\end{equation}
(\ref{ordbetaterms3}) vanishes by induction hypothesis.
\par
On the other hand $\mathcal I$ is identity modulo $\Lambda_{0,{\rm nov}}^+$.
The lemma follows by the standard spectral sequence argument.
\end{proof}
Thus we obtain an isomorphism as $\Lambda_{0,{\rm nov}}$-modules
$$
\mathcal I_{\#} : H(N;\Lambda_{0,{\rm nov}}) \cong HF(\Delta_N,\Delta_N;\Lambda_{0,{\rm nov}}).
$$
In order to complete the proof of Theorem \ref{Proposition34.25} (2),
we need to prove
\begin{equation}\label{diagmaineq'}
\langle
\mathcal I(P_1) \cup_Q \mathcal I(P_2),\mathcal I(P_0)\rangle
=
\langle
PD[P_1] * PD[P_2],PD[P_0]\rangle,
\end{equation}
i.e.,
\begin{equation}\label{diagmaineq}
(-1)^{\deg P_1 (\deg P_2 +1)}
\langle
\mathfrak m_2(\mathcal I(P_1),\mathcal I(P_2)),\mathcal I(P_0)\rangle
=
\langle
PD[P_1] * PD[P_2],PD[P_0]\rangle.
\end{equation}
For the orientation of the moduli spaces which define the operations in \eqref{diagmaineq},
see
Sublemma \ref{sublemma}, \eqref{M_{1,1}}-\eqref{def:Mhat} for the left hand side
and Definition \ref{notationmoduli} for the right hand side.
We will prove (\ref{diagmaineq}) in the next two subsections.
\subsubsection{Proof of Theorem \ref{Proposition34.25} (2), III: moduli space of stable
maps with circle system}\label{6.4}
To define the left hand side of \eqref{diagmaineq} we use
the moduli spaces and multisections used in Lagrangian Floer theory, while we use
other moduli spaces and multisections to define the right hand side of \eqref{diagmaineq}.
Recall that in
Theorem \ref{thm:Ainfty} and Theorem \ref{Theorem34.20}
we took particular multisections to get the
$A_{\infty}$ structure. The point of the whole proof we will give here
is to show that two
multisections, one used for Lagrangian Floer theory and
the other one more directly related to quantum cup product,
can be homotoped in the {\it moduli space of stable maps
with circle system}, which will be introduced in Definition \ref{StableMapsWithCircles}, so that those two multisections
finally give the same answer.
The purpose of this subsection is to define the moduli space of stable maps
with circle system and to describe the topology and the Kuranishi structure in detail. In the next subsection we will compare two moduli spaces and multisections to complete the proof by interpolating the moduli space of stable maps
with circle system.
\par
We consider a class $\rho \in \pi_2(N)/\sim$.
Put
$$
\mathcal M_{3}^{\text{\rm sph}}(J_N;\rho) = \bigcup_{\alpha \in \rho}
\mathcal M_{3}^{\text{\rm sph}}(J_N;\alpha),
$$
where $\mathcal M_{3}^{\text{\rm sph}}(J_N;\alpha)$ is the moduli space
of stable maps of genus $0$ with $3$ marked points and of homotopy class
$\alpha$.
Let $((\Sigma,(z_0,z_1,z_2)),v)$ be a representative of its element.
We decompose $\Sigma = \bigcup \Sigma_a$ into irreducible components.
\begin{defn}
\begin{enumerate}
\item
Let $\Sigma_0 \subset \Sigma$ be the minimal connected union of
irreducible components containing three marked points $z_0,z_1,z_2$.
\item
An irreducible component $\Sigma_a$ is said to be
{\it Type I} if it is contained in $\Sigma_0$.
Otherwise it is said to be {\it Type II}.
\item
Let $\Sigma_a$ be an irreducible component of Type I.
Let $k_a$ be the number of its singular points in
$\Sigma_a$ which do not intersect irreducible components of $\Sigma \setminus \Sigma_0$.
Let $k'_a$ be the number of marked points on $\Sigma_a$.
It is easy to see that $k_a + k'_a$ is either $2$ or $3$.
(See Lemma \ref{TypeI} below or the proof.)
We say that $\Sigma_a$ is {\it Type I-1} if $k_a + k'_a = 3$ and
{\it Type I-2} if $k_a + k'_a = 2$.
We call these $k_a+k'_a$ points
{\it the interior special points}.
\end{enumerate}
\end{defn}
\begin{lem}\label{TypeI} Let $k_a$, $k_a'$ be the numbers defined above.
Then $k_a + k'_a$ is either $2$ or $3$. Moreover, there exists exactly one irreducible component of
Type I-1.
\end{lem}
\begin{proof} Consider the dual graph $T$ of the prestable curve $\Sigma_0$.
Note that $T$ is a tree with 3 exterior edges with a finite number of interior vertices.
Therefore it follows that there is a unique 3-valent vertex and all others are 2-valent.
Since the number $k_a + k'_a$ is precisely the valence of the vertex associated to
the component $\Sigma_a$, the first statement follows.
Furthermore since the dichotomy of $k_a + k'_a$ being $3$ and $2$ corresponds to
the component $\Sigma_a$ being of Type I-1 and of Type I-2 respectively,
the second statement follows.
\end{proof}
\par
Our next task is to relate a bordered stable map to $(N \times N, \Delta_N)$ of genus zero
with three boundary marked points to a stable map to $N$ of genus 0 with three marked points.
For this purpose, we introduce the notion of stable maps of genus 0 with circle systems,
see Definition \ref{StableMapsWithCircles}.
\par
Firstly, we fix a terminology ``circle'' on the Riemann sphere.
We call a subset $C$ in ${\mathbb C} P^1$ a {\it circle}, if it is the image of ${\mathbb R} \cup \{ \infty \}$
by a projective linear transformation $\phi$ as in complex analysis, i.e., $\phi({\mathbb R} \cup \{ \infty \}) =C$.
From now on, we only consider $C$ with an orientation. The projective linear transformation $\phi$ can be chosen
so that the orientation coincides with the one as the boundary of the upper half space ${\mathbb H}$.
\par
For a pseudoholomorphic disc $w$ in $(N \times N, J_{N \times N})$ with boundary on the diagonal $\Delta_N$,
the map $\overset{\circ}{\mathfrak I}$ in Proposition \ref{regoripres} gives a pseudoholomorphic sphere $v=\overset{\circ}{\mathfrak I}(w)$ in $(N,J_N)$.
For three boundary marked points $z_0, z_1, z_2$ of the domain of $w$, we have corresponding marked points,
which we also denote by $z_0, z_1, z_2$ by an abuse of notation, on the Riemann sphere, which is the domain of $v$.
We also note that the boundary of the disc corresponds to the circle passing through $z_0, z_1, z_2$ on the Riemann sphere.
Thus, when describing the pseudoholomorphic sphere corresponding to a pseudoholomorphic disc,
we regard it as a stable map of genus $0$ with the image of
the boundary of the disc, which is a circle on the Riemann sphere.
\begin{rem}\label{bubbletree}
\begin{enumerate}
\item For each genus $0$ bordered stable map to $(N \times N, \Delta_N)$, we construct a genus $0$ stable map to $N$ as follows.
The construction goes componentwise. For a disc component, we apply $\overset{\circ}{\mathfrak I}$ as we explained above.
For a bubble tree $w^{\text{\rm bt}}$ of pseudoholomorphic spheres attached to a disc component at $z^{\text{\rm int}}$,
we attach $({\rm pr}_1)_* w^{\text{\rm bt}}$ (resp. $({\rm pr}_2)_* w^{\text{\rm bt}}$) to the lower hemisphere at $\overline{z^{\text{\rm int}}}$
(resp. the upper hemisphere at $z^{\text{\rm int}}$). Here $({\rm pr}_i)_* w^{\text{\rm bt}}$ is ${\rm pr}_i \circ w^{\text{\rm bt}}$ with each unstable
component shrunk to a point. If ${\rm pr}_i \circ w^{\text{\rm bt}}$ is unstable, we do not attach it to the Riemann sphere.
\item For $\beta \in \pi_2(N \times N, \Delta_N)$, pick a representative $w:(D^2, \partial D^2) \to (N\times N, \Delta_N)$.
Although $w$ is not necessarily pseudoholomorphic, we have $v: S^2 \to N$ in the same way as $\overset{\circ}{\mathfrak I}$.
We call the class $[v] \in \pi_2(N)$ the {\it double} of the class $[w] \in \pi_2(N \times N, \Delta_N)$.
\end{enumerate}
\end{rem}
In preparation for the definition of stable maps of genus 0 with circle systems, we define {\it admissible systems of circles}
in Definitions \ref{def:circlesystemI}, \ref{TypeII} and \ref{circlesonSigma}.
\par
Definition \ref{def:circlesystemI} includes the case of moduli space representing
various terms of \eqref{diagmaineq},
that is, a union of doubles of several discs.
We glue them at a boundary marked point of one
component and an interior or a boundary marked
point with the other component.
\par
Let $\Sigma_a$ be an irreducible component of $\Sigma$ where
$((\Sigma,(z_0,z_1,z_2)),v)$ is an element of $\mathcal M_{3}^{\text{\rm sph}}(J_N;\alpha)$.
A {\it domain which bounds $C_a$} is a disc in ${\mathbb C} P^1$ whose boundary (together with orientation)
is $C_a$.
We decompose $\Sigma_a$ into the union
of two discs $D_a^\pm$ so that ${\mathbb C} P^1 = D_a^+ \cup D_a^-$
where $\partial D_a^+ = C_a$ as an oriented manifold. Then
$\partial D_a^- = - C_a$ as an oriented manifold.
\par
Now consider a component $\Sigma_a$ of Type I-2.
We say an interior special point of $\Sigma_a$ is {\it inward} if it is contained
in the connected component of the closure of $\Sigma_0 \setminus \Sigma_a$
that contains the unique Type I-1 component.
Otherwise it is called {\it outward}.
(An inward interior special point must necessarily be a singular point.)
Note that each Type I-2 component contains a unique inward interior marked point and a unique outward interior marked point.
\begin{defn}\label{def:circlesystemI}
An {\it admissible system of circles of Type I} for $((\Sigma,(z_0,z_1,z_2)),v)$
is an assignment of $C_a$, which is a circle or an empty set (which may occur in the case (2) below, see Example \ref{example} (2)), to each irreducible component $\Sigma_a$ of Type I,
such that the following holds:
\begin{enumerate}
\item
If $\Sigma_a$ is Type I-1, $C_a$ contains all the three interior special points.
\item Let $\Sigma_a$ be Type I-2.
If $C_a$ is not empty, we require the following two conditions.
\begin{itemize}
\item $C_a$ contains the outward interior special point.
\item The inward interior special point lies on the disc $D_a^+$, namely either on $C_a$ or $\text{\rm Int }D_a^+$.
If the inward interior special point $p$ lies on $C_a$, the circle $C_{a'}$ on the adjacent component $\Sigma_{a'}$ contains
$p$. Here $\Sigma_{a'}$ meets $\Sigma_a$ at the node $p$.
\end{itemize}
\item
Denote by $\Sigma_{a_0}$ the unique irreducible component of Type I-1 and
let $C$ be the maximal connected union of $C_a$'s containing $C_{a_0}$.
If $C$ contains all $z_i$, we require the orientation of $C$
to respect the cyclic order of $(z_0,z_1,z_2)$.
If some $z_i$ is not on $C$, we instead consider the
following point $z_i'$ on $C$ described below and require that the orientation of $C$
respects the cyclic order of $(z_0',z_1',z_2')$:
There exists a unique irreducible component $\Sigma_{a}$ such that
$C$ is contained in a connected component of the closure of $\Sigma_0 \setminus \Sigma_{a}$,
$z_i$ is contained in the other connected component or $\Sigma_a$,
and that $C$ intersects $\Sigma_a$ (at the inward interior special point of
$\Sigma_a$). Then the point $z_i'$ is this inward interior
special point of $\Sigma_a$.
\end{enumerate}
\end{defn}
\begin{exm}\label{example}
(1) Let us consider the admissible system of circles as in Figure 1
below. The left sphere is of Type I-2 and the right sphere is of Type I-1.
The circle in the right sphere is $C$ in Definition
\ref{def:circlesystemI} (3).
\par
\centerline{
\epsfbox{InvolutionFigure1.eps}}
\par
\centerline{\bf Figure 1}
\par\smallskip
This is the double of the following configuration:
\par
\centerline{
\epsfbox{InvolutionFigure2.eps}}
\par
\centerline{\bf Figure 2}
\par\smallskip
The moduli space of such configurations is
identified with the moduli space that is used to define
\begin{equation}\label{Fig12eq}
\langle \mathfrak m_{2,\beta_2}(P_1,P_2),
\overline{\mathfrak p}_{1,\beta_1}(P_0)\rangle.
\end{equation}
(2) Type I-2 components may not have circles. For example, see Figure 3.
\par\smallskip
\centerline{
\epsfbox{InvolutionNewFIg3.eps}}
\par
\centerline{\bf Figure 3}
\par\smallskip
\end{exm}
We next discuss the admissible system of circles on the irreducible
components of Type II.
A {\it connected component of Type II} of $\Sigma$
is by definition the closure of a connected component of
$\Sigma \setminus \Sigma_0$.
Each connected component of Type II intersects
$\Sigma_0$ at one point.
We call this point the {\it root} of our connected component of Type II.
\par
We denote by $\Sigma_\rho$ a connected component of Type II
and decompose it into the irreducible components:
$$
\Sigma_{\rho} = \bigcup_{a \in I_{\rho}}\Sigma_a.
$$
Then we consider a Type II irreducible component $\Sigma_a$
contained in a $\Sigma_{\rho}$.
If $\Sigma_a$ does not contain the root of $\Sigma_{\rho}$,
we consider the connected component
of the closure of $\Sigma_{\rho} \setminus \Sigma_a$ that
contains the root of $\Sigma_{\rho}$.
Then, there is a unique singular point of $\Sigma_a$
contained therein. We call this singular point the {\it root} of $\Sigma_a$.
Note that if $\Sigma_a$ contains the root of $\Sigma_{\rho}$,
it is, by definition, the root of $\Sigma_a$.
\begin{defn}\label{TypeII}
Let an admissible system of circles of Type I on
$\Sigma$ be given. We define an {\it admissible system of circles of Type II} on $\Sigma_{\rho}$ to
be a union
$$
C_{\rho} = \bigcup_{a \in I_{\rho}} C_a
$$
in which $C_a$ is either a circle or an empty set and which
we require to satisfy the following:
\begin{enumerate}
\item
If the root of $\Sigma_{\rho}$ is not contained in our system
of circles of Type I, then
all of $C_a$ are empty set.
\item
If $C_{\rho}$ is nonempty, then it is connected and
contains the root of $\Sigma_{\rho}$.
\item Let $\Sigma_a$ be a Type II irreducible component
contained in $\Sigma_{\rho}$ and
let $\Sigma_b$ be the irreducible component
of $\Sigma$ that contains the root of $\Sigma_a$ and such that $a \ne b$.
If the root of $\Sigma_a$ is contained in $C_b$, we require
$C_a$ to be nonempty.
\end{enumerate}
\end{defn}
\begin{defn}\label{circlesonSigma}
An {\it admissible system of circles} on $\Sigma$ is, by definition,
an admissible system of circles of Type I together with that of Type II
on each connected component of Type II.
\end{defn}
\par
\begin{defn}\label{smoothable}
Let $\Sigma = \cup_a \Sigma_a$ be the decomposition into irreducible components and $\{C_a\}$ the admissible system of circles.
($C_a$ is either a circle or an empty set.)
Let $p$ be a node joining components $\Sigma_a$ and $\Sigma_b$.
We call $p$ a {\it non-smoothable} node if $p$ lies on exactly one of $C_a$ and $C_b$. Otherwise, we call $p$ a {\it smoothable} node.
That is, a node $p$ is smoothable, if and only if one of the following conditions holds: (1) $p \in C_a$ and $p \in C_b$, (2) $p \notin C_a$ and $p \notin C_b$.
\end{defn}
\begin{rem}\label{gluesmoothablenode}
For a smoothable node $p$ joining two components $\Sigma_a$ and $\Sigma_b$, we can glue them in the following way.
We call such a process the {\it smoothing} at the smoothable node $p$.
If neither $C_a \subset \Sigma_a$ nor $C_b \subset \Sigma_b$ contains the node $p$, we can perform the gluing of stable maps at the node $p$.
(In such a case, the admissibility of the circle system prohibits that both $C_a$ and $C_b$ are non-empty.)
If both $C_a$ and $C_b$ contain the node $p$, we choose a complex coordinate $z_a$, resp. $z_b$, of $\Sigma_a$, resp. $\Sigma_b$, around $p$
such that $C_a$, resp. $C_b$ is described as the real axis with the standard orientation.
Here we give an orientation on the real axis by the positive direction.
Gluing $\Sigma_a$ and $\Sigma_b$ by $z_a \cdot z_b = -t$, $t \in [0, \infty)$, we obtain the gluing of stable maps such that $C_a$ and $C_b$ are
glued to an oriented circle.
\end{rem}
\begin{defn}\label{StableMapsWithCircles}
Let $\Sigma=\bigcup_a \Sigma_a$ be a prestable curve, i.e., a singular Riemann surface
of genus $0$ at worst with nodal singularities,
$z_0,z_1,z_2$ marked points on the smooth part of $\Sigma$, $u:\Sigma \to N$ a holomorphic map
and let $C_a \subset \Sigma_a$ be either an oriented circle or an empty set.
We call ${\mathbf x}=(\Sigma, z_0,z_1,z_2, \{C_a\}, u:\Sigma \to N)$ a {\it stable map of genus $0$ with circle system}, if the following conditions are
satisfied:
\begin{enumerate}
\item $\{C_a\}$ is an admissible system of circles in the sense of Definition \ref{circlesonSigma}.\\
\item Let $P$ be the set of non-smoothable nodes on $(\Sigma, z_0,z_1,z_2, \{C_a\})$.
For the closure $\Sigma'$ of each connected component of $\Sigma \setminus P$, one of the following conditions holds.
\begin{enumerate}
\item With circles forgotten, $u\vert_{\Sigma'} :\Sigma' \to N$ is still a stable map. Here we put marked points $(\{z_0,z_1,z_2\} \cup P) \cap \Sigma'$ on $\Sigma'$. \\
\item The map $u$ is non-constant on some irreducible component of $\Sigma'$.
\end{enumerate}
\item The automorphism group ${\rm Aut} ({\mathbf x})$ is finite. Here we set
${\rm Aut} ({\mathbf x})$ the group of automorphisms $\phi$ of the singular Riemann surface $(\Sigma, z_0,z_1,z_2)$ such that $u \circ \phi = u$ and
$\phi (\cup C_a) = \cup C_a$.
\end{enumerate}
\end{defn}
Since we only consider stable maps of genus $0$, we omit ``of genus $0$'' from now on.
\begin{rem}\label{remcircle}
\begin{enumerate}
\item If $C_a \neq \emptyset$, $C_a$ must contain a node or a marked point. \\
\item Let $\Sigma_a$ be an irreducible component, which becomes unstable after the circle system forgotten, i.e.,
the map $u$ is constant on $\Sigma_a$ and the number of special points contained in $\Sigma_a$ is less than $3$.
Here a special point means a marked point or a node.
In such a case, we find that the number of special points on $\Sigma_a$ is exactly $2$ and exactly one of them is
on $C_a$.
The Type I-1 component contains three special points, hence it cannot be such a component.
There are three possibilities, case (i), case (ii$+$), case (ii$-$) described in Definition \ref{unstwocirc} below.
\end{enumerate}
\end{rem}
By an abuse of terminology, we call the inward special point of a Type I-2 component $\Sigma_a$ the root of $\Sigma_a$.
(The definition of the root of a Type II component is given just before Definition \ref{TypeII}.)
\begin{defn}\label{unstwocirc}
Let $\Sigma_a$ be an irreducible component, which becomes unstable after the circle system forgotten
as we discussed in Remark \ref{remcircle} (2).
There are the following three cases (see Figure 4):
\begin{enumerate}
\item [(\text{\rm i})]
the root is in $\text{\rm Int }D_a^+$ and another special point is on $C_a$,
\item[(\text{\rm ii}$+$)]
the root is on $C_a$ and the other node is contained in $\text{\rm Int }D_a^+$,
\item[(\text{\rm ii}$-$)]
the root is on $C_a$ and the other node is contained in $\text{\rm Int }D_a^-$.
\end{enumerate}
\end{defn}
\par
\centerline{
\epsfbox{InvolutionFigure3.eps}}
\par
\centerline{\bf Figure 4}
\par\smallskip
\begin{rem}\label{remcircle2}
Suppose that $\Sigma_a$ is a component of case (i). Since the root is not on $C_a$, $\Sigma_a$ must be Type I.
Suppose that $\Sigma_a$ is a component of case (ii). Since the node, which is different from the root, is not on $C_a$, $\Sigma_a$ must be
Type II.
\end{rem}
\begin{lem}\label{adjacentleq2}
Let ${\mathbf x}=(\Sigma, z_0,z_1,z_2, \{C_a\}, u:\Sigma \to N)$ be a stable map of genus $0$ with circle system.
If two adjacent components become unstable when forgetting the circle system
in the sense of Remark \ref{remcircle} (2), both of them are of case (i).
Moreover, there cannot appear more than two consecutive components, which become unstable after the circle system forgotten.
\end{lem}
\begin{proof}
Let $\Sigma_{a_1}, \Sigma_{a_2}$ be adjacent components which become unstable after the circle system forgotten.
Without loss of generality, we assume that the root of $\Sigma_{a_2}$ is attached to $\Sigma_{a_1}$.
Then there are three cases:
\begin{enumerate}
\item[{(A)}] $\Sigma_{a_1}, \Sigma_{a_2}$ are Type I.
\item[{(B)}] $\Sigma_{a_1}$ is Type I and $\Sigma_{a_2}$ is Type II.
\item[{(C)}] $\Sigma_{a_1}, \Sigma_{a_2}$ are Type II.
\end{enumerate}
By Remark \ref{remcircle2}, we find that, in each case,
\begin{enumerate}
\item[{(A)}] $\Sigma_{a_1}, \Sigma_{a_2}$ belong to case (i).
\item[{(B)}] $\Sigma_{a_1}$ belongs to case (i) and $\Sigma_{a_2}$ belongs to case (ii).
\item[{(C)}] $\Sigma_{a_1}, \Sigma_{a_2}$ belong to case (ii).
\end{enumerate}
Consider Case (B). Since $\Sigma_{a_1}$ is Type I, $C_{a_1}$ must contain either one of $z_0, z_1, z_2$ or a root of a component of Type I.
But the node at $\Sigma_{a_1} \cap \Sigma_{a_2}$ is the only special point on $C_{a_1}$ and $\Sigma_{a_2}$ is Type II. Hence Case (B) cannot occur.
Next consider Case (C). In this case, $\Sigma_{a_2}$ is Type II, but its root is not on $C_{a_1}$. Hence Case (C) cannot occur.
Therefore the only remaining case is (A). Namely, both $\Sigma_{a_1}$ and $\Sigma_{a_2}$ are Type I, hence case (i) in Definition \ref{unstwocirc}.
\par
Suppose that $\Sigma_{a_1}, \Sigma_{a_2}, \Sigma_{a_3}$ are consecutive components, which become unstable when forgetting the circle system.
As we just showed, all of them are case (i). Note that the nodes $\Sigma_{a_1} \cap \Sigma_{a_2}$, $\Sigma_{a_2} \cap \Sigma_{a_3}$ are non-smoothable
nodes. Thus the middle component $\Sigma_{a_2}$ does not satisfy Condition (2) in Definition \ref{StableMapsWithCircles}. Hence the proof.
\end{proof}
\begin{defn}\label{notationmoduli}
We denote by $\mathcal M^{\text{sph}}_3(J_N;\alpha)$ (resp. $\mathcal M^{\text{sph}}_3(J_N;\alpha;\mathcal C)$)
the moduli space consisting of stable maps (resp. stable maps with circle system) with three marked points
$\vec z=(z_0,z_1,z_2)$ representing class $\alpha$.
We put
$$\aligned
\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;P_1,P_2,P_0)
&=
P_0 \times_{ev_0} \left(\mathcal M^{\text{\rm sph}}_3(J_N;\alpha) {}_{(ev_1,ev_2)}
\times (P_1 \times P_2) \right)\\
\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C;P_1,P_2,P_0)
&=
P_0 \times_{ev_0} \left(\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C) {}_{(ev_1,ev_2)}
\times (P_1 \times P_2)\right).
\endaligned$$
As a space with oriented Kuranishi structure, we define
$$
\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;P_1,P_2,P_0)
= (-1)^{\deg P_1 \deg P_2} P_0 \times_{ev_0}
\left(\mathcal M^{\text{\rm sph}}_3(J_N;\alpha) {}_{(ev_1,ev_2)}
\times (P_1 \times P_2)\right).
$$
To define the orientation on $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C;P_1,P_2,P_0)$, we consider
$$
\aligned
\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha;P_1,P_2,P_0)
& = (-1)^{\deg P_1 \deg P_2} P_0 \times_{ev_0}
\left(\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha) {}_{(ev_1,ev_2)}
\times (P_1 \times P_2)\right) \\
& \subseteq \mathcal M^{\text{\rm sph}}_3(J_N;\alpha;P_1,P_2,P_0).
\endaligned
$$
(See the sentence after \eqref{picorresp} for the notation $\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha)$.)
For any element in
$\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha;P_1,P_2,P_0)$,
there is a unique circle passing through
$z_0, z_1, z_2$ in this order.
Hence $\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha;P_1,P_2,P_0)$ can be identified with
a subset of $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C;P_1,P_2,P_0)$.
We denote this subset by
$\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha;\mathcal C;P_1,P_2,P_0)$.
We define an orientation on $\mathcal M^{\text{\rm sph, reg}}_3(J_N;\alpha;\mathcal C;P_1,P_2,P_0)$
in such a way that this identification respects the orientations.
We will explain in Remark \ref{inversionTypeII} (1) how to equip the whole space
$\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C;P_1,P_2,P_0)$
with an orientation.
\par
For $\rho \in \pi_2(N)/\sim$ we define
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2,P_0)$,
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
and $ \mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C)$ in an
obvious way.
\end{defn}
Then we have
$$
\aligned
& \langle PD [P_1] * PD [P_2], PD [P_0] \rangle \\
= & (-1)^{\epsilon_1 } \sum_{\rho} (ev_0^* PD [P_0] \cup ev_1^* PD [P_1] \cup ev_2^* PD [P_2]) [\mathcal M^{\text{\rm sph}}_3(J_N;\rho)] \\
= & (-1)^{\epsilon_1 + \epsilon_2} \sum_{\rho} (ev_0^* PD [P_0] \cup (ev_1 \times ev_2)^* PD [P_1 \times P_2]) [\mathcal M^{\text{\rm sph}}_3(J_N;\rho)] \\
= & (-1)^{\epsilon_2} \sum_{\rho} (ev_0^* PD [P_0]) [\mathcal M^{\text{\rm sph}}_3(J_N;\rho)_{(ev_1,ev_2)} \times (P_1 \times P_2)] \\
= & \sum_{\rho} (ev_0^* PD [P_0]) [\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2)] \\
= & \sum_{\rho} (-1)^{\epsilon_1} P_0 \cdot [\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2)] \\
= & \sum_{\rho} \# {\mathcal M}_3^{\text{\rm sph}}(J_N;\rho;P_1, P_2, P_0),
\endaligned
$$
where $\epsilon_1= \deg P_0 (\deg P_1 + \deg P_2)$ and $\epsilon_2 = \deg P_1 \cdot \deg P_2$.
For the second equality we use that $PD [P_1 ] \times PD [P_2] = (-1)^{\deg P_1 \cdot \deg P_2} PD [P_1 \times P_2]$
in $H^*(N \times N)$.
The third and fifth equalities follow from our convention of the Poincar\'e dual in \eqref{PDconvention}.
The fourth equality is due to the definition of $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2)$.
The last equality follows from \eqref{signpairing}.
In sum, we have
\begin{equation}
\langle PD[P_1] * PD [P_2], PD [P_0] \rangle =
\sum_{\rho} \# {\mathcal M}_3^{\text{\rm sph}}(J_N;\rho;P_1, P_2, P_0).
\end{equation}
\par
Now we will put a topology on the moduli space $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ derived from the topology on
the moduli spaces $\mathcal M^{\text{\rm sph}}_{3+L}(J_N;\alpha)$ of stable maps of genus $0$ with $3+L$ marked points in Definition \ref{top} and
Proposition \ref{nbhdtop}.
Here $L$ is a suitable positive integer explained later.
To relate $\mathcal M^{\text{\rm sph}}_3(J_N; \alpha; \mathcal C)$ to $\mathcal M^{\text{\rm sph}}_{3+L}(J_N; \alpha)$,
we will put $L$ marked points on the source curve of elements of $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha; \mathcal C)$.
(This process of adding additional marked points is somewhat reminiscent of a similar process
in the definition of stable map topology given in \cite{FO}.)
\par
We start with an elementary fact. That is,
for distinct three points $p, q, r$ on ${{\mathbb C}}P^1$, the circle passing through $p,q,r$ is characterized as
the set of points $w \in {{\mathbb C}}P^1$ such that the cross ratio of $p,q,r,w$ is either a real number or the infinity.
The following lemma is clear.
\begin{lem}\label{crossratio}
Let $u^{(i)}, u: \Sigma^{(i)} \to N$ be pseudoholomorphic maps from prestable curves $\Sigma^{(i)}, \Sigma$ representing the class $\alpha$.
Let $w^{(i)}_1, w^{(i)}_2, w^{(i)}_3, w^{(i)}_4$ be distinct four points on $\Sigma^{(i)}$ and
$w_1, w_2, w_3, w_4$ distinct four points on an irreducible component $\Sigma_a$ of $\Sigma$
such that a sequence
$(\Sigma^{(i)}, \vec{z}\,^{(i)} \cup \{w^{(i)}_1, \dots, w^{(i)}_4 \}, u^{(i)})$ converges to
$(\Sigma, \vec{z} \cup \{w_1, \dots, w_4\}, u)$ in $\mathcal M^{\text{\rm sph}}_{3+4}(J_N;\alpha)$.
Then $w^{(i)}_1, \dots, w^{(i)}_4$ belong to an irreducible component of $\Sigma^{(i)}$ for sufficiently large $i$ and
the cross ratio $[w^{(i)}_1: \dots : w^{(i)}_4]$ converges to $[w_1: \dots :w_4]$ as $i$ tends to $+\infty$.
\end{lem}
We consider a stratification of $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ by their combinatorial types as follows.
Recall that by circles we always mean oriented circles in ${\mathbb C} P^1$ as we stated in the second paragraph after the proof of Lemma \ref{TypeI}.
\begin{defn}\label{combtype}
The combinatorial type $\mathfrak{c}({\mathbf x})$ of ${\mathbf x} \in \mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ is defined by the following data:
\begin{enumerate}
\item The dual graph, whose vertices (resp. edges) correspond to irreducible components of the domain $\Sigma$ (resp., nodes of $\Sigma$).
\item The data that tells the irreducible components which contain $z_0$, $z_1$, $z_2$, respectively.
\item The homology class represented by $u$ restricted to each irreducible component.
\item For each irreducible component $\Sigma_a$, whether $C_a$ is empty or not. If $C_a \neq \emptyset$,
we include the data of the list of all nodes contained in $\text{\rm Int }D_a^+$ bounded by the oriented circle $C_a$
and the list of all nodes on $C_a$.
For each node $p$ that is not the root of $\Sigma_a$,
we include the data that determines
whether $p$ lies in the domain bounded by $C_a$ or not.
This data determines
which side of $C_a$ $p$ lies on. (Recall that $C_a$ is an oriented circle,
and that by `domain $D_a^+$ bounded by $C_a$' we involve the orientation together as we mentioned in the third paragraph after Remark \ref{bubbletree}. So the orientation of $C_a$ is a part of data of the combinatorial type.)
\end{enumerate}
For a node $p$ of $\Sigma$, we denote by $\Sigma_{\text{\rm in}, p}$ (resp. $\Sigma_{\text{\rm out}, p}$) the component of $\Sigma$, which
contains $p$ as an outward node (resp. the root node).
The combinatorial type of $p$ is defined by the following data:
\begin{enumerate}
\item $C_{\text{in},p}$ contains $p$ or not.
\item $C_{\text{out},p}$ contains $p$ or not.
\end{enumerate}
\end{defn}
\begin{rem}
The combinatorial type of a node $p$ of $\Sigma$ only depends on the components which contain $p$.
The combinatorial data ${\mathfrak c}({\mathbf x})$ determine the combinatorial type of each node $p$, in particular, whether the node $p$ is smoothable or not.
\end{rem}
\begin{lem}\label{finite-combi-type} There are only finitely many combinatorial types in
$\mathcal M^{\text{sph}}_3(J_N;\alpha;\mathcal C)$.
\end{lem}
\begin{proof} This follows from Lemma \ref{adjacentleq2} and the finiteness of combinatorial types of
$\mathcal M^{\text{sph}}_3(J_N;\alpha)$.
\end{proof}
\begin{defn}
Let $\mathfrak{c}_1, \mathfrak{c}_2$ be the combinatorial types of some elements in $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$.
We define the partial order $\mathfrak{c}_1 \preceq \mathfrak{c}_2$ if and only if $\mathfrak{c}_2$ is obtained from $\mathfrak{c}_1$
by smoothing some of smoothable nodes.
See Remark \ref{gluesmoothablenode} for smoothing of a smoothable node.
We set
$${\mathcal M}_3^{{\text{\rm sph}}, \succeq{\mathfrak{c}}({\mathbf x})} (J_N;\alpha;{\mathcal C}) =
\{ {\mathbf x}' \in {\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C}) \ \vert \ {\mathfrak c}({\mathbf x}') \succeq {\mathfrak c}({\mathbf x}) \}$$
and
$${\mathcal M}_3^{{\text{\rm sph}}, ={\mathfrak{c}}({\mathbf x})} (J_N;\alpha;{\mathcal C}) =
\{ {\mathbf x}' \in {\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C}) \ \vert \ {\mathfrak c}({\mathbf x}') = {\mathfrak c}({\mathbf x}) \}. $$
\end{defn}
\begin{rem}\label{dualgraph}
\begin{enumerate}
\item
A combinatorial type $\mathfrak c$ determines the intersection pattern of the circle systems.
For $\mathbf x \in {\mathcal M}_3^{\text{\rm sph}} (J_N;\alpha;{\mathcal C})$, consider
the dual graph $\Gamma ({\mathbf x})$ of $\cup C_a$.
Note that $\Gamma ({\mathbf x})$ is a subgraph of the dual graph of the domain of ${\mathbf x}
\in {\mathcal M}_3^{\text{\rm sph}}(J_N; \alpha; {\mathcal C})$.
Since the genus of the domain of ${\mathbf x}$ is $0$, each connected
component of $\Gamma({\mathbf x})$ is a tree.
Namely, we assign a vertex $v_a({\mathbf x})$ to each non-empty circle $C_a$ and an edge joining the
vertices $v_{a_1}({\mathbf x})$ and $v_{a_2}({\mathbf x})$ corresponding to $C_{a_1}$ and $C_{a_2}$
respectively if they intersect at a node of $\Sigma({\mathbf x})$.
The graph $\Gamma({\mathbf x})$ is determined by the combinatorial type ${\mathfrak c}({\mathbf x})$
and we also denote it by $\Gamma({\mathfrak c})$.
The smoothing of a node, which is an intersection point of circles $C_{a_1}$ and $C_{a_2}$,
corresponds to the process of contracting the edge joining $v_{a_1}({\mathbf x})$ and
$v_{a_2}({\mathbf x})$.
Hence, if ${\mathfrak c}_1 \preceq {\mathfrak c}_2$, $\Gamma({\mathfrak c}_2)$ is obtained from
$\Gamma({\mathfrak c}_1)$ by contracting some of edges.
Therefore we have a canonical one-to-one correspondence between
connected components of $\Gamma({\mathfrak c}_1)$ and $\Gamma({\mathfrak c}_2)$.
In particular, we find that
$$\# \pi_0(\Gamma({\mathfrak c}_1)) = \# \pi_0(\Gamma({\mathfrak c}_2)),$$
where $\#\pi_0(\Gamma({\mathfrak c}))$ denotes the number of connected components of
$\Gamma({\mathfrak c})$.
\item
Each circle $C_a$ in the admissible system of circles on $\mathbf x$
is oriented. If $C_a$ intersects any other $C_b$ in
the circle system, we cut $C_a$ at these intersection points to get a collection of oriented arcs.
Recall that each connected component $J$ of $\Gamma({\mathbf x})$ is a tree.
Hence the union of oriented circles corresponding to the vertices in $J$ is regarded as
an oriented Eulerian circuit, i.e., an oriented loop $\ell_J({\mathbf x})$ which is a concatenation of
the oriented arcs arising from $C_a$ ($a$ is a vertex in $J$).
The oriented loop $\ell_J({\mathbf x})$ is determined up to orientation preserving reparametrization.
\end{enumerate}
\end{rem}
For a stable map with circle system, we can put appropriate additional marked points on the circles in such a way that the circles can be recovered from the additional
marked points.
More precisely, we consider the following conditions for the elements in
$\mathcal{M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$:
\par
Let $\tilde{\mathbf y} \in \mathcal{M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ and let $\vec{z}\,^+=\vec{z} \cup \{ z_3, \dots, z_{2+L} \}$ be the marked points.
Namely, $\tilde{\mathbf y}=(\Sigma, \vec{z}\,^+, u: \Sigma \to N)$ is a stable map representing the class $\alpha$.
We consider the following.
\begin{conds}\label{cond}
For each irreducible component $\Sigma_a$ of the domain $\Sigma$,
$\Sigma_a \cap \vec{z}\,^+$ is either empty or consisting of at least three points. In the latter case,
$\Sigma_a \cap \vec{z}\,^+$ lies on a unique circle $C_a$ on $\Sigma_a$.
\end{conds}
Note, however, the orientation of circles is not directly determined by Condition \ref{cond}.
For the circles $\{ C_a \}$ in Condition \ref{cond}, we can associate its dual graph
$\Gamma(\tilde{\mathbf y})$ in the same manner.
Let $E(\tilde{\mathbf y})$ be the set of edges of $\Gamma(\tilde{\mathbf y})$.
\par
For a fixed $\mathbf x \in {\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$, we put
additional marked points on the union of circles in the admissible system of circles to
obtain $\tilde{\mathbf x} \in {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ such that
Condition \ref{cond} is satisfied.
We can find a neighborhood $V(\tilde{\mathbf x})$ of $\tilde{\mathbf x}$ in
$$\{ \tilde{\mathbf y} \in {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha) ~\vert~
\text{$\tilde{\mathbf y}$ satisfies Condition \ref{cond}} \},$$
so that
if $\tilde{\mathbf y} \in V(\tilde{\mathbf x})$,
we can obtain
$\Gamma(\tilde{\mathbf y})$ from $\Gamma(\tilde{\mathbf x})$ by contracting edges
in $E'$ and removing edges in $E''$.
Here $E', E''$ are disjoint subsets of $E(\tilde{\mathbf x})$, which may possibly be empty.
In particular, we have
$$\# \pi_0 (\Gamma(\tilde{\mathbf x})) \leq \# \pi_0 (\Gamma(\tilde{\mathbf y})).
$$
\begin{conds}\label{cond2} $\tilde{\mathbf y} \in V(\tilde{\mathbf x})$ and
$\# \pi_0 (\Gamma(\tilde{\mathbf x})) = \# \pi_0 (\Gamma(\tilde{\mathbf y})).$
\end{conds}
Under Condition \rhoef{cond2}, we have a canonical one-to-one correspondence between
connected components of $\Gamma(\tauilde{\muathbf x})$ and those of $\Gamma(\tauilde{\muathbf y})$.
To prove this it suffices to describe the way to determine the orientation of circles $\{ C_a\}$.
We consider Condition \rhoef{cond3} below for this purpose.
Let $J(\tilde{\mathbf x})$ be a connected component of $\Gamma(\tilde{\mathbf x})$
and $J(\tilde{\mathbf y})$ be the corresponding
connected component of $\Gamma(\tilde{\mathbf y})$, i.e.,
$J(\tilde{\mathbf y})$ obtained from $J(\tilde{\mathbf x})$ by contracting some edges.
\par
To make clear that $\vec{z}^+=\vec{z} \cup \{ z_3, \dots, z_{2+L}\}$ are marked points
on $\Sigma(\tilde{\mathbf x})$, we denote it by
$\vec{z}^+(\tilde{\mathbf x})=(z_0(\tilde{\mathbf x}), \dots, z_{2+L}(\tilde{\mathbf x}))$.
Let $\Sigma_{J(\tilde{\mathbf x})}(\tilde{\mathbf x})$ be the union of irreducible components,
which contain circles $C_a$ corresponding to the vertices in $J({\mathbf x})$.
We can assign a cyclic order on
$$I_{J(\tilde{\mathbf x})}=\{i ~\vert~ z_i (\tilde{\mathbf x}) \in \Sigma_{J(\tilde{\mathbf x})}(\tilde{\mathbf x}), ~i=0,1,2,3,\dots,2+L\}$$
using the oriented loop $\ell_{J(\tilde{\mathbf x})}(\tilde{\mathbf x})$
defined as in Remark \ref{dualgraph} (2).
Namely, the oriented loop $\ell_{J(\tilde{\mathbf x})}(\tilde{\mathbf x})$ passes at $z_i$,
$i \in I_{J(\tilde{\mathbf x})}$ compatible with the cyclic order on $I_{J(\tilde{\mathbf x})}$.
\par
For $\tilde{\mathbf y}$ satisfying Condition \ref{cond2},
we find that $I_{J(\tilde{\mathbf x})} = I_{J(\tilde{\mathbf y})}$, which we denote
by $I_J$.
\begin{conds}\label{cond3}
Each circle $C_{a'}$ on the domain of $\tilde{\mathbf y}$ given in Condition \ref{cond} is equipped with an orientation with the following property.
For each connected component $J$ of $\Gamma(\tilde{\mathbf x})$,
the cyclic order on $I_J$ coming from
the oriented loop $\ell_J(\tilde{\mathbf x})$
coincides with the one coming from the oriented loop $\ell_J (\tilde{\mathbf y})$
defined as in Remark \ref{dualgraph} (2) using the orientation on $C_{a'}$ for
any vertex $a'$ in $J(\tilde{\mathbf y})$.
\end{conds}
Note that such an orientation on $C_{a'}$ is unique, if there exists one.
Under Condition \ref{cond3}, each circle $C_a$ in Condition \ref{cond} is oriented
in such a manner.
This is the way to define the
canonical one-to-one correspondence between
connected components of $\Gamma(\tilde{\mathbf x})$ and those of $\Gamma(\tilde{\mathbf y})$.
\begin{conds}\label{cond4}
The quadruple $(\Sigma, \vec{z}, \{ C_a \}, u)$ defines a stable map with circle system
${\mathcal C}=\{C_a\}$.
\end{conds}
\begin{defn}
We set
$$
{\mathcal U}(\tilde{\mathbf x})=\{ \tilde{\mathbf y} \in V(\tilde{\mathbf x}) ~\vert~ \tilde{\mathbf y} \text{ satisfies Conditions \ref{cond}, \ref{cond2}, \ref{cond3}, \ref{cond4}.} \}.
$$
We denote the natural map by
\begin{align}
\pi^L_{\tilde{\mathbf x}} : {\mathcal U}(\tilde{\mathbf x}) \to {\mathcal M}_{3}^{\text{\rm sph}}(J_N;\alpha;{\mathcal C}) \nonumber \\
(\Sigma, \vec{z}\,^+, u) \mapsto (\Sigma, \vec{z}, \{ C_a\}, u). \nonumber
\end{align}
\end{defn}
\begin{rem}\label{choiceV(x)}Note that we can take the above set $V(\tilde{\mathbf x})$
for $\tilde{\mathbf x}$ in such a way that
${\mathcal M}_3^{\text{\rm sph}, ={\mathfrak c}(\mathbf x)} (J_N;\alpha;{\mathcal C})$ is contained in the image of
$\pi^L_{\tilde{\mathbf x}} : {\mathcal U}(\tilde{\mathbf x}) \to {\mathcal M}_{3}^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$.
Note also that we have
$$\pi^L_{\tilde{\mathbf x}} ({\mathcal U}(\tilde{\mathbf x})) \subset {\mathcal M}_3^{{\text{\rm sph}}, \succeq{\mathfrak{c}}({\mathbf x})} (J_N;\alpha;{\mathcal C}).$$
\end{rem}
Summarizing the construction above, we have
\begin{lem}\label{add}
For any ${\mathbf x} \in {\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha; {\mathcal C})$, there exist a positive integer $L$ and $\tilde{\mathbf x} \in {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$
such that the above naturally defined map $\pi^L_{\tilde{\mathbf x}} : {\mathcal U}(\tilde{\mathbf x}) \to {\mathcal M}_{3}^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$ satisfies
$\pi^L_{\tilde{\mathbf x}}(\tilde{\mathbf x}) = {\mathbf x}$.
\end{lem}
We equip ${\mathcal U}(\tilde{\mathbf x})$ with the subspace topology of ${\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$.
\begin{defn}\label{top}
For $U \subset {\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$, $U$ is defined to be a neighborhood of ${\mathbf x}$ if and only if
there exist a positive integer $L$,
$\tilde{\mathbf x} \in {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$
as in Lemma \ref{add}
such that there exists a neighborhood $\widetilde{U} \subset {\mathcal U}(\tilde{\mathbf x})$ of $\tilde{\mathbf x}$ satisfying
$\pi^L_{\tilde{\mathbf x}}(\widetilde{U}) \subset U$. Let ${\mathfrak N}(\mathbf x)$ be the collection of all neighborhoods of $\mathbf x$.
\end{defn}
We can show the following
\begin{prop}\label{nbhdtop}
The collection $\{ {\mathfrak N}(\mathbf x) ~ \vert ~ \mathbf x \in {\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C}) \}$
satisfies the axiom of the system of neighborhoods.
Thus it defines a topology on ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$.
\end{prop}
\begin{proof}
Let $U$ be a neighborhood of $\mathbf x$ and
$\widetilde{U}$ a neighborhood of $\tilde{\mathbf x}$ in ${\mathcal U}(\tilde{\mathbf x})$ as in Definition \ref{top}.
Take an open neighborhood ${\widetilde U}^{\circ}$ of $\tilde{\mathbf x}$ in $\widetilde U$.
For any $\tilde{\mathbf y} \in {\widetilde U}^{\circ}$, there is a neighborhood $\widetilde W$ of
$\tilde{\mathbf y}$ in ${\mathcal U}(\tilde{\mathbf x}) \subset {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ such that
$\widetilde W$ is contained in ${\mathcal U}(\tilde{\mathbf y})$.
Thus $\widetilde W \subset {\mathcal U}(\tilde {\mathbf x}) \cap {\mathcal U}(\tilde{\mathbf y})$.
Hence $\pi^L_{\tilde{\mathbf y}}(\widetilde W)=\pi^L_{\tilde{\mathbf x}}(\widetilde W)$ is a neighborhood of $\mathbf y$.
It remains to show that the definition of the neighborhood in Definition \ref{top} is independent of
the choice of $\tilde{\mathbf x}$.
This follows from the next lemma.
\end{proof}
\begin{lem}\label{lemaddpoints}
Let $\tilde{\mathbf x} \in {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ and
$\tilde{\mathbf x}' \in {\mathcal M}_{3+L'}^{\text{\rm sph}}(J_N;\alpha)$ as in Lemma \ref{add} such that
$\pi^L_{\tilde{\mathbf x}}(\tilde{\mathbf x})=\pi^{L'}_{\tilde{\mathbf x}'}(\tilde{\mathbf x}')={\mathbf x}$.
Then for any neighborhood $\widetilde{U} \subset {\mathcal U}(\tilde{\mathbf x})$ of $\tilde{\mathbf x}$,
there exists a neighborhood $\widetilde{U}' \subset {\mathcal U}(\tilde{\mathbf x}')$ of $\tilde{\mathbf x}'$
such that $\pi^{L'}_{\tilde{\mathbf x}'} (\widetilde{U}') \subset \pi^L_{\tilde{\mathbf x}} (\widetilde{U})$.
\end{lem}
\begin{proof}
Let $\widetilde{W}' \subset {\mathcal U}(\tilde{\mathbf x}')$ be a sufficiently small neighborhood
of $\tilde{\mathbf x}'$, which will be specified later.
We will define a continuous mapping $\Phi_{\tilde{\mathbf x}'}: \widetilde{W}' \to {\mathcal U}(\tilde{\mathbf x}) \subset
{\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ such that
$\pi^L_{\tilde{\mathbf x}} \circ \Phi_{\tilde{\mathbf x}'} = \pi^{L'}_{\tilde{\mathbf x}'}$.
In other words, for $\tilde{\mathbf y}' \in {\widetilde W}'$,
we will find $\tilde{\mathbf y} = \Phi_{\tilde{\mathbf x}'}(\tilde{\mathbf y}') \in {\mathcal U}(\tilde{\mathbf x})$ with
$\pi^L_{\tilde{\mathbf x}}(\tilde{\mathbf y})=\pi^{L'}_{\tilde{\mathbf x}'}(\tilde{\mathbf y}')={\mathbf y}'$.
Namely, we find additional $L$ marked points on the circles on the domain
$\Sigma({\mathbf y}')$ of ${\mathbf y}'$.
We define $\tilde{\mathbf y}$ in the following steps.
Firstly, for $\tilde{\mathbf y}' \in {\mathcal U}(\tilde{\mathbf x}')$, we define $L$ mutually distinct marked points
$w_j(\tilde{\mathbf y}')$, $j=3, \dots, 2+L$, on the union of circles $\cup C({\mathbf y}')$
in the admissible circle systems of ${\mathbf y}'$.
By the construction below, any $w_j(\tilde{\mathbf y}')$ does not coincide with
$z_0({\mathbf y}'), z_1({\mathbf y}'), z_2({\mathbf y}')$.
Then there exists a neighborhood of $\tilde{\mathbf x}'$ in ${\mathcal U}(\tilde{\mathbf x}')$
such that $w_j(\tilde{\mathbf y}')$ does not coincide with any nodes of ${\mathbf y}'$ and
defines an element in ${\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$.
Finally, we find a neighborhood $\widetilde{W}'$ of $\tilde{\mathbf x}'$
in ${\mathcal U}(\tilde{\mathbf x}')$
such that $\tilde{\mathbf y}' \in {\widetilde W}'$ and $\tilde{\mathbf y} \in {\mathcal U}(\tilde{\mathbf x})$.
\par
Denote by $\vec{z}\,^+ (\tilde{\mathbf x})$, (resp. $\vec{z}\,^{+} (\tilde{\mathbf x}')$), the set of marked points in
$\tilde{\mathbf x}$, (resp. $\tilde{\mathbf x}'$).
For $j=3, \dots, 2 + L$, we need to find $w_j(\tilde{\mathbf y'})$ on the admissible system of circles on
$\Sigma({\mathbf y}')$. Here recall that ${\mathbf y}' = \pi^{L'}_{\tilde{\mathbf x}'}(\tilde{\mathbf y}')$.
\par
Firstly, we pick three distinct points $z_{i_1(a)} (\tilde{\mathbf x}'), z_{i_2(a)}(\tilde{\mathbf x}'), z_{i_3(a)}(\tilde{\mathbf x}')$
on each circle $C({\mathbf x})_a$ in the admissible circle system of ${\mathbf x}$.
Let $C({\mathbf x})_{a(j)} \subset \Sigma({\mathbf x})_{a(j)}$ be a circle on an irreducible
component $\Sigma({\mathbf x})_{a(j)}$ of the domain of $\mathbf x$
such that $z_j(\tilde{\mathbf x}') \in \vec{z}\,^{+}(\tilde{\mathbf x}')$ is on $C({\mathbf x})_{a(j)}$.
Note that the graph $\Gamma(\tilde{\mathbf y}')$ is obtained by contracting some edges of $\Gamma(\tilde{\mathbf x}')$.
Denote by $a'(j)$ the vertex of $\Gamma(\tilde{\mathbf y}')$, which is the image of the vertex $a(j)$ of $\Gamma(\tilde{\mathbf x}')$
by the contracting map.
Then the component $\Sigma({\mathbf y}')_{a'(j)}$ of the domain of ${\mathbf y}'$ contains
$z_{i_1(a(j))}(\tilde{\mathbf y}'), z_{i_2(a(j))}(\tilde{\mathbf y}'), z_{i_3(a(j))}(\tilde{\mathbf y}')$.
There is a unique circle passing through these three points, which is nothing but
$C({\mathbf y}')_{a'(j)} \subset \Sigma({\mathbf y}')_{a'(j)}$, since
${\mathbf y}' = \pi^{L'}_{\tilde{\mathbf x}'}(\tilde{\mathbf y}')$.
Then we define $w_j(\tilde{\mathbf y}')$ on
$C({\mathbf y}')_{a'(j)} \subset \Sigma({\mathbf y}')_{a'(j)}$ so that the cross ratio of
$z_{i_1(a(j))}(\tilde{\mathbf x}'), z_{i_2(a(j))}(\tilde{\mathbf x}'), z_{i_3(a(j))}(\tilde{\mathbf x}'), z_j(\tilde{\mathbf x})$
is equal to that of $z_{i_1(a(j))}(\tilde{\mathbf y}'), z_{i_2(a(j))}(\tilde{\mathbf y}'), z_{i_3(a(j))}(\tilde{\mathbf y}'),
w_j(\tilde{\mathbf y}')$.
Here the cross ratio is taken on $\Sigma({\mathbf x})_{a(j)}$ and $\Sigma({\mathbf y}')_{a'(j)}$, which are biholomorphic to ${\mathbb C}P^1$, respectively.
Note that $w_j(\tilde{\mathbf y}')$ depends continuously on $\tilde{\mathbf y}'$ in the following sense.
Take a real projectively linear isomorphism $\psi_{\tilde{\mathbf y}'}$ from $C({\mathbf y}')_{a'(j)}$ to ${\mathbb R}P^1$
in ${\mathbb C}P^1$ such that
$z_{i_1(a(j))}(\tilde{\mathbf y}')$, $z_{i_2(a(j))}(\tilde{\mathbf y}'), z_{i_3(a(j))}(\tilde{\mathbf y}')$ are sent to
$0,1, \infty$.
Then $\psi_{\tilde{\mathbf y}'}(w_j(\tilde{\mathbf y}'))$ is continuous with respect to $\tilde{\mathbf y}'$.
Since $w_j(\tilde{\mathbf x}')=z_j(\tilde{\mathbf x})$ by the construction and $w_j(\tilde{\mathbf y}')$ depends continuously
on $\tilde{\mathbf y}'$,
if $\tilde{\mathbf y'}$ is sufficiently close to $\tilde{\mathbf x}'$ in ${\mathcal U}(\tilde{\mathbf x}')$,
the 0-th, 1-st, 2-nd marked points on $\mathbf y'$ and
$w_j(\tilde{\mathbf y}')$, $j=3, \dots, 2+L$, are mutually distinct and do not coincide with nodes of $\Sigma({\mathbf y}')$ on
$\Sigma({\mathbf y}')_{a'}$.
Hence ${\mathbf y}'$ with marked points $z_0({\mathbf y}'), z_1({\mathbf y}'), z_2({\mathbf y}'), w_3(\tilde{\mathbf y}'),
\dots, w_{2+L}(\tilde{\mathbf y}')$ defines an element in ${\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$,
which we denote by $\tilde{\mathbf y}$.
We take a neighborhood $\widetilde{W}'$ of $\tilde{\mathbf x}'$ in ${\mathcal U}(\tilde{\mathbf x}')$ in order that
$\tilde{\mathbf y} \in V(\tilde{\mathbf x})$ for $\tilde{\mathbf y}' \in \widetilde{W}'$.
Note that the number of marked points of $\tilde{\mathbf y}$ is either zero or at least three by the construction.
Since $\pi^{L'}_{\tilde{\mathbf x}'}(\tilde{\mathbf y}')={\mathbf y}'$ is a stable map with admissible system of circles,
any $\tilde{\mathbf y} \in \widetilde{W}'$ satisfies Conditions \ref{cond}, \ref{cond2}, \ref{cond3}, \ref{cond4}.
We define $\Phi_{\tilde{\mathbf x}'}:\widetilde{W}' \to {\mathcal U}(\tilde{\mathbf x})$
by $\Phi_{\tilde{\mathbf x}'}(\tilde{\mathbf y}')=\tilde{\mathbf y}$ constructed above.
It is continuous and $\Phi_{\tilde{\mathbf x}'}(\tilde{\mathbf x}')
= \tilde{\mathbf x}$. Hence, for any neighborhood $\widetilde{U}$ of $\tilde{\mathbf x}$ in
${\mathcal U}(\tilde{\mathbf x})$, there exists a neighborhood $\widetilde{U}'$ of $\tilde{\mathbf x}'$
in ${\mathcal U}(\tilde{\mathbf x}')$ such that
$\Phi_{\tilde{\mathbf x}'}(\widetilde{U}') \subset \widetilde{U}$.
It implies that $\pi^{L'}_{\tilde{\mathbf x}'}(\widetilde{U}') \subset \pi^L_{\tilde{\mathbf x}}(\widetilde{U})$.
\end{proof}
\par
When the combinatorial type $\mathfrak{c}=\mathfrak{c}({\mathbf x})$ is fixed,
we have mentioned in Remark \ref{choiceV(x)} that ${\mathcal U}(\tilde{\mathbf x})$ enjoys the following property.
\begin{equation}
{\mathcal M}_3^{{\text{\rm sph}}, ={\mathfrak{c}}({\mathbf x})} (J_N;\alpha;{\mathcal C}) \subset
\pi^L_{\tilde{\mathbf x}}({\mathcal U}(\tilde{\mathbf x})). \label{samecomb}
\end{equation}
We observe that ${\mathcal U}(\tilde{\mathbf x})$ satisfies the second axiom
of countability, since so does the moduli space
${\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$.
We recall from Lemma \ref{finite-combi-type} that
${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$
carries only a finite number of combinatorial types.
Combining these observations with \eqref{samecomb}, we obtain
the following
\begin{prop}
${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$ satisfies the second axiom of countability.
\end{prop}
Hence compactness is equivalent to sequential compactness and
Hausdorff property is equivalent to the uniqueness of the limit of convergent sequences
for moduli spaces of stable maps with circle system.
\begin{prop}\label{seqcpt}
The moduli space ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$ is sequentially compact.
\end{prop}
\begin{proof}
Let ${\mathbf x}^{(j)}= \bigl( \Sigma({\mathbf x}^{(j)}), z_0^{(j)},z_1^{(j)}, z_2^{(j)}, \{C_a^{(j)}\}, u^{(j)}:\Sigma({\mathbf x}^{(j)}) \to N \bigr)$ be a sequence in
${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$.
Because there are a finite number of combinatorial types, we may assume that ${\mathfrak{c}}({\mathbf x}^{(j)})$
are independent of $j$.
First of all, we find a candidate of the limit of a subsequence of ${\mathbf x}^{(j)}$.
In Step 1, we consider irreducible components in $\Sigma({\mathbf x}^{(j)})$ explained in Remark \ref{remcircle} (2).
Since these components are not stable after forgetting the admissible system of circles, we put a point on each component
so that these components become stable. We obtain ${\mathbf x}^{(j)+}$ at this stage.
Because the moduli space of stable maps with marked points is compact, we can take a convergent subsequence.
We denote its limit by ${\mathbf x}_{\infty}^+$.
We will find an admissible system of circles on ${\mathbf x}_{\infty}^+$ and insert irreducible components explained in
Remark \ref{remcircle} (2), if necessary, to obtain a candidate of the limit of ${\mathbf x}^{(j)}$ in Steps 2 and 3.
There are three cases, case (i), case (ii$+$), case (ii$-$) in Definition \ref{unstwocirc}.
In Step 2, we deal with sequences $C_a^{(j)}$ of circles on $\Sigma({\mathbf x}^{(j)+})_a$, which do not collapse
to any node of $\Sigma({\mathbf x}_{\infty}^+)$.
In Step 3, we discuss when insertions of such irreducible components are necessary and explain how to
perform insertions to obtain the candidate of the limit of ${\mathbf x}^{(j)}$. The detail follows.
{\bf Step 1.} \ \ If ${\mathbf x}^{(j)}$ has irreducible components which become unstable
after the circle system is forgotten, we put an additional marked point $z_a^{(j),++}$ for each such component
$\Sigma({\mathbf x}^{(j)})_a$.
Such an irreducible component contains its root $p_{\text{\rm root}, a}^{(j)}$, another special point $p_{\text{\rm out}, a}^{(j)}$
and the circle $C_a^{(j)}$.
We can take $z_a^{(j),++}$ for each $j$ with the following property.
For any $j, j'$, there exists a biholomorphic map $\phi_{j',j} :\Sigma({\mathbf x}^{(j)})_a \to \Sigma({\mathbf x}^{(j')})_a$ such that
$\phi_{j',j}$ sends $p_{\text{\rm root}, a}^{(j)}$, $p_{\text{\rm out}, a}^{(j)}$, $z_a^{(j), ++}$ and $C_a^{(j)}$ to
$p_{\text{\rm root}, a}^{(j')}$, $p_{\text{\rm out}, a}^{(j')}$, $z_a^{(j'), ++}$ and $C_a^{(j')}$, respectively.
Such components are determined by the combinatorial data, hence the number of these components is independent of $j$.
If there are $\ell$ such components, we obtain a sequence
${\mathbf x}^{(j)+} \in {\mathcal M}_{3+\ell}^{\text{\rm sph}}(J_N;\alpha)$.
{\bf Step 2.} \ \ Since ${\mathcal M}_{3+ \ell}^{\text{\rm sph}}(J_N;\alpha)$ is compact,
there is a convergent subsequence of
${\mathbf x}^{(j)+}$. We may assume that ${\mathbf x}^{(j)+}$ is convergent.
Denote its limit by
${\mathbf x}_{\infty}^+ \in {\mathcal M}_{3+ \ell}^{\text{\rm sph}}(J_N;\alpha)$.
Forgetting the marked points $z_3, \dots , z_{2+ \ell}$, $\Sigma({\mathbf x}_{\infty}^+)$ is a prestable curve of genus 0 with three marked points
$z_0, z_1, z_2$. Hence we can define a unique irreducible component of Type I-1 in the same way as in Definition \ref{def:circlesystemI} (1).
Let $\{V_k\}_k$, $V_{k+1} \subset V_k$, be a sequence of open neighborhoods of the set of nodes on the domain $\Sigma({\mathbf x}_{\infty}^+)$
such that $\cap_k V_k$ is the set of nodes. There exists a positive integer $N(k)$ such that
if $j \geq N(k)$, there is a holomorphic embedding
$$\phi^{(j)}_k: \Sigma({\mathbf x}_{\infty}^+) \setminus V_k \to \Sigma({\mathbf x}^{(j)+}).$$
For each component $\Sigma({\mathbf x}_{\infty}^+)_a$,
there is an irreducible component $\Sigma({\mathbf x}^{(j)+})_{a'}$ such that $\phi^{(j)}_k(\Sigma({\mathbf x}_{\infty}^+)_a
\setminus V_k) \subset \Sigma({\mathbf x}^{(j)+})_{a'}$.
From now on, we will take subsequences of $\{ j \}$ successively and rename them $\{ j \}$.
{\bf Step 2-1.} \ \ If $C_{a'}^{(j)} \subset \Sigma({\mathbf x}^{(j)+})_{a'}$ is empty for any $j$,
we set $C_a \subset \Sigma({\mathbf x}_{\infty}^+)_a$ to be an empty set.
{\bf Step 2-2.} \ \ Consider the case where there is $k$ such that $\phi^{(j)}_k (\Sigma({\mathbf x}_{\infty}^+)_a
\setminus V_k)$ intersects the circle $C_{a'}^{(j)}$ on $\Sigma({\mathbf x}^{(j)+})_{a'}$ for any $j$.
We treat the following two cases separately.
{\bf Case 1:} \ \ For any point $p \in \Sigma({\mathbf x}_{\infty}^+)_a$, which is not a node, there is a neighborhood $U(p)$ of $p$ such that
$C^{(j)}_{a'}$ is not contained in $\phi^{(j)}_k (U(p))$ for any $j$.
Since $\Sigma({\mathbf x}_{\infty}^+)_a \setminus V_k$ is compact,
after taking a subsequence of $j$, we may assume that
there are three distinct points $p_1, p_2, p_3 \in \Sigma({\mathbf x}_{\infty}^+)_a
\setminus V_k$ such that there are mutually disjoint neighborhoods $U(p_1), U(p_2), U(p_3)$
and $\phi^{(j)}_k(U(p_i)) \cap C^{(j)}_{a'} \neq \emptyset$.
We pick $p_i^{(j)} \in \phi^{(j)}_k(U(p_i)) \cap C^{(j)}_{a'}$.
After taking a suitable subsequence of $j$, we may assume that $p_i^{(j)}$ converges to $p_i^{(\infty)}$
for $i=1,2,3$.
Then we take the circle passing through $p_1^{(\infty)}, p_2^{(\infty)}, p_3^{(\infty)}$, which we denote by
$C_a \subset\Sigma({\mathbf x}_{\infty}^+)_a$. Since $C^{(j)}_{a'}$ are oriented and
all ${\mathbf x}^{(j)}$ have the same
combinatorial type, $C_a$ is also canonically oriented.
It is clear that the circle $C_a$ is uniquely determined once the subsequence of $\{j\}$ is taken as above.
(For example, consider the cross ratios.)
Note that Case 1 is applied to each irreducible component treated in Step 1, hence an oriented circle is
put on each of such components.
{\bf Case 2:} \ \ There is a point $p$ on $\Sigma({\mathbf x}_{\infty}^+)_a$ such that $p$ is not a node and,
for any neighborhood $U(p)$ of $p$, there exists a positive integer $N$ such that
if $j \geq N$, $C_{a'}^{(j)}$ is contained in $\phi^{(j)}_k(U(p))$.
Since $p$ as above is not a node, $C_{a'}^{(j)}$ must contain a unique special point by the admissibility
of the circle system. (If there are at least two special points, then these points get closer as $j \to \infty$.
Hence there should appear a new component attached at $p$ in
$\Sigma({\mathbf x}_{\infty}^+)$.)
In this case, we attach a new irreducible component at $p$ as in case (i) in Definition \ref{unstwocirc}.
Note that the attached component contains a circle of Type I.
{\bf Step 3.} \ \ Let $p$ be a node on $\Sigma({\mathbf x}_{\infty}^+)$.
Then either $p$ is the limit of nodes $p^{(j)}$ on $\Sigma({\mathbf x}^{(j)+})$ or
$p$ appears as a degeneration of $\Sigma({\mathbf x}^{(j)+})$.
We insert an irreducible component explained in Remark \ref{remcircle} (2) as discussed
in Case 1-2 and Case 2-2 below.
{\bf Case 1:} \ \ $p$ is the limit of nodes $p^{(j)}$.
{\bf Case 1-1:}
If $p$ and $p^{(j)}$ are either both smoothable or both non-smoothable,
we keep the node $p$ as it is.
{\bf Case 1-2:}
If one of them is smoothable and the other is non-smoothable,
we insert a new irreducible component as in Remark \ref{remcircle} (2) in the following way.
Firstly, we consider the case that $p^{(j)}$ are non-smoothable but $p$ is smoothable.
In this case, we find that both components containing $p^{(j)}$ are of Type I. We insert
the component of case (i) in Definition \ref{unstwocirc} at $p$.
Next, we consider the case that $p^{(j)}$ are smoothable but $p$ is non-smoothable.
If $p^{(j)}$ are smoothable nodes joining two components of Type I in $\Sigma({\mathbf x}^{(j)+})$,
we insert the component of case (i) in Definition \ref{unstwocirc}.
Suppose that at least one of the irreducible components containing $p^{(j)}$ is of Type II.
There are two possibilities, which we discuss separately.
The first possibility is that both components contain non-empty circles in the admissible system.
Let $\Sigma({\mathbf x}^{(j)+})_a$ be the component such that $p^{(j)}$ is its outward node and
$\Sigma({\mathbf x}^{(j)+})_b$ the component such that $p^{(j)}$ is its root node.
In this case, $C_a^{(j)}$ cannot collapse to $p$, since $C_a^{(j)}$ must contain at least one special point
other than $p^{(j)}$. Namely, if $\Sigma({\mathbf x}^{(j)})_a$ is of Type I, then $C_a^{(j)}$ must contain one of $z_0, z_1, z_2$
or a node of a tree of components of Type I. If $\Sigma({\mathbf x}^{(j)})_a$ is of Type II, then $C_a^{(j)}$ must contain
the root node of $\Sigma({\mathbf x}^{(j)})_a$.
Hence we consider the case that $C_b^{(j)}$ collapses to $p$.
Recall that $D_b^{(j)+}$ denotes the domain in $\Sigma({\mathbf x}^{(j)+})_b$ bounded by the oriented circle $C_b^{(j)}$.
If $D_b^{(j)+}$ collapses to $p$, then we insert the component of case (ii$-$) in Definition \ref{unstwocirc}.
Otherwise, we insert the component of case (ii$+$) in Definition \ref{unstwocirc}.
The second possibility is that only one of $\Sigma({\mathbf x}^{(j)+})_a$ or $\Sigma({\mathbf x}^{(j)+})_b$ contains a non-empty circle in the admissible system.
Note that the component $\Sigma({\mathbf x}^{(j)+})_a$ must contain $p^{(j)}$ as an outward node.
It is impossible that $C_a^{(j)} = \emptyset$ and $C_b^{(j)} \neq \emptyset$.
Hence $C_a^{(j)}$ is non-empty. Since $p^{(j)}$ is a smoothable node, $C_a^{(j)}$ does not contain $p^{(j)}$.
Denote by $D_a^{(j)+}$ the domain in $\Sigma({\mathbf x}^{(j)+})_a$ bounded by the oriented circle $C_a^{(j)}$.
If $D_a^{(j)+}$ contains the node $p^{(j)}$, we insert the component of case (ii$+$) in Definition \ref{unstwocirc} at the node $p$.
Otherwise, we insert the component of case (ii$-$) in Definition \ref{unstwocirc} at the node $p$.
{\bf Case 2:} \ \ $p$ appears as a degeneration of $\Sigma({\mathbf x}^{(j)+})$.
{\bf Case 2-1:} If $p$ is smoothable, we keep the node $p$ as it is.
{\bf Case 2-2:} If $p$ is non-smoothable, we insert a new irreducible component
explained in Remark \ref{remcircle} (2) in a similar way to Case 1-2 above as follows.
Suppose that a sequence $\Sigma({\mathbf x}^{(j)+})_a$ degenerates to a nodal curve
with a node $p$, which is non-smoothable.
Let $\Sigma({\mathbf x}_{\infty}^+)_{a_1}$ and $\Sigma({\mathbf x}_{\infty}^+)_{a_2}$ be components containing the node $p$
such that $\Sigma({\mathbf x}_{\infty}^+)_{a_2}$ is farther from the component of Type I-1 than $\Sigma({\mathbf x}_{\infty}^+)_{a_1}$.
Here the component of Type I-1 of $\Sigma({\mathbf x}_{\infty}^+)$ is defined in the beginning of Step 2.
There are two possibilities.
The first possibility is that there exists a positive integer $k$ such that
$C_a^{(j)} \cap \phi_k^{(j)} (\Sigma({\mathbf x}_{\infty}^+)_{a_1} \setminus V_k) = \emptyset$
for any sufficiently large $j > N(k)$.
In this case, we find that $\Sigma({\mathbf x}^{(j)+})_a$ is of Type I.
We insert the component of case (i) in Definition \ref{unstwocirc} at the node $p$.
The second possibility is that there exists a positive integer $k$ such that
$C_a^{(j)} \cap \phi_k^{(j)} (\Sigma({\mathbf x}_{\infty}^+)_{a_2} \setminus V_k) = \emptyset$
for any sufficiently large $j > N(k)$.
If there exists a positive integer $k'$ such that $D_a^{(j)+} \cap \phi_{k'}^{(j)} (\Sigma({\mathbf x}_{\infty}^+)_{a_2} \setminus V_{k'}) = \emptyset$,
we insert the component of case (ii$-$) in Definition \ref{unstwocirc} at the node $p$.
Here $D_{a}^{(j)+}$ is the domain in $\Sigma({\mathbf x}^{(j)+})_a$ bounded by the oriented circle $C_a^{(j)}$.
Otherwise, we insert the component of case (ii$+$) in Definition \ref{unstwocirc} at the node $p$.
\begin{rem}
Suppose that the adjacent components $\Sigma({\mathbf x}_{\infty}^+)_{a_1}$
and $\Sigma({\mathbf x}_{\infty}^+)_{a_2}$
contain circles $C_{a_1}$ and $C_{a_2}$, respectively, which are put in Step 2-2, and suppose that
$\Sigma({\mathbf x}^{(j)})_{a_1'}=\Sigma({\mathbf x}^{(j)})_{a_2'}$,
i.e., this component degenerates to a nodal curve including
$\Sigma({\mathbf x}_{\infty}^+)_{a_1}$ and $\Sigma({\mathbf x}_{\infty}^+)_{a_2}$.
Since $C_{a'_1}^{(j)}$ is a circle, which is connected, it passes through the neck region corresponding to
the node between $\Sigma({\mathbf x}_{\infty}^+)_{a_1}$ and
$\Sigma({\mathbf x}_{\infty}^+)_{a_2}$.
Hence $C_{a_1}$ and $C_{a_2}$ pass through the node, which is smoothable in the sense of
Definition \ref{smoothable}.
Case 2-2 is the case that one of $C_{a_1}$ and $C_{a_2}$ is a circle passing through
the node and the other is empty.
\end{rem}
After these processes, we obtain ${\mathbf x}_{\infty}^{\prime \prime}$ as the candidate of the limit of
${\mathbf x}^{(j)}$.
By the construction, ${\mathbf x}_{\infty}^{\prime \prime}$ is equipped with
an admissible system of circles.
\begin{rem}
By our construction, in particular, Step 3 Case 1-2 and Case 2-2, we find
$${\mathfrak{c}}({\mathbf x}_{\infty}^{\prime \prime}) \preceq {\mathfrak{c}}({\mathbf x}^{(j)}).$$
\end{rem}
Now we show the following
\begin{lem}\label{lemcomp}
There exists a subsequence of ${\mathbf x}^{(j)}$, which converges to ${\mathbf x}_{\infty}^{\prime \prime}$
in ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$.
\end{lem}
\begin{proof}
We will add suitable marked points on ${\mathbf x}^{(j)}$ to obtain $\tilde{\mathbf x}^{(j)}$.
For an irreducible component in ${\mathbf x}^{(j)}$, which becomes unstable when forgetting the circle, we added the marked point $z_a^{(j),++}$ in Step 1.
Since such an irreducible component contains three special points, i.e., nodes or marked points, and the holomorphic map is constant on the irreducible component,
the limit ${\mathbf x}_{\infty}^+$ must contain an irreducible component of the same type.
We add one more marked point on the circle on the component in ${\mathbf x}^{(j)}$ and ${\mathbf x}_{\infty}^{\prime \prime}$, respectively,
so that the four special points on the component have the fixed cross ratio. (In total, we add two marked points on the circle in this case.)
We put additional marked points on other irreducible components as follows.
If an irreducible component does not contain a circle in the admissible system, we do not put additional marked points.
Then the remaining components are either those discussed in Step 2 or those discussed in Step 3.
We deal with them separately.
For an irreducible component in Case 1 of Step 2-2, we have three marked points $p_1, p_2, p_3$ on the circle $C_a$.
We pick $p_i^{(j)} \in \phi_k^{(j)}(U(p_i)) \cap C_{a'}^{(j)}$, $i=1,2,3$. (Here we added three marked points on the circle.)
For an irreducible component in Case 2 of Step 2-2, we have a special point $q$ on the new attached irreducible component $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_b$.
For sufficiently large $j$, the circle $C^{(j)}_{a'}$ is contained in $\phi^{(j)}_k(U(p))$.
By the admissibility of the circle system, there should be a special point $q^{(j)}$ on $C^{(j)}_{a'}$.
We put two more marked points $q_1, q_2$ on the circle $C_b$ on $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_b$.
We choose $p^{(j)} \notin \phi_k^{(j)}(U(p))$ which converges to some $p' \in \Sigma({\mathbf x}_{\infty}^{\prime \prime})_a$.
We choose $q_1^{(j)}, q_2^{(j)} \in C^{(j)}_{a'}$ in such a way that the cross ratio of $p^{(j)}, q^{(j)}, q_1^{(j)}, q_2^{(j)}$ is equal to
the one of $\overline{p}, q, q_1, q_2$ for any $j$.
Here $\overline{p}$ is the node, where we attach
$\Sigma({\mathbf x}_{\infty}^{\prime \prime})_b$
to $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_a$ in Case 2 of Step 2-2.
(In this case, we add two marked points.)
For each newly inserted component $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_c$
in Step 3, we add additional two marked points $q_1, q_2 \in C_c$ as follows.
Firstly, we consider Case 1.
Let $\Sigma({\mathbf x}_{\infty}^+)_a$ and $\Sigma({\mathbf x}_{\infty}^+)_b$ be the irreducible components of
$\Sigma({\mathbf x}_{\infty}^+)$, which intersect at $p$,
and let $\Sigma({\mathbf x}^{(j)+})_a$ and $\Sigma({\mathbf x}^{(j)+})_b$ be the irreducible components of
$\Sigma({\mathbf x}^{(j)+})$, which intersect at $p^{(j)}$.
Here we arrange that $p$ (resp. $p^{(j)}$) is the root node of $\Sigma({\mathbf x}_{\infty}^+)_b$ (resp. $\Sigma({\mathbf x}^{(j)+})_b$).
The new component $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_c$ is inserted between $\Sigma({\mathbf x}_{\infty}^+)_a$ and $\Sigma({\mathbf x}_{\infty}^+)_b$.
We denote by $\tilde{p}$ the node of $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_c$, which has the same combinatorial type as the node $p^{(j)}$ (see
Definition \ref{combtype} for the definition of the combinatorial type of a node) and by ${\tilde p}'$ the other node of $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_c$.
Let $d=a \text{\rm ~or~} b$ such that $\tilde p$ is (resp. is not) the root of $\Sigma({\mathbf x}_{\infty}^{\prime \prime})_c$ if and only if
$p^{(j)}$ is (resp. is not) the root of $\Sigma({\mathbf x}^{(j)+})_d$.
We discuss the following two cases separately:
\begin{enumerate}
\item[({\rm a})] $\tilde p \in C_c$ and ${\tilde p}' \notin C_c$,
\item[({\rm b})] $\tilde p \notin C_c$ and ${\tilde p}' \in C_c$.
\end{enumerate}
Pick and fix $k$. Then we have $V_k$ and $\phi^{(j)}_k: \Sigma({\mathbf x}_{\infty}^+) \setminus V_k \to \Sigma({\mathbf x}^{(j)+})$ as in the beginning of
Step 2.
In Case (a), we find that $p^{(j)} \in C_d^{(j)}$. Pick $q_1, q_2 \in C_c \setminus \{\tilde p\}$.
We choose points $q_1^{(j)}, q_2^{(j)}$ on $C_{d}^{(j)}$ as follows.
Pick $\tilde{p}^{\prime (j)}$ on $\Sigma({\mathbf x}^{(j)})_{d} \cap \phi_k^{(j)}(\Sigma({\mathbf x}_{\infty}^+) \setminus V_k)$.
Then we take $q_1^{(j)}, q_2^{(j)} \in C_d^{(j)}$ such that
the cross ratios of $p^{(j)}, q_1^{(j)}, q_2^{(j)}, \tilde{p}^{\prime (j)}$ are
equal to the one of $\tilde{p}, q_1, q_2, \tilde{p}'$.
In Case (b), we find that $p^{(j)} \notin C_d^{(j)}$. Pick $q_1, q_2 \in C_c \setminus \{\tilde p' \}$.
We choose points $q_1^{(j)}, q_2^{(j)}$ on $C_{d}^{(j)}$ as follows.
Pick $\tilde{p}^{\prime (j)}$ on $C_{d}^{(j)} \cap \phi_k^{(j)}(\Sigma({\mathbf x}_{\infty}^+) \setminus V_k)$.
Then we take $q_1^{(j)}, q_2^{(j)} \in C_d^{(j)}$ such that
the cross ratios of $p^{(j)}, q_1^{(j)}, q_2^{(j)}, \tilde{p}^{\prime (j)}$ are
equal to the one of $\tilde{p}, q_1, q_2, \tilde{p}'$.
Next we consider Case 2.
Let $\Sigma({\mathbf x}_{\infty}^+)_{a_1}$ and $\Sigma({\mathbf x}_{\infty}^+)_{a_2}$ be the irreducible components,
which share the node $p$.
Pick three distinct points $q_1,q_2,q_3$ on the newly inserted components so that none of them are nodes.
We note that the circle $C_{a'}^{(j)}$ intersects at least one of
$\phi^{(j)}_k(\Sigma({\mathbf x}_{\infty}^+)_{a_i} \setminus V_k)$, $i=1,2$.
From now on, we fix such a $k$ and denote it by $k_0$.
We may assume that $\phi^{(j)}_{k_0}(\Sigma({\mathbf x}_{\infty}^+)_{a_1} \setminus V_{k_0})$ intersects $C_{a'}^{(j)}$.
(The other case is similar.)
Then Case 2 in Step 2-2 is applied to $\Sigma({\mathbf x}_{\infty}^+)_{a_2}$.
Pick $p' \in C_a \cap \Sigma({\mathbf x}_{\infty}^+)_{a_1} \setminus V_{k_0}$.
For all sufficiently large $j$, we arrange the neck region $V_{\text{\rm neck}, p}^{(j)}$,
which is a connected component of the complement of
$\phi^{(j)}_{k_0}(\Sigma({\mathbf x}_{\infty}^+) \setminus V_{k_0})$
and degenerates to a neighborhood of the node $p$, as follows.
Pick and fix a suitable biholomorphic map
$\varphi^{(j)}: \Sigma({\mathbf x}^{(j)})_{a'} \to {\mathbb C}P^1$ such that
\begin{itemize}
\item $V_{\text{\rm neck}, p}^{(j)}$ is mapped to an annulus
$\{z \in {\mathbb C} ~ \vert ~ r^{(j)} < \vert z \vert < R^{(j)}\}$ for some $0 < r^{(j)} < 1/2, R^{(j)} >1$,
\item
$(\varphi^{(j)})^{-1}(\{z \in {\mathbb C} ~ \vert ~ \vert z \vert < r^{(j)} \})$ contains
$\phi^{(j)}_k(\Sigma({\mathbf x}_{\infty}^+)_{a_1} \setminus V_k)$.
\end{itemize}
By applying a dilation fixing $0, \infty$, we may assume that the circle $C^{(j)}_{a'}$ is tangent to the unit circle
$\{z \in {\mathbb C} ~ \vert ~ \vert z \vert =1\}$.
Since $C^{(j)}_{a'} \cap \phi^{(j)}_k(\Sigma({\mathbf x}^+_{\infty})_{a_1} \setminus V_k)$ is not empty for any $k$,
we find that $r^{(j)}$ tends to $0$.
Similarly, since for each given $k$, $C^{(j)}_{a'}$ does not intersect
$\phi^{(j)}_k(\Sigma({\mathbf x}^+_{\infty})_{a_2} \setminus V_k)$ for all sufficiently large $j$,
the number $R^{(j)}$ tends to $+\infty$.
Pick $p^{\prime (j)} \in C^{(j)}_{a'}$ such that $\vert \varphi^{(j)}(p^{\prime (j)}) \vert < r^{(j)}$ and
${\mathbf x}_j^+$ with $p^{\prime (j)}$ added converges to ${\mathbf x}_{\infty}^+$ with $p'$ added.
We pick $q^{(j)}_1, q^{(j)}_2, q^{(j)}_3$ on $C^{(j)}_{a'}$ such that
$\vert \varphi^{(j)}(q^{(j)}_1) \vert =1$, $\vert \varphi^{(j)}(q^{(j)}_2) \vert =1/2$
and the cross ratios of $p^{\prime (j)}, q^{(j)}_1, q^{(j)}_2, q^{(j)}_3$ are the same as the cross ratio of
$p', q_1, q_2, q_3$.
After adding those new marked points, we obtain $\tilde{\mathbf x}^{(j)}$ for all sufficiently large $j$ and $\tilde{\mathbf x}_{\infty}^{\prime \prime}$
in ${\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ for some $L$.
By the choice of those points, we find that $\tilde{\mathbf x}^{(j)}$ converges to $\tilde{\mathbf x}_{\infty}^{\prime \prime}$ in
${\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$.
By Definition \ref{top}, ${\mathbf x}^{(j)}$ converges to
${\mathbf x}^{\prime \prime}_{\infty}$ in
${\mathcal M}_{3}^{\text{\rm sph}}(J_N;\alpha; {\mathcal C})$.
\end{proof}
This finishes the proof of Proposition \ref{seqcpt}.
\end{proof}
Now we have
\begin{thm}
The moduli space ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$ is compact and Hausdorff.
\end{thm}
\begin{proof}
Since ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$ satisfies the second axiom of countability,
Proposition \ref{seqcpt} implies the compactness.
Then the Hausdorff property follows in the same way as in Lemma 10.4 in \cite{FO}.
\end{proof}
\begin{thm}\label{KuraonMsph}
The moduli space $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ carries a Kuranishi structure.
\end{thm}
\begin{proof}
We construct a Kuranishi structure on
$\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ in the same way as in the case of the moduli space of stable maps \cite{FO};
see \cite{foootech} Part 3 and Part 4 for more details.
Our strategy to construct Kuranishi structures on the moduli space of bordered stable maps with admissible system of
circles is to reduce the construction to the one for moduli spaces of bordered stable maps with marked points
by putting a suitable number of points on circles.
The only points, which we have to take care of, are the following two points.
The first point to be taken care of is the way how to deal with the admissible system of circles in terms of additional marked points on the domain curve.
An element in $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ is a stable map with an admissible system of circles.
For $\mathbf x \in {\mathcal M}^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$, we put suitable marked points on circles in the admissible system of
circles to obtain an element $\tilde{\mathbf x} \in {\mathcal M}_{3+L}^{\text{\rm sph}}(J_N;\alpha)$ and
\begin{eqnarray}
\pi^L_{\tilde{\mathbf x}} : {\mathcal U}(\tilde{\mathbf x}) \to {\mathcal M}_{3}^{\text{\rm sph}}(J_N;\alpha;{\mathcal C}) \nonumber \\
(\Sigma, \vec{z}\,^+, u) \mapsto (\Sigma, \vec{z}, \{ C_a\}, u). \nonumber
\end{eqnarray}
(See Lemma \ref{add}.)
Note that $\pi^L_{\tilde{\mathbf x}}$ is not injective.
For ${\mathbf x} \in {\mathcal M}_{3}^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$,
we take a subspace of a Kuranishi neighborhood of $\tilde {\mathbf x} \in {\mathcal U}(\tilde{\mathbf x})$ as we explain in the next paragraph
to obtain a Kuranishi neighborhood of ${\mathbf x}$.
If an irreducible component $\Sigma_a \subset \Sigma$ contains a circle $C_a \neq \emptyset$, $C_a$ must contain
at least one special point, i.e., a node or a marked point.
Note that $C_a$ is an oriented circle on $\Sigma_a$.
If a holomorphic automorphism $\varphi$ of $\Sigma_a$ is of finite order preserving $C_a$ and
its orientation and if $\varphi$ fixes a point on $C_a$, then $\varphi$ must be the identity.
Hence the stabilizer of this component must be trivial.
If the number of special points on $C_a$ is less than $3$, we take the minimal number of marked points on $C_a$
in such a way that the total number of special points is $3$.
Let $p_1, \dots, p_c$ be nodes on $C_a$ and $w_1, \dots, w_k$ marked points on $C_a$.
Then, for each marked point $w_j$ on $C_a$, we choose a short embedded arc $A_{w_j}$ on $\Sigma_a$ which is transversal to $C_a$ at $w_j$.
We allow to move the marked point $w_j$ to $w'_j$ on $A_{w_j}$ such that
$p_1, \dots, p_c, w'_1, \dots, w'_k$ lie on a common circle.
This last condition is expressed using the cross ratio and
these constraints cut out the set of such $w'_1, \dots, w'_k$ transversally.
Thus if we restrict $\pi^L_{\tilde{\mathbf x}}$ to the subset of ${\mathcal U}(\tilde{\mathbf x})$
such that the extra $L$ marked points hit
$A_{w_j}$, the restricted map is injective.
(More precisely, a similar map on the Kuranishi neighborhood is
injective.)
Therefore we can use this subset of the Kuranishi neighborhood of
${\mathcal U}(\tilde{\mathbf x})$
as a Kuranishi neighborhood of ${\mathcal M}^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$.
The second point to be taken care of is about the gluing construction.
We use the gluing construction following Part 3 and Part 4 of \cite{foootech} at smoothable nodes.
Let $p$ be a smoothable node.
If no circle in the admissible system of circles passes through $p$, we use the gluing construction as in the case of stable maps.
Otherwise, we proceed as follows.
Let $\Sigma_a$ and $\Sigma_b$ intersect at the node $p$. Then the circles $C_a$ and $C_b$ contain $p$.
In order to perform gluing, we need a \emph{coordinate at infinity}, Definition 16.2 in \cite{foootech}.
For a stable map with an admissible system of circles, we use a coordinate at infinity adapted to the circle system as follows.
We pick a complex local coordinate $\xi$, resp. $\eta$, on $\Sigma_a$, resp. $\Sigma_b$, around $p$ such that
$C_a$, resp. $C_b$, with the given orientation corresponds to the real line oriented from $-\infty$ to $+\infty$
in the $\xi$-plane, resp. the $\eta$-plane.
For the gluing construction,
we use the gluing parameter $T \in [T_0, \infty]$ for a sufficiently large $T_0 >0$ such that the $\xi$-plane and $\eta$-plane are glued by
$\xi \cdot \eta = - e^{-T}$.
In this case the parameter to smooth this node is $[T_0,\infty)$.
Therefore such a point corresponds to a boundary. (Or a corner if there are more such points.)
We remark that in the case when there is no circle on the node the
parameter to smooth this node is $[T_0,\infty) \times S^1$.
Then the construction of a Kuranishi structure goes through as in Part 3 and Part 4 in \cite{foootech}.
\end{proof}
Once we have Kuranishi structures on ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha)$ and ${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$,
we also have Kuranishi structures on their fiber products with singular chains $P_i$'s.
For $\mathbf x = (\Sigma({\mathbf x}), \vec{z}, \{ C({\mathbf x})_a \}, u:\Sigma({\mathbf x}) \to N)
\in \mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$, we set
\begin{eqnarray}
n({\mathbf x})&= &\text{the number of irreducible components of $\Sigma({\mathbf x})$}, \nonumber \\
n_{I,\mathfrak p}({\mathbf x})&= &\text{the number of irreducible components $\Sigma({\mathbf x})_a$ of Type I-2 in $\Sigma({\mathbf x})$,}
\nonumber \\
& & \text{such that $C({\mathbf x})_a \neq \emptyset$ does not contain the root node}, \nonumber \\
n_{I, \text{\rm bubble}}({\mathbf x})&= &\text{the number of irreducible components $\Sigma({\mathbf x})_a$ of Type I-2 in $\Sigma({\mathbf x})$,}
\nonumber \\
& & \text{such that $C({\mathbf x})_a$ contains the root node}, \nonumber \\
n_{I, \emptyset}({\mathbf x}) &= &\text{the number of irreducible components $\Sigma({\mathbf x})_a$ of Type I-2 in $\Sigma({\mathbf x})$,}
\nonumber \\
& & \text{such that $C({\mathbf x})_a = \emptyset $}, \nonumber \\
n_{II,\text{\rm circ}}({\mathbf x})&= &\text{the number of irreducible components $\Sigma({\mathbf x})_a$ of Type II in
$\Sigma({\mathbf x})$,}
\nonumber\\
& & \text{such that $C({\mathbf x})_a$ is non-empty}, \nonumber \\
n_{II, \emptyset}({\mathbf x}) & = & \text{the number of irreducible components $\Sigma({\mathbf x})_a$ of Type II,} \nonumber \\
& & \text{such that $C({\mathbf x})_a$ is empty.} \nonumber
\end{eqnarray}
Since these numbers depend only on the combinatorial type $\mathfrak c$, we also denote them by
$n({\mathfrak c})$, $n_{I,\mathfrak p}({\mathfrak c})$, $n_{I,\text{\rm bubble}}({\mathfrak c})$, $n_{I, \emptyset}({\mathfrak c})$,
$n_{II,\text{\rm circ}}({\mathfrak c})$, $n_{II, \emptyset}({\mathfrak c})$.
Note that
$$n({\mathfrak c}) =1+n_{I, \mathfrak p}({\mathfrak c}) + n_{I, \text{\rm bubble}}({\mathfrak c}) +
n_{I, \emptyset}({\mathfrak c}) + n_{II, \text{\rm circ}}({\mathfrak c}) + n_{II, \emptyset}({\mathfrak c}),$$
because there always exists a unique irreducible component of Type I-1.
Then we find the following proposition.
The proof is easy, so it is omitted.
\begin{prop}\label{virdimcombtype}
\begin{enumerate}
\item The virtual codimension $\text{\rm vcd}({\mathfrak c})$ of the stratum with the combinatorial type $\mathfrak c$ is equal to
\begin{eqnarray}
& & 2\left( n({\mathfrak c}) - 1 \right) - 2 n_{I,\mathfrak p}({\mathfrak c}) - n_{I,\text{\rm bubble}}({\mathfrak c}) -
n_{II,\text{\rm circ}}({\mathfrak c}) \nonumber \\
& = & n_{I,\text{\rm bubble}}({\mathfrak c}) + 2 n_{I, \emptyset}({\mathfrak c}) + n_{II, \text{\rm circ}}({\mathfrak c}) + 2 n_{II, \emptyset}({\mathfrak c}),
\nonumber
\end{eqnarray}
which is non-negative.
\item $\text{\rm vcd}({\mathfrak c})=0$ if and only if $n({\mathfrak c})=n_{I,\mathfrak p}({\mathfrak c})+1$.
Namely, all irreducible components are of Type I with non-empty circles and the circle on each component of Type I-2 does not contain
the root node.
\item $\text{\rm vcd}({\mathfrak c})=1$ if and only if either
Case (A) $n({\mathfrak c})=n_{I,\mathfrak p}({\mathfrak c})+2$ and
$n_{I,\text{\rm bubble}}({\mathfrak c})=1$ or Case (B) $n({\mathfrak c})=n_{I,\mathfrak p}({\mathfrak c})+2$ and $n_{II,\text{\rm circ}}({\mathfrak c})=1$.
Namely, either all irreducible components are of Type I with non-empty circles and there is exactly one irreducible component $\Sigma_a$ of
Type I-2 such that the circle $C_a$ contains the root node of $\Sigma_a$, or
there is exactly one Type II component $\Sigma_a$ with $C_a \neq \emptyset$, all others are of Type I with non-empty circles and the circle
$C_a$ on each irreducible component of Type I-2 does not contain the root node.
\end{enumerate}
\end{prop}
Proposition \ref{virdimcombtype} describes combinatorial types $\mathfrak c$ such that the corresponding
strata in $\mathcal M^{\text{\rm sph}}_3(J_N;\alpha;\mathcal C)$ are of codimension $1$.
There are two cases:
(A) $n({\mathfrak c})=n_{I,\mathfrak p}({\mathfrak c})+2$ and $n_{I,\text{\rm bubble}}({\mathfrak c})=1$;
(B) $n({\mathfrak c})=n_{I,\mathfrak p}({\mathfrak c})+2$ and $n_{II,\text{\rm circ}}({\mathfrak c})=1$.
Case (A) and Case (B) are treated in a different way. Firstly, we consider Case (A).
Note that the stable map is constant on the irreducible components explained in Remark \ref{remcircle} (2).
By our convention, we do not put obstruction bundles on these components.
Therefore we can identify the following two codimension 1 boundary components equipped with Kuranishi structures:
\begin{enumerate}
\item A Type I component splits into two irreducible components.
\item A Type I circle $C_a$ meets an inward interior marked point and an irreducible component of case (i) in Definition \ref{unstwocirc}
is inserted at the node of the two irreducible components.
\end{enumerate}
See Figure 5 which illustrates an example with $n_{I,\mathfrak p}({\mathfrak c})=0$.
\par
\centerline{
\epsfbox{InvolutionFigure5.eps}}
\par
\centerline{\textbf{Figure 5}}
\par\smallskip
These two strata are glued to cancel codimension 1 boundaries.
See Remark \ref{inversionTypeII} (2) for the cancellation with orientation.
This is a key geometric idea to see the equality in Lemma \ref{pmainformula}.
From now on, we denote by $${\mathcal M}_3^{\text{\rm sph}}(J_N;\alpha;{\mathcal C})$$
the moduli space with the codimension $1$ boundaries of (1) and (2) identified
as above.
The remaining codimension 1 boundary components are
Case (B), i.e., those with non-empty Type II circles in the admissible system of circles,
which correspond to the codimension 1 disc bubbling phenomenon in
${\mathcal M}^{\text{\rm main}}_3(J_{N\times N};\beta)$.
We will study these codimension 1 boundary components in Lemma \ref{forgettypeII}.
See Remark \ref{inversionTypeII} (3), (4) and Subsection 6.3.1 for the cancellation with orientation in Case (B).
\subsubsection{Proof of Theorem \ref{Proposition34.25} (2), IV: completion of the proof}\label{6.5}
In this subsection we prove \eqref{diagmaineq}.
First of all, we recall
the following lemma, which is a well-known fact on the moduli space of pseudo-holomorphic spheres
which is used in the definition of the quantum cup product \cite{FO}.
Let $\rho \in \pi_2(N)/\sim$, where the equivalence relation $\sim$ was defined in Definition \ref{equivonpi2}.
For given $\rho$ and cycles $P_0,P_1,P_2$ in $N$, we defined $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2,P_0)$ in Definition \ref{notationmoduli}.
\begin{lem}\label{moduliqp}
The moduli space $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2,P_0)$
carries a Kuranishi structure ${\mathfrak K}_0$ and a multisection $\mathfrak s_0$ such that
$$
\sum_{\rho}
\#\left(\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2,P_0)
\right)^{\mathfrak s_0} T^{\omega(\rho)}e^{c_1(N)[\rho]}
= \langle
PD[P_1] * PD[P_2],PD[P_0]\rangle
$$
where the sum is taken over $\rho$ for which the virtual dimension of
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;P_1,P_2,P_0)$ is zero.
\end{lem}
Now we consider the moduli space used to define
the left hand side of (\ref{diagmaineq}).
Let $\vec \beta = (\beta_{1},\ldots,\beta_{{k}}), \beta_{j} \neq 0 \in \Pi (\Delta_N)$.
Set $\text{length}(\vec{\beta})=k$.
We define $\mathcal M_{1;1}(J_{N\times N};\vec \beta;P)$ by induction on $\text{length}(\vec{\beta})$.
Firstly, we consider the moduli space
$\mathcal M_{1,(1,0)}(J_{N\times N};\beta)$ of bordered stable maps
representing the class $\beta$ attached to $(N\times N, \Delta_N)$ with one interior marked point and
one boundary marked point.
Here, to specify the interior marked point as an output marked point,
we use the notation $\mathcal M_{1,(1,0)}(J_{N\times N};\beta)$
used in Subsection 8.10.2 \cite{fooobook2}.
See the line just before Definition 8.10.2 \cite{fooobook2} where
the orientation on $\mathcal M_{1,(1,0)}(J_{N\times N};\beta)$ is given.
We denote by $z_1$ the first (only one) boundary marked point.
Then we define
$$
\mathcal M_{1,{(1,0)}}(J_{N\times N};\beta;P)=\mathcal M_{1,(1,0)}(J_{N\times N};\beta)_{ev_1}\times P.
$$
This is a special case of Definition 8.10.2 in \cite{fooobook2} with $k=1, \ell=0$ and
the sign is $(-1)^{\epsilon}=+1$ in this case.
\par
When $\text{length}(\vec{\beta})=1$, i.e., $\vec{\beta}=(\beta_{1})$, we set
\begin{equation} \label{M_{1,1}}
\mathcal M_{1;1}(J_{N\times N};\vec \beta;P)
= -\mathcal M_{1,(1,0)}(J_{N\times N}; \beta_1;P).
\end{equation}
Here we reversed the orientation of $\mathcal M_{1,(1,0)}(J_{N\times N}; \beta_1;P)$ so that
it is compatible with Definition \ref{P_beta} in the case that $k=1$.
\par
Suppose that the orientation of
$\mathcal M_{1;1}(J_{N\times N};\vec \beta;P)$ is given for $\text{length}(\vec{\beta}) \leq k$.
For $\vec{\beta}=(\beta_1, \dots, \beta_{k+1})$, we write
$\vec{\beta}^-=(\beta_2, \dots, \beta_{k+1})$.
Then we define
\begin{equation}\label{def:M11}
\aligned
\mathcal M_{1;1}(J_{N\times N};\vec \beta;P)
=
& - \mathcal M_{1,1}(J_{N \times N};\beta_1)_{ev_1} \times_{p_2 \circ ev_{int}}
\mathcal M_{1,1}(J_{N \times N};\vec{\beta}^-;P).
\endaligned\end{equation}
Namely, we reversed the orientation so that it is consistent with Definition \ref{P_beta}
for each positive integer $k= \text{length}(\vec{\beta})$.
We also denote by $\mathcal M_{1;1}(J_{N\times N};\vec \beta;P)$ the chain
$$
p_2 \circ ev_{int}:\mathcal M_{1;1}(J_{N\times N};\vec \beta;P) \to \Delta_N,
$$
where $p_2(x,y)=(y,y)$.
By an abuse of notation, we set
$\mathcal M_{1,1}(J_{N \times N};\emptyset;P)=P$.
From now on, $\vec{\beta}$ is either $\emptyset$ or $(\beta_1, \dots, \beta_k)$ with
$\beta_j \neq 0$ for each $j=1, \dots, k$.
For each $P_0, P_1, P_2$ and $\vec{\beta}_0, \vec{\beta}_1, \vec{\beta}_2$, we define
\begin{equation}\label{def:Mhat}
\aligned
&\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)
\\
&= \mathcal M_{1,1}(J_{N \times N};\vec{\beta}_0;P_0)_{ev_1} \times_{ev_0}
\mathcal M_3^{\text{\rm main}}(J_{N \times N}; \beta';\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_1;P_1),
\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_2;P_2)).
\endaligned\end{equation}
Taking our Convention 8.2.1 (4) in \cite{fooobook2} and the pairing \eqref{signpairing} into account,
the following is immediate from the definition.
\begin{lem}\label{multiondisc}
There exists a Kuranishi structure ${\mathfrak K}_1$ on
$\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$
and a multisection $\mathfrak s_1$ with the
following properties. We denote by $n_\beta$ the sum of
$$
\# \left(
\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)
\right)^{\mathfrak s_1}
$$
over $(\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0)$ whose total sum
is $\beta$. Then we have
\begin{equation}\label{eq627}
\langle\mathfrak m_2(\mathcal I(P_1),\mathcal I(P_2)),\mathcal I(P_0)\rangle
= \sum_{\beta} n_{\beta}T^{\omega(\beta)}e^{\mu(\beta)/2}.
\end{equation}
Moreover, the multi-section ${\mathfrak s}_1$ is invariant under the involution $\tau_*$ on
each disc component with bubble trees of spheres.
\end{lem}
The last statement follows from the fact that
the multi-section ${\mathfrak s}_1$ is constructed by induction on the energy of
the bordered stable maps keeping the invariance under $\tau_*$, see the explanation in Remark \ref{remonmult}.
Consider the union of
$\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$
over $(\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0)$ such that
the total sum of $(\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0)$ is $\beta$
whose double belongs to class $\rho \in\pi_2(N)/\sim$.
(See Remark \ref{bubbletree} (2) for the double of $\beta$.)
We glue them along virtual codimension one strata appearing in Case (A) in Proposition \ref{virdimcombtype} and denote it by
\begin{equation}
\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0). \label{hatmoduli}
\end{equation}
See Sublemma \ref{sublemma} for the description of the codimension one strata which we identify.
Each $\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$ has a Kuranishi structure in such a way that
we can glue them to obtain a Kuranishi structure on $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$ (see also Lemma \ref{frak s_2}).
Namely we have
\begin{sublem}\label{sublemma}
The orientations of $\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$ are compatible and $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$
has an oriented Kuranishi structure.
\end{sublem}
\begin{proof}
It is sufficient to see that two top dimensional strata adjacent along a stratum of codimension 1 induce
opposite orientations on the stratum of codimension 1.
\par
Firstly, we consider the case that a transition of strata occurs in one of $\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_i;P_i)$, $i=0,1,2$.
It suffices to check the compatibility of orientations inside $\mathcal M_{1,1}(J_{N \times N};\vec{\beta};P)$
for a given $\vec{\beta}$.
Let $\vec{\beta}=(\beta_1, \dots, \beta_i, \beta_{i+1}, \dots, \beta_k)$,
$\vec{\beta}_{(1)}=(\beta_1, \dots, \beta_i)$, $\vec{\beta}_{(2)}=(\beta_{i+1}, \dots, \beta_k)$
and $\vec{\beta}'=(\beta_1, \dots, \beta_{i-1}, \beta_i + \beta_{i+1}, \beta_{i+2}, \dots, \beta_k)$.
We define
$$\mathcal M_2(J_{N \times N}; \vec{\beta}_{(2)};P)=\mathcal M_2(J_{N \times N}; \beta_{i+1};
\mathcal M_{1,1}(J_{N\times N}; (\beta_{i+2}, \dots, \beta_k);P)).$$
By Proposition 8.10.4 \cite{fooobook2} with $\beta_1=0$,
$k=k_2=1$ and $\ell_1=\ell_2=0$, \eqref{M_{1,1}}
and the proof that $\mathfrak p_{1,0} \equiv i_! \mod \Lambda^+_{0, \text{nov}}$ in page 739 thereof, we find that
$$
\mathcal M_2(J_{N \times N};\vec{\beta}_{(2)};P) \subset (-1)^{\dim \Delta_N+1} \partial \mathcal M_{1,1}(J_{N \times N};\vec{\beta}_{(2)};P).
$$
By Proposition 8.10.4 (2) \cite{fooobook2}, we obtain
$$
\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_{(1)};\mathcal M_2(J_{N \times N};\vec{\beta}_{(2)};P))
\subset (-1)^{\dim \Delta_N +1} \partial \mathcal M_{1,1}(J_{N \times N}; \vec{\beta};P).
$$
On the other hand, Proposition 8.10.4 (1) \cite{fooobook2} also implies that
$$
\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_{(1)};\mathcal M_2(J_{N \times N};\vec{\beta}_{(2)};P))
\subset (-1)^{\dim \Delta_N} \partial \mathcal M_{1,1}(J_{N \times N}; \vec{\beta}';P).
$$
Hence the orientations of $ \mathcal M_{1,1}(J_{N \times N}; \vec{\beta};P)$ and
$\mathcal M_{1,1}(J_{N \times N}; \vec{\beta}';P)$ are compatible along
$\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_{(1)};\mathcal M_2(J_{N \times N};\vec{\beta}_{(2)};P))$.
\par
Next we consider the remaining case, i.e., a transition of strata involving $\mathcal M_3(J_{N \times N};\beta')$.
For $\vec{\beta}_1=(\beta_{1,1}, \dots, \beta_{1,k})$, we write $\vec{\beta}_1^-=(\beta_{1,2}, \dots,
\beta_{1,k})$.
The moduli spaces
$\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$
and
$\widehat{\mathcal M}(J_{N\times N};\beta' + \beta_{1,1};\vec \beta^-_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$
are adjacent along a stratum of codimension 1
\begin{equation}
\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_0;P_0)_{ev_1} \times_{ev_0}
\mathcal M_3(J_{N \times N}; \beta';\mathcal M_2(J_{N \times N};\vec{\beta}_1;P_1),
\mathcal M_{1,1}(J_{N \times N};\vec{\beta}_2;P_2)). \label{cod1stratum}
\end{equation}
Instead of Proposition 8.10.4 (1), (2), we use Proposition 8.5.1 in \cite{fooobook2} and find that
the orientations of $\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$ and $\widehat{\mathcal M}(J_{N\times N};\beta' + \beta_{1,1};\vec \beta^-_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$ are compatible along the stratum given in \eqref{cod1stratum}.
\par
The same argument applies to $\mathcal M_{1,1}(J_{N \times N}; \vec{\beta}_i;P_i)$, $i=0,2$.
Hence the orientations of the moduli spaces $\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$ are compatible with one another and so define
$\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$.
\par
Hence we can glue oriented Kuranishi structures on
$\widehat{\mathcal M}(J_{N\times N};\beta';\vec \beta_1,\vec \beta_2,\vec \beta_0;P_1,P_2,P_0)$
to obtain an oriented Kuranishi structure on $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$.
\end{proof}
The map $\overset{\circ}{\mathfrak I}$ in \eqref{mapI} induces
\begin{equation}\label{Ireg}
{\mathfrak I}^{\text{\rm reg}}: {\mathcal M}_3^{\text{\rm main, reg}}(J_{N \times N}; \rho;P_1, P_2, P_0) \to
{\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho; P_1, P_2, P_0),
\end{equation}
where we recall the definition
$$
{\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho ;P_1, P_2, P_0)=
(-1)^{\deg P_1 \cdot \deg P_2} P_0 \times_{ev_0} \left({\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho )_{(ev_1, ev_2)} \times_{N^2}(P_1 \times P_2)\right)
$$
from Definition \ref{notationmoduli}.
We extend
${\mathfrak I}^{\text{\rm reg}}$ to a map
\begin{equation}
\label{doublingmap}
\mathfrak I: \widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)
\to
\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)
\end{equation}
defined on the full moduli space as follows.
Let $(p_i,(S_{i,j},(z_{i,j;0},z_{i,j;\text{\rm int}}),u_{i,j})_{j=1}^{k_i})$ be an element of
$\mathcal M_{1;1}(J_{N\times N};\vec \beta_i;P_i)$. Here
$p_i \in \vert P_i \vert$, $(S_{i,j},(z_{i,j;0},z_{i,j;\text{\rm int}}),u_{i,j}) \in \mathcal M_{1,1}(J_{N\times N};\beta_{i,j})$
such that
$$
f(p_i) = u_{i,1}(z_{i,1;0}), \,\,
u_{i,1}(z_{i,1;\text{\rm int}}) = u_{i,2}(z_{i,2;0}), \,\,
\ldots,\,\,
u_{i,k_i-1}(z_{i,k_i-1;\text{\rm int}}) = u_{i,k_i}(z_{i,k_i;0}),
$$
where $P_i$ is $(\vert P_i\vert,f)$, $\vert P_i\vert$ is a simplex,
and $f : \vert P_i\vert \to N$ is a smooth map.
\par
Suppose that $S_{i,j}$ is a disc component.
Writing $u_{i,j} = (u_{i,j}^+,u_{i,j}^{-})$, we obtain
a map $\widehat u_{i,j} : \Sigma_{i,j} \to N$ with
$\Sigma_{i,j}$ a sphere, the double of $S_{i,j}$.
For bubble trees, we proceed as in Remark \ref{bubbletree} (1).
\par
We denote by $C_{i,j}\subset \Sigma_{i,j}$
the circle along which we glued two copies of $S_{i,j}$.
Then $\widehat u_{i,j}$ is defined by gluing $u_{i,j}^+$ and $u_{i,j}^{-}\circ c$
along $C_{i,j}$ in $\Sigma_{i,j}$ where $c: \Sigma_{i,j} \to \Sigma_{i,j}$
is the conjugation with $C_{i,j}$ as its fixed point set.
\par
We glue $(\Sigma_{i,j},u_{i,j})$ and $(\Sigma_{i,j+1},u_{i,j+1})$ at $z_{i,j;\text{\rm int}}$
and $z_{i,j+1;0}$. Here we identify $z_{i,j;\text{\rm int}} \in S_{i,j}$ as the corresponding point
in $\Sigma_{i,j}$ such that it is in the disc bounding $C_{i,j}$.
We thus obtain a configuration of a tree of spheres and a system of circles on it,
for each $i=1,2,0$. We glue them with the double of an element of
$\mathcal M^{\text{\rm main}}_3(J_{N\times N};\beta')$ in an obvious way. Thus we obtain the map
(\ref{doublingmap}).
(In case some of the sphere components become unstable we need to
shrink them. See the proof of Lemma \ref{Inotiso} below.)
\par
It is easy to see that $\mathfrak I$ is surjective.
\begin{lem}\label{Inotiso}
There is a subset ${\mathcal D}({\mathfrak I}) \subset \widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$ of
codimension $\geq 2$ such that
the map $\mathfrak I$ is an isomorphism outside ${\mathcal D}({\mathfrak I})$.
\end{lem}
\begin{proof}
We can easily check that the map $\mathfrak I$ fails to be an isomorphism only for the
following reason.
Let $((\Sigma^{\text{\rm dis}},(z_1,z_2,z_0)),v)$ be an element of
$\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$
and let $\Sigma^{\text{\rm dis}}_i$ be one of its irreducible sphere components.
Suppose that $\Sigma^{\text{\rm dis}}_i$ is unstable.
(Namely we assume that it has one or two singular points.)
Then its automorphism group $\text{Aut}(\Sigma^{\text{\rm dis}}_i)$ will
have positive dimension by definition of stability.
(We require the elements of $\text{Aut}(\Sigma^{\text{\rm dis}}_i)$
to fix the singular point.) By restricting $v$ to $\Sigma^{\text{\rm dis}}_i$, we
obtain $v_i = (v_i^+,v_i^-)$ where $v_i^{\pm}:\Sigma^{\text{\rm dis}}_i \to N$
are maps from the sphere domain $\Sigma^{\text{\rm dis}}_i$.
On the double (which represents $\mathfrak I((\Sigma^{\text{\rm dis}},(z_1,z_2,z_0)),v)$)
the domain $\Sigma^{\text{\rm dis}}$ contains two sphere components $\Sigma_{i}^{+}$
and $\Sigma_{i}^{-}$ on which the maps $v_i^+$ and $v_i^-$ are defined respectively.
We have two alternatives:
\begin{enumerate}
\item If one of $v_i^+$ and $v_i^-$ is a constant map, then this double itself is not a stable
map. So we shrink the corresponding component $\Sigma_{i}^{+}$ or $\Sigma_{i}^{-}$
to obtain a stable map. (This is actually a part of the construction used in
the definition of $\mathfrak I$.)
\item Suppose both $v_i^+$ and $v_i^-$ are nonconstant and let $g \in \text{Aut}(\Sigma^{\text{\rm dis}}_i)$.
Then the map $v_i^g = (v_i^+, v_i^-\circ g)$ defines
an element of $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$
different from $v_i = (v_i^+,v_i^-)$ but is mapped to the same element under the map $\mathfrak I$.
\end{enumerate}
We denote by ${\mathcal D}({\mathfrak I})$ the subset of $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$
consisting of $((\Sigma^{\text{\rm dis}},(z_1,z_2,z_0)),v)$ with at least one unstable sphere component $\Sigma_i^{\text{\rm dis}}$.
This phenomenon occurs only at strata of codimension $\ge 2$ because
it occurs only when there exists a sphere bubble.
This finishes the proof.
\end{proof}
\begin{rem}\label{inversionTypeII}
\begin{enumerate}
\item
We give the orientation on
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
as follows:
We recall from Proposition \ref{regoripres} that the map
$$\overset{\circ}{\mathfrak I}:{\mathcal M}_3^{\text{\rm main, reg}}(J_{N\times N};\rho) \to {\mathcal M}_3^{\text{\rm sph, reg}}(J_N;\rho)$$
is an orientation preserving isomorphism between spaces with oriented Kuranishi structures.
Taking Definitions \ref{Definition8.4.1} and \ref{notationmoduli} into account,
we find that the map ${\mathfrak I}^{\text{\rm reg}}$ in \eqref{Ireg} induces an isomorphism from
$${P_0 \times_{ev_0} \mathcal M}^{\text{\rm main, reg}}_3(J_{N\times N};\rho;P_1,P_2)$$ to
$\mathcal M^{\text{\rm sph, reg}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$, which is orientation
preserving if and only if $(-1)^{\deg P_1 (\deg P_2 +1)}=1$.
Since $P_i = {\mathcal M}_{1,1}(J_{N \times N}; \emptyset; P_i)$, the orientation of the fiber product
${P_0 \times_{ev_0} \mathcal M}^{\text{\rm main, reg}}_3(J_{N\times N};\rho;P_1,P_2)$
is the restriction of the orientation of $\widehat{\mathcal M}(J_{N \times N}; \rho; P_1, P_2, P_0)$.
Recall that $\overset{\circ}{\mathfrak I}$ extends to ${\mathfrak I}$ in \eqref{doublingmap}, which is
an isomorphism outside ${\mathcal D}({\mathfrak I})$ of codimension at least $2$ (Lemma \ref{Inotiso}).
Hence we can use ${\mathfrak I}$ to equip the moduli space
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ with an orientation in such a way
that
${\mathfrak I}$ is orientation preserving if and only if
$(-1)^{\deg P_1 (\deg P_2 +1)}=1$.
\item
For strata of virtual codimension $1$, there are two cases, i.e., Case (A) and Case (B) in Proposition \ref{virdimcombtype} (3).
We also explained that
each stratum in Case (A) arises in two ways as a codimension $1$ boundary of top dimensional strata,
i.e., phenomena (1) and (2), see Figure 5.
Note that there is a canonical identification, i.e., inserting/forgetting the component of case (i) in Definition \ref{unstwocirc},
between those arising from the phenomenon (1) and those arising from the phenomenon (2).
The orientation of each stratum of top dimension in
${\mathcal M}_3^{\text{\rm sph}}(J_N;\rho;{\mathcal C};P_1, P_2, P_0)$ is defined using the orientation
of $\widehat{\mathcal M}(J_{N\times N};\rho;P_1, P_2, P_0)$ as we just mentioned in Remark \ref{inversionTypeII} (1).
Since the moduli space $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$ is oriented,
these two orientations are opposite under the above identification to give an orientation on the glued space
with Kuranishi structure. As for the cancellation in Case (B), see Subsection \ref{1.9(1)}, Lemma \ref{forgettypeII} and the following items (3) and (4).
\item We consider the involution $\tau_*$
applied to one of the disc components $S$ of the fiber product factors
appearing in a stratum of
$\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$, such that
the double of $S$ is Type II.
Then the orientation of the circle $C=\partial S$ embedded in the domain $\Sigma$ of the corresponding
sphere component is inverted under the operation on
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ induced by
$\tau_*$ under the map $\mathfrak I$.
\item
The moduli space $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ is stratified according to
their combinatorial types $\mathfrak c$, see Definition \ref{combtype}.
When $\mathfrak c$ is fixed, there are finitely many Type II components. There are involutions acting on
these components by the reflection with respect to the circle of Type II.
Namely, the involution reverses the orientation of the circle of Type II.
We call a Kuranishi structure, resp. a multi-section, on $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
{\it invariant under the inversion of the orientation of circles of Type II}, if the Kuranishi structure, resp. the multi-section, restricted to each stratum
corresponding to ${\mathfrak c}$ is invariant under the involution acting on each Type II component in $\mathfrak c$.
Note that these involutions are defined on the corresponding strata, not on the whole moduli space
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$.
\end{enumerate}
\end{rem}
\begin{lem}\label{frak s_2}
The moduli space $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
carries a Kuranishi structure ${\mathfrak K}_2$ invariant under the inversion of the orientation of circles of Type II.
The Kuranishi structure can be canonically pulled back to a Kuranishi structure on the space $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$.
Moreover, there is a multisection ${\mathfrak s}_2$ of the Kuranishi structure on $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ with the
following properties.
\begin{enumerate}
\item The multisection $\mathfrak s_2$ is transversal to the zero section.
\item The multisection $\mathfrak s_2$ is invariant under the inversion of the orientation of the circles of Type II.
\item The multisection $\mathfrak s_2$ does not vanish on ${\mathcal D}({\mathfrak I})$.
\end{enumerate}
\end{lem}
\begin{proof}
Lemma \ref{frak s_2} is clear from the construction except for the following points.
Firstly, we consider the points of the moduli space $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ such that
one of the following two conditions is satisfied.
\begin{enumerate}
\item A circle in a Type II component $\Sigma_a$ hits a singular point of $\Sigma_a$
other than the root thereof.
\item A circle in a Type I-2 component $\Sigma_a$ hits a
singular point other than its outward interior special point.
\end{enumerate}
We have to glue various different strata meeting at such a point in
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$.
We have already given such a construction during the proof of Theorem \ref{KuraonMsph}.
By examining the way how the corresponding strata
are glued in $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$,
the gluing of the corresponding strata of $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
is performed in the same way.
We would like to note that the phenomenon spelled out in the proof of Lemma \ref{Inotiso}
concerns {\it sphere} bubbles of the elements of $\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$,
while the phenomenon we concern here arises from {\it disc} bubbles.
Therefore they do not interfere with each other.
\par
Secondly, we need to make the choice of the
obstruction bundle of the Kuranishi structure of
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
in such a way that it is compatible with that of
$\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$.
Lemma \ref{Inotiso} describes the locus ${\mathcal D}({\mathfrak I})$ where the map $\mathfrak I$ fails to be an isomorphism.
Let $\nu_i=(\nu_i^+,\nu_i^-)$ be the sphere bubble as in the proof of Lemma \ref{Inotiso}.
By ${\mathfrak I}$, $\nu_i^+$ (resp. $\nu_i^-$) corresponds to a sphere bubble attached to a pseudoholomorphic sphere at a point
in the lower hemisphere (resp. the upper hemisphere).
In the construction of a Kuranishi structure on the moduli space of holomorphic spheres (or stable maps
of genus $0$), we take obstruction bundles in order to construct Kuranishi neighborhoods.
Let $E(\nu_i^{\pm})$ be a finite dimensional subspace in $\Omega^{0,1}((\nu_i^{\pm})^*TN)$ such that
the linearization operator of the holomorphic curve equation at $\nu_i^{\pm}$ becomes surjective modulo $E(\nu_i^{\pm})$.
In order to extend $E(\nu_i^{\pm})$ to a neighborhood of $\nu_i^{\pm}$, we used {\it obstruction bundle data}, introduced in \cite{foootech} Definition 17.7,
in particular, additional marked points $w_{i,j}^{\pm}$ and local transversals ${\mathcal D}_{i,j}^{\pm}$ to the image of $\nu_i^{\pm}$ at $\nu_i^{\pm}(w_{i,j}^{\pm})$.
For $\nu_i:{\mathbb C}P^1 \to N \times N$, we regard $E(\nu_i^{+})$ (resp. $E(\nu_i^{-})$) as a subspace of $\Omega^{0,1}((\nu_i)^*(TN \oplus 0)) \cong
\Omega^{0,1}((\nu_i^+)^*TN \oplus 0)$ (resp. $\Omega^{0,1}((\nu_i)^*(0 \oplus TN)) \cong \Omega^{0,1}(0\oplus (\nu_i^-)^*TN)$).
Note that the linearization operator of the pseudoholomorphic curve equation at $\nu_i$ is surjective
modulo $E(\nu_i^+) \oplus E(\nu_i^-) \subset \Omega^{0,1}((\nu_i)^*(T(N \times N)))$.
When we extend $E(\nu_i^{+})$ (resp. $E(\nu_i^{-})$) to a neighborhood of $\nu_i$, we use $w_{i,j}^+$ and ${\mathcal D}_{i,j}^+ \times N$
(resp. $w_{i,j}^-$ and $N \times {\mathcal D}_{i,j}^-$).
Namely, we use the data $w_{i,j}^+$ and ${\mathcal D}_{i,j}^+$ and the data $w_{i,j}^-$ and ${\mathcal D}_{i,j}^-$ separately, not simultaneously.
The Kuranishi structure can be taken to be invariant under stratawise involutions, since the Kuranishi structure is constructed
by induction on the energy and we can keep the finite symmetries as in the explanation in Remark \ref{remonmult}.
The existence of a multisection in the statement follows from the general theory of Kuranishi structures once the following point is taken into account.
By Lemma \ref{Inotiso}, there is a subset ${\mathcal D}({\mathfrak I})$ of codimension at least $2$ such that $\mathfrak I$ is an isomorphism outside
${\mathcal D}({\mathfrak I})$.
Since the expected dimension of $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ is $0$, $\mathfrak s_2$ can be chosen such that
$\mathfrak s_2$ does not vanish on ${\mathcal D}({\mathfrak I})$.
\end{proof}
\begin{rem}\label{reasonforC}
Generally, note that we can pull back a Kuranishi structure ${\mathfrak K}$ on
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
to a Kuranishi structure on the space
$\widehat{\mathcal M}(J_{N\times N};\rho;P_1,P_2,P_0)$ in a canonical way,
if the next condition (*) is satisfied.
By construction of the Kuranishi structure
we take a sufficiently dense finite subset $\frak P \subset
\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$
and for each $\frak x \in \frak P$ we take a finite
dimensional subspace $E_0(\frak x)$ of
$
\Omega^{0,1}(\Sigma_{\mathfrak x}, u_{\mathfrak x}^*TN)=
C^{\infty}(\Sigma_{\frak x}, \Lambda^{0,1} \otimes u_{\mathfrak x}^*TN)
$,
where $(\Sigma_{\frak x}, \vec{z}_{\frak x}^+, u_{\frak x}: \Sigma_{\frak x} \to N)$
is a stable map appearing in $\frak x$.
The subspace $E_0(\frak x)$ consists of smooth sections with compact
support away from the nodes.
Moreover the union of $E_0(\frak x)$ and the image of the linearized
operator of the Cauchy-Riemann equation spans the
space $\Omega^{0,1}(\Sigma_{\mathfrak x}, u_{\mathfrak x}^*TN)$.
(See section 12 in \cite{FO}.)
Now we require
\begin{enumerate}
\item[(*)]
The support of any element of $E_0(\frak x)$ does not intersect
the circles constituting $\frak x
\in \mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$.
\end{enumerate}
We show below that the condition (*) implies that the Kuranishi structure $\mathfrak K$ can be pulled back
to $\widehat{\mathcal M}(J_{N\times N};\rho;P_1, P_2, P_0)$.
\par
We recall the construction of the Kuranishi structure on
$\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$ in Theorem \ref{KuraonMsph}
a bit more.
For each $\frak x
\in \frak P$ we take a sufficiently small closed neighborhood $U(\frak x)$ of
$\frak x$ in $\mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$.
Let $\frak y \in \mathcal M^{\text{\rm sph}}_3(J_N;\rho;\mathcal C;P_1,P_2,P_0)$. We consider
$\frak P(\frak y)
= \{ \frak x \in \frak P \mid \frak y \in U(\frak x)\}$.
Using the complex linear part of parallel transport along minimal geodesics
as in Definition 17.15, Lemma 18.6, Definition 18.7 in \cite{foootech},
we transform a subspace $E_0(\frak x)$ with $\frak x \in \frak P(\frak y)$
to a subspace of $\Omega^{0,1} (\Sigma_{\mathfrak y}, u_{\mathfrak y}^*TN)$,
where $(\Sigma_{\frak y}, \vec{z}_{\frak y}^+, u_{\frak y}: \Sigma_{\frak y} \to N)$
is a stable map appearing in $\frak y$.
We fix various data such as obstruction bundle data on $\frak x$ for our construction. (See \cite[Definition 17.7]{foootech}.)
We define
$E(\frak y) \subset \Omega^{0,1} (\Sigma_{\mathfrak y}, u_{\mathfrak y}^*TN)$
as the sum of those subspaces for various $\frak x \in \frak P(\frak y)$.
(We remark that this sum can be taken to be a direct sum \cite[Lemma 18.8]{foootech}.)
\par
By taking $U(\frak x)$ small we may and will require
that the supports of elements of $E(\frak y)$ are disjoint from the
circles constituting $\frak y$.
Now let $\tauilde{\psihi} \def\F{\Phirak y} \in \varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0)$ with
${\muathfrak I}(\tauilde{\psihi} \def\F{\Phirak y}) = {\psihi} \def\F{\Phirak y}$.
Using the fact that
the supports of elements of $E(\psihi} \def\F{\Phirak y)$ are disjoint from the
circles constituting $\psihi} \def\F{\Phirak y$ we can lift $E(\psihi} \def\F{\Phirak y)$
to a subspace $E(\tauilde{\psihi} \def\F{\Phirak y})$ of
$\Omegamega^{0,1}(\Sigmama_{\tauilde{\muathfrak y}}, u_{\tauilde{\psihi} \def\F{\Phirak y}}^{ *}TN)$, where
$(\Sigmama_{\tauilde{\psihi} \def\F{\Phirak y}}, \vec{z}_{\tauilde{\psihi} \def\F{\Phirak y}}^+, u_{\tauilde{\psihi} \def\F{\Phirak y}}: \Sigmama_{\tauilde{\psihi} \def\F{\Phirak y}} \tauo N)$
is a stable map appearing in $\tauilde{\psihi} \def\F{\Phirak y}$.
We use $E(\tauilde{\psihi} \def\F{\Phirak y})$ as the obstruction bundle to define the
lift of our Kuranishi structure.
See also Remark \rhoef{reasonforC2}.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
Let $Z$ be a compact metrizable space and
$\muathfrak K_{0}^Z$, $\muathfrak K_{1}^Z$ its Kuranishi structures with orientation.
Let $\muathfrak s_0^Z$ and $\muathfrak s_1^Z$ be multisections of the Kuranishi structures
$\muathfrak K_{0}^Z$ and $\muathfrak K_1^Z$, respectively.
We say that $(\muathfrak K_{0}^Z,\muathfrak s_0^Z)$ is {\it homotopic} to
$(\muathfrak K_{1}^Z,\muathfrak s_1^Z)$ if there exists an
oriented Kuranishi structure ${\muathfrak K}^{Z \tauimes [0,1]}$ on $Z\tauimes [0,1]$ and its multisection
$\muathfrak s^{Z \tauimes [0,1]}$ which restricts to $(\muathfrak K_{0}^Z,\muathfrak s_0^Z)$ and
$(\muathfrak K_{1}^Z,\muathfrak s_1^Z)$ at $Z\tauimes \{0\}$ and $Z\tauimes \{1\}$,
respectively. We call such $({\muathfrak K}^{Z \tauimes [0,1]}, \muathfrak s^{Z \tauimes [0,1]})$ a homotopy between
$(\muathfrak K_{0}^Z,\muathfrak s_0^Z)$ and $(\muathfrak K_{1}^Z,\muathfrak s_1^Z)$.
\psiar
The moduli space $\varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0)$ is stratified according to combinatorial types.
For ${\muathbf u} \in \varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0)$, we decompose the domain of ${\muathbf u}$
into disc components and sphere components.
An {\it extended disc component} of $\muathbf u$ is, by definition, the union of a disc component $D_a({\muathbf u})$ and all trees of spheres rooted on
$D_a({\muathbf u})$. We denote it by $\varphiidehat{D}_a({\muathbf u})$.
An extended disc component is said to be of Type I (resp. Type II), if the corresponding component of
${\muathfrak I}({\muathbf u})$, i.e., the double of $D_a({\muathbf u})$,
is of Type I (resp. Type II).
The involution $\tauau_*$ acts on each extended disc component.
In particular, $\tauau_*$ acting on an extended disc component $\varphiidehat{D}_a({\muathbf u})$ of Type II is compatible with the inversion of the orientation of the circle
on the component of ${\muathfrak I}({\muathbf u})$ of Type II, which is the double of the disc component $D_a({\muathbf u})$, see
Remark \rhoef{inversionTypeII}.
\betaegin{equation}gin{lem}\lambda} \def\La{\Lambdaambdabel{htpy}
For the pull-back ${\muathfrak I}^*({\muathfrak K}_2,{\muathfrak s}_2)$ and $({\muathfrak K}_1,{\muathfrak s}_1)$,
there is a homotopy $({\muathbf K}, {\muathbf s})$ between them such that it is invariant under $\tauau_*$ on each extended disc component of Type II
acting on the first factor of $\varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0) \tauimes [0,1]$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{lem}
\betaegin{equation}gin{proof}
By Proposition \rhoef{frak s_2},
the moduli space $\varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0)$ has the pair ${\muathfrak I}^*({\muathfrak K}_2,{\muathfrak s}_2)$
of Kuranishi structure and multisection, which are invariant under the inversion of the orientation of circles.
We also have another such pair $({\muathfrak K}_1,{\muathfrak s}_1)$.
Then the standard theory of Kuranishi structure shows the existence of the desired homotopy.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\betaegin{equation}gin{lem}\lambda} \def\La{\Lambdaambdabel{calIcoincidence}
We have
$$
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaum_{\betaegin{equation}ta} n_{\betaegin{equation}ta} =
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)\rhoight)^{\muathfrak s_2}.
$$
Here the sum is taken over the class $\betaegin{equation}ta \in \ifmmode{\muathbb P}\else{$\muathbb P$}\psihi} \def\F{\Phiii(\muathbb{D}elta_N) =\psii_2(N\tauimes N, \muathbb{D}elta_N) /\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaim$ whose double belongs to
class $\rhoho \in \psii_2(N)/\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaim$ and the virtual dimension of
$\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$ is zero.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{lem}
\betaegin{equation}gin{proof}
If the moduli space $\varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0) \tauimes [0,1]$ had no codimension $1$
boundary in the sense of Kuranishi structure,
the existence of the homotopy $({\muathbf K}, {\muathbf s})$ in Lemma \rhoef{htpy} would immediately imply the conclusion.
But in reality, there exists a codimension 1 boundary.
However the codimension $1$ boundary of $\varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0) \tauimes [0,1]$
consists of elements with at least one component of
Type II.
Since ${\muathbf s}$ is invariant under the action $\tauau_*$ on the disc component of Type II, the contribution from the boundary cancels
as in the proof of unobstructedness of the diagonal in
$(N\tauimes N, - {\rhom pr}_1^*\omega} \def\O{\Omegamega + {\rhom pr}_2^* \omega} \def\O{\Omegamega)$ in Subsection \rhoef{1.9(1)}.
Hence the proof.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\betaegin{equation}gin{rem}
Even though the dimension of a space $Z$ with Kuranishi structure is $0$,
codimension $1$ boundary can be non-empty. This is because the dimension
of a space with Kuranishi structure is {\it virtual} dimension. After taking a suitable
multi-valued perturbation, its zero set does not meet codimension $1$ boundary.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
We now consider the forgetful map:
\betaegin{equation}gin{eqnarray}
\muathfrak F :
\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)
&\tauo& \muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;P_1,P_2,P_0),
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{eqnarray}
which is defined by forgetting all the circles in the admissible system of circles.
We recall from Lemmas \rhoef{moduliqp} and \rhoef{frak s_2} that both moduli spaces
$\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;P_1,P_2,P_0)$ and
$\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$
carry Kuranishi structures.
We have also used multisections on
$\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;P_1,P_2,P_0)$ and
on $\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$,
denoted by $\muathfrak s_0$ and $\muathfrak s_2$ respectively.
\betaegin{equation}gin{lem}\lambda} \def\La{\Lambdaambdabel{forgettypeII}
Let $(\muathfrak K_{2},\muathfrak s_2)$ be a pair of Kuranishi structures and multisections on $\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$
in Lemma \rhoef{frak s_2}.
Then $(\muathfrak K_{2},\muathfrak s_2)$ is homotopic to the pull-back $\muathfrak F^*(\muathfrak K_{0},\muathfrak s_0)$.
Moreover, there is a homotopy between them, which is invariant under the inversion of the orientation of the Type II circles.
In particular we have
$$
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)\rhoight)^{\muathfrak s_2}
=
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\;P_1,P_2,P_0)\rhoight)^{\muathfrak s_0},
$$
if the virtual dimensions of the moduli spaces of the both hand sides are zero.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{lem}
\betaegin{equation}gin{proof}
The existence of a homotopy between Kuranishi structures ${\muathfrak K}_2$ and ${\muathfrak F}^*{\muathfrak K}_0$
is again a consequence of general theory, once the following point is taken into account.
Note that ${\muathfrak F}^*{\muathfrak s}_0$ and ${\muathfrak s}_2$ are invariant under the inversion of the orientation of circles of Type II.
Then we can take a homotopy, which is also invariant under the inversion of the orientation of circles of Type II.
We note that there are several components of
$\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$
which are of codimension $0$ or $1$ and contracted
by $\muathfrak F$.
Note that we took $\muathfrak s_0$ in such a way that its zero set does not contain elements with domains of at least two irreducible components,
see the paragraph right after Proposition \rhoef{regoripres}.
Hence the zeros of ${\muathfrak F}^*\muathfrak s_0$ are contained in the subset where ${\muathfrak F}$ gives an isomorphism and we can count them
with signed weights to obtain a rational number.
Namely, we have
$$
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)\rhoight)^{{\muathfrak F}^*\muathfrak s_0}
=
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\;P_1,P_2,P_0)\rhoight)^{\muathfrak s_0}.
$$
If the moduli space $\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$ had no codimension 1 boundary in the sense of
Kuranishi structure, the existence of a homotopy between $({\muathfrak K}_2, {\muathfrak s}_2)$ and ${\muathfrak F}^*({\muathfrak K}_0, {\muathfrak s}_0)$
would immediately imply that
$$
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)\rhoight)^{\muathfrak s_2}
=
\# \lambda} \def\La{\Lambdaeft(\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)\rhoight)^{{\muathfrak F}^*\muathfrak s_0},
$$
which would complete the proof.
However, $\muathcal M^{\tauext{\rhom sph}}_3(J_N;\rhoho;\muathcal C;P_1,P_2,P_0)$ has a codimension $1$ boundary, which consists of
stable maps with admissible systems of circles containing at least one circle of Type II.
All the contribution from those components
cancel out by the involution, which inverts the orientation
of the circles of Type II. (This is a geometric way
to see the vanishing of $\muathfrak m_0(1)$ in the chain level.
We have already checked that it occurs {\it with sign} in Subsection 6.3.1.
See also Remark \rhoef{inversionTypeII} (3), (4).)
Hence the lemma.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\betaegin{equation}gin{rem}\lambda} \def\La{\Lambdaambdabel{reasonforC2}
The pull-back Kuranishi structure $\muathfrak F^*(\muathfrak K_{0},\muathfrak s_0)$ does not satisfy the condition (*)
appearing in Remark \rhoef{reasonforC}.
In fact the obstruction bundle of $\muathfrak F^*(\muathfrak K_{0},\muathfrak s_0)$ is
independent of the position of the circles.
Therefore we may not pull back $\muathfrak F^*(\muathfrak K_{0},\muathfrak s_0)$ to a Kuranishi structure on
$\varphiidehat{\muathcal M}(J_{N\tauimes N};\rhoho;P_1,P_2,P_0)$, while we can pull back the Kuranishi structure $\muathfrak K_{2}$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
By Lemmas \rhoef{moduliqp}, \rhoef{multiondisc}, \rhoef{calIcoincidence} and \rhoef{forgettypeII},
the proof of Theorem \rhoef{Proposition34.25} (2) is now complete.
\qed
\betaegin{equation}gin{proof}[Proof of Corollary \rhoef{qMassey}]
Viewing $N$ as a closed relatively spin Lagrangian submanifold
of $(N\tauimes N, -{\rhom pr}_1^* \omega} \def\O{\Omegamega_N + {\rhom pr}_2^* \omega} \def\O{\Omegamega_N)$,
we can construct a filtered $A_{\infty}$ structure
on $H(N;\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii})$ which is homotopy equivalent
to the filtered $A_{\infty}$ algebra given by Theorem \rhoef{thm:Ainfty}.
This is a consequence of Theorem W \chiite{fooobook1}.
See also Theorem A \chiite{fooobook1}.
Then (1) and (2) follow from
Theorem \rhoef{Proposition34.25}.
The assertion (3) follows from Theorem X \chiite{fooobook1}.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubsection{Calculation of Floer cohomology of ${\muathbb R} P^{2n+1}$}
\lambda} \def\La{\Lambdaambdabel{subsec:Appl2}
In this subsection, we apply the results proved in the previous
sections to calculate Floer cohomology of real projective space of odd dimension.
Since the case ${\muathbb R} P^1 \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset {\muathbb C} P^1$ is already discussed in Subsection 3.7.6 \chiite{fooobook1},
we consider ${\muathbb R} P^{2n+1}$ for $n>0$.
We note that ${\muathbb R} P^{2n+1} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset {\muathbb C} P^{2n+1}$ is monotone with
minimal Maslov index $2n+2 > 2$ if $n>0$.
Therefore by \chiite{Oh93} and Section 2.4 \chiite{fooobook1} Floer cohomology
over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}}$ is defined.
In this case we do not need to use the notion of Kuranishi structure and the technique of the virtual fundamental chain.
From the proof of Corollary \rhoef{Corollary34.22},
we can take $0$ as a bounding cochain.
Hereafter we omit the bounding
cochain $0$ from the notation.
By \chiite{Oh96} and Theorem D in \chiite{fooobook1},
we have a spectral sequence converging to the Floer cohomology.
Strictly speaking,
in \chiite{Oh96} the spectral sequence is constructed
over ${\muathbb Z}_2$ coefficients. However, we can generalize his results
to ones over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}}$ coefficients in a straightforward way,
as long as we take the
orientation problem, which is a new and crucial point of this calculation, into account.
Thus Oh's spectral sequence over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}}$
is enough for our calculation of this example.
(See Chapters 8 and 6 in \chiite{fooo06} for a
spectral sequence over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}}$ in more
general setting.)
\psiar
We use a relative spin structure in Proposition \rhoef{Proposition44.19} when $n$ is even and
a spin structure when $n$ is odd. We already checked that ${\muathbb R} P^{2n+1} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset {\muathbb C} P^{2n+1}$
has two inequivalent relative spin structures. The next theorem applies
to both of them.
\betaegin{equation}gin{thm}\lambda} \def\La{\Lambdaambdabel{Theorem44.24}
Let $n$ be any positive integer. Then the spectral sequence calculating $HF({\muathbb R} P^{2n+1},{\muathbb R} P^{2n+1};\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}})$
has unique nonzero differential
$$
d^{2n+1} : H^{2n+1}({\muathbb R} P^{2n+1};{\muathbb Z}) \chiong {\muathbb Z}
\lambda} \def\La{\Lambdaongrightarrow H^{0}({\muathbb R} P^{2n+1};{\muathbb Z}) \chiong {\muathbb Z}
$$
which is multiplication by $\psim 2$. In particular, we have
$$
HF({\muathbb R} P^{2n+1},{\muathbb R} P^{2n+1};\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}})
\chiong (\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}}/2\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}})^{\omega} \def\O{\Omegaplus(n+1)}.
$$
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{thm}
\betaegin{equation}gin{rem}\lambda} \def\La{\Lambdaambdabel{Remark44.25}
(1)
Floer cohomology of ${\muathbb R} P^m$ over ${\muathbb Z}_2$ is calculated in
\chiite{Oh93} and
is isomorphic to the ordinary cohomology.
This fact also follows from Theorem 34.16
in \chiite{fooo06}, which implies that
Floer cohomology of ${\muathbb R} P^m$ over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}_2}$
is isomorphic to the ordinary cohomology
over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{{\muathbb Z}_2}$.
\psiar
(2) Theorem \rhoef{Theorem44.24} gives an example where Floer cohomology
of the real point set is different from its ordinary cohomology.
Therefore it is necessary to use ${\muathbb Z}_2$ coefficients to study the
Arnold-Givental conjecture (see Chapter 8 \chiite{fooo06}).
\psiar
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
\betaegin{equation}gin{proof}[Proof of Theorem \rhoef{Theorem44.24}] If $n\gamma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\G{\Gammae 1$,
the set $\psii_2({\muathbb C} P^{2n+1},{\muathbb R} P^{2n+1})$ has exactly one element $B_1$
satisfying $\muu_{{\muathbb R} P^{2n+1}}(B_1) = 2n+2$,
which is the minimal Maslov number of
${\muathbb R} P^{2n+1}$.
By the monotonicity of ${\muathbb R} P^{2n+1} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset {\muathbb C} P^{2n+1}$,
a degree counting argument shows that
only ${\muathbb C}M_2(J;B_1)$, among the moduli spaces
${\muathbb C}M_2(J;B), \, B \in \psii_2({\muathbb C} P^{2n+1},{\muathbb R} P^{2n+1})$,
contributes to the differential of the spectral sequence.
First of all, we note that $\tauau$ induces an isomorphism
modulo orientations
\betaegin{equation}gin{equation}\lambda} \def\La{\Lambdaambdabel{44.27}
\tauau_* : {\muathbb C}M_2(J;B_1) \lambda} \def\La{\Lambdaongrightarrow {\muathbb C}M_2(J;B_1).
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
Later we examine whether $\tauau_{\alphast}$ preserves the orientation or not, after we specify relative spin structures.
Since $\omega} \def\O{\Omegamega [B_1]$ is the smallest positive
symplectic area,
${\muathbb C}M_2(J;B_1)$ has codimension $1$ boundary corresponding to the strata
consisting of elements which have a constant disc component with two marked points and a disc bubble.
However when we consider the evaluation map
$ev=(ev_0, ev_1): {\muathbb C}M_2(J;B_1) \tauo {\muathbb R} P^{2n+1} \tauimes {\muathbb R} P^{2n+1}$,
these strata are mapped to the diagonal set whose codimension is bigger than $2$.
Thus we can define
fundamental cycle over ${\muathbb Z}$ of $ev({\muathbb C}M_2(J;B_1))$ which we denote by
$[ev({\muathbb C}M_2(J;B_1))]$.
We also note
$$
\delta} \def\muathbb{D}{\muathbb{D}eltaim {\muathbb C}M_2(J;B_1) = 2n +2 + 2n + 1 + 2 - 3 = 2 \delta} \def\muathbb{D}{\muathbb{D}eltaim {\muathbb R} P^{2n+1}.
$$
\betaegin{equation}gin{lem}\lambda} \def\La{\Lambdaambdabel{Lemma44.28} Consider the evaluation map
$ev: {\muathbb C}M_2(J;B_1) \tauo {\muathbb R} P^{2n+1} \tauimes {\muathbb R} P^{2n+1}$. Then we have
$$
[ev({\muathbb C}M_2(J;B_1))] = \psim 2[{\muathbb R} P^{2n+1} \tauimes {\muathbb R} P^{2n+1}]
$$
where $[{\muathbb R} P^{2n+1} \tauimes {\muathbb R} P^{2n+1}]$ is the fundamental cycle of ${\muathbb R} P^{2n+1} \tauimes {\muathbb R} P^{2n+1}$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{lem}
\betaegin{equation}gin{proof}
For any two distinct points $p,q \in {\muathbb C} P^{2n+1}$ there exists
a holomorphic map $w : S^2={\muathbb C} \chiup \{\infty \} \tauo {\muathbb C} P^{2n+1}$
of degree $1$ such that $w(0) = p$, $w(\infty) = q$, which is unique up to the action of ${\muathbb C} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaetminus \{0\} \chiong \tauext{\rhom Aut}({\muathbb C} P^1;0,\infty)$.
In case $p,q \in {\muathbb R} P^{2n+1}$ the uniqueness implies that
$w(\omega} \def\O{\Omegaverline z) = \tauau w(c z)$ for some $c \in {\muathbb C} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaetminus \{0\}$.
Using this equality twice, we have $w(z) = w(\vert c \vert^2 z)$. In particular, we find that $\vert c \vert =1$.
Let $a$ be a square root of $c$ and
set $w'(z) = w (a z)$. Since $\omega} \def\O{\Omegaverline{a} c/a = 1$, we obtain $w' ({\omega} \def\O{\Omegaverline z}) = \tauau w' (z)$.
(Note that $w$ and $w'$ define the same element in the moduli space ${\muathbb C}M^{\tauext{\rhom sph}}_2(J; [{\muathbb C}P^1])$.)
Thus the restriction of $w$ to the upper or lower half plane defines elements $w_u$ or $w_l \in
{\muathbb C}M_2(J;B_1)$. Namely there exist $w_u, w_l \in {\muathbb C}M_2(J;B_1)$
such that $ev(w_u) = ev(w_l) = (p,q)$.
Conversely, any such elements determine a degree one curve by
the reflection principle.
\psiar
To complete the proof of Lemma \rhoef{Lemma44.28} we have to show that
the orientations of the evaluation map $ev$ at $w_u$ and $w_l$ coincide.
Note that
$\tauau_{\alphast}(w_u)=w_l$ and $\tauau_{\alphast}\chiirc ev=ev$.
Thus it suffices to show that $\tauau_{\alphast}$ in \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonqref{44.27} preserves the orientation.
First, we consider the case of ${\muathbb R} P^{4n+3}$.
In this case, ${\muathbb R} P^{4n+3}$ is $\tauau$-relatively spin. Therefore, by Theorem \rhoef{Proposition38.11},
the map (\rhoef{44.27}) is orientation preserving,
because
$$
\psihi} \def\F{\Phirac{1}{2}\muu_{{\muathbb R} P^{4n+3}}(B_1) + 2 = 2n+4
$$
is even. We next consider the case of ${\muathbb R} P^{4n+1}$. We pick its relative spin structure
$[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]$. By Theorem \rhoef{Proposition38.11} again, the map
$$
\tauau_* : {\muathbb C}M_2(J;B_1)^{\tauau^*[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]} \lambda} \def\La{\Lambdaongrightarrow {\muathbb C}M_2(J;B_1)^{[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]}
$$
is orientation reversing, because
$$
\psihi} \def\F{\Phirac{1}{2}\muu_{{\muathbb R} P^{4n+1}}(B_1) + 2 = 2n+3
$$
is odd. On the other hand, by Proposition \rhoef{Proposition44.19}
we have $\tauau^*[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)] \nue [(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]$.
Let $\muathfrak x$ be the
unique nonzero element
of $H^2({\muathbb C} P^{4n+1},{\muathbb R} P^{4n+1};{\muathbb Z}_2) \chiong {\muathbb Z}_2$.
It is easy to see that
$\muathfrak x [B_1] \nue 0$. Then by Proposition \rhoef{Proposition44.16} the
identity induces an {\it orientation reversing} isomorphism
$$
{\muathbb C}M_2(J;B_1)^{\tauau^*[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]} \lambda} \def\La{\Lambdaongrightarrow {\muathbb C}M_2(J;B_1)^{[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]}.
$$
Therefore we can find that
$$
\tauau_* : {\muathbb C}M_2(J;B_1)^{[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]} \lambda} \def\La{\Lambdaongrightarrow {\muathbb C}M_2(J;B_1)^{[(V,\sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaigma)]}
$$
is orientation preserving.
This completes the proof of Lemma \rhoef{Lemma44.28}.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
Then
Lemma \rhoef{Lemma44.28} and the definition of
the differential $d$ imply
$$
d^{2n+1}(PD([p]))
= [ev_{0}\lambda} \def\La{\Lambdaeft( {\muathbb C}M_2(J;B_1) {}_{ev_1}\tauimes [p] \rhoight)]
= \psim 2PD[{\muathbb R} P^{2n+1}],
$$
which finishes the proof of Theorem \rhoef{Theorem44.24}.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{proof}
\betaegin{equation}gin{rem}
In Subsection 3.6.3 \chiite{fooobook1}, we introduced
the notion of {\it weak unobstructedness} and {\it weak bounding cochains} using the homotopy unit of the filtered
$A_{\infty}$ algebra.
We denote by ${\muathbb C}M _{\rhom weak} (L;\muathbb{L}ambdambda_{0,{\rhom nov}})$ the
set of all weak bounding cochains.
We also defined the potential
function $\muathfrak {PO}: {\muathbb C}M _{\rhom weak} (L;\muathbb{L}ambdambda_{0,{\rhom nov}})
\tauo \muathbb{L}ambdambda_{0,{\rhom nov}}^{+(0)}$,
where $\muathbb{L}ambdambda_{0,{\rhom nov}}^{+(0)}$ is the degree zero part
of $\muathbb{L}ambdambda_{0,{\rhom nov}}^+$.
Then the set of bounding cochains ${\muathbb C}M (L;\muathbb{L}ambdambda_{0,{\rhom nov}})$
is characterized by ${\muathbb C}M (L;\muathbb{L}ambdambda_{0,{\rhom nov}})=
\muathfrak {PO}^{-1}(0)$.
About the value of the potential function, we have the
following problem:
\betaegin{equation}gin{prob}\lambda} \def\La{\Lambdaambdabel{Problem44.26}
Let $L$ be a relatively spin Lagrangian submanifold
of a symplectic manifold $M$.
We assume that $L$ is weakly unobstructed
and that the Floer cohomology
$HF((L,b),(L,b);\muathbb{L}ambdambda_{0,{\rhom nov}}^F)$
deformed by $b \in {\muathbb C}M_{\omega} \def\O{\Omegaperatorname{weak}}(L)$
does not vanish for some field $F$.
In this situation, the question is whether $\muathfrak{PO}(b)$ is an eigenvalue of the operation
$$
c \muapsto c \chiup_Q c_1(M) : QH(M;\muathbb{L}ambdambda_{0,{\rhom nov}}^F) \lambda} \def\La{\Lambdaongrightarrow QH(M;\muathbb{L}ambdambda_{0,{\rhom nov}}^F).
$$
Here $(QH(M;\muathbb{L}ambdambda_{0,{\rhom nov}}^F), \chiup_Q )$
is the quantum cohomology ring of $M$ over
$\muathbb{L}ambdambda_{0,{\rhom nov}}^F$.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{prob}
Such a statement was made by M. Kontsevich in 2006 during a conference
of homological mirror symmetry at Vienna. (According to some physicists this
had been known to them before.) See also \chiite{Aur07}.
As we saw above, ${\muathbb R} P^{2n+1} \sigma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\S{\Sigmamaubset {\muathbb C} P^{2n+1}$, $n \gamma} \delta} \def\muathbb{D}{\muathbb{D}eltaef\G{\Gammaeq 1$, is unobstructed.
Since the minimal Maslov number is strictly greater than $2$, we find that any $b \in H^1({\muathbb R} P^{2n+1}; F) \omega} \def\O{\Omegatimes \muathbb{L}ambdambda_{0, \tauext{\rhom nov}}^F$ of
total degree $1$ is a bounding cochain, i.e.,
$\muathfrak{PO}(b)=0$, by the dimension counting argument.
On the other hand, Theorem \rhoef{Theorem44.24} shows that
the Floer cohomology does not vanish for $F={\muathbb Z}_2$, and
the eigenvalue is zero in the field $F={\muathbb Z}_2$ because
$c_1({\muathbb C} P^{2n+1})\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonquiv 0 \muod 2$.
Thus this is consistent with the problem.
(If we take $F=\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii$, the eigenvalue is not zero in $\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii$.
But Theorem \rhoef{Theorem44.24} shows that
the Floer cohomology over $\muathbb{L}ambdambda_{0,{\rhom nov}}^{\ifmmode{\muathbb Q}\else{$\muathbb Q$}\psihi} \def\F{\Phii}$ vanishes.
So the assumption of the problem is not satisfied in this case.)
Besides this, we prove this statement for the
case of Lagrangian fibers of smooth toric manifolds
in \chiite{fooomirror1}.
We do not have any counterexample to this statement at the time of writing this paper.
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{rem}
\subsection{Wall crossing term in \cite{fukaya;counting}}
\label{subsec:wall}
Let $M$ be a $6$-dimensional symplectic manifold and
let $L$ be
its relatively spin Lagrangian submanifold.
Suppose the Maslov index homomorphism $\mu_L
: H_2(M,L;{\mathbb Z}) \to {\mathbb Z}$ is zero.
In this situation the first named author \cite{fukaya;counting}
introduced an invariant
$$
\Psi_J : \mathcal M(L;\Lambda_{0,{\rm nov}}^{{\mathbb C}}) \to \Lambda_{0,{\rm nov}}^{+ {\mathbb C}}.
$$
In general, it depends on a compatible almost complex structure $J$ and
the difference $\Psi_J - \Psi_{J'}$ is an element of $\Lambda_{0,{\rm nov}}^{+ {\mathbb Q}}$.
\par
Let us consider the case where $\tau : M \to M$ is an
anti-symplectic involution and $L = \operatorname{Fix}\,\tau$.
We take
compatible almost complex structures $J_0$, $J_1$ such that
$\tau_* J_0 = - J_0$, $\tau_* J_1 = - J_1$.
Moreover, we assume that there exists a one-parameter family of compatible almost
complex structures
$\mathcal J = \{J_t \mid t\in [0,1]\}$ such that $\tau_* J_t = - J_t$.
We will study the difference
\begin{equation}\label{nowallcross}
\Psi_{J_1} - \Psi_{J_0}
\end{equation}
below.
Namely, we will study the wall crossing phenomenon by the method of this paper.
\par
Let $\alpha \in H_2(M;{\mathbb Z})$. Denote by
$\mathcal M_1(\alpha;J)$ the moduli space of $J$-holomorphic {\it spheres}
with one interior marked point and of homology class $\alpha$.
We have an evaluation map $ev : \mathcal M_1(\alpha;J) \to M$.
We assume
\begin{equation}\label{nonintassum}
ev(\mathcal M_1(\alpha;J_0)) \cap L = ev(\mathcal M_1(\alpha;J_1)) \cap L = \emptyset
\end{equation}
for any $\alpha \ne 0$. Since the virtual dimension of $\mathcal M_1(\alpha;J)$ is
$2$, (\ref{nonintassum}) holds in generic cases.
The space
\begin{equation}\label{wallcrossingmoduli}
\mathcal M_1(\alpha;\mathcal J;L)
= \bigcup_{t \in [0,1]} \{t\} \times \left(\mathcal M_1(\alpha;J_t) {}_{ev}\times_M L\right)
\end{equation}
has a Kuranishi structure of dimension $0$, which is fibered over $[0,1]$.
The assumption (\ref{nonintassum})
implies that (\ref{wallcrossingmoduli}) has no boundary.
Therefore its virtual fundamental cycle is well-defined and gives a rational
number, which we denote by $\# \mathcal M_1(\alpha;\mathcal J;L)$.
By Theorem 1.5 \cite{fukaya;counting} we have
$$
\Psi_{J_1} - \Psi_{J_0} = \sum_{\alpha}
\# \mathcal M_1(\alpha;\mathcal J;L)\,T^{\omega(\alpha)}.
$$
The involution naturally induces a map
$
\tau : \mathcal M_1(\alpha;\mathcal J;L) \to \mathcal M_1(\tau_*\alpha;\mathcal J;L)
$.
\begin{lem}\label{oripreversing}
The map
$\tau : \mathcal M_1(\alpha;\mathcal J;L) \to \mathcal M_1(\tau_*\alpha;\mathcal J;L)$
is orientation preserving.
\end{lem}
\begin{proof}
In the same way as Proposition \ref{Lemma38.9}, we can
prove that $\tau : \mathcal M_1(\alpha;J) \to \mathcal M_1(\tau_*\alpha;J)$ is orientation
reversing. In fact, this case is similar to the case
$k=-1$, $\mu_L(\beta) = 2c_1(\alpha) = 0$ and $m=1$ of Proposition \ref{Lemma38.9}.
Note that $\tau$ also reverses the orientation on $M$ if $\frac{1}{2}\dim_{{\mathbb R}}M$ is odd.
Therefore for any $t\in [0,1]$, $\tau$ respects the orientation on
$\mathcal M_1(\alpha;J_t){}_{ev}\times_M L$ and
that on $\mathcal M_1(\tau_{\ast}\alpha;J_t){}_{ev}\times_M L$.
Hence the lemma.
\end{proof}
Lemma \ref{oripreversing} implies that, in the case of the fixed point set of $\tau$,
the cancelation of the wall crossing term via the involution
does {\it not} occur, because of the sign.
Namely Formula (8.1) in the first named author's paper \cite{fukaya;counting} is wrong.
\section{Appendix: Review of Kuranishi structure -- orientation and
group action}
\label{sec:appendix}
In this appendix, we briefly review the orientation on a space with
Kuranishi structure and the notion of a group action on a space with Kuranishi structure for the readers' convenience.
For more detailed explanation, we refer to Sections A1.1 \cite{fooobook2}
and A1.3 \cite{fooobook2} for Subsections 7.1 and 7.2, respectively.
\subsection{Orientation}
\label{subsec:AOri}
To define an orientation on a space with Kuranishi structure,
we first recall the notion of a tangent bundle of it.
Let $\mathcal M$ be a compact topological space and let $\mathcal M$ have
a Kuranishi structure. That is, $\mathcal M$ has a collection
of a finite number of Kuranishi neighborhoods
$(V_p,E_p,\Gamma_p,\psi_p,s_p)$, $p\in \mathcal M$,
such that
\begin{enumerate}
\item[(k-1)]
$V_{p}$ is a finite dimensional smooth manifold which may have boundaries or corners;
\item[(k-2)]
$E_p$ is a finite dimensional real vector space and
$\dim V_p -\dim E_p$ is independent of $p$;
\item[(k-3)]
$\Gamma_{p}$ is a finite group acting smoothly and effectively on $V_{p}$,
and on $E_p$ linearly;
\item[(k-4)]
$s_{p}$, which is called a {\it Kuranishi map}, is a $\Gamma_{p}$-equivariant smooth section of
the vector bundle $E_{p}\times V_p \to V_p$ called an {\it obstruction bundle};
\item[(k-5)]
$\psi_{p}: s_{p}^{-1}(0)/\Gamma_{p} \to \mathcal M$ is a homeomorphism onto its image;
\item[(k-6)]
$\bigcup_p \psi_{p}(s_{p}^{-1}(0)/\Gamma_{p})=\mathcal M$;
\item[(k-7)]
the collection $\{(V_p,E_p,\Gamma_p,\psi_p,s_p)\}_{p\in \mathcal M}$ satisfies
certain compatibility conditions under coordinate change.
\end{enumerate}
See Definition A1.3 and Definition A1.5 in \cite{fooobook2} for the precise
definition and description of the {\it coordinate change} and the compatibility conditions in (k-7), respectively.
We denote by $\mathcal P$ the finite set of $p \in \mathcal M$ above.
By Lemma 6.3 \cite{FO}, we may assume that
$\{(V_p,E_p,\Gamma_p,\psi_p,s_p)\}_{p\in \mathcal P}$
is a {\it good coordinate system} in the sense of
Definition 6.1 in \cite{FO}.
In other words, there is a partial
order $<$ on $\mathcal P$ such that the following conditions hold:
Let $q < p$, $(p,q \in \mathcal P)$,
with
$
\psi_p(s_p^{-1}(0)/\Gamma_p) \cap \psi_q(s_q^{-1}(0)/\Gamma_q) \ne \emptyset
$.
Then there exist
\begin{enumerate}
\item[(gc-1)]
a $\Gamma_q$-invariant open subset $V_{pq}$ of $V_q$ such that
$$
\psi_q^{-1}(\psi_p(s_p^{-1}(0)/\Gamma_p) \cap \psi_q(s_q^{-1}(0)/\Gamma_q)) \subset V_{pq}/\Gamma_q,
$$
\item[(gc-2)]
an injective group homomorphism $h_{pq}:\Gamma_q \to \Gamma_p$,
\item[(gc-3)]
an
$h_{pq}$-equivariant smooth embedding $\phi_{pq} : V_{pq} \to V_p$
such that the induced map
$V_{pq}/\Gamma_{q} \to V_p/\Gamma_p$ is injective,
\item[(gc-4)]
an $h_{pq}$-equivariant embedding $\widehat{\phi}_{pq}:E_q\times V_{pq} \to E_p\times V_{p}$ of vector bundles which covers $\phi_{pq}$ and
satisfies
$$
\widehat{\phi}_{pq} \circ s_q =s_p \circ \phi_{pq}, \quad
\psi_q= \psi_p \circ \underline{\phi}_{pq}.
$$
Here $\underline{\phi}_{pq}:V_{pq}/\Gamma_q \to V_p/\Gamma_p$
is the map induced by $\phi_{pq}$.
\end{enumerate}
Moreover, if $r<q<p$ and
$\psi_p(s_p^{-1}(0)/\Gamma_p) \cap \psi_q(s_q^{-1}(0)/\Gamma_q)
\cap \psi_r(s_r^{-1}(0)/\Gamma_r) \ne \emptyset$, then there exists
\begin{enumerate}
\item[(gc-5)]
$\gamma_{pqr} \in \Gamma_p$
such that
$$
h_{pq} \circ h_{qr} =
\gamma_{pqr} \cdot
h_{pr} \cdot
\gamma^{-1}_{pqr}
, \quad
\phi_{pq} \circ \phi_{qr} = \gamma_{pqr} \cdot
\phi_{pr}, \quad
\widehat{\phi}_{pq} \circ \widehat{\phi}_{qr} = \gamma_{pqr} \cdot
\widehat{\phi}_{pr}.
$$
Here the second and third equalities
hold on $\phi_{qr}^{-1}(V_{pq}) \cap V_{qr}
\cap V_{pr}$ and on
$E_r\times(\phi_{qr}^{-1}(V_{pq}) \cap V_{qr}\cap V_{pr})$,
respectively.
\end{enumerate}
\par\smallskip
Now we identify a neighborhood of
$\phi_{pq}(V_{pq})$ in $V_p$ with a neighborhood of the zero section
of the normal bundle $N_{V_{pq}}V_p \to V_{pq}$.
Then the differential of the Kuranishi map $s_p$ along the fiber
direction defines an $h_{pq}$-equivariant bundle homomorphism
$$
d_{\text{fiber}}s_p : N_{V_{pq}}V_p \to E_p \times V_{pq}.
$$
See Lemma A1.58 \cite{fooobook2} and
also Theorems 13.2, 19.5 \cite{foootech} for detail.
\begin{defn}\label{def:tangent}
We say that the space $\mathcal M$ with Kuranishi structure
{\it has a tangent bundle} if $d_{\text{fiber}}s_p$
induces a bundle isomorphism
\begin{equation}\label{dfiber}
N_{V_{pq}}V_p \cong \frac{E_p \times V_{pq}}{\widehat{\phi}_{pq}(E_q\times V_{pq})}
\end{equation}
as $\Gamma_q$-equivariant bundles on a neighborhood of $V_{pq}\cap s_q^{-1}(0)$. (See also Chapter 2 \cite{foootech}.)
\end{defn}
\begin{defn}\label{def:oriKura}
Let $\mathcal M$ be a space with Kuranishi structure with a tangent bundle.
We say that the Kuranishi structure on $\mathcal M$ is {\it orientable} if
there is a trivialization of
$$
\Lambda^{\text{\rm top}}E^*_p \otimes \Lambda^{\text{\rm top}}TV_p
$$
which is compatible with the isomorphism (\ref{dfiber})
and whose homotopy class is preserved by
the $\Gamma_p$-action.
The {\it orientation} is the choice of the homotopy class of such a trivialization.
\end{defn}
Pick such a trivialization.
Suppose that $s_p$ is transverse to zero at $p$.
Then we define an orientation on the zero locus $s_p^{-1}(0)$
of the Kuranishi map $s_p$, which may be assumed so that $p \in s^{-1}_p(0)$, by
the following equation:
$$
E_{p} \times T_ps_p^{-1}(0) = T_p V_p.
$$
Since we pick a trivialization of
$\Lambda^{\text{\rm top}}E^*_p \otimes \Lambda^{\text{\rm top}}TV_p$ as in Definition \ref{def:oriKura}, the above equality
determines an orientation on $s_p^{-1}(0)$, and also on $s_p^{-1}(0)/\Gamma_p$.
See Section 8.2 \cite{fooobook2} for more detailed explanation of orientation on a space with
Kuranishi structure.
\subsection{Group action}
\label{subsec:Aaction}
We next recall the definitions of a finite group action on a space with Kuranishi structure
and its quotient space.
In this paper we used the ${\mathbb Z}_2$-action and its quotient space
(in the proof of Theorem \ref{Theorem34.20}).
\par
Let $\mathcal M$ be a compact topological space with
Kuranishi structure.
We first define the notion of an automorphism of Kuranishi structure.
\begin{defn}\label{def:auto}
Let $\varphi : \mathcal M \to \mathcal M$ be a homeomorphism of $\mathcal M$.
We say that it induces an
{\it automorphism of Kuranishi structure} if the following holds:
Let $p \in \mathcal M$ and $p' = \varphi(p)$. Then, for the
Kuranishi neighborhoods $(V_p,E_p,\Gamma_p,\psi_p,s_p)$ and
$(V_{p'},E_{p'},\Gamma_{p'},\psi_{p'},s_{p'})$ of $p$ and $p'$ respectively,
there exist $\rho_p : \Gamma_p \to \Gamma_{p'}$, $\varphi_p : V_p
\to V_{p'}$, and
$\widehat{\varphi}_p : E_p \to E_{p'}$ such that
\begin{enumerate}
\item[(au-1)]
$\rho_p$ is an isomorphism of groups;
\item[(au-2)]
$\varphi_p$
is a $\rho_p$-equivariant diffeomorphism;
\item[(au-3)]
$\widehat{\varphi}_p$ is a $\rho_p$-equivariant
bundle isomorphism which covers $\varphi_p$;
\item[(au-4)]
$s_{p'} \circ \varphi_p = \widehat{\varphi}_p \circ s_p$;
\item[(au-5)]
$
\psi_{p'}\circ \underline{\varphi}_p = \varphi \circ\psi_p,
$
where $\underline{\varphi}_p : s_p^{-1}(0)/\Gamma_p \to s_{p'}^{-1}(0)
/\Gamma_{p'}$ is the homeomorphism induced by
$\varphi_p \vert_{s_p^{-1}(0)}$.
\end{enumerate}
We require that $\rho_p$, $\varphi_p$, $\widehat{\varphi}_p$ above satisfy
the following compatibility conditions with the coordinate changes of the Kuranishi structure:
Let $q \in \psi_p(s_p^{-1}(0)/\Gamma_p)$ and
$q' \in \psi_{p'}(s_{p'}^{-1}(0)/\Gamma_{p'})$ such that
$\varphi(q) = q'$. Let
$(\widehat{\phi}_{pq},\phi_{pq},h_{pq})$,
$(\widehat{\phi}_{p'q'},\phi_{p'q'},h_{p'q'})$ be the coordinate changes.
Then there exists $\gamma_{pqp'q'} \in \Gamma_{p'}$ such that the following conditions hold:
\begin{enumerate}
\item[(auc-1)]
$\rho_p\circ h_{pq} =
\gamma_{pqp'q'} \cdot (h_{p'q'} \circ
\rho_q) \cdot \gamma_{pqp'q'}^{-1}$;
\item[(auc-2)]
$\varphi_p\circ \phi_{pq} = \gamma_{pqp'q'} \cdot (\phi_{p'q'} \circ
\varphi_q)$;
\item[(auc-3)]
$\widehat{\varphi}_p\circ \widehat{\phi}_{pq} = \gamma_{pqp'q'} \cdot (\widehat{\phi}_{p'q'} \circ
\widehat{\varphi}_q)$.
\end{enumerate}
Then we call $((\rho_p,\varphi_p,\widehat{\varphi}_p)_p;\varphi)$ an {\it automorphism
of the Kuranishi structure}.
\begin{rem}
Here $(\rho_p, \varphi_p, \widehat{\varphi}_p)_p$ are included as data of an automorphism.
\end{rem}
\end{defn}
\begin{defn}\label{def:conjauto}
We say that an automorphism $((\rho_p,\varphi_p,\widehat{\varphi}_p)_p;\varphi)$
is {\it conjugate} to
$((\rho'_p,\varphi'_p,\widehat{\varphi}'_p)_p;\varphi')$,
if $\varphi = \varphi'$ and
if there exists $\gamma_p \in \Gamma_{\varphi(p)}$ for each $p$ such that
\begin{enumerate}
\item[(cj-1)]
$\rho'_p = \gamma_p \cdot \rho_p \cdot \gamma^{-1}_p$;
\item[(cj-2)]
$\varphi'_p = \gamma_p \cdot \varphi_p$;
\item[(cj-3)]
$\widehat{\varphi}'_p = \gamma_p \cdot \widehat{\varphi}_p$.
\end{enumerate}
\end{defn}
The {\it composition} of two automorphisms is defined by
the following formula:
$$
\aligned
& ((\rho^1_p,\varphi^1_p,\widehat{\varphi}^1_p)_p;\varphi^1)\circ
((\rho_p^2,\varphi_p^2,\widehat{\varphi}_p^2)_p;\varphi^2) \\
&=
((\rho^1_{\varphi^2(p)} \circ \rho^2_p,\varphi^1_{\varphi^2(p)}\circ
\varphi^2_p,\widehat{\varphi}^1_{\varphi^2(p)}\circ\widehat{\varphi}^2_p)_p;\varphi^1\circ\varphi^2).
\endaligned
$$
Then we can easily check that the right hand side also satisfies the compatibility
conditions (auc-1)--(auc-3).
Moreover, we can find that the composition induces the composition
of the conjugacy classes of automorphisms.
\par
\begin{defn}\label{def:oripres}
An automorphism $((\rho_p,\varphi_p,\widehat{\varphi}_p)_p;\varphi)$ is {\it orientation preserving}, if it is
compatible with the trivialization of $\Lambda^{\text{\rm top}}E^*_p \otimes \Lambda^{\text{\rm top}}TV_p$.
\end{defn}
\par
Let $\text{\rm Aut}(\mathcal M)$ be the set of all conjugacy classes of the automorphisms of
$\mathcal M$ and let $\text{\rm Aut}_0(\mathcal M)$ be the set of all conjugacy classes of the orientation
preserving automorphisms of $\mathcal M$.
Both of them become groups by composition.
\begin{defn}\label{def:action}
Let $G$ be a finite group which acts on a compact space $\mathcal M$.
Assume that $\mathcal M$ has a Kuranishi structure.
We say that $G$ {\it acts} on $\mathcal M$ (as a space with Kuranishi structure) if, for
each $g\in G$, the homeomorphism $x \mapsto gx$, $\mathcal M \to \mathcal M$
induces an automorphism $g_*$ of the Kuranishi structure and
the composition of $g_*$ and $h_*$ is conjugate to $(gh)_*$.
In other words, an action of $G$ on $\mathcal M$ is a group homomorphism
$G \to \text{\rm Aut}(\mathcal M)$.
\par
An {\it involution} of a space with Kuranishi structure
is a ${\mathbb Z}_2$-action.
\end{defn}
Then we can show the following:
\begin{lem}[Lemma A1.49 \cite{fooobook2}]\label{lem:quot}
If a finite group $G$ acts on a space $\mathcal M$ with Kuranishi structure, then
the quotient space $\mathcal M/G$ has a Kuranishi structure.
\par
If $\mathcal M$ has a tangent bundle and the action preserves it, then
the quotient space has a tangent bundle.
If $\mathcal M$ is oriented and the action preserves the orientation, then the
quotient space has an orientation.
\end{lem}
\subsection{Invariant promotion}
\label{subsec:promotion}
As we promised in the proof of Theorem \ref{Theorem34.20},
we explain how we adapt the obstruction theory
developed in Subsections 7.2.6--7.2.10 \cite{fooobook2}
to promote
a filtered $A_{n,K}$ structure
to a filtered $A_{\infty}$ structure keeping
the symmetry \eqref{34.21}.
Since the modification is straightforward,
we give the outline for readers' convenience.
We use the same notation as in Subsection \ref{subsec:Ainfty}.
We first note that in our geometric setup the Lagrangian submanifold $L$
is the fixed point set of the involution $\tau$. So $\tau$ acts trivially on $C(L;{\mathbb Q})$ in Theorem \ref{thm:Ainfty}.
Moreover the induced map $\tau_{\ast}$ given by Definition \ref{def:inducedtau} is also trivial on the monoid $G(L)$. (See Remark \ref{rem:tau}.)
\par
Let $G\subset {\mathbb R}_{\ge 0}\times 2{\mathbb Z}$ be a monoid as in Subsection \ref{subsec:Ainfty}. We denote by $\beta$
an element of $G$.
Let $R$ be a field containing ${\mathbb Q}$ and
$\overline{C}$ a graded $R$-module.
Put $C=\overline{C}\otimes \Lambda_{0,\text{nov}}^R$.
Following Subsection 7.2.6 \cite{fooobook2}, we use the Hochschild cohomology to describe the obstruction to the promotion.
In this article we use
$\overline{C}^e =\overline{C} \otimes R[e,e^{-1}]$
instead of $\overline{C}$ to encode the data of the Maslov index
appearing in \eqref{34.21}.
Here
$e$ is the formal variable in $\Lambda_{0,\text{nov}}^R$.
Note that the promotion is made by induction on the partial order
$<$ on the set $G\times {\mathbb Z}_{\ge 0}$ which is absolutely independent of
the variable $e$. Thus the obstruction theory given in Subsections 7.2.6--7.2.10 \cite{fooobook2} also works on $\overline{C}^e$.
Let $\{\mathfrak m_{k,\beta}\}$ and $\{ \mathfrak f_{k,\beta}\}$ be a filtered $A_{n,K}$ algebra structure (Definition \ref{def:AnK}) and a filtered $A_{n,K}$ homomorphism (Definition \ref{def:AnKhom}). They are
$R$ linear maps from $B_k(\overline{C}[1])$ to $\overline{C}$.
We naturally extend them as $R[e,e^{-1}]$ module homomorphisms and denote the extensions by the same symbols.
We put
\begin{equation}\label{mkeandfke}
\aligned
\mathfrak{m}^e_{k,\beta} & = \mathfrak{m}_{k,\beta}e^{\text{pr}_2(\beta)/2} \quad : B_k(\overline{C}^e[1]) \to \overline{C}^e[1]\\
\mathfrak{f}^e_{k,\beta} & = \mathfrak{f}_{k,\beta}e^{\text{pr}_2(\beta)/2} \quad : B_k(\overline{C}^e[1]) \to \overline{C}^e[1],
\endaligned
\end{equation}
where $\text{pr}_2 : G\subset {\mathbb R}_{\ge 0}\times 2{\mathbb Z} \to 2{\mathbb Z}$ is the projection to the second factor.
In the geometric situation $\text{pr}_2$ is the Maslov index $\mu$.
(See \eqref{eq:defmk}.)
For each $K$ we have a map
\begin{equation}\label{OP}
\text{Op} ~:~ B_K\overline{C}^e[1] \to B_K\overline{C}^e[1]
\end{equation}
defined by
$$
\text{Op} ~(a_1x_1 \otimes \dots \otimes a_K x_K)=(-1)^{\ast}
(a_K^{\dag}x_K \otimes \dots \otimes a_1^{\dag}x_1)
$$
for $a_i=\sum_j c_j e^{\mu_j} \in R[e,e^{-1}]$ and $x_i \in \overline{C}$.
Here
\begin{equation}\label{opsign}
\ast = K+ 1 + \sum_{1\le i < j \le K}
\deg'x_i\deg'x_j
\end{equation}
and
\begin{equation}\label{adag}
a_i^{\dag} = \sum_j c_j (-e)^{\mu_j}.
\end{equation}
Obviously we have $\text{Op} \circ \text{Op}=\text{\rm id}$.
\begin{defn}\label{def:Opinv}
An $R[e,e^{-1}]$ module homomorphism $\mathfrak g \in \text{Hom}(B_K(\overline{C}^e[1]), \overline{C}^e[1])$ is called {\it $\text{\rm Op}$-invariant} if $\mathfrak g \circ \text{Op} = \text{Op} \circ \mathfrak g$.
\end{defn}
Then it is easy to check the following. Recall that $\tau_{\ast}\beta=\beta$ for $\beta \in G$.
\begin{lem}\label{lem:Opmkbeta}
A filtered $A_{n,K}$ structure $\{ \mathfrak m_{k,\beta} \}$ satisfies
\eqref{34.21} if and only if $\mathfrak m^e_{k,\beta}$ defined by
\eqref{mkeandfke} is $\text{\rm Op}$-invariant.
\end{lem}
\begin{defn}\label{def:OpinvAnK}
A filtered $A_{n,K}$ algebra $(C, \{ \mathfrak m_{k,\beta} \})$ is called
an {\it $\text{\rm Op}$-invariant filtered $A_{n,K}$ algebra} if
$\{ \mathfrak m_{k,\beta}^e \}$ is $\text{\rm Op}$-invariant.
A filtered $A_{n,K}$ homomorphism $\{ \mathfrak f_{k,\beta} \}$
is called {\it $\text{\rm Op}$-invariant} if $\{ \mathfrak f_{k,\beta}^e \}$
is $\text{\rm Op}$-invariant.
We define an {\it $\text{\rm Op}$-invariant filtered $A_{n,K}$ homotopy equivalence} in a similar way.
\end{defn}
The following is the precise statement of our invariant version of Theorem \ref{ext(n,K)} (=Theorem 7.2.72 \cite{fooobook2}) which is
used in the proof of Theorem \ref{Theorem34.20}.
\begin{thm}\label{invariantext(n,K)}
Let $C_1$ be an $\text{\rm Op}$-invariant filtered $A_{n,K}$ algebra and
$C_2$ an $\text{\rm Op}$-invariant filtered $A_{n',K'}$ algebra such that
$(n,K) < (n',K')$.
Let ${\mathfrak h}:C_1 \to C_2$
be an $\text{\rm Op}$-invariant filtered $A_{n,K}$ homomorphism.
Suppose that ${\mathfrak h}$ is an $\text{\rm Op}$-invariant
filtered $A_{n,K}$ homotopy equivalence.
Then there exist an $\text{\rm Op}$-invariant filtered $A_{n',K'}$ algebra structure on $C_1$
extending the given $\text{\rm Op}$-invariant filtered $A_{n,K}$ algebra structure and an $\text{\rm Op}$-invariant filtered $A_{n',K'}$ homotopy equivalence
$C_1 \to C_2$ extending the given
$\text{\rm Op}$-invariant filtered $A_{n,K}$ homotopy equivalence
${\mathfrak h}$.
\end{thm}
\begin{proof}
To prove this theorem,
we mimic the obstruction theory to promote
a filtered $A_{n,K}$ structure
to a filtered $A_{\infty}$ structure
given in Subsections 7.2.6--7.2.10 \cite{fooobook2}.
\par
Let $(C, \{ \mathfrak m_{k,\beta} \})$ be a filtered $A_{n,K}$ algebra. As mentioned before, we first rewrite the obstruction theory
by using $\overline{C}^e$ instead of $\overline{C}$.
This is done by extending the coefficient ring $R$ to
$R[e,e^{-1}]$ and replacing $\mathfrak m_{k,\beta}$ by $\mathfrak m^e_{k,\beta}$ in \eqref{mkeandfke} as follows:
We put $\overline{\mathfrak m}_{k}=
\mathfrak m_{k,\beta_0}$ for $\beta_0=(0,0)$.
(Note that $\overline{\mathfrak m}_{k}=\mathfrak m^e_{k, \beta_0}$.)
We naturally extend $\overline{\mathfrak m}_{k}$ to an
$R[e,e^{-1}]$ module homomorphism
$B_k(\overline{C}^e[1]) \to \overline{C}^e[1]$.
By abuse of notation we also write $\overline{\mathfrak m}_{k}~:~B_k(\overline{C}^e[1]) \to \overline{C}^e[1]$.
\par
As in the proof of Theorem 7.2.72 \cite{fooobook2}, we may assume that
$(n,K)<(n',K')=(n+1,K-1)$ or $(n,K)=(n,0)<(n',K')=(0,n+1)$ to consider
the promotion.
We consider an $R[e,e^{-1}]$-module
$
\text{Hom}(B_{K'}\overline{C}^e[1], \overline{C}^e[1])
$
and
define the coboundary operator $\delta_1$ on it by
\begin{equation}\label{def:Hcoboundary}
\delta_1(\varphi) = \overline{\mathfrak m}_1 \circ \varphi
+ (-1)^{\deg\varphi +1}\varphi \circ \widehat{\overline {\mathfrak m}}_1
\end{equation}
for $\varphi \in \text{Hom}(B_{K'}\overline{C}^e[1], \overline{C}^e[1])$.
Here $\widehat{\overline {\mathfrak m}}_1: B\overline{C}^e[1] \to
B\overline{C}^e[1]$ is the coderivation induced by
$\overline{\mathfrak
m}_1$ on $\overline{C}^e$ as in \eqref{eq:hatmk}.
We denote the $\delta_1$-cohomology by
\begin{equation}\label{def:Hoch}
H(\text{Hom}(B_{K'}\overline{C}^e[1], \overline{C}^e[1]),\delta_1)
\end{equation}
and call it the {\it Hochschild cohomology} of $\overline{C}^e$.
We modify the definition of the obstruction class as follows.
As in the proof of Lemma 7.2.74 \cite{fooobook2}
we put $(\Vert \beta \Vert, k)=(n',K')$. (See \eqref{def:betanorm} for the definition of $\Vert \beta \Vert$.)
Then, replacing $\mathfrak m_{k,\beta}$
by $\mathfrak m_{k,\beta}^e$,
(7.2.75) \cite{fooobook2} is modified so that
$$
\sum_{\beta_1+\beta_2 = \beta, k_1+k_2 = k+1, (k_i,\beta_i) \ne (k,
\beta)} \sum_i
(-1)^{\deg'\text{\bf x}_i^{(1)}
}\mathfrak m^e_{k_2,\beta_2}\left(\text{\bf x}_i^{(1)} ,
\mathfrak m^e_{k_1,\beta_1}(\text{\bf x}_i^{(2)}),
\text{\bf x}_i^{(3)}\right),
$$
where $\text{\bf x} \in B_{K'}\overline{C}^e[1]$.
Note that $e^{\text{pr}_2(\beta_1)/2}e^{\text{pr}_2(\beta_2)/2}=e^{\text{pr}_2(\beta)/2}$.
Then this defines an element
$$
o_{K',\beta}^e(C) \in \text{Hom}(B_{K'}\overline{C}^e[1],\overline{C}^e[1])
$$
which is a $\delta_1$-cocycle. Thus
we can define the {\it obstruction class}
\begin{equation}\label{def:obst}
[o_{K',\beta}^e(C)] \in H(\text{Hom}(B_{K'}\overline{C}^e[1], \overline{C}^e[1]),\delta_1)
\end{equation}
for each $\Vert \beta \Vert =n'$.
Under this modification
Lemma 7.2.74 \cite{fooobook2} holds for $\overline{C}^e$ as well.
\psiar
Now we consider the $\tauext{Op}$-invariant version.
The map $\tauext{Op}$ defined by \varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonqref{OP} acts on
$B_{K'}\omega} \def\O{\Omegaverline{C}^e[1]$, $\omega} \def\O{\Omegaverline{C}^e[1]$ and so
on
$\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1])$
as involution.
We decompose
$\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1])$
so that
\betaegin{equation}gin{equation}
\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1]) =
\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1])^{\tauext{Op}}
\omega} \def\O{\Omegaplus
\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1])^{-\tauext{Op}},
\varphiepsilon} \delta} \def\muathbb{D}{\muathbb{D}eltaef\ep{\epsilonnd{equation}
where $\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1])^{\tauext{Op}}$ is the $\tauext{Op}$-invariant part and
$\tauext{Hom } (B_{K'}\omega} \def\O{\Omegaverline{C}^e[1],\omega} \def\O{\Omegaverline{C}^e[1])^{-\tauext{Op}}$
is the anti $\tauext{Op}$-invariant part.
\par
Suppose that $(C, \{ \mathfrak m_{k,\beta} \})$ is an $\text{Op}$-invariant filtered $A_{n,K}$ algebra.
By Lemma \ref{lem:Opmkbeta} we have $\text{Op}$-invariant elements
$$
\mathfrak{m}^e_{k,\beta} = \mathfrak{m}_{k,\beta}e^{\text{pr}_2(\beta)/2}
\in \text{Hom } (B_{K'}\overline{C}^e[1],\overline{C}^e[1])^{\text{Op}}.
$$
Note that the map $\text{Op}$ and the map $\delta_1$ defined by
\eqref{def:Hcoboundary} commute. Therefore if we
use $\mathfrak{m}^e_{k,\beta}$,
we can define
an {\it $\text{Op}$-invariant Hochschild cohomology}
\begin{equation}\label{def:OpHoch}
H(\text{Hom } (B_{K'}\overline{C}^e[1], \overline{C}^e[1]),\delta_1)^{\text{Op}}:=
H(\text{Hom } (B_{K'}\overline{C}^e[1], \overline{C}^e[1])^{\text{Op}}
,\delta_1).
\end{equation}
Moreover the construction of the obstruction class above
yields the $\text{Op}$-invariant obstruction class
\begin{equation}\label{def:Opobst}
[o_{K',\beta}^e(C)]^{\text{Op}} \in H(\text{Hom } (B_{K'}\overline{C}^e[1], \overline{C}^e[1]),\delta_1)^{\text{Op}}.
\end{equation}
Then the following lemma is the $\text{Op}$-invariant version of
Lemma 7.2.74 \cite{fooobook2} whose proof is straightforward.
\begin{lem}
Let $(n',K')$ be as above and $C$
an $\text{Op}$-invariant filtered $A_{n,K}$ algebra. Then the obstruction classes
$$
[o_{K',\beta}^e(C)]^{\text{\rm Op}} \in
H(\text{\rm Hom }(B_{K'}\overline{C}^e[1],\overline{C}^e[1]), \delta_{1})^{\text{\rm Op}}
$$
vanish
for all $\beta$ with $\Vert \beta\Vert = n'$
if and only if there exists an $\text{Op}$-invariant
filtered $A_{n',K'}$ structure extending the given
$\text{Op}$-invariant filtered $A_{n,K}$ structure.
\par
Moreover, if $C \to C'$ is an $\text{Op}$-invariant
filtered $A_{n,K}$ homotopy
equivalence, then $[o_{K',\beta}^e(C)]^{\text{\rm Op}}$ is mapped to
$[o_{K',\beta}^e(C')]^{\text{\rm Op}}$ by the isomorphism
$$H(\text{\rm Hom }(B_{K'}\overline{C}^e[1],\overline{C}^e[1]), \delta_{1})^{\text{\rm Op}}
\cong
H(\text{\rm Hom }(B_{K'}\overline{C'}^e[1],\overline{C'}^e[1]), \delta_{1})^{\text{\rm Op}}
$$
induced by the $\text{Op}$-invariant homotopy equivalence.
\end{lem}
Once $\text{Op}$-invariant obstruction theory is established,
the rest of the proof of Theorem \ref{invariantext(n,K)} is parallel to the one in Subsection 7.2.6 \cite{fooobook2}.
\end{proof}
\par
Similarly, using the $\text{Op}$-invariant obstruction theory for
an $\text{Op}$-invariant filtered $A_{n,K}$ homomorphism, we can also show the $\text{Op}$-invariant version
of Lemma 7.2.129 \cite{fooobook2} in a straightforward way.
\begin{lem}\label{Opinvhompromot}
Let $(n,K) < (n',K')$ and
$C_1,C_2,C'_1,C'_2$ be $\text{Op}$-invariant filtered $A_{n',K'}$ algebras.
Let $\mathfrak h : C_1 \to C_2$, $\mathfrak h': C'_1 \to C'_2$ be
$\text{Op}$-invariant filtered $A_{n',K'}$
homotopy equivalences.
Let $\mathfrak g_{(1)}: C_1 \to C'_1$ be an $\text{Op}$-invariant
filtered $A_{n,K}$ homomorphism and $\mathfrak g_{(2)}: C_2 \to C'_2$ an
$\text{Op}$-invariant
filtered $A_{n',K'}$ homomorphism. We assume that $\mathfrak g_{(2)}
\circ \mathfrak h$ is
$\text{Op}$-invariant filtered $A_{n,K}$ homotopic to
$\mathfrak h' \circ \mathfrak
g_{(1)}$.
\par
Then there exists an $\text{Op}$-invariant filtered $A_{n',K'}$ homomorphism
$\mathfrak g_{(1)}^+: C_1 \to C'_1$ such that $\mathfrak g_{(1)}^+$ coincides with
$\mathfrak g_{(1)}$ as an $\text{Op}$-invariant filtered $A_{n,K} $ homomorphism and that
$\mathfrak g_{(2)} \circ \mathfrak h$ is $\text{Op}$-invariant filtered $A_{n',K'}$ homotopic to $\mathfrak h' \circ \mathfrak g_{(1)}^+$.
\end{lem}
\bibliographystyle{amsalpha}
\begin{thebibliography}{FOOO6}
\bibitem[A]{Aur07} D. Auroux,
{\em Mirror symmetry and T-duality in the complement of
an anticanonical divisor},
J. G\"okova Geom. Topol. 1 (2007), 51--91,
arXiv:0706.3207v2.
\bibitem[CMS]{CMS}
R. Casta\~no-Bernard, D. Matessi and J. Solomon,
{\em Symmetries of Lagrangian fibrations},
Adv. Math. 225 (2010), no. 3, 1341--1386, arXiv:0908.0966.
\bibitem[C]{Cho04} C.-H. Cho, {\em Holomorphic discs, spin structures and Floer cohomology of the Clifford tori},
Internat. Math. Res. Notices 35 (2004), 613--640.
\bibitem[F1]{fukaya;mhtpy}
K. Fukaya,
{\em Morse homotopy and its quantization},
in `Geometric topology (Athens, GA, 1993)', ed.
W. Kazez, 409--440, AMS/IP Stud. Adv. Math., 2.1, Amer. Math. Soc., Providence, RI, 1997.
\bibitem[F2]{fukaya;counting}
K. Fukaya,
{\em Counting pseudo-holomorphic discs
in Calabi-Yau 3 fold}, Tohoku Math. J. (2) 63 (2011), no. 4, 697--727, arXiv:0908.0148.
\bibitem[FOOO1]{fooo00} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian intersection Floer theory-anomaly and
obstruction,} Kyoto University preprint, 2000. Available at http://www.math.kyoto-u.ac.jp/\~{}fukaya/fukaya.html
\bibitem[FOOO2]{fooo06} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian intersection Floer theory-anomaly and
obstruction,} expanded version of [FOOO1], 2006 \& 2007.
\bibitem[FOOO3]{fooobook1} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian intersection Floer theory-anomaly and
obstruction,}
vol. 46-1,
AMS/IP Studies in Advanced Math.,
American Math. Soc. and International Press. (2009). MR2553465.
\bibitem[FOOO4]{fooobook2} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian intersection Floer theory-anomaly and
obstruction,}
vol. 46-2,
AMS/IP Studies in Advanced Math.,
American Math. Soc. and International Press. (2009). MR2548482.
\bibitem[FOOO5]{foooYash} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Canonical models of filtered $A_{\infty}$-algebras and Morse complexes,}
New perspectives and challenges in symplectic field theory, 201--227, CRM Proc. Lecture Notes, 49, Amer. Math. Soc., (2009).
\bibitem[FOOO6]{foootoric1} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian Floer theory on compact toric manifolds I}, Duke Math. J., 151 (2010), 23--175,
arXiv:0802.1703.
\bibitem[FOOO7]{foootoric2} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian Floer theory on compact toric manifolds II: bulk deformations}, Selecta Math. (N.S.) 17 (2011), no. 3, 609--711, arXiv:0810.5774.
\bibitem[FOOO8]{fooomirror1} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian Floer theory and mirror symmetry on compact toric manifolds},
Ast\'erisque, No. 376, (2016), arXiv:1009.1648.
\bibitem[FOOO9]{fooointeger} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Lagrangian Floer theory over integers: spherically positive symplectic manifolds},
Pure and Applied Mathematics Quarterly 9 (2013), 189--289, arXiv:1105.5124.
\bibitem[FOOO10]{foootech} K. Fukaya, Y.-G. Oh, H. Ohta and K. Ono,
{\em Technical details on Kuranishi structure and virtual fundamental chain}, arXiv:1209.4410.
\bibitem[FO]{FO}
K. Fukaya and K. Ono,
{\em Arnold conjecture and Gromov-Witten invariant},
Topology 38 (1999), no. 5, 933--1048.
\bibitem[Oh1]{Oh93}
Y.-G. Oh,
{\em Floer cohomology of Lagrangian intersections and pseudo-
holomorphic disks I, $\&$ II},
Comm. Pure and Appl. Math.
46
(1993),
949--994 \& 995--1012.
Addenda, ibid, 48 (1995), 1299--1302.
\bibitem[Oh2]{Oh96}
Y.-G. Oh,
{\em Floer cohomology, spectral sequence and the Maslov class of
Lagrangian embeddings},
Internat. Math. Res. Notices
7 (1996), 305--346.
\bibitem[Oht]{Oht} H. Ohta, {\em Obstruction to and deformation of
Lagrangian intersection Floer cohomology}, Symplectic Geometry and Mirror
Symmetry (Seoul, 2000), K. Fukaya, Y.-G. Oh, K. Ono and G. Tian ed.,
World Sci. Publishing, River Edge, 2001, pp. 281--309.
\bibitem[PSW]{PSW}
R. Pandharipande, J. Solomon and J. Walcher,
{\em Disk enumeration on the quintic 3-fold},
J. Amer. Math. Soc. 21 (2008), no. 4, 1169--1209.
\bibitem[Si]{Si}
V. de Silva,
{\em Products on symplectic Floer homology},
Thesis, Oxford Univ.
(1997).
\bibitem[So]{Sol}
J. Solomon,
{\em Intersection theory on the moduli space of holomorphic curves with Lagrangian boundary conditions,}
preprint, arXiv:math/0606429.
\bibitem[Wa]{Wal}
J. Walcher,
{\em Opening mirror symmetry on the quintic},
Commun. Math. Phys. 276 (2007),
671--689.
\bibitem[We]{Wel}
J-Y. Welschinger,
{\em Invariants of real symplectic 4-manifolds and lower bounds in real enumerative geometry},
Invent. Math. 162 (2005), no. 1, 195--234.
\end{thebibliography}
\end{document}
\begin{document}
\title{On Partial Smoothness, Tilt Stability and the $\mathcal{VU}$--Decomposition}
\author{A. C. Eberhard\thanks{Email addresses of the authors: andy.eb@rmit.edu.au; yluo@rmit.edu.au and
shuai0liu@gmail.com\newline
This research was in part supported by the ARC Discovery grant no.
DP120100567.}, Y. Luo and S. Liu}
\date{}
\maketitle
\begin{abstract}
Under the assumption of prox-regularity and the presence of a tilt stable
local minimum we are able to show that a $\mathcal{VU}$ like decomposition
gives rise to the existence of a smooth manifold on which the function in
question coincides locally with a smooth function.
\end{abstract}
\section{Introduction}
The study of substructure of nonsmooth functions has led to an enrichment of
fundamental theory of nonsmooth functions \cite{Hare:1, Hare:2, Hare:3,
Lem:1, Lewis:1,Lewis:2, Miller:1}. Fundamental to this substructure is the
presence of manifolds along which the restriction of the nonsmooth function
exhibits some kind of smoothness. In the case of \textquotedblleft partially smooth function\textquotedblright\ \cite{Lewis:1}
an axiomatic approach is used to describe the local structure that is
observed in a number of important examples \cite{Lewis:1, Lewis:2}. In \cite{Lewis:2}
it is shown that the study of tilt stability can be enhanced for the class of
partially smooth functions. In the
theory of the \textquotedblleft$\mathcal{U}$-Lagrangian\textquotedblright\ and the associated \textquotedblleft$\mathcal{VU}$
decomposition\textquotedblright\ \cite{Lem:1, Mifflin:2003} the existence of a smooth manifold substructure is proven for
some special classes of functions \cite{Mifflin:2003, Mifflin:2005}. In the extended theory the presence of so
called \textquotedblleft fast tracks\textquotedblright\ is assumed and these
also give rise to similar manifold substructures \cite{Miller:1, Mifflin:2004}. The $\mathcal{U}$
-Lagrangian is reminiscent of a partial form of \textquotedblleft tilt minimisation\textquotedblright\
\cite{rock:7} and this
observation has motivated this study. As fast tracks and related concepts such as
\textquotedblleft identifiable constraints\textquotedblright are designed to aid the
design of methods for the solution of nonsmooth minimization
problems \cite{Wright:1993, Miller:1, Mifflin:2002, Mifflin:2004,
Mifflin:2005,Hare:2014}, it seems appropriate to ask what additional
structure does the existence of a tilt stable local minimum give to the
study of the $\mathcal{VU}$ decomposition \cite{Lem:1}? This is the subject
of the paper.
In the following discussion we denote the extended reals by $\mathbb{R}_{\infty }:=\mathbb{R}\cup \left\{
+\infty \right\} .$ If not otherwise stated we will consider a lower
semi-continuous, extended--real--valued function $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$.
We denote the limiting subdifferential of
Mordukhovich, Ioffe and Kruger by $\partial f$.
Tilt stability was first studied in \cite{rock:7} for the case of $f$ being
both \textquotedblleft prox-regular\textquotedblright\ at $\bar{x}\ $ for $\bar{z}\in \partial f(\bar{x})$ and
\textquotedblleft subdifferentially continuous\textquotedblright\ at
$\left( \bar{x},\bar{z}\right)$, in
the sense of Rockafellar and Poliquin \cite{polrock:1}. In \cite{rock:7} a characterisation
of tilt stability is made in terms of certain second order sufficient
optimality conditions. Such optimality conditions have been studied in
\cite{rock:7, Boris:4, eberhard:8, eberhard:9}. In \cite{eberhard:6, eberhard:8}
it is shown that second order information provided by the coderivative is
closely related to another second order condition framed in terms of the
\textquotedblleft limiting subhessian\textquotedblright\ \cite{ebpenot:2, eberhard:1, eberhard:6}.
These may be thought of as the robust$\backslash$limiting version of symmetric matrices associated
with a lower, supporting Taylor expansion with a first order component ${z}$ and
second order component $Q$ (a symmetric matrix). The limiting pairs
$(\bar{z}, \bar{Q})$ are contained in the so called \textquotedblleft subjet\textquotedblright\ \cite{Crandall:1992} and the second
order components $\bar{Q}$ associated with a given $\bar{z} \in \partial f(\bar{x})$ are contained
in the limiting subhessian $\underline{\partial}^2 f(\bar{x}, \bar{z})$, \cite{ebpenot:2, ebioffe:4, eberhard:9}.
These have been extensively studied and possess a robust calculus similar to that which
exists for the limiting subdifferential \cite{ebioffe:4, eberhard:7}. One can view
the \textquotedblleft best curvature\textquotedblright\ approximation in the direction $h$ for the function $f$ at $(\bar{x},\bar{z})$ to be
$q(\underline{\partial}^2 f(\bar{x},\bar{z}))(h):= \sup \{\langle Qh, h\rangle \mid Q \in \underline{\partial}^2 f(\bar{x},\bar{z}) \}$,
where we denote by $\langle u, h\rangle$ the usual Euclidean inner product of two vectors
$u, h \in \mathbb{R}^n$.
To complete our
discussion we consider the $\mathcal{VU}$ decomposition \cite{Lem:1}. When
$\operatorname{rel}$-$\operatorname{int} \partial f\left( \bar{x}\right) \neq \emptyset $ we can take $\bar{z}\in \operatorname{rel}$-$\operatorname{int}
\partial f\left( \bar{x}\right) $ and define $\mathcal{V}:=\operatorname{ span}\left\{ \partial f\left( \bar{x}\right) -\bar{z}\right\} $ and
$\mathcal{U}:=\mathcal{V}^{\perp }$.
The $\mathcal{V}$-space is thought to
capture the directions of nonsmoothness of $f$ at $\bar{x}$ while the $\mathcal{U}$ is thought
to capture directions of smoothness. When $\mathcal{U}^{2}:= \operatorname{dom}
q(\underline{\partial }^{2}f(\bar{x},\bar{z}))(\cdot )$ is a linear subspace that is
contained in $\mathcal{U}$, we call $\mathcal{U}^{2}$ the second order component of
$\mathcal{U}$ and in Lemma \ref{lem:sharp} we give a quite mild condition under
which this is indeed the case. When $\mathcal{U}^{2}=\mathcal{U}$ we say that a fast-track
exists at $\bar{x}$ for $\bar{z}\in \partial f\left( \bar{x}\right) $.
In this paper we
investigate whether the existence of a tilt stable local minimum provides
extra information regarding the existence of a smooth manifold within which a
smooth function interpolates the values of the $f$. We are able to show the
following positive results.
Recall that we say $f$ is quadratically minorised when there exists a quadratic function
$q\left( x\right) :=\alpha -\frac{R}{2}\left\Vert x-\bar{x}\right\Vert ^{2}$
such that $q\leq f$ (globally). All balls $B^{X}_{\varepsilon} (0) := \{ x \in X \mid \|x\| \leq \varepsilon\}$ are closed.
\begin{theorem}
\label{thm:1}Consider $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is
a proper lower semi-continuous function, quadratically minorised, and
prox-regular at $\bar{x}$ for $0\in \partial f(\bar{x})$. Suppose in
addition $f$ admits a nontrivial subspace $\mathcal{U}^{2}:= \operatorname{dom} q\left(
\underline{\partial }^{2}f\left( \bar{x},0\right) \right) (\cdot)$ and that $f$
has a tilt stable local minimum at $\bar{x}$. Then $\mathcal{U}^2 \subseteq \mathcal{U}$ and for $g\left( w\right) :=
\left[ \operatorname{co}h\right] \left( w\right) $, $h(w):=f(\bar{x}+w)$ and $\{v\left( u\right) \}=
\operatorname{argmin}_{v^{\prime }\in \mathcal{V}^{2}\cap B_{\varepsilon }\left( 0\right) }f\left( \bar{x}+u+v^{\prime
}\right) :\mathcal{U}^{2}\rightarrow \mathcal{V}^{2}:=(\mathcal{U}^2)^{\perp}$,
there exists a $\delta>0$ such that
we have $g\left(
u+v\left( u\right) \right) =f\left( \bar{x}+u+v\left( u\right) \right) $ and
$\nabla _{u}g\left( u+v\left( u\right) \right) $ existing as Lipschitz
function for $u\in B_{\delta }^{\mathcal{U}^{2}}\left( 0\right) $.
\end{theorem}
That is,
$\mathcal{M}:=\left\{ \left( u,v\left( u\right) \right) \mid u\in
B_{\varepsilon }^{\mathcal{U}^{2}}\left( 0\right) \right\} $ is a manifold
on which the restriction to $\mathcal{M}$ of function $g$ coincides with
a smooth $C^{1,1}$ function of $u \in \mathcal{U}$ (tilt stability ensures local uniqueness of the function $v(\cdot)$).
Assuming a little more we obtain the smoothness of $v$ and in addition the
smoothness of the manifold.
\begin{theorem}
\label{thm:2}Consider $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is
a proper lower semi-continuous function, quadratically minorised and
prox-regular at $\bar{x}$ for $0\in \partial f(\bar{x})$. Suppose in
addition that $\mathcal{U}^{2} =\mathcal{U}$ is a linear subspace (i.e. $\mathcal{U}$ admits a fast track), $f$
has a tilt stable local minimum at $\bar{x}$ for $0\in \operatorname{rel}$-$\operatorname{int}\partial f\left( \bar{x}\right) $ and $\partial ^{\infty }f\left( \bar{x}+u+v\left( u\right) \right)
=\left\{ 0\right\} $ for $ v\left( u\right) \in
\operatorname{argmin}_{v^{\prime }\in \mathcal{V}\cap B_{\varepsilon }\left(
0\right) }\left\{ g\left( u+v^{\prime }\right) \right\} : \mathcal{U}\rightarrow \mathcal{V}$,
$u \in B_{\varepsilon }^{\mathcal{U}}\left( 0\right)$. Then
there exists a $\varepsilon >0$ such that for $g\left( w\right) :=\left[
\operatorname{co}h\right] \left( w\right) $ the function
defined below is a $C^{1,1}\left( B_{\varepsilon }^{\mathcal{U}}\left( 0\right)
\right) $ smooth function
\begin{eqnarray*}
u\mapsto g\left( u+v\left( u\right) \right) &=&f\left( \bar{x}+u+v\left(
u\right) \right) \quad \text{where } \\
\nabla _{w}g\left( u+v\left( u\right) \right) &=&\left( e_{\mathcal{U}},\nabla v\left( u\right) \right) ^{T}\partial g\left( u+v\left( u\right)
\right)
\end{eqnarray*}
($e_{\mathcal{U}}$ is the identity operator on $\mathcal{U}$). Moreover
suppose we have a $\delta >0$ (with $\delta \leq \varepsilon$) such that for all $z_{\mathcal{V}}\in
B_{\delta }\left( 0\right) \cap \mathcal{V}\subseteq \partial _{\mathcal{V}}f\left( \bar{x}\right) $ we have a common
\begin{equation}
\{v\left( u\right) \} = \operatorname{argmin}_{v\in \mathcal{V}\cap B_{\varepsilon
}\left( 0\right) }\left\{ f\left( \bar{x}+u+v\right) -\langle z_{\mathcal{V}},v\rangle \right\} \label{eqn:1}
\end{equation}
for all $u\in B_{\varepsilon }\left( 0\right) \cap \mathcal{U}$. Then $\mathcal{M}:=\left\{ \left( u,v\left( u\right) \right) \mid u\in
B_{\varepsilon }^{\mathcal{U}}\left( 0\right) \right\} $ is a $C^1$ - smooth
manifold on which $u\mapsto f\left( \bar{x}+u+v\left( u\right) \right) $ is $C^{1,1}
\left( B_{\delta }^{\mathcal{U}}\left( 0\right) \right) $ smooth and $u\mapsto v\left( u\right) $ is continuously differentiable.
\end{theorem}
We are also able to produce a lower Taylor approximation for $f$ that holds locally at all points
inside $\mathcal{M}$, see Corollary \ref{cor:53}. These results differ from those present in the literature in that we impose common structural assumptions on $f$ found elsewhere in the literature on stability of local minima \cite{Drusvy:1,rock:7}, rather than imposing very special structural properties, as is the approach of \cite{Hare:2014,Wright:1993,Mifflin:2003,Mifflin:2004}. Moreover, we do not assume the a-priori existence of any kind of smoothness of the underlying manifold, as is done in the axiomatic approach in \cite{Lewis:2}, but let smoothness arise from a graded set of assumptions which progressively enforce greater smoothness. In this way the roles of these respective assumptions are clarified. Finally we note that it is natural in this context to study $C^{1,1}$ smoothness rather than the $C^2$ smoothness used in other works such as \cite{Lewis:2,Mifflin:2002,Miller:1}.
\section{Preliminaries}
The following basic concepts are used repeatedly throughout the paper.
\begin{definition}
\label{lim:subhessian} Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a lower semi--continuous function.
\begin{enumerate}
\item Denote by $\partial _{p}f(\bar{x})$ the proximal subdifferential,
which consists of all vectors $z$ satisfying $f(x)\geq f(\bar{x})+\langle
z,x-\bar{x}\rangle -\frac{r}{2}\Vert x-\bar{x}\Vert ^{2}$ in some
neighbourhood of $\bar{x}$, for some $r\geq 0$, where $\Vert \cdot \Vert $
denotes the Euclidean norm. Denote by $S_{p}(f)$ the points in the domain of
$f$ at which $\partial _{p}f(x)\neq \emptyset $.
\item The limiting subdifferential \cite{M06a, rock:6} at $x$ is given by
\begin{equation*}
\partial f(x)=\limsup_{x^{\prime }\rightarrow _{f}x}\partial _{p}f(x^{\prime
}):=\{z\mid \exists z_{v}\in \partial _{p}f(x_{v}),x_{v}\rightarrow _{f}x \text{, with }z_{v}\rightarrow z\},
\end{equation*}
where $x^{\prime }\rightarrow _{f}x$ means that $x^{\prime }\rightarrow x$
and $f(x^{\prime })\rightarrow f(x)$.
\item The singular
limiting subdifferential is given by
\begin{align*}
\partial ^{\infty }f(x)& =\limsup_{x^{\prime }\rightarrow _{f}x}\!{}^{\infty
}\,\partial _{p}f(x^{\prime }) \\
& :=\{z\mid \exists z_{v}\in \partial _{p}f(x_{v}),x_{v}\rightarrow _{f}x \text{, with }\lambda _{v}\downarrow 0\text{ and }\lambda
_{v}z_{v}\rightarrow z\}.
\end{align*}
\end{enumerate}
\end{definition}
\subsection{The $\mathcal{VU}$ decomposition}\label{sec:VU}
Denote the convex hull of a set $C\subseteq \mathbb{R}^{n}$ by $\operatorname{co}C$.
The convex hull of a function $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$
is denoted by $\operatorname{co}f$ and corresponds to the proper
lower-semi-continuous function whose epigraph is given by
$\overline{\operatorname{co }\operatorname{epi}f}$. In this section we will use a slightly
weaker notion of the $\mathcal{VU}$ decomposition. When $\operatorname{rel}$-$\operatorname{int}
\operatorname{co}\partial f\left( \bar{x}\right) \neq \emptyset $ we can take $\bar{z}\in \operatorname{rel}$-$\operatorname{int}
\operatorname{co}\partial f\left( \bar{x}\right) $ and define $\mathcal{V}:=\operatorname{ span}
\left\{ \operatorname{co}\partial f\left( \bar{x}\right) -\bar{z}\right\} $ and
$\mathcal{U}:=\mathcal{V}^{\perp }$.
Under the $\mathcal{VU}$ decomposition \cite{Lem:1} for a given $\bar{z}\in
\operatorname{rel}$-$\operatorname{int}\operatorname{co}\partial f(\bar{x})$ we have, by definition,
\begin{equation}
\bar{z}+B_{\varepsilon }\left( 0\right) \cap \mathcal{V}\subseteq \operatorname{co}\partial f\left( \bar{x}\right)
\quad \text{for some $\varepsilon >0$.}
\label{neqn:6}
\end{equation}
One can then decompose $\bar{z}=\bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}}$
so that when $w=u+v\in \mathcal{U}\oplus \mathcal{V}$ we have $\langle \bar{z},w\rangle =\langle \bar{z}_{\mathcal{U}},
u\rangle +\langle \bar{z}_{\mathcal{V}},v\rangle .$ Indeed we may decompose into the direct
sum $x=x_{\mathcal{U}}+x_{\mathcal{V}}\in \mathcal{U}\oplus \mathcal{V}$ and use the
following norm for this decomposition $\left\Vert x-\bar{x}\right\Vert
^{2}:=\left\Vert x_{\mathcal{U}}-\bar{x}_{\mathcal{U}}\right\Vert
^{2}+\left\Vert x_{\mathcal{V}}-\bar{x}_{\mathcal{V}}\right\Vert ^{2}.$ As all norms are equivalent
we will at times prefer to use $\{B^{\mathcal{U}}_{\varepsilon} (\bar{x}_{\mathcal{U}})
\oplus B^{\mathcal{V}}_{\varepsilon} (\bar{x}_{\mathcal{V}}) \}_{\varepsilon > 0}$ which more directly
reflects the direct sum $\mathcal{U}\oplus \mathcal{V}$, where each $B^{(\cdot)}_{\varepsilon} (\cdot)$ is a closed ball of radius $\varepsilon >0$, in their respective space.
Denote the projection onto the subspaces $\mathcal{U}$ and $\mathcal{V}$ by $P_{\mathcal{U}}\left( \cdot \right) $ and $P_{\mathcal{V}}\left( \cdot
\right) $, respectively. Denote by $f|_{\mathcal{U}}$ the restriction of $f$
to the subspace $\mathcal{U}$, $\partial _{\mathcal{V}}f(\bar{x}):=P_{\mathcal{V}}(\partial f(\bar{x}))$ and $\partial _{\mathcal{U}}f(\bar{x}
):=P_{\mathcal{U}}(\partial f(\bar{x}))$. Let $\delta _{C}(x)$ denote the indicator function of a
set $C$, $\delta _{C}(x)=0$ iff $x\in C$ and $+\infty $ otherwise. Let $f^{\ast }$ denote the convex conjugate of a function $f$.
\begin{remark}
The condition (\ref{neqn:6}) implies one can take $\mathcal{V}:=\operatorname{span}
\left\{ \operatorname{co}\partial f\left( \bar{x}\right) -\bar{z}\right\} =\operatorname{affine}$-$\operatorname{hull}
\left[ \operatorname{co}\partial f\left( \bar{x}\right) \right]
-\bar{z}$ which is independent of the choice of $\bar{z}\in \operatorname{co}\partial f\left( \bar{x}\right) $.
Moreover, as was observed in \cite[Lemma
2.4]{Miller:1} we have $\bar{z}_{\mathcal{U}}=P_{\operatorname{affine}\text{-}\operatorname{hull}
\operatorname{co}\partial f\left( \bar{x}\right) }\left( 0\right) $ (see part \ref{part:3} below).
\end{remark}
\begin{proposition}
\label{prop:reg}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$
is a proper lower semi--continuous function with (\ref{neqn:6}) holding.
\begin{enumerate}
\item We have
\begin{equation}
\mathcal{U}=\left\{ u\mid -\delta _{\partial f(\bar{x})}^{\ast }(-u)=\delta
_{\partial f(\bar{x})}^{\ast }(u)\right\} . \label{neqn:4}
\end{equation}
\item \label{part:3} We have
\begin{equation}
\partial f\left( \bar{x}\right) =\left\{ \bar{z}_{\mathcal{U}}\right\}
\oplus \partial _{\mathcal{V}}f\left( \bar{x}\right) . \label{prop:6:3}
\end{equation}
\item \label{part:4} Suppose there exists $\varepsilon >0$
such that for all $z_{\mathcal{V}}\in B_{\varepsilon }\left( \bar{z}_{\mathcal{V}}\right) \cap \mathcal{V}\subseteq \partial _{\mathcal{V}}f\left(
\bar{x}\right) $ there is a common
\begin{equation*}
v\left( u\right) \in \operatorname{argmin}_{v\in \mathcal{V}\cap B_{\varepsilon
}\left( 0\right) }\left\{ f\left( \bar{x}+u+v\right) -\langle z_{\mathcal{V}},v\rangle \right\} \cap
\operatorname{int} B_{\varepsilon} (0)
\end{equation*}
for all $u\in B_{\varepsilon }\left( 0\right) \cap \mathcal{U}$. Then we
have
\begin{equation}
\operatorname{cone}\left[ \partial _{\mathcal{V}}f\left( \bar{x}+u+v\left( u\right)
\right) -\bar{z}_{\mathcal{V}}\right] \supseteq \mathcal{V}. \label{neqn:26}
\end{equation}
\item \label{part:2} Suppose, in addition, that $f$ is (Clarke) regular at $\bar{x}$,
that $\bar{z}\in \partial f\left( \bar{x}\right) $ and that $\partial ^{\infty }f\left(
\bar{x}\right) \cap \mathcal{V} =\left\{ 0\right\} $. Then the function
\begin{equation*}
H_{\mathcal{U}}\left( \cdot \right) :=f\left( \bar{x}+\cdot \right) :\mathcal{U}
\rightarrow \mathbb{R}_{\infty }
\end{equation*}
is strictly differentiable at $0$ and single valued with $\partial H_{\mathcal{U}}\left(
0\right) =\left\{ \bar{z}_{\mathcal{U}}\right\}$
and $H_{\mathcal{U}}$ (as a function defined on $\mathcal{U}$) is continuous with $H_{\mathcal{U}}$
and $-H_{\mathcal{U}}$ (Clarke) regular
functions at $0$ (in the sense of \cite{rock:6}).
\end{enumerate}
\end{proposition}
\begin{proof}
(1) If $u\in \mathcal{U}$ then by construction we have
\begin{equation}
-\delta _{\partial f(\bar{x})}^{\ast }(-u)=-\delta _{\operatorname{co}\partial f(\bar{x})}^{\ast }(-u)=\delta _{\operatorname{co}\partial f(\bar{x})}^{\ast
}(u)=\delta _{\partial f(\bar{x})}^{\ast }(u) \label{neqn:5}
\end{equation}
giving the containment of $\mathcal{U}$ in the right hand side of (\ref{neqn:4}). For $u$ satisfying (\ref{neqn:5})
then $\langle z-\bar{z},u\rangle =0$ for all $z\in \operatorname{co}\partial f(\bar{x})$. That is, $u\perp
\lbrack \operatorname{co}\partial f(\bar{x})-\bar{z}]$ and hence $u\perp \mathcal{V}=\mathcal{U}^{\perp }$ verifying $u\in \mathcal{U}$.
(2) Since $\partial f(\bar{x})\subseteq \bar{z}+\mathcal{V}=\bar{z}_{\mathcal{U}}+\mathcal{V}$ we always have $\partial f\left( \bar{x}\right) =\left\{
\bar{z}_{\mathcal{U}}\right\} \oplus \partial _{\mathcal{V}}f\left( \bar{x}\right) .$
(3) When $v\left( u\right) \in \operatorname{argmin}_{v\in \mathcal{V}\cap
B_{\varepsilon }\left( 0\right) }\left\{ f\left( \bar{x}+u+v\right) -\langle
z_{\mathcal{V}},v\rangle \right\} $ for all $u\in B_{\varepsilon }\left(
0\right) \cap \mathcal{U}$ and $z_{\mathcal{V}}\in B_{\varepsilon }\left(
\bar{z}_{\mathcal{V}}\right) \cap \mathcal{V}$ we have, due to the necessary
optimality conditions, that
\begin{equation*}
z_{\mathcal{V}}\in \partial _{\mathcal{V}}f\left( \bar{x}+u+v\left( u\right)
\right)
\end{equation*}
and hence $B_{\varepsilon }\left( \bar{z}_{\mathcal{V}}\right) \cap \mathcal{V}\subseteq \partial _{\mathcal{V}}f\left( \bar{x}+u+v\left( u\right)
\right) $ giving (\ref{neqn:26}).
(4) For $h\left( \cdot \right) :=f\left( \bar{x}+\cdot \right) $ define $H =h
+\delta _{\mathcal{U}} $ so $h\left( u\right) =H\left( u\right) $ when $u\in
\mathcal{U}$. Then as $\partial ^{\infty }f\left( \bar{x}\right) \cap \mathcal{V} =\left\{
0\right\} $, by \cite[Corollary 10.9]{rock:6} we have
\begin{equation*}
\partial H\left( 0\right) \subseteq \partial f\left( \bar{x}\right) + N_{\mathcal{U}}\left( 0 \right) =\partial f\left( \bar{x}\right) +\mathcal{\ V}.
\end{equation*}
Then restricting to $\mathcal{U}$ we have $P_{\mathcal{U}} \partial H\left( 0\right)
\subseteq \partial_{\mathcal{U}}
f\left( \bar{x} \right)$. Then for $u \in \mathcal{U}$ we have $\delta_{\partial H(0)}^{\ast} (u) = \delta_{P_{\mathcal{U}}\partial H(0)}^{\ast} (u) \leq \delta _{\partial f(\bar{x})}^{\ast }(u)$ and so
\begin{equation*}
-\delta _{\partial f(\bar{x})}^{\ast }(-u)\leq -\hat{d}H(0)(-u)\leq \hat{d}
H(0)(u)\leq \delta _{\partial f(\bar{x})}^{\ast }(u).
\end{equation*}
As $f$ is regular at $\bar{x}$ we have $\partial^{\infty} f (\bar{x}) = 0^{+} (\partial f (\bar{x}))$ where the latter corresponds to the recession directions of the convex set
$\partial f (\bar{x})$ (see \cite[Theorem 8.49]{rock:6}). Then we have $0^+ (\partial f (\bar{x})) \subseteq \mathcal{V}$. [Take $u \in 0^+ (\partial f (\bar{x}))$ and $z \in \operatorname{rel-int}
\partial f (\bar{x})$. Then by \cite[Theorem 6.1]{rock:1} we have $z + u \in \operatorname{rel-int}
\partial f (\bar{x})$ and hence $u \in \mathcal{V}$.] Thus for $u \in \mathcal{U} \subseteq (0^+ (\partial f (\bar{x})))^{\circ}$ we have
$$
\hat{d}H (0)(u) := \limsup_{x \to 0, t \downarrow 0} \inf_{u^{\prime}\to
u} \frac{1}{t} (f(x+tu^{\prime})-f(x) ) =\delta_{\partial H(0)}^{\ast} (u)=\delta_{P_{\mathcal{U}}\partial H(0)}^{\ast} (u),
$$
see \cite[Definition 8.16, Exercise 8.23]{rock:6}.
It follows that $-\hat{d}H(0)(-u)=\hat{d}H(0)(u)$ for all $u\in \mathcal{U}$.
Restricting $H$ to the subspace $\mathcal{U}$ (denoting this function by $H_{\mathcal{U}}$)
we have $\partial^{\infty} H_{\mathcal{U}} (0) \subseteq \partial ^{\infty }f\left( \bar{x}\right)
\cap \mathcal{U} = \{0\}$ then by
\cite[Theorem 9.18]{rock:6} we have
$\partial H_{\mathcal{U}} \left( 0\right) $ a singleton
with $H_{\mathcal{U}}$ continuous at $0$ and $H_{\mathcal{U}}$ and $-H_{\mathcal{U}}$
(Clarke) regular. As $\bar{z}_{\mathcal{U}
}\in \partial H_{\mathcal{U}}\left( 0\right) $ we have $\partial H_{\mathcal{U}}\left( 0\right)
=\left\{
\bar{z}_{\mathcal{U}}\right\} $, so $\partial_{\mathcal{U}} f (\bar{x})
=\left\{ \bar{z}_{\mathcal{U}}\right\} $.
\end{proof}
\section{A Primer on Subjets and Subhessians \label{sec:3}}
We will have need to discuss second order behaviour in this paper and as a
consequence it will be useful to define a refinement of this decomposition
that takes into account such second order variations. In most treatments of
the $\mathcal{VU}$ decomposition one finds that by restricting $f$ to
$\mathcal{M}:=\{(u,v(u)) \mid u \in \mathcal{U}\}$ not only do we find
$f$ is smooth we also find that there is better second order behaviour as well \cite{Lem:1}.
This is also
often associated with smooth manifold substructures. Let $\mathcal{S}(n)$ denote the set of symmetric $n\times n$ matrices (endowed with the
Frobenius norm and inner product) for which $\langle Q,hh^{T}\rangle
=h^{T}Qh $. Denote the cone of positive semi-definite matrices by $\mathcal{P}(n)$ and
$\Delta_{2}f(x,t,z,u):=2\frac{f(x+tu)-f(x)-t\langle z,u\rangle }{t^{2}}$.
\begin{definition} \label{def:6}
Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a lower
semi--continuous function.
\begin{enumerate}
\item \label{part:1}The function $f$ is said to be twice sub-differentiable
(or possess a subjet) at $x$ if the following set is nonempty;
\begin{equation*}
\partial ^{2,-}f(x)=\{(\nabla \varphi (x),\nabla ^{2}\varphi
(x))\,:\,f-\varphi \,\text{ has a local minimum at }\,\,x\text{ with }
\,\,\varphi \in {\mathcal{C}}^{2}{\mathcal{(}}\mathbb{R}^{n})\}.
\end{equation*}
The subhessians at $(x,z)\in \operatorname{graph}\partial f$ are given by $\partial
^{2,-}f(x,z):=\{Q\in \mathcal{S}(n)\mid (z,Q)\in \partial ^{2,-}f(x)\}$.
\item The limiting subjet of $f$ at $x$ is defined to be: $\underline{\partial }^{2}f(x)=\limsup_{u\rightarrow ^{f}x}\partial ^{2,-}f(u)$ and the
associated limiting subhessians for $z\in \partial f\left( x\right) $ are $ \underline{\partial }^{2}f(x,z)=\left\{ Q\in \mathcal{S}\left( n\right) \mid
\left( z,Q\right) \in \underline{\partial }^{2}f(x)\right\} $.
\item We define the rank one barrier cone for $\underline{\partial }^{2}f(x,z)$ as
\begin{equation*}
b^{1}(\underline{\partial }^{2}f(x,z)):=\{h\in \mathbb{R}^{n}\mid q\left(
\underline{\partial }^{2}f(x,z)\right) (h):=\sup \left\{ \langle Qh,h\rangle
\mid Q\in \underline{\partial }^{2}f(x,z)\right\} <\infty \}.
\end{equation*}
\item Denoting $S_{2}(f)=\{x\in \operatorname{dom}\,(f)\mid \nabla ^{2}f(x) \text{
exists} \} $, then the limiting Hessians at $(\bar{x}, \bar{z})$ are given
by:
\begin{eqnarray*}
\overline{D}^{2}f(\bar{x},\bar{z}) &=&\{Q\in \mathcal{S}(n)\mid
Q=\lim_{n\rightarrow \infty }\nabla ^{2}f(x_{n}) \\
&&\qquad \text{where }\{x_{n}\}\subseteq S_{2}(f)\text{, }x_{n}\rightarrow
^{f}\bar{x}\text{ and }\nabla f(x_{n})\rightarrow \bar{z}\}.
\end{eqnarray*}
\item Define the second order Dini-directional derivative of $f$ by $f_{\_}^{\prime \prime }(\bar{x},z,h)=\liminf_{t\downarrow 0,u\rightarrow
h}\Delta _{2}f(\bar{x},t,z,u)$.
\end{enumerate}
\end{definition}
Define $\partial ^{2,+}f(x,z):= -\partial ^{2,-}(-f)(x,-z)$ then when $Q\in
\partial ^{2,-}f(x,z)\cap \partial ^{2,+}f(x,z)$ it follows that $Q=\nabla
^{2}f\left( x\right) $ and $z=\nabla f\left( x\right) $. If $f_{\_}^{\prime
\prime }(\bar{x},z,h)$ is finite then $f_{\_}^{\prime }(\bar{x},h):=\liminf_{{{t\downarrow 0}} \atop {{u\rightarrow h }}}\frac{1}{t}(f(\bar{x}+tu)-f(\bar{x}))=\langle
z,h\rangle $. It must be stressed that these second order objects may not
exist everywhere but as $\partial ^{2,-}f(x)$ is non--empty on a dense
subset of its domain \cite{Crandall:1992} when $f$ is lower semi--continuous then at worst so are
the limiting objects. In finite dimensions this concept is closely related
to the proximal subdifferential (as we discuss below). The subhessian is always a closed convex
set of matrices while $\underline{\partial }^{2}f(\bar{x},z)$ may not be
convex (just as $\partial _{p}f(\bar{x})$ is convex while $\partial f(\bar{x})$ often is not).
A function $f$ is \emph{para-concave} around $\bar{x}$ when there exists a $c>0$ and a ball $B_{\varepsilon }\left( \bar{x}\right) $ within which the
function $x\mapsto f\left( x\right) -\frac{c}{2}\left\Vert x\right\Vert
^{2}$ is finite concave (conversely $f$ is para-convex around $\bar{x}$ iff $-f$ is para-concave around $\bar{x}$). If a function is para--concave or para--convex we have (by Alexandrov's
theorem) the set $S_{2}(f)$ is of full Lebesgue measure in $\operatorname{dom}\,f$. A function is $C^{1,1}$ when $\nabla f $ exists and satisfies a Lipschitz property. In \cite[Lemma 2.1]{eberhard:6}, it is noted that $f$ is locally $C^{1,1}$ iff $f$ is
simultaneously a locally para-convex and para-concave function. The next
observation was first made in \cite[Proposition 4.2]{ebpenot:2} and later
used in \cite[Proposition 6.1]{ebioffe:4}.
\begin{proposition}[\protect\cite{ebioffe:4}, Proposition 6.1]
\label{prop:ebpenot}If $f$ is lower semi--continuous then for $z\in \partial
f(\bar{x})$ we have
\begin{equation}
\overline{D}^{2}f(\bar{x},z)-\mathcal{P}(n)\subseteq \underline{\partial }^{2}f(\bar{x},z). \label{ebneqn:8}
\end{equation}
If we assume in addition that $f$ is continuous and a para--concave function
around $\bar{x}$ then equality holds in (\ref{ebneqn:8}).
\end{proposition}
A weakened form of para-convexity is prox-regularity.
\begin{definition} [\protect\cite{polrock:1}]
\label{def:proxreg}Let the function $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ be finite at $\bar{x}$.
\begin{enumerate}
\item The function $f$ is prox--regular at $\bar{x}$ for $\bar{z}$ with
respect to $\varepsilon >0$ and $r \geq0 $, where $\bar{z}\in \partial f(\bar{x})$, if $f $ is locally lower semi--continuous at $\bar{x}$ and
\begin{equation*}
f(x^{\prime })\geq f(x)+\langle z,x^{\prime }-x\rangle -\frac{r}{2}\Vert
x^{\prime }-x\Vert ^{2}
\end{equation*}
whenever $\Vert x^{\prime }-\bar{x}\Vert \leq \varepsilon $ and $\Vert x-\bar{x}\Vert \leq \varepsilon $ and $\left\vert f(x)-f(\bar{x})\right\vert
\leq \varepsilon \ $ with $\Vert z-\bar{z}\Vert \leq \varepsilon $ and $z\in
\partial f(x)$.
\item The function $f$ is subdifferentially continuous at $\bar{x}$ for $\bar{z}$, where $\bar{z}\in \partial f(\bar{x})$, if for every $\delta >0$
there exists $\varepsilon >0$ such that $\left\vert f(x)-f(\bar{x}
)\right\vert \leq \delta $ whenever $|x-\bar{x}|\leq \varepsilon $ and $|z-
\bar{z}|\leq \varepsilon $ with $z\in \partial f(x).$
\end{enumerate}
\end{definition}
\begin{remark}
In this paper we adopt the convention that limiting subgradients must exist at $\bar{x}$ to invoke this
definition.
We say that $f$ is prox-regular at $\bar{x}$ iff it is
prox-regular with respect to each $\bar{z}\in \partial f\left( \bar{x}\right) $ (with respect to some $\varepsilon >0$ and $r\geq 0$).
\end{remark}
\begin{remark}\label{rem:jets}
We shall now discuss a well known alternative characterisation of $(z,Q)\in
\partial ^{2,-}f(\bar{x})$, see \cite{ebpenot:2}. By taking the $\varphi \in
C^{2}(\mathbb{R}^{n})$ in Definition \ref{def:6} and expanding
using a Taylor expansion we may equivalently assert that there exists a $\delta >0$ for which
\begin{equation}
f(x)\geq f(\bar{x})+\langle z,x-\bar{x}\rangle +\frac{1}{2}(x-\bar{x})^{T}Q(x-\bar{x})+o(\Vert x-\bar{x}\Vert ^{2})\quad \text{ for all }x\in B_{\delta }(\bar{x}),
\label{taylor}
\end{equation}
where $o\left( \cdot \right) $ is the usual Landau small order notation. It
is clear from (\ref{taylor}) that we have $(z,Q)\in \partial^{2,-}f(\bar{x}) $ implies $z\in \partial _{p}f(\bar{x})$ as
\begin{equation*}
f(x)\geq f(\bar{x}) + \langle z,x-\bar{x}\rangle -\frac{r}{2}\Vert x-\bar{x}\Vert ^{2}\quad \text{ for all }x\in B_{\delta }(\bar{x})
\end{equation*}
when $r>\Vert Q\Vert _{F}$ and $\delta >0$ sufficiently reduced. Moreover $z\in \partial _{p}f(\bar{x})$ implies $(z,-rI)\in \partial ^{2,-}f(\bar{x})$.
From the definition of prox-regularity at $\bar{x}$ for $\bar{z}$ (and the
choice of $x=\bar{x}$) we conclude that we must have $\bar{z}\in \partial
_{p}f(\bar{x})$ and hence $\partial ^{2,-}f(\bar{x},\bar{z})\neq \emptyset $. Moreover the definition of prox-regularity implies the limiting
subgradients are actually proximal subgradients locally, i.e.\ within an ``$f$-attentive neighbourhood of $\bar{z}$'' \cite{polrock:1}. When $f$ is subdifferentially
continuous we may drop the $f$-attentiveness and claim $B_{\delta }(\bar{z})\cap \partial f(\bar{x})=B_{\delta }(\bar{z})\cap \partial _{p}f(\bar{x})$
for some sufficiently small $\delta >0$. Example 4.1 of \cite{Lewis:2} shows that this neighbourhood can reduce to a singleton $\{\bar{z}\}$. When we have a tilt stable local minimum at $\bar{x}$ or $\bar{z} \in \operatorname{rel-int} \partial f (\bar{x})$ then this situation cannot occur.
\end{remark}
\begin{remark}
\label{rem:limhess}We denote $(x^{\prime },z^{\prime })\rightarrow
_{S_{p}(f)}(\bar{x},z)$ to mean $x^{\prime }\rightarrow ^{f}\bar{x}$, $\
z^{\prime }\in \partial _{p}f(x^{\prime })$ and $z^{\prime }\rightarrow z$.
As $\partial ^{2,-}f(x^{\prime },z^{\prime })\neq \emptyset $ iff $z^{\prime
}\in \partial _{p}f\left( x^{\prime }\right) $ it follows via an elementary
argument that
\begin{equation*}
\underline{\partial }^{2}f(\bar{x},\bar{z})=\limsup_{(x^{\prime },z^{\prime
})\rightarrow _{S_{p}(f)}(\bar{x},\bar{z})}\partial ^{2,-}f(x^{\prime
},z^{\prime }).
\end{equation*}
\end{remark}
Denote the recession directions of a convex set $C$ by $0^{+}C$. Noting that
$\langle Q,uv^{T}\rangle =v^{T}Qu$ one may see the motivation for the
introduction of the rank-1 support in (\ref{taylor}). The rank-1 support $q\left( \mathcal{A}\right) (u,v):=\sup \left\{ \langle Q,uv^{T}\rangle \mid
Q\in \mathcal{A}\right\} $ for a subset $\mathcal{A}\subseteq \mathcal{S}\left( n\right) $, in our case $\mathcal{A}=\underline{\partial }^{2}f(\bar{x},z)$. We see from (\ref{taylor}) that when we have $Q\in \partial ^{2,-}f(\bar{x},\bar{z})$ then $Q-P\in \partial ^{2,-}f(\bar{x},\bar{z})$ for any $n\times n$ positive semi-definite matrix $P \in \mathcal{P}(n)$. Thus we always have $-\mathcal{P}(n)\subseteq
0^{+}\partial ^{2,-}f(\bar{x},\bar{z})$ where $\partial ^{2,-}f(\bar{x},\bar{z}) \subseteq \underline{\partial }^{2}f(\bar{x},\bar{z})$.
\begin{theorem}[\protect\cite{eberhard:1}, Theorem 1]
\label{ebthm:rank:1} Let $g:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$
be proper (i.e. $g(u)\neq -\infty $ anywhere) and $\operatorname{dom}g\neq \emptyset
$. For $u,v\in \mathbb{R}^{n}$, define $q(u,v)=\infty $ if $u$ is not a
positive scalar multiple of $v$ or vice versa, and $q(\alpha u,u)=q(u,\alpha
u)=\alpha g(u)$ for any $\alpha \geq 0$. Then $q$ is a rank one support of a
set $\mathcal{A}\subseteq \mathcal{S}(n)$ with $-\mathcal{P}(n)\subseteq
0^{+}\mathcal{A}$ if and only if
\begin{enumerate}
\item $g$ is positively homogeneous of degree 2.
\item $g$ is lower semicontinuous.
\item $g(-u)=g(u)$ (symmetry).
\end{enumerate}
\end{theorem}
For the sets $\mathcal{A}\subseteq \mathcal{S}(n)$ described in Theorem \ref{ebthm:rank:1} one only needs to consider the support defined on $\mathbb{R}^{n}$ by $q\left( \mathcal{A}\right) (h):=\sup \left\{ \langle
Q,hh^{T}\rangle \mid Q\in \mathcal{A}\right\} $. On reflection it is clear
that all second order directional derivatives possess properties 1. and 3. of
the above theorem and those that are topologically well defined possess 2.
as well. We call
\begin{equation*}
\mathcal{A}^{1}:=\{Q\in \mathcal{S}(n)\mid q(\mathcal{A})(h)\geq \langle
Q,hh^{T}\rangle \text{, }\forall h\}
\end{equation*}
the symmetric rank--1 hull of $\mathcal{A}\subseteq \mathcal{S}(n)$. Note
that by definition $q(\mathcal{A})(h)=q(\mathcal{A}^{1})(h)$. When $\mathcal{A}=\mathcal{A}^{1}$, we say $\mathcal{A}$ is a symmetric rank--1
representer. Note that if $Q\in \mathcal{A}^{1}$, then $Q-P\in \mathcal{A}^{1}$ for $P\in \mathcal{P}(n)$ so always $-\mathcal{P}(n)\subseteq 0^{+}\mathcal{A}
$. The rank one barrier cone for a symmetric rank-1 representer is denoted
by $b^{1}(\mathcal{A}):=\{h\in \mathbb{R}^{n}\mid q\left( \mathcal{A}\right)
(h)<\infty \}$. Note that rank-1 support is an even, positively homogeneous
degree 2 function (i.e. $q\left( \mathcal{A}\right) (h)=q\left( \mathcal{A}\right) (-h)$ and $q\left( \mathcal{A}\right) (th)=t^{2}q\left( \mathcal{A}\right) (h)$). Moreover its domain is the union of a cone $C:= \operatorname{dom} f^{\prime\prime}_{-} (\bar{x}, \bar{z}, \cdot)$ and its negative
i.e.
\begin{equation}
\operatorname{dom}q\left( \mathcal{A}\right) (\cdot ):=b^{1}(\mathcal{A})=C\cup
\left( -C\right) . \label{neqn:eb34}
\end{equation}
In the first order case we have $\delta^{\ast}_{\partial _{p}f(\bar{x})} (h) \leq
f_{\_}^{\prime }(\bar{x},h)$. A related second order inequality was first observed
in \cite{eberhard:1} \vspace*{-0.2cm}
\begin{equation*}
\vspace*{-0.2cm}q\left( \partial ^{2,-}f(\bar{x},z)\right) (u)=\min
\{f_{\_}^{\prime \prime }(\bar{x},z,u),f_{\_}^{\prime \prime }(\bar{x}
,z,-u)\}=f_{s}^{\prime \prime }\left( \bar{x},z,u\right)
:=\liminf_{t\rightarrow 0,u^{\prime }\rightarrow u}\Delta
_{2}f(x,t,z,u^{\prime })\text{.}
\end{equation*}
Hence if we work with subjets we are in effect dealing with objects dual to
the lower, symmetric, second-order epi-derivative $f_{\_}^{\prime \prime }(\bar{x},z,\cdot)$. Many text book examples of these
quantities can be easily constructed. Moreover there exists a robust calculus for the limiting
subjet \cite{eberhard:7, ebioffe:4}. Furthermore as noted in example 51 of \cite{eberhard:8} the
qualification condition for the sum rule for the limiting subjet can hold while for the same problem the basic qualification condition for the sum rule for the limiting (first order) subdifferential can fail to hold. This demonstrates the value of considering pairs $(z,Q)$.
\begin{example} \label{Ex:1}Consider the convex function on $\mathbb{R}^{2}$ given by
$
f(x,y)=\left\vert x-y\right\vert.
$
Take $\left( x,y\right) =(0,0)$ and $z=(0,0)\in\partial f(0,0)$ then
$Q=\left(
\begin{array}
[c]{cc}
\alpha & \gamma\\
\gamma & \beta
\end{array}
\right) \in\partial^{2,-}f \left( (0,0),(0,0)\right) $ iff locally
around $(0,0)$ we have
\[
\left\vert x-y\right\vert \geq\frac{1}{2}\left(
\begin{array}
[c]{cc}
x & y
\end{array}
\right) \left(
\begin{array}
[c]{cc}
\alpha & \gamma\\
\gamma & \beta
\end{array}
\right) \left(
\begin{array}
[c]{c}
x\\
y
\end{array}
\right) =\frac{1}{2}\left( \alpha x^{2}+2\gamma xy+\beta y^{2}\right)
+o\left( \left\Vert \left( x,y\right) \right\Vert ^{2}\right) \text{.}
\]
This inequality only bites when $x=y$ in which case
\[
0\geq\frac{x^{2}}{2}\left( \alpha+2\gamma+\beta\right) +o\left(
x^{2}\right) \quad\text{or \quad}0\geq\alpha+2\gamma+\beta+\frac{o\left(
x^{2}\right) }{x^{2}}\quad\text{so \quad}0\geq\alpha+2\gamma+\beta\text{.}
\]
Consequently
\[
\partial^{2,-}f\left( (0,0),(0,0)\right) =\left\{ Q=\left(
\begin{array}
[c]{cc}
\alpha & \gamma\\
\gamma & \beta
\end{array}
\right) \mid0\geq\alpha+2\gamma+\beta\right\} .
\]
The extreme case is when $\alpha+2\gamma+\beta=0$ and two examples of $Q$
attaining this extremal value are:
\[
Q_{1}=\alpha\left(
\begin{array}
[c]{cc}
1 & 0\\
0 & -1
\end{array}
\right) \quad\text{and \quad}Q_{2}=\alpha\left(
\begin{array}
[c]{cc}
1 & -1\\
-1 & 1
\end{array}
\right) \text{.}
\]
Also
\begin{align*}
q\left( \partial^{2,-}f \left( (0,0),(0,0)\right) \right) (h_{1}
,h_{2}) & =\left\{
\begin{array}
[c]{cc}
0 & \text{if }h_{1}=h_{2}\\
+\infty & \text{otherwise}
\end{array}
\right\} = f^{\prime\prime}_{s}\left( (0,0),(0,0),(h_1,h_2)\right) \\
\text{and so\quad}b^{1}\left( \partial^{2,-}f \left( (0,0),(0,0)\right)
\right) & =\left\{ \left( h_{1},h_{2}\right) \mid h_{1}=h_{2}\right\}
\subsetneq\mathbb{R}^{2}.
\end{align*}
\end{example}
\begin{remark} \label{rem:rankone}
Knowing the rank-1 barrier cone of a rank-1 representer $\mathcal{A}$ tells us a lot about its structure. This is in no small part due to the fact that it consists only of symmetric matrices. This discussion has been carried out in quite a bit of detail in \cite{eberhard:7}. From convex analysis we know that
the barrier cone (the points at which the support function is finite valued) is polar to the recession directions. In \cite[Lemma 14]{eberhard:7} it is shown that for a rank-1 representer (using the Frobenius inner product on $\mathcal{S}(n)$)
this corresponds to $(0^+ \mathcal{A})^{\circ} = \mathcal{P} (b^1 (\mathcal{A}))
:= \{ \sum_{i \in F} u_i u_i^T \mid u_i \in b^1 (\mathcal{A})\, \text{for a finite index set $F$}\}$.
Moreover in \cite[Lemma 24]{eberhard:7} it is shown that
$ \mathcal{P} (b^1 (\mathcal{A}))^{\circ} \cap \mathcal{P} (n) =
\mathcal{P} (b^1 (\mathcal{A})^{\perp})$. Denoting $\mathcal{U}^2 := b^1 (\mathcal{A})$
and $\mathcal{V}^2 = (\mathcal{U}^2)^{\perp}$ we deduce that
$\mathcal{P}(\mathcal{V}^2) = (0^+ \mathcal{A}) \cap \mathcal{P} (n)$. This
explains why $q(\mathcal{A}) (w) = +\infty$ when $w \notin \mathcal{U}^2$.
Since we always have $-\mathcal{P} (\mathcal{V}^2) \subseteq -\mathcal{P} (n) \subseteq 0^{+} \mathcal{A}$ it follows that $\mathcal{P}(\mathcal{V}^2) -\mathcal{P}(\mathcal{V}^2) \subseteq 0^{+} \mathcal{A}$. Furthermore we find that for any $w = w_{\mathcal{U}^2} + w_{\mathcal{V}^2}$ we then have for $\mathcal{S}(\mathcal{V}^2)$, denoting the symmetric
linear mapping from $\mathcal{V}^2$ into $\mathcal{V}^2$, that
\[
\mathcal{A}w =\mathcal{A}w_{\mathcal{U}^2} + \mathcal{A}w_{\mathcal{V}^2}
\supseteq \mathcal{A}w_{\mathcal{U}^2} + [\mathcal{P}(\mathcal{V}^2) -\mathcal{P}(\mathcal{V}^2)]w_{\mathcal{V}^2}= \mathcal{A}w_{\mathcal{U}^2} + \mathcal{S}(\mathcal{V}^2)w_{\mathcal{V}^2}.
\]
\end{remark}
\subsection{A second order $\mathcal{VU}$ decomposition }
The result \cite[Corollary 6.1]{eberhard:6} contains a number of observations that
characterise the rank-1 support of the limiting subhessians. We single out the following
which is of particular interest for this paper.
\begin{proposition}[\protect\cite{eberhard:6}, Corollary 6.1]
\label{limpara} Suppose that $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty
}$ is quadratically minorised and is prox--regular at $\bar{x}\ $ for $\bar{z}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r.$ Then $h\mapsto q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (h)+r\Vert h\Vert ^{2}$ is convex.
\end{proposition}
\begin{proof}
For the convenience of the reader we provide a
self contained proof of this in the Appendix A.
\end{proof}
\begin{corollary}
Suppose that $f$ is quadratically minorised and is prox--regular at $\bar{x}$
for $\bar{z}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r.$
Then $b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}))$ is a linear
subspace of $\mathbb{R}^{n}$.
\end{corollary}
\begin{proof}
Note that $b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}))=\operatorname{dom}[q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (\cdot )]$ is
convex under the assumption of Proposition \ref{limpara}. Let $C$ be the
cone given in (\ref{neqn:eb34}) then $b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}))=\operatorname{co}(C\cup (-C))=\operatorname{span}C$. As $b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}))$ is a symmetric convex cone it is a
subspace.
\end{proof}
\begin{definition}
Let the function $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ be
finite at $\bar{x}$. When $b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}
)) $ is a linear subspace of $\mathbb{R}^{n}$ and $b^{1}(\underline{\partial
}^{2}f(\bar{x},\bar{z}))\subseteq \mathcal{U}$ we call $\mathcal{U}
^{2}:=b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}))$ a second order
component of the $\mathcal{U}$-space.
\end{definition}
We will now justify this definition via the following results.
\begin{lemma}
\label{lem:sharp}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$
is quadratically minorised and is prox--regular at $\bar{x}\ $ for $\bar{z}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r.$ Suppose in
addition that $\bar{z}\in \operatorname{rel}$-$\operatorname{int}\partial f(\bar{x})$. Then
for any $\beta \geq 0$ there is $\varepsilon ^{\prime }>0$ (independent of $\beta $) and a $\epsilon _{\beta }>0$ ($\beta $ dependent) such that we have
$f\left( \bar{x}+u+v\right) \geq f\left( \bar{x}\right) +\langle \bar{z},u+v\rangle +\frac{\beta }{2}\left\Vert v\right\Vert ^{2}-\frac{r}{2}\left\Vert u\right\Vert ^{2}$ whenever $v\in B_{\epsilon _{\beta }}\left(
0\right) $ and $u\in B_{\varepsilon ^{\prime }}\left( 0\right) $.
Moreover we have
\begin{equation}
\mathcal{U}^{2}\subseteq \mathcal{U}=\left\{ h\mid -\delta _{\partial f(\bar{x})}^{\ast }(-h)=\delta _{\partial f(\bar{x})}^{\ast }(h)=\langle \bar{z},h\rangle \right\} . \label{eqn:44}
\end{equation}
\end{lemma}
\begin{proof}
By the prox-regularity of $f$ at $\bar{x}$ for $\bar{z}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r>0$ we have $B_{\delta }(\bar{z}
)\cap \partial f(\bar{x})=B_{\delta }(\bar{z})\cap \partial _{p}f(\bar{x})$
for some sufficiently small $\delta >0$. Thus $\bar{z}\in \operatorname{rel}$-$\operatorname{int}\partial _{p}f(\bar{x})$ and there exists a $\varepsilon ^{\prime }\leq
\min \{\varepsilon ,\delta \}$ such that $\bar{z}+\varepsilon ^{\prime
}B_{1}\left( 0\right) \cap \mathcal{V}\subseteq \partial _{p}f(\bar{x})$ and
$r>0$ such that for $u+v\in B_{\varepsilon ^{\prime }}^{\mathcal{U}}\left( 0\right) \times
B_{\varepsilon ^{\prime }}^{\mathcal{V}}\left( 0\right) $ we have
\begin{eqnarray}
f\left( \bar{x}+u+v\right) &\geq &f\left( \bar{x}\right) +\langle
z,u+v\rangle -\frac{r}{2}\left[ \left\Vert u\right\Vert ^{2}+\left\Vert
v\right\Vert ^{2}\right] \quad \text{for all }z\in \bar{z}+\varepsilon
^{\prime }B_{1}\left( 0\right) \cap \mathcal{V}\ \notag \\
&\geq &f\left( \bar{x}\right) +\langle \bar{z}_{\mathcal{V}},v\rangle
+\langle \bar{z}_{\mathcal{U}},u\rangle +\left( \varepsilon ^{\prime }-\frac{r\left\Vert v\right\Vert }{2}\right) \left\Vert v\right\Vert -\frac{r}{2}
\left\Vert u\right\Vert ^{2} \text{\ for }v\in \varepsilon ^{\prime
}B_{1}\left( 0\right) \cap \mathcal{V} \label{neqn:14} \\
&\geq &f\left( \bar{x}\right) +\langle \bar{z}_{\mathcal{V}},u+v\rangle +\frac{\beta }{2}\left\Vert v\right\Vert ^{2}-\frac{r}{2}\left\Vert
u\right\Vert ^{2}\quad \text{for all }v\in \min \{\varepsilon ^{\prime },\frac{2\varepsilon ^{\prime }}{\beta +r}\}B_{1}\left( 0\right) \cap \mathcal{V}, \notag
\end{eqnarray}
where the last inequality holds due to the fact that $\varepsilon^{\prime}-\frac{r \Vert v \Vert}{2} \geq \beta \Vert v \Vert$.
Now choose $\epsilon _{\beta }=\min \{\varepsilon ^{\prime },\frac{2\varepsilon ^{\prime }}{\beta +r}\}$.
This inequality implies that for all $\beta >0$ we have
$
\beta I\in P_{\mathcal{V}}^{T}\partial^{2,-}f(\bar{x},\bar{z})P_{\mathcal{V}}
$
and hence when $P_{\mathcal{V}} h \ne 0$ (or $h \notin \mathcal{U}$) we have
$ q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (h) = +\infty$ and so
$h \notin \mathcal{U}^2$.
\end{proof}
\begin{remark}
This result may hold trivially with both $\mathcal{U} = \mathcal{U}^2 =\{0\}$. Consider the function $f :\mathbb{R}^2 \to \mathbb{R}$ given by:
\[
f\left( x,y\right) =\left\{
\begin{array}
[c]{lc}
\max\left\{0,x+y\right\} & :\text{for }x\leq0\text{, }y\geq0\\
\max\left\{0,-x+y\right\} & :\text{for }x\geq0\text{, }y\geq0\\
\max\left\{0,x-y\right\} & :\text{for }x\leq0\text{, }y\leq0\\
\max\left\{0,-x-y\right\} & :\text{for }x\geq0\text{, }y\leq0
\end{array}
\right.
\]
and take $\bar{x} = (0,0)$. Then
$\partial f\left( 0,0\right) \supseteq \left\{ \left( 0,0\right) ,\left(
1,1\right) ,\left( -1,1\right) ,\left( 1,-1\right) ,\left( -1,-1\right)
\right\} $ and $\mathcal{U}=\left\{ 0\right\} $ with $\mathcal{V}
=\mathbb{R}^{2}.$ We have $f$ is prox-regular at $\bar{x}=(0,0)$ for $\bar{z} = (0,0)$
and quadratically minorised (by the zero quadratic). We have $\mathcal{U}^{2}=\left\{ 0\right\} $ as we have $Q_{1}
=\pm\beta\left( 1,1\right) \left(
\begin{array}
[c]{c}
1\\
1
\end{array}
\right) =\pm\beta\left(
\begin{array}
[c]{cc}
1 & 1\\
1 & 1
\end{array}
\right) $ and $Q_{2}=\pm\beta\left( -1,1\right) \left(
\begin{array}
[c]{c}
-1\\
1
\end{array}
\right) =\pm\beta\left(
\begin{array}
[c]{cc}
1 & -1\\
-1 & 1
\end{array}
\right) $ with $Q_{1}$, $Q_{2}\in\underline{\partial}^{2}f\left( ( 0,0), (0,0) \right)
$ for all $\beta\geq0$ (approach $(0,0)$ along $x=y$ and $y=-x$ for
$z\rightarrow0$). Then $q\left( \underline{\partial}^{2}f\left( (0,0), (0,0) \right)
\right) \left( u,w\right) =+\infty\geq\beta\max\left\{ \left(
-u+w\right) ^{2},\left( u+w\right) ^{2}\right\} $ for all $\left(
u,w\right) \neq\left( 0,0\right) $ and $\beta\geq 0$.
We note that the examples developed in \cite[Examples 2, 3]{Mifflin:2004:2} show that the
assumption that $\bar{z}\in \operatorname{rel}$-$\operatorname{int}\partial f(\bar{x})$
is necessary for Lemma \ref{lem:sharp} to hold.
\end{remark}
We finish by
generalizing the notion of ``fast track'' \cite{Lem:1}.
\begin{definition}
We say $f$ possesses a ``fast track'' at $\bar{x}$ iff there exists $\bar{z}
\in \partial f\left( \bar{x}\right) $ for which
\begin{equation*}
\mathcal{U}^{2}=b^{1}( \underline{\partial }^{2}f(\bar{x},\bar{z}))=\mathcal{U}.
\end{equation*}
\end{definition}
In the next section after we have introduced the localised $\mathcal{U}$-Lagrangian we will justify this definition further. From Proposition \ref{prop:ebpenot} we see that $\mathcal{U}^{2}=b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}))$ provides the subspace within which the eigen-vectors of
the limiting Hessians remain bounded.
\begin{lemma}
\label{lem:boundprox} Suppose $f$ is quadratically minorised and
prox-regular at $\bar{x}$ for $\bar{z}\in \partial f(\bar{x})$
which possesses a nontrivial second order component $\mathcal{U}^{2}\subseteq \mathcal{U}$. Then for all $\left\{ x_{k}\right\} \subseteq
S_{2}(f)$, $x_{k}\rightarrow ^{f}\bar{x}$ with $z_{k}\rightarrow \bar{z}$
and all $h\in \mathcal{U}^{2}$ there is a uniform bound $M>0$ such that for $Q_{k}\in \partial ^{2,-}f\left( x_{k},z_{k}\right) $ we have
\begin{equation}
\langle Q_{k},hh^{T}\rangle \leq M\Vert h\Vert ^{2}\quad \text{ for }k\text{
sufficiently large}. \label{neqn:34}
\end{equation}
\end{lemma}
\begin{proof}
We have for all $Q\in \underline{\partial }^{2}f(\bar{x},\bar{z})$ and any $h\in \mathcal{U}^{2}$ that
\begin{equation*}
\langle Q,hh^{T}\rangle \leq q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (h)<+\infty .
\end{equation*}
As $f$ is prox-regular, by Proposition \ref{limpara} $q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (\cdot )+r\Vert \cdot \Vert ^{2}$ is
convex and finite valued on $\mathcal{U}^{2}$, a closed subspace and
therefore is locally Lipschitz. Thus $q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (\cdot )$ is locally Lipschitz continuous on $\mathcal{U}^{2}$. Moreover a compactness argument allows us to claim it is
Lipschitz continuous on the unit ball inside the space $\mathcal{U}^{2}$ and
thus obtains a maximum, over the unit ball restricted to the space $\mathcal{U}^{2}$. Hence
\begin{equation*}
\max_{\left\{ h\in \mathcal{U}^{2}\mid \left\Vert h\right\Vert \leq
1\right\} }q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right)
(h)\leq K
\end{equation*}
for some $K>0$. On multiplying by $\Vert h\Vert ^{2}$ for $h\in \mathcal{U}^{2}$ and using the positive homogeneity of degree 2 of the rank-1 support
results in the following inequality
\begin{equation*}
\langle Q,hh^{T}\rangle \leq q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (h)\leq K\left\Vert h\right\Vert ^{2}
\end{equation*}
for all $Q\in \underline{\partial }^{2}f(\bar{x},\bar{z})$ and any $h\in
\mathcal{U}^{2}$. Take an arbitrary sequence $(x_{k},z_{k})\rightarrow
_{S_{p}(f)}(\bar{x},\bar{z})$ and $Q_{k}\in \partial ^{2,-}f(x_{k},z_{k})$
with $Q_{k}\rightarrow Q\in \underline{\partial }^{2}f(\bar{x},\bar{z})$
then by taking $M=2K$ we have
\begin{equation*}
\langle Q_{k},hh^{T}\rangle \leq M\left\Vert h\right\Vert ^{2}\quad \text{for }k\text{ sufficiently large. }
\end{equation*}
Moreover any sequence $\left\{ x_{k}\right\} \subseteq
S_{2}(f)$, $x_{k}\rightarrow ^{f}\bar{x}$ with $z_{k}\rightarrow \bar{z}$ has
$(x_{k},z_{k})\rightarrow_{S_{p}(f)}(\bar{x},\bar{z})$.
\end{proof}
\subsection{Some Consequences for Coderivatives of $C^{1,1}$ Functions}
As usual we have denoted the indicator function of a set $\mathcal{A}$ by $\delta _{\mathcal{A}}(Q)$ which equals zero if $Q\in \mathcal{A}$ and $+\infty $ otherwise. In general for the recession directions $0^{+}\mathcal{A}^{1}\supseteq -\mathcal{P}(n)$. Consequently the convex support function $\delta _{\mathcal{A}^{1}}^{\ast }\left( P\right) :=\sup \left\{ \langle
Q,P\rangle :=\operatorname{tr}QP\mid Q\in \mathcal{A}^{1}\right\} =+\infty $ if $P\notin \mathcal{P}(n)$. It is noted in \cite[Proposition 4]{eberhard:1} that $0^{+}\mathcal{A}^{1}=-\mathcal{P}(n)$ iff $q(\mathcal{A})(h)<+\infty $ for all $h$.
\begin{lemma}[\protect\cite{eberhard:5}, Lemma 7]
\label{lem:clo}For any $\mathcal{A}\subseteq \mathcal{S}(n)$, then $\operatorname{co}
\left( \mathcal{A}-\mathcal{P}(n)\right) =\mathcal{A}^{1}\text{.}$
\end{lemma}
For any multi--function $F:\mathbb{R}^{n}\rightrightarrows \mathbb{R}^{m}$ we denote its graph by $\operatorname{Graph}F:=\left\{ (x,y)\mid y\in F(x)\right\} $. The \emph{Mordukhovich coderivative\/} is defined as
\begin{equation*}
D^{\ast }F(x,y)(w):=\{p\in \mathbb{R}^{n}\mid (p,-w)\in \partial \delta _{\operatorname{Graph}\,F}(x,y):=N_{\operatorname{Graph}\,F}(x,y)\}
\end{equation*}
and a second order object $D^{\ast }\left( \partial f\right) (\bar{x},\bar{z})(h)$
is obtained by applying this construction to $F\left( x\right) =\partial
f\left( x\right) $ for $\bar{z} \in \partial f (\bar{x})$.
We can combine this observation with \cite[Theorem 13.52]{rock:6} that gives
a characterisation of the convex hull of the coderivative in terms of
limiting Hessians for a $C^{1,1}$ function $f$.
\begin{corollary}
\label{coderiv} Suppose $f$ is locally $C^{1,1}$ around $x$ then the
Mordukhovich coderivative satisfies
\begin{eqnarray}
\operatorname{co}D^{\ast }(\partial f)(x, z)(h) &=&\operatorname{co}\{Ah\mid A=\lim_{k}\nabla
^{2}f(x^{k})\text{ for some }x^{k}\,(\in S_{2}(f))\rightarrow x
\text{ with }\nabla f\left( x^{k}\right) \rightarrow z\} \label{neqn:23} \\
&=&\operatorname{co}\left[ \overline{D}^{2}f(x,z)h\right] =\left[ \operatorname{co}\overline{D}^{2}f(x,z)\right] h\subseteq \left[ \left( \overline{D}^{2}f(x,z)\right)
^{1}\right] h. \notag
\end{eqnarray}
and
\begin{equation*}
\delta _{D^{\ast }(\partial f)(x|z)(h)}^{\ast }\left( h\right) =q\left(
\underline{\partial }^{2}f( {x},z)\right) \left( h\right) =q\left(
\overline{D}^{2}f(x,z)\right) \left( h\right) .
\end{equation*}
\end{corollary}
\begin{proof}
The first equality of (\ref{neqn:23}) follows from \cite[Theorem 13.52]
{rock:6} and the second a restatement in terms of $\overline{D}^{2}f(x,z)$.
The third equality follows from preservation of convexity under a linear
mapping. Clearly $\operatorname{co}\overline{D}^{2}f(x,z)\subseteq \operatorname{co}\left[
\overline{D}^{2}f(x,z)-\mathcal{P}(n)\right] =\overline{D}^{2}f(x,z)^{1}$ by
Lemma \ref{lem:clo}. Moreover we must have by Proposition \ref{prop:ebpenot}
and the linearity of $Q\mapsto \langle Q,hh^{T}\rangle $ that
\begin{eqnarray*}
q\left( \underline{\partial }^{2}f( {x},z)\right) \left( h\right)
&=&q\left( \underline{\partial }^{2}f( {x},z)^{1}\right) \left( h\right)
=q\left( \overline{D}^{2}f(x,z)^{1}\right) \left( h\right) =q\left(
\overline{D}^{2}f(x,z)-\mathcal{P}(n)\right) \left( h\right) \\
&=&\sup \left\{ \langle v,h\rangle \mid v\in \operatorname{co}\left[ \overline{D}
^{2}f(x,z)h\right] \right\} =\sup \left\{ \langle v,h\rangle \mid v\in \operatorname{co}D^{\ast }(\partial f)(x, z)(h)\right\} \\
&=&\sup \left\{ \langle v,h\rangle \mid v\in D^{\ast }(\partial f)(x, z)(h)\right\}
=\delta _{D^{\ast }(\partial f)(x, z)(h)}^{\ast }\left( h\right) .
\end{eqnarray*}
\end{proof}
A central assumption in this paper will be the presence of the following notion of local minimizer.
\begin{definition} [\protect\cite{rock:7}]
\label{def:tilt}A point $\bar{x}$ gives a tilt stable local
minimum of a function $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ if $f\left( \bar{x}\right) $ is finite and there exists an $\varepsilon >0$ such
that the mapping
\begin{equation}
m_{f}:v\mapsto \operatorname{argmin}_{\left\Vert x-\bar{x}\right\Vert \leq
\varepsilon }\left\{ f\left( x\right) -\langle x,v\rangle \right\}
\label{eqn:101}
\end{equation}
is single valued and Lipschitz on some neighbourhood of $0$ with $ m_{f}\left( 0\right) =\bar{x}$.
\end{definition}
In \cite[Theorem 1.3]{rock:7} a criterion
for tilt stability was given in terms of a second order construction based on
the coderivative of the subdifferential. Assume the first--order condition $0\in \partial f(\bar{x})$ holds. In \cite{rock:7} the second order sufficiency condition
\begin{equation}
\forall \left\Vert h\right\Vert =1\text{, }p\in D^{\ast }\left( \partial
f\right) (\bar{x}, 0)(h)\text{ we have }\langle p,h\rangle >0
\label{neqn:2}
\end{equation}
is studied and shown to imply a tilt--stable local minimum when $f$ is both subdifferentially continuous and
prox-regular at $\bar{x}$ for $\bar{z}\in \partial f(\bar{x})$.
We may reinterpret the condition (\ref{neqn:2}) for $C^{1,1}$ functions. Indeed, thanks to Corollary \ref{coderiv}, condition (\ref{neqn:2}) is equivalent to the following.
\begin{corollary}\label{cor:equiv}
If $f$ is locally $C^{1,1}$ around $x$ then condition (\ref{neqn:2}) is
equivalent to the existence of $\beta >0$ such that:
\begin{equation*}
\forall Q\in \overline{D}^{2}f(x,0)\quad \text{we have }\langle
Q,hh^{T}\rangle \geq \beta >0 \quad \text{for all } \| h \|=1.
\end{equation*}
\end{corollary}
\begin{proof}
By a simple convexity argument (\ref{neqn:2}) is equivalent to $\langle
v,h\rangle >0$ for all $v\in \operatorname{co}D^{\ast }(\partial
f)(x|0)(h)=\left[ \operatorname{co}\overline{D}^{2}f(x,0)\right] h$ from which we
have an equivalent condition that $\langle Qh,h\rangle >0$ for
all $Q\in \operatorname{co} \overline{D}^{2}f(x,0).$ But $\langle Qh,h\rangle
=\langle Q,hh^{T}\rangle $ (the Frobenius inner product) and linearity in $Q$
gives $\langle Qh,h\rangle >0$ for all $Q\in \overline{D}^{2}f(x,0)$ as an equivalent condition.
Finally we note that $\overline{D}^{2}f(x,0)$ is closed and uniformly bounded due to the local
Lipschitzness of the gradient $x \mapsto \nabla f(x)$ so via a compactness argument $\langle Qh,h\rangle \geq \beta >0$ for some $\beta >0$.
\end{proof}
\begin{remark}
It would be interesting to have a characterisation of subjets for functions other than
those that are $C^{1,1}$ smooth, in order to compare with their corresponding second order coderivative.
Consider a characterisation of the coderivative for a class of functions found in
\cite[Corollary 5.4, Theorem 5.3]{Lewis:2} (which are not $C^{1,1}$ functions).
Then we have:
\[
D^{\ast} (\partial f )(\bar{x} , 0 ) (w)
=\left\{
\begin{array}
[c]{cl}
\nabla^2_{\mathcal{M}} f( \bar{x}) w + N_{\mathcal{M}} (\bar{x}) & :\text{for } w \in T_{\mathcal{M}} ( \bar{x}) \\
\emptyset & :\text{for } w \not\in T_{\mathcal{M}} ( \bar{x}) . \\
\end{array}
\right.
\]
In the context of this paper we have $\mathcal{M} := \{ (u , v (u) ) \mid u \in \mathcal{U} \}$ and if we assume this is a $C^2$ smooth manifold we have $\nabla^2_{\mathcal{M}} f( \bar{x}) w
= \frac{d^2}{dt^2} f(\bar{x} + tw + v(tw) )|_{t=0}$ with $T_{\mathcal{M}} ( \bar{x})
= \mathcal{U}^2$ and $N_{\mathcal{M}} (\bar{x}) = \mathcal{V}^2$.
It seems possible that the calculus provided by \cite{ebioffe:4, eberhard:7} could provide an
avenue to calculate $\underline{\partial}^2 f (\bar{x}, 0 )$ for this class of functions.
\end{remark}
\section{The localised $\mathcal{U}^{\prime}$-Lagrangian}
For the remainder of the paper we will assume $\bar{z} \in \operatorname{rel}$-$\operatorname{int}\partial f\left( \bar{x}\right) \neq \emptyset $ and so $\mathcal{V}:=\operatorname{span}
\left\{ \partial f\left( \bar{x}\right) -\bar{z}\right\} $, $\mathcal{U} = (\mathcal{V})^{\perp}$, as defined in \cite{Lem:1, Mifflin:2003} and
coinciding with the space defined in section \ref{sec:VU}. When
discussing tilt stability we will assume $\bar{z}=0\in \partial f\left(
\bar{x}\right) $. Then we define the localised $\mathcal{U}^{\prime }$-Lagrangian, for any subspace $\mathcal{U}^{\prime }\subseteq \mathcal{U}$
and some $\varepsilon >0$, to be the function
\begin{equation*}
L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) :=\left\{
\begin{array}{cc}
\inf_{v^{\prime }\in \mathcal{V^{\prime }}\cap B_{\varepsilon }\left(
0\right) }\left\{ f\left( \bar{x}+u+v^{\prime }\right) -\langle \bar{z}_{\mathcal{V}^{\prime }},v^{\prime }\rangle \right\} & \text{for }u\in
\mathcal{U}^{\prime }\cap B_{\varepsilon }\left( 0\right):= B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) \\
+\infty & \text{otherwise}
\end{array}
\right.
\end{equation*}
where $\mathcal{V^{\prime }}:=\mathcal{U^{\prime }}^{\perp }$. Let
\begin{equation}
v\left( u\right) \in \operatorname{argmin}_{v^{\prime }\in \mathcal{V^{\prime }}\cap
B_{\varepsilon }\left( 0\right) }\left\{ f\left( \bar{x}+u+v^{\prime
}\right) -\langle \bar{z}_{\mathcal{V^{\prime }}},v^{\prime }\rangle
\right\} . \label{eqn:78}
\end{equation}
This Lagrangian differs from the modification introduced by Hare \cite{Hare:3} in that $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( \cdot
\right) $ is locally well defined on $\mathcal{U^{\prime }}$ due to the
introduction of the ball $B_{\varepsilon }^{\mathcal{V^{\prime }}}\left(
0\right) =\mathcal{V^{\prime }}\cap B_{\varepsilon }\left( 0\right) $ over
which the infimum is taken. Hare assumes a quadratic minorant to justify a
finite value for a sufficiently large regularization parameter used in the
so-called quadratic sub-Lagrangian. Define for $u\in \mathcal{U^{\prime }}$
and $v\left( \cdot \right) :\mathcal{U^{\prime }}\rightarrow B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right) $ the auxiliary functions
\begin{eqnarray*}
k_{v}\left( u\right) := &&h\left( u+v\left( u\right) \right) -\langle \bar{z}_{\mathcal{V^{\prime }}},u+v\left( u\right) \rangle \\
\text{where\quad }h\left( w\right) := &&f\left( \bar{x}+w\right) +\delta _{B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right)
\oplus B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right) }\left( w\right) .
\end{eqnarray*}
Then
\begin{equation*}
L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) :=\inf_{v^{\prime
}\in \mathcal{V^{\prime }}}\left\{ h\left( u+v^{\prime }\right) -\langle
\bar{z}_{\mathcal{V}^{\prime }},v^{\prime }\rangle \right\} .
\end{equation*}
When $v(\cdot )$ is chosen as in (\ref{eqn:78}) we have $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) =k_{v}(u)$ with both infinite
outside $B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) $.
\begin{lemma} \label{lem:27}
Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a proper lower
semi-continuous function and assume $v(\cdot )$ is chosen as in (\ref{eqn:78}). The conjugate of $k_{v}:\mathcal{U}^{\prime }\rightarrow \mathbb{R}_{\infty }$ with respect to $\mathcal{U^{\prime }}$ is given by
\begin{equation}
k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) :=\sup_{u\in \mathcal{U^{\prime }}}\left\{ \langle u,z_{\mathcal{U^{\prime }}}\rangle -k_{v}\left(
u\right) \right\} =h^{\ast }\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) =\left( L_{\mathcal{U^{\prime }}}^{\varepsilon
}\right) ^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) . \label{neqn:33}
\end{equation}
\end{lemma}
\begin{proof}
By direct calculation we have
\begin{eqnarray*}
k_{v}^{\ast }\left( z_{\mathcal{U^{\prime}}}\right) &=&\sup_{u\in \mathcal{U^{\prime}} }\left\{ \langle u,z_{\mathcal{U^{\prime}}}\rangle -\left\{
h\left( u+v\left( u\right) \right) -\langle \bar{z}_{\mathcal{V^{\prime}}
},u+v\left( u\right) \rangle \right\} \right\} \\
&=&\sup_{u\in \mathcal{U^{\prime}}}\left\{ \langle u,z_{\mathcal{U^{\prime}}
}\rangle -\min_{v^{\prime }\in \mathcal{V^{\prime}}}\left\{ h\left(
u+v^{\prime }\right) -\langle \bar{z}_{\mathcal{V^{\prime}}},u+
v^{\prime}\rangle \right\} \right\} \\
&=&\sup_{\left( u,v^{\prime }\right) \in \mathcal{U^{\prime}}\oplus \mathcal{V^{\prime}} }\left\{ \langle u+v^{\prime},z_{\mathcal{U^{\prime}}}+ \bar{z}_{\mathcal{V^{\prime}}}\rangle
-h\left( u+v^{\prime }\right) \right\} =h^{\ast }\left(
z_{\mathcal{U^{\prime}}}+\bar{z}_{\mathcal{V^{\prime}}}\right)
\end{eqnarray*}
as $\langle z_{\mathcal{U^{\prime}}},v^{\prime }\rangle =0$ for all $v^{\prime }\in \mathcal{V^{\prime}}$. Also
\begin{eqnarray*}
k_{v}^{\ast }\left( z_{\mathcal{U^{\prime}}}\right) &=& \sup_{u\in \mathcal{U^{\prime}} }\left\{ \langle u,z_{\mathcal{U^{\prime}}}\rangle
-\min_{v^{\prime }\in \mathcal{V^{\prime}} }\left\{ h\left( u+v^{\prime
}\right) -\langle \bar{z}_{\mathcal{V^{\prime}} },u+v^{\prime }\rangle
\right\} \right\} \\
&=&\sup_{u\in \mathcal{U^{\prime}}}\left\{ \langle u,z_{\mathcal{U^{\prime}}
}\rangle -L_{\mathcal{U^{\prime}}}^{\varepsilon }\left( u\right) \right\}
=\left( L_{\mathcal{U^{\prime}} }^{\varepsilon }\right) ^{\ast } \left( z_{\mathcal{U^{\prime}}}\right) .
\end{eqnarray*}
\end{proof}
When we assume $\bar{x}$ gives a tilt stable local minimum of a function $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ we shall choose the $\varepsilon >0$ to be consistent with the definition of tilt stability at $\bar{x}$ for the neighbourhood
\begin{equation*}
B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( \bar{x}_{\mathcal{U^{\prime }}}\right) \oplus B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( \bar{x}_{\mathcal{V^{\prime }}}\right) :=\left\{ \left( x_{\mathcal{U^{\prime }}},x_{\mathcal{V^{\prime }}}\right) \in \mathcal{U^{\prime }}\oplus \mathcal{V^{\prime }}\mid \left\Vert x_{\mathcal{U^{\prime }}}-\bar{x}_{\mathcal{U^{\prime }}}\right\Vert \leq \varepsilon \text{ and }\left\Vert x_{\mathcal{V^{\prime }}}-\bar{x}_{\mathcal{V^{\prime }}}\right\Vert \leq \varepsilon
\right\}
\end{equation*}
where $\varepsilon$ is reduced to contain the above neighbourhood in a larger ball
$\{x \in \mathbb{R}^n \mid \| x - \bar{x} \| \leq \hat{\varepsilon} \}$ on which tilt stability holds.
We will rely on the results of \cite{Drusvy:1}. From Definition \ref{def:tilt} we have on $B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( \bar{x}_{\mathcal{U^{\prime }}}\right) \oplus B_{\varepsilon }^{\mathcal{V^{\prime }
}}\left( \bar{x}_{\mathcal{V^{\prime }}}\right) $ that
\begin{equation}
f\left( x\right) \geq f\left( m_{f}\left( v\right) \right) +\langle
x-m_{f}\left( v\right) ,v\rangle \label{leo:query}
\end{equation}
where $m_{f}(\cdot )$ is as defined in (\ref{eqn:101}). That is, we have a supporting tangent plane to the epigraph of $f +\delta_{B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( \bar{x}_{\mathcal{U^{\prime }}}\right) \oplus B_{\varepsilon }^{\mathcal{V^{\prime }
}}\left( \bar{x}_{\mathcal{V^{\prime }}}\right) }$. As the convex hull of any set (including the epigraph of $f +\delta_{B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( \bar{x}_{\mathcal{U^{\prime }}}\right) \oplus B_{\varepsilon }^{\mathcal{V^{\prime }
}}\left( \bar{x}_{\mathcal{V^{\prime }}}\right) }$) must remain on the same side of any supporting hyperplane (in this case the hyperplane $(x,\alpha) \mapsto \langle (x,\alpha) -(m_{f}\left( v\right) , f(m_{f}\left( v\right) )) , (v, -1)\rangle \leq 0$) we may deduce that (again locally)
\begin{equation*}
\operatorname{co}f\left( x\right) \geq f\left( m_{f}\left( v\right) \right) +\langle
x-m_{f}\left( v\right) ,v\rangle .
\end{equation*}
This observation leads to the following minor rewording of the result from
\cite{Drusvy:1}. It shows that there is a strong convexification process
involved with tilt stability.
\begin{proposition} [\protect\cite{Drusvy:1}, Proposition 2.6]
\label{prop:co}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a proper lower semi-continuous
function and that $\bar{x}$ gives a tilt stable local minimum of $f$.
Then for all sufficiently small $\varepsilon >0,$ in terms of the function $h\left( w\right) :=f\left( \bar{x}+w\right) +\delta _{B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) \oplus B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right) }\left( w\right) $ we have
\begin{equation*}
\operatorname{argmin}_{x\in B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( \bar{x}_{\mathcal{U^{\prime }}}\right) \oplus B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( \bar{x}_{\mathcal{V^{\prime }}}\right) }\left[ f\left( x\right)
-\langle x,z\rangle \right] =\operatorname{argmin}_{\left( u^{\prime },v^{\prime
}\right) \in \mathcal{U^{\prime }}\oplus \mathcal{V^{\prime }}}\left[ \operatorname{co}h\left( u^{\prime }+v^{\prime }\right) -\langle u^{\prime }+v^{\prime },z\rangle \right] +\bar{x}
\end{equation*}
for all $z$ sufficiently close to $0.$ Consequently $0$ is a tilt stable
local minimum of $\operatorname{co}h$.
\end{proposition}
We now study the subgradients of the $\mathcal{U^{\prime }}$-Lagrangian.
In order to simplify statements we introduce the following modified function:
\begin{equation*}
m_{h} : z \mapsto \operatorname{argmin}_{\left( u^{\prime },v^{\prime }\right)
\in \mathcal{U^{\prime }}\oplus \mathcal{V^{\prime }}}\left[ \operatorname{co}h\left( u^{\prime }+v^{\prime }\right) -\langle u^{\prime }+v^{\prime },z\rangle \right]
\end{equation*}
then we have $m_{h}\left( z\right) +\bar{x}=m_{f}\left( z\right) $ for $m_{f}\left( z\right) :=\operatorname{argmin}_{x\in B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( \bar{x}_{\mathcal{U^{\prime }}}\right) \oplus
B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( \bar{x}_{\mathcal{V^{\prime }}}\right) }\left[ f\left( x\right) -\langle x,z\rangle \right] $. The next
result shows that under the assumption of tilt stability we have $u:=P_{\mathcal{U^{\prime }}}\left[ m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] $ iff
\begin{equation}
z_{\mathcal{U^{\prime }}}\in \partial _{\operatorname{co}}\ L_{\mathcal{U^{\prime }}}^{\varepsilon } \left( u\right) \label{neqn:10}
\end{equation}
where $\partial _{\operatorname{co}}g\left( u\right) :=\left\{ z\mid g\left(
u^{\prime }\right) -g\left( u\right) \geq \langle z,u^{\prime }-u\rangle
\text{ for all }u^{\prime }\right\} $ corresponds to the subdifferential of
convex analysis. In passing we note that tilt stability
of $f$ at $\bar{x}$ implies $\partial_{\operatorname{co}} f (\bar{x}) \ne \emptyset$.
\begin{remark} \label{rem:27}
When $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ has a tilt-stable
local minimum at $\bar{x}$ then for $\bar{z}$ sufficiently small we must also have
$g\left( x\right) :=f\left( x\right) -\langle \bar{z},x\rangle $ possessing
a tilt stable local minimum at $\left\{ \bar{x}\right\} =m_{f}\left( \bar{z}\right)$. In this
way we may obtain a unique Lipschitz continuous selection
\[
\{ m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \}
= \operatorname{argmin}_{\left( u^{\prime },v^{\prime }\right) \in \mathcal{U} \oplus \mathcal{V}}\left[ h \left( u^{\prime }+v^{\prime }\right)
-\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}}\rangle -\langle
u^{\prime },z_{\mathcal{U^{\prime }}}\rangle \right]
\]
in a neighbourhood of $z_{\mathcal{U^{\prime }}} \in B_{\varepsilon} (\bar{z}_{\mathcal{U^{\prime }}}) $ (where $\bar{z}_{\mathcal{U^{\prime }}} \ne 0$).
\end{remark}
\begin{proposition}
\label{prop:LU} Let $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ be a
proper lower semi-continuous function with $f-\langle \bar{z},\cdot \rangle $
having a tilt-stable local minimum at $\bar{x}$.
\begin{enumerate}
\item Then $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( \cdot \right) $ is a closed, proper convex function that is finite valued for $u \in B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right)$.
\item Let $u:=P_{\mathcal{U^{\prime }}}\left[ m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] \in \operatorname{int}
B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) $ (where $z_{\mathcal{U}^{\prime }}\in \mathcal{U}^{\prime }$) then
\begin{equation}
L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u^{\prime }\right) -L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) \geq \langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle \quad \text{for }u^{\prime }\in
B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) . \label{neqn:9}
\end{equation}
Moreover $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right)
=\min_{v^{\prime }\in \mathcal{V^{\prime }}}\left[ \operatorname{co}h\left(
u+v^{\prime }\right) -\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}
}\rangle \right] $ for which the minimum is attained at $v\left( u\right) =P_{\mathcal{V^{\prime }}}\left[ m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] $ where $v\left( 0\right) =0$.
\item Conversely suppose (\ref{neqn:10}) holds at any given $u\in
B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) $ and let $v(u)$ be
as defined in (\ref{eqn:78}). Then we have $u=P_{\mathcal{U^{\prime }}}\left[
m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right)
\right] $ and $v\left( u\right) =P_{\mathcal{V^{\prime }}}\left[ m_{h}\left(
z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] \in
\operatorname{int}B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right) $ for $\|u\|$ sufficiently small.
\end{enumerate}
\end{proposition}
\begin{proof}
Consider 1. By Proposition \ref{prop:co} we have
\begin{equation*}
L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) =\min_{v^{\prime
}\in \mathcal{V^{\prime }}}\left[ h\left( u+v^{\prime }\right) -\langle
v^{\prime },\bar{z}_{\mathcal{V^{\prime }}}\rangle \right] =\min_{v^{\prime
}\in \mathcal{V^{\prime }}}\left[ \operatorname{co}h\left( u+v^{\prime }\right)
-\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}}\rangle \right] .
\end{equation*}
Hence $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u^{\prime}\right) $ is a ``marginal mapping'' corresponding to a coercive closed convex function $F(u^{\prime},v^{\prime}):=\operatorname{co}h\left( u^{\prime}+v^{\prime }\right)
-\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}}\rangle$. Applying \cite[Theorem 9.2]{rock:1} the result follows on viewing $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u^{\prime}\right) $ as the ``image of $F$ under the linear mapping $A$'' given by the projection $u^{\prime} := A (u^{\prime}, v^{\prime}) :=P_{\mathcal{U}}(u^{\prime}, v^{\prime})$ onto $\operatorname{int} B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) $.
For the second part we have $z=z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}$, where only the $\mathcal{U^{\prime }}$ component
varies. The following minimum is attained at the unique point $m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) $ that
uniquely determines the value of $u\in \mathcal{U^{\prime }}$:
\begin{eqnarray}
\left\{ u+v(u)\right\} &:= &m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \notag \\
&=&\operatorname{argmin}_{\left( u^{\prime },v^{\prime }\right) \in \mathcal{U} \oplus \mathcal{V}}\left[ h\left( u^{\prime }+v^{\prime }\right)
-\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}}\rangle -\langle
u^{\prime },z_{\mathcal{U^{\prime }}}\rangle \right] \notag \\
\text{and so \quad }\left\{ u\right\} &=&\operatorname{argmin}_{u^{\prime }\in
B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) }\left[
\min_{v^{\prime }\in \mathcal{V^{\prime }}}\left[ h\left( u^{\prime
}+v^{\prime }\right) -\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}
}\rangle \right] -\langle u^{\prime },z_{\mathcal{U^{\prime }}}\rangle
\right] , \label{neqn:80}
\end{eqnarray}
where $u:=P_{\mathcal{U^{\prime }}}\left[ m_{h}\left( z_{\mathcal{U^{\prime }}
}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] $ and $v\left( u\right)
:=P_{\mathcal{V^{\prime }}}\left[ m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right]$.
As $m_{h}\left( \cdot \right) $ is a single valued Lipschitz function and $\operatorname{co}h$ has a local minimum at $0$ then $v\left( 0\right) =0$ because
$\left\{ 0\right\} =\operatorname{argmin}_{v^{\prime }\in \mathcal{V^{\prime }}}\left[ \operatorname{co}h\left( v^{\prime }\right) -\langle v^{\prime },\bar{z}_{\mathcal{V^{\prime }}}\rangle \right] $. Hence by continuity $v(u) \in \operatorname{int} B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right)$ for $\|u\|$ sufficiently small.
The objective value on this
minimization problem equals
\begin{equation}
\min_{u^{\prime }\in B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right)
}\left[ L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u^{\prime }\right)
-\langle u^{\prime },{z}_{\mathcal{U^{\prime }}}\rangle \right] =L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) -\langle u,z_{\mathcal{U^{\prime
}}}\rangle , \label{eqn:79}
\end{equation}
giving (\ref{neqn:9}).
For the third part we note that (\ref{neqn:10}) is equivalent to (\ref{neqn:9}) and hence equivalent to the identity (\ref{eqn:79}), which affirms
that the minimizer in the $\mathcal{U^{\prime }}$ space is attained at $u$
and thus the minimizer in the $\mathcal{V^{\prime }}$ space in the
definition of $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) $ is
attained at $v(u)$. This in turn can be equivalently written as (\ref{neqn:80}) which affirms that $u=P_{\mathcal{U^{\prime }}}\left[ m_{h}\left(
z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] $
and $v\left( u\right) =P_{\mathcal{V^{\prime }}}\left[ m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] \in
B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right) $.
\end{proof}
\begin{remark}\label{rem:implicit1}
In principle the knowledge of $m_f$ and $\mathcal{U}$ should allow one to construct the function $v(\cdot)$. One can perform a rotation of coordinates and a translation of
$\bar{x}$ to zero so that we have then $f$ represented as $h: \mathcal{U} \times \mathcal{V}
\to \mathbb{R}_{\infty}$ and correspondingly obtain $m_h$. Now decompose
$m_h (z_{\mathcal{U}} + \bar{z}_{\mathcal{V}}) = m^h_{\mathcal{U}}(z_{\mathcal{U}} ) + m^h_{\mathcal{V}}(z_{\mathcal{U}} )$ (where we have dropped the reference to $\bar{z}_{\mathcal{V}}$ as its value is fixed). Then eliminate the variable $z_{\mathcal{U}} $ from the system of equations $u = m^h_{\mathcal{U}}(z_{\mathcal{U}} ) $ and
$v = m^h_{\mathcal{V}}(z_{\mathcal{U}} )$ to obtain $v(u)$. This solution is unique
under the assumption of a tilt stable local minimum. Indeed one can interpret $v(\cdot)$ as an implicit function. This point of view has been used by numerous authors
\cite[Theorem 2.2]{Miller:1}, \cite[Theorem 6.1]{Lewis:1} and with regard to
$C^2$-smooth manifolds see \cite[Theorem 2.6]{Lewis:2}. This last result indicates that when
$f$ is ``partially smooth'' with respect to a $C^2$-smooth manifold $\mathcal{M}$ then
the form of $v( \cdot)$ is accessible via the implicit function theorem. Moreover there is a local description $\mathcal{M} = \{ (u, v(u)) \mid u \in \mathcal{U} \cap B_{\varepsilon} (0)\}$. An interesting example of
this sort of approach can be found in \cite[Theorem 4.3]{Lem:1}. Here the exact penalty function of a convex nonlinear optimisation problem is studied where $\bar{x}$ is chosen to be the minimizer. The function
$v(\cdot)$ is characterised as the solution to a system of equations associated with the active
constraints at $\bar{x}$ for the associated nonlinear programming problem. A similar analysis may be applied to the illustrative example of $C^2$ smooth function $f$ restricted to a polyhedral set $P := \{ x \in \mathbb{R}^n \mid l_i (x) \leq 0 \text{ for } i \in I:=\{1,\dots,m\}\}$, where
$l_i$ are affine functions and $I(\bar{x}):=\{ i \in I \mid l_i (\bar{x}) = 0 \}$ are the active constraints. Assume $\{\nabla l_i (\bar{x})\}_{i \in I(\bar{x})}$ are linearly independent. When the optimal solution $\bar{x} \in \operatorname{int} P$ then $\mathcal{V} = \{0\}$ and $\mathcal{U} = \mathbb{R}^n$ giving $\mathcal{M} = \mathbb{R}^n \times \{0\}$, a smooth manifold. When the active constraints $I(\bar{x})$ are nonempty then
$\mathcal{V} = \operatorname{lin} \{\nabla l_i (\bar{x})\}_{i \in I(\bar{x})}$ and
$\mathcal{U} = \{d \in \mathbb{R}^n \mid \langle \nabla l_i (\bar{x}) ,d \rangle = 0 \text{ for } i \in I(\bar{x}) \}$. Then $v(u)$ is the solution (or implicit function) associated with the system of equations $l_i (\bar{x} + (u,v)) =0$ for
$i \in I(\bar{x})$, in the unknowns $v \in \mathcal{V}$. The implicit function theorem now furnishes existence, uniqueness and differentiability. Given this clear connection to implicit functions it would be interesting to relate these ideas to a more modern theory of implicit functions \cite{Dontchev:1}.
\end{remark}
Existence of convex subgradients indicates a hidden convexification.
\begin{lemma}
\label{lem:conv}Suppose $h:\mathcal{U^{\prime }}\rightarrow \mathbb{R}_{\infty }$ is a proper lower semi-continuous function. Then
\begin{equation*}
\partial _{\operatorname{co}}h\left( u\right) \subseteq \partial \left[ \operatorname{co}h\right] \left( u\right) .
\end{equation*}
When $\partial _{\operatorname{co}}h\left( u\right) \neq \emptyset $ then $\operatorname{co}h\left( u\right) =h\left( u\right) $ and we have $\partial _{\operatorname{co}
}h\left( u\right) =\partial \left[ \operatorname{co}h\right] \left( u\right)
\subseteq \partial _{p}h\left( u\right) \neq \emptyset .$ If in addition $h$
is differentiable we have $\nabla h\left( u\right) =\nabla \left( \operatorname{co}
h\right) \left( u\right) $.
\end{lemma}
\begin{proof}
If $z_{\mathcal{U}^{\prime}}\in \partial _{\operatorname{co}}h\left( u\right) $ then
\begin{eqnarray}
h\left( u^{\prime }\right) -h\left( u\right) &\geq &\langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle \quad \text{for all }u^{\prime }\in
\mathcal{U^{\prime }} \label{neqn:7} \\
\text{hence \quad }\operatorname{co}h\left( u^{\prime }\right) &\geq &h\left(
u\right) +\langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle \notag
\end{eqnarray}
and so for $u^{\prime }=u$ we have $\operatorname{co}h\left( u\right) \geq h\left(
u\right) \geq \operatorname{co}h\left( u\right) $ giving equality. Thus
\begin{equation}
\operatorname{co}h\left( u^{\prime }\right) -\operatorname{co}h\left( u\right) \geq \langle
z_{\mathcal{U}},u^{\prime }-u\rangle \quad \text{for all }u^{\prime }\in
\mathcal{U^{\prime }}\text{. } \label{neqn:8}
\end{equation}
Hence $\partial _{\operatorname{co}}h\left( u\right) \subseteq \partial \left[ \operatorname{co}h\right] \left( u\right) .$ When $\partial _{\operatorname{co}}h\left( u\right)
\neq \emptyset $ then $\operatorname{co}h\left( u\right) =h\left( u\right) $ and (\ref{neqn:8})\ gives (\ref{neqn:7}) as $h\left( u^{\prime }\right) \geq
\operatorname{co}h\left( u^{\prime }\right) $ is always true. In particular (\ref{neqn:7}) implies $z_{\mathcal{U^{\prime }}}\in \partial _{p}h\left(
u\right) $ and when $h$ is actually differentiable at $u$ then $\partial _{\operatorname{co}}h\left( u\right) =\partial \left[ \operatorname{co}h\right] \left(
u\right) \subseteq \partial _{p}h\left( u\right) =\left\{ \nabla h\left(
u\right) \right\} .$
\end{proof}
\begin{remark}
\label{rem:lem} Assume $g\left( x\right) :=f\left( x\right) -\langle \bar{z}
,x\rangle $ possesses a tilt stable local minimum at $\left\{ \bar{x}
\right\} =m_{f}\left( \bar{z} \right)$ (and hence $\partial _{\operatorname{co}}f\left( \bar{x}\right) \ne \emptyset$). In \cite[Theorem 3.3]{Lem:1} it is
observed that the optimality condition applied to the minimization problem
that defines $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right)
=\inf_{v^{\prime }\in \mathcal{V^{\prime }}}\left\{ \operatorname{co}h\left(
u+v^{\prime }\right) -\langle \bar{z}_{\mathcal{V}^{\prime }},v^{\prime
}\rangle \right\} $ (which attains its minimum at $v(u)$) gives rise to
\begin{equation}
\partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) =\left\{ z_{\mathcal{U}^{\prime }}\mid z_{\mathcal{U}^{\prime }}+\bar{z}_{\mathcal{V^{\prime }}}\in \partial \operatorname{co}h\left( u+v\left( u\right) \right)
\right\} , \label{eqn:2}
\end{equation}
assuming $(u,v(u)) \in \operatorname{int} B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) \times \operatorname{int} B_{\varepsilon }^{\mathcal{V^{\prime }}}\left( 0\right) $.
Applying (\ref{prop:6:3}) and Lemma \ref{lem:conv} we have
\begin{eqnarray*}
\partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( 0\right) &=&\left\{
z_{\mathcal{U}}\mid z_{\mathcal{U}^{\prime }}+\bar{z}_{\mathcal{V^{\prime }}}\in \partial \operatorname{co}h\left( 0\right)
=\partial _{\operatorname{co}} h \left( 0 \right) =\partial _{\operatorname{co}}f\left( \bar{x}\right) \right\} \\
&\subseteq &\left\{ z_{\mathcal{U}^{\prime }}\mid z_{\mathcal{U}^{\prime }}+
\bar{z}_{\mathcal{V^{\prime }}}\in \partial f\left( \bar{x}\right) \right\}
=\left\{ \bar{z}_{\mathcal{U}^{\prime }}\right\} \quad \text{as }\mathcal{U}^{\prime }\subseteq \mathcal{U} .
\end{eqnarray*}
Thus $\nabla L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( 0\right) =\bar{z}_{\mathcal{U}^{\prime }}$ exists (as was first observed in \cite[Theorem 3.3]
{Lem:1} for convex functions). Moreover we also have $L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( 0\right) =\inf_{v^{\prime }\in \mathcal{V^{\prime }}}\left\{ \operatorname{co}h\left( v^{\prime }\right) -\langle \bar{z}_{\mathcal{V}^{\prime }},v^{\prime }\rangle \right\} =\operatorname{co}h\left( 0\right) =f\left(
\bar{x}\right) $ because $m_{h}\left( \bar{z}_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) =\left\{ 0\right\} .$ Furthermore,
due to the inherent Lipschitz continuity implied by tilt stability (see Proposition \ref{prop:LU}) we must have for $\delta$ sufficiently small
$\partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) \ne \emptyset $ for all
$u\in
B_{\delta}^{\mathcal{U^{\prime }}}\left( 0\right) $.
\end{remark}
Even without the assumption of tilt stability we have the following.
\begin{proposition}
\label{prop:m}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is
a proper lower semi-continuous function and
\begin{equation*}
v\left( u\right) \in \operatorname{argmin}_{v^{\prime }\in \mathcal{V^{\prime }}\cap
B_{\varepsilon }\left( 0\right) }\left\{ f\left( \bar{x}+u+v^{\prime
}\right) -\langle \bar{z}_{\mathcal{V^{\prime }}},v^{\prime }\rangle
\right\} : B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right)
\rightarrow \mathcal{V^{\prime }}.
\end{equation*}
Then when $z_{\mathcal{U^{\prime }}}\in \partial _{\operatorname{co}} L_{\mathcal{U^{\prime }}}^{\varepsilon } \left( u\right) $
we have for $g\left(
w\right) :=\operatorname{co}h\left( w\right) $ that
\begin{equation}
\left( u,v\left( u\right) \right) \in m_{h}\left( z_{\mathcal{U^{\prime }}}+
\bar{z}_{\mathcal{V^{\prime }}}\right) =\operatorname{argmin}\left\{ g\left(
u+v\right) -\langle z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}
},u+v\rangle \right\} \text{ for all }u\in B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) . \label{neqn:11}
\end{equation}
\end{proposition}
\begin{proof}
As $z_{\mathcal{U^{\prime }}}\in \partial _{\operatorname{co}} L_{\mathcal{U^{\prime }}}^{\varepsilon } \left( u\right) $ we have for any $u^{\prime }\in
B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) $ that
\begin{eqnarray*}
L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u^{\prime }\right) &\geq &L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) +\langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle \\
&=&\inf_{v^{\prime }\in \mathcal{V^{\prime }}}\left\{ h\left( u+v^{\prime
}\right) -\langle \bar{z}_{\mathcal{V^{\prime }}},v^{\prime }\rangle
\right\} +\langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle \\
&=&\left\{ h\left( u+v\left( u\right) \right) -\langle \bar{z}_{\mathcal{V^{\prime }}},v\left( u\right) \rangle \right\} +\langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle \\
&=&h\left( u+v\left( u\right) \right) -\langle z_{\mathcal{U^{\prime }}}+
\bar{z}_{\mathcal{V^{\prime }}},u+v\left( u\right) \rangle +\langle z_{\mathcal{U^{\prime }}},u^{\prime }\rangle .
\end{eqnarray*}
Hence for all $v^{\prime }\in \mathcal{V^{\prime }}$ we have
\begin{eqnarray*}
h\left( u^{\prime }+v^{\prime }\right) -\langle \bar{z}_{\mathcal{V^{\prime }}},v^{\prime }\rangle &\geq &L_{\mathcal{U^{\prime }}}^{\varepsilon }\left(
u^{\prime }\right) \\
&\geq &h\left( u+v\left( u\right) \right) -\langle z_{\mathcal{U^{\prime }}}+
\bar{z}_{\mathcal{V^{\prime }}},u+v\left( u\right) \rangle +\langle z_{\mathcal{U^{\prime }}},u^{\prime }\rangle
\end{eqnarray*}
or for all $\left( u^{\prime },v^{\prime }\right) \in B_{\varepsilon }^{\mathcal{U^{\prime }}}\left( 0\right) \oplus \mathcal{V^{\prime }}$ (using
orthogonality of the spaces), we have
\begin{equation}
h\left( u^{\prime }+v^{\prime }\right) -\langle z_{\mathcal{U^{\prime }}}+
\bar{z}_{\mathcal{V^{\prime }}},u^{\prime }+v^{\prime }\rangle \geq h\left(
u+v\left( u\right) \right) -\langle z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}},u+v\left( u\right) \rangle . \label{neqn:16}
\end{equation}
That is $\left( u,v\left( u\right) \right) \in m_{h}\left( z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) $ and we may now apply
Proposition \ref{prop:co}.
\end{proof}
In the following we repeatedly use the fact that when a function has a
supporting tangent plane to its epigraph one can take the convex closure of
the epigraph and the resultant set will remain entirely to that same side of
that tangent hyperplane. This will be true for partial convexifications as
convex combinations cannot violate the bounding plane.
\begin{proposition}
\label{cor:conv}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$
is a proper lower semi-continuous function and $v\left( u\right) \in \operatorname{argmin}_{v^{\prime }\in \mathcal{V^{\prime}}\cap B_{\varepsilon }\left(
0\right) }\left\{ f\left( \bar{x}+u+v^{\prime }\right) -\langle \bar{z}_{\mathcal{V^{\prime}} },v^{\prime }\rangle \right\} .$ Then when $z_{\mathcal{U^{\prime}}}\in \partial _{\operatorname{co}} L_{\mathcal{U^{\prime}}
}^{\varepsilon } \left( u\right) $ we have
\begin{equation*}
k_{v}^{\ast }\left( z\right) +k_{v}\left( u\right) = \langle z_{\mathcal{U^{\prime}} },u\rangle
\end{equation*}
where $k_{v}\left( u\right) :=h\left( u+v\left( u\right) \right) -\langle
\bar{z}_{\mathcal{V^{\prime}}},u+v\left( u\right) \rangle $ i.e. $z_{\mathcal{U^{\prime}}}\in \partial _{\operatorname{co}}k_{v}\left( u\right) $ and in
particular $\bar{z}_{\mathcal{U^{\prime}}}\in \partial _{\operatorname{co}
}k_{v}\left( u\right) =\partial \operatorname{co} k_{v}\left( u\right) $ and $k_{v}\left( u\right) =\operatorname{co}k_{v}\left( u\right) $. Moreover for $u\in
\mathcal{U^{\prime}}$ we have
\begin{eqnarray}
k_{v}\left( u\right) &=&\left[ \operatorname{co}h\right] \left( u+v\left( u\right)
\right) -\langle \bar{z}_{\mathcal{V^{\prime}}},v\left( u\right) \rangle
\notag \\
&=&h\left( u+v\left( u\right) \right) -\langle \bar{z}_{\mathcal{V^{\prime}}
},v\left( u\right) \rangle =\operatorname{co}k_{v}\left( u\right) , \label{neqn:19}
\end{eqnarray}
\begin{equation}
\text{so\quad }h\left( u+v\left( u\right) \right) =\left[ \operatorname{co}h\right]
\left( u+v\left( u\right) \right) . \label{neqn:36}
\end{equation}
\end{proposition}
\begin{proof}
By (\ref{neqn:16})\ we have
\begin{equation}
h\left( u^{\prime }+v^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}}+
\bar{z}_{\mathcal{V^{\prime}}},u^{\prime }+v^{\prime }\rangle \geq h\left(
u+v\left( u\right) \right) -\langle z_{\mathcal{U^{\prime}}}+ \bar{z}_{\mathcal{V^{\prime}}},u+v\left( u\right) \rangle . \label{neqn:17}
\end{equation}
So $z_{\mathcal{U^{\prime}}}+\bar{z}_{\mathcal{V^{\prime}}} \in \partial _{\operatorname{co}}h\left( u+v\left( u\right) \right) \neq \emptyset $ and by Lemma \ref{lem:conv} we have $\operatorname{co} h\left( u+v\left( u\right) \right) =h\left(
u+v\left( u\right) \right) $. Hence
\begin{eqnarray*}
h\left( u^{\prime }+v^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}}+
\bar{z}_{\mathcal{V^{\prime}}},u^{\prime }+v^{\prime }\rangle &\geq &\left[
\operatorname{co}h\right] \left( u^{\prime }+v^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}}+\bar{z}_{\mathcal{V^{\prime}}},u^{\prime }+ v^{\prime }\rangle
\\
&\geq &h\left( u+v\left( u\right) \right) -\langle z_{\mathcal{U^{\prime}}}+
\bar{z}_{\mathcal{V^{\prime}}},u+v\left( u\right) \rangle .
\end{eqnarray*}
On placing $v^{\prime }=v\left( u^{\prime }\right) $ we have $h\left(
u+v\left( u\right) \right) =\left[ \operatorname{co}h\right] \left( u+v\left(
u\right) \right) $ when $u^{\prime }=u$ and otherwise
\begin{equation*}
k_{v}\left( u^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime
}+v\left( u^{\prime }\right) \rangle \geq k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}} },u+v\left( u\right) \rangle
\end{equation*}
or by orthogonality we have for all $u^{\prime }\in \mathcal{U^{\prime}}$
that
\begin{equation*}
k_{v}\left( u^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime
}\rangle \geq k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}}
},u\rangle .
\end{equation*}
Hence $-k_{v}^{\ast }\left( z_{\mathcal{U^{\prime}}}\right) \geq k_{v}\left(
u\right) -\langle z_{\mathcal{U^{\prime}}},u\rangle $ implying $\langle z_{\mathcal{U^{\prime}} },u\rangle \geq k_{v}\left( u\right) +k_{v}^{\ast
}\left( z_{\mathcal{U^{\prime}} }\right) .$ The reverse inequality is supplied by the Fenchel inequality which gives the
result $z_{\mathcal{U^{\prime}}}\in \partial _{\operatorname{co}}k_{v}\left(
u\right) =\partial \operatorname{co}k_{v}\left( u\right) $ and $k_{v}\left( u\right)
=\operatorname{co}k_{v}\left( u\right) $ follows from Lemma \ref{lem:conv}.
Moreover we have from (\ref{neqn:17}) that
\begin{eqnarray*}
h\left( u^{\prime }+v^{\prime }\right) &-&\langle z_{\mathcal{U^{\prime}}}+
\bar{z}_{\mathcal{V^{\prime}}},u^{\prime }+v^{\prime }\rangle \geq h\left(
u+v\left( u\right) \right) -\langle z_{\mathcal{U^{\prime}}}+ \bar{z}_{\mathcal{V^{\prime}}},u+v\left( u\right) \rangle \\
&=&\left[ \operatorname{co}h\right] \left( u+v\left( u\right) \right) - \langle z_{\mathcal{U^{\prime}}}+\bar{z}_{\mathcal{V^{\prime}}}, u+v\left( u\right)
\rangle \\
&\geq &\operatorname{co}k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}}
},u\rangle
\end{eqnarray*}
and hence (using orthogonality)
\begin{eqnarray*}
\left[ \operatorname{co}h\right] \left( u^{\prime }+v^{\prime }\right) &-& \langle
z_{\mathcal{U^{\prime}}}+\bar{z}_{\mathcal{V^{\prime}}},u^{\prime
}+v^{\prime }\rangle \geq k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}}},u\rangle \\
&=&\left\{ \left[ \operatorname{co}h\right] \left( u+v\left( u\right) \right)
-\langle \bar{z}_{\mathcal{V^{\prime}}},v\left( u\right) \rangle \right\}
-\langle z_{\mathcal{U^{\prime}}},u\rangle \\
&\geq &\operatorname{co}k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}}
},u\rangle .
\end{eqnarray*}
On placing $v^{\prime }=v\left( u^{\prime }\right) $ we have
\begin{eqnarray*}
\left[ \operatorname{co}h\right] \left( u^{\prime }+v\left( u^{\prime }\right)
\right) &-&\langle z_{\mathcal{U^{\prime}}}+\bar{z}_{\mathcal{V^{\prime}}
},u^{\prime }+v\left( u^{\prime }\right) \rangle \\
&\geq &\left[ \operatorname{co}h\right] \left( u +v\left( u
\right) \right) -\langle \bar{z}_{\mathcal{V^{\prime}}},v\left( u
\right) \rangle -\langle z_{\mathcal{U^{\prime}}
},u\rangle \\
&\geq &k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}}},u\rangle \geq
\operatorname{co} k_{v}\left( u\right) -\langle z_{\mathcal{U^{\prime}}},u\rangle .
\end{eqnarray*}
Setting $u^{\prime }=u$ and using the identities $k_{v}\left( u\right) =\operatorname{co}
k_{v}\left( u\right) $ and $\left[ \operatorname{co}h\right] \left( u+v\left(
u\right) \right) =h\left( u+v\left( u\right) \right) $ for $u\in \mathcal{U^{\prime}}$, we obtain (\ref{neqn:19}).
\end{proof}
\subsection{Subhessians and the localised $\mathcal{U}^{\prime }$-Lagrangian}
Now that we have some theory of the localised $\mathcal{U}^{\prime }$-Lagrangian we may study its interaction with the notion of subhessian. As
we will be applying these results locally around a tilt stable local minimum
we are going to focus on the case when we have $L_{\mathcal{U}}^{\varepsilon }\left( u\right) =\inf_{v\in \mathcal{V}}\left\{ \operatorname{co}h\left( u+v\right) -\langle \bar{z}_{\mathcal{V}},v\rangle \right\} $ and $\mathcal{U}^{\prime
} = \mathcal{U}$. The following is a small variant of \cite[Corollary
3.5]{Lem:1}.
\begin{lemma}
\label{lem:littleOh}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty
} $ is quadratically minorised and is prox--regular at $\bar{x}\ $ for $\bar{z}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r.$ Suppose
in addition that $f-\langle \bar{z},\cdot \rangle $ possesses a tilt stable
local minimum at $\bar{x}$, where $\bar{z}\in \operatorname{rel}$-$\operatorname{int}\partial f(\bar{x})$, $\mathcal{U}^{\prime }=\mathcal{U}$, $v\left( u\right) \in \operatorname{argmin}_{v\in \mathcal{V}\cap B_{\varepsilon }\left( 0\right) }
\left[ f\left( \bar{x}+u+v\right) -\langle \bar{z}_{\mathcal{V}},v\rangle
\right] $ and $L_{\mathcal{U}}^{\varepsilon }\left( u\right) =\inf_{v\in
\mathcal{V}}\left\{ \operatorname{co}h\left( u+v\right) -\langle \bar{z}_{\mathcal{V}
},v\rangle \right\} $. Then we have $v\left( u\right) =o\left( \left\Vert
u\right\Vert \right) $ in the following sense:
\begin{equation*}
\forall \varepsilon^{\prime\prime} >0,\quad \exists \delta >0:\quad \left\Vert u\right\Vert
\leq \delta \quad \implies \quad \left\Vert v\left( u\right) \right\Vert
\leq \varepsilon^{\prime\prime} \left\Vert u\right\Vert .
\end{equation*}
\end{lemma}
\begin{proof}
As noted in Remark \ref{rem:lem} we have $\nabla L_{\mathcal{U}}^{\varepsilon }\left( 0\right) =\bar{z}_{\mathcal{U}}$ existing where $L_{\mathcal{U}}^{\varepsilon }\left( u\right) =\inf_{v\in \mathcal{V}}\left\{
\operatorname{co}h\left( u+v\right) -\langle \bar{z}_{\mathcal{V}},v\rangle \right\}
$ is a convex function finite locally around $u=0.$ Consequently we have
for $u\in B^{\mathcal{U}}_{\varepsilon }\left( 0\right) $ we have
\begin{equation*}
L_{\mathcal{U}}^{\varepsilon }\left( u\right) =L_{\mathcal{U}}^{\varepsilon
}\left( 0\right) +\langle \nabla L_{\mathcal{U}}^{\varepsilon }\left(
0\right) ,u\rangle +o\left( \left\Vert u\right\Vert \right) =f\left( \bar{x}
\right) +\langle \bar{z}_{\mathcal{U}},u\rangle +o\left( \left\Vert
u\right\Vert \right) .
\end{equation*}
Invoking (\ref{neqn:14}) in the proof of Lemma \ref{lem:sharp} we have an $\varepsilon ^{\prime }>0$ such that for $u\in B_{\varepsilon ^{\prime
}}\left( 0\right) \cap \mathcal{U}$
\begin{equation*}
f\left( \bar{x}+u+v\right) \geq f\left( \bar{x}\right) +\langle \bar{z}_{\mathcal{V}},v\rangle +\langle \bar{z}_{\mathcal{U}},u\rangle +\left(
\varepsilon ^{\prime }-\frac{r\left\Vert v\right\Vert }{2}\right) \left\Vert
v\right\Vert -\frac{r}{2}\left\Vert u\right\Vert ^{2}\quad \text{for all }
v\in \varepsilon ^{\prime }B_{1}\left( 0\right) \cap \mathcal{V}\text{.}
\end{equation*}
When we choose $v\in B_{\min \{\varepsilon ^{\prime },\varepsilon ^{\prime
}/r\}}\left( 0\right) $ we have
\begin{equation*}
f\left( \bar{x}+u+v\right) \geq f\left( \bar{x}\right) +\langle \bar{z}_{\mathcal{U}} + \bar{z}_{\mathcal{V}}, u + v\rangle +\frac{\varepsilon ^{\prime }}{2}\left\Vert v\right\Vert -\frac{r}{2}\left\Vert
u\right\Vert ^{2}.
\end{equation*}
As $v(\cdot )$ is Lipschitz continuous with $v(0)=0$ there exists $\delta'>0$ such that
$\|u \| < \delta'$ implies $v(u) \in B_{\min \{\varepsilon ^{\prime },\varepsilon ^{\prime
}/r\}}\left( 0\right) $. Then we have
\begin{eqnarray*}
f\left( \bar{x}\right) +\langle \bar{z}_{\mathcal{U} },u\rangle
+o\left( \left\Vert u\right\Vert \right) &=&L_{\mathcal{U }}^{\varepsilon }\left( u\right) =f\left( \bar{x}+u+v\left( u\right) \right)
-\langle \bar{z}_{\mathcal{V} },v\left( u\right) \rangle \\
&\geq &f\left( \bar{x}\right) +\langle \bar{z},u+v\left( u\right) \rangle
-\langle \bar{z}_{\mathcal{V} },v\left( u\right) \rangle +\frac{\varepsilon ^{\prime }}{2}\left\Vert v\left( u\right) \right\Vert -\frac{r}{2}\left\Vert u\right\Vert ^{2} \\
&=&f\left( \bar{x}\right) +\langle \bar{z}_{\mathcal{U} },u\rangle +
\frac{\varepsilon ^{\prime }}{2}\left\Vert v\left( u\right) \right\Vert -
\frac{r}{2}\left\Vert u\right\Vert ^{2}.
\end{eqnarray*}
Hence
\begin{equation*}
\frac{2}{\varepsilon ^{\prime }}\left[ o\left( \left\Vert u\right\Vert
\right) +\frac{r}{2}\left\Vert u\right\Vert ^{2}\right] \geq \left\Vert
v\left( u\right) \right\Vert
\end{equation*}
and given any $\varepsilon^{\prime\prime} >0$ we choose $\delta >0$ with $\delta \leq \delta'$
such that $\left\Vert
u\right\Vert \leq \delta $ implies $\frac{2}{\varepsilon ^{\prime }}\left[
\frac{o\left( \left\Vert u\right\Vert \right) }{\left\Vert u\right\Vert }+
\frac{r}{2}\left\Vert u\right\Vert \right] \leq \min \left\{ \varepsilon^{\prime\prime}
,\varepsilon ^{\prime }/r,\varepsilon ^{\prime }\right\} .$
\end{proof}
We may now further justify our definition of ``fast track'' at $\bar{x}.$ In \cite{Hare:2} and other works ``fast tracks'' are specified as a
subspace on which both $u\mapsto L_{\mathcal{U}}^{\varepsilon }\left(
u\right) $ and $u\mapsto v\left( u\right) $ are twice continuously
differentiable. In particular $L_{\mathcal{U}}^{\varepsilon }\left( \cdot
\right) $ admits a Taylor expansion.
\begin{proposition}
\label{prop:Lagsubjet}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is quadratically minorised and is prox--regular at $\bar{x}\ $
for $\bar{z}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r.$
Suppose in addition that $f-\langle \bar{z},\cdot \rangle $ possesses a
tilt stable local minimum at $\bar{x}$, $\bar{z}+B_{\varepsilon }\left(
0\right) \cap \mathcal{V}\subseteq \partial f(\bar{x})$, $\mathcal{U}^{\prime }=\mathcal{U}$, $L_{\mathcal{U}}^{\varepsilon }\left( u\right)
=\inf_{v\in \mathcal{V}}\left\{ \operatorname{co}h\left( u+v\right) -\langle \bar{z}_{\mathcal{V}},v\rangle \right\} $ and $\left\{ v\left( u\right) \right\} =
\operatorname{argmin}_{v\in \mathcal{V}\cap B_{\varepsilon }\left( 0\right) }\left[
f\left( \bar{x}+u+v\right) -\langle \bar{z}_{\mathcal{V}},v\rangle \right] $ for $u \in B^{\mathcal{U}}_{\varepsilon} (0)$. Then for $\bar{z}_{\mathcal{U}}=\nabla L_{\mathcal{U}}^{\varepsilon
}\left( 0\right) $ we have
\begin{equation*}
\left( L_{\mathcal{U}}^{\varepsilon }\right) _{\_}^{\prime \prime }\left( 0,\bar{z}_{\mathcal{U}},w\right) =\left( \operatorname{co}h\right) _{\_}^{\prime
\prime }\left( 0+v\left( 0\right) ,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,w\right) =f_{\_}^{\prime \prime }\left( \bar{x},\left(
\bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,w\right) \quad \text{for
all }w\in \mathcal{U}\text{. }
\end{equation*}
\end{proposition}
\begin{proof}
Consider the second order quotient
\begin{eqnarray*}
&&\frac{2}{t^{2}}\left[ L_{\mathcal{U}}^{\varepsilon }\left( 0+tu\right) -L_{\mathcal{U}}^{\varepsilon }\left( 0\right) -t\langle \bar{z}_{\mathcal{U}},u\rangle \right] \\
&&\qquad =\frac{2}{t^{2}}\left[ f\left( \bar{x}+tu+v\left( tu\right) \right)
-f\left( \bar{x}\right) -\langle \bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},tu+v\left( tu\right) \rangle \right] \\
&&\qquad =\frac{2}{t^{2}}\left[ f\left( \bar{x}+t\left[ u+\frac{v\left(
tu\right) }{t}\right] \right) -f\left( \bar{x}\right) -t\langle \bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},u+\frac{v\left( tu\right) }{t}\rangle
\right] .
\end{eqnarray*}
Apply Lemma \ref{lem:littleOh}, for an arbitrary $\delta >0$ by taking $\varepsilon^{\prime\prime} =
\frac{\delta}{\|u\|}$ and obtaining the existence of $\gamma >0$ such that
for $\| t u\| \leq t[ \delta + \|w\|] \leq \gamma$ we have $\|v (t u ) \| \leq \varepsilon^{\prime\prime} \|t u \|$. This implies for all $\delta > 0$ that when $t[ \delta + \|w\|] \leq \gamma$ and $u \in B_{\delta} (w )$
we have $\frac{\| v\left( tu\right) \| }{t} \leq \delta$
and so $\frac{v\left( tu\right) }{t}\in B_{\delta }\left( 0\right) $. Thus for $t<\eta := \frac{\gamma}{\delta + \|w\|}$ we
have
\begin{eqnarray*}
&&\inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}}\frac{2}{t^{2}}
\left[ L_{\mathcal{U}}^{\varepsilon }\left( 0+tu\right) -L_{\mathcal{U}}^{\varepsilon }\left( 0\right) -t\langle \bar{z}_{\mathcal{U}},u\rangle
\right] \\
&&\qquad \geq \inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}
}\inf_{v\in B_{\delta }\left( 0\right) \cap \mathcal{V}}\frac{2}{t^{2}}\left[
f\left( \bar{x}+t\left[ u+v\right] \right) -f\left( \bar{x}\right) -t\langle
\bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},u+v\rangle \right] \\
&&\qquad \geq \inf_{h\in B_{\delta }\left( w,0\right) }\frac{2}{t^{2}}\left[
f\left( \bar{x}+th\right) -f\left( \bar{x}\right) -t\langle \bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},h\rangle \right]
\end{eqnarray*}
and so
\begin{eqnarray*}
&&\liminf_{t\downarrow 0}\inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}}\frac{2}{t^{2}}\left[ L_{\mathcal{U}}^{\varepsilon }\left( 0+tu\right)
-L_{\mathcal{U}}^{\varepsilon }\left( 0\right) -t\langle \bar{z}_{\mathcal{U}
},u\rangle \right] \\
&&\qquad \geq \liminf_{t\downarrow 0}\inf_{h\in B_{\delta }\left( w,0\right)
}\frac{2}{t^{2}}\left[ f\left( \bar{x}+th\right) -f\left( \bar{x}\right)
-t\langle \bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},h\rangle \right] .
\end{eqnarray*}
Taking the infimum over $\delta >0$ gives $\left( L_{\mathcal{U}}^{\varepsilon }\right) _{\_}^{\prime \prime }\left( 0,\bar{z}_{\mathcal{U}},w\right) \geq f_{\_}^{\prime \prime }\left( \bar{x},\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,w\right) =\left( \operatorname{co}
h\right) _{\_}^{\prime \prime }\left( 0+v\left( 0\right) ,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,w\right) $ (because $f\left( \bar{x}+\cdot \right) $ and $\operatorname{co}h\left( \cdot \right) $ agree locally).
Conversely consider
\begin{eqnarray*}
&&\inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}}\inf_{\ v\in
\mathcal{V\cap }B_{\delta }\left( 0\right) }\frac{2}{t^{2}}\left[ \operatorname{co}
h\left( 0+t\left[ u+v\right] \right) -\operatorname{co}h\left( 0\right) -t\langle
\bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},u+v\rangle \right] \\
&\geq &\inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}}\inf_{\ v\in
\mathcal{V}}\frac{2}{t^{2}}\left[ \operatorname{co}h\left( 0+\left[ tu+v\right]
\right) -\operatorname{co}h\left( 0\right) -\langle \bar{z}_{\mathcal{U}}+\bar{z}_{\mathcal{V}},tu+v\rangle \right] \\
&=&\inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}}\left[ \frac{2}{t^{2}}\left[ \inf_{v\in \mathcal{V}}\left\{ \operatorname{co}h\left( tu+v\right)
-\langle \bar{z}_{\mathcal{V}},v\rangle \right\} \right] -L_{\mathcal{U}}^{\varepsilon }\left( 0\right) -t\langle \bar{z}_{\mathcal{U}},u\rangle
\right] \\
&=&\inf_{u\in B_{\delta }\left( w\right) \cap \mathcal{U}}\frac{2}{t^{2}}\left[ L_{\mathcal{U}}^{\varepsilon }\left( 0+tu\right) -L_{\mathcal{U}}^{\varepsilon }\left( 0\right) -t\langle \bar{z}_{\mathcal{U}},u\rangle
\right]
\end{eqnarray*}
and on taking a limit infimum as $t\downarrow 0$ and then an infimum over $\delta >0$ gives $\left( \operatorname{co}h\right) _{\_}^{\prime \prime }\left(
0+v\left( 0\right) ,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,w\right) \geq \left( L_{\mathcal{U}}^{\varepsilon }\right)
_{\_}^{\prime \prime }\left( 0,\bar{z}_{\mathcal{U}},w\right) $ and thus
equality.
\end{proof}
Denote $\partial _{\mathcal{U^{\prime }}}^{2,-}\left( \operatorname{co}h\right)
\left( x,z\right) =P_{\mathcal{U^{\prime }}}^{T}\partial ^{2,-}\left( \operatorname{co}h\right) \left( x,z\right) P_{\mathcal{U^{\prime }}}$.
\begin{corollary}
Posit the assumption of Proposition \ref{prop:Lagsubjet}. Then we have
\begin{equation*}
\operatorname{dom}\left( L_{\mathcal{U}}^{\varepsilon }\right) _{\_}^{\prime \prime
}\left( 0,\bar{z}_{\mathcal{U}},\cdot \right) \ \subseteq \operatorname{dom}
f_{\_}^{\prime \prime }\left( \bar{x},\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,\cdot \right)
\end{equation*}
and $\partial _{\mathcal{U}}^{2,-}\left( \operatorname{co}h\right) \left( \bar{x},
\bar{z}\right) \subseteq \partial ^{2,-}L_{\mathcal{U}}^{\varepsilon }\left(
0,\bar{z}_{\mathcal{U}}\right) $.
\end{corollary}
\begin{proof}
As $\operatorname{dom}\left( \operatorname{co}h\right) \subseteq \mathcal{U}$ we have $\operatorname{dom}\left( L_{\mathcal{U}}^{\varepsilon }\right) _{\_}^{\prime \prime
}\left( 0,\bar{z}_{\mathcal{U}},\cdot \right) \subseteq \mathcal{U}$ and
hence by Proposition \ref{prop:Lagsubjet} we have $\operatorname{dom}\left( L_{\mathcal{U}}^{\varepsilon }\right) _{\_}^{\prime
\prime }\left( 0,\bar{z}_{\mathcal{U}},\cdot \right) \ \subseteq \operatorname{dom}f_{\_}^{\prime \prime }\left( \bar{x},\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,\cdot \right) =\operatorname{dom}\left( \operatorname{co}h\right)
_{\_}^{\prime \prime }\left( 0,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,\cdot \right) $. Now take $Q\in \partial ^{2,-}\left(
\operatorname{co}h\right) \left( \bar{x},\bar{z}\right) $ and so we have $\langle
Qu,u\rangle \leq \left( \operatorname{co}h\right) _{\_}^{\prime \prime }\left(
0,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,u\right) $ for
all $u\in \operatorname{dom}\left( \operatorname{co}h\right) _{\_}^{\prime \prime }\left(
0,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,\cdot \right) $. Hence for all $u\in \operatorname{dom}\left( L_{\mathcal{U}}^{\varepsilon }\right)
_{\_}^{\prime \prime }\left( 0,\bar{z}_{\mathcal{U}},\cdot \right) \subseteq
\operatorname{dom}\left( \operatorname{co}h\right) _{\_}^{\prime \prime }\left( 0,\left(
\bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,\cdot \right) \cap
\mathcal{U}$ we have
\begin{equation*}
\langle \left[ P_{\mathcal{U}}^{T}QP_{\mathcal{U}}\right] u,u\rangle
=\langle QP_{\mathcal{U}}u,P_{\mathcal{U}}u\rangle \leq \left( \operatorname{co}
h\right) _{\_}^{\prime \prime }\left( 0,\left( \bar{z}_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) ,u\right) =\left( L_{\mathcal{U}}^{\varepsilon
}\right) _{\_}^{\prime \prime }\left( 0,\bar{z}_{\mathcal{U}},u\right)
\end{equation*}
and so $P_{\mathcal{U}}^{T}QP_{\mathcal{U}}\in \partial ^{2,-}L_{\mathcal{U}}^{\varepsilon }\left( 0,\bar{z}_{\mathcal{U}}\right) .$ That is $\partial _{\mathcal{U}}^{2,-}\left( \operatorname{co}h\right) \left( \bar{x},\bar{z}\right) =P_{\mathcal{U}}^{T}\partial ^{2,-}\left( \operatorname{co}h\right) \left( \bar{x},\bar{z}\right) P_{\mathcal{U}}\subseteq \partial ^{2,-}L_{\mathcal{U}}^{\varepsilon }\left( 0,\bar{z}_{\mathcal{U}}\right) .$
\end{proof}
If we assume more (which is very similar to the ``Partial Smoothness'' of \cite{Lewis:2}) we obtain the following which can be viewed as a less stringent
version of the second order expansions studied in \cite[Theorem 3.9]{Lem:1},
\cite[Equation (7)]{Mifflin:2004:2} and \cite[Theorem 2.6]{Miller:1}. This
result suggests that the role of assumptions like that of Proposition \ref{prop:reg} part \ref{part:4}, which are also a consequence of the definition of
partial smoothness (via the continuity of the $w\mapsto \partial f(w)$ at $x$
relative to $\mathcal{M}$) could be to build a bridge to the identity $\mathcal{U}=\mathcal{U}^{2}$ (see the discussion in Remark \ref{rem:fasttrack} below).
\begin{corollary}
\label{cor:Lagsubjet}Posit the assumption of Proposition \ref{prop:Lagsubjet}
and assume the assumption of Proposition \ref{prop:reg} part \ref{part:4}
i.e. suppose we have $\varepsilon >0$ such that for
all $z_{\mathcal{V}}\in B_{\varepsilon }\left( \bar{z}_{\mathcal{V}}\right)
\cap \mathcal{V}\subseteq \partial _{\mathcal{V}}f\left( \bar{x}\right) $
there is a common
\begin{equation}
v\left( u\right) \in \operatorname{argmin}_{v\in \mathcal{V}\cap B_{\varepsilon
}\left( 0\right) }\left\{ f\left( \bar{x}+u+v\right) -\langle z_{\mathcal{V}},v\rangle \right\}
\cap \operatorname{int} B_{\varepsilon} (0) \label{eqn:3}
\end{equation}
for all $u\in B^{\mathcal{U}}_{\varepsilon }\left( 0\right) $. In addition suppose there
exists $\varepsilon >0$ such that for all $u\in B^{\mathcal{U}}_{\varepsilon }\left(
0\right) $ we have $u \mapsto \nabla L_{\mathcal{U}}^{\varepsilon
}\left( u\right) :=z_{\mathcal{U}}(u)$ existing, and is a continuous function. Then
\begin{equation*}
\left( L_{\mathcal{U}}^{\varepsilon }\right) _{\_}^{\prime \prime }\left(
u,z_{\mathcal{U}},w\right) =\left( \operatorname{co}h\right) _{\_}^{\prime \prime
}\left( u+v\left( u\right) ,\left( z_{\mathcal{U}},\bar{z}_{\mathcal{V}
}\right) ,w\right) \quad \text{for all }w\in \mathcal{U}\text{.}
\end{equation*}
\end{corollary}
\begin{proof}
All the assumptions of Proposition \ref{prop:Lagsubjet} are local in nature
except for the assumption that $\bar{z}+B_{\varepsilon }\left( 0\right) \cap
\mathcal{V}\subseteq \partial f(\bar{x}).$ Discounting this assumption for
now we note that we can perturb $\bar{x}$ (to $\bar{x}+u+v\left( u\right) $)
and $\bar{z}$ (to $\left( z_{\mathcal{U}},\bar{z}_{\mathcal{V}}\right) \in
\partial \left( \operatorname{co}h\right) \left( u+v\left( u\right) \right) $)
within a sufficiently small neighbourhood $u\in B^{\mathcal{U}}_{\varepsilon }\left(
0\right)$ and still have the assumption of prox-regularity,
tilt stability (around a different minimizer of our tilted function) and still use the same selection function $v\left( \cdot
\right) $. Regarding this outstanding assumption, the optimality conditions
associated with (\ref{eqn:3}) imply
$\bar{z}_{\mathcal{V}} +B_{\varepsilon }\left(
0\right) \cap \mathcal{V}\subseteq \partial_{\mathcal{V}} f(\bar{x}+u+v\left( u\right) )$.
As $\nabla L_{\mathcal{U}}^{\varepsilon
}\left( u\right) =z_{\mathcal{U}}(u)$ exists, it follows from (\ref{eqn:2}) that
$ \partial \operatorname{co}h ( u+v\left( u\right) ) - ({z}_{\mathcal{U}}(u) , \bar{z}_{\mathcal{V}})
\subseteq \{0\} \oplus \mathcal{V}$ and so
$ \partial \operatorname{co}h ( u+v\left( u\right) ) = \left\{ {z}_{\mathcal{U}}(u)\right\}
\oplus \partial _{\mathcal{V}}\operatorname{co}h( u+v\left( u\right) ) = \left\{ {z}_{\mathcal{U}} (u)\right\}
\oplus \partial _{\mathcal{V}}f\left( \bar{x} + u+v\left( u\right) \right) . $ Hence
\[
({z}_{\mathcal{U}} (u), \bar{z}_{\mathcal{V}}) + \{0\} \oplus B^{\mathcal{V}}_{\varepsilon }\left(
0\right) \subseteq \partial \operatorname{co}h ( u+v\left( u\right) )
= \partial f( \bar{x} + u+v\left( u\right) ).
\]
This furnishes the final assumption that is required to invoke Proposition
\ref{prop:Lagsubjet} at points near to $\left( \bar{x},\bar{z}\right) $.
\end{proof}
\begin{remark}
\label{rem:fasttrack}We omit the details here, as they are not central to this
paper, but one may invoke \cite[Corollary 3.3]{eberhard:6} to deduce from
Corollary \ref{cor:Lagsubjet} that
\begin{equation*}
q\left( \underline{\partial }^{2}L_{\mathcal{U}}^{\varepsilon }\left( 0,\bar{z}_{\mathcal{U}}\right) \right) \left( w\right) =q\left( \underline{\partial
}^{2}\left( \operatorname{co}h|_{\mathcal{U}}\right) \left( 0,\bar{z}\right) \right)
\left( w\right) \quad \text{for all }w\in \mathcal{U}\text{.}
\end{equation*}
It follows that $\mathcal{U}\supseteq \operatorname{dom}q\left( \underline{\partial }
^{2}L_{\mathcal{U}}^{\varepsilon }\left( 0,\bar{z}_{\mathcal{U}}\right)
\right) \left( \cdot \right) \ =\operatorname{dom}q(\underline{\partial }^{2}\left(
\operatorname{co}h|_{\mathcal{U}}\right) (0,\bar{z}))\left( \cdot \right) \cap
\mathcal{U}=\mathcal{U}^{2}\cap \mathcal{U}$. Then the imposition of the
equality $\mathcal{U}=\mathcal{U}^{2}\mathcal{\ }$(our definition of a fast
track) implies $\underline{\partial }^{2}L_{\mathcal{U}}^{\varepsilon
}\left( 0,\bar{z}_{\mathcal{U}}\right) =\underline{\partial }^{2}\left(
\operatorname{co}h|_{\mathcal{U}}\right) (0,\bar{z})=\underline{\partial }^{2}\left(
f|_{\mathcal{U}}\right) (\bar{x},\bar{z}).$ It would be enlightening to have
a result that establishes this identity without a~priori assuming $\mathcal{U}=\mathcal{U}^{2}$ but having this as a consequence.
\end{remark}
Recall that for a convex function $\left( \operatorname{co}h\right) ^{\ast }$ its
subjet is nonempty at every point at which it is subdifferentiable. The following result
will be required later in the paper.
\begin{lemma}
Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a proper lower
semi-continuous function possessing a
tilt stable local minimum at $\bar{x}$. Suppose in addition that $\bar{z}=0\in \operatorname{rel}$-$\operatorname{int}\partial f\left( \bar{x}\right) $, $\mathcal{U}^{\prime }\subseteq \mathcal{U}$, $$v\left( u\right) \in \operatorname{argmin}_{v^{\prime }\in \mathcal{V^{\prime }}\cap B_{\varepsilon }\left( 0\right) }f\left( \bar{x}+u+v^{\prime }\right) $$
and $u:=P_{\mathcal{U^{\prime }}}\left[ m_f \left( z_{\mathcal{U^{\prime }}}+
\bar{z}_{\mathcal{V^{\prime }}}\right) \right] .$
\begin{enumerate}
\item Then $z_{\mathcal{U^{\prime }}}+0_{\mathcal{V}}\in \partial \left(
\operatorname{co}h\right) \left( u+v\left( u\right) \right) =\partial _{\operatorname{co}}h\left( u+v\left( u\right) \right) $ and consequently $z_{\mathcal{U^{\prime }}}\in \partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left(
u\right) =\partial k_{v}\left( u\right) .$
\item Suppose $\left( \left( u+v\left( u\right) \right) ,Q\right) \in
\partial ^{2,-}\left( \operatorname{co}h\right) ^{\ast }\left( z_{\mathcal{U^{\prime
}}}+0_{\mathcal{V^{\prime }}}\right) $ then $P_{\mathcal{U^{\prime }}}^{T}QP_{\mathcal{U^{\prime }}}\in \partial ^{2,-}\left( \operatorname{co}h\right)
^{\ast }|_{\mathcal{U^{\prime }}}\left( z_{\mathcal{U^{\prime }}}\right)
=\partial ^{2,-}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) .$
Consequently when $\nabla ^{2}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}
}\right) $ exists
\begin{equation}
\partial _{\mathcal{U^{\prime }}}^{2,-}\left( \operatorname{co}h\right) ^{\ast
}\left( u+v\left( u\right) ,z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }
}}\right) :=P_{\mathcal{U^{\prime }}}^{T}\partial ^{2,-}\left( \operatorname{co}
h\right) ^{\ast }\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}
}\right) P_{\mathcal{U^{\prime }}}=\nabla ^{2}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) -\mathcal{P}(\mathcal{U}^{\prime}). \label{neqn:13}
\end{equation}
\end{enumerate}
\end{lemma}
\begin{proof}
Note that as $\bar{z}=0$ by (\ref{neqn:33}) we have $k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) =h^{\ast }\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) =(\operatorname{co}h)^{\ast }\left(
z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) $. Invoking Proposition \ref{cor:conv} we have $k_{v}\left( u\right) :=h\left( u+v\left( u\right) \right) =\operatorname{co}h\left(
u+v\left( u\right) \right) $, and by Lemma \ref{lem:27} we then have
\begin{equation*}
k_{v}\left( u\right) +k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right)
=\langle z_{\mathcal{U^{\prime }}},u\rangle =\operatorname{co}h\left( u+v\left(
u\right) \right) +h^{\ast }\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right)
\end{equation*}
and hence $z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\in \partial
(\operatorname{co} h)\left( u+v\left( u\right) \right) =\partial _{\operatorname{co}}h\left(
u+v\left( u\right) \right) $. Taking into account Remark \ref{rem:lem} we
have for all $u\in \mathcal{U^{\prime }\cap B}_{\varepsilon }\left( 0\right)
$ that
\begin{equation*}
z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\in \partial \operatorname{co}
h\left( u+v\left( u\right) \right) \quad \iff \quad z_{\mathcal{U^{\prime }}}\in \partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right)
=\partial k_{v}\left( u\right) .
\end{equation*}
For the second part we have from the definition of $\left( \left( u+v\left(
u\right) \right) ,Q\right) \in \partial ^{2,-}\left( \operatorname{co}h\right)
^{\ast }\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) $
locally around $z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}$ that
\begin{eqnarray*}
\left( \operatorname{co}h\right) ^{\ast }\left( y\right) &\geq &\left( \operatorname{co}
h\right) ^{\ast }\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) +\langle u+v\left( u\right) ,z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\rangle \\
&&\qquad +\frac{1}{2}\langle Q\left( y-\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) \right) ,\left( y-\left( z_{\mathcal{U^{\prime
}}}+0_{\mathcal{V^{\prime }}}\right) \right) \rangle +o\left( \left\Vert
y-\left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right)
\right\Vert ^{2}\right) .
\end{eqnarray*}
Restricting to $\mathcal{U^{\prime }}$ we have the following locally around $z_{\mathcal{U^{\prime }}}$
\begin{eqnarray*}
\left( \operatorname{co}h\right) ^{\ast }|_{\mathcal{U^{\prime }}}\left( y_{\mathcal{U^{\prime }}}\right) &\geq &\left( \operatorname{co}h\right) ^{\ast }|_{\mathcal{U^{\prime }}}\left( z_{\mathcal{U^{\prime }}}\right) +\langle u,z_{\mathcal{U^{\prime }}}\rangle \\
&&\qquad +\frac{1}{2}\langle QP_{\mathcal{U^{\prime }}}\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) ,P_{\mathcal{U^{\prime }}
}\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) \rangle
+o\left( \left\Vert P_{\mathcal{U^{\prime }}}\left( y_{\mathcal{U^{\prime }}
}-z_{\mathcal{U^{\prime }}}\right) \right\Vert ^{2}\right) \\
&=&\left( \operatorname{co}h\right) ^{\ast }|_{\mathcal{U^{\prime }}}\left( z_{\mathcal{U^{\prime }}}\right) +\langle u,z_{\mathcal{U^{\prime }}}\rangle \\
&&\qquad +\frac{1}{2}\langle \left( P_{\mathcal{U^{\prime }}}^{T}QP_{\mathcal{U^{\prime }}}\right) \left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) ,\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime
}}}\right) \rangle +o\left( \left\Vert y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right\Vert ^{2}\right)
\end{eqnarray*}
and by (\ref{neqn:33}) we have $k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}
}\right) =\left( \operatorname{co}h\right) ^{\ast }\left( z_{\mathcal{U^{\prime }}
}+0_{\mathcal{V^{\prime }}}\right) =\left( \operatorname{co}h\right) ^{\ast }|_{\mathcal{U^{\prime }}}\left( z_{\mathcal{U^{\prime }}}\right) .$
Hence
$$
P_{\mathcal{U^{\prime }}}^{T}QP_{\mathcal{U^{\prime }}}\in \partial
^{2,-}\left( \operatorname{co}h\right) ^{\ast }|_{\mathcal{U^{\prime }}}\left( z_{\mathcal{U^{\prime }}}\right) =\partial ^{2,-}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) .
$$
When $\nabla ^{2}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) $ exists we have $\partial ^{2,-}k_{v}^{\ast}\left( z_{\mathcal{U^{\prime }}}\right) =\nabla ^{2}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) -\mathcal{P}(\mathcal{U}^{\prime})$ giving (\ref{neqn:13}).
\end{proof}
In the following we shall at times use the alternate notation $z_{\mathcal{U^{\prime }}}+z_{\mathcal{V^{\prime }}}=(z_{\mathcal{U^{\prime }}},z_{\mathcal{V^{\prime }}})$ to contain the notational burden of the former. The
proof of the next proposition follows a similar line of argument as in \cite[Proposition 3.1]{ebpenot:2}.
\begin{proposition}
\label{prop:subhessianinverse}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a proper lower semi-continuous function and suppose that $\bar{x}$ is a tilt stable local minimum of $f$. In addition suppose $\bar{z}=0\in \operatorname{rel}$-$\operatorname{int}\partial f\left( \bar{x}\right) $, $\mathcal{U^{\prime }\subseteq U}$ and $v\left( u\right) \in \operatorname{argmin}_{v^{\prime
}\in \mathcal{V^{\prime }}\cap B_{\varepsilon }\left( 0\right) }f\left( \bar{x}+u+v^{\prime }\right) $ with $u:=P_{\mathcal{U^{\prime }}}\left[ m\left(
z_{\mathcal{U^{\prime }}}+\bar{z}_{\mathcal{V^{\prime }}}\right) \right] $
or $z_{\mathcal{U^{\prime }}}\in \partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right) =\partial k_{v}\left( u\right) $. Suppose $k_{v}^{\ast }:\mathcal{U^{\prime }}\rightarrow \mathbb{R}_{\infty }$ is a $C^{1,1}\left( B_{\varepsilon }\left( 0\right) \right) $ function for some $\varepsilon >0$ with $\nabla ^{2}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) $ existing as a positive definite form. Then for $u:=\nabla
k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) $ we have
\begin{equation*}
Q=\nabla ^{2}k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) \quad
\implies \text{\quad }Q^{-1}\in \partial _{\mathcal{U^{\prime }}}^{2,-}(\operatorname{co}h)\left( u+v\left( u\right) ,z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) .
\end{equation*}
\end{proposition}
\begin{proof}
When $u:=P_{\mathcal{U^{\prime }}}\left[ m\left( z_{\mathcal{U^{\prime }}}+
\bar{z}_{\mathcal{V^{\prime }}}\right) \right] $ or $z_{\mathcal{U^{\prime }}
}\in \partial L_{\mathcal{U^{\prime }}}^{\varepsilon }\left( u\right)
=\partial k_{v}\left( u\right) $ (see Proposition \ref{prop:LU}) we have $z_{\mathcal{U^{\prime }}}+0_{\mathcal{V}^{\prime}}\in \partial \left( \operatorname{co}h\right) \left( u+v\left( u\right)
\right) $. We show that $\left(
\begin{array}{cc}
Q^{-1} & 0 \\
0 & 0
\end{array}
\right) $ is a subhessian of $\operatorname{co}h$ at $(u,v(u))$ for $z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\in \partial (\operatorname{co}h)\left(
u+v\left( u\right) \right) $ and hence deduce that (by definition) $Q^{-1}\in \partial _{\mathcal{U^{\prime }}}^{2,-}\left( \operatorname{co}h\right)
\left( u+v\left( u\right) ,z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) $. Expanding $k_{v}^{\ast }$ via a second order Taylor expansion
around $z_{\mathcal{U^{\prime }}}$ we have for all $y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\in B_{\varepsilon }\left( 0\right) $ a function $\delta \left( \varepsilon \right) \rightarrow 0$ as $\varepsilon \rightarrow
0$ with
\begin{eqnarray*}
\left( \operatorname{co}h\right) ^{\ast }\left( y_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) &=&k_{v}^{\ast }\left( y_{\mathcal{U^{\prime }}
}\right) \\
&=&k_{v}^{\ast }\left( z_{\mathcal{U^{\prime }}}\right) +\langle y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}},u\rangle +\frac{1}{2}\langle Q\left(
y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) ,\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) \rangle +o\left(
\left\Vert \left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right)
\right\Vert ^{2}\right) \\
&\leq &\left( \operatorname{co}h\right) ^{\ast }\left( \left( z_{\mathcal{U^{\prime }
}},0_{\mathcal{V^{\prime }}}\right) \right) +\langle y_{\mathcal{U^{\prime }}
}-z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}},u+v\left( u\right)
\rangle \\
&&\qquad +\frac{1}{2}\langle \left(
\begin{array}{cc}
Q & 0 \\
0 & 0
\end{array}
\right) \left( \left( y_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}
}\right) -\left( z_{\mathcal{U^{\prime }}},0\right) \right) ,\left( \left(
y_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}}\right) -\left( z_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}}\right) \right) \rangle \\
&&\qquad \qquad \qquad \qquad \qquad +\frac{1}{2} \delta \left( \varepsilon \right)
\left\Vert \left( \left( y_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}
}\right) -\left( z_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}}\right)
\right) \right\Vert ^{2}
\end{eqnarray*}
Then as $\operatorname{co}h\left( u+v\left( u\right) \right) =\langle z_{\mathcal{U^{\prime }}},u\rangle -\left( \operatorname{co}h\right) ^{\ast }\left( \left( z_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}}\right) \right) $ and $\langle Q\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right)
,\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) \rangle
\geq \alpha \left\Vert y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}
}\right\Vert ^{2}$ we have
\begin{eqnarray*}
\operatorname{co}h\left( u^{\prime }+v\left( u^{\prime }\right) \right)
&=&\sup_{\left( y_{\mathcal{U^{\prime }}},y_{\mathcal{V^{\prime }}}\right)
}\left\{ \langle \left( y_{\mathcal{U^{\prime }}},y_{\mathcal{V^{\prime }}
}\right) ,\left( u^{\prime },v\left( u^{\prime }\right) \right) \rangle
-\left( \operatorname{co}h\right) ^{\ast }\left( y_{\mathcal{U^{\prime }}}+y_{\mathcal{V^{\prime }}}\right) \right\} \\
&\geq &\sup_{y_{\mathcal{U^{\prime }}}}\left\{ \langle \left( y_{\mathcal{U^{\prime }}},0_{\mathcal{V^{\prime }}}\right) ,\left( u^{\prime },v\left(
u^{\prime }\right) \right) \rangle -\left( \operatorname{co}h\right) ^{\ast }\left(
y_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) \right\} \\
&\geq &\operatorname{co}h\left( u+v\left( u\right) \right) +\langle z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle +\sup_{y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\in B_{\varepsilon }\left( 0\right) }\left\{ \langle
y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}},u^{\prime }-u\rangle
\right. \\
&&\qquad \qquad \left. -\frac{1}{2}\left( 1+\alpha ^{-1}\delta \left(
\varepsilon \right) \right) \langle Q\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) ,\left( y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}\right) \rangle \right\}
\end{eqnarray*}
and when $u^{\prime }-u\in \left( 1+\alpha ^{-1}\delta \left( \varepsilon
\right) \right) \varepsilon \left\Vert Q^{-1}\right\Vert ^{-1}B_{1}\left(
0\right) $ we have the supremum attained at
\begin{equation*}
y_{\mathcal{U^{\prime }}}-z_{\mathcal{U^{\prime }}}=\left( 1+\alpha
^{-1}\delta \left( \varepsilon \right) \right) ^{-1}Q^{-1}\left( u^{\prime
}-u\right) \in B_{\varepsilon }\left( 0\right) .
\end{equation*}
Hence when $\left( u^{\prime },v^{\prime }\right) \in B_{\gamma \left(
\varepsilon \right) }\left( 0\right) $, for $\gamma \left( \varepsilon
\right) :=\left( 1+\alpha ^{-1}\delta \left( \varepsilon \right) \right)
\varepsilon \left\Vert Q^{-1}\right\Vert ^{-1}$, we have
\begin{eqnarray*}
\operatorname{co}h\left( u^{\prime }+v^{\prime }\right) &\geq &\operatorname{co}h\left(
u^{\prime }+v\left( u^{\prime }\right) \right) \geq \operatorname{co}h\left(
u+v\left( u\right) \right) +\langle \left( z_{\mathcal{U^{\prime }}}+0_{\mathcal{V^{\prime }}}\right) ,\left( u^{\prime },v^{\prime }\right) -\left(
u,v\left( u\right) \right) \rangle \\
&&\qquad \qquad +\frac{1}{2}\langle \left(
\begin{array}{cc}
Q^{-1} & 0 \\
0 & 0
\end{array}
\right) \left( u^{\prime },v^{\prime }\right) -\left( u,v\left( u\right)
\right) ,\left( u^{\prime },v^{\prime }\right) -\left( u,v\left( u\right)
\right) \rangle \\
&&\qquad \qquad \qquad \qquad -\beta \left( \varepsilon \right) \left\Vert
\left( u^{\prime },v^{\prime }\right) -\left( u,v\left( u\right) \right)
\right\Vert ^{2}
\end{eqnarray*}
where $\beta \left( \varepsilon \right) =\left[ 1-\left( 1+\alpha
^{-1}\delta \left( \varepsilon \right) \right) ^{-1}\right] \left\Vert
Q^{-1}\right\Vert \rightarrow 0$ as $\varepsilon \rightarrow 0$. That is
\begin{equation*}
\left(
\begin{array}{cc}
Q^{-1} & 0 \\
0 & 0
\end{array}
\right) \in \partial ^{2,-} (\operatorname{co} h)\left( u+v\left( u\right) ,z_{\mathcal{U^{\prime
}}}+0_{\mathcal{V^{\prime }}}\right) ,
\end{equation*}
from which the result follows.
\end{proof}
\section{The Main Result}
The main tools we use to establish our results are the convexification that
tilt stable local minimum enable us to utilise \cite{Drusvy:1}, the
correspondence between tilt stability and the strong metric regularity of
the locally restricted inverse of the subdifferential and the connection
conjugacy has to inversion of subdifferentials of convex functions \cite{Drusvy:1, Art:1}. These tools and the coderivative
characterisation (\ref{neqn:2}) of tilt stability (being applicable to
convex functions) allows a chain of implications to be forged. The
differentiability properties we seek may be deduced via strong metric
regularity or alternatively via the results of \cite{Miroslav:1} after
invoking the Mordukhovich coderivative criteria for the Aubin property for
the associated subdifferential.
Once again we will
consider subspaces $\mathcal{U^{\prime}} \subseteq \mathcal{U}$.
We now show that tilt stability is inherited by $k_{v}$.
\begin{proposition}
\label{prop:tilt1}Consider $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }
$ is a proper lower semi-continuous function and $v\left( u\right) \in \operatorname{argmin}_{v^{\prime }\in \mathcal{V}\cap B_{\varepsilon }\left( 0\right)
}f\left( \bar{x}+u+v^{\prime }\right) .$ Suppose that $f$ has a tilt stable
local minimum at $\bar{x}$ for $0\in \partial f\left( \bar{x}\right) $ then $v\left( \cdot \right) :\mathcal{U^{\prime}}\rightarrow \mathcal{V^{\prime}}$
is uniquely defined and the associated function $k_{v}\left( \cdot \right) :
\mathcal{U^{\prime}} \rightarrow \mathbb{R}_{\infty }$ has a tilt stable
local minimum at $0$.
\end{proposition}
\begin{proof}
In this case we have $\left( \bar{z}_{\mathcal{U}^{\prime}}, \bar{z}_{\mathcal{V^{\prime}} }\right) =\left( 0,0\right) $. By tilt stability we have $m\left( \cdot \right) $ a single valued Lipschitz function and hence $v\left( \cdot \right) $ is unique. From Proposition \ref{prop:LU} and $\left\{ u\right\} =P_{\mathcal{U^{\prime}}}\left[ m\left( z_{\mathcal{U^{\prime}}}\right) \right] $ we have $z_{\mathcal{U^{\prime}}}\in \partial
_{\operatorname{co}}\left[ L_{\mathcal{U^{\prime}}}^{\varepsilon }+\delta
_{B_{\varepsilon }^{\mathcal{U^{\prime}}}\left( 0\right) }\right] \left(
u\right) $ and from Propositions \ref{prop:m} and \ref{prop:co} that
\begin{eqnarray*}
\left\{ \left( u,v\left( u\right) \right) \right\} &=&m \left( z_{\mathcal{U^{\prime}} }+\bar{z}_{\mathcal{V^{\prime}}}\right) =\operatorname{argmin}_{\left(
u^{\prime },v^{\prime }\right) }\left\{ g\left( u^{\prime }+v^{\prime
}\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime }+v^{\prime }\rangle
\right\} \\
&=&\operatorname{argmin}_{\left( u^{\prime },v^{\prime }\right) }\left\{ h\left(
u^{\prime }+v^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime
}+v^{\prime }\rangle \right\} \\
\text{and so }\left\{ u\right\} &=&\operatorname{argmin}_{u^{\prime }\in \mathcal{U^{\prime}} }\left\{ \left[ h\left( u^{\prime }+v\left( u^{\prime }\right)
\right) -\langle 0,u^{\prime }+v\left( u^{\prime }\right) \rangle \right]
-\langle z_{\mathcal{U^{\prime}}},u^{\prime }\rangle \right\} \\
&=&\operatorname{argmin}_{u^{\prime }\in \mathcal{U^{\prime}}}\left\{ k_{v}\left(
u^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime }\rangle
\right\}
\end{eqnarray*}
implying $\left\{ u\right\} =P_{\mathcal{U^{\prime}}}\left[ m\left( z_{\mathcal{U^{\prime}} }+0\right) \right] \subseteq \operatorname{argmin}_{u^{\prime
}\in \mathcal{U^{\prime}}}\left\{ k_{v}\left( u^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime }\rangle \right\} =\left\{ u\right\} $.
Hence
\begin{equation*}
\operatorname{argmin}_{u^{\prime }\in \mathcal{U^{\prime}} }\left\{ k_{v}\left(
u^{\prime }\right) -\langle z_{\mathcal{U^{\prime}}},u^{\prime }\rangle
\right\} =P_{\mathcal{U^{\prime}} }\left[ m\left( z_{\mathcal{U^{\prime}}
}\right) \right]
\end{equation*}
is clearly a single valued, locally Lipschitz function of $z_{ \mathcal{U^{\prime}} }\in B_{\varepsilon }^{\mathcal{U^{\prime}}}\left( 0\right)
\subseteq \mathcal{U^{\prime}}$.
\end{proof}
\begin{remark}
Clearly Proposition \ref{prop:tilt1} implies $k_{v}\left( \cdot \right) :
\mathcal{U}^{2}\rightarrow \mathbb{R}_{\infty }$ has a tilt stable local
minimum at $0$ relative to $\mathcal{U}^{2}\subseteq \mathcal{U}$.
\end{remark}
The following will help connect the positive definiteness of the densely
defined Hessians of the convexification $h$ with the associated uniform
local strong convexity of $f$. These earlier results \cite[Theorem 24, Corollary 39]{eberhard:8} may be
compared with Theorem 3.3 of \cite{Drusvy:1} in that they link ``stable strong
local minimizers of $f$ at $\bar{x}$'' to tilt stability. We say $f_{z}:=f-\langle z,\cdot \rangle $ has a strict local minimum order two at $x^{\prime }$ relative to $B_{\delta }(\bar{x})\ni x^{\prime }$ when $f_{z}(x)\geq f_{z}\left( x^{\prime }\right) +\beta \left\Vert x-x^{\prime
}\right\Vert ^{2}$ for all $x\in B_{\delta }(\bar{x})$. It is a classical
fact that this is characterised by the condition $\left( f_{z}\right)
_{\_}^{\prime \prime }\left( x,0,h\right) >0$ for all $\left\Vert
h\right\Vert =1,$ see \cite[Theorem 2.2]{Studniarski:1986}.
The following
result gives conditions on $f$, in finite dimensions,
such that the coderivative in the second order sufficiency condition (\ref{neqn:2}) is uniformly bounded away from zero by a constant $\beta >0$.
Then indeed (\ref{neqn:2}) is equivalent to this strengthened condition. This follows from a uniform bound on the associated quadratic minorant associated with the
strong stable local minimum. This phenomenon was also observed in \cite[Theorem 5.36]{BonnansShapiroBook}
in the case of infinite dimensions for a class of optimisation problems. As we already know this is
true for $C^{1,1}$ functions (see Corollary \ref{cor:equiv}) and as we know that application of the
infimal convolution to prox-regular functions produces a $C^{1,1}$ function, there is a clear path
to connect these results. Indeed this is the approach used in \cite{eberhard:8,eberhard:9}.
\begin{theorem} [\protect\cite{eberhard:8}, Theorem 34 part 1.]
\label{tilt:eb} \label{thm:main}Suppose $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is
lower--semicontinuous, prox--bounded (i.e. minorised by a quadratic) and $0\in \partial _{p}f(\bar{x})$.
Suppose in addition there exists $\delta >0$ and $\beta >0$ such that
for all $(x,z)\in B_{\delta }(\bar{x},0)\cap \operatorname{Graph}\,\partial _{p}f$
the function $f-\langle z,\cdot \rangle $ has a strict local minimum order
two at $x$ in the sense that there exists $\gamma >0$ (depending on $x,z$)
such that for each $x^{\prime }\in {B}_{\gamma }(x)$ we have
\begin{equation}
f(x^{\prime })-\langle z,x^{\prime }\rangle \geq f(x)-\langle z,x\rangle
+\beta \Vert x-x^{\prime }\Vert ^{2}. \label{neqn:130}
\end{equation}
Then we have for all $\Vert w\Vert=1$ and $0\neq p\in{D}^{\ast}(\partial
_{p}f)(\bar{x},0)(w)$ that $\langle w,p\rangle\geq\beta>0$.
\end{theorem}
\begin{corollary} \label{cor:strict}
Suppose $f:\mathbb{R}
^{n}\rightarrow \overline{\mathbb{R}}$ is lower semi--continuous,
prox--bounded and $f$ is both prox--regular at $\bar{x}$ with respect to $0\in \partial _{p}f(\bar{x})$ and subdifferentially continuous there. Then the following are equivalent:
\begin{enumerate}
\item \label{equiv:1} For all $\Vert w\Vert=1$ and $p\in{D}^{\ast}(\partial
_{p}f)(\bar{x},0)(w)$ we have $\langle w,p\rangle>0$.
\item \label{equiv:2} There exists $\beta >0$ such that for all $\Vert w\Vert=1$ and $p\in{D}^{\ast}(\partial
_{p}f)(\bar{x},0)(w)$ we have $\langle w,p\rangle \geq \beta >0$.
\end{enumerate}
\end{corollary}
\begin{proof}
We only need show \ref{equiv:1} implies \ref{equiv:2}. By \cite[Theorem 1.3]{rock:7} we have \ref{equiv:1} implying a tilt stable local minimum at $\bar{x}$. Now apply \cite[Theorem 3.3]{Drusvy:1} to deduce the existence of a $\delta >0$ such that for all $(x,z)\in B_{\delta }(\bar{x},0)\cap \operatorname{Graph}
\,\partial _{p}f$ we have $x$ a strict local minimizer order two of
the function $f-\langle z,\cdot \rangle $ in the sense that (\ref{neqn:130}) holds for some uniform value $\beta >0$ for all $x^{\prime} \in B_{\gamma} (x)$. Now apply Theorem \ref{tilt:eb} to obtain \ref{equiv:2}.
\end{proof}
Another
condition equivalent to all of those in \cite[Theorem 1.3]{rock:7} is the
following
\begin{equation}
f_{s}^{\prime \prime }\left( x,z,u\right) >0\text{\quad for all }\left(
x,z\right) \in B_{\delta }(\bar{x},0)\cap \operatorname{Graph}\,\partial _{p}f,
\label{neqn:44}
\end{equation}
which is motivated by the classical observation that $f^{\prime \prime
}\left( x,z,u\right) >0$ implies $f-\langle z,\cdot \rangle $ has a strict
local minimum order 2 at $x$ (see \cite[Theorem 2.2]{Studniarski:1986}). We will show that a
stronger version gives an equivalent characterisation in Corollary \ref{cor:equiv} below. The following
construction is also standard. Denote
\begin{equation*}
\hat{D}^{\ast }\left( \partial _{p}f\right) (x,z)(w)=\{v\in \mathbb{R}^{n}\mid (v,-w)\in \hat{N}_{\operatorname{Graph}\,\partial _{p}f}(x,z)\},
\end{equation*}
where $\hat{N}_{\operatorname{Graph}\,\partial _{p}f}(x,z)=\left(
\limsup_{t\downarrow 0}\frac{\operatorname{Graph}\,\partial _{p}f-(x,p)}{t}\right)
^{\circ }$ is the contingent normal cone. Then we have $D^{\ast }\left(
\partial _{p}f\right) (\bar{x},0)(w)=g$-$\limsup_{\left( x,z\right)
\rightarrow _{S_{p}\left( f\right) }\left( \bar{x},0\right) }\hat{D}^{\ast
}\left( \partial _{p}f\right) (x,z)(w)$ (the graphical limit supremum \cite[page 327]{rock:6}).
\begin{corollary}\label{cor:equiv}
\label{cor:ebnonzero} Suppose $f:\mathbb{R}^{n}\rightarrow {\mathbb{R}_{\infty}}$ is lower semi--continuous, prox--bounded and $f$ is both
prox--regular at $\bar{x}$ with respect to $0\in \partial _{p}f(\bar{x})$
and subdifferentially continuous there. Then the following are equivalent:
\begin{enumerate}
\item \label{ppart:1}For all $\Vert w\Vert =1$ and $p\in {D}^{\ast
}(\partial _{p}f)(\bar{x},0)(w)$ we have $\langle w,p\rangle >0$.
\item \label{ppart:2} There exists $\beta >0$ such that for all $\Vert w\Vert =1$ we have $f_{s}^{\prime \prime
}\left( x,z,w\right) \geq \beta >0$ for all $\left( x,z\right) \in B_{\delta }(\bar{x},0)\cap \operatorname{Graph}\,\partial _{p}f$, for some $\delta >0$.
\end{enumerate}
Moreover the $\beta$ in part \ref{ppart:2} may be taken as that in Corollary \ref{cor:strict} part \ref{equiv:2}.
\end{corollary}
\begin{proof}
(\ref{ppart:1}.$\implies $\ref{ppart:2}.) By Corollary \ref{cor:strict} we have \ref{ppart:1}
equivalent to condition \ref{equiv:2} of Corollary \ref{cor:strict} (for some fixed $\beta >0$).
Now define $G := f - \frac{\beta'}{2}\| \cdot - \bar{x} \|^2$ (for $0<\beta' < \beta$). Apply the sum rule for the limiting subgradient and that for the coderivatives \cite[Theorem 10.41]{rock:6} to deduce that $0 \in \partial G (\bar{x}) = \partial f(\bar{x})-\beta' \times 0$ and also
${D}^{\ast} (\partial G)(\bar{x},0) (w) \subseteq {D}^{\ast} (\partial f )(\bar{x},0) (w) -\beta' w$.
Then for any $v \in {D}^{\ast} (\partial G)(\bar{x},0) (w)$ we have
$\langle v, w \rangle = \langle p , w \rangle -\beta' \|w \|^2 >0$. Now apply Theorem 3.3 of \cite{Drusvy:1} to deduce there exists a strict local minimum order two for $G_z := G - \langle z , \cdot \rangle$ at each
$\left( x,z\right) \in B_{\delta }(\bar{x},0)\cap \operatorname{Graph}\,\partial G$ for some
$\delta >0$.
Noting that $\partial G$ and $\partial_p G$ locally coincide around $(\bar{x}, 0)$, after possibly reducing $\delta >0$, we apply (see \cite[Theorem 2.2]{Studniarski:1986}) to deduce that $(G_z)^{\prime\prime} (x,0,w)
= f^{\prime\prime} (x,z,w) - \beta' \|w\|^2 > 0$ for all $\beta '< \beta$. This implies \ref{ppart:2}.
(\ref{ppart:2}.$\implies $\ref{ppart:1}.) Let $ \left( x,z\right) \in B_{\delta }(\bar{x},0)\cap \operatorname{Graph}\,\partial _{p}f$. We use the fact that $f_{s}^{\prime
\prime }\left( x,z,w\right) > \beta' >0$ for all $0< \beta' < \beta$ and $\Vert w\Vert =1$ implies $$f_{z}:=f-\langle z,\cdot \rangle -\frac{\beta'}{2} \| \cdot - x \|^2$$ has $x$ as a strict local minimum
order 2 at $x$. We may now apply \cite[Theorem 67]{eberhard:9} to deduce that for all $y\in {\hat{D}}^{\ast}(\partial _{p}f_{z})(x,0)(w)$ we
have $\langle w,y\rangle \geq 0$. By direct calculation from definitions one may show that
${\hat{D}}^{\ast}(\partial _{p}f_{z})(x,0)(w)={\hat{D}}^{\ast }(\partial _{p}f)(x,z)(w) - \beta' w$
and hence $\langle p , w \rangle \geq \beta' \|w\|^2$ for all $p \in {\hat{D}}^{\ast }(\partial _{p}f)(x,z)(w)$.
Taking the graphical limit supremum \cite[identity 8(18)]{rock:6} of ${\hat{D}}^{\ast }(\partial _{p}f)(x,z)(\cdot )$
as $\left( x,z\right) \rightarrow _{S_{p}\left( f\right) }\left( \bar{x},0\right) $ gives \ref{ppart:1}.
\end{proof}
One of the properties that follows from \cite[Theorem 1.3]{rock:7} is that the Aubin Property (or pseudo-Lipschitz property) holds for the mapping $z \mapsto B_{\delta} (\bar{x}) \cap (\partial f)^{-1} (z)$. The Aubin property is related to differentiability via the following result.
\begin{theorem} [\protect\cite{Miroslav:1}, Theorem 5.3] \label{thm:eb} Suppose $H$ is a Hilbert space
and $f:H\mapsto \mathbb{R}_{\infty }$ is lower semi-continuous,
prox-regular, and subdifferentially continuous at $\bar{x}\in \operatorname{int}
\operatorname{dom}\partial f$ for some $\bar{v}\in \partial f(\bar{x})$. In
addition, suppose $\partial f$ is pseudo-Lipschitz (i.e. possess the Aubin
property) at a Lipschitz rate $L$ around $\bar{x}$ for $\bar{v}$. Then there
exists $\varepsilon >0$ such that $\partial f(x)=\{\nabla f(x)\}$ for all $x\in B_{\varepsilon }(\bar{x})$ with $x\mapsto \nabla f(x)$ Lipschitz at the
rate $L$.
\end{theorem}
\begin{corollary}
Under the assumption of Proposition \ref{prop:tilt1} we have $z\mapsto
\partial k_{v}^{\ast }(z)$ a single valued Lipschitz continuous mapping in
some neighbourhood of $0$.
\end{corollary}
\begin{proof}
We invoke Theorems \ref{tilt:eb} and \ref{thm:eb}. As $(\operatorname{co}
k_{v})^{\ast}= k_{v}^{\ast }$ and being a convex function it is prox-regular
and subdifferentially continuous so $(\partial _{p}\operatorname{co}
k_{v})^{-1}=(\partial \operatorname{co}k_{v})^{-1}=\partial k_{v}^{\ast }$ is single
valued and Lipschitz continuous by Theorem \ref{thm:eb}, noting that the
tilt stability supplies the Aubin property for $\left( \partial \operatorname{co}
k_{v}\right) ^{-1}$ via \cite[Theorem 1.3]{rock:7}.
\end{proof}
We include the following for completeness. We wish to apply this in conjunction with
Alexandrov's theorem and this is valid due to the equivalence of the existence of a
Taylor expansion and twice differentiability in the extended sense (see \cite[Corollary 13.42, Theorem 13.51]{rock:6}).
\begin{lemma}\label{lem:48}
Suppose $f: \mathbb{R}^n \to \mathbb{R}_{\infty}$ is a locally finite convex function on $B_{\varepsilon} (x)$ which is twice differentiable at $\bar{x} \in B_{\varepsilon} (x)$ with $\bar{z} := \nabla f(\bar{x})$ and $Q:= \nabla^2 f(\bar{x})$ positive definite. Then we have
\begin{equation}
\bar{z} := \nabla f(\bar{x}) \text{ and } Q:= \nabla^2 f(\bar{x})
\quad \iff \quad \bar{x} = \nabla f^{\ast} (\bar{z}) \text{ and } Q^{-1}=\nabla^2 f^{\ast} (\bar{z}). \label{iffQ}
\end{equation}
\end{lemma}
\begin{proof}
In \cite{Gian:1} it is shown that when $g$ is convex with $g(0)=0$, $\nabla g (0) = 0$ and twice differentiable at $x=0$ in the sense that the following Taylor expansion exists:
$
g(y) = \frac{1}{2} (Qy)^{T} y + o(\| y \|^2).
$
Then we have the corresponding Taylor expansion:
$
g^{\ast} (x) = \frac{1}{2} (Q^{-1} x)^T x + o(\|x\|^2).
$
We may apply \cite[Corollary 13.42]{rock:6} to claim these expansions are equivalent to
the existence of a Hessian for both functions (twice differentiability in the extended sense)
where $0 = \nabla g(0)$, $Q = \nabla^2 g(0)$ and $0=\nabla g^{\ast} (0)$, $Q^{-1} = \nabla^2 g^{\ast} (0)$. Now apply this
to the function $g(y):=f(y+\nabla f(\bar{x})) - \langle \nabla f (\bar{x}) , y+\nabla f(\bar{x}) \rangle$ noting that $g^{\ast} (z) = f^{\ast} (z + \nabla f(\bar{x})) - \langle \bar{x}, z \rangle$.
We have $\nabla^2 g (0) = \nabla^2 f(\bar{x})$, while $\nabla g^{\ast} (0) = 0$ implies
$\nabla f^{\ast} (\nabla f(\bar{x})) = \bar{x}$ and $\nabla^2 g^{\ast} (0) = Q^{-1} = \nabla^2 f^{\ast} (\nabla f(\bar{x}))$, demonstrating the forward implication ($\implies$) in (\ref{iffQ}). To obtain the reverse implication we apply the proven result to the convex function $f^{\ast}$ using the
bi-conjugate formula $f^{\ast\ast} = f$.
\end{proof}
Our main goal is to demonstrate that the restriction of $f$ to the set $\mathcal{M}:=\left\{ \left( u,v\left( u\right) \right) \mid u\in \mathcal{U}^{2}\right\} $ coincides with a $C^{1,1}$ smooth
function of $u \in \mathcal{U}$. Consequently we will be focusing on the case when
$\mathcal{U}^2$ is a linear subspace and so take $\mathcal{U^{\prime}}
\equiv \mathcal{U}^2$ in our previous results. The next result demonstrates
when there is a symmetry with respect to conjugation in the tilt stability
property for the auxiliary function $k_{v}$.
\begin{theorem}
Consider $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ is a proper
lower semi-continuous function, which is a prox-regular function at $\bar{x}$
for $0\in \partial f(\bar{x})$ with a nontrivial subspace $\mathcal{U}^{2}=b^{1}\left( \underline{\partial }^{2}f\left( \bar{x},0\right) \right)
\subseteq \mathcal{U}$. Denote $\mathcal{V}^{2}=(\mathcal{U}^{2})^{\perp }$,
let $v\left( u\right) \in \operatorname{argmin}_{v^{\prime }\in \mathcal{V}^{2}\cap B_{\varepsilon }\left( 0\right) }f\left( \bar{x}+u+v^{\prime
}\right) :\mathcal{U}^{2}\rightarrow \mathcal{V}^{2}$ and $k_{v}\left(
u\right) :=h\left( u+v\left( u\right) \right) :\mathcal{U}^{2}\rightarrow
\mathbb{R}_{\infty }$. Suppose also that $f$ has a tilt stable local minimum
at $\bar{x}$ for $0\in \partial f\left( \bar{x}\right) $ then for $p\neq 0$
we have
\begin{equation}
\forall q\in D^{\ast }\left( \nabla k_{v}^{\ast }\right) \left( 0,0\right)
\left( p\right) \quad \text{we have \quad }\langle p,q\rangle >0
\label{neqn:15}
\end{equation}
and hence $k_{v}^{\ast }$ has a tilt stable local minimum at $0\in \partial
k_{v}^{\ast }\left( 0\right) .$
\end{theorem}
\begin{proof}
On application of Propositions \ref{prop:tilt1} and \ref{prop:co} we have $\operatorname{co}k_{v}\left( \cdot \right) :\mathcal{U}^2\rightarrow \mathbb{R}_{\infty }$ possessing a tilt stable local minimum at $0$. As $\operatorname{co}
k_{v}\left( \cdot \right) $ is convex it is prox-regular at $0$ for $0\in
\partial \operatorname{co} k_{v}\left( 0\right) \ $and subdifferentially continuous
at $0$ \cite[Proposition 13.32]{rock:6}. Hence we may apply \cite[Theorem 1.3]{rock:7} to obtain the equivalent condition for tilt stability. For all $q \ne 0$
\begin{equation}
\langle p,q\rangle >0\text{\quad for all \quad }p\in D^{\ast }\left(
\partial \left[ \operatorname{co}k_{v}\right] \right) \left( 0,0\right) \left(
q\right) . \label{neqn:101}
\end{equation}
Now apply Corollary \ref{cor:strict} to deduce the existence of $\beta >0$ such that
$\langle p,q\rangle \geq \beta >0$ for all $(p,q)$ taken in (\ref{neqn:101})
with $\| q \| =1$.
For this choice of $v(\cdot)$ we have $k_v = L^{\varepsilon}_{\mathcal{U}^2}$. From Proposition \ref{prop:reg} part \ref{part:3}, Remark \ref{rem:lem} and
Lemma \ref{lem:conv} we see that $\nabla k_{v}(0)=\{0\}=\nabla \operatorname{co}
k_{v}(0)$. Then whenever $x^{k}\in S_{2}(\operatorname{co}k_{v})$ with $x^{k}\rightarrow 0$ (as we always have $z^{k}=\nabla \operatorname{co}
k_{v}(x^{k})\rightarrow 0=\nabla \operatorname{co}k_{v}(0)$) it follows from
Corollary \ref{cor:ebnonzero} that we have
\begin{equation}
\left( \operatorname{co}k_{v}\right) _{s}^{\prime \prime }\left( x^{k},\nabla \operatorname{co}k_{v}\left( x^{k}\right) ,h\right) =\langle \nabla ^{2}\operatorname{co}
k_{v}(x^{k})h,h\rangle >0\,\text{\quad for all }h\in \mathcal{U}^{2}
\label{neqn:46}
\end{equation}
for $k$ sufficiently large. By Alexandrov's theorem this positive
definiteness of Hessians must hold on a dense subset of some neighbourhood
of zero. By the choice of $v(\cdot )$ we have $k_{v}(u)=L_{\mathcal{U}^{2}}^{\varepsilon }(u)$ and hence we may assert that $\partial _{\operatorname{co}}L_{\mathcal{U}^{2}}^{\varepsilon }(u)=\partial \operatorname{co}k_{v}(u)\neq
\emptyset $ in some neighbourhood of the origin in $\mathcal{U}^{2}$.
Since $\left[ \operatorname{co}k_{v}\right] ^{\ast }=k_{v}^{\ast }$ and $\nabla
k_{v}^{\ast }$ $=\left[ \partial \operatorname{co}k_{v}\right] ^{-1}$ we may apply
\cite[identity 8(19)]{rock:6} to deduce that for $\left\Vert q\right\Vert =1$
we have
\begin{equation*}
-q\in D^{\ast }\left( \left[ \partial \operatorname{co}k_{v}\right] ^{-1}\right)
\left( 0,0\right) \left( -p\right) =D^{\ast }\left( \nabla k_{v}^{\ast
}\right) \left( 0,0\right) \left( -p\right) .
\end{equation*}
Hence we can claim that for $q\neq 0$, after a sign change, that $\langle
p,q\rangle =\langle -p,-q\rangle \geq \beta >0$. We need to rule out the
possibility that $0\in D^{\ast }\left( \nabla k_{v}^{\ast }\right) \left(
0,0\right) \left( p\right) $ for some $p\neq 0$. To this end we may use the
fact that $k_{v}^{\ast }$ is $C^{1,1}$ (and convex) and apply \cite[Theorem
13.52]{rock:6} to obtain the following characterisation of the convex hull
of the coderivative in terms of limiting Hessians. Denote $S_{2}(k_{v}^{\ast
}):=\{x\mid \nabla ^{2}k_{v}^{\ast }(x)\text{ exists}\}$ then
\begin{equation*}
\operatorname{co}D^{\ast }(\nabla k_{v}^{\ast })(0,0)(p)=\operatorname{co}\{Ap\mid
A=\lim_{k}\nabla ^{2}k_{v}^{\ast }(z^{k})\text{ for some $z^{k}$($\in
S_{2}(k_{v}^{\ast })$)$\rightarrow 0$}\}.
\end{equation*}
Now suppose $0\in D^{\ast }\left( \nabla k_{v}^{\ast }\right) \left(
0,0\right) \left( p\right) $ then there exists $A^{i}=\lim_{k}\nabla
^{2}k_{v}^{\ast }(z_{i}^{k})$ for $z_{i}^{k}\rightarrow 0$ such that $0=q:=\sum_{i=1}^{m}\lambda _{i}A^{i}p\in \operatorname{co} D^{\ast }\left( \nabla
k_{v}^{\ast }\right) \left( 0,0\right) \left( p\right) $. As $p\neq 0$ we
must then have $\langle p,q\rangle =p^{T}(\sum_{i=1}^{m}\lambda
_{i}A^{i})p=0 $ where $B:=\sum_{i=1}^{m}\lambda _{i}A^{i}$ is a symmetric
positive semi-definite matrix. The inverse $(A_{k}^{i})^{-1}$ exists (relative to $\mathcal{U}^2$) due to (\ref{neqn:46}).
Now apply the duality formula for Hessians Lemma \ref{lem:48} to deduce that when $x_{i}^{k}:=\nabla k_{v}^{\ast }(z_{i}^{k})$
then $A_{k}^{i}=\nabla ^{2}k_{v}^{\ast }(z_{i}^{k})$ iff $(A_{k}^{i})^{-1}=\nabla ^{2}(\operatorname{co} k_{v})(x_{i}^{k})$.
We now apply Lemma \ref{lem:boundprox} to deduce that the limiting
subhessians of $h(w):=f(\bar{x}+w)$ satisfy (\ref{neqn:34}). We will want to
apply this bound to the limiting subhessians of $\operatorname{co}h$ at $x_{i}^{k}+v(x_{i}^{k})$. To this end we demonstrate that $\Delta _{2}h\left(
x_{i}^{k}+v(x_{i}^{k}),\left( z_{i}^{k},0\right) ,t,w\right) \geq \Delta
_{2}\left( \operatorname{co}h\right) \left( x_{i}^{k}+v(x_{i}^{k}),\left(
z_{i}^{k},0\right) ,t,w\right) $ for all $t\in \mathbb{R}$ and any $w$. This
follows from Lemma \ref{lem:conv}, Proposition \ref{cor:conv} in that $\left( z_{i}^{k},0\right) \in \partial \operatorname{co} h\left(
x_{i}^{k}+v(x_{i}^{k})\right) =\partial h\left(
x_{i}^{k}+v(x_{i}^{k})\right) ,$ $\operatorname{co}h\left(
x_{i}^{k}+v(x_{i}^{k})\right) =h\left( x_{i}^{k}+v(x_{i}^{k})\right) $ and $\operatorname{co}h\left( u+v\right) \leq h\left( u+v\right) $ for all $\left(
u,v\right) \in \mathcal{U}^2\times \mathcal{V}^2$. On taking the limit
infimum for $t\rightarrow 0$ and $w\rightarrow u\in \mathcal{U}^2$ we obtain
\begin{eqnarray*}
&&q\left( \partial ^{2,-}\left( \operatorname{co}h\right) \left(
x_{i}^{k}+v(x_{i}^{k}),\left( z_{i}^{k},0\right) \right) \right) \left(
u\right) =\left( \operatorname{co}h\right) _{s}^{\prime \prime }\left(
x_{i}^{k}+v(x_{i}^{k}),\left( z_{i}^{k},0\right) ,u\right) \\
&\leq &h^{\prime \prime }\left( x_{i}^{k}+v(x_{i}^{k}),\left(
z_{i}^{k},0\right) ,u\right) =q\left( \partial ^{2,-}h\left(
x_{i}^{k}+v(x_{i}^{k}),\left( z_{i}^{k},0\right) \right) \right) \left(
u\right) .
\end{eqnarray*}
Hence the bound in (\ref{neqn:34}) involving the constant $M>0$ applies to any $Q_k \in \partial ^{2,-}\left( \operatorname{co}h\right) \left( x_{i}^{k}+v(x_{i}^{k}),\left( z_{i}^{k},0\right) \right) $
for $k$ large.
As $A_{k}^{i}=\nabla ^{2}k_{v}^{\ast }(z_{i}^{k})$ by Proposition \ref{prop:subhessianinverse} we have $(A_{k}^{i})^{-1}=(\nabla^2 _{\mathcal{U}^{2}}h^{\ast }(z_{i}^{k}+0_{\mathcal{V}^{2}}))^{-1}\in \partial _{\mathcal{U}^{2}}^{2,-}\left( \operatorname{co}h\right) (x_{i}^{k}+v(x_{i}^{k}))$ and on
restricting to the $\mathcal{U}^{2}$ space and using (\ref{neqn:33}), (\ref{neqn:34}) and (\ref{neqn:36}) we get for all $p\in \mathcal{U}^{2}$ that
\begin{equation*}
\langle A_{k}^{i},pp^{T}\rangle =\langle \nabla ^{2}k_{v}^{\ast
}(z_{i}^{k}),pp^{T}\rangle =\langle \nabla _{\mathcal{U}^{2}}^{2}h^{\ast
}(z_{i}^{k}+0_{\mathcal{V}}),pp^{T}\rangle =\langle \left[ (A_{k}^{i})^{-1}
\right] ^{-1},pp^{T}\rangle \geq \frac{1}{M}.
\end{equation*}
Thus $\{A_{k}^{i}\}$ are uniformly positive definite. By \cite{Gian:1} we
have $(A_{k}^{i})^{-1}=\nabla ^{2}(\operatorname{co}k_{v})(x_{i}^{k})$ existing at $x_{i}^{k}$ and hence
\begin{equation*}
(A_{k}^{i})^{-1} u=\nabla ^{2}(\operatorname{co}k_{v})(x_{i}^{k})u\in D^{\ast
}(\nabla \operatorname{co}k_{v})(x_{i}^{k}, z_{i}^{k})(u)\quad \text{for all $u\in
\mathcal{U} ^{2}$.}
\end{equation*}
Then, for $u\neq 0$, by Theorem \ref{tilt:eb} we have $\langle \nabla ^{2}(\operatorname{co}k_{v})(x_{i}^{k})u,u\rangle \geq \frac{\beta }{2}>0$ for $k$ large
implying $\left\{ (A_{k}^{i})^{-1}\right\} $ remain uniformly positive
definite on $\mathcal{U}^{2}$. Hence $\left\{ A_{k}^{i}\right\} $ remain
uniformly bounded within a neighbourhood of the origin within $\mathcal{U}^{2}$. Thus on taking the limit we get $A^{i}=\lim_{k}A_{k}^{i}$ is positive
definite and hence $B:=\sum_{i=1}^{m}\lambda _{i}A^{i}$ is actually positive
definite, a contradiction.
As $k_{v}^{\ast }$ is convex and finite at $0$, it is prox-regular and
subdifferentially continuous at $0$ for $0\in \partial k_{v}^{\ast }\left(
0\right) $ by \cite[Proposition 13.32]{rock:6}. Another application of \cite[Theorem 1.3]{rock:7} allows us to deduce that $k_{v}^{\ast }$ has a tilt
stable local minimum at $0\in \nabla k_{v}^{\ast }\left( 0\right) .$
\end{proof}
We may either use the strong metric regularity property to obtain the
existence of a smooth manifold or utilize the Mordukhovich criteria for the
Aubin property \cite{rock:6} and the results of \cite{Miroslav:1} on single
valuedness of the subdifferential satisfying a pseudo-Lipschitz property,
namely:
\begin{proof}
\textbf{[of Theorem \ref{thm:1}]} \textit{using strong metric regularity}
\newline
Note first that $\mathcal{U}^{2}\subseteq \mathcal{U}$ corresponds to (\ref{eqn:44}) for $\bar{z}=0$. Let $\{v\left( u\right)\} = \operatorname{argmin}_{v^{\prime }\in
\mathcal{V}^{2}\cap B_{\varepsilon }\left( 0\right) }f\left( \bar{x}+u+v^{\prime }\right) $. We apply either \cite[Theorem 1.3]{rock:7} or \cite[Theorem 3.3]{Drusvy:1} that asserts that as $k_{v}^{\ast }$ is prox-regular
and subdifferentially continuous at $0$ for $0\in \partial k_{v}^{\ast
}\left( 0\right) $ then $\partial k_{v}^{\ast }$ is strongly metric regular
at $\left( 0,0\right) .$ That is there exists $\varepsilon >0$ such that
\begin{equation*}
B_{\varepsilon }\left( 0\right) \cap \left( \partial k_{v}^{\ast }\right)
^{-1}\left( u\right)
\end{equation*}
is single valued and locally Lipschitz for $u\in \mathcal{U}^{2}$
sufficiently close to $0$. But as $\left( \partial k_{v}^{\ast }\right)
^{-1}=\partial k_{v}^{\ast \ast }=\partial \left[ \operatorname{co}k_{v}\right] $ is
a closed convex valued mapping (and hence has connected images) we must have
the existence of $\delta >0$ such that for $u\in B_{\delta }^{\mathcal{U}^{2}}\left( 0\right) $ we have $\partial \left[ \operatorname{co}k_{v}\right] \left(
\cdot \right) $ a singleton locally Lipschitz mapping (giving
differentiability). As $\{v\left( u\right) \} = \operatorname{argmin}_{v^{\prime }\in
\mathcal{V}^{2}\cap B_{\varepsilon }\left( 0\right) }\left\{ h\left(
u+v^{\prime }\right) -\langle \bar{z}_{\mathcal{V}^{2}},v^{\prime }\rangle
\right\} :\mathcal{U}^{2}\cap B_{\varepsilon }\left( 0\right) \rightarrow
\mathcal{V}^{2}$ we have $k_{v}(u)=L_{\mathcal{U}^{2}}^{\varepsilon }(u)$
for $u\in \operatorname{int}B_{\varepsilon }^{\mathcal{U}^{2}}\left( 0\right) $.
Hence $\nabla \operatorname{co} L_{\mathcal{U}^{2}}^{\varepsilon }(u)\in \partial _{\operatorname{co}}L_{\mathcal{U}^{2}}^{\varepsilon }(u)\neq \emptyset $ and by
Corollary \ref{cor:conv} we have on $\mathcal{U}^{2}$ that $h\left(
u+v\left( u\right) \right) =\left[ \operatorname{co}h\right] \left( u+v\left(
u\right) \right) $ and hence
\begin{equation*}
\partial \left[ \operatorname{co}k_{v}\right] \left( u\right) =\partial \left[ \operatorname{co}h\right] \left( u+v\left( u\right) \right) =\partial g\left( u+v\left(
u\right) \right)
\end{equation*}
is single valued implying $\nabla _{u}g\left( u+v\left( u\right) \right) $
exists where $g\left( \cdot \right) :=\left[ \operatorname{co}h\right] \left(
\cdot \right) .$
\end{proof}
\begin{corollary}\label{cor:1}
Under the assumptions of Theorem \ref{thm:1} we have $\nabla L_{\mathcal{U}^{2}}^{\varepsilon }(u)$ existing as a Lipschitz function locally on $B^{\mathcal{U}}_{\varepsilon} (0)$.
\end{corollary}
\begin{proof}
Applying Corollary \ref{cor:conv} again we can assert that under our current
assumptions that locally we have $\operatorname{co}k_{v} = k_{v} = L_{\mathcal{U}^{2}}^{\varepsilon }$ and hence $\nabla k_{v} (u) = \nabla L_{\mathcal{U}^{2}}^{\varepsilon }(u)$ exists as a Lipschitz function locally
on $B^{\mathcal{U}}_{\varepsilon} (0)$.
\end{proof}
\begin{proof}
\textbf{[of Theorem \ref{thm:1}]} \textit{using the single valuedness of the
subdifferential satisfying a pseudo-Lipschitz property.} \newline
We show that $D^{\ast }(\partial \lbrack \operatorname{co}k_{v}])(0 ,
0)(0)=\{0\}$. To this end we use (\ref{neqn:15}). Indeed this implies that $q\neq 0$ for any $p\neq 0$ whenever $q \in D^{\ast} (\nabla k^{\ast}_v ) (0,0) (p)$. Applying the result \cite[identity 8(19)]{rock:6} on inverse functions and coderivatives we have $q=0$ implies $p=0$ for all $p\in D^{\ast }(\partial \lbrack \operatorname{co}
k_{v}])(0,0)(q)$. Hence we have $D^{\ast }(\partial \lbrack \operatorname{co}
k_{v}])(0, 0)(0)=\{0\}$. Now apply the Mordukhovich criteria for the
Aubin property \cite[Theorem 9.40]{rock:6} to deduce that $\partial \lbrack
\operatorname{co}k_{v}]$ has the Aubin property at $0$ for $0\in \partial \lbrack
\operatorname{co}k_{v}](0)$. Now apply Theorem \ref{thm:eb} to deduce that $u\mapsto
\nabla \lbrack \operatorname{co}k_{v}](u)$ exists as a single valued Lipschitz mapping
in some ball $B_{\delta }^{\mathcal{U}^2}\left( 0\right) $ in the space $\mathcal{U}^2$. We now finish the proof as before in the first version.
\end{proof}
If we assume more, essentially what is needed to move towards partial smoothness
we get a $C^{1,1}$ smooth manifold.
\begin{proof}
\textbf{[of Theorem \ref{thm:2}] } First note that when we have (\ref{eqn:1}) holding using $f$ then we must have (\ref{eqn:1}) holding using $g := \operatorname{co}
h $. Thus by Proposition \ref{prop:reg} part \ref{part:4} we have (\ref{neqn:26}) holding using $g$ (via the convexification argument). As $g\left(
w\right) :=\left[ \operatorname{co}h \right] \left( w\right) $ for $w\in
B_{\varepsilon }\left( 0\right) $ is a convex function we have $g$ regular in $B_{\varepsilon
}\left( 0\right) $. Moreover as $g\left( u+v\left( u\right) \right) =f\left(
\bar{x}+u+v\left( u\right) \right) $ (and $g\left( w\right) \leq f\left(
\bar{x}+w\right) $ for all $w$ ) we have the regular subdifferential of $g$ (at $u + v(u)$)
contained in that of $f$ (at $\bar{x} + u + v(u)$). As $g$ is regular the singular subdifferential
coincides with the recession directions of the regular subdifferential \cite[Corollary 8.11]{rock:6} and so are contained in the recession direction of
the regular subdifferential of $f$. We are thus able to write down the
following inclusion
\begin{equation*}
\partial ^{\infty }g \left( u+v\left( u\right) \right)
\subseteq \partial ^{\infty }f\left( \bar{x}+u+v\left( u\right) \right)
=\left\{ 0\right\} .
\end{equation*}
By the tilt stability we have $v$ a locally Lipschitz single valued mapping.
Thus by the basic chain rule of subdifferential calculus we have
\begin{equation*}
\left\{ \nabla _{u}g\left( u+v\left( u\right) \right) \right\} = \left( e_{\mathcal{U}}\oplus \partial v\left( u\right) \right) ^{T}\partial g\left(
u\oplus v\left( u\right) \right)
\end{equation*}
is a single valued Lipschitz mapping. Under the additional assumption we
have via Proposition \ref{prop:reg} part 4 that, $\operatorname{cone}\left[ \partial
_{\mathcal{V}}g\left( u+v\left( u\right) \right) \right] \supseteq \mathcal{V}$ for
$u\in B_{\varepsilon }\left( 0\right) \cap \mathcal{U}$. As $\partial
v\left( u\right) \subseteq \mathcal{V}$ it cannot be multi-valued and still
have $\left( e_{\mathcal{U}}\oplus \partial v\left( u\right) \right)
^{T}\partial g\left( u\oplus v\left( u\right) \right) $ single valued. This
implies the limiting subdifferential $\partial v\left( u\right) $ is locally single
valued and hence $\nabla v\left( u\right) $ exists locally. The upper-semi-continuity of the subdifferential and the single-valuedness implies $u \mapsto \nabla v(u)$ is a continuous mapping.
\end{proof}
The following example demonstrates the fact that even if $\partial
_{w}g\left( u+v\left( u\right) \right) $ is multi-valued we still have $\left( e_{\mathcal{U}}, \nabla v\left( u\right) \right) ^{T}\partial
_{w}g\left( u+v\left( u\right) \right) $ single valued.
\begin{example}
If $f:\mathbb{R}^{2}\rightarrow \mathbb{R}$ is given by $f=\max \{ f_1,
f_2\} $ where $f_1=w_1^2+(w_2-1)^2$ and $f_2=w_2$, then $\partial
_{w}g\left( u+v\left( u\right) \right) $ is multi valued but $\left( e_{\mathcal{U}},\partial v\left( u\right) \right) ^{T}\partial _{w}g\left(
u+v\left( u\right) \right) $ is single valued.
\end{example}
Using the notation in the Theorem, we put $\bar{w}=0$, and find that $\partial
f\left( 0\right) =\{\alpha \left( 0,1-\sqrt{5}\right) +(1-\alpha )\left(
0,1\right) \;|\;0\leq \alpha \leq 1\}$ so we have $\mathcal{U}=\left\{
\alpha \left( 1,0\right) \mid \alpha \in \mathbb{R}\right\} $ and $\mathcal{V}=\left\{ \alpha \left( 0,1\right) \mid \alpha \in \mathbb{R}\right\} $.
With $\epsilon <1/2$ then
\begin{equation*}
v\left( u\right) =\frac{3}{2}-\frac{\sqrt{9-4u^{2}}}{2},\quad g\left(
u+v\left( u\right) \right) =f\left( \bar{x}+u+v\left( u\right) \right) =
\frac{3}{2}-\frac{\sqrt{9-4u^{2}}}{2}.
\end{equation*}
It follows that
\begin{equation*}
\nabla v\left( u\right) =\frac{2u}{\sqrt{9-4u^{2}}}\quad \text{and}\quad
\left( e_{\mathcal{U}},\partial v\left( u\right) \right) ^{T}=\left( 1,\frac{2u}{\sqrt{9-4u^{2}}}\right) ^{T}.
\end{equation*}
Now we consider $\partial _{w}g\left( u+v\left( u\right) \right) =\partial
_{w}f\left( u+v\left( u\right) \right) $. At $u+v\left( u\right) $, from $f_{1}$ we know
\begin{equation*}
t_{1}=(2u,1-\sqrt{9-4u^{2}})=\nabla _{w}f_{1}\left( u+v\left( u\right)
\right)
\end{equation*}
and from $f_{2}$ we know
\begin{equation*}
t_{2}=(0,1)=\nabla _{w}f_{2}\left( u+v\left( u\right) \right) .
\end{equation*}
Thus
\begin{equation*}
\partial _{w}f\left( u+v\left( u\right) \right) =\{\alpha t_{1}+(1-\alpha
)t_{2}\;|\;0\leq \alpha \leq 1\},
\end{equation*}
that is, $\partial _{w}g\left( u+v\left( u\right) \right) $ is multi valued.
However, for all such $\alpha $, we have
\begin{equation*}
\left( e_{\mathcal{U}},\partial v\left( u\right) \right) ^{T}(\alpha
t_{1}+(1-\alpha )t_{2})=2\alpha u+(1-\alpha \sqrt{9-4u^{2}})\frac{2u}{\sqrt{9-4u^{2}}}=\frac{2u}{\sqrt{9-4u^{2}}}.
\end{equation*}
Therefore $\left( e_{\mathcal{U}},\partial v\left( u\right) \right)^{T}\partial _{w}g\left( u+v\left( u\right) \right) $ is single valued.
We may now demonstrate that we have arrived at a weakening of the second order expansions studied in \cite[Theorem 3.9]{Lem:1},
\cite[Equation (7)]{Mifflin:2004:2} and \cite[Theorem 2.6]{Miller:1}.
\begin{corollary}\label{cor:53}
Under the assumption of Theorem \ref{thm:2} we have the following local lower Taylor estimate holding: there exists $\delta >0$ such that for all $u \in B_{\delta } (0) \cap \mathcal{U}$ we have for all $u'+v' \in B_{\delta} (u + v(u))$
\begin{eqnarray*}
f(\bar{x} + u' + v' ) &\geq & f(\bar{x} + u + v(u)) + \langle z_{\mathcal{U}}(u) + \bar{z}_{\mathcal{V}}
,u' + v' - (u+v(u)) \rangle \\
&& \quad + \frac{1}{2} (u' - u)^T Q (u' - u) + o(\|u' -u\|^2 ),
\end{eqnarray*}
for all $Q \in \partial^{2,-} L_{\mathcal{U}}^{\varepsilon }(u, z_{\mathcal{U}}(u))$, where
$ z_{\mathcal{U}}(u) := \nabla L_{\mathcal{U}}^{\varepsilon } (u) $.
\end{corollary}
\begin{proof}
We apply Corollary \ref{cor:Lagsubjet} taking note of the observation in remark \ref{rem:27}
to obtain the following chain of inequalities. As $Q \in \partial^{2,-} L_{\mathcal{U}}^{\varepsilon }(u, z_{\mathcal{U}}(u))$ we have
\begin{eqnarray*}
f(\bar{x} + u' + v') - \langle \bar{z}_{\mathcal{V}} , v' \rangle & \geq & f(\bar{x} + u' + v(u'))
- \langle \bar{z}_{\mathcal{V} }, v(u') \rangle = L_{\mathcal{U}}^{\varepsilon }(u')\\
&\geq & L_{\mathcal{U}}^{\varepsilon }(u) + \langle \nabla L_{\mathcal{U}}^{\varepsilon } (u) , u'-u \rangle + \frac{1}{2} (u'- u)Q(u'-u ) + o(\|u' - u \|^2 ) \\
&=& f(\bar{x} + u + v(u)) - \langle \bar{z}_{\mathcal{V} }, v(u) \rangle
+ \langle z_{\mathcal{U}}(u) , u'-u \rangle + \frac{1}{2} (u'- u)Q(u'-u ) \\
&& \qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad + o(\|u' - u \|^2 ) ,
\end{eqnarray*}
where we have used Corollary \ref{cor:1} to deduce that $\nabla L_{\mathcal{U}}^{\varepsilon } (u) = z_{\mathcal{U}} (u)$ exists
locally as a Lipschitz continuous function. The result now follows using the orthogonality of the spaces $\mathcal{U}$ and $\mathcal{V}$.
\end{proof}
\begin{remark}
The functions described in Theorem \ref{thm:2} are quite closely related to
the partially smooth class introduced by Lewis \cite{Lewis:2,Lewis:1}. Lewis
calls $f$ partially smooth at $x$ relative to a manifold $\mathcal{M}$ iff
\begin{enumerate}
\item \label{part1} We have $f|_{\mathcal{M}}$ is smooth around $x$;
\item \label{part2} for all points in $\mathcal{M}$ close to $x$ we have $f$
is regular and has a subgradient;
\item \label{part3} we have ${f_{\_}}^{\prime}(x,h) >- {f_{\_}}^{\prime}(x,-h)$ for all $h \in N_{\mathcal{M}} (x)$ and
\item \label{part4} the subgradient mapping $w \mapsto \partial f (w)$ is
continuous at $x$ relative to $\mathcal{M}$.
\end{enumerate}
It is not difficult to see that $\{0\}\times \mathcal{V}=N_{\mathcal{M}}(x)$. Clearly we have \ref{part1} and \ref{part3} holding for the function
described in Theorem \ref{thm:2}. As functions that are prox-regular at a
point $(x,0)\in \operatorname{Graph}\partial f$ are not necessarily regular at $x$
then \ref{part2} is not immediately obvious, although a subgradient must
exist. By Proposition \ref{prop:reg} the restricted function (to $\mathcal{U}
$) is indeed regular. Moreover the ``convex representative'' given by $g:=
\operatorname{co}h$ is regular, thanks to convexity. The potential for $w\mapsto
\partial g(w)$ to be continuous at $0$ (relative to $\mathcal{M}$) is clearly bound to the need for $w_{\mathcal{V}}\mapsto \partial _{\mathcal{V}}g(w_{\mathcal{V}})$ to be
continuous at $0$. As $0\in \operatorname{int}\partial _{\mathcal{V}}g\left(
u+v\left( u\right) \right) $ for $u\in B_{\varepsilon }\left( 0\right) \cap
\mathcal{U}$ this problem may be reduced to investigating whether $u\mapsto
\operatorname{int}\partial _{\mathcal{V}}g\left( u+v\left( u\right) \right) $ is
lower semi-continuous at $0$. This is not self evident either. So the
question as to whether $g$ is partially smooth is still open. The solution
to this issue may lie in the underlying assumption that $\mathcal{U}=
\mathcal{U}^{2}$ in Theorem \ref{thm:2} (see the discussion in Remark \ref{rem:fasttrack}).
On balance the authors would conjecture that the functions we described in Theorem \ref{thm:2}
are most likely partially smooth, despite failing to engineer a proof.
\end{remark}
We would like to finish this section with some remarks regarding the related work in \cite{Lewis:2}. Because of the gap we still currently have in providing a bridge to the concept of partial smoothness we can't make direct comparisons with the results of \cite{Lewis:2}. Moreover in \cite{Lewis:2} the authors deal with $C^2$-smooth manifolds while the natural notion of smoothness for this work is of type $C^{1,1}$. It would be interesting to see to what degree the very strong results of \cite{Lewis:2} carry over to this context. That is, a study of tilt stability of partially smooth functions underpinned by a $C^1$ or at least $C^{1,1}$-smooth manifold. This may be another avenue to close the gap that still exists.
\section{Appendix A}\label{Appendix:A}
To prove Proposition \ref{limpara} we need the following results regarding
the variational limits of rank-1 supports.
\begin{proposition}[\protect\cite{eberhard:6}, Corollary 3.3]
\label{ebcor:var}Let $\{\mathcal{A}(v)\}_{v\in W}$ be a family of non-empty
rank-1 representers (i.e. $\mathcal{A}(v)\subseteq \mathcal{S}\left(
n\right) $ and $-\mathcal{P}\left( n\right) \subseteq 0^{+}\mathcal{A}(v)$
for all $v$) and $W$ a neighbourhood of $w$. Suppose that $\limsup_{v\rightarrow w}\mathcal{A}(v)=\mathcal{A}(w)$. Then
\begin{equation}
\limsup_{v\rightarrow w}\inf_{u\rightarrow h}q\left( \mathcal{A}(v)\right)
(u)=q\left( \mathcal{A}(w)\right) (h) \label{ebneqn:3.30}
\end{equation}
\end{proposition}
Recall that $(x^{\prime },z^{\prime })\rightarrow _{S_{p}(f)}(\bar{x},z)$
means $x^{\prime }\rightarrow ^{f}\bar{x}$, $\ z^{\prime }\in \partial
_{p}f(x^{\prime })$ and $z^{\prime }\rightarrow z$.
\begin{corollary}
Let $f:\mathbb{R}^{n}\rightarrow \mathbb{R}_{\infty }$ be proper and lower
semicontinuous with $h\in b^{1}(\underline{\partial }^{2}f(\bar{x},\bar{z}
)). $ Then
\begin{equation}
q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) \left( h\right)
=\limsup_{(x^{\prime },z^{\prime })\rightarrow _{S_{p}(f)}(\bar{x},\bar{z}
)}\inf_{u\rightarrow h}q\left( \partial ^{2,-}f(x^{\prime },z^{\prime
})\right) (u). \label{neqn:47}
\end{equation}
\end{corollary}
\begin{proof}
Use Proposition \ref{ebcor:var} and Remark \ref{rem:limhess}.
\end{proof}
Denote the infimal convolution of $f$ by $f_{\lambda }(x):=\inf_{u\in
\mathbb{R}^{n}}\left( f(u)+\frac{1}{2\lambda }\Vert x-u\Vert ^{2}\right) $.
Recall that $f_{\lambda }\left( x\right) -\frac{1}{2\lambda }\left\Vert
x\right\Vert ^{2}=-\left( f\ +\frac{\lambda }{2}\Vert \cdot \Vert
^{2}\right) ^{\ast }(\lambda x)$ and this $f_{\lambda }$ is always
para-concave. Recall that in \cite[Lemma 2.1]{eberhard:6}, it is observed
that $f$ is locally $C^{1,1}$ iff $f$ is simultaneously a locally
para-convex and para-concave function. Recall \cite[Proposition 4.15]{rock:6}
that states that the limit infimum of a collection of convex sets is also
convex and that the upper epi-limit of a family of functions has an
epi-graph that is the limit infimum of the family of epi-graphs.
Consequently the epi-limit supremum of a family of convex functions gives
rise to a convex function.
\begin{proof}
(of Proposition \ref{limpara}) Begin by assuming $f$ is locally para-convex.
Let $\frac{c}{2}>0$ be the modulus of para--convexity of $f$ on $B_{\delta }(
\bar{x})$, $x\in B_{\delta }(\bar{x})$ with $z\in \partial f\left( x\right) $
and $\partial ^{2,-}f\left( x,z\right) \neq \emptyset $. Let $C_{t}(x)=\{h\mid x+th\in B_{\delta }(\bar{x})\}$ then we have
\begin{equation*}
h\mapsto \left( \frac{2}{t^{2}}\right) \left( f(x+th)-f(x)-t\langle
z,h\rangle \right) +\frac{c}{t^{2}}\left( \Vert x+th\Vert ^{2}-\Vert x\Vert
^{2}-t\langle 2x,h\rangle \right)
\end{equation*}
convex on $C_{t}(x)$ since $x\mapsto f(x)+\frac{c}{2}\Vert x\Vert ^{2}$ is
convex on $B_{\delta }(\bar{x})$. Next note that for every $K>0$ there
exists a $\bar{t}>0$ such that for $0<t<\bar{t}$ we have $B_{K}(0)\subseteq
C_{t}(x)$. Once again restricting $f$ to $B_{\delta }(\bar{x})$ we get a
family
\begin{equation}
\{h\mapsto \Delta _{2}f(x,t,z,h)+\frac{c}{t^{2}}\left( \Vert x+th\Vert
^{2}-\Vert x\Vert ^{2}-t\langle 2x,h\rangle \right) \}_{t<\bar{t}}
\label{neqn:3}
\end{equation}
of convex functions with domains containing $C_{t}(x)\,$(for each $t$) and
whose convexity (on their common domain of convexity) will be preserved
under an upper epi--limit as $t\downarrow 0$. Thus, using the fact that $\frac{c}{t^{2}}\left( \Vert x+th\Vert ^{2}-\Vert x\Vert ^{2}-t\langle
2x,h\rangle \right) $ converges uniformly on bounded sets to $c\Vert h\Vert
^{2}$, we have the second order circ derivative (introduced in \cite{ebioffe:4}) given by:
\begin{align*}
f^{\uparrow \uparrow }(x,z,h)+c\Vert h\Vert ^{2}& :=\limsup_{(x^{\prime
},z^{\prime })\rightarrow _{S_{p}}(x,z),t\downarrow 0}\inf_{u^{\prime
}\rightarrow h}(\Delta _{2}f(x^{\prime },t,z^{\prime },u^{\prime }) \\
& \qquad \qquad +\frac{c}{t^{2}}\left( \Vert x+th\Vert ^{2}-\Vert x\Vert
^{2}-t\langle 2x,h\rangle \right) )
\end{align*}
which is convex on $B_{K}(0)$, for every $K>0$, being obtained by taking an
epi-limit supremum of a family of convex functions given in (\ref{neqn:3}).
We then have $h\mapsto f^{\uparrow \uparrow }(x,z,h)+c\Vert h\Vert ^{2}$
convex (with $f^{\uparrow \uparrow }(x,z,\cdot )$ having a modulus of
para-convexity of $c$).
From \cite{com:2}, Proposition 4.1 particularized to $C^{1,1}$ functions $f$
we have that there exists an $\eta \in \lbrack x,y]$ such that
\begin{equation}
f(y)\in f(x)+\langle \nabla f(x),y-x\rangle +\frac{1}{2}\langle \overline{D}^{2}f(\eta ),(y-x)(y-x)^{T}\rangle . \label{ebneqn:31}
\end{equation}
Using (\ref{ebneqn:31}), Proposition \ref{prop:ebpenot} and the variational
result corollary \ref{ebcor:var}, we have when the limit is finite (for $\bar{z}:=\nabla f(\bar{x})$)
\begin{align*}
f^{\uparrow \uparrow }(\bar{x},\bar{z},h)& :=\limsup_{(x^{\prime },z^{\prime
})\rightarrow _{S_{p}}(\bar{x},\bar{z}),\;t\downarrow 0}\inf_{u^{\prime
}\rightarrow h}\Delta _{2}f(x^{\prime },t,z^{\prime },u^{\prime }) \\
& \leq \limsup_{x^{\prime }\rightarrow \bar{x},\;t\downarrow
0}\inf_{u^{\prime }\rightarrow h}\Delta _{2}f(x^{\prime },t,\nabla
f(x^{\prime }),u^{\prime })\leq \limsup_{\eta \rightarrow \bar{x}}\inf_{u^{\prime }\rightarrow h}q\left( \overline{D}^{2}f(\eta )\right)
(u^{\prime }) \\
& \leq q\left( \overline{D}^{2}f(\bar{x})-\mathcal{P}(n)\right) (h)\leq
q\left( \underline{\partial }^{2}f(\bar{x},\bar{z})\right) (h)\leq
f^{\uparrow \uparrow }(\bar{x},\bar{z},h),
\end{align*}
where the last inequality follows from \cite[Proposition 6.5]{ebioffe:4}.
Now assuming $f$ is quadratically minorised and is prox--regular at $\bar{x}\ $ for $\bar{p}\in \partial f(\bar{x})$ with respect to $\varepsilon $ and $r.$ Let $g(x):=f(x+\bar{x})-\langle \bar{z},x+\bar{x}\rangle $. Then $0\in
\partial g(0)$ and we now consider the infimal convolution $g_{\lambda }(x)$
which is para--convex locally with a modulus $c:=\frac{\lambda r}{2(\lambda
-r)}$, prox--regular at $0$ (see \cite[Theorem 5.2]{polrock:1}). We may now
use the first part of the proof to deduce that $g_{\lambda }^{\uparrow
\uparrow }(0,0,\cdot )$ is para--convex with modulus $c=\frac{2\lambda r}{(\lambda -r)}$ and $g_{\lambda }^{\uparrow \uparrow }(0,0,h)=q\left(
\underline{\partial }^{2}g_{\lambda }(0,0)\right) (h)$ since $g_{\lambda }$
is $C^{1,1}$ (being both para-convex and para-concave). Using Corollary \ref{ebcor:var} and \cite[Proposition 4.8 part 2.]{eberhard:2} we obtain
\begin{equation*}
\limsup_{\lambda \rightarrow \infty }\inf_{h^{\prime }\rightarrow
h}g_{\lambda }^{\uparrow \uparrow }(0,0,h^{\prime })=q\left(
\limsup_{\lambda \rightarrow \infty }\underline{\partial }^{2}g_{\lambda
}(0,0)\right) (h)=q\left( \underline{\partial }^{2}g(0,0)\right) (h).
\end{equation*}
Thus $q\left( \underline{\partial }^{2}g(0,0)\right) (h)+r\Vert h\Vert
^{2}=\limsup_{\lambda \rightarrow \infty }\inf_{h^{\prime }\rightarrow
h}\left( g_{\lambda }^{\uparrow \uparrow }(0,0,h^{\prime })+\frac{\lambda r}{(\lambda -r)}\Vert h\Vert ^{2}\right) $ is convex, being the variational
upper limit of convex functions. One can easily verify that $\underline{\partial }^{2}g(0,0)=\underline{\partial }^{2}f(\bar{x},\bar{z})$ and $g^{\uparrow \uparrow }(0,0,h)=f^{\uparrow \uparrow }(\bar{x},\bar{z},h)$.
\end{proof}
\subsection*{REFERENCES}
{\footnotesize \ \makeatletter
\let\ORIGINALlatex@openbib@code=\@openbib@code
\renewcommand{\@openbib@code}{\ORIGINALlatex@openbib@code
\adjustmybblparameters} \makeatother
}
{\footnotesize \ \renewcommand{\section}[2]{}
}
\end{document} |
\begin{document}
\title{Multipartite secret key distillation and bound entanglement }
\author{Remigiusz Augusiak}
\email{remigiusz.augusiak@icfo.es} \affiliation{Faculty of Applied
Physics and Mathematics, Gda\'nsk University of Technology,
Narutowicza 11/12, 80--952 Gda\'nsk, Poland}
\affiliation{ICFO--Institut de Ci\`encies Fot\`oniques,
Mediterranean Technology Park, 08860 Castelldefels (Barcelona),
Spain}
\author{Pawe{\l} Horodecki}
\email{pawel@mif.pg.gda.pl} \affiliation{Faculty of Applied
Physics and Mathematics, Gda\'nsk University of Technology,
Narutowicza 11/12, 80--952 Gda\'nsk, Poland}
\begin{abstract}
Recently it has been shown that quantum cryptography beyond pure
entanglement distillation is possible and a paradigm for the
associated protocols has been established. Here we systematically
generalize the whole paradigm to the multipartite scenario. We provide
constructions of new classes of multipartite bound entangled
states, i.e., those with underlying twisted GHZ structure and
nonzero distillable cryptographic key. We quantitatively estimate
the key from below with help of the privacy squeezing technique.
\end{abstract}
\maketitle
\section{Introduction}
Quantum cryptography is one of the most successful applications of
quantum physics in information theory. The original pioneering
BB84 scheme \cite{BB84} was based on sending nonorthogonal states
through an insecure quantum channel. Then the alternative approach
(E91) \cite{Ekert} based on generating key from pure entangled
quantum state have been proposed and later extended to the case of
mixed states in quantum privacy amplification scheme \cite{QPA}
which exploited the idea of distillation of pure entangled quantum
states from more copies of noisy entangled (mixed) states
\cite{distillation}. Much later it was realized that actually
existence of (may be noisy) initial entanglement in the state is
necessary for any type of protocols distilling secret key from
quantum states \cite{Curty1,Curty2}. In a meantime the problem of
unconditional security (security in the most unfriendly scenario
when the eavesdropper may apply arbitrarily correlated
measurements on the sent particles or, in the entanglement
distillation scheme, distribute many particles in a single
entangled quantum state) was further solved in Ref. \cite{LoChau}
in terms of entanglement distillation showing equivalence between
the two (BB84 and E91) ideas (see Ref. \cite{ShorPreskill} for
an alternative proof). However, still the protocol worked only for
entanglement that could be distilled. Also, other
protocols \cite{DW1,DW2} that exploited a modern approach to secrecy
(based on classical notions) also were used in cases when pure
entanglement was distillable. It was known, however, for a
relatively long time that there are states (called bound
entangled) that can not be distilled to pure form \cite{bound}. In
the above context it was quite natural to expect that bound
entangled states cannot lead to private key. However, it happens
not to be true \cite{KH0}: one can extend the entanglement
distillation idea from distillation of pure states to distillation
of {\it private states} (in general mixed states that contain
a private bit) and further show that there are examples of bound
entangled states from which secure key can be distilled. A general
paradigm has been systematically worked out in Refs.
\cite{KH,KHPhD} with further examples of bound
entangled states with secure key \cite{PHRANATO,HPHH} and interesting applications
\cite{RenesSmith,RB,Christandl}. From the quantum channels
perspective the extended scheme \cite{KH0} represents secure key
distillation with help of a quantum channel with vanishing quantum
capacity (i.e., it is impossible to transmit qubit states faithfully).
Those channels \cite{KH0,HPHH} were later used in the discovery
\cite{SmithYard} of the drastically non intuitive, fully nonclassical
effect of mutual activation of zero capacity channels, which
``unlock'' each other, allowing to transmit quantum information
faithfully if encoded into entanglement across two channels
inputs. On the other hand with help of the seminal machinery
exploiting the notion of almost productness in unconditionally
secure quantum key distillation \cite{Renner} it has been shown
that unconditional security under channels that do not convey quantum
information is possible \cite{UnconditionalSecurity}. Here we
would like to stress that we focus on the approach to quantum
cryptography based on private states rather than the, to some extent,
complementary information--theoretic approach which has also been proven
very fruitful (see Refs. \cite{DW1,RK,KGR,MCL,Renner}).
The results discussed above concern bipartite states. The aim of
the present paper, which, among others, concludes part of the
analysis of \cite{DoktoratRA}, is to develop the general approach
to distillation of secure key from multipartite states. Basically
the content of the paper can be divided into two parts. In the first
part we systematically and in a consistent way generalize the approach
from Ref. \cite{KH}. Here the basic notion of multipartite p--dits
that has been introduced and analyzed already in the previous
paper \cite{PHRA}.
It should be stressed here that, as extensively discussed in
\cite{KHPhD}, other modifications of the paradigm are possible as
far as the so--called notion of ``direct accessibility of cryptographic
key'' is considered. The p--dit approach is based on local von
Neumann measurements, while it is possible also to consider local
POVM--s \cite{RenesSmith}. Both approaches (and additional one)
were proved to be equivalent in terms of the amount of distillable key
contained in a given bipartite state in Ref. \cite{KHPhD}. While
we leave this issue for further analysis we strongly believe that
the abstract proofs of the latter work naturally extend to our
multipartite case.
The first part of the present paper contains qualitatively new
elements like conditions for closeness to a p--dit state which were
not known so far, and a derivation of a lower bound for multipartite
key where an additional analysis of distance to so--called cq states
was needed. The second part of the paper contains constructions of
novel states, i.e., multipartite states that contain secure key
though are bound entangled. The states are based on the underlying
(twisted) $N$--partite GHZ structure and are PPT under any $N-1$
versus one system partial transpose. The secret key content is
bounded from below quantitatively with help of the technique adopted
from \cite{HPHH}.
More specifically after basic definitions and a generalization of
the modern definition (that has already become standard) of
secure key distillation from quantum state in Sec. II we pass to Sec. III
where the notion of multipartite private--dit state (in short
p--dit) and its properties are discussed including especially the
condition for $\epsilon$--closeness to a p--dit. Distillable
cryptographic key in terms of p-dits is analyzed in section IV.
Here an upper bound in terms of relative entropy is proved in analogy
to the bipartite case. A lower bound on the key based on
a modification of the one--way Devetak--Winter protocol \cite{DW1,DW2}
to the multipartite case is provided with help of a natural lemma with
a somewhat involved proof. Also the application \cite{HPHH} of
privacy squeezing \cite{KH0} is naturally extended and applied
here.
The next section is the longest one since it contains all the
constructions of multipartite bound entanglement with
cryptographic key. Note that the first construction, being
an extension and modification of bipartite examples from Ref.
\cite{PHRANATO}, requires nontrivial coincidence of several
conditions that are contained in Lemma V.3. They ensure that, on
the one hand the state is PPT, but on the other it allows to be
modified by the LOCC recurrence protocol to a state that is close to
a multipartite p--dit. This is equivalent to distillability.
Independently, a quantitative analysis is performed illustrating
how the lower bound for distillable key becomes positive. The
second class of bound entangled states (to some extent inspired by
bipartite four--qubit states from \cite{HPHH}) involves hermitian
unitary block elements of the density matrix. Here the
construction is different and, in comparison to the first one, the
observed secure key is much stronger. Finally we shortly recall
the limitations of quantum cryptography \cite{RAPH1,RAPH2}.
Section VI contains conclusions.
\section{Basic notions and the standard definition of secure key}
In what follows we shall be concerned with the scenario in
which $N$ parties $A_{1},\ldots,A_{N}$ wish to obtain
perfectly correlated strings of bits (or in general dits)
that are completely uncorrelated to the eavesdropper Eve by means of
local operations and public communication (LOPC). Let us recall that
the difference between the standard local operations and classical communication (LOCC)
and LOPC lies in the fact that in the latter we need to remember that
any classical message announced by the involved parties may be registered
by Eve. Therefore in comparison to the LOCC paradigm in the LOPC
paradigm one also includes the map (see e.g. Refs. \cite{Christandl,KH})
\begin{eqnarray}
\varrho_{AA'BE}&\negmedspaces=\negmedspaces&\sum_{i}\varrho_{ABE}^{(i)}\ot\proj{i}_{A'}\longrightarrow
\varrho_{AA'BB'EE'}\nonumber\\
&\negmedspaces=\negmedspaces&\sum_{i}\varrho_{ABE}^{(i)}\ot\proj{i}_{A'}
\ot\proj{i}_{B'}\ot\proj{i}_{E'}.
\end{eqnarray}
From the quantum cryptographic point of view the common
aim of all the parties $A_{1},\ldots,A_{N}$ is to distill the following state
\begin{equation}\label{idealcq}
\varrho_{\mathsf{A}E}^{(N,\mathrm{id})}=
\frac{1}{d}\sum_{i=0}^{d-1}\proj{e_{i}^{(1)}\ldots e_{i}^{(N)}}\ot
\varrho^{E},
\end{equation}
called hereafter {\it ideal c...cq (\textsf{c}q) state},
by means of LOPC. Here $\mathsf{A}\equiv A_{1}\ldots A_{N}$ and
$\{\ket{e_{i}^{(j)}}\}_{i=0}^{d-1}$ is some orthonormal
basis in the Hilbert space corresponding to the $j$th party
(denoted hereafter by $\mathcal{H}_{j}$). Their tensor product constitutes
the product basis in $\mathcal{H}_{1}\ot\ldots\ot\mathcal{H}_{N}$, which
we shall denote as
\begin{equation}
\mathcal{B}_{N}^{\mathrm{prod}}=
\left\{\ket{e_{i_{1}}^{(1)}}\ot\ldots\ot\ket{e_{i_{N}}^{(N)}}\right\}_{i_{1},\ldots,i_{N}=0}^{d-1}.
\end{equation}
(In what follows we will be often assuming
$\{\ket{e_{i}^{(j)}}\}_{i=0}^{d-1}$ to be the standard basis in
$\mathcal{H}_{j}$.) One sees that the ideal \textsf{c}q states
represent perfect classical correlations with respect to
the product basis $\mathcal{B}_{N}^{\mathrm{prod}}$ that are
uncorrelated to the eavesdropper's degrees of freedom.
We may also define a general \textsf{c}q state to be
\begin{equation}\label{nccq}
\varrho_{\mathsf{A}E}^{(N,\textsf{c}q)}=\sum_{i_{1},\ldots,i_{N}=0}^{d-1}
p_{i_{1}\ldots i_{N}}\proj{e^{(1)}_{i_{1}}\ldots e_{i_{N}}^{(N)}}
\ot\varrho_{i_{1}\ldots i_{N}}^{E}.
\end{equation}
In the above formula we could take different dimensions
for each party, however, for simplicity we restrict to the
case of equal dimensions. All the parties should have
strings of the same length at the end of the protocol to make a key.
It should be also emphasized that in what follows the $j$th party
is assumed to have an additional 'garbage' quantum system defined
on some Hilbert space $\mathcal{H}_{j}'$. Thus we will be assuming
that usually the states shared by the parties are defined on the
Hilbert space $\mathcal{H}\ot\mathcal{H}'$, where
$\mathcal{H}=\mathcal{H}_{1}\ot\ldots\ot\mathcal{H}_{N}$ and
$\mathcal{H}'=\mathcal{H}_{1}'\ot\ldots\ot\mathcal{H}_{N}'$,
$\mathcal{B}_{N}^{\mathrm{prod}}$ constitutes the product basis in
$\mathcal{H}$. Also, following Ref. \cite{KH}, the part of a given
state corresponding to $\mathcal{H}$ ($\mathcal{H}'$) will be sometimes called
{\it the key part} ({\it the shield part}). This terminology comes
from the fact that the key part is the one from which the parties
obtain the cryptographic key, while the shield part protects
secret correlation from the eavesdropper.
Following e.g. Refs. \cite{KH,Christandl}, using the notion of
\textsf{c}q states, we may define the distillable cryptographic
key in the multipartite scenario as follows.
{\it Definition II.1.} Let $\varrho_{\mathsf{A}E}$ be a state
acting on
$\mathbb{C}^{d_{1}}\ot\ldots\ot\mathbb{C}^{d_{N}}\ot\mathbb{C}^{d_{E}}$
and $(P_{n})_{n=1}^{\infty}$ be a sequence of LOPC operations such
that $P_{n}(\varrho_{\mathsf{A}E}^{\ot
n})=\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q},n)}$, where
$\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q},n)}$ is a
\textsf{c}q state with $\mathsf{A}$ part defined on
$\big(\mathbb{C}^{d_{n}}\big)^{\ot N}$. The set of operations
$P=(P_{n})_{n=1}^{\infty}$ is said to be a cryptographic
key distillation protocol if
\begin{equation}
\lim_{n\to\infty}\norsl{\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q},n)}-\varrho_{\mathsf{A}E}^{(\mathrm{id},n)}}=0,
\end{equation}
where $\varrho_{\mathsf{A}E}^{(\mathrm{id},n)}$ is the ideal $\mathsf{c}$q state defined on the same Hilbert space
as $\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q},n)}$. We define the rate of
the protocol $P=(P_{n})_{n=1}^{\infty}$ as
\begin{equation}
R_{P}(\varrho_{\mathsf{A}E})=\limsup_{n\to\infty}\frac{\log d_{n}}{n}
\end{equation}
and the distillable classical key as
\begin{equation}
C_{D}(\varrho_{\mathsf{A}E})=\sup_{P}R_{P}(\varrho_{\mathsf{A}E}).
\end{equation}
If instead of $\varrho_{\mathsf{A}E}$ one has the purification $\ket{\psi_{\mathsf{A}E}}$
we write $C_{D}(\varrho_{\mathsf{A}})$.
Let us also mention that a good indicator of the secrecy of our correlations
as well as the uniformity of the probability distribution $p_{i_{1}\ldots i_{N}}$
is the trace norm distance $\big\|\varrho_{\mathsf{A}E}^{(\mathrm{id})}-\varrho_{\mathsf{A}E}^{(\textsf{c}q)}\big\|_{1}$.
\section{Private states}
\subsection{Definition and properties}
Here we discuss the multipartite generalizations of two important
concepts of the scheme from Refs. \cite{KH0,KH}.
Firstly we introduce the notion of twisting and then the notion of
multipartite private states.
{\it Definition III.1.} Let $(U_{i_{1}\ldots i_{N}})_{i_{1}\ldots i_{N}}$ be some family of
unitary operations acting on
$\mathcal{H}'$. Given the $N$--partite product basis $\mathcal{B}_{N}^{\mathrm{prod}}$
we define {\it multipartite twisting} to be the unitary operation given by the following
formula
\begin{equation}
U_{t}=\sum_{i_{1},\ldots,i_{N}=0}^{d-1}\pr{e_{i_{1}}^{(1)}\ldots
e_{i_{N}}^{(N)}}\ot U_{i_{1}\ldots i_{N}}.
\end{equation}
This is an important notion since, as shown in the bipartite case in Ref. \cite{KH} (Theorem 1)
and as it holds also for multipartite states, application of twisting
(taken with respect to the product basis $\mathcal{B}_{N}^{\mathrm{prod}}$)
to a given state $\varrho_{\mathsf{AA}'}$ does not have any effect on the
\textsf{c}q state obtained upon a measurement of the $\mathsf{A}$ part of
the purification of $\varrho_{\mathsf{AA}'}$
in the
product basis $\mathcal{B}_{N}^{\mathrm{prod}}$. More precisely
states $\varrho_{\mathsf{AA}'}$ and $U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger}$
have the same \textsf{c}q state with respect to $\mathcal{B}_{N}^{\mathrm{prod}}$
for any twisting that is constructed using $\mathcal{B}_{N}^{\mathrm{prod}}$.
We can now pass to the notion of multipartite private states.
These are straightforward generalization of private states
from Refs. \cite{KH0,KH} and were defined already in Ref. \cite{PHRA}.
{\it Definition III.2.} Let $U_{i}$ be some unitary operations for every $i$ and
let $\varrho_{\mathsf{A}'}$ be a density matrix acting on $\mathcal{H}'$. By {\it
multipartite private state} or {\it multipartite pdit} we mean the
following
\begin{equation}\label{mpbit}
\Gamma_{\mathsf{AA'}}^{(d)}=\frac{1}{d}\sum_{i,j=0}^{d-1}\ke{e_{i}^{(1)}\ldots
e_{i}^{(N)}}\br{e_{j}^{(1)}\ldots e_{j}^{(N)}}\ot
U_{i}\varrho_{\mathsf{A}'}U_{j}^{\dag}.
\end{equation}
Naturally, for $N=2$ the above reproduces the bipartite private
states $\gamma_{A_{1}A_{2}A_{1}'A_{2}'}^{(d)}$ introduced in Ref.
\cite{KH}. It follows from the definition that any multipartite
private state may be written as
$\Gamma_{\mathsf{AA'}}^{(d)}=U_{t}(P_{d,N}^{(+)}\ot\varrho_{\mathsf{A}'})U_{t}^{\dagger}$
with $\varrho_{\mathsf{A}'}$ and $U_{t}$ denoting some density
matrix acting on $\mathcal{H}'$ and some twisting, respectively.
Moreover, $P_{d,N}^{(+)}$ stands for the projector onto the
so--called $GHZ$ state \cite{GHZstates} given by
\begin{equation}\label{GHZstates}
\ket{\psi_{d,N}^{(+)}}=\frac{1}{\sqrt{d}}\sum_{i=0}^{d-1}\ket{i}^{\ot N}.
\end{equation}
In other words we say that multipartite private states are twisted
$GHZ$ states tensored with an arbitrary density matrix
$\varrho_{\mathsf{A}'}$.
As a simple but illustrative example of a multipartite pdit one
may consider the following $(2D)^{N}\times (2D)^{N}$ state (with
$\mathcal{H}=(\mathbb{C}^{2})^{\ot N}$ and
$\mathcal{H}'=(\mathbb{C}^{D})^{\ot N}$)
\begin{eqnarray}
\Gamma_{\mathrm{ex}}^{(2)}&=&\frac{1}{2D^{N}} \left[
\begin{array}{cccc}
\mathbbm{1}_{D^{N}} & 0 & \ldots &
V_{\pi}^{(D)}\\
0 & 0 & \ldots & 0\\
\vdots & \vdots & \ddots & \vdots \\
V_{\pi}^{(D)\dagger} & 0 & \ldots & \mathbbm{1}_{D^{N}}
\end{array}
\right]\nonumber\\
&=&\frac{1}{2D^{N}}\left[\left(\proj{0}^{\ot N}+\proj{1}^{\ot
N}\right)\ot\mathbbm{1}_{D^{N}}\right.\nonumber\\
&&\left.+\left(\ket{0}\!\bra{1}^{\ot N}+\ket{1}\!\bra{0}^{\ot
N}\right)\ot V^{(D)}_{\pi}\right].
\end{eqnarray}
where $V^{(D)}_{\pi}$ is a permutation operator defined as
\begin{equation}
V^{(D)}_{\pi}=\sum_{i_{1},\ldots,i_{N}=0}^{D-1}\ket{i_{1}}\!\bra{i_{\pi(1)}}\ot \ket{i_{2}}\!\bra{i_{\pi(2)}}\ot\ldots
\ot\ket{i_{N}}\!\bra{i_{\pi(N)}}
\end{equation}
with $\pi$ being an arbitrary permutation of $N$--element set.
Clearly $V^{(D)}_{\pi}$ is unitary matrix for any permutation
$\pi$ and thus $\big|V_{\pi}^{(D)}\big|=\mathbbm{1}_{D^{N}}$
($|A|$ is defined as $\sqrt{A^{\dagger}A}$).
This, in view of the Lemma A.1 (Appendix), means that
$\mathcal{M}_{2}(\mathbbm{1}_{D^{N}},V_{\pi}^{(D)})\geq 0$ (for the definition
of $\mathcal{M}_{2}$ see Appendix) for any
$\pi$ and hence $\Gamma_{\mathrm{ex}}^{(2)}$ represents quantum state.
Moreover, $\Gamma_{\mathrm{ex}}^{(2)}$ may be derived from the
general form \eqref{mpbit} by substituting
$\varrho_{\mathsf{A}'}=\mathbbm{1}_{D^{N}}/D^{N}$, i.e., maximally
mixed state acting on $(\mathbb{C}^{D})^{\ot N}$. Finally, both
unitary operations in Eq. \eqref{mpbit} may be taken to be
$U_{0}=V_{\pi}^{(D)}$ and $U_{1}=\mathbbm{1}_{D^{N}}$.
As multipartite private state constitute a central notion of our
cryptographic scheme, below we shortly characterize multipartite
private states. Firstly, we notice that any state of which \textsf{c}q
state is the ideal one with respect to some basis $\mathcal{B}_{N}$
must be of the form \eqref{mpbit} and {\it vice versa}.
{\it Theorem III.1.} Let $\varrho_{\mathsf{AA}'}$ be a state
defined on $\mathcal{H}\ot\mathcal{H}'$ with
$\mathcal{H}=(\mathbb{C}^{d})^{\ot N}$ and arbitrary but finite--dimensional
$\mathcal{H}'$. Let also
$\varrho_{\mathsf{A}E}^{(\textsf{c}\mathrm{q})}$ denote the
\textsf{c}q state obtained from the purification of
$\varrho_{\mathsf{AA}'}$ upon the measurement of the $\mathsf{A}$
part in $\mathcal{B}_{N}^{\mathrm{prod}}$ and tracing out the
$\mathsf{A}'$ part. Then
$\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ is of the form
\eqref{idealcq} if and only if $\varrho_{\mathsf{AA}'}$ is of the
form \eqref{mpbit}, both with respect to
$\mathcal{B}_{N}^{\mathrm{prod}}$.
This fact may be proved in exactly the same way as its bipartite
version from Ref. \cite{KH}.
Secondly, it was shown in Ref. \cite{PHRA} that any multipartite
private state is distillable providing also a lower bound on
distillable entanglement. For completeness it is desirable to
briefly recall this result, which can be stated as follows.
For any multipartite private state $\Gamma_{\mathsf{AA}'}^{(d)}$
its distillable entanglement is bounded as
\begin{eqnarray}\label{rate1}
&&E_{D}\big(\Gamma^{(d)}_{\mathsf{AA'}}\big)\geq\nonumber\\
&&\max_{\substack{i,j=0,\ldots,d-1\\i<j}}\left\{ a^{\max}_{ij}\left[1-H\left(\frac{1}{2}
+\frac{\eta_{ij}}{2\sqrt{a_{ij}^{(1)}a_{ij}^{(2)}}}\right)\right]\right\}
\nonumber\\
\end{eqnarray}
where $\eta_{ij}$, $a_{ij}^{(1)}$, $a_{ij}^{(2)}$, and finally
$a^{\max}_{ij}$ are parameters characterizing the given
private state $\Gamma_{\mathsf{AA}'}^{(d)}$. They are defined as
follows
\begin{equation}\label{mpditEta}
\eta_{ij}=\max\left|\braa{f_{1}}\ldots\braa{f_{N}}
U_{i}\varrho_{\textsf{A}'}U_{j}^{\dagger}
\kett{g_{1}}\ldots\kett{g_{N}}\right|,
\end{equation}
where maximum is taken over a pair of pure product vectors
$\kett{f_{1}}\ldots\kett{f_{N}}$ and $\kett{g_{1}}\ldots\kett{g_{N}}$
belonging to $\mathcal{H}'$.
The parameters $a^{(1)}_{ij}$ and $a^{(2)}_{ij}$ are given by
\begin{equation}\label{ampdits1}
a^{(1)}_{ij}=\bra{\widetilde{f}_{1}^{(ij)}}\ldots\bra{\widetilde{f}_{N}^{(ij)}}
U_{i}\varrho_{\textsf{A}'}U_{i}^{\dagger}\ket{\widetilde{f}_{1}^{(ij)}}\ldots
\ket{\widetilde{f}_{N}^{(ij)}}
\end{equation}
and
\begin{equation}\label{ampdits2}
a^{(2)}_{ij}=\bra{\widetilde{g}_{1}^{(ij)}}\ldots\bra{\widetilde{g}_{N}^{(ij)}}
U_{j}\varrho_{\textsf{A}'}U_{j}^{\dagger}\ket{\widetilde{g}_{1}^{(ij)}}\ldots\ket{\widetilde{g}_{N}^{(ij)}},
\end{equation}
where $ \ket{\widetilde{f}_{1}^{(ij)}}\ldots
\ket{\widetilde{f}_{N}^{(ij)}}$ and $\ket{\widetilde{g}_{1}^{(ij)}}\ldots\ket{\widetilde{g}_{N}^{(ij)}}$
are the vectors realizing the maximum in Eq. (\ref{mpditEta}).
Finally $a_{ij}^{\max}$ denotes the larger of two numbers
$a_{ij}^{(1)}$ and $a_{ij}^{(2)}$.
It follows from Eqs. \eqref{mpditEta}, \eqref{ampdits1}, and
\eqref{ampdits2} that $\eta_{ij}$ is always positive and on the
other hand $\eta_{ij}\leq \sqrt{a^{(1)}_{ij}a^{(2)}_{ij}}$. This
means that $a_{ij}^{\mathrm{max}}>0$ and consequently for any pair
$(i<j)$ the expression under the maximum in Eq. \eqref{rate1} is
positive proving that $E_{D}$ of any multipartite private state is
nonzero.
Finally, we notice following Ref. \cite{PHRA} that for bipartite
private states also other entanglement measures were bounded from
below. Namely, it was shown that
\begin{equation}
E_{C}(\gamma_{A_{1}A_{2}A_{1}'A_{2}'}^{(d)})\geq \log d
\end{equation}
and, due to the fact that entanglement of formation
is not smaller than the entanglement cost, $E_{F}(\gamma_{A_{1}A_{2}A_{1}'A_{2}'}^{(d)})\geq \log d$.
\subsection{Conditions for closeness to multipartite private states}
Here we provide necessary and sufficient conditions allowing
for judging how close to some multipartite private state is
some given state $\varrho_{\mathsf{AA}'}$ defined on $\mathcal{H}\ot\mathcal{H}'$.
Let us firstly notice that any state acting on $\mathcal{H}\ot\mathcal{H}'$
may be written in the following block form
\begin{equation}\label{form2}
\varrho_{\mathsf{AA}'}=\sum_{i_{1},\ldots,i_{N}=0}^{d-1}\sum_{j_{1},\ldots,j_{N}=0}^{d-1}
\ket{i_{1}\ldots i_{N}}\!\bra{j_{1}\ldots j_{N}}\ot\Omega_{i_{1}\ldots i_{N}}^{j_{1}\ldots j_{N}},
\end{equation}
where $\Omega_{i_{1}\ldots i_{N}}^{j_{1}\ldots j_{N}}$ are assumed
to be square matrices defined on $\mathcal{H}'$. Also by
$\widetilde{\varrho}_{\mathsf{A}}$ we denote the state
$\widetilde{\varrho}_{\mathsf{A}}=\Tr_{\mathsf{A}'}(U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})$
with some twisting $U_{t}$, and by
$(\widetilde{\varrho}_{\mathsf{A}})_{i_{1}\ldots i_{N}}^{j_{1}\ldots
j_{N}}$ its entries in the standard basis. Then we can prove the
following useful lemma.
{\it Lemma III.1.} Let $\varrho_{\mathsf{AA}'}$ be some density
matrix acting on $\mathcal{H}\ot\mathcal{H}'$ with
$\mathcal{H}=(\mathbb{C}^{d})^{\ot N}$ and arbitrary
finite--dimensional $\mathcal{H}'$. Then there exists such
twisting $U_{t}$ that for a fixed index $i$ all the elements
$(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i}^{j\ldots j}$ and
$(\widetilde{\varrho}_{\mathsf{A}})_{j\ldots j}^{i\ldots i}$
$(j=0,\ldots,d-1)$ of the $i$--th row and column of
$\widetilde{\varrho}_{\mathsf{A}}=\Tr_{\mathsf{A}'}(U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})$
equal $\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}$ and
$\big\|\Omega_{j\ldots j}^{i\ldots i}\big\|_{1}$, respectively.
{\it Proof.} The proof is a simple extension of the one presented
in Ref. \cite{KH}. Acting on the state $\varrho_{\mathsf{AA}'}$ with an
unitary twisting $U_{t}$ and tracing out the $\mathsf{A}'$ subsystem, one gets
\begin{eqnarray}\label{rownanie}
\widetilde{\varrho}_{\mathsf{A}}&\negmedspaces=\negmedspaces&\sum_{i_{1},\ldots,i_{N}=0}^{d-1}\sum_{j_{1},\ldots,j_{N}=0}^{d-1}
\Tr\left(U_{i_{1}\ldots i_{N}}\Omega_{i_{1}\ldots i_{N}}^{j_{1}\ldots j_{N}}U_{j_{1}\ldots j_{N}}^{\dagger}\right)\nonumber\\
&&\hspace{3cm}\times\ket{i_{1}\ldots i_{N}}\bra{j_{1}\ldots j_{N}}.
\end{eqnarray}
First of all let us mention that from Eq. \eqref{rownanie} it
follows that we do not need to care about blocks lying on the
diagonal of $\varrho_{\mathsf{AA}'}$ as the blocks
$\Omega_{i_{1}\ldots i_{N}}^{i_{1}\ldots i_{N}}$ must be positive
and the following holds
\begin{eqnarray}
\Tr\left(U_{i_{1}\ldots i_{N}}
\Omega_{i_{1}\ldots i_{N}}^{i_{1}\ldots i_{N}}U_{i_{1}\ldots i_{N}}^{\dagger}\right)=
\norsl{\Omega_{i_{1}\ldots i_{N}}^{i_{1}\ldots i_{N}}}.
\end{eqnarray}
Now, let us focus now on the matrices $\Omega_{i\ldots i}^{j\ldots j}$
for some fixed $i$ and any $j\neq i$ (as the case of $i=j$ has just been discussed).
For simplicity and without any loss of generality
we can choose $i=0$ and thus we need to prove the theorem for
$j=1,\ldots,d-1$. At the beginning let us
concentrate on the matrix $\Omega_{0\ldots 0}^{1\ldots 1}$. We can
express it with the singular--value decomposition as
$\Omega_{0\ldots 0}^{1\ldots 1}=V_{1}D_{1}W_{1}^{\dagger}$, where
$V_{1}$ and $W_{1}$ are unitary matrices and $D_{1}$ stands for a diagonal
matrix containing singular values of $\Omega_{0\ldots 0}^{1\ldots 1}$, i.e.,
eigenvalues of $\big|\Omega_{0\ldots 0}^{1\ldots 1}\big|$. Then from
Eq. \eqref{rownanie} one infers that it suffices to take
$U_{0\ldots 0}=V_{1}^{\dagger}$ and $U_{1\ldots 1}=W_{1}$ in the twisting
$U_{t}$ to get
\begin{eqnarray}\label{rownanie2}
\Tr\left(U_{0\ldots 0}\Omega_{0\ldots 0}^{1\ldots 1}U_{1\ldots 1}^{\dagger}\right)&\negmedspaces=\negmedspaces&
\Tr(V_{1}^{\dagger}V_{1} D_{1} W_{1}^{\dagger}W_{1})\nonumber\\
&\negmedspaces=\negmedspaces&\Tr D_{1}=\norsl{\Omega_{0\ldots 0}^{1\ldots 1}}.
\end{eqnarray}
Now we may proceed with the remaining matrices $\Omega_{0\ldots
0}^{j\ldots j}$ $(j=2,\ldots,d-1)$. We need to find such matrices
in the twisting $U_{t}$ that Eq. \eqref{rownanie2} holds also for
the remaining $\Omega_{0\ldots 0}^{j\ldots j}$. Notice that
unitary matrices $U_{0\ldots 0}$ and $U_{1\ldots 1}$ have just
been fixed, however, we have still some freedom provided by
$U_{j\ldots j}$ $(j=2,\ldots,d-1)$. Using the singular value
decomposition of all $\Omega_{0\ldots 0}^{j\ldots j}$
$(j=2,\ldots,d-1)$ we may write $\Omega_{0\ldots 0}^{j\ldots
j}=V_{j}D_{j}W_{j}^{\dagger}$. This leads to
\begin{eqnarray}\label{kolejnerownanie}
\Tr\left(U_{0\ldots 0}\Omega_{0\ldots 0}^{j\ldots j}U_{j\ldots j}^{\dagger}\right)
&\negmedspaces=\negmedspaces&\Tr\left(V_{1}^{\dagger}\Omega_{0\ldots 0}^{j\ldots j}U_{j\ldots j}^{\dagger}\right)\nonumber\\
&\negmedspaces=\negmedspaces&\Tr\left(V_{1}^{\dagger}V_{j}D_{j}W_{j}^{\dagger}U_{j\ldots j}^{\dagger}\right)\nonumber\\
&\negmedspaces=\negmedspaces&\Tr\left(D_{j}W_{j}^{\dagger}U_{j\ldots j}^{\dagger}V_{1}^{\dagger}V_{j}\right),
\end{eqnarray}
where we used the property of trace saying that $\Tr AB=\Tr BA$.
It is clear from the above that to get the trace norm of
$\Omega_{0\ldots 0}^{j\ldots j}$ for any $j=2,\ldots,d-1$ it
suffices to choose $U_{j\ldots j}$ in such way that
$W_{j}^{\dagger}U_{j\ldots
j}^{\dagger}V^{\dagger}_{1}V_{j}=\mathbbm{1}$. This means that
$U_{j\ldots j}=V^{\dagger}_{1}V_{j}W_{j}^{\dagger}$
$(j=2,\ldots,d-1)$. The remaining $U_{i_{1}\ldots i_{N}}$
appearing in the definition of $U_{t}$ may be chosen at will.
Concluding we showed that there exists such $U_{t}$ that for a
fixed $i$ it holds that $(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots
i}^{j\ldots j}=\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}$
$(j=0,\ldots,d-1)$. The fact that also
$(\widetilde{\varrho}_{\mathsf{A}})_{j\ldots j}^{i\ldots
i}=\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}$ follows
obviously from hermiticity of $\widetilde{\varrho}_{\mathsf{A}}$.
$\vrule height 4pt width 3pt depth2pt$
This is a very useful lemma due to the fact that twistings
do not change the \textsf{c}q state. It allows us to concentrate on a
particular form of a given state $\varrho_{\mathsf{AA}'}$. In
other words, we can think about the state $\varrho_{\mathsf{AA}'}$
as if it has such a reduction to \textsf{A} subsystem that some of
its elements in fixed row or column are trace norms of respective
blocks of $\varrho_{\mathsf{AA}'}$ (obviously with respect to the
same product basis $\mathcal{B}_{N}^{\mathrm{prod}}$). As an
illustrative example we can consider $\varrho_{\mathsf{AA}'}$ with
$d=2$. Then from Eq. \eqref{form2} it can be written as
\begin{equation}
\varrho_{\mathsf{AA}'}=\left[
\begin{array}{cccc}
\Omega_{0\ldots 0}^{0\ldots 0} & \Omega_{0\ldots 0}^{0\ldots 1} & \ldots & \Omega_{0\ldots 0}^{1\ldots 1} \\*[1ex]
\Omega_{0\ldots 1}^{0\ldots 0} & \Omega_{0\ldots 1}^{0\ldots 1} & \ldots & \Omega_{0\ldots 1}^{1\ldots 1} \\*[1ex]
\vdots & \vdots & \ddots & \vdots \\*[1ex]
\Omega_{1\ldots 1}^{0\ldots 0} & \Omega_{1\ldots 1}^{0\ldots 1} & \ldots & \Omega_{1\ldots 1}^{1\ldots 1}
\end{array}
\right],
\end{equation}
where $\Omega_{i_{1}\ldots i_{N}}^{j_{1}\ldots
j_{N}}=\big(\Omega_{j_{1}\ldots j_{N}}^{i_{1} \ldots
i_{N}}\big)^{\dagger}$ and $\Omega_{i_{1}\ldots
i_{N}}^{i_{1}\ldots i_{N}}\geq 0$ for any $i_{k},j_{k}=0,1$. In
view of Lemma III.1 the above may be brought to the following
state
\begin{eqnarray}\label{twistedState}
\widetilde{\varrho}_{\mathsf{A}}&\negmedspaces\equiv\negmedspaces&\Tr_{\mathsf{A}'}(U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})\nonumber\\
&\negmedspaces=\negmedspaces&
\left[
\begin{array}{cccc}
\norsl{\Omega_{0\ldots 0}^{0\ldots 0}} &
(\widetilde{\varrho}_{\mathsf{A}})_{0\ldots 0}^{0\ldots 1} &
\ldots & \norsl{\Omega_{0\ldots 0}^{1\ldots 1}} \\*[1ex]
(\widetilde{\varrho}_{\mathsf{A}})_{0\ldots 1}^{0\ldots 0} &
\norsl{\Omega_{0\ldots 1}^{0\ldots 1}} & \ldots &
(\widetilde{\varrho}_{\mathsf{A}})_{0\ldots 1}^{1\ldots 1}
\\*[1ex] \vdots & \vdots & \ddots & \vdots \\*[1ex]
\norsl{\Omega_{1\ldots 1}^{0\ldots 0}} &
(\widetilde{\varrho}_{\mathsf{A}})_{1\ldots 1}^{0\ldots 1} &
\ldots & \norsl{\Omega_{1\ldots 1}^{1\ldots 1}}
\end{array}
\right].
\end{eqnarray}
Now we are prepared to provide the aforementioned conditions for
closeness to multipartite private states (the bipartite case was
discussed in Ref. \cite{KH}). Firstly we show that if a given
$\varrho_{\mathsf{AA}'}$ is close to some multipartite pdit then
(due to the above lemma) there exist such $U_{t}$ that the
$\mathsf{A}$ subsystem has all the elements
$(\Tr_{\mathsf{A}'}U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})_{i\ldots
i}^{j\ldots j}= \big\|\Omega_{i\ldots i }^{j\ldots j}\big\|_{1}$
for $j=0,\ldots,d-1$ close to $1/d$.
{\it Theorem III.2.} Let $\Omega_{i_{1}\ldots i_{N}}^{j_{1}\ldots
j_{N}}$ be some matrices and $\varrho_{\mathsf{AA}'}$ be an
$N$--partite state of the form (\ref{form2}) such that
$\big\|\varrho_{\mathsf{AA}'}-\Gamma_{\mathsf{AA}'}^{(d)}\big\|_{1}\leq
\epsilon$ for some multipartite private state
$\Gamma_{\mathsf{AA}'}^{(d)}$ for some $\epsilon >0$. Then for a
fixed index $i$ one has $\big|\big\|\Omega_{i\ldots i}^{j\ldots
j}\big\|_{1}-(1/d)\big|\leq \epsilon$ and
$\big|\big\|\Omega_{j\ldots j}^{i\ldots
i}\big\|_{1}-(1/d)\big|\leq \epsilon$ for any $j=0,\ldots,d-1$.
{\it Proof.} The proof is a modification of the one given in Ref.
\cite{KH} (Proposition 3). Let $\Gamma_{\mathsf{AA}'}^{(d)}$ be
such a private state that
$\big\|\varrho_{\mathsf{AA}'}-\Gamma_{\mathsf{AA}'}^{(d)}\big\|_{1}\leq
\epsilon$ and $U_{t}$ be such twisting that
$\Gamma_{\mathsf{AA}'}^{(d)}=U_{t}(P_{d,N}^{(+)}\ot\sigma_{\mathsf{A}'})U_{t}^{\dagger}$
with $\sigma_{\mathsf{A}'}$ denoting some state on $\mathcal{H}'$.
Then, due to the invariance of the trace norm under unitary
operations, we have
\begin{equation}
\left\|U_{t}^{\dagger}\varrho_{\mathsf{AA}'}U_{t}-P_{d,N}^{(+)}\ot\sigma_{\mathsf{A}'}\right\|_{1}\leq
\epsilon.
\end{equation}
Now, utilizing the fact that the trace norm can only decrease
under the partial trace,
we get
\begin{equation}
\left\|\widetilde{\varrho}_{\mathsf{A}}-P_{d,N}^{(+)}\right\|_{1}\leq
\epsilon,
\end{equation}
where $\widetilde{\varrho}_{\mathsf{A}}$ is of the form
\eqref{rownanie}. Notice that in general $U_{t}$ does not have to
be the one bringing $\widetilde{\varrho}_{\mathsf{A}}$ to the form
discussed in Lemma III.1. After application of the explicit form
of $P_{d,N}^{(+)}$ and $\widetilde{\varrho}_{\mathsf{A}}$ given by
Eq. \eqref{rownanie}, one can rewrite the above as
\begin{eqnarray}
&&\left\|\sum_{\substack{i_{1},\ldots,i_{N}=0\\j_{1},\ldots,j_{N}=0}}^{d-1}
\ket{i_{1}\ldots i_{N}}\!\bra{j_{1}\ldots
j_{N}}\Tr\left(U_{i_{1}\ldots i_{N}}\Omega_{i_{1}\ldots
i_{N}}^{j_{1}\ldots j_{N}}U_{j_{1}\ldots
j_{N}}^{\dagger}\right)\right.\nonumber\\
&&\left.\hspace{2cm}-\frac{1}{d}\sum_{i,j=0}^{d-1}\ket{i}\!\bra{j}^{\ot
N}\right\|_{1}\leq \epsilon.
\end{eqnarray}
Now we may utilize the fact that for any
$A=\sum_{ij}a_{ij}\ket{i}\bra{j}$ square of its Hilbert--Schmidt
norm is given by $\|A\|_{2}^{2}=\sum_{ij}|a_{ij}|^{2}$ and that
$\|A\|_{2}\leq \|A\|_{1}$. Therefore, if $\|A\|_{2}\leq \epsilon$
for some $\epsilon>0$ then one infers that any of its elements
obeys $|a_{ij}|\leq \epsilon$ $(i,j=0,\ldots,d-1)$. This
reasoning, after application to the matrix
$\widetilde{\varrho}_{\mathsf{A}}-P_{d,N}^{(+)}$, leads us to the
conclusion that for any $i,j=0,\ldots,d-1$ it holds
\begin{equation}
\left|\Tr\left(U_{i\ldots i}\Omega_{i\ldots i}^{j\ldots
j}U_{j\ldots j}^{\dagger}\right)-\frac{1}{d}\right|\leq \epsilon,
\end{equation}
which eventually gives $|\Tr(U_{i\ldots i}\Omega_{i\ldots
i}^{j\ldots j}U_{j\ldots j}^{\dagger})| \geq 1/d-\epsilon$. This,
after application of the polar decomposition of $\Omega_{i\ldots
i}^{j\ldots j}$ and properties of trace can be rewritten as
$|\Tr(W_{ij}|\Omega_{i\ldots i}^{j\ldots j}|)| \geq 1/d-\epsilon$
with $W_{ij}$ being some unitary matrix. Now, applying the
Cauchy--Schwarz inequality to the Hilbert--Schmidt scalar product
we can infer that for any positive $A$ and unitary $W$ the
following chain of inequalities holds
\begin{eqnarray}
\left|\Tr\left(WA\right)\right|&=&\left|\Tr\left(W\sqrt{A}\sqrt{A}\right)\right|\nonumber\\
&\leq
&\sqrt{\Tr\left(W\sqrt{A}\sqrt{A}W^{\dagger}\right)}\sqrt{\Tr
\sqrt{A}\sqrt{A}}\nonumber\\
&=&\Tr A=\|A\|_{1}.
\end{eqnarray}
Thus we have that $\|\Omega_{i\ldots i}^{j\ldots j}\|_{1}\geq
1/d-\epsilon$ for any $(i,j=0,\ldots,d-1)$.
On the other hand we can apply such twisting $\widetilde{U}_{t}$
that after application to $\varrho_{\mathsf{AA}'}$ and tracing out
the $\mathsf{A}'$ subsystem we get
$\widetilde{\varrho}_{\mathsf{A}}$ such that in its $i$th row (or
column) $(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i}^{j\ldots
j}=\|\Omega_{i\ldots i}^{j\ldots j}\|_{1}$ for $j=0,\ldots,d-1$. Then,
one easily concludes that
\begin{equation}
\left\|\widetilde{\varrho}_{\mathsf{AA}'}-\widetilde{U}_{t}\Gamma_{\mathsf{AA}'}^{(d)}\widetilde{U}_{t}^{\dagger}\right\|_{1}\leq
\epsilon
\end{equation}
with
$\widetilde{\varrho}_{\mathsf{AA}'}=\widetilde{U}_{t}\varrho_{\mathsf{AA}'}\widetilde{U}_{t}^{\dagger}$.
After the analogous reasoning as in the previous case we get
\begin{equation}
\left|\left\|\Omega_{i\ldots i}^{j\ldots
j}\right\|_{1}-\frac{1}{d}\Tr(\widetilde{W}_{ij}\sigma_{\mathsf{A}'})\right|\leq
\epsilon
\end{equation}
for some chosen $i$ and $j=0,\ldots,d-1$. Here by $\widetilde{W}_{ij}$ we
denoted the product of the respective unitary matrices following
from the product of $\widetilde{U}_{t}$ and $U_{t}$. Using the fact
that $|z_{1}-z_{2}|\geq |z_{1}|-|z_{2}|$, we infer from the above
inequality that
\begin{equation}
\left\|\Omega_{i\ldots i}^{j\ldots j}\right\|_{1}\leq
\epsilon+\frac{1}{d}\left|\Tr\left(\widetilde{W}_{ij}\sigma_{\mathsf{A}'}\right)\right|.
\end{equation}
It follows from the Cauchy--Schwarz inequality that the absolute
value on the right--hand side is not greater than one. Thus we get
the inequalities
\begin{equation}
\left\|\Omega_{i\ldots i}^{j\ldots j}\right\|_{1}\leq
\epsilon+\frac{1}{d}
\end{equation}
for the chosen $i$ and $j=0,\ldots,d-1$. Joining these facts
together we get the desired result. $\vrule height 4pt width 3pt depth2pt$
Notice that in the particular case of $d=2$, discussed already in Ref. \cite{KH}
(Proposition 3), the only condition for $\big\|\Omega_{0\ldots 0}^{1\ldots 1}\big\|_{1}$
(and equivalently for $\big\|\Omega_{1\ldots 1}^{0\ldots 0}\big\|_{1}$)
is that $\big\|\Omega_{0\ldots 0}^{1\ldots 1}\big\|_{1}\geq 1/2-\epsilon$.
This is because, due to the fact that $\Tr\widetilde{\varrho}_{\mathsf{A}}=1$ and
the positivity of $\widetilde{\varrho}_{\mathsf{A}}\geq 0$
(and thus also of the $2\times 2$ matrix containing the elements
$\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}$ with $i,j=0,1$)
$\big\|\Omega_{0\ldots 0}^{1\ldots 1}\big\|_{1}\leq 1/2$.
Interestingly, one may prove also a converse statement, namely if
after applying respective twisting $U_{t}$,
for some fixed row, say the $i$th one, all $\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}$
are close to $1/d$, then there exists some multipartite private state
close to a given state $\varrho_{\mathsf{AA}'}$.
{\it Theorem III.3.} Let $\varrho_{\mathsf{AA'}}$ given by
Eq. (\ref{form2}) be such that for a fixed $i$ the blocks $\Omega_{i\ldots i}^{j\ldots j}$ obey
$\big|\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}- 1/d\big|\leq \epsilon$ for any $j=0,\ldots,d-1$ and
$0<\epsilon<1/d$.
Then there exists such a multipartite private state $\Gamma_{\mathsf{AA}'}^{(d)}$ that
\begin{eqnarray}
\norsl{\varrho_{\mathsf{AA'}}-\Gamma_{\mathsf{AA'}}^{(d)}}&\negmedspaces\le\negmedspaces &
\sqrt{\log2\left[2N\sqrt{d\eta(\epsilon)}\log
d+H(2\sqrt{d\eta(\epsilon)})\right]}\nonumber\\
&&+2\sqrt{d\eta(\epsilon)},
\end{eqnarray}
where $\eta(\epsilon)\to 0$ if $\epsilon\to 0$ and
consequently the function on the right--hand side tends to zero whenever $\epsilon\to 0$.
Here $H$ denotes the binary entropy.
{\it Proof.} The proof is based on the one given in Ref.
\cite{KH}. Let $U_{t}$ be such twisting that for fixed $i$ it
holds that $(\widetilde{\varrho}_{\mathsf{A}})_{i \ldots i}^{j\ldots
j}=\big\|\Omega_{i\ldots i}^{j\ldots j}\big\|_{1}$
$(j=0,\ldots,d-1)$. Then since
$(\widetilde{\varrho}_{\mathsf{A}})_{i \ldots i}^{j\ldots
j}=[(\widetilde{\varrho}_{\mathsf{A}})_{j \ldots j}^{i\ldots
i}]^{*}$ with asterisk denoting complex conjugation, the
Hilbert--Schmidt scalar product of
$\widetilde{\varrho}_{\mathsf{A}}$ and $P_{d,N}^{(+)}$ may be
expressed as
\begin{eqnarray}\label{nierownosc}
\Tr\widetilde{\varrho}_{\mathsf{A}}P^{(+)}_{d,N}&\negmedspaces=\negmedspaces&\frac{1}{d}\sum_{i,j=0}^{d-1}
(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i}^{j\ldots j}\nonumber\\
&\negmedspaces=\negmedspaces&
\frac{1}{d}\sum_{i=0}^{d-1}(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots
i}^{i\ldots i}+\frac{2}{d}
\sum_{\substack{i,j=0\\i<j}}^{d-1}\mathrm{Re}(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots
i}^{j\ldots j}.
\end{eqnarray}
On the other hand from the positivity of $\widetilde{\varrho}_{\mathsf{A}}$
one may prove the following inequality
\begin{equation}
\sum_{i=0}^{d-1}(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i }^{i\ldots i}\geq \frac{2}{d-1}
\sum_{\substack{i,j=0\\i<j}}^{d-1}\mathrm{Re}(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i}^{j\ldots j},
\end{equation}
which after substitution to Eq. (\ref{nierownosc}) gives
\begin{equation}\label{nierownosc2}
\Tr\widetilde{\varrho}_{\mathsf{A}}P^{(+)}_{d,N}\geq \frac{2}{d-1}
\sum_{\substack{i,j=0\\i<j}}^{d-1}\mathrm{Re}(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i}^{j\ldots j}.
\end{equation}
Now we can utilize Lemma A.2 (see Appendix) to the $d\times d$
matrix with entries $(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i
}^{j\ldots j}$ $(i,j=0,\ldots,d-1)$. Namely, due to the assumption
that for some fixed $i$ the elements
$(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i }^{j\ldots j}$
satisfy $|(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i }^{j\ldots
j}-1/d|\leq \epsilon$, we have from Lemma A.2 that
$|(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i }^{j\ldots
j}-1/d|\leq \eta(\epsilon)$ for any $i,j=0,\ldots,d-1$ with
$\eta(\epsilon)\to 0$ for $\epsilon\to 0$. This means that the
real parts of any $(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i
}^{j\ldots j}$ also satisfy the above condition. In this light we get
from Eq. \eqref{nierownosc2} that
\begin{eqnarray}\label{nierownosc3}
\Tr\widetilde{\varrho}_{\mathsf{A}}P^{(+)}_{d,N}&\negmedspaces\geq\negmedspaces& \frac{2}{d-1}
\sum_{\substack{i,j=0\\i<j}}^{d-1}\mathrm{Re}(\widetilde{\varrho}_{\mathsf{A}})_{i\ldots i}^{j\ldots j}\nonumber\\
&\negmedspaces\geq \negmedspaces& \frac{2}{d-1}\sum_{\substack{i,j=0\\i<j}}^{d-1}\left(\frac{1}{d}-\eta(\epsilon)\right)\nonumber\\
&\negmedspaces=\negmedspaces& \frac{2}{d-1}\frac{d(d-1)}{2}\left(\frac{1}{d}-\eta(\epsilon)\right)\nonumber\\
&\negmedspaces=\negmedspaces& 1-d\eta(\epsilon),
\end{eqnarray}
where the first equality follows from the fact that the respective
sum contains $d(d-1)/2$ elements. The remainder of the proof goes along the
same line as its bipartite version from Ref. \cite{KH} leading to
the claimed inequality. $\vrule height 4pt width 3pt depth2pt$
Notice that to prove the theorem for the particular case of $d=2$
it suffices to assume that
$\big\|\Omega_{0\ldots 0}^{1\ldots 1}\big\|_{1}\geq 1/2-\epsilon$.
Concluding we obtained necessary and sufficient conditions for a
given state $\varrho_{\mathsf{AA}'}$ to be close to some
multipartite private state expressed in terms of the trace norm of
some blocks of $\varrho_{\mathsf{AA}'}$ (see Eq. \eqref{form2}).
\section{Distillable cryptographic key}
\subsection{Definition}
Having introduced the concept of multipartite private states we
may pass to the definition of multipartite cryptographic key. The
seminal fact behind the notion of multipartite private states is
that as shown in Refs. \cite{KH0,KH}, one can think about quantum
cryptography as a distillation of private states by means of LOCC.
In other words, we have a standard distillation scheme (as
entanglement distillation) in which we can forget about the
eavesdropper.
{\it Definition IV.1.} Let $\varrho_{\mathsf{A}}$ denote a given multipartite state acting on
$\mathbb{C}^{d_{1}}\ot\ldots\ot\mathbb{C}^{d_{N}}$ and $(\Lambda_{n})_{n=1}^{\infty}$
a sequence of LOCC operations giving $\Lambda_{n}(\varrho_{\mathsf{A}}^{\ot n})=\varrho_{\mathsf{AA}'}^{(n)}$
with $\varrho_{\mathsf{AA}'}^{(n)}$ being a state acting on $(\mathbb{C}^{d_{n}})^{\ot N}
\ot \mathcal{H}_{n}'$.
Here $\mathcal{H}_{n}'$ stands for a finite--dimensional
Hilbert space corresponding to the $\mathsf{A}'$ part of $\varrho_{\mathsf{AA}'}^{(n)}$.
Then we say that $\Lambda=(\Lambda_{n})_{n=1}^{\infty}$
is a multipartite private state distillation protocol if there exists
such a family of multipartite private states $(\Gamma_{\mathsf{AA}'}^{(d_{n})})_{n=1}^{\infty}$
that the condition
\begin{equation}
\lim_{n\to\infty}\norsl{\varrho_{\mathsf{AA}'}^{(n)}-\Gamma_{\mathsf{AA}'}^{(d_{n})}}=0
\end{equation}
holds. A rate of the protocol $\Lambda$ is defined as
$R_{\Lambda}(\varrho_{\mathsf{A}})=\limsup_{n\to \infty}[(1/n)\log d_{n}]$
and the distillable key as
\begin{equation}
K_{D}(\varrho_{\mathsf{A}})=\sup_{\Lambda}R_{\Lambda}(\varrho_{\mathsf{A}}).
\end{equation}
As shown in the bipartite case in Ref. \cite{KH}, both the
Definition II.1 and Definition IV.1 are equivalent in
the sense that if there exists LOCC protocol distilling some
multipartite private state there also exists LOPC protocol
distilling the ideal ccq state (when purification is considered)
with the same rate. As the proof from Ref. \cite{KH} may also be
applied to the multipartite case, we provide the generalized
version of the above fact below.
{\it Theorem IV.1.} The following two implications hold. Assume
that from a given state $\sigma_{\mathsf{A}}$ such that Eve has
its purification $\ket{\psi_{\mathsf{A}E}}$ one may create by LOPC
some \textsf{c}q state
$\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ (see Eq.
\eqref{nccq}) obeying
$\big\|\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}-\varrho_{\mathsf{A}E}^{(\mathrm{id})}\big\|_{1}\leq
\epsilon$ for some $\epsilon>0$ (recall that
$\varrho_{\mathsf{A}E}^{(\mathrm{id})}$ denotes the ideal
\textsf{c}q state given by Eq. \eqref{idealcq}). Then there exists
such LOCC protocol that can distill a state
$\varrho_{\mathsf{AA}'}$ from $\sigma_{\mathsf{A}}$ that satisfies
$\big\|\varrho_{\mathsf{AA}'}-\Gamma_{\mathsf{AA}'}^{(d)}\big\|_{1}\leq
2\sqrt{\epsilon}$ for some multipartite private state
$\Gamma_{\mathsf{AA}'}^{(d)}$. On the other hand if from
$\sigma_{\mathsf{A}}$ one can distill a state
$\varrho_{\mathsf{AA}'}$ close to some pdit
$\Gamma_{\mathsf{AA}'}^{(d)}$, i.e., such that
$\big\|\varrho_{\mathsf{AA}'}-\Gamma_{\mathsf{AA}'}^{(d)}\big\|_{1}\leq
\epsilon$ then there exists a LOPC protocol distilling from
$\sigma_{\mathsf{A}}$ a \textsf{c}q state such that
$\big\|\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}-\varrho_{\mathsf{A}E}^{(\mathrm{id})}\big\|_{1}\leq
2\sqrt{\epsilon}.$ Each subsystem of the $\mathsf{A}$ part of
$\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ and of the key
part of $\Gamma_{\mathsf{AA}'}^{(d)}$ is defined on
$\mathbb{C}^{d}$.
{\it Proof.} The proof goes directly along the same lines as the one
from Ref. \cite{KH}.
Interestingly, the distillable key $K_{D}$ may be used to quantify entanglement
among multipartite states. More precisely, from the definition it follows
that $K_{D}$ is monotonic under the action of LOCC operations (see e.g. \cite{MHMeasures}).
Moreover, it vanishes on multipartite states that have at least one separable cut,
which is a consequence of the straightforward multipartite generalization
of the results from Ref. \cite{Curty1,Curty2} provided in Ref. \cite{PH_przegladowka}.
Finally, as we shall show the distillable key is normalized on $GHZ$ states $P_{d,N}^{(+)}$
in the sense that $K_{D}(P_{d,N}^{(+)})=\log d$. However, firstly we need to provide
two bounds on $K_{D}$.
\subsection{Bounds on the distillable key}
The first bound is a simple multipartite generalization of
the upper bound provided in Ref. \cite{KH}, while the second bound
is a consequence of a multipartite adaptation of the Devetak--Winter
protocol \cite{DW1,DW2}. Let us start from the upper bound.
{\it Theorem IV.2.} Let $\varrho_{\mathsf{A}}$ be some $N$--partite state. Then
\begin{equation}\label{boundEntropy}
K_{D}(\varrho_{\mathsf{A}})\leq E_{r}^{\infty}(\varrho_{\mathsf{A}}),
\end{equation}
where $E_{r}^{\infty}(\varrho_{\mathsf{A}})$ is a regularized version of
the relative entropy, i.e.,
\begin{equation}
E_{r}^{\infty}(\varrho_{\mathsf{A}})=
\lim_{n\to\infty}\frac{1}{n}\inf_{\varrho^{\mathrm{sep}}_{\mathsf{A}}\in\mathcal{D}}S(\varrho_{\mathsf{A}}^{\ot n}\|\varrho^{\mathrm{sep}}_{\mathsf{A}})
\end{equation}
and $\mathcal{D}$ denotes the set of all $N$--partite fully
separable states, i.e., states of the form
\begin{equation}
\varrho_{\mathsf{A}}^{\mathrm{sep}}=\sum_{i}p_{i}\varrho_{A_{1}}^{(i)}\ot\ldots\ot
\varrho_{A_{N}}^{(i)}.
\end{equation}
{\it Proof.} The proof is a generalization of the one
from Ref. \cite{KH}.
Interestingly, we may also bound $K_{D}$ from below. For this purpose we need to
prove the following theorem.
{\it Theorem IV.3.} Let
$\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ be some
multipartite $\mathsf{c}$q state acting on $(\mathbb{C}^{d})^{\ot
N}\ot\mathbb{C}^{d_{E}}$ and given by
\begin{equation}
\varrho_{\mathsf{A}E}^{(\mathrm{cq})}=\sum_{i_{1},\ldots,i_{N}=0}^{d-1}p_{i_{1}\ldots i_{N}}
\proj{i_{1}\ldots i_{N}}\ot\varrho_{i_{1}\ldots i_{N}}^{E}.
\end{equation}
Then it is arbitrarily close to the
ideal \textsf{c}q state if and only if for a chosen party $A_{i}$
all the reductions to three--partite systems $A_{i}A_{j}E$ with
$j\neq i$ are arbitrarily close to the bipartite ideal ccq state.
More precisely, if $\big\|\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}-\varrho_{\mathsf{A}E}^{(N,\mathrm{id})}\big\|_{1}\leq\epsilon$
holds for $\epsilon>0$,
then for the fixed party $A_{i}$ the following inequalities
\begin{eqnarray}\label{Tw2tresc}
\left\|\sum_{i_{1},\ldots, i_{N}=0}^{d-1}p_{i_{1}\ldots
i_{N}}\proj{i_{i}i_{j}}\ot\varrho_{i_{1}\ldots i_{N}}^{E}
-\varrho_{\mathsf{A}E}^{(2,\mathrm{id})}\right\|_{1}\leq \epsilon\nonumber\\
\end{eqnarray}
are satisfied for $j=1,\ldots,i-1,i+1,\ldots,N$.
Conversely, assuming that for fixed $A_{i}$ the inequalities
(\ref{Tw2tresc})
hold for $\epsilon>0$ and $j\neq i$, one has
\begin{equation}
\left\|\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}-
\varrho_{\mathsf{A}E}^{(N,\mathrm{id})}\right\|_{1}\leq(4N-3)\epsilon.
\end{equation}
{\it Proof.} We proceed in two steps. In the first step
we show that if the trace norm distance between some multipartite
\textsf{c}q state $\mathsf{\varrho}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$
and the ideal one is bounded by some $\epsilon>0$ then any bipartite
state arising by tracing out $N-2$ parties from the \textsf{c}q state is
close to the bipartite ideal ccq state. This part of the proof is relatively
easy since it suffices to utilize the fact that the trace norm distance
does not increase under the partial trace.
The proof of the converse statement is much more sophisticated.
Let us assume that the following
\begin{eqnarray}\label{Tw1}
\left\|\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}-\varrho_{\mathsf{A}E}^{(N,\mathrm{id})}\right\|_{1}\leq\epsilon
\end{eqnarray}
holds for some small $\epsilon>0$. Then since the trace norm does
not increase under the partial trace we have immediately the
following set of inequalities
\begin{eqnarray}\label{Tw2}
\left\|\sum_{i_{1},\ldots, i_{N}=0}^{d-1}p_{i_{1}\ldots
i_{N}}\proj{i_{k}i_{l}}\ot\varrho_{i_{1}\ldots i_{N}}^{E}
-\varrho_{\mathsf{A}E}^{(2,\mathrm{id})}\right\|_{1}\leq \epsilon\nonumber\\
\end{eqnarray}
for any pair of indices $k,l=1,\ldots,N$. To end the first part of the proof it suffices
to substitute $\sum_{I\setminus\{k,l\}}p_{i_{1}\ldots i_{N}}\varrho_{i_{1}\ldots i_{N}}^{E}=q_{i_{k}i_{l}}\varrho_{i_{k}i_{l}}^{E}$, where summation over
$I\setminus \{k,l\}$ means that we sum over all $i_{j}$ but $i_{k}$ and $i_{l}$.
To proceed with the second part of the proof we assume that one
chosen party, say $A_{1}$, shares with the remaining $N-1$ parties
a state that is close to the bipartite ideal \textsf{c}q state. In
other words we assume that for any $j=2,\ldots,N$ the following
inequalities
\begin{eqnarray}\label{Tw3}
\left\|\sum_{i_{1},\ldots, i_{N}=0}^{d-1}p_{i_{1}\ldots
i_{N}}\proj{i_{1}i_{j}}\ot\varrho_{i_{1}\ldots i_{N}}^{E}
-\varrho_{\mathsf{A}E}^{(2,\mathrm{id})}\right\|_{1}\leq \epsilon\nonumber\\
\end{eqnarray}
are satisfied. Basing on this set of inequalities we will show
that the left--hand side of Eq. \eqref{Tw1} is bounded from above
by some linear function of $\epsilon$ vanishing for $\epsilon\to
0$. For this purpose let us denote the left--hand side of Eq.
\eqref{Tw1} by LHS and notice that it can be split into two sums
(see Eqs. \eqref{idealcq} and \eqref{nccq}), namely the one
containing the elements for $i_{1}=\ldots =i_{N}$ and the rest
ones. In this light, denoting by $I$ the set of
sequences $(i_{1},\ldots,i_{N})$ obtained by removing all those
with $i_{1}=\ldots=i_{N}$ from the set of all possible sequences,
we can write
\begin{eqnarray}\label{Tw5}
\mathrm{LHS}&=&\sum_{(i_{1},\ldots,i_{N})\in I} p_{i_{1}\ldots
i_{N}}+\sum_{i=0}^{d-1}\norsl{p_{i\ldots i}\varrho_{i\ldots
i}^{E}-\frac{1}{d}\varrho^{E}}\nonumber\\
&\negmedspaces\leq\negmedspaces&\sum_{(i_{1},\ldots,i_{N})\in I}
p_{i_{1}\ldots i_{N}}+\sum_{i=0}^{d-1}p_{i\ldots i}\norsl{\varrho_{i\ldots i}^{E}-\varrho^{E}}\nonumber\\
&&+\sum_{i=0}^{d-1}\left|p_{i\ldots i}-\frac{1}{d}\right|,
\end{eqnarray}
where the inequality comes from the fact that the term
$p_{i\ldots i}\varrho_{i\ldots i}^{E}$ was added and subtracted in
the second term in the first line and from the inequality $\|A+B\|_{1}\leq \|A\|_{1}+\|B\|_{1}$.
The last equality is a simple consequence of the fact that the trace norm
of any density matrix is just one. In what follows, using the inequalities
\eqref{Tw3}, we show that all the three terms appearing in the above
are bounded by linear functions of $\epsilon$ vanishing for $\epsilon\to 0$.
With this aim, utilizing once more the fact that the trace norm does not increase
under the partial trace, we can infer from Eq. \eqref{Tw3} that
\begin{equation}\label{Tw6}
\norsl{\sum_{i_{1},\ldots,i_{N}=0}^{d-1}p_{i_{1}\ldots i_{N}}\proj{i_{1}i_{j}}-\frac{1}{d}\sum_{i=0}^{d-1}\proj{ii}}\leq \epsilon
\end{equation}
for $j=2,\ldots,N$. Now we can divide all the terms appearing in the first sum into
two groups, namely, the one for $i_{1}=i_{j}$ and the remaining terms. This, after calculating
the respective norms, leads to the following inequality
\begin{eqnarray}\label{Tw8}
&&\sum_{i=0}^{d-1}\left|\sum_{i_{2},\ldots,i_{j-1},i_{j+1},\ldots,i_{N}=0}^{d-1}p_{ii_{2}
\ldots i_{j-1}ii_{j+1}\ldots i_{N}}-\frac{1}{d}\right|\nonumber\\
&&\hspace{1cm}+\sum_ {\substack{i_{1},\ldots,i_{N}=0\\i_{1}\neq
i_{j}}}^{d-1}p_{i_{1}\ldots i_{N}} \leq\epsilon.
\end{eqnarray}
Obviously, since both terms in the above are nonnegative, any of them must be
less or equal to $\epsilon$. This allows us to write the inequalities
\begin{equation}\label{Ineq47}
\sum_{i=0}^{d-1}\left|\sum_{i_{2},\ldots,i_{j-1},i_{j+1},\ldots,i_{N}=0}^{d-1}p_{ii_{2}
\ldots i_{j-1}ii_{j+1}\ldots i_{N}}-\frac{1}{d}\right|\leq \epsilon
\end{equation}
and
\begin{equation}\label{Ineq48}
\sum_ {\substack{i_{1},\ldots,i_{N}=0\\i_{1}\neq
i_{j}}}^{d-1}p_{i_{1}\ldots i_{N}} \leq\epsilon.
\end{equation}
From the sum appearing under the sign of an absolute value
in (\ref{Ineq47}) we can extract the probability $p_{i\ldots i}$, obtaining
\begin{equation}\label{Tw10}
\sum_{i=0}^{d-1}\left|p_{i \ldots i}-\frac{1}{d}+\sum_
{\substack{i_{2},\ldots
i_{j-1},i_{j+1},\ldots,i_{N}=0\\(i_{2},\ldots
i_{j-1},i_{j+1},\ldots,i_{N})\in\mathcal{I}_{i}}}^{d-1}
p_{ii_{2}\ldots i_{j-1}i i_{j+1} \ldots i_{N}}\right| \leq
\epsilon,
\end{equation}
where $\mathcal{I}_{i}$ denotes the strings of $N-2$ indices
$(i_{2},\ldots,i_{j-1},i_{j+1},\ldots,i_{N})$ such
that at least one of them is different from $i$.
Utilizing a simple inequality $|z_{1}-z_{2}|\geq |z_{1}|-|z_{2}|$ satisfied
by all $z_{1},z_{2}\in\mathbb{C}$, we get
\begin{eqnarray}\label{Tw11}
\sum_{i=0}^{d-1}\left|p_{i \ldots i}-\frac{1}{d}\right|&\negmedspaces\leq
\negmedspaces&\epsilon+
\sum_{i=0}^{d-1}\sum_{(i_{2},\ldots,i_{j-1},i_{j+1},\ldots,i_{N})\in\mathcal{I}_{i}}
\hspace{-1cm}p_{ii_{2}\ldots i_{j-1}i i_{j+1}\ldots i_{N}}\nonumber\\
&\negmedspaces= \negmedspaces &\epsilon+
\sum_{(i_{1},\ldots,i_{N})\in\widetilde{\mathcal{I}}_{j}}
p_{i_{1}\ldots i_{N}},
\end{eqnarray}
where $\widetilde{\mathcal{I}}_{j}$ denotes the string of $N$
indices such that the first and $j$th ones are equal ($i_{1}=
i_{j}$) and at least one of the remaining ones is different from
$i_{1}$. One sees that the second term on the right--hand side of
Eq. \eqref{Tw11} may be bounded from above in the following way
\begin{eqnarray}\label{Tw13}
\sum_{(i_{1},\ldots,i_{N})\in\widetilde{\mathcal{I}}_{j}}
p_{i_{1}\ldots i_{N}}&\negmedspaces\leq\negmedspaces& \sum_{k=2}^{j-1}\sum_{\substack{i_{1},\ldots,i_{N}=0\\i_{k}\neq i_{1}}}^{d-1}p_{i_{1}\ldots i_{N}}\nonumber\\
&&+\sum_{k=j+1}^{N}\sum_{\substack{i_{1},\ldots,i_{N}=0\\i_{k}\neq i_{1}}}^{d-1}p_{i_{1}\ldots i_{N}}\nonumber\\
&\negmedspaces\leq\negmedspaces &\sum_{k=2}^{j-1}\epsilon+\sum_{k=j+1}^{N}\epsilon\nonumber\\
&\negmedspaces=\negmedspaces& (N-2)\epsilon,
\end{eqnarray}
where the second inequality is a consequence of the inequality
given in Eq. \eqref{Ineq48}. Finally, application of Eq. \eqref{Tw13}
to Eq. \eqref{Tw11}, gives
\begin{equation}\label{Tw14}
\sum_{i=0}^{d-1}\left|p_{i\ldots i}-\frac{1}{d}\right|\leq (N-1)\epsilon.
\end{equation}
This is a quite natural conclusion saying that if the measurement
outcomes between fixed party (here $A_{1}$) and each of the
remaining ones are almost perfectly correlated then the
measurement outcomes are almost perfectly correlated among all the
parties.
We have still two terms in Eq. \eqref{Tw5} unbounded.
Using once again the inequality $|z_{1}-z_{2}|\geq |z_{1}|-|z_{2}|$ $(z_{1},z_{2}\in\mathbb{C})$
and the fact that $p_{i_{1}\ldots i_{N}}$ represents some probability distribution
we may write
\begin{eqnarray}\label{Tw17}
\sum_{(i_{1},\ldots,i_{N})\in I}
p_{i_{1}\ldots i_{N}}&\negmedspaces=\negmedspaces&1-\sum_{i=0}^{d-1}p_{i\ldots i}\nonumber\\
&\negmedspaces\leq \negmedspaces&1-[1-(N-1)\epsilon]\nonumber\\
&\negmedspaces=\negmedspaces&(N-1)\epsilon.
\end{eqnarray}
Thus, the only thing we need is to bound from above the last term
in Eq. \eqref{Tw5}. Remarkably, to achieve this aim it suffices to
utilize a single inequality from the whole set \eqref{Tw3}, say
the one for $j=2$. Then we can write
\begin{widetext}
\begin{eqnarray}\label{Tw19}
&&\norsl{\sum_{i_{1},\ldots,i_{N}=0}^{d-1}p_{i_{1}\ldots i_{N}}\proj{i_{1}i_{2}}\ot
\varrho_{i_{1}\ldots i_{N}}^{E}-\frac{1}{d}\sum_{i=0}^{d-1}\proj{ii}\ot\varrho^{E}}\nonumber\\
&&=\left\|\sum_{i=0}^{d-1}p_{i\ldots
i}\proj{ii}\ot\varrho_{i\ldots
i}^{E}-\frac{1}{d}\sum_{i=0}^{d-1}\proj{ii}\ot\varrho^{E}+\hspace{-0.4cm}
\sum_{(i_{1},\ldots,i_{N})\in\widetilde{\mathcal{I}}_{2}}
p_{i_{1}\ldots i_{N}}\proj{i_{1}i_{1}}\ot\varrho_{i_{1}\ldots
i_{N}}^{E} +
\hspace{-0.4cm}\sum_{\substack{i_{1},\ldots,i_{N}=0\\i_{1}\neq i_{2}}}^{d-1}
\hspace{-0.4cm}p_{i_{1}\ldots i_{N}}\proj{i_{1}i_{2}}\ot\varrho_{i_{1}\ldots i_{N}}^{E}\right\|_{1}.\nonumber\\
\end{eqnarray}
Then, due to the fact that $\|A-B\|_{1}\geq \|A\|_{1}-\|B\|_{1}$, we may rewrite the above
as
\begin{eqnarray}\label{Tw20}
\norsl{\sum_{i=0}^{d-1}p_{i\ldots i}\proj{ii}\ot\varrho_{i\ldots
i}^{E}-\frac{1}{d}\sum_{i=0}^{d-1}\proj{ii}\ot\varrho^{E}}&\negmedspaces\leq\negmedspaces&
\epsilon
+\sum_{(i_{1},\ldots,i_{N})\in\widetilde{\mathcal{I}}_{2}}p_{i_{1}\ldots
i_{N}}
+\sum_{\substack{i_{1},\ldots,i_{N}=0\\i_{1}\neq i_{2}}}^{d-1}p_{i_{1}\ldots i_{N}}\nonumber\\
&\negmedspaces\leq\negmedspaces & \epsilon+(N-2)\epsilon+\epsilon=N\epsilon,
\end{eqnarray}
where the second inequality follows from Eqs. \eqref{Tw8} and \eqref{Tw13} (with $j=2$).
On the other hand, we can easily show that
\begin{eqnarray}\label{Tw21}
\norsl{\sum_{i=0}^{d-1}p_{i\ldots i}\proj{ii}\ot\varrho_{i\ldots
i}^{E}-\frac{1}{d}\sum_{i=0}^{d-1}\proj{ii}\ot\varrho^{E}} \geq
\sum_{i=0}^{d-1}p_{i\ldots i}\norsl{\varrho_{i\ldots
i}^{E}-\varrho^{E}}-
\sum_{i=0}^{d-1}\left|p_{i\ldots i}-\frac{1}{d}\right|.\nonumber\\
\end{eqnarray}
Comparison of Eqs. \eqref{Tw14}, \eqref{Tw20} and \eqref{Tw21} allows us to write
\begin{eqnarray}\label{Tw22}
\sum_{i=0}^{d-1}p_{i\ldots i}\norsl{\varrho_{i\ldots i}^{E}-\varrho^{E}}&\negmedspace\negmedspace\leq \negmedspace\negmedspace& N\epsilon+
\sum_{i=0}^{d-1}\left|p_{i\ldots i}-\frac{1}{d}\right|\nonumber\\
&\negmedspace\negmedspace\leq\negmedspace\negmedspace& N\epsilon+(N-1)\epsilon\nonumber\\
&\negmedspace\negmedspace=\negmedspace\negmedspace&(2N-1)\epsilon.
\end{eqnarray}
Putting now all the pieces together, that is, substituting Eqs. \eqref{Tw14}, \eqref{Tw17}, and \eqref{Tw22}
to Eq. \eqref{Tw5}, we finally have
\begin{equation}
\norsl{\sum_{i_{1},\ldots, i_{N}=0}^{d-1}p_{i_{1}\ldots
i_{N}}\proj{i_{1}\ldots i_{N}} \ot\varrho_{i_{1}\ldots i_{N}}^{E}-
\frac{1}{d}\sum_{i=0}^{d-1}\proj{i}^{\ot N}\ot\varrho^{E}}\leq
(4N-3)\epsilon.
\end{equation}
\end{widetext}
Noting that for fixed $N$ it holds that $(4N-3)\epsilon\to 0$
whenever $\epsilon\to 0$ we finish the proof. $\vrule height 4pt width 3pt depth2pt$
It should be mentioned that as it follows from the second part of
the proof, we do not need to assume the whole set of inequalities
given in \eqref{Tw3}. Actually it suffices to assume that a single
inequality from the set \eqref{Tw3} holds and the remaining ones
from the set given in Eq. \eqref{Tw6}. In other words it suffices
to assume that the measurement outcomes between a fixed party and
any from the other parties are almost perfectly correlated and
that Eve is almost completely uncorrelated from the measurement
outcomes of a single pair. This is in full agreement with our
intuition. Namely, if the measurement outcomes of any pair
$A_{i}A_{j}$ (with fixed $i$ and arbitrary $j\neq i$) are
perfectly correlated and Eve has a full knowledge about the
measurement outcomes of just a single party, she actually has the
knowledge about measurement outcomes of all parties. Therefore if
all the parties have perfect correlations and Eve is completely
uncorrelated from a single party, she must be completely
uncorrelated from all the parties. Consequently, it is sufficient
to assume that a single pair shares state that is close to a ccq
state and other chosen pairs have almost perfect correlations.
Now we are prepared to provide a lower bound on the multipartite
distillable key in the LOPC paradigm. We achieve this by extending
of the Devetak--Winter protocol to the multipartite case. We do
this by applying the bipartite Devetak--Winter protocol to $N-1$
pairs of parties in some state $\varrho_{\mathsf{A}E}$ such that
each of them consist of one chosen party, say $A_{1}$, and one of
the remaining ones. Everything works as in the standard
Devetak--Winter protocol, i.e., the party $A_{1}$ performs the
measurement in some basis, e.g. the standard one obtaining the
so--called c\textsf{q} state
(classical--quantum--$\ldots$--quantum)
\begin{equation}\label{generalcq}
\varrho_{\mathsf{A}E}^{(c\textsf{q})}=\sum_{i}p_{i}\proj{i}_{A_{1}}\ot \varrho_{A_{2}\ldots A_{N}E}^{(i)}.
\end{equation}
Then, roughly speaking, the party $A_{1}$ performs the
Devetak--Winter protocol with the remaining parties
simultaneously. One knows that the correlation between $A_{1}$ and
the remaining parties $A_{j}$ $(j=2,\ldots,N)$ are described by
the mutual information
$I(A_{1}\!:\!A_{j})(\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})})$.
However, to establish a common multipartite key we need to consider
the worst case, i.e.,
$\min_{j=2,\ldots,N}I(A_{1}\!:\!A_{j})(\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})})$.
On the other hand, the correlation between $A_{1}$ and $E$ are
given by $I(A_{1}\!:\!E)$ and this amount of bits has to be
subtracted at the privacy amplification stage of the process.
Consequently, the rate of the protocol is
\begin{equation}\label{rate}
\min_{j=2,\ldots,N}I(A_{1}\!:\!A_{j})(\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})})-
I(A_{1}\!:\!E)(\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})})
\end{equation}
and therefore, the multipartite distillable key satisfies
\begin{equation}\label{DWbound1}
C_{D}(\varrho_{\mathsf{A}E})\geq
\min_{j=2,\ldots,N}I(A_{1}\!:\!A_{j})(\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})})-
I(A_{1}\!:\!E)(\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})}).
\end{equation}
Here, $\varrho_{A_{1}A_{j}E}^{(\mathrm{cqq})}$ denotes the cqq
state, which arises from \eqref{generalcq} by tracing out all the
parties but the first and $j$th one and Eve. Moreover, by
$I(X\!:\!Y)(\varrho_{XY})$ we denoted the mutual information
defined as
$I(X\!:\!Y)(\varrho_{XY})=S(\varrho_{X})+S(\varrho_{Y})-S(\varrho_{XY})$
with $S$ denoting the von Neumann entropy.
We have still some freedom in choosing the distributing party and
therefore we can always choose the one for which the rate of the
extended Devetak--Winter protocol is highest. In this way we get
the lower bound on $C_{D}$ of the form
\begin{eqnarray}\label{DWbound2}
&&C_{D}(\varrho_{\mathsf{A}E})\geq
\max_{i=1,\ldots,N}\nonumber\\
&&\times\left[\min_{\substack{j=1,\ldots,N\\j\neq
i}}I(A_{i}\!:\!A_{j})(\varrho_{A_{i}A_{j}E}^{(\mathrm{cqq})})
-I(A_{i}\!:\!E)(\varrho_{A_{i}A_{j}E}^{(\mathrm{cqq})})\right],\nonumber\\
\end{eqnarray}
Let us finally mention that due to Theorem IV.1 we can also
bound $K_{D}$ from below using \eqref{DWbound2}. Namely, since
$K_{D}(\varrho_{\mathsf{A}})=C_{D}(\ket{\psi_{\mathsf{A}E}})$, we
have the following
\begin{equation}\label{DWbound3}
K_{D}(\varrho_{\mathsf{A}})\geq
\max_{i=1,\ldots,N}\left[\min_{\substack{j=1,\ldots,N\\j\neq i}}I(A_{i}\!:\!A_{j})-
I(A_{i}\!:\!E)\right],
\end{equation}
where the respective quantities are calculated from e.g., the \textsf{c}q state
following the purification of $\varrho_{\mathsf{A}}$.
Now we can go back to the definition of $K_{D}$. As previously
mentioned, it holds that $K_{D}(P_{d,N}^{(+)})=\log d$. To show it
explicitly, on the one hand we can utilize the above bound. We
know from Theorem IV.1 that
$K_{D}(P_{d,N}^{(+)})=C_{D}(\ket{\psi_{\mathsf{A}E}^{(+)}})$,
where $\ket{\psi_{\mathsf{A}E}^{(+)}}$ is a purification of
$P_{d,N}^{(+)}$ and obviously has the form
$\ket{\psi_{d,N}^{(+)}}_{\mathsf{A}}\ket{E}$ with $\ket{E}$ being
some state kept by Eve. Measurement of the $\mathsf{A}$ subsystem of
$\ket{\psi_{\mathsf{A}E}^{(+)}}$ with respect to the standard
basis leads us to the ideal \textsf{c}q state
$\varrho_{\mathsf{A}E}^{(\textsf{c}\mathrm{q})}=\omega_{\mathsf{A}}^{(d,N)}\ot\proj{E},$
where
\begin{equation}
\omega_{\mathsf{A}}^{(d,N)}=\frac{1}{d}\sum_{i=0}^{d-1}\proj{i}^{\ot N},
\end{equation}
which has the quantities $I(A_{i}\!:\!A_{j})=\log d$ $(i,j=1,\ldots,N)$
and $I(A_{i}\!:\!E)=0$ $(i=1,\ldots,N)$. Substituting both these facts into Eq.
\eqref{DWbound2} gives us $K_{D}(P_{d,N}^{(+)})\geq \log d$.
On the other hand we can utilize the bound given in Eq. \eqref{boundEntropy}.
Firstly, notice that $S(\rho^{\ot n}\|\sigma^{\ot n})=nS(\rho\|\sigma)$ for
an arbitrary natural number $n$ and arbitrary density matrices $\rho$ and $\sigma$.
Secondly, one easily finds that (see e.g. Ref. \cite{RelativeEntanglement})
\begin{equation}
S\big(P_{d,N}^{(+)}\big\|\omega_{\mathsf{A}}^{(d,N)}\big)=\log d
\end{equation}
and consequently the following estimate holds
\begin{eqnarray}
K_{D}(P_{d,N}^{(+)})&\negmedspace\negmedspace=\negmedspace\negmedspace&\lim_{n\to\infty}\frac{1}{n}
\inf_{\varrho_{\mathsf{A}}^{\mathrm{sep}}\in\mathcal{D}}S(P_{d,N}^{(+)\ot n}\|\varrho_{\mathsf{A}}^{\mathrm{sep}})\nonumber\\
&\negmedspace\negmedspace\leq\negmedspace\negmedspace&\lim_{n\to\infty}\frac{1}{n}S\big(P_{d,N}^{(+)\ot n}\big\|\omega_{\mathsf{A}}^{(d,N)\ot n}\big)\nonumber\\
&\negmedspace\negmedspace=\negmedspace\negmedspace&\lim_{n\to\infty}\frac{1}{n}nS\big(P_{d,N}^{(+)}\big\|\omega_{\mathsf{A}}^{(d,N)}\big)\nonumber\\
&\negmedspace\negmedspace=\negmedspace\negmedspace&\log d.
\end{eqnarray}
Thus $K_{D}(P_{d,N}^{(+)})\leq \log d$ and taking into account the previously
obtained inequality $K_{D}(P_{d,N}^{(+)})\geq \log d$ we infer $K_{D}(P_{d,N}^{(+)})=\log d$.
Thus, as stated previously, the multipartite distillable key may be
considered as an entanglement measure.
Let us discuss the last issue of this section.
To apply the extended Devetak--Winter protocol
successfully, that is to get a nonzero rate, one obviously has to have
the right--hand side of Eq. \eqref{DWbound2} positive.
One knows from Theorem IV.1 that distillation of some multipartite
private state by means of LOCC is equivalent to the distillation
of an ideal \textsf{c}q state by means of LOPC. This in turn means
that the closer some particular state $\varrho_{\mathsf{AA}'}$ is
to some multipartite private state, the closer is a \textsf{c}q
state obtained from it to the ideal \textsf{c}q state.
Then, from Theorem IV.3 it follows that the closer some
\textsf{c}q state is to the ideal \textsf{c}q state the closer are
its bipartite reductions to the bipartite ideal ccq state. Both
these facts mean that by distilling some multipartite private
state from copies of a given state we can make the right--hand
side of Eq. \eqref{DWbound2} (equivalently Eq. \eqref{DWbound3})
positive. Consequently, concatenating some LOCC protocol
distilling multipartite private states (an example of such a
protocol is given in the following subsection) and the extended
Devetak--Winter protocol introduces a subtle effect here. Namely,
on the one hand, using more copies in the LOCC protocol producing
a state that is closer to some multipartite private state makes
the right--hand side of Eq. \eqref{DWbound2} larger. On the other
hand spending more copies decreases the success probability which
needs to be included in the overall rate of the protocol. This
issue will become more clear when some particular classes of
states will be investigated in the next section.
\subsection{Recursive LOCC protocol distilling multipartite private states}
\label{LOCCProtocol}
Here we provide an illustrative example of a recursive LOCC
protocol allowing for distillation of multipartite private states
from some classes of states. This protocol is a generalization of
the LOCC protocol discussed in Ref. \cite{KH} to the case of an
arbitrary number of parties. Assume then that $N$ parties
$A_{1},\ldots,A_{N}$ have $k$ copies of some state
$\rho_{\mathsf{AA}'}$ in their possession. In $i$th step each
party performs the following operations.
\begin{itemize}
\item Take the state $\rho_{\mathsf{AA'}}^{(i-1)}$ (where $\rho_{\mathsf{AA'}}^{(0)}=\rho_{\mathsf{AA}'}$)
and one of the remaining $k-i$ copies of $\rho_{\mathsf{AA'}}$.
\item Treating $\mathsf{A}$ part of
$\rho_{\mathsf{AA'}}^{(i-1)}$ ($\rho_{\mathsf{AA'}}$) as source (target)
qubits, perform CNOT operations.
\item Finally, the parties perform the measurement in computational
basis on the target qubits and compare the results:
in the case of equal results (all zeros or all ones) the parties
keep the state, otherwise they get rid of it.
\end{itemize}
In this way, spending $k$ copies of some state
$\rho_{\mathsf{AA}'}$, all the parties can distill a state
$\varrho_{\mathsf{AA}'}^{(k)}$ that is closer to some multipartite
private state than the initial one, i.e., $\rho_{\mathsf{AA}'}$.
Quantitative analysis concerning this protocol after application
to two different constructions of states may be found in Sections
\ref{Giechazety} and \ref{Giechazety2}.
\subsection{Multipartite privacy squeezing}
\label{PrivacySqueezing}
Concluding the discussion concerning the distillable key we need
to mention the multipartite version of the so--called {\it privacy
squeezing} \cite{KH0,KH} together with its application in the recent
important method \cite{HPHH} of bounding the secret key from
below. Following Lemma III.1 we know that having some state
$\varrho_{\mathsf{AA}'}$ expressed in the form \eqref{form2},
there always exists a twisting $U_{t}$ such that the state
$\widetilde{\varrho}_{\mathsf{A}}=\Tr_{\mathsf{A}'}(U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})$
has some special form. Namely, in some chosen row (column) some of
its entries are trace norms of respective blocks of
$\varrho_{\mathsf{AA}'}$. We will
call the state $\widetilde{\varrho}_{\mathsf{A}}$ obtained
in this way {\it privacy squeezed state}. Furthermore, we already know
that twistings do not change the $\mathsf{c}$q state with respect
to some basis $\mathcal{B}_{N}^{\mathrm{prod}}$.
Let us now proceed by stating some of the conclusion
following both the above facts. As previously mentioned we have that
$K_{D}(\varrho_{\mathsf{A}})=C_{D}(\ket{\psi_{\mathsf{A}E}})$, where
$\ket{\psi_{\mathsf{A}E}}$ denotes the purification of $\varrho_{\mathsf{A}}$.
Assuming that all the parties share some state $\varrho_{\mathsf{AA}'}$ defined on
$\mathcal{H}\ot\mathcal{H}'$ and denoting by $\ket{\psi_{\mathsf{AA}'E}}$ the purification
of $\varrho_{\mathsf{AA}'}$, we have
\begin{equation}\label{szacowanieKluczy}
K_{D}(\varrho_{\mathsf{AA}'})=C_{D}(\ket{\psi_{\mathsf{AA}'E}})\geq C_{D}(\varrho_{\mathsf{A}E})\geq
C_{D}(\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}).
\end{equation}
Here
$\varrho_{\mathsf{A}E}=\Tr_{\mathsf{A}'}\proj{\psi_{\mathsf{AA}'E}}$
and $\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ stands for a
\textsf{c}q state obtained upon the measurement of the
$\mathsf{A}$ subsystem in $\mathcal{B}_{N}^{\mathrm{prod}}$. The
first inequality follows from the fact that throwing out the
$\mathsf{A}'$ subsystem one can only lower the key as it could be
treated 'virtually' as giving it to Eve. The second inequality is
a consequence of the fact that measurement in some product basis
leads to classical state on the $\mathsf{A}$ part of the state
(notice that such measurement is LOPC operation which due to the
definition of $C_{D}$ can only lower its value).
Now we can formulate and prove the following theorem as a
multipartite generalization of the bipartite considerations from
Ref. \cite{HPHH} (cf. \cite{KHPhD}) which exploit privacy
squeezing to bound the secure key from below.
{\it Theorem IV.4.} Let $\varrho_{\mathsf{AA}'}$ be some $N$--partite
state defined on $\mathcal{H}\ot\mathcal{H}'$. Then
\begin{equation}\label{theoremIV4}
K_{D}(\varrho_{\mathsf{AA}'})\geq
C_{D}(\widetilde{\varrho}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}),
\end{equation}
where $\widetilde{\varrho}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ is a \textsf{c}q state
derived from purification $\ket{\widetilde{\varrho}_{\mathsf{A}E}}$ of privacy squeezed state
$\widetilde{\varrho}_{\mathsf{A}}=\Tr_{\mathsf{A}'}(U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})$.
{\it Proof.} Denoting by $\ket{\psi_{\mathsf{AA}'E}}$ the purification of
$\varrho_{\mathsf{AA}'}$, we have immediately from Eq. \eqref{szacowanieKluczy}
that $K_{D}(\varrho_{\mathsf{AA}'})
\geq C_{D}(\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})})$ with
$\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ standing for a
$\mathsf{c}$q state being a result of the measurement of
$\mathsf{A}$ part in $\mathcal{B}_{N}^{\mathrm{prod}}$ and tracing
$\mathsf{A}'$ part of $\ket{\psi_{\mathsf{AA}'E}}$. Then, as
already stated, for any twisting $U_{t}$ (in
$\mathcal{B}_{N}^{\mathrm{prod}}$) the states
$\varrho_{\mathsf{AA}'}$ and
$\widetilde{\varrho}_{\mathsf{AA}'}\equiv
U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger}$ have the same
\textsf{c}q states with respect to the basis
$\mathcal{B}_{N}^{\mathrm{prod}}$. Consequently,
$C_{D}(\varrho_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})})=
C_{D}(\sigma_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})})$ with
$\sigma_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ being a
\textsf{c}q state derived from the twisted state
$U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger}$ (obviously {\it via}
its purification). Now, we can consider the situation in which the
$\mathsf{A}'$ subsystem is given to Eve. This means that instead
of taking 'huge' purification
$\ket{\widetilde{\psi}_{\mathsf{AA'}E}}$ of the privacy squeezed
state
$\widetilde{\varrho}_{\mathsf{A}}=\Tr_{\mathsf{A}'}\widetilde{\varrho}_{\mathsf{AA}'}
=\Tr_{\mathsf{A}'}(U_{t}\varrho_{\mathsf{AA}'}U_{t}^{\dagger})$ we
can take a 'smaller' version denoted by
$\ket{\widetilde{\varrho}_{\mathsf{A}E}}$ (more precisely to
purify some density matrix acting on $\mathcal{H}$ it suffices to
use a Hilbert space of lower dimensionality than to purify a state
acting on $\mathcal{H}\ot\mathcal{H}'$). The new purification
obviously must obey
$\widetilde{\varrho}_{\mathsf{A}}=\Tr_{E}\proj{\widetilde{\varrho}_{\mathsf{A}E}}$.
Now comparing these two situations we infer that
$C_{D}(\sigma_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})})\geq
C_{D}(\widetilde{\varrho}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})})$
holds, where
$\widetilde{\varrho}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ is
$\mathsf{c}$q state appearing upon measurement of $\mathsf{A}$
subsystem of $\ket{\widetilde{\varrho}_{\mathsf{A}E}}$ in
$\mathcal{B}_{N}^{\mathrm{prod}}$. The inequality is a consequence
of the fact that in the case of the first \textsf{c}q state the
$\mathsf{A}'$ part is left unused but still kept by the parties. In turn,
in the second situation the $\mathsf{A}'$ subsystem is treated as
it would be given to Eve when deriving
$\widetilde{\varrho}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$.
Giving some part of state can only lower the secrecy as in this
case, roughly speaking, she gains some information about what is
shared by the parties. This concludes the proof. $\vrule height 4pt width 3pt depth2pt$
\section{Constructions}
In this section we present two constructions of multipartite bound
entangled states with nonzero distillable cryptographic key. Both
are based on the structure exhibited by the GHZ states and
therefore the scheme of secure key distillation presented above
easily applies here.
The first construction is a straightforward generalization of the
bipartite construction presented in Ref. \cite{PHRANATO}.
Therefore, for comparative purposes, we present also a plot
containing a lower bound on distillable key in the bipartite case.
The second construction is completely new and in comparison to the
first one allows to get a higher lower bounds on distillable key
than the first one.
Before we start it is desirable to establish the notation that we
will use extensively below. By $\mathcal{P}_{0}^{(N)}$ we shall
denote a projector onto the $N$--partite pure state
$\ket{\psi_{0}^{(N)}}=\ket{0}^{\ot N}$ and $\mathcal{P}_{i}^{(N)}$
$(i=1,\ldots,N)$ is a projector onto the $N$--partite state
$\ket{\psi_{i}^{(N)}}$, in which the $i$th party possesses
$\ket{1}$, while other particles are in the $\ket{0}$ state. For
instance $\mathcal{P}_{2}^{(4)}$ denotes the projector onto the
four--partite pure state $\ket{\psi_{2}^{(4)}}=\ket{0100}$.
Moreover, let $\overline{\mathcal{P}}_{0}^{(N)}$ and
$\overline{\mathcal{P}}_{i}^{(N)}$ denote projectors obtained from
$\mathcal{P}_{0}^{(N)}$ and $\mathcal{P}_{i}^{(N)}$, respectively,
by exchanging all zeros and ones. Thus, for example
$\overline{\mathcal{P}}_{2}^{(4)}$ is the projector onto
$\ket{\overline{\psi}_{2}^{(4)}}=\ket{1011}$. We will denote in an analogous way
by $\ket{\psi_{ij}^{(N)}}$ ($\ket{\overline{\psi}_{ij}^{(N)}}$)
a $N$--qubit pure state, in which $i$th and $j$th
qubits are in the $\ket{1}$ ($\ket{0}$) state and the remaining ones are
in the $\ket{0}$ ($\ket{1}$) state. Then by
$\mathcal{P}_{ij}^{(N)}$ and $\overline{\mathcal{P}}_{ij}^{(N)}$
we denote projectors onto $\ket{\psi_{ij}^{(N)}}$ and
$\ket{\overline{\psi}_{ij}^{(N)}}$, respectively.
Let also $T_{i}$ denote the partial transposition with respect to
$i$th party (with the exception that $T_{0}$ denotes the identity
map). Here we usually assume that each party has two subsystems of
a given state $\varrho_{\mathsf{AA}'}$ and sometimes $T_{i}$ will
be denoting the partial transposition with respect to one or both
subsystems. It will be, however, clear from the context which of
the subsystems are partially transposed. Concatenation of partial
transpositions with respect to some subset of parties, say
$A_{1},\ldots,A_{k}$ will be denoted by $T_{1,\ldots,k}$.
\subsection{The first construction}
\langlebel{Giechazety}
Here we assume that the key part on each site is of qubit
structure, while the shield part has arbitrary dimension,
however, with the same dimension on each site. More precisely, we have
$\mathcal{H}_{i}=\mathbb{C}^{2}$ and
$\mathcal{H}_{i}'=\mathbb{C}^{D}$ $(i=1,\ldots,N)$.
Now, let us introduce the following matrix
\begin{equation}\label{XDn}
X_{D}^{(N)}=\frac{1}{D^{N}+2D-4}\left[(D-2)P^{(+)}_{D,N}-2P_{D}^{(N)}+Q_{D}^{(N)}\right],
\end{equation}
where, as previously, $P^{(+)}_{D,N}$ denotes a projector onto the
$N$--partite $D$--dimensional GHZ state (see Eq.
\eqref{GHZstates}), and $P_{D}^{(N)}$ and $Q_{D}^{(N)}$ are
projectors defined as
\begin{equation}\label{projektory1}
P_{D}^{(N)}=R_{D}^{(N)}-P^{(+)}_{D,N}, \qquad
Q_{D}^{(N)}=\mathbbm{1}_{D^{N}}-R_{D}^{(N)},
\end{equation}
where
\begin{equation}\label{projektory2}
R_{D}^{(N)}=\sum_{i=0}^{D-1}\proj{i}^{\ot N}.
\end{equation}
The projectors $P_{D}^{(N)}$ and $Q_{D}^{(N)}$ are chosen in such
a way that each operator from the triple $P^{(+)}_{D,N}$,
$P_{D}^{(N)}$, and $Q_{D}^{(N)}$ is defined on orthogonal support.
Furthermore, the denominator in Eq. \eqref{XDn} is chosen such
that the matrix $X_{D}^{(N)}$ is normalized in the trace norm.
The states under consideration are of the form
\begin{eqnarray}\label{Constr1}
\varrho_{\mathsf{AA'}}^{(D,N)}&\negmedspace\negmedspace=\negmedspace\negmedspace&\frac{1}{\mathcal{N}_{D}^{(N)}}
\left[\sum_{i=0}^{N}\left(\mathcal{P}_{i}^{(N)}
+\mathcal{\overline{P}}^{(N)}_{i}\right)_{\mathsf{A}}\ot
\left(\mo{X_{D}^{(N)T_{i}}}^{T_{i}}\right)_{\mathsf{A}'}\right.\nonumber\\
&&\left.+\left(\ket{0}\!\bra{1}^{\ot N}+\ket{1}\!\bra{0}^{\ot
N}\right)_{\mathsf{A}}\ot
\left(X_{D}^{(N)}\right)_{\mathsf{A}'}\right],
\end{eqnarray}
where the subscripts $\mathsf{A}$ and $\mathsf{A}'$ are indicated
to distinguish their key and shield parts, respectively. However,
for the sake of clarity, in further considerations these
subscripts will be omitted.
The normalization factor $\mathcal{N}_{D}^{(N)}$ appearing in Eq. \eqref{Constr1}
is given by
\begin{equation}\label{normfactor}
\mathcal{N}_{D}^{(N)}=2\frac{(N+1)D^{N}+2D-4}{D^{N}+2D-4}.
\end{equation}
At the beginning we need to show that the matrices
$\varrho_{\mathsf{AA}'}^{(D,N)}$ really represent quantum states,
i.e., they are positive (the normalization condition is already
satisfied). Firstly, let us notice that the blocks corresponding
to $\mathcal{P}_{0}$ and $\overline{\mathcal{P}}_{0}$ and the two
off--diagonal blocks in Eq. \eqref{Constr1} constitute a matrix of
the form
$\mathcal{M}_{2}\big(\big|X_{D}^{(N)}\big|,X_{D}^{(N)}\big)$ (see
Lemma A.1 for the definition of $\mathcal{M}_{N}$), positivity of
which is guaranteed by Lemma A.1. Thus the only thing we need to
deal with is to show that the remaining blocks lying on the
diagonal of $\varrho_{\mathsf{AA}'}^{(D,N)}$ are positive. To
achieve this goal, below we prove a more general lemma.
{\it Lemma V.1.} Let $X_{D}^{(N)}$ be defined by Eq. (\ref{XDn}).
Then the matrices $\big|X_{D}^{(N)T_{k}}\big|^{T_{l}}$ are
positive semi--definite for all $k,l=1,\ldots,N$.
{\it Proof.} Noticing that $R_{D}^{(N)}$ is diagonal for arbitrary
$D$ and $N$, the partial transposition of $X_{D}^{(N)}$ with
respect to the $k$--th subsystem may be written as
$X_{D}^{(N)T_{k}}=[1/(D^{N}+2D-4)]\big(S^{(N)T_{k}}_{D}-R_{D}^{(N)}\big)$,
where $S_{D}^{(N)}$ is defined as
\begin{equation}\label{S}
S_{D}^{(N)}=\mathbbm{1}_{D^{N}}+DP_{D,N}^{(+)} -2R_{D}^{(N)}.
\end{equation}
As we will see below $S_{D}^{(N)T_{k}}$ is positive for any
$k=1,\ldots,N$ and $S_{D}^{(N)T_{k}}R_{D}^{(N)}=0$. Consequently,
the absolute value of $X_{D}^{(N)T_{k}}$ may be obtained by simple
changing the sign before the projector $R_{D}^{(N)}$. To prove
positivity of $S_{D}^{(N)T_{k}}$ let
$\ket{\psi}=\sum_{i_{1},\ldots,i_{N}=0}^{D-1}\alpha_{i_{1}\ldots
i_{N}}\ket{i_{1}\ldots i_{N}}$ denote an arbitrary vector from
$(\mathbb{C}^{D})^{\ot N}$ written in the standard basis of
$(\mathbb{C}^{D})^{\ot N}$. Then we have
\begin{eqnarray}\label{pos_of_S}
\Br{\psi}S_{D}^{(N)T_{k}}\Ke{\psi}
&\negmedspace\negmedspace=\negmedspace\negmedspace&\sum_{i\neq j}
\alpha_{i\ldots j\ldots i}^{*}\alpha_{j\ldots i\ldots
j}+\sum_{(i_{1},\ldots,i_{N})\in I}
\left|\alpha_{i_{1}\ldots
i_{N}}\right|^{2}\nonumber\\
&\negmedspace\negmedspace=\negmedspace\negmedspace&\hspace{-0.3cm}\sum_{(i_{1},\ldots,i_{N})\in
I_{k}}\hspace{-0.3cm}
\left|\alpha_{i_{1}\ldots i_{N}}\right|^{2} +
\frac{1}{2}\sum_{i\neq j}
\left|\alpha_{i\ldots j\ldots i}+ \alpha_{j\ldots i \ldots
j}\right|^{2}\nonumber\\
&\negmedspace\negmedspace\geq\negmedspace\negmedspace & 0.
\end{eqnarray}
Here the notation $\alpha_{i\ldots j\ldots i}$ means that all
indices of $\alpha$s excluding the $k$--th one ($k$ stands for the
number of subsystem being partially transposed) are equal.
Moreover, as previously $I$ denotes the set of all sequences
$(i_{1},\ldots,i_{N})$ except the cases when $i_{1}=\ldots=i_{N}$,
while $I_{k}$ is the set $I$ minus all sequences in which all
indices but the one on $k$--th position are equal.
As the value of $k$ is not specified, the above considerations
hold for any $k=1,\ldots,N$. Furthermore, using the same
reasoning one can also prove positiveness of $S_{D}^{(N)}$ being
transposed with respect to any subset of different subsystems
(besides the full transposition). This fact will be utilized below.
By virtue of the positiveness of $S_{D}^{(N)T_{k}}$ we have that
$\big|X_{D}^{(N)T_{k}}\big|=[1/(D^{N}+2D-4)]\big(S_{D}^{(N)T_{k}}+R_{D}^{(N)}\big)$
for any $k=1,\ldots,N$. Therefore the partial transposition of the latter with respect
to the $l$--th subsystem gives
\begin{equation}\label{cos}
\left|X_{D}^{(N)T_{k}}\right|^{T_{l}}=\frac{1}{D^{N}+2D-4}
\left(S_{D}^{(N)T_{k,l}}+R_{D}^{(N)}
\right),
\end{equation}
where $T_{k,l}$ denotes the partial transposition with respect to
two single subsystems $A_{k}'$ and $A_{l}'$.
Now we can distinguish two cases, namely, if $k=l$ and $k\neq l$.
In the first one, double partial transpositions with
respect to the same subsystem is just an identity map. Consequently from Eqs.
\eqref{projektory1}, \eqref{projektory2}, and \eqref{S}, one has
\begin{equation}\label{Constr5}
\left|X_{D}^{(N)T_{k}}\right|^{T_{k}}=
\frac{1}{D^{N}+2D-4}
\left(Q_{D}^{(N)}+DP^{(+)}_{D,N}\right).
\end{equation}
Now the right--hand side of Eq. (\ref{Constr5}) is a linear combination of
two positive operators and thus is positive.
We have still left the second case, that is, when $k\neq l$. To resolve it we may
use the remark made above, saying that
the partial transposition of $S_{D}^{(N)}$ with respect to an arbitrary,
not necessarily one--partite, subset of subsystems is a positive matrix. This ends the proof. $\vrule height 4pt width 3pt depth2pt$
Thus we have just proven that $\varrho_{\mathsf{AA}'}^{(D,N)}$ indeed represent
quantum states. Now, our aim is to prove that on the one hand they are bound entangled
and on the other hand they have nonzero distillable key. This purpose will be achieved
in two steps. Firstly we show that partial transposition with respect to any
elementary subsystem $(A_{i}A_{i}')$ of $\varrho_{\mathsf{AA}'}^{(D,N)}$ is positive.
Obviously, this does not confirm that the states are bound entangled since
we do not even know they are entangled. However, the latter may be proven
by showing that $K_{D}$ of these states is nonzero for $D\geq 3$.
Firstly, we concentrate on the positivity of all partial
transpositions of $\varrho_{\mathsf{A}\mathsf{A}'}^{(D,N)}$. To
gain a better look on the problem let us consider a particular
example of such a partial transposition, namely,
$\varrho_{\mathsf{A}\mathsf{A}'}^{(D,3)T_{3}}$. From Eq.
\eqref{Constr1} it follows that
\begin{widetext}
\begin{equation}\label{Ex_PT}
\varrho_{\mathsf{A}\mathsf{A}'}^{(D,3)T_{3}}=\frac{1}{\mathcal{N}_{D}^{(3)}}\left[
\begin{array}{cccccccc}
\left|X_{D}^{(3)}\right|^{T_{3}} & \hspace{-0.3cm}0 & \hspace{-0.5cm}0 & \hspace{-0.8cm}0 & \hspace{-0.8cm}0 & \hspace{-0.8cm}0 & \hspace{-0.5cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}\left|X^{(3)T_{3}}_{D}\right| & \hspace{-0.5cm}0 & \hspace{-0.8cm}0 & \hspace{-0.8cm}0 & \hspace{-0.8cm}0 & \hspace{-0.5cm}X^{(3)T_{3}}_{D} & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}0 & \hspace{-0.5cm}\left|X^{(3)T_{2}}_{D}\right|^{T_{2,3}} & \hspace{-0.8cm}0 & \hspace{-0.7cm}0 & \hspace{-0.7cm}0 & \hspace{-0.5cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}0 & \hspace{-0.5cm}0 & \hspace{-0.8cm}\left|X^{(3)T_{1}}_{D}\right|^{T_{1,3}} & \hspace{-0.7cm}0 & \hspace{-0.7cm}0 & \hspace{-0.5cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}0 & \hspace{-0.5cm}0 & \hspace{-0.8cm}0 & \hspace{-0.7cm}\left|X^{(3)T_{1}}_{D}\right|^{T_{1,3}} & \hspace{-0.7cm}0 & \hspace{-0.5cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}0 & \hspace{-0.5cm}0 & \hspace{-0.8cm}0 & \hspace{-0.7cm}0 & \hspace{-0.7cm}\left|X^{(3)T_{2}}_{D}\right|^{T_{2,3}} & \hspace{-0.5cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}X^{(3)T_{3}}_{D} & \hspace{-0.5cm}0 & \hspace{-0.8cm}0 & \hspace{-0.7cm}0 & \hspace{-0.7cm}0 & \hspace{-0.5cm}\left|X^{(3)T_{3}}_{D}\right| & \hspace{-0.4cm}0 \\
0 & \hspace{-0.3cm}0 & \hspace{-0.5cm}0 & \hspace{-0.8cm}0 & \hspace{-0.7cm}0 & \hspace{-0.7cm}0 & \hspace{-0.5cm}0 & \hspace{-0.4cm}\left|X_{D}^{(3)}\right|^{T_{3}}
\end{array}
\right].
\end{equation}
\end{widetext}
As, due to Lemma A.1, the square matrix consisting of two diagonal
and two off--diagonal blocks of
$\varrho_{\mathsf{AA}'}^{(D,N)T_{i}}$ (cf. Eq. \eqref{Ex_PT})
i.e., the matrix
$\mathcal{M}_{2}(\big|X^{(N)T_{i}}_{D}\big|,X^{(N)T_{i}}_{D} )$,
is already positive, what we need to prove is positivity
of $\big|X_{D}^{(N)}\big|^{T_{i}}$ and
$\big|X_{D}^{(N)T_{i}}\big|^{T_{j,k}}$ for any $i,j,k=1,\ldots,N$.
Let us therefore prove the following lemma.
{\it Lemma V.2.} Let $X_{D}^{(N)}$ be given by Eq. \eqref{XDn}.
Then for any $i,j,k=1,\ldots,N$ it holds that
\begin{equation}
\left|X_{D}^{(N)}\right|^{T_{i}}\geq 0,\qquad \left|X_{D}^{(N)T_{i}}\right|^{T_{j,k}}\geq 0.
\end{equation}
{\it Proof.} Due to the definition of $X_{D}^{(N)}$ its absolute
value may be calculated simply by changing a sign before
$P_{D}^{(N)}$, giving
\begin{equation}
\left|X_{D}^{(N)}\right|=\frac{1}{D^{N}+2D-4}\left[(D-2)P^{(+)}_{D,N}+2P_{D}^{(N)}+Q_{D}^{(N)}\right].
\end{equation}
Application of partial transposition with respect to the $i$th
subsystem followed by substitution of Eq. \eqref{projektory1}
leads us to
\begin{equation}
\left|X_{D}^{(N)}\right|^{T_{i}}=\frac{1}{D^{N}+2D-4}\left[\mathbbm{1}_{D^{N}}+R_{D}^{(N)}+(D-4)P_{D,N}^{(+)T_{i}}
\right]
\end{equation}
for any $i=1,\ldots,N$. One may easily check that eigenvalues of
$P_{D,N}^{(+)T_{i}}$ belong to the interval $[-1/D,1/D]$ and
therefore the matrix $\mathbbm{1}_{D^{N}}+(D-4)P_{D,N}^{(+)T_{i}}$
is always positive. This, together with the fact that
$R_{D}^{(N)}\geq 0$, implies positivity of
$\big|X_{D}^{(N)}\big|^{T_{i}}$ for any $i=1,\ldots,N$.
The second fact of the lemma may be proven just by noting that by
virtue of Eq. (\ref{cos}) it holds
\begin{equation}
\left|X_{D}^{(N)T_{i}}\right|^{T_{j,k}}=\frac{1}{D^{N}+2D-4}
\left(S_{D}^{(N)T_{i,j,k}}+R_{D}^{(N)}
\right).
\end{equation}
As stated previously in the proof of Lemma V.1, the partial transposition
of $S_{D}^{(N)}$ with respect to arbitrary subsystems is positive.
This concludes the proof. $\vrule height 4pt width 3pt depth2pt$
The above lemma proves actually that all the partial
transpositions $\varrho_{\mathsf{AA}'}^{(D,N)T_{i}}$
$(i=1,\ldots,N)$ are positive. Therefore, the states
$\varrho_{\mathsf{AA}'}^{(D,N)}$ are bound entangled, of course
provided that they are entangled. This is because, due to the
result of Ref. \cite{bound}, positive partial transpositions with
respect to any elementary subsystem makes it impossible to distill
$k$--partite ($k=2,\ldots,N$) GHZ entanglement among any group of
parties.
Let us now pass to the proof that any state $\varrho_{\mathsf{AA}'}^{(D,N)}$
for $D\geq 3$ has nonzero $K_{D}$. For this purpose we show that using
the protocol from Section \ref{LOCCProtocol} we can produce a state that is closer
to some multipartite private state out of copies of $\varrho_{\mathsf{AA}'}^{(D,N)}$.
As we will show below we need to use as many copies as it is necessary to
make the quantity appearing on the right--hand side of Eq. \eqref{DWbound3}
strictly positive.
Application of the recursive LOCC protocol presented in Section
\ref{LOCCProtocol} to $k$ copies of
$\varrho_{\mathsf{AA}'}^{(D,N)}$ gives with probability
$p_{D,N}^{(k)}=2^{k-1}\mathcal{N}_{D,N}^{(k)}/\big(\mathcal{N}_{D}^{(N)}\big)^{k}$
the following state
\begin{eqnarray}\label{Theta1}
\Theta_{\mathsf{AA'}}^{(N,k)}&\negmedspace\negmedspace=\negmedspace\negmedspace&\frac{1}{\mathcal{N}_{D,N}^{(k)}}
\left[
\sum_{i=0}^{N}\left(\mathcal{P}_{i}^{(N)}+\mathcal{\overline{P}}^{(N)}_{i}\right)\ot
\left(\mo{X_{D}^{(N)T_{i}}}^{T_{i}}\right)^{\ot k}\right.\nonumber\\
&&\left.+\left(\ket{0}\!\bra{1}^{\ot N}+\ket{1}\!\bra{0}^{\ot N
}\right)\ot \left(X_{D}^{(N)}\right)^{\ot k}\right],
\end{eqnarray}
where $\mathcal{N}_{D,N}^{(k)}$ is a normalization factor given by
\begin{eqnarray}\label{normalization}
\mathcal{N}_{D,N}^{(k)}=
2\left[1+N\left(\frac{D^{N}}{D^{N}+2D-4}\right)^{k}\right].
\end{eqnarray}
Now, to simplify the considerations we can utilize the privacy
squeezing (see Section \ref{PrivacySqueezing}) to the obtained
states $\Theta_{\mathsf{AA'}}^{(N,k)}$. Namely, due to Lemma III.1
there exist such twistings $U_{t}^{(k)}$ that after application to
$\Theta_{\mathsf{AA'}}^{(N,k)}$ and tracing out the $\mathsf{A}'$
subsystem one arrives at the following class of $N$--qubit states
\begin{eqnarray}\label{Question}
\widetilde{\Theta}_{\mathsf{A}}^{(N,k)}&\negmedspace\negmedspace=\negmedspace\negmedspace&\frac{1}{\mathcal{N}_{D,N}^{(k)}}\left[
\sum_{i=0}^{N}\left(\mathcal{P}_{i}^{(N)}+\mathcal{\overline{P}}^{(N)}_{i}\right)
\norsl{\mo{X_{D}^{(N)T_{i}}}^{T_{i}}}^{k}\right.\nonumber\\
&&\left.+\left(\ket{0}\!\bra{1}^{\ot N}+\ket{1}\!\bra{0}^{\ot N }\right)
\norsl{X_{D}^{(N)}}^{k}\right].
\end{eqnarray}
In other words, after 'rotation' with $U_{t}^{(k)}$ and throwing
out the $\mathsf{A}'$ subsystem we get a so--called privacy squeezed
state, i.e., the one in which blocks are replaced with their
norms. We also know from Theorem IV.4 that the distillable key of
the \textsf{c}q state obtained from the privacy squeezed state
$\widetilde{\Theta}_{\mathsf{A}}^{(N,k)}$ (measurement is
performed in the same basis as twisting) cannot be greater than
the distillable key of $\Theta_{\mathsf{AA'}}^{(N,k)}$.
From Eq. \eqref{normalization} it follows that since $D^{N}+2D-4>
D^{N}$ for any $D\geq 3$ one has $\mathcal{N}_{D,N}^{(k)}\to 2$,
while for $D=2$, $\mathcal{N}_{2,N}^{(k)}\to 2(N+1)$. In turn this
means that for the off--diagonal elements of
$\widetilde{\Theta}_{\mathsf{A}}^{(N,k)}$ one has that
\begin{equation}
\frac{1}{\mathcal{N}_{D,N}^{(k)}}\left\|X_{D}^{(N)}\right\|_{1}^{k}=\frac{1}{\mathcal{N}_{D,N}^{(k)}}
\stackrel{k\to\infty}{\mathop{-\!\!\!-\!\!\!\longrightarrow}\nolimits} \frac{1}{2}
\end{equation}
with $D\geq 3$. By virtue of Theorem III.3 one infers that the
more copies of $\varrho_{\mathsf{AA'}}^{(D,N)}$ we put into the
recurrence protocol, the closer we are to some multipartite
private state. This also means that with $k\to \infty$ the
sequence of states $\widetilde{\Theta}_{\mathsf{A}}^{(N,k)}$ goes
to $GHZ$ state $P_{2,N}^{(+)}$, however, for $D\geq 3$.
Now, to bound from below the distillable key of
$\Theta_{\mathsf{AA'}}^{(N,k)}$ according to the prescription
given above we need to calculate a \textsf{c}q state of the
privacy squeezed state $\widetilde{\Theta}_{\mathsf{A}}^{(N,k)}$.
(The \textsf{c}q state is found here with respect to the basis in
which the original state is defined.) Simple algebra gives
\begin{eqnarray}\label{Constr1CCQWielocz}
\widetilde{\Theta}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}&\negmedspaces=\negmedspaces&\frac{1}{\mathcal{N}_{D,N}^{(k)}}
\left[R_{2}^{(N)}
\ot\proj{E_{0}}+\left(\frac{D^{N}}{D^{N}+2D-4}\right)^{k}\right.\nonumber\\
&&\left.\times\sum_{j=1}^{N}\left(P_{j}^{(N)}\ot\proj{E_{j}}
+\overline{P}_{j}^{(N)}\ot\proj{\overline{E}_{j}}\right)\right],\nonumber\\
\end{eqnarray}
where $\ket{E_{0}}$, $\ket{E_{j}}$, and $\ket{\overline{E}_{j}}$
constitute a set of orthonormal states held by Eve. One notices
immediately that
$\widetilde{\Theta}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$ tends
to the multipartite ideal \textsf{c}q state (see Eq.
\eqref{idealcq}) for any integer $D\geq 3$ whenever $k\to \infty$.
To find a lower bound on distillable key of
$\widetilde{\Theta}_{\mathsf{A}}^{(N,k)}$ we utilize Eq. \eqref{DWbound2}.
However, according to Eq. \eqref{DWbound2} one needs to calculate
the quantities $I(A_{i}\!:\!A_{j})$ for $i\neq j$ and
$I(A_{i}\!:\! E)$ for the respective reductions of
$\Theta_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}$. Fortunately, the
initial states $\varrho_{\mathsf{AA}'}^{(D,N)}$ have such
symmetrical structure, preserved by the recurrence protocol and
the privacy squeezing, that makes all the quantities
$I(A_{i}\!:\!A_{j})$ $(i\neq j)$ equal (the same holds for
$I(A_{i}\!:\! E)$). Consequently, in view of the above, using Eq.
\eqref{DWbound2} and Theorem IV.4 (see Eq. \eqref{theoremIV4}), we
infer the following inequality
\begin{equation}\label{Constr2LowerBound}
K_{D}(\Theta_{\mathsf{AA}'}^{(N,k)})\geq
I(A_{1}\!:\!A_{2})(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})-I(A_{1}\!:\!E)(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})}).
\end{equation}
This inequality holds irrespective of the number of parties $N$. Exemplary behaviour of the right--hand side
of Eq. \eqref{Constr2LowerBound} (denoted by $K_{DW}$)
in the function of $k$ and $D$ for $N=3$ is shown in Fig. \ref{rys1Constr2}a.
\begin{figure}[h!]
(a)\includegraphics[width=6cm]{rys1_C2.eps}\\
(b)\includegraphics[width=6cm]{rysunek1C1.eps} \caption{An
exemplary plot of $K_{DW}\equiv
I(A_{1}\!:\!A_{2})(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})-I(A_{1}\!:\!E)(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})$
with $N=3$ (a) and for comparison in the case of $N=2$ (b), which
was discussed in Ref. \cite{PHRA}. For the sake of clarity, zero
is put whenever the plotted function is less than zero. Notice
also that even though $k$ and $D$ are discrete parameters, the
graph is made as if $K_{DW}$ were a function of continuous
parameters. It follows from both the figures that the number of
parties $N$ significantly influences the obtained lower bound.
Namely, for $N=3$ one needs to spend more copies of a given state
to get nonzero values of $K_{DW}$.} \label{rys1Constr2}
\end{figure}
It is clear from Figure \ref{rys1Constr2}a that it is possible to
distill one secure bit of key from bound entangled states
$\Theta_{\mathsf{AA}'}^{(N,k)}$ for sufficiently large $k$. For
comparison, Fig. \ref{rys1Constr2}b contains a lower bound of the
distillable key in the case of $N=2$ discussed in Ref.
\cite{PHRANATO}.
We can also investigate the lower bound on $K_{D}$
for the initial states $\varrho_{\mathsf{AA}'}^{(D,N)}$. However, in
this case we need to take into account the probability $p_{D,N}^{(k)}$.
In this way we arrive at
\begin{eqnarray}\label{Constr2LowerBound2}
K_{D}(\varrho_{\mathsf{AA}'}^{(D,N)})&\negmedspaces\geq\negmedspaces& p_{D,N}^{(k)}\left[ I(A_{1}\!:\!A_{2})(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})\right.\nonumber\\
&&\left.\hspace{1cm}-I(A_{1}\!:\!E)(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})\right].
\end{eqnarray}
Figure \ref{rys2Constr2}a presents exemplary behaviour
of the function appearing on the right--hand side
of Eq. \eqref{Constr2LowerBound2} (denoted by $\widetilde{K}_{DW}$) for $N=3$.
For comparison, in Figure \ref{rys2Constr2}b it is also plotted
the same function in the case of $N=2$ (this case was discussed in Ref. \cite{PHRA}).
\begin{figure}[h!]
(a)\includegraphics[width=7cm]{rys2_C2.eps}\\
(b)\includegraphics[width=6cm]{rysunek3C1.eps} \caption{An
exemplary plot of $\widetilde{K}_{DW}\equiv p_{D,3}^{(k)}
\left[I(A_{1}\!:\!A_{2})(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})-
I(A_{1}\!:\!E)(\Theta_{A_{1}A_{2}E}^{(\mathrm{ccq})})\right]$ with
$N=3$. For comparison it is also presented the case of $N=2$ (b).
For the sake of clarity, zero is put whenever the plotted function
is less or equal to zero. Also, though both the parameters $k$ and
$D$ are integer, for convenience, the function
$\widetilde{K}_{DW}$ is plotted as if it were a function of
continuous $k$ and $D$. It is clear that for $N=3$ the lower bound
on distillable key is considerably smaller. } \label{rys2Constr2}
\end{figure}
Let us conclude the first construction with discussion of some of its general
properties. Above we used a particular class of matrices $X_{D}^{(N)}$
(defined in Eq. (75)), however, it seems interesting to ask whether there are
other matrices than $X_{D}^{(N)}$ that could be used in the construction.
In what follows we provide some constraints that the general matrix,
hereafter denoted by $Z_{D}^{(N)}$, must obey to be useful for
purposes of the construction. The first important condition is that
the trace norm of $Z_{D}^{(N)}$ has to be strictly larger than the trace norm of
$\big|Z_{D}^{(N)T_{i}}\big|^{T_{i}}$ for all $i=1,...,N$. This guarantees
convergence (in the trace norm) of the output states of the recursive
LOCC protocol (given in Sec. IV.C) to some multipartite
private states. Other crucial conditions
are $\big|Z_{D}^{(N) T_{i}}\big|^{T_{i}}\geq 0$ and
$\big|Z_{D}^{(N)}\big|^{T_{i}}\geq 0$ for all $i=1,\ldots,N$.
The first one is necessary for $\varrho_{\mathsf{AA}'}^{(D,N)}$
(when constructed with the matrix $Z_{D}^{(N)}$) to be positive,
while the second one allows to prove that $\varrho_{\mathsf{AA}'}^{(D,N)}$
have positive partial transposition with respect to any elementary subsystem.\\
{\it Lemma V.3.} Assume that $Z_{D}^{(N)}$ is an arbitrary matrix
acting on $(\mathbb{C}^{D})^{\ot N}$ and that the following conditions
\begin{enumerate}
\item[(i)] $\norsl{Z_{D}^{(N)}}> \norsl{\left|Z_{D}^{(N)T_{i}}\right|^{T_{i}}}$ for all $i=1,\ldots,N$,
\item[(ii)] $\left|Z_{D}^{(N)T_{i}}\right|^{T_{i}}\geq 0$ for all $i=1,\ldots,N$,
\item[(iii)] $\left|Z_{D}^{(N)}\right|^{T_{i}}\geq 0$ for all $i=1,\ldots,N$
\end{enumerate}
{are satisfied. Then $Z_{D}^{(N)}\ngeq 0$ and $Z_{D}^{(N)T_{i}}\ngeq 0$ for all $i=1,\ldots,N$.}\\
{\it Proof.} ({\it ad absurdum}) We divide the proof into three parts:
\begin{enumerate}
\item[(i)] Assume that $Z_{D}^{(N)}\geq 0$ and $Z_{D}^{(N)T_{i}}\ngeq 0$ for any $i=1,\ldots,N$.
Then one can see that $\big|Z_{D}^{(N)}\big|^{T_{i}}=Z_{D}^{(N)T_{i}}\ngeq 0$ for any choice of $i$.
However, this contradicts the third assumption.
\item[(ii)] Assume that $Z_{D}^{(N)}\ngeq 0$ and there exists such $k$ that $Z_{D}^{(N)T_{k}}\geq 0$. Now,
one obtains $\big|Z_{D}^{(N)T_{k}}\big|^{T_{k}}=Z_{D}^{(N)}\ngeq 0$. Of course, this is in contradiction to the second assumption.
\item[(iii)] Finally, assume that $Z_{D}^{(N)}\geq 0$ and that there exists such $k$ that
$Z_{D}^{(N)T_{k}}\geq 0$. Then $\norsl{\big|Z_{D}^{(N)T_{k}}\big|^{T_{k}}}=\norsl{Z_{D}^{(N)}}$. This contradicts the first assumption.
$\vrule height 4pt width 3pt depth2pt$
\end{enumerate}
This lemma says that a matrix can be used in the above construction
if it is not positive and all its elementary partial transpositions are not positive.
Thus, in particular, a general density matrix is not suitable for this construction.
\subsection{The second construction} \label{Giechazety2}
The crucial ideas behind the second construction are actually the same as in the
case of the first one, however, considerations will be a little bit more sophisticated.
Let us first define the analog of $X_{D}^{(N)}$ from the first construction
to be
\begin{equation}\label{tildeX}
\widetilde{X}^{(N)}_{D}=\sum_{i,j=0}^{D-1}u_{ij}\ket{i}\!\bra{j}^{\ot
N},
\end{equation}
where we assume that $u_{ij}$ are elements of some $D\times D$
general unitary or unitary Hermitian matrix, hereafter denoted by $U_{D}$.
Thus $\widetilde{X}_{D}^{(N)}$ is an embedding of $U_{D}\in
M_{D}(\mathbb{C})$ ($M_{D}(\mathbb{C})$ denotes the set of
$D\times D$ matrices with complex entries) in
$M_{D^{N}}(\mathbb{C})$ and therefore
\begin{equation}\label{relacion}
\left|\widetilde{X}_{D}^{(N)}\right|=R_{D}^{(N)}.
\end{equation}
For further simplicity we also impose the condition that
$|u_{ij}|=1/\sqrt{D}$ for $i,j=0,\ldots,D-1$, however whenever
possible all proofs will be given assuming that $U_{D}$ is
a general unitary matrix.
It should be also pointed out that the distinction on unitary or
unitary and Hermitian matrices $U_{D}$ made above plays an important
role here. This comes from the LOCC protocol presented in Section
\ref{LOCCProtocol} as in the case of unitary but not Hermitian
matrices it needs to be slightly modified. Namely, in its last
step all the parties keep the state only if all zeros occurred.
A particular example of a unitary but in general not Hermitian
matrix satisfying the above condition is the matrix
$\widetilde{V}_{D}=(1/\sqrt{D})V_{D}$, where $V_{D}$ denotes the
Vandermonde matrix of solutions to
the equation $z^{D}-1=0$ with $z\in\mathbb{C}$. As one knows the
solutions are of the form $\omega_{k}=\mathrm{e}^{2\pi
\mathrm{i}k/D}$ $(k=0,\ldots,D-1)$. It is then clear that
$\widetilde{V}_{D}$ is a unitary matrix for any $D\geq 2$, however
not always a Hermitian one. For instance, in the particular case
of $D=2$ one easily recognizes that $\widetilde{V}_{2}$ is the
known Hadamard matrix. A good example of some unitary and
Hermitian matrix is $k$th tensor power of $\widetilde{V}_{2}$.
Since $\widetilde{V}_{2}$ is unitary and Hermitian any matrix
of the form $\widetilde{V}_{2}^{\ot k}$ is also unitary and
Hermitian.
Now, let us consider the following family of matrices
\begin{eqnarray}\label{SecondConstr}
\widetilde{\varrho}^{(D,N)}_{\mathsf{A}\mathsf{A'}}&\negmedspaces=\negmedspaces&\frac{1}{\widetilde{\mathcal{N}}^{(N)}_{D}}
\left[\sum_{j=0}^{N}\left(\mathcal{P}_{j}^{(N)}
+\overline{\mathcal{P}}_{j}^{(N)}\right)\ot\sum_{i=1}^{N}\left|\widetilde{X}_{D,i}^{(N)T_{j}}\right|\right.\nonumber\\
&&\left.+\ket{ 0}\!\bra{1}^{\ot
N}\ot\sum_{i=1}^{N}\widetilde{X}_{D,i}^{(N)}+\ket{
1}\!\bra{0}^{\ot N}\ot\sum_{i=1}^{N}\widetilde{X}_{D,i}^{(N)\dagger}\right],\nonumber\\
\end{eqnarray}
where $\widetilde{\mathcal{N}}^{(N)}_{D}$ stands for the
normalization factor, which for arbitrary unitary $U_{D}$ is given
by
\begin{eqnarray}
\widetilde{\mathcal{N}}_{D}^{(N)}=2N\left(D+N\sum_{i,j=0}^{D-1}|u_{ij}|\right).
\end{eqnarray}
Obviously for $\widetilde{X}_{D}^{(N)}$ that comes from unitary
Hermitian $U_{D}$ the conjugation in the last term in Eq.
\eqref{SecondConstr} may be omitted. Moreover, taking into account
the assumption that $|u_{ij}|=1/\sqrt{D}$, the normalization
factor becomes
$\widetilde{\mathcal{N}}_{D}^{(N)}=2ND(1+N\sqrt{D})$.
As in the case of the first construction, we need to prove that
$\widetilde{\varrho}^{(D,N)}_{\mathsf{A}\mathsf{A'}}$ represent
quantum states. Moreover, we show also that they have positive
partial transpositions with respect to any elementary subsystem.
From Eq. \eqref{SecondConstr} it follows that to prove positivity
of $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ one has to show
that the inequalities
\begin{equation}\label{Nier1}
\left|\sum_{i=1}^{N}\widetilde{X}_{D,i}^{(N)}\right|\leq
\sum_{i=1}^{N}\left|\widetilde{X}_{D,i}^{(N)}\right|
\end{equation}
are satisfied. Then simply utilizing Lemma A.1 and noting that the
remaining blocks lying on the diagonal of
$\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ are positive by
definition, the positivity of
$\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ is proved.
To deal with the problem of positivity of partial transpositions
let us look on the particular example of form of
$\widetilde{\varrho}^{(D,3)T_{3}}_{\mathsf{A}\mathsf{A'}}$. From
Eq. \eqref{SecondConstr} one infers that
\begin{widetext}
\begin{equation}\label{EX_PT2}
\widetilde{\varrho}^{(D,3)T_{3}}_{\mathsf{A}\mathsf{A'}}=\frac{1}{\widetilde{\mathcal{N}}^{(3)}_{D}}\left[
\begin{array}{cccccccc}
\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)}\Big| &
\hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0
&\hspace{-0.4cm} 0 &
\hspace{-0.4cm}0 &\hspace{-0.4cm} 0 & \hspace{-0.4cm}0\\
0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)T_{3}}\Big|
& \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\widetilde{X}_{D,i}^{(3)T_{3}} &\hspace{-0.4cm} 0\\
0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)T_{2}}\Big|
& \hspace{-0.4cm}0
& \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)T_{1}}\Big|
& \hspace{-0.4cm}0
& \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 \\
0 & \hspace{-0.4cm}0 &\hspace{-0.4cm} 0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)T_{1}}\Big|
& \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0\\
0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)T_{2}}\Big|
& \hspace{-0.4cm}0 & \hspace{-0.4cm}0\\
0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\widetilde{X}_{D,i}^{(3)T_{3}\dagger}
& \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)T_{3}}\Big|
& \hspace{-0.4cm}0 \\
0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}0 & \hspace{-0.4cm}0 & \hspace{-0.4cm}0 &
\hspace{-0.4cm}\displaystyle\sum_{i=1}^{3}\Big|\widetilde{X}_{D,i}^{(3)}\Big|
\end{array}
\right],
\end{equation}
\end{widetext}
where we used the fact that
$\big|\widetilde{X}_{D,i}^{(n)T_{j}}\big|$ are diagonal in the
standard basis and therefore are not affected by partial
transposition with respect to any subsystems.
To show positivity of $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$
as well as its partial transpositions we prove the following
lemma.
{\it Lemma V.4.} Let $\widetilde{X}_{D}^{(N)}$ be defined as in
Eq. \eqref{tildeX}. Then the following equalities hold
\begin{equation}\label{Nier1}
\left|\sum_{i=1}^{N}\widetilde{X}_{D,i}^{(N)T_{j}}\right|=
\sum_{i=1}^{N}\left|\widetilde{X}_{D,i}^{(N)T_{j}}\right| \qquad
(j=0,\ldots,N).
\end{equation}
{\it Proof.} Firstly, we prove the above statement for $j=0$.
For this purpose let us notice that its
left--hand side may be written as
\begin{equation}
\left|\sum_{i=1}^{N}\widetilde{X}_{D,i}^{(N)}\right|=
\left|N\sum_{k=0}^{D-1}u_{kk}\proj{k}^{\ot
N}+\sum_{i=1}^{N}\sum_{\substack{k,l=0\\k\neq
l}}^{D-1}u_{kl}(\ket{k}\!\bra{l}^{\ot N})^{T_{i}}\right|.
\end{equation}
Straightforward algebra shows that both terms under the sign of
absolute value are defined on orthogonal supports. Moreover,
all the partial transpositions in the second term are defined on
orthogonal supports. Both these facts allow us to write
\begin{equation}
\left|\sum_{i=1}^{N}\widetilde{X}_{D,i}^{(N)}\right|=N\sum_{k,l=0}^{D-1}\left|u_{kl}\right|\proj{l}^{\ot(i-1)}\ot
\proj{k}\ot\proj{l}^{\ot(N-i-1)}.
\end{equation}
One finds immediately that this equals the right--hand side of
(\ref{Nier1}), finishing the first part of the proof.
To show Eq. \eqref{Nier1} for $j=1,\ldots,N$ we need to perform a
little bit more sophisticated analysis. With the same reasoning as
in the case of the first inequality we can reduce the claimed
inequalities to the following
\begin{eqnarray}\label{a4}
&&\left|\widetilde{X}^{(N)}_{D}+(N-1)\sum_{k=0}^{D-1}u_{kk}\proj{k}^{\ot N}\right|\leq R_{D}^{(N)}
\nonumber\\
&&\hspace{1cm}+(N-1)\sum_{k=0}^{D-1}|u_{kk}|\proj{k}^{\ot N},
\end{eqnarray}
where we utilized Eq. \eqref{relacion}. One notices that the above
inequality may be further reduced to
\begin{equation}
\left|U_{D}+(N-1)\mathcal{D}\right|\leq\mathbbm{1}_{D}+(N-1)\left|\mathcal{D}\right|,
\end{equation}
where $\mathcal{D}$ denotes a diagonal matrix containing the
diagonal elements of $U_{D}$. Utilizing the fact that
$|u_{ij}|=1/\sqrt{D}$ for any $i,j=0,\ldots,D-1$, we infer that
$|\mathcal{D}|=(1/\sqrt{D})\mathbbm{1}_{D}$ and therefore
\begin{equation}\label{inequality}
\left|U+(N-1)\mathcal{D}\right|\leq
[1+(N-1)/\sqrt{D}]\mathbbm{1}_{D}.
\end{equation}
To prove this inequality we can utilize the polar decomposition to
its left--hand side. More precisely we can write
$\left|U+(N-1)\mathcal{D}\right|=V^{\dagger}U+(N-1)V^{\dagger}\mathcal{D}$
with $V$ denoting some unitary matrix. This allows us to write
\begin{eqnarray}\label{a5}
\bra{\Psi}\left|U+(N-1)\mathcal{D}\right|\ket{\Psi}&\negmedspaces=\negmedspaces&
\big|\bra{\Psi}\left|U+(N-1)\mathcal{D}\right|\ket{\Psi}\big|\nonumber\\
&\negmedspaces\leq\negmedspaces&\left|\bra{\Psi}V^{\dagger}U\ket{\Psi}\right|\nonumber\\
&&+(N-1)\left|\bra{\Psi}V^{\dagger}\mathcal{D}\ket{\Psi}\right|\nonumber\\
&\negmedspaces\leq \negmedspaces&1+(N-1)\left|\bra{\Psi}V^{\dagger}|\mathcal{D}|W\ket{\Psi}\right|\nonumber\\
&\negmedspaces\leq \negmedspaces&1+\frac{N-1}{\sqrt{D}},
\end{eqnarray}
where $\ket{\Psi}$ is an arbitrary normalized vector from
$\mathbb{C}^{D}$. The second and third inequality are consequences
of the fact that the product of unitary matrices is a unitary matrix
and that for any normalized $\ket{\psi}$ and unitary $U$ it holds
that $|\bra{\psi}U\ket{\psi}|\leq 1$. Moreover, we put here the
polar decomposition of $\mathcal{D}$, i.e.,
$\mathcal{D}=|\mathcal{D}|W$ with some unitary $W$. The last
inequality is also a result of application of aforementioned fact
that $|\mathcal{D}|=(1/\sqrt{D})\mathbbm{1}_{D}$.
Now, to finish the proof, it suffices to mention that the
resulting inequality is equivalent to \eqref{inequality}.
$\vrule height 4pt width 3pt depth2pt$
From the above lemma it clearly follows that $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$
represent quantum states for any $D\geq 2$ and $N\geq 2$, and they have positive partial transpositions
with respect to all elementary subsystems. The last thing we need to prove
is that the distillable key of $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$
is nonzero. This would also imply that $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$
represent entangled states.
Let us then apply the recursive protocol described previously in Section \ref{LOCCProtocol}
to $k$ copies of $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$, obtaining
\begin{eqnarray}\label{SecondConstrLOCC}
\widetilde{\Theta}_{\mathsf{AA'}}^{(N,k)}&\negmedspaces=\negmedspaces&\frac{1}{\widetilde{\mathcal{N}}^{(k)}_{D,N}}
\left[\sum_{j=0}^{N}
\left(\mathcal{P}_{j}^{(N)}+\overline{\mathcal{P}}_{j}^{(N)}\right)\ot
\left(\sum_{i=1}^{N}\left|\widetilde{X}^{(N)T_{j}}_{D,i}\right|\right)^{\ot k}\right.\nonumber\\
&&+\ket{ 0}\!\bra{1}^{\ot N}\ot\left(\sum_{i=1}^{N}\widetilde{X}^{(N)}_{D,i}\right)^{\ot k}\nonumber\\
&&\left.+\ket{
1}\!\bra{0}^{\ot N}\ot\left(\sum_{i=1}^{N}\widetilde{X}^{(N)\dagger}_{D,i}\right)^{\ot k}\right],
\end{eqnarray}
with the normalization factor given by
\begin{equation}\label{Constr2Norm}
\widetilde{\mathcal{N}}_{D,N}^{(k)}=2\left(ND\sqrt{D}\right)^{k}+2ND^{k}\left[1+(N-1)\sqrt{D}\right]^{k}.
\end{equation}
Notice that, as previously mentioned, the LOCC protocol should be
modified in the case when $\widetilde{X}^{(N)}_{D}$ follows from a
general (not necessarily Hermitian) unitary $U_{D}$. Due to the modification of the LOCC
protocol, the probability of obtaining
$\widetilde{\Theta}_{\mathsf{AA'}}^{(N,k)}$ in the case of unitary
and unitary Hermitian $U_{D}$ is different. Namely, in the case of
unitary Hermitian matrices it amounts to
\begin{equation}\label{prob1}
\widetilde{p}_{D,N}^{(k,1)}=2^{k-1}\widetilde{\mathcal{N}}_{D,N}^{(k)}/
\big(\widetilde{\mathcal{N}}_{D,N}^{(1)}\big)^{k},
\end{equation}
while in the case of a unitary non-Hermitian matrix the probability of
success is considerably smaller and is given by
\begin{equation}\label{prob2}
\widetilde{p}_{D,N}^{(k,2)}=\widetilde{\mathcal{N}}_{D,N}^{(k)}/
\big(\widetilde{\mathcal{N}}_{D}^{(N)}\big)^{k}.
\end{equation}
Now the multipartite privacy squeezing (see Section
\ref{PrivacySqueezing}) allows us to change blocks in Eq.
\eqref{SecondConstrLOCC} with their norms, obtaining
\begin{eqnarray}\label{PrivSqueez}
\widetilde{\theta}_{\mathsf{A}}^{(N,k)}&\negmedspaces=\negmedspaces&\frac{1}{\widetilde{\mathcal{N}}^{(k)}_{D,N}}
\left[\sum_{j=0}^{N}\left(\mathcal{P}_{j}^{(N)}+\overline{\mathcal{P}}_{j}^{(N)}\right)
\norsl{\sum_{i=1}^{N}\left|\widetilde{X}^{(N)T_{j}}_{D,i}\right|}^{ k}\right.\nonumber\\
&&\left.+\left(\ket{ 0}\bra{1}^{\ot N}
+\ket{1}\bra{0}^{\ot N}\right)\norsl{\sum_{i=1}^{N}\widetilde{X}^{(N)}_{D,i}}^{k}\right].
\end{eqnarray}
Calculating the respective norms in the above, one may rewrite
Eq. \eqref{PrivSqueez} as
\begin{eqnarray}\label{PrivSq2}
\widetilde{\theta}_{\mathsf{A}}^{(N,k)}&\negmedspaces=\negmedspaces&\frac{D^{k}}{\widetilde{\mathcal{N}}_{D,N}^{(k)}}
\left[ 2(N\sqrt{D})^{k}P_{2,N}^{(+)}\right.\nonumber\\
&&\left.+[1+(N-1)\sqrt{D}]^{k}\sum_{j=1}^{N}
\left(\mathcal{P}_{j}^{(N)}+\overline{\mathcal{P}}_{j}^{(N)}\right)\right].\nonumber\\
\end{eqnarray}
From Eqs. \eqref{Constr2Norm} and \eqref{PrivSq2} one easily
infers that $\widetilde{\theta}_{\mathsf{A}}^{(N,k)}\to
P_{2,N}^{(+)}$ for $k\to\infty$ for any $D\geq 2$, which by virtue
of Theorem III.3 means that the recursive protocol when applied to
copies of $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ produces
a state that is arbitrarily close to some multipartite pdit in the
limit of $k\to\infty$. In fact, as the probabilities of success
$\widetilde{p}_{D,N}^{(k,1)}$ and $\widetilde{p}_{D,N}^{(k,2)}$
(see Eqs. \eqref{prob1} and \eqref{prob2}) are positive, according
to the definition of $K_{D}$ (see Definition IV.1) the above
method leads to distillation of secure key from
$\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$. Below we provide
also plots of lower bounds on $K_{D}$ of
$\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$.
For this purpose we can find the purification of
$\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ and then the
\textsf{c}q state in the standard basis. The latter has the form
\begin{eqnarray}
\hspace{-0.5cm}\widetilde{\Theta}_{\mathsf{A}E}^{(\mathsf{c}\mathrm{q})}&\negmedspaces=\negmedspaces&a_{D,N}^{(k)}
R_{2}^{(N)}\ot\proj{E_{0}}
+b_{D,N}^{(k)}\nonumber\\
&&\hspace{-0.5cm}\times\sum_{j=1}^{N}\left(\mathcal{P}_{j}^{(N)}\ot\proj{E_{j}}+
\overline{\mathcal{P}}_{j}^{(N)}\ot\proj{\overline{E}_{j}}\right),
\end{eqnarray}
where $\ket{E_{0}}$, $\ket{E_{j}}$, and $\ket{\overline{E}_{j}}$
$(j=1,\ldots,N)$ are orthonormal states kept by Eve, and
coefficients $a_{D,N}^{(k)}$ and $b_{D,N}^{(k)}$ are given by
\begin{equation}
a_{D,N}^{(k)}=\frac{(ND\sqrt{D})^{k}}{\widetilde{\mathcal{N}}_{D,N}^{(k)}}
\end{equation}
and
\begin{equation}
b_{D,N}^{(k)}=\frac{D^{k}}{\widetilde{\mathcal{N}}_{D,N}^{(k)}}[1+(N-1)\sqrt{D}]^{k}.
\end{equation}
One can see from the above that the limit of $k\to \infty$ leads
us to the ideal \textsf{c}q state. Now we can apply the bound
given in Eq. \eqref{DWbound3}. It is easy to verify that all
the quantities $I(A_{i}\!:\!A_{j})$ are equal here (the same holds
for $I(A_{i}\!:\!E)$) and therefore we can rewrite Eq.
\eqref{DWbound3} as
\begin{equation}\label{Klucz2Constr}
K_{D}(\widetilde{\Theta}_{\mathsf{AA}'}^{(N,k)})\geq I(A_{1}\!:\!A_{2})
(\widetilde{\Theta}_{A_{1}A_{2}E}^{(\mathrm{ccq})})-I(A_{1}:E)(\widetilde{\Theta}_{A_{1}A_{2}E}^{(\mathrm{ccq})})
\end{equation}
Exemplary plot of the function appearing on the right--hand side
of Eq. \eqref{Klucz2Constr} (denoted as $K_{DW}$) is presented in
Figure \ref{Constr3Fig1}.
\begin{figure}[h!]
\centering{\includegraphics[width=6cm]{rysC3bezPr1.eps}}
\caption{The function appearing on the right--hand side of Eq.
\eqref{Klucz2Constr} (denoted here by $K_{DW}$) in the function of
number of copies $k$ and the dimension $D$. Zero is put whenever
the function is less than zero. Notice that both the parameters
$k$ and $D$ are discrete, however, continuous plot is made to
indicate better the behavior of $K_{DW}$. It is clear from the
plot that for larger $k$ the distillable key of
$\widetilde{\theta}_{\mathsf{A}}^{(N,k)}$ approaches one bit (this
is actually a maximal value obtainable from two--qubit states) and
the convergence depends on $D$. Namely, for higher dimensions $D$
the convergence to the maximal value is
faster.}\label{Constr3Fig1}
\end{figure}
The behavior of $K_{DW}$ (see Fig. \ref{Constr3Fig1}) confirms the
previous analysis, namely, the more copies we spend the closer
the state is to some multipartite private state we obtain using the
recursive protocol. Thus the higher key rate we can get from the
obtained state $\widetilde{\Theta}_{\mathsf{AA}'}^{(N,k)}$.
We can also get a lower bound on distillable key of the initial
states $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$. Here we need
to take into account the probability of success
($\widetilde{p}_{D,N}^{(k,1)}$ and $\widetilde{p}_{D,N}^{(k,2)}$)
in the recursive protocol.
The corresponding bounds
on the distillable keys of
$\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ are
\begin{eqnarray}
K_{D}^{(1(2))}(\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)})&\negmedspaces\geq\negmedspaces & \widetilde{p}^{(k,1(2))}_{D,N}\left[I(A_{1}\!:\!A_{2})(\widetilde{\Theta}_{A_{1}A_{2}E}^{(\mathrm{ccq})})\right.\nonumber\\
&&\left.-I(A_{1}\!:\!E)(\widetilde{\Theta}_{A_{1}A_{2}E}^{(\mathrm{ccq})})\right].
\end{eqnarray}
Exemplary plots of the right--hand side of the above (denoted by
$\widetilde{K}_{DW}^{(1(2))}$) both in the case of a unitary
Hermitian matrix (e.g. $\widetilde{V}_{2}^{\ot k}$) and only a
unitary matrix (e.g. $\widetilde{V}_{D}$) are given in Figure
\ref{Constr3Fig2}a and \ref{Constr3Fig2}b.
\begin{figure}[h!]
(a)\includegraphics[width=6cm]{rysC3Pr1.eps}\\
(b)\includegraphics[width=6cm]{rysC3Pr2.eps} \caption{Lower bounds
on $K_{D}$ of $\widetilde{\varrho}_{\mathsf{AA}'}^{(D,N)}$ in the
function of $k$ and $D$. The upper plot (a) presents lower bound
(denoted here by $\widetilde{K}_{DW}^{(1)}$) on $K_{D}$ in the
case of unitary Hermitian matrices $U_{D}$, while in the second
plot (b) lower bound ($\widetilde{K}_{DW}^{(2)}$) in the case of
unitary but not Hermitian matrices is given. Both are just a
product of probability $\widetilde{p}_{D,N}^{(k,1)}$ (left) or
$\widetilde{p}_{D,N}^{(k,2)}$ (right) and $K_{DW}$ plotted in
Figure \ref{Constr3Fig1}. One infers that in the case of unitary
but not Hermitian matrix $U_{D}$ the region of nonzero values of
the plotted function is wider than in the case of unitary
Hermitian matrices.} \label{Constr3Fig2}
\end{figure}
\section{Remarks on limitations in multipartite quantum cryptography}
So far, we discussed the general scheme allowing for distilling
secure key from multipartite states. It is desirable however to
discuss also what are the limitations of multipartite secure key
distillation.
An interesting effect, which we shall recall here, was reported in
Ref. \cite{RAPH1}, namely, it was shown that despite the maximal
violation of some Bell inequality it is impossible to distill
secure key from the so--called Smolin state \cite{Smolin}:
\begin{equation}
\varrho^{S}=\frac{1}{4}\sum_{i=0}^{3}\proj{\psi_{i}^{B}}\ot\proj{\psi_{i}^{B}},
\end{equation}
where $\ket{\psi_{i}^{B}}$ $(i=0,\ldots,3)$ are the so--called Bell
states given by
$\ket{\psi_{0(1)}^{B}}
=(1/\sqrt{2})\,\left(\ket{01}\pm\ket{10}\right)$
and $\ket{\psi_{2(3)}^{B}}
=(1/\sqrt{2})\,\left(\ket{00}\pm\ket{11}\right)$.
This conclusion may be also inferred for the generalizations of
the Smolin state provided in Ref. \cite{RAPH2} and independently
in Ref. \cite{GSS2} (see also Ref. \cite{WangYing} for further
generalizations). These are states of the form
\begin{eqnarray}\label{construction}
&&\varrho_{2}=\proj{\psi_{0}^{B}},\nonumber\\
&&\varrho_{4}^{S}=\frac{1}{4}\sum_{m=0}^{3}U_{2}^{(m)}\varrho_{2}U_{2}^{(m)\dagger}\ot
U_{2}^{(m)}\varrho_{2}U_{2}^{(m)\dagger}\equiv \varrho^{S},\nonumber\\
&&\varrho_{6}^{S}=\frac{1}{4}\sum_{m=0}^{3}U_{4}^{(m)}\varrho_{4}U_{4}^{(m)\dagger}\ot
U_{2}^{(m)}\varrho_{2}U_{2}^{(m)\dagger},\nonumber\\
&&\vdots\nonumber\\
&&\varrho_{2(n+1)}^{S}=\frac{1}{4}\sum_{m=0}^{3}U_{2n}^{(m)}\varrho_{2n}U_{2n}^{(m)\dagger}\ot
U_{2}^{(m)}\varrho_{2}U_{2}^{(m)\dagger}\nonumber\\
\end{eqnarray}
with $U^{(m)}_{k}=\mathbbm{1}_{2}^{\ot (k-1)}\ot \sigma_{m}$
($m=0,\ldots,3$ and $k=2,\ldots$), where $\sigma_{m}$ $(m=1,2,3)$
denote the usual Pauli matrices and $\sigma_{0}=\mathbbm{1}_{2}$.
The state $\varrho_{2}$ is just one of the Bell states, while
$\varrho_{4}^{S}$ is the Smolin state. All states $\varrho_{2n}$
for $n\geq 2$ are bound entangled and for suitable choice of local
observables all states for $n\geq 1$ violate the Bell inequality
\begin{equation}\label{BellInequality}
|E_{1\ldots 11}+E_{1\ldots 12}+E_{2\ldots 21} -E_{2\ldots22}|\le
2
\end{equation}
maximally ($E$ denotes the so--called correlation function, i.e.,
an average of products of local measurement outcomes taken over
many runs of experiment). On the other hand, due to the results of
Refs. \cite{Curty1,Curty2}, and Ref. \cite{PH_przegladowka}, one
may show that it is impossible to distill multipartite secure key
from states $\varrho_{2n}^{S}$ for $n\geq 2$. This shows that
bipartite Ekert protocol~\cite{Ekert} cannot be straightforwardly
generalized to multipartite scenario since as discussed above the
maximal violation of most natural multipartite analog of the
CHSH--Bell inequality~\cite{CHSH} does not imply nonzero secret key rate,
whereas maximal violation of CHSH--Bell inequality by two qubits
guarantees secrecy. Still, it would be an interesting problem for
further research to identify all Bell inequalities that do the job
in multipartite case as CHSH--Bell inequality does in the case of
two qubits. It should be stressed that some achievements in a
similar direction were already obtained in Refs.
\cite{ScaraniGisin,Sens}, where it was shown that violation of
some Bell inequalities is sufficient condition for security of
multipartite secret sharing protocols \cite{HilleryBuzek} under an
individual attack of some external party.
\section{Conclusions}
Quantum cryptography beyond entanglement distillation is a very
young subject. Until recent times it was natural to expect that
the latter is impossible. While there were significant
developments concerning the bipartite scenario the general
formulation for multipartite case was missing. The present paper
fills this gap by not only generalizing the scheme, but also by
providing new constructions of multipartite bound entangled states
which is really nontrivial. However, there are many unsolved
questions. First it seems to be true that the unconditional
security proof \cite{UnconditionalSecurity} can be extended here
at a cost of the number of estimated local observables, but an
exact analysis of this issue is needed. Moreover, given a fixed
number of parties, it is not known what is the minimal dimension
of elementary system of PPT like bound entangled state that allows
one-way secure key distillation. Does it increase with number of
particles and if so - how the dependence looks like? Are there
bound entangled states with multipartite cryptographic key with
underlying structure corresponding to other classes of pure states
like graph states (see Ref. \cite{Graph})? One may ask why we have
considered only bound entanglement in multipartite scenario. This
is when it is necessary to apply the generalized scheme. Otherwise
qualitatively (though may be not quantitatively -- see subsequent
discussion) just pure entanglement distillation is a sufficient
tool. Quite natural is a question of interplay between the two
approaches in distilling key -- to what extent can we abandon
distillation of p--dits? Finally, can the two processes always be
separated in optimal key distillation scheme: in a sense that one
gets some number of singlet states and some large p--dit which is
bound entangled)? If it were so, the two parts might serve as a
natural measures of free and bound entanglement in the system.
Most likely this is impossible, but one needs a proof. The closely
related question is the one concerning lockability of the secure
key $K_{D}$ (note that nonadditivity of $K_{D}$ has been proved
very recently in Ref. \cite{LiWinter}). While this seems to be a
very hard question in case of bipartite states (though lockability
with respect to Eve has already been ruled out in Ref.
\cite{Christandl}) it may happen to be easier within the
multipartite paradigm presented here (in analogy to classical
bound information which is known only in asymptotic bipartite form
\cite{BoundInfoBipartite} but naturally emerges form bound
entanglement in multipartite case \cite{BoundInfoMultipartite}. In
this context novel upper and lower bounds on $K_D$ are needed (for
recent development see Ref. \cite{MultiSquashed}). This point is
also interesting from the point of view of entanglement as $K_{D}$
is also an entanglement measure.
Further analysis of $K_{D}$ and finding its multi--coordinate
extensions to help in characterization of multipartite
entanglement seems to be rich program for future research.
Also, though in the present paper we are concerned with a
general problem of two--way distillabillity of secure key, it is
interesting to discuss the problem in the context of one--way
schemes. For instance one could ask about bounds on key within
such schemes (see e.g. Ref. \cite{MCL}). On the other hand, it
would be desirable to discuss the present approach in the context
of secure key distillation from continuous variables systems (see
e.g. Refs. \cite{NavascuesBae,2xN,Garcia-Patron}). For instance, it was shown in
Ref. \cite{Navascues} that the generalized version of the protocol
from Ref. \cite{NavascuesBae} does not allow for secure key
distillation from bound entangled states. It seems that within the
multipartite scenario the problem could be simpler a little as one
can have bound entangled multipartite states with some of its
partitions being still NPT.
Needless to say due to Choi--Jamio\l{}kowski isomorphism
\cite{ChJ} the present analysis provides new classes of multiparty
quantum channels for which natural questions on superactivation of
the type found in bipartite case \cite{SmithYard} and other
possible effects of similar type arise.
\acknowledgements This work was supported by UE IP project SCALA.
Partial support from LFPPI network is also acknowledged. R.A.
gratefully acknowledges the support from Ingenio 2010
QOIT and Foundation for Polish Science.
\appendix
\section{Some useful lemmas}
{\it Lemma A.1.} Assume that a given $d\times d$ matrix $B$ is normal.
If $A\geq |B|$ then the matrices
\begin{equation}
\mathcal{M}_{N}(A,B)=\left[
\begin{array}{cccc}
(N-1)A & B &\ldots & B \\
B^{\dagger} & (N-1)A & \ldots & B\\
\vdots & \vdots & \ddots & \vdots\\
B^{\dagger} & B^{\dagger}& \dots & (N-1)A
\end{array}
\right]
\end{equation}
and
\begin{equation}
\widetilde{\mathcal{M}}_{N}(A,B)=\left[
\begin{array}{ccccc}
(N-1)A & B & B & \ldots & B \\
B^{\dagger} & A & 0 & \ldots & 0\\
B^{\dagger} & 0 & A & \ldots & 0\\
\vdots & \vdots & \vdots & \ddots & \vdots\\
B^{\dagger} & 0 & 0 & \ldots & A
\end{array}
\right]
\end{equation}
are positive.
{\it Proof.} We prove the lemma only for $\mathcal{M}_{N}(A,B)$ as
the proof for $\widetilde{\mathcal{M}}_{N}(A,B)$ goes along the
same lines.
The matrix $\mathcal{M}_{N}(A,B)$
consists of $N^{2}$ blocks $d\times d$ each and consequently the whole
matrix has the dimensions $Nd\times Nd$. Thus
to prove positiveness we need to show that for any
$\ket{\Psi}\in\mathbb{C}^{Nd}$ the inequality $\bra{\Psi}\mathcal{M}_{N}(A,B)\ket{\Psi}\geq 0$ holds.
It is clear that an arbitrary vector $\ket{\Psi}\in\mathbb{C}^{Nd}$ may be written as
\begin{equation}\label{WconstrLemat3}
\ket{\Psi}=\left[
\begin{array}{c}
\ket{x_{1}}\\
\vdots\\
\ket{x_{N}}
\end{array}
\right],
\end{equation}
where each $\ket{x_{i}}$ belongs to $\mathbb{C}^{d}$. Then a rather straightforward algebra
leads to
\begin{eqnarray}
\bra{\Psi}\mathcal{M}_{N}(A,B)\ket{\Psi}&=&(N-1)\sum_{i=1}^{N}\bra{x_{i}}A\ket{x_{i}}\nonumber\\
&&+2\sum_{\substack{i,j=1\\i<j}}^{N}\mathrm{Re}\bra{x_{i}}B\ket{x_{j}}.
\end{eqnarray}
By virtue of the assumption that $A\geq |B|$ and the inequality $\mathrm{Re}z\geq -|z|$
satisfied for any $z\in\mathbb{C}$, one has
\begin{eqnarray}\label{WconstrLemat1}
\bra{\Psi}\mathcal{M}_{N}(A,B)\ket{\Psi}&\geq&
(N-1)\sum_{i=1}^{N}\bra{x_{i}}|B|\ket{x_{i}}\nonumber\\
&&-2\sum_{\substack{i,j=1\\i<j}}^{N}\left|\bra{x_{i}}B\ket{x_{j}}\right|.
\end{eqnarray}
Since $B$ is assumed to be a normal matrix it may be given as $B=\sum_{k}\lambda_{k}\proj{\varphi_{k}}$
with $\{\lambda_{k}\}$ being, in general, the complex eigenvalues of $B$, while $\{\ket{\varphi_{k}}\}$
its orthonormal eigenvectors. Putting the spectral decomposition of $B$ into
Eq. \eqref{WconstrLemat1}, introducing
$a_{ik}=|\langle x_{i}|\varphi_{k}\rangle|\geq 0$, and taking into account
the fact that $|\sum_{i}\xi_{i}|\leq \sum_{i}|\xi_{i}|$, we obtain
\begin{equation}\label{WconstrLemat2}
\bra{\Psi}\mathcal{M}_{N}(A,B)\ket{\Psi}\geq \sum_{k}|\lambda_{k}|
\left(\sum_{i=1}^{N}a_{ik}^{2}-2\sum_{\substack{i,j=1\\i<j}}^{N}a_{ik}a_{jk}\right).
\end{equation}
It is clear from Eq. \eqref{WconstrLemat2} that to show nonnegativity of
$\bra{\Psi}\mathcal{M}_{N}(A,B)\ket{\Psi}$ for any $\ket{\Psi}\in\mathbb{C}^{Nd}$,
one has to prove that for all $k$ the term in brackets in Eq. \eqref{WconstrLemat2}
is nonnegative. This, however, follows from the fact that
\begin{equation}
\sum_{\substack{i,j=1\\i<j}}^{N}(a_{ik}-a_{jk})^{2}\geq 0,
\end{equation}
finishing the proof. $\vrule height 4pt width 3pt depth2pt$\\
{\it Lemma A.2.} Let $A=\sum_{i,j=0}^{d-1}a_{i}^{j}\ket{i}\bra{j}$
be a positive matrix obeying $\Tr A\leq 1$. Assume that each
element of $A$ lying in $i$th row (and $i$th column due to
hermiticity of $A$) is close to $1/d$ in the sense that it obeys
$|a_{i}^{j}-1/d|\leq \epsilon$ for some $1/d>\epsilon>0$. Then
$|a_{i}^{j}-1/d|\leq\eta(\epsilon)$ for any $i,j=0,\ldots,d-1$,
where $\eta(\epsilon)\to 0$ for $\epsilon\to 0$.
{\it Proof.} The proof is rather technical and we present only its
sketch here (detailed proof may be found in Ref.
\cite{DoktoratRA}). First, let us fix the chosen row to be the
first one, i.e., $i=0$. Then, from the positivity of $A$ it
follows that any matrix of the form
\begin{equation}
\left[
\begin{array}{cc}
a_{0}^{0} & a_{0}^{j}\\[1ex]
a_{0}^{j*} & a_{j}^{j}
\end{array}
\right]
\end{equation}
is positive. Now, from its positivity we have that
$a_{0}^{0}a_{j}^{j}\geq \left|a_{0}^{j}\right|^{2}$, which
together with the assumption that $a_{0}^{0}$ and $a_{0}^{j}$ are
close to $1/d$ and $\epsilon<1/d$, implies that $a_{j}^{j}$ must
obey $a_{j}^{j}\geq 1/d-3\epsilon$ for any $j=1,\ldots,d-1$.
Taking into account the assumption that $\Tr A\leq 1$, one also
has that each $a_{j}^{j}$ must be bounded from above as
$a_{j}^{j}\leq 1/d+3(d-1)\epsilon$ for $j=1,\ldots,d-1$.
Therefore, we have that all diagonal elements of $A$ satisfy
\begin{equation}\label{Appendix1}
\left|a_{j}^{j}-\frac{1}{d}\right|\leq \alpha(\epsilon)
\end{equation}
with $\alpha(\epsilon)\to 0$ for $\epsilon\to 0$. Now, we need to
prove that the remaining off--diagonal elements of $A$ are also
close to $1/d$. For this purpose let us notice that from the fact
that $A\geq 0$ one has that the following matrices
\begin{equation}
\left[
\begin{array}{ccc}
a_{0}^{0} & a_{0}^{i} & a_{0}^{j} \\[1ex]
a_{0}^{i*} & a_{i}^{i} & a_{i}^{j}\\[1ex]
a_{0}^{j*} & a_{i}^{j*} & a_{j}^{j}
\end{array}
\right] \qquad (0<i<j)
\end{equation}
are also positive. Since we can now say that all elements in the
first row (and column) and all the diagonal elements obey
(\ref{Appendix1}), it follows, after some technical calculations,
that $a_{i}^{j}$ has to satisfy such inequality, however, with
some other function which vanishes for $\epsilon\to 0$. Finally,
we have that any of the elements of $A$ satisfies
\begin{equation}
\left|a_{i}^{j}-\frac{1}{d}\right|\leq \eta(\epsilon) \qquad
(i,j=0,\ldots,d-1)
\end{equation}
with $\eta(\epsilon)\to 0$ whenever $\epsilon\to 0$.
Of course, we can always assume that the elements in some fixed
row of $A$ is bounded by different $\epsilon$s. Then, however, we
can take the largest one. $\vrule height 4pt width 3pt depth2pt$
\begin{thebibliography}{0}
\bibitem{BB84}
C. H. Bennett and G. Brassard, Quantum cryptography: Public key
distribution and coin tossing. In Proceedings of the IEEE
International Conference on Computers, Systems and Signal
Processing, pp. 175–179, Bangalore, India, December, 1984, IEEE
Computer Society Press, New York.
\bibitem{Ekert}A. K. Ekert,
Phys. Rev. Lett. {\bf 67}, 661 (1991).
\bibitem{QPA}
D. Deutsch, A. Ekert, R. Jozsa, C. Macchiavello, S. Popescu, and
A. Sanpera, Phys. Rev. Lett. {\bf 77}, 2818 (1996).
\bibitem{distillation}
C. H. Bennett, G. Brassard, S. Popescu, B. Schumacher, J. A.
Smolin, and W. K. Wootters, Phys. Rev. Lett. {\bf 76}, 722 (1996).
\bibitem{Curty1}M. Curty, M. Lewenstein, and N. L\"utkenhaus,
Phys. Rev. Lett. {\bf 92}, 217903 (2004).
\bibitem{Curty2}M. Curty, O. G\"uhne, M. Lewenstein, and N. L\"utkenhaus,
Phys. Rev. A {\bf 71}, 022306 (2005).
\bibitem{LoChau}H.--K. Lo and H. F. Chau,
Science {\bf 283}, 2050 (1999).
\bibitem{ShorPreskill}P. W. Shor and J. Preskill,
Phys. Rev. Lett. {\bf 85}, 441 (2000).
\bibitem{DW1}I. Devetak and A. Winter,
Phys. Rev. Lett. {\bf 93}, 080501 (2004).
\bibitem{DW2}I. Devetak and A. Winter,
Proc. R. Soc. Lond. A {\bf 461}, 207 (2005).
\bibitem{bound}M. Horodecki, P. Horodecki, and R. Horodecki,
Phys. Rev. Lett. {\bf 80}, 5239 (1998).
\bibitem{KH0}K. Horodecki, M. Horodecki, P. Horodecki, and J. Oppenheim,
Phys. Rev. Lett. {\bf 94}, 160502 (2005).
\bibitem{KH}K. Horodecki, M. Horodecki, P. Horodecki, and J. Oppenheim,
arXiv:quant-ph/0506189, IEEE Trans. Inf. Theory, in press.
\bibitem{KHPhD}K. Horodecki, {\it General paradigm
for distilling classical key from quantum states - on quantum
entanglement and security}, PhD Thesis, submitted to Faculty of
Mathematics, Informatics and Mechanics, University of Warsaw, May
2008.
\bibitem{PHRANATO}P. Horodecki and R. Augusiak, {\it On quantum
cryptography with bipartite bound entangled states},
in Quantum Information Processing: From Theory to Experiment,
proceedings of the NATO Advanced Study
Institute on Quantum Computation and Quantum Information, eds. D. G. Angelakis, M. Christandl,
A. Ekert, M. Kay, and S. Kulik, NATO Science Series, III: Computer
and Systems Sciences, vol. 199, 19-29, IOS Press, Amsterdam, 2006.
\bibitem{HPHH}K. Horodecki, \L{}. Pankowski, M. Horodecki, and P. Horodecki,
IEEE Trans. Inf. Theory {\bf 54}, 2621 (2008).
\bibitem{RenesSmith}J. M. Renes and G. Smith,
Phys. Rev. Lett. {\bf 98}, 020502, 2007.
\bibitem{RB}
J. M. Renes, J.--Ch. Boileau, Phys. Rev. A {\bf 78}, 032335
(2008).
\bibitem{Christandl}M. Christandl, A. Ekert, M. Horodecki, P. Horodecki, J. Oppenheim, and R. Renner
{\it Unifying classical and quantum key distillation}, in {\it
Proceedings of the 4th Theory of Cryptography Conference}, Lecture
Notes in Computer Science vol. 4392, pp. 456-478, 2007.
\bibitem{SmithYard}
G. Smith and J. Yard, Science {\bf 231}, 1812 (2008).
\bibitem{Renner}
R. Renner, Int. J. Quant. Inf. {\bf 6}, 1 (2008); see also
quant-ph/0512258.
\bibitem{UnconditionalSecurity}
K. Horodecki, M. Horodecki, P Horodecki, D. Leung, J. Oppenheim,
Phys. Rev. Lett. {\bf 100}, 110502 (2008); IEEE Trans. Inf. Theory
{\bf 54} 2604 (2008).
\bibitem{RK}R. Renner and R. K\"onig, {\it Universally composable privacy amplification against quantum adversaries }
in Proc. of TCC 2005, LNCS, Springer, vol. 3378 (2005), pp.
407-425 (arXiv:quant-ph/0403133).
\bibitem{KGR}R. Renner, N. Gisin, and B. Kraus, Phys. Rev. A
{\bf 72}, 012332 (2005); B. Kraus, N. Gisin, and R. Renner, Phys.
Rev. Lett. {\bf 95}, 080501 (2005);
\bibitem{MCL}T. Moroder, M. Curty, N. L\"utkenhaus, Phys. Rev. A {\bf 74}, 052301
(2006).
\bibitem{DoktoratRA}R. Augusiak, {\it On the distillation of secure key form
multipartite entangled quantum states}, PhD thesis, Gda\'nsk,
Poland, April 2008.
\bibitem{PHRA}P. Horodecki and R. Augusiak,
Phys. Rev. A {\bf 74}, 010302(R) (2006).
\bibitem{GHZstates}D. M. Greenberger, M. Horne, and A. Zeilinger,
in {\it Bell’s Theorem, Quantum Theory, and Conceptions of the Universe},
ed. M. Kafatos, Kluwer Academic, Dodrecht, 1989.
\bibitem{MHMeasures}M. Horodecki,
Quant. Inf. Comp. {\bf 1}, 3 (2001).
\bibitem{PH_przegladowka}P. Horodecki, {\it Bound entanglement},
in {\it Lecures on Quantum Information}, eds. D. Bruss and G. Leuchs,
Wiley-VCH Verlag, Weinheim, 2007, p. 209.
\bibitem{RelativeEntanglement}V. Vedral, M. B. Plenio, M. A. Rippin, and P. L. Knight,
Phys. Rev. Lett. {\bf 78}, 2275 (1997).
\bibitem{Ruskai}M. B. Ruskai,
Rev. Math. Phys. {\bf 6}, 1147 (1994).
\bibitem{RAPH1}R. Augusiak and P. Horodecki,
Phys. Rev. A {\bf 74}, 010305(R) (2006).
\bibitem{Smolin}J. A. Smolin,
Phys. Rev. A {\bf 63}, 032306 (2001).
\bibitem{RAPH2}R. Augusiak and P. Horodecki,
Phys. Rev. A {\bf 73}, 012318 (2006).
\bibitem{GSS2}S. Bandyopadhyay, I. Chattopadhyay, V. P. Roychowdhury, and D. Sarkar,
Phys. Rev. A {\bf 71}, 062317 (2005).
\bibitem{WangYing}G. Wang and M. Ying,
Phys. Rev. A {\bf 75}, 052332 (2007).
\bibitem{CHSH}J. F. Clauser, M. A. Horne, A. Shimony, and R. A. Holt,
Phys. Rev. Lett. {\bf 23}, 880 (1969).
\bibitem{ScaraniGisin}V. Scarani and N. Gisin, Phys. Rev. Lett. {\bf
87}, 117901 (2001); Phys. Rev. A {\bf 65}, 012311 (2001).
\bibitem{Sens}A. Sen(De), U. Sen, and M. \.Zukowski, Phys. Rev. A
{\bf 68}, 032309 (2003).
\bibitem{HilleryBuzek}M. Hillery, V. Bu\v{z}ek, and A. Berthiaume,
Phys. Rev. A {\bf 59}, 1829 (1999).
\bibitem{Graph}R. Raussendorf, D. E. Browne, and H.--J. Briegel,
Phys. Rev. A {\bf 68}, 022312 (2003).
\bibitem{LiWinter}K. Li, A. Winter, X.--B. Zou, G.--C. Guo,
{\it Nonadditivity of the Private Classical Capacity of a Quantum
Channel}, arXiv:0903.4308.
\bibitem{BoundInfoBipartite}R. Renner and S. Wolf, {\it Towards proving the existence of "bound" information}
in {\it Proceedings of 2002 IEEE International Symposium on
Information Theory (IEEE)}, Lausanne, Switzerland, June 30 - July
5, 2002, p. 103.
\bibitem{BoundInfoMultipartite}A. Ac\'in, J. I. Cirac, and Ll. Masanes,
Phys. Rev. Lett {\bf 92}, 107903 (2004).
\bibitem{MultiSquashed}D. Yang, K. Horodecki, M. Horodecki, P. Horodecki, J. Oppenheim, and W.
Song, {\it Squashed entanglement for multipartite states and
entanglement measures based on the mixed convex roof}, to be
published in IEEE Trans. Inf. Theory, arXiv:0704.2236.
\bibitem{NavascuesBae}M. Navascu\'es, J. Bae, J. I. Cirac, M. Lewenstein, A. Sanpera, and A.
Ac\'in, Phys. Rev. Lett. {\bf 94}, 010502 (2005); C. Rod\'o, O.
Romero--Isart, K. Eckert and A. Sanpera, Open Syst. Inf. Dyn. {\bf
14} 69 (2007).
\bibitem{2xN}J. Rigas, O. G\"uhne, N. L\"utkenhaus, Phys. Rev. A
{\bf 73}, 012341 (2006).
\bibitem{Garcia-Patron}R. Garc\'ia--Patr\'on, N. J. Cerf, Phys. Rev. Lett. {\bf 102}, 130501
(2009).
\bibitem{Navascues}M. Navascu\'es and A. Ac\'in, Phys. Rev. A {\bf
72}, 012303 (2005).
\bibitem{ChJ}M.-D. Choi, Linear Alg.
Appl. {\bf 10}, 285 (1975); A. Jamio\l{}kowski, Rep. Math. Phys.
{\bf 3}, 275 (1972).
\end{thebibliography}
\end{document}
\begin{document}
\RUNAUTHOR{Castro, Bodur, and Song}
\RUNTITLE{Markov chain-based policies for multi-stage stochastic integer linear programming}
\TITLE{
Markov Chain-based Policies for Multi-stage Stochastic Integer Linear Programming with an Application to Disaster Relief Logistics}
\ARTICLEAUTHORS{
\AUTHOR{Margarita P. Castro}
\AFF{Department of Industrial and Systems Engineering, Pontificia Universidad Católica de Chile, Santiago 7820436, Chile \EMAIL{margarita.castro@ing.puc.cl}}
\AUTHOR{Merve Bodur}
\AFF{Department of Mechanical and Industrial Engineering, University of Toronto, Toronto, Ontario M5S 3GH, Canada, \EMAIL{bodur@mie.utoronto.ca}}
\AUTHOR{Yongjia Song}
\AFF{Department of Industrial Engineering, Clemson University, Clemson, South Carolina 29631, \EMAIL{yongjis@clemson.edu}}
}
\ABSTRACT{
We introduce an aggregation framework to address multi-stage stochastic programs with mixed-integer state variables and continuous local variables (MSILPs). Our aggregation framework imposes additional structure to the integer state variables by leveraging the information of the underlying stochastic process, which is modeled as a Markov chain (MC). We demonstrate that the aggregated MSILP can be solved exactly via a branch-and-cut algorithm integrated with a variant of stochastic dual dynamic programming. To improve tractability, we propose to use this approach to obtain dual bounds. Moreover, we apply two-stage linear decision rule (2SLDR) approximations, in particular a new MC-based variant that we propose, to obtain high-quality decision policies with significantly reduced computational effort. We test the proposed methodologies in an MSILP model for hurricane disaster relief logistics planning. Our empirical evaluation compares the effectiveness of the various proposed approaches and analyzes the trade-offs between policy flexibility, solution quality, and computational effort. Specifically, the 2SLDR approximation yields provable high-quality solutions for our test instances supported by the proposed bounding procedure. We also extract valuable managerial insights from the solution behaviors exhibited by the underlying decision policies.
}
\KEYWORDS{Multi-stage stochastic programming, Markov chain, linear decision rules, stochastic dual dynamic programming, disaster relief logistics}
\maketitle
\section{Introduction} \label{sec:introduction}
Multi-stage stochastic integer linear programming (MSILP) problems \Merve{form an important} class of
\Merve{optimization}
models for sequential decision-making under uncertainty. These problems consider decisions at each stage of the sequential process, which consists of local variables---
that only participate in a single stage locally---and state variables---
that link multiple stages together.
\Merve{In this paper,} we consider MSILP problems with continuous local variables and mixed-integer state variables, which arise in many applications such as hydro-power scheduling \citep{hjelmeland2018nonconvex}, unit commitment \citep{zou2018multistage}, and disaster relief logistics planning (see Section \ref{sec:application}).
Despite the vast applicability of MSILP, these problems are computationally prohibitive to solve due to the non-convexity of the feasibility set caused by the integer state variables. Existing solution methods utilize the \Song{stage-wise decomposition framework, such as the stochastic dual dynamic programming (SDDP) algorithm, with convexification procedures for the expected cost-to-go functions, which require certain \Merve{limiting} assumptions such as pure binary state variables~\citep{zou2019stochastic} and Lipschitz continuous cost-to-go functions~\citep{ahmed2020stochastic}.}
We propose a \Song{different perspective to handle such MSILP problems without attempting to convexify the expected cost-to-go functions \Merve{that are parametrized by} a mixture of continuous and integer state variables}. Instead, we construct a \textit{partially extended reformulation} by transforming the original problem into one that has integer variables only in the first stage. By doing so, we can employ decomposition algorithms, such as the branch-and-cut (B\&C) algorithm, to decompose the problem into a master problem that corresponds to a first-stage mixed integer linear program, and a subproblem defined for the remaining stages, which is
a multi-stage stochastic linear program that can be handled by stage-wise decomposition algorithms such as the SDDP algorithm. However, this reformulation may lead to exponentially many first-stage integer variables, \Merve{thus can be} computationally challenging \Merve{to solve}.
To alleviate the main challenge of the partially extended reformulation, we present an aggregation framework that imposes certain structures to the reformulation by aggregating the integer state variables
. This framework could potentially leverage the structure of the underlying stochastic process to impose a restriction on the problem at hand that leads to an informative decision policy. In particular, we consider the case where the underlying stochastic process is modeled as a Markov chain (MC) and present several aggregation schemes based on the MC state information that lead to a wide range of aggregated MSILP approximations.
\Song{
\Merve{We demonstrate that} the aggregated MSILP models
\Merve{can be solved exactly by a} B\&C framework integrated with a variant of the SDDP algorithm.} The framework decomposes the problem into a master problem and a set of subproblems, where the master problem is concerned with the first-stage decisions including all the integer state variables, and the subproblems deal with the remaining stages, which involve continuous state and local variables. \Song{While this framework guarantees an optimal solution for the aggregated MSILP, solving a multi-stage stochastic program (via SDDP) at each incumbent solution encountered in the branch-and-bound process can be computationally prohibitive. We therefore settle with a relaxation bound from this approach (by relaxing the \Merve{SDDP} termination criteria
), and pursue a more tractable approach that further approximates the aggregated MSILP as a two-stage stochastic program using two-stage linear decision rules (2SLDRs) \citep{bodur2018two}. We present 2SLDR alternatives from the literature and propose a new variant that leverages the MC structure of the underlying stochastic process. The resulting problem can be solved via Benders decomposition or any of its enhanced versions.}
We perform a case study for the proposed aggregation framework and solution methods on a hurricane disaster relief logistic planning problem (HDR), where the stochastic process is given by an MC that models the evolution of the hurricane. We show how to create several aggregated MSILPs and 2SLDR approximations for this application. Our numerical experiments compare these alternatives, find suitable transformations and 2SLDR variants to obtain high-quality solutions with provable bounds, and present managerial insights about the resulting policies.
The remainder of the paper is organized as follows. Section \ref{sec:framework} presents our aggregation framework and a literature review on MSILP methodologies. Section \ref{sec:methodology} introduces our novel B\&C framework integrated with the SDDP algorithm and describes the 2SLDR scheme with our proposed MC-based variant. Section \ref{sec:application} presents a case study of the proposed methodologies for HDR. Section \ref{sec:experiments} presents the numerical experiment results and we end with some concluding remarks in Section \ref{sec:conclusions}.
\iffalse
\begin{enumerate}
\item This work focuses on multi-stage stochastic integer linear programming (MSILP) problems with mixed-integer state variables and continuous local/recourse variables.
\item These types of problems are very challenging, arise in many applications, and there are limited procedures to solve them. One of the main challenges with these types of problems is the presence of integer variables since the feasibility set is nonconvex.
\item If we only have integer variables in the first stage, we can handle this problem. We can decompose the problem into two phases. The first phase is a master problem and the second phase is a MSP with only continuous variables. It can also be solved by means of approximation using 2S-LDR.
\item There are few techniques to tackle these problems when we have integer variables in other stages. However, they are limited to special cases (e.g., binary variables, Lipschitz continuous). These techniques are variants of SDDP to handle integer variables.
\item With these in mind, we would like to transform our problem into the case when we only have integer variables in the first stage. This can be achieved by creating scenario copies of the integer variables and adding them to the first-stage problem along with nonanticipativity constraints. If the uncertainty model is in the form of a scenario tree, an alternative would be to create node-based copies of those variables instead, that are nonanticipative by definition. We call these variables \textit{first-stage reformulation variables}.
Such a reformulation usually requires an exponential number of new variables (even when we have stage-wise independence) since they depend on the history of the stochastic process. As such, it would be very challenging to solve.
\item One way to alleviate this difficulty is to remove the dependence of the first-stage reformulation variables to the history, by restricting them in a certain way to reduce their number. For instance, we can make them all the same regardless of their history if they belong to the same stage (i.e., stage-based restriction). Note that this is different to the standard two-stage restriction since we leave the continuous stage variables the same (reference from Shabbir). (find some references for the integer transformation---very likely that there are not).
\item The stage-based restriction would usually lead to poor quality solutions.
Thus, we extend this idea and propose MC-based restrictions for problems where the uncertainty is modeled as a Markov chain (see Section X.X for generalizations and stage-wise independent case).
That is, we try to \textbf{leverage the structure of the underlying stochastic process} to create high-quality policies.
We restrict the first-stage reformulation variables to be a function of the ``Markov state" of their associated stage (or their limited history), to get a good balance of the difficulty of solving the reformulation and the quality of the policy.
Thus, the resulting restriction will capture the structure of the stochastic process and allow us to have a manageable number of integer first-stage variables and only continuous variables in the other stages.
\item In order to solve the obtained restriction, where we have integer variables only in the first stage, we propose a branch-and-cut (B\&C) algorithm. The master problem only considers the first-stage variables. The subproblem is an MSP with only continuous variables that can be solved, for instance, with an SDDP algorithm. To the best of our knowledge, B\&C algorithms where the node relaxation is solved by SDDP
haven't been applied before to MSILP with only integer first-stage variables.
\item Solving an MSP on every node of the branch-and-bound tree can be very challenging. Therefore, we also consider a more tractable alternative that transforms our restriction problem into a two-stage SP via LDR, by restricting the continuous state variables to be a linear function of the history of the stochastic process. We consider some of the two-stage LDR presented in the literature and propose new ones using similar ideas to create policies for the integer state variables. The resulting problem can be solved via Benders decomposition and any of its enhancements proposed in the literature.
\item We test our procedure in a disaster relief problem for hurricanes, where the stochastic process is defined by a Markov chain. We show how to create MC-based policies and LDRs for this application. We compare them again stage-based policies and LDR, and the optimal policy solved by an extensive formulation.
\item Then, we have an experimental section focusing on the hurricane application.
\item Finish with the conclusions.
\end{enumerate}
\fi
\section{Aggregation Framework for Integer State Variables} \label{sec:framework}
This section describes the proposed aggregation framework to generate informative policies for a class of MSILP problems,
\Merve{with} the main idea \Merve{of aggregating}
integer state variables in the MSILP by leveraging the structure of the underlying stochastic process.
\subsection{Problem Formulation for MSILP}
We consider a class of MSILP models with continuous local variables and mixed-integer state variables. Although integer variables may also appear as local variables in a generic MSILP, the restriction here (local variables being continuous only) does not limit the model from being applicable to a broad range of problems that arise in real-world applications. For example, a typical multi-period stochastic optimization problem with local production and distribution constraints can be represented with this model, such as the one we study in Section~\ref{sec:application}, motivated by the application of disaster relief logistics planning. Other possible applications include hydro-power scheduling \citep{hjelmeland2018nonconvex} and unit commitment \citep{zou2018multistage} problems.
We assume that the underlying stochastic process has finite support and, as such, it can be represented by a scenario tree ${\mathcal T}$. This assumption is not restrictive since we can construct an approximate scenario tree by means of sampling if the stochastic process has continuous support, a common practice in the literature~\citep{shapiro2021lectures}. Let $T$ be the number of stages and let ${\mathcal N}$ be the set of nodes associated with the scenario tree ${\mathcal T}$. The set of nodes in each stage $t \in [\periods]:=\{1,...,T\}$ is given by ${\mathcal N}_t$. The root node is denoted by $\textsf{r}$ and it is the only node in the first stage, i.e., ${\mathcal N}_1=\{\textsf{r}\}$. Each node $n \in {\mathcal N}_t$ with $t>1$ has a unique parent in ${\mathcal N}_{t-1}$, denoted by $a(n)$. Thus, there is a unique path from $\textsf{r}$ to any node $n\in {\mathcal N}_t$ in stage $t>1$, and we let ${\mathcal P}(n)$ represent the set of nodes on this path (including $\textsf{r}$ and $n$). For each non-leaf node $n$ (i.e., $n\in {\mathcal N}_t$ for $t \in [T-1]$), ${\mathcal C}(n)$ is the set of children of $n$, that is, the nodes whose parent is $n$. The probability that node $n$ occurs is $p_n$, and we have $p_n>0$ for all $n\in {\mathcal N}$ and $\sum_{n \in {\mathcal N}_t} p_n = 1$ for all $t\in [\periods]$. The transition probability from node $n \in {\mathcal N}_t$ for $t\in [T-1]$ to $n' \in {\mathcal C}(n)$ can then be written as $\bar{p}_{nn'}:= p_{n'}/p_n$.
Next, we present a generic problem formulation for the class of MSILP problems that we consider in~\eqref{mod:scenarioTree}, which is often referred to as the nested formulation. For each node $n\in {\mathcal N}$, let $x_n \in \mathbb{R}^k$ and $z_n \in \Z^\ell$ be the set of continuous and integer state variables, respectively, and $y_n\in \mathbb{R}^r$ be the continuous local variables. The objective coefficient vectors associated with $x_n, y_n$, and $z_n$ are denoted by $d_n, h_n$, and $c_n$, respectively. The overall objective is to minimize the sum of the local cost and the expected future cost at the root node:
\begin{align}
Q_{\textsf{r}} = \min\; & c_\textsf{r}^\top z_{\rootnode} + d_\textsf{r}^\top x_{\rootnode} + h_\textsf{r}^\top y_{\rootnode} + \sum_{n \in {\mathcal C}(\textsf{r})}\bar{p}_{\textsf{r} n} Q_n(x_{\rootnode},z_{\rootnode} ) , \tag{$P$} \label{mod:scenarioTree} \\
\text{s.t.}\ &
H_{\textsf{r}} z_{\rootnode} \geq g_{\textsf{r}}, \;
J_{\textsf{r}} x_{\rootnode} \geq f_{\textsf{r}}, \;
C_{\textsf{r}} x_{\rootnode} + D_{\textsf{r}} z_{\rootnode} + E_{\textsf{r}} y_{\rootnode} \geq b_{\textsf{r}}, \label{eq:st_root}\\
&x_{\rootnode} \in \mathbb{R}^k,\; y_{\rootnode} \in \mathbb{R}^r,\; z_{\rootnode} \in \Z^\ell, \nonumber
\end{align}
where the cost-to-go function $Q_n(\cdot, \cdot)$ associated with each non-root node $n\in {\mathcal N}\setminus \{\textsf{r}\}$ is:
\begin{subequations}
\begin{align}
Q_n(x_{a(n)}, z_{a(n)}) = \min\; & c_n^\top z_n + d_n^\top x_n + h_n^\top y_n + \sum_{n' \in {\mathcal C}(n)}\bar{p}_{nn'} Q_{n'}(x_n,z_n),\nonumber \\
\text{s.t.}\ & H_n z_n \geq G_n z_{a(n)} + g_n, \label{eq:st_onlyz} \\
& J_n x_n \geq F_n x_{a(n)} + f_n, \label{eq:st_onlyx} \\
& C_n x_n + D_n z_n + E_n y_n \geq A_n x_{a(n)} + B_n z_{a(n)} + b_n, \label{eq:st_all} \\
&x_n \in \mathbb{R}^k,\; y_n \in \mathbb{R}^r,\; z_n \in \Z^\ell. \nonumber
\end{align}
\end{subequations}
The cost-to-go function $Q_n(\cdot, \cdot)$ associated with a leaf node $n\in {\mathcal N}_T$ only involves the cost incurred in the terminal stage $T$ since its set of children nodes is empty, i.e., ${\mathcal C}(n) = \emptyset$. Our proposed formulation considers, separately, one set of constraints~\eqref{eq:st_onlyz} for integer state variables, one set of constraints~\eqref{eq:st_onlyx} for continuous state variables, and one set of constraints~\eqref{eq:st_all} that link state and local variables. Although alternative formulations are possible, this form accommodates the presentation of the proposed aggregation framework, which will be described in Section~\ref{sec:methodology}.
Problem \eqref{mod:scenarioTree} is notoriously challenging to solve for two reasons. The first challenge is the nature of multi-stage stochastic programs where the underlying stochastic process is modeled as a scenario tree of an exponentially large size. Thus, it is impractical to solve these large-scale problems directly and decomposition methods can be deemed necessary. The second challenge is the existence of integer decision variables, which makes the expected cost-to-go functions (i.e., the value functions that represent the expected future cost defined at each node of the scenario tree) to be nonconvex in general. The following section reviews existing methods in the literature to handle \eqref{mod:scenarioTree} and similar problems, focusing on how
they attempt to overcome
these challenges.
\subsection{Literature Review}
In this section, we discuss recent advances in the literature that address MSILPs like formulation~\eqref{mod:scenarioTree}.
Nested Benders decomposition~\citep{birge1985decomposition} and its sampling variant under the assumption of stage-wise independence, SDDP~\citep{pereira1991multi}, are classical decomposition algorithms for multi-stage stochastic linear programs.
Recently,~\cite{zou2019stochastic} develop the SDDiP algorithm for MSILPs with pure binary state variables, where the nonconvex expected cost-to-go functions are lower approximated by a piecewise linear convex envelope constructed by the so-called Lagrangian cuts. These cuts are guaranteed to be exact at points where the associated state variables are binary, leading to the exactness and finite convergence of the algorithm. The problem becomes more challenging when the set of state variables includes general mixed-integer decision variables, which is the case for formulation~\eqref{mod:scenarioTree}. One option is to perform a binarization procedure to reformulate general integer state variables and approximate continuous state variables via binary variables, respectively, and then apply the SDDiP algorithm~\citep{zou2018multistage}. However, this approach may suffer from a large number of binary state variables as a result of the binarization.
Alternatively, when the expected cost-to-go functions are assumed to be Lipschitz continuous,~\cite{ahmed2020stochastic} propose nonlinear cuts to exactly approximate the nonconvex expected cost-to-go functions via the augmented Lagrangian method. More recently,~\cite{zhang2019stochastic} propose and study a unified theoretical framework for general multistage nonconvex stochastic mixed integer nonlinear programming problems where the expected cost-to-go functions are not necessarily Lipschitz continuous. Specifically, they propose regularized expected cost-to-go functions that can be exactly approximated by the generalized conjugacy cuts, generalizing the results of~\cite{ahmed2020stochastic}. However, despite these theoretical convergence properties, the nonlinear cut generation from augmented Lagrangian and the implementation of a variant of the nested Benders decomposition algorithm that incorporates these cuts can be computationally challenging.
\cite{fullner2022non} extend the ideas of binarization procedure in~\cite{zou2019stochastic} and the regularization procedure in~\cite{zhang2019stochastic} to develop the so-called nonconvex nested Benders decomposition algorithm for generic multi-stage mixed integer nonconvex nonlinear program. Despite its generality and successful implementation in \emph{deterministic} multi-stage MINLP problems, it is unclear, to the best of our knowledge, whether or not applying this approach to MSILPs with a large-scale scenario tree is computationally feasible.
In sum, all the works mentioned above aim to directly address the nonconvex (and sometimes non-Lipschitz) expected cost-to-go functions defined at each node of the scenario tree by developing exact lower-bounding techniques. In contrast, we circumvent the challenge of approximating the nonconvex expected cost-to-go functions by relocating all integer state variables (defined in the node subproblems) to the first stage (defined in the root node), so that the resulting cost-to-go functions defined at non-root nodes are convex and can be approximated (exactly) by a decomposition scheme (e.g., nested Benders or SDDP). To the best of our knowledge, our paper is one of the first that presents the performance of stage-wise decomposition based algorithms for MSILPs with both integer and continuous state variables. In addition, we showcase the performance of the proposed heuristic decision policies via decision rules, 2SLDRs, as well as the lower bounding technique constructed based on our exact algorithm. We next discuss the proposed reformulation in detail.
\subsection{A Partially Extended Formulation for MSILP}
We propose a reformulation for MSILP in the form of a partially extended formulation, in which all integer decisions can be considered as the first-stage variables. The advantage of this reformulation is two-fold. First, the resulting cost-to-go functions at each node $n \in {\mathcal N}\setminus\{\textsf{r}\}$ have only continuous variables (including local and continuous state variables), making it amenable to apply standard Benders-type cutting-plane approximation to these convex functions. Second, since integer variables only appear in the first-stage problem, we could apply, e.g., a B\&C procedure by branching on these first-stage integer variables and approximating the expected cost-to-go functions using stage-wise decomposition algorithms such as the SDDP (see Section \ref{sec:methodology}). Specifically, the proposed partially extended (nested) formulation is given by:
\begin{align*}
Q^R_{\textsf{r}} = \min\; &
\sum_{n\in{\mathcal N}}p_nc_n^\top z_n
+ d_\textsf{r}^\top x_{\rootnode} + h_\textsf{r}^\top y_{\rootnode} + \sum_{n \in {\mathcal C}(\textsf{r})} \bar{p}_{\textsf{r} n} Q^R_n(x_{\rootnode},z), \tag{$P^R$} \label{mod:reformulation} \\
\text{s.t.}\; &
H_{\textsf{r}} z_{\rootnode} \geq g_{\textsf{r}}, \;
J_{\textsf{r}} x_{\rootnode} \geq f_{\textsf{r}}, \;
C_{\textsf{r}} x_{\rootnode} + D_{\textsf{r}} z_{\rootnode} + E_{\textsf{r}} y_{\rootnode} \geq b_{\textsf{r}}, \\
& H_n z_n \geq G_n z_{a(n)} + g_n, & \forall n \in {\mathcal N}\setminus\{\textsf{r}\},
\end{align*}
where the cost-to-go function $Q^R_n(\cdot, \cdot)$ associated with
$n\in {\mathcal N}\setminus \{\textsf{r}\}$ is defined in a nested fashion as:
\begin{equation*}
Q^R_n(x_{a(n)}, z) = \min_{x_n\in \mathbb{R}^k, y_n \in \mathbb{R}^r} \left\{d_n^\top x_n + h_n^\top y_n + \sum_{n' \in {\mathcal C}(n)} \bar{p}_{n n'} Q^R_{n'}(x_n, z) \mid \eqref{eq:st_onlyx}-\eqref{eq:st_all}\right\}.
\end{equation*}
The key difference between this reformulation~\eqref{mod:reformulation} and the original formulation~\eqref{mod:scenarioTree} is that all the integer state variables $z = \{z_n\}_{n \in {\mathcal N}}$, along with their associated constraint sets~\eqref{eq:st_onlyz}, are now ``moved'' to the problem at the root node $\textsf{r}$, i.e., they are treated as first-stage variables and constraints. As such, the cost-to-go function associated with each non-root node only includes the state and local continuous variables and their associated constraints. Since the continuous state variables remain to be defined in the individual node-based subproblems through the nested form, we call this reformulation a \textit{partially extended formulation}. The validity of this reformulation is clear because we only relocate the (node-based) integer state variables in order to achieve the desired property of piece-wise linear convex expected cost-to-go functions, given its first-stage decisions, which accommodates the application of stage-wise decomposition methods such as SDDP.
\subsection{An Aggregation Framework via MC-based Structural Policies}
While the proposed partially extended formulation~\eqref{mod:reformulation} has piece-wise linear convex expected cost-to-go functions, given its first-stage decisions, it may lead to exponentially many integer variables in the first stage. We propose an aggregation framework that imposes structures to the formulation by aggregating the integer state variables to alleviate this challenge while maintaining high-quality solutions. In that regard, the idea is to leverage the structure of the underlying stochastic process to impose a restriction to the problem and obtain an informative decision policy.
In this paper, we consider the case where the underlying stochastic process is modeled as a Markov chain (MC). We choose this specific structure for two reasons. First, many stochastic processes in real-world applications can be modeled or well-approximated using MCs~\citep{lohndorf2019modeling}, including disaster relief logistics planning problems that we focus on in our numerical experiments (see Section~\ref{sec:application}). Second, it facilitates the exposition of our aggregation framework and allows us to construct a wide range of structured policies. We discuss alternative structural policies beyond those based on MC models in Remark~\ref{re:otherStochasticProcesses}.
To start, let ${\mathcal M}$ be the set of MC states where each $m\in {\mathcal M}$ is an $s$-dimensional vector (i.e., $m \in \mathbb{R}^s$). Let ${\mathcal M}_t$ represent the set of MC states that are reachable in stage $t\in [\periods]$, starting from a given initial state. Notation $m^t=(m_1,...,m_t)$, where $m_{t'} \in {\mathcal M}_{t'}$ for $t'\in [t]$, represents a sequence of admissible MC states, that is, the probability to reach $m_{t'+1}$ given $m_{t'}$ is positive for all $t' \in [t-1]$.
We now describe the connection between the MC states and the scenario tree model used in the MSILP formulation~\eqref{mod:scenarioTree}. Each node $n\in {\mathcal N}_t$ ($t\geq 1$) of the scenario tree corresponds to an MC state, $m_t(n) \in {\mathcal M}_t$, and is uniquely determined by the trajectory of the stochastic process from the root node (initial MC state) to MC state $m_t(n)$, that is, the sequence of MC states $m^t(n) = (m_1(\textsf{r}), ..., m_{t-1}(a(n)),m_{t}(n))$. To simplify the exposition, we write $m^t$ instead of $m^t(n)$ and $m_t$ instead of $m_t(n)$ when we refer to any node $n \in {\mathcal N}_t$.
As stated above, the MC representation of the stochastic process allows us to parameterize each node in the scenario tree by means of their corresponding stage and sequence of MC states, i.e., for each $n \in {\mathcal N}_t$, we have $n \equiv (t, m^t)$. We can then rewrite the integer state variables $z_n$ associated with node $n$ as $z_n \equiv z_{t,\tmarkovhistory}$. Based on this parameterization, our aggregation framework relies on a linear transformation that will compress the full MC history up to stage $t$, namely the vector $m^t$ of size $s\cdot t$, into a vector $\maptmarkov m^t$ of size $q_t \leq s\cdot t$, by means of a transformation matrix $\maptmarkov \in \Z^{ q_t \times s\cdot t}$. That is, it parametrizes the integer state variables of stage $t$ by the limited information $\maptmarkov m^t$:
\begin{equation} \label{eq:generalRestricition}
z_n \equiv z_{t,\tmarkovhistory} \rightarrow \za_{t,\maptmarkov \tmarkovhistory} \qquad \qquad \forall n \in {\mathcal N}_t, t \in [\periods].
\end{equation}
It is clear that this aggregation significantly reduces the number of integer state variables if $q_t \ll s\cdot t$.
Different transformations can impose different structural properties on the discrete decisions and, thus, lead to policies with different levels of aggregation. Table \ref{tab:transformations} summarizes a set of transformations, the resulting aggregated variables, and the conditions under which the original integer state variables associated with nodes $n$ and $n'$ in ${\mathcal N}_t$ correspond to the same (aggregated) variables, i.e., $z_n$ and $z_{n'}$ have the same value given by $\za_{t,\maptmarkov \tmarkovhistory}$. We explain each of these policies below.
\begin{table}[tb]
\aboverulesep=0ex
\belowrulesep=0ex
\setlength\extrarowheight{4pt}
\centering
\begin{tabular}{c|c|c|c}
\toprule
Name & Transformation matrix $\maptmarkov$ & Aggr. var. & Condition for $z_n \equiv z_{n'}, \ \forall n,n' \in {\mathcal N}_t$ \\
\midrule
\texttt{HN} & $\bm{0}^{1 \times s \cdot t}$ & $z^A_{t,0}$ & $t(n) = t(n')$ \\
\texttt{MA} & $\left[
\bm{0}^{s \times s \cdot (t-1)} \ | \ \mathbb{I}^{s \times s}\right]$ & $z^A_{t,m_t} $ & $m_t(n) = m_t(n')$ \\
\texttt{MM} & $\left[
\bm{0}^{2 s \times s \cdot (t-2)} \ | \ \mathbb{I}^{2 s \times 2 s}
\right]$ & $z^A_{t,(m_{t-1},m_t)}$ & $ m_t(n) = m_t(n') \ \& \ m_{t-1}(a(n)) = m_{t-1}(a(n'))$ \\
\texttt{PM} & $\left[
\bm{0}^{(\bar{s} + s) \times (s \cdot t - \bar{s} - s)} \ | \ \mathbb{I}^{(\bar{s} + s) \times (\bar{s} + s)}
\right] $ & $z^A_{t,(\bar{m}_{t-1},m_t)}$ & $ m_t(n) = m_t(n') \ \& \ \bar{m}_{t-1}(a(n)) = \bar{m}_{t-1}(a(n')) $ \\
\texttt{FH} & $\mathbb{I}^{s \cdot t \times s \cdot t} $ & $z^A_{t,m^t}$ & $ m^t(n) = m^t(n')$ \\*[0.1cm]
\bottomrule
\end{tabular}
\caption{MC-based transformations examples, the resulting aggregated variables (Aggr. var.), and the equivalence of the original variables. $\bm{0}$ and $\mathbb{I}$ denote the zero and identity matrices, respectively.}
\label{tab:transformations}
\end{table}
{
The simplest transformation we consider is
a \textit{Here-and-Now} (\texttt{HN}) transformation, which aggregates
integer state variables associated with nodes that are in the same stage:
$z_n \equiv z_{t,\tmarkovhistory} \rightarrow z^A_{t,0}$ for all $n \in {\mathcal N}_t$. Thus, the corresponding
integer state variables
will be independent of the realization of the stochastic process,
i.e., follow a static policy
(see, e.g., \cite{basciftci2019adaptive}).
An alternative is to leverage the structure of the MC to create a transformation that leads to more informative policies. For example, the \textit{Markovian} (\texttt{MA}) transformation aggregates integer state variables associated with nodes that share the same stage and the same MC state: $z_n \equiv z_{t,\tmarkovhistory} \rightarrow z^A_{t,m_t}$ for all $n \in {\mathcal N}_t$. Another option is to retain information from previous stages at the price of a larger number of aggregated variables. One such example is what we refer to as the \textit{Double Markovian} (\texttt{MM}) transformation, which goes one step beyond \texttt{MA}\ and aggregates integer state variables associated with nodes that share the same MC states in the current and previous stage. Following this pattern, the extreme case is to consider the entire history of the MC states starting from the initial MC state, referred to as the \textit{Full History} (\texttt{FH}) transformation. Note that \texttt{FH}\ leads to the original partially extended formulation for MSILP~\eqref{mod:reformulation}, since no integer state variables are effectively aggregated.
Although all the aforementioned MC-based transformations are constructed based on the complete MC state information, we may also consider partial information from MC states. Consider a subset of attributes of size $\bar{s} < s$ associated with MC state $m_t$, and assume without loss of generality that these attributes of interest correspond to the last $\bar{s}$ attributes in the MC state vector, i.e., we represent an MC state as $m_t = (\cdot, \bar{m}_t)\in \mathbb{R}^{s -\bar{s}}\times \mathbb{R}^{\bar{s}}$. Then, we can create a transformation based only on this partial vector $\bar{m}_t$ associated with the MC state $m_t$. As an example, we introduce the \textit{Partial Markovian} (\texttt{PM}) transformation that considers complete MC state information in the current stage but only partial information from the previous stage.
}
\begin{figure}
\caption{An illustrative example showing nodes with the same aggregated integer state variables for transformations \texttt{HN}, \texttt{MA}, and \texttt{MM}.}
\label{fig:example_aggregation}
\end{figure}
\begin{example} \label{example:scenario_tree}
Consider an MC ${\mathcal M}$ with two possible states (i.e., light \mcLight{1}\ and dark \mcDark{1}) and positive transition probabilities for all transitions between states. The first drawing in Figure \ref{fig:example_aggregation} depicts a scenario tree built for ${\mathcal M}$ starting at a light state and representing all possible MC states for $T=4$ stages, where the color of each node (light or dark) represents the corresponding MC state. The remaining drawings in Figure \ref{fig:example_aggregation} illustrate the effect of aggregation on the nodes of the scenario tree for transformations \texttt{HN}, \texttt{MA}, and \texttt{MM}\ (we omit \texttt{PM}\ because in this example the MC state vector has only one attribute). The \texttt{HN}\ tree shows that all nodes in the same stage will share the same set of (aggregated) integer state variables $z^A$ (i.e., they have the same color and shape). In contrast, \texttt{MA}\ aggregates integer state variables in nodes with the same stage and MC state. Thus, the \texttt{MA}\ tree shows nodes of two different colors in each stage, which correspond to two separate sets of aggregated integer state variables for each stage (e.g., the transformation for node $n\equiv (t,m^t) = (3,\mcLight{1}\mcDark{1}\mcLight{1})\in {\mathcal N}_3$ is $\Phi_3 m^3(\mcLight{1}\mcDark{1}\mcLight{1}) = m_3(\mcLight{1}) = \tikz \node[node-polygon5,violet!40!white, ,fill=violet!40!white] {};$, thus, $z^A_{3,\tikz \node[node-polygon5,violet!40!white, ,fill=violet!40!white] {};}$ represents its associated aggregated variables).
Finally, nodes in each stage of the \texttt{MM}\ tree have up to four different colors, as a result of an aggregation based on the MC states in both the current stage and the previous stage
(e.g., $z^A_{3,\tikz \node[node-polygon5, violet!80!black, fill=violet!80!black] {};}$ is associated with node (3,\mcLight{1}\mcDark{1}\mcDark{1}) because $\Phi_3 m^3(\mcLight{1}\mcDark{1}\mcDark{1}) = (m_2(\mcDark{1}), m_3(\mcDark{1})) = \tikz \node[node-polygon5, violet!80!black, fill=violet!80!black] {};$).
Overall, \texttt{HN}\ contains four sets of $z^A$ variables (i.e., one for each stage), \texttt{MA} \ seven, and \texttt{MM} \ eleven, while the \texttt{FH}\ variant contains 15 sets of $z^A$ variables (i.e., one for each node of the scenario tree).
$\square$
\end{example}
\begin{remark}
These transformations are just a few examples of a wide range of aggregations that can be constructed within our framework. One key consideration in constructing these aggregations is that there is a clear trade-off between the quality of the underlying decision policy and the corresponding computational effort. Aggregations that include (preserve) more information would naturally lead to better decision policies but also make the resulting problem harder to solve due to the increased number of integer state variables in the first stage. Choosing the right transformation depends on the specific problem type and the available computational resources. We leave the problem of adaptively finding the best transformation as a future research direction.
\end{remark}
\begin{remark}\label{re:otherStochasticProcesses}
The main advantage of assuming that the stochastic process is defined by an MC is that most stage-dependent processes can be approximated by an MC using a discretization technique with high accuracy \citep{bally2003quantization,pages2004optimal}. Also, the work by \cite{lohndorf2019modeling} shows that such an approximation can lead to high-quality solutions compared to the ones obtained by approximating the stochastic process with autoregressive models in certain applications. We also note that our approach is applicable to stage-wise independent stochastic processes. In these cases, one can approximate the probability distribution of a random variable by partitioning its support into a set of clusters and creating aggregations based on these clusters. For example, considering an MSILP with stochastic demand parameters, we may define three levels of demand (low, medium, and high) and create aggregation schemes based on each demand level.
\end{remark}
\subsection{Aggregated MSILP}
We now present the aggregated MSILP formulation after applying a transformation to the integer state variables $z$ via matrices $\maptmarkov, t\in [\periods]$. The new set of integer state variables is given by $z^A_t\in \Z^{\ell \cdot q_t}$ for each $t\in [\periods]$. We employ a mapping function $\phi_t:{\mathcal N}_t\rightarrow \{t\}\times[q_t]$ to relate a node $n\in {\mathcal N}_t$, which was originally used to index variables $z$, to the index corresponding to its aggregated variables, i.e., $\phi_t(n) = (t, \maptmarkov m^t)$ for each node $n \in {\mathcal N}_t$. Thus, the aggregated MSILP can be written as follows:
\begin{subequations}
\begin{align}
Q^A_{\textsf{r}} = \min\; & \sum_{n\in {\mathcal N}}p_n c_n^\top \za_{\phi_t(n)} + d_\textsf{r}^\top x_{\rootnode} + h_\textsf{r}^\top y_{\rootnode} + \sum_{n \in {\mathcal C}(\textsf{r})}\bar{p}_{\textsf{r} n} Q^A_n(x_{\rootnode},z^A) \tag{$P^A$} \label{mod:aggregated} \\
\text{s.t. }\; &
H_{\textsf{r}} z_{\rootnode} \geq g_{\textsf{r}}, \;
J_{\textsf{r}} x_{\rootnode} \geq f_{\textsf{r}}, \;
C_{\textsf{r}} x_{\rootnode} + D_{\textsf{r}} z_{\rootnode} + E_{\textsf{r}} y_{\rootnode} \geq b_{\textsf{r}}, \label{aggre:root}\\
& H_n \za_{\phi_t(n)} \geq G_n \za_{\phi_{t-1}(a(n))} + g_n, & \forall n \in {\mathcal N}\setminus\{\textsf{r}\}, \label{aggre:zphi}\\
& x_{\rootnode} \in \mathbb{R}^k, y_{\rootnode} \in \mathbb{R}^r, z^A \in \Z^{\ell \cdot \sum_{t \in [\periods]} q_t}, \label{aggre:vars}
\end{align}
\end{subequations}
where the cost-to-go function $Q^A_n(\cdot,\cdot)$ associated with
$n\in {\mathcal N}\setminus \{\textsf{r}\}$ is defined in a nested fashion as:
\begin{align*}
Q^A_n(x_{a(n)}, z^A) = \min\; & d_n^\top x_n + h_n^\top y_n + \sum_{n' \in {\mathcal C}(n)}\bar{p}_{n n'} Q^A_{n'}(x_n, z^A)\\
\text{s.t.}\; & J_n x_n \geq F_n x_{a(n)} + f_n, \\
& C_n x_n + D_n \za_{\phi_t(n)} + E_n y_n \geq A_n x_{a(n)} + B_n \za_{\phi_{t-1}(a(n))} + b_n, \\
& x_n \in \mathbb{R}^k, y_n \in \mathbb{R}^r.
\end{align*}
We note that any feasible solution of \eqref{mod:aggregated} is also feasible to \eqref{mod:scenarioTree} with the same objective value, because the only difference between \eqref{mod:aggregated} and \eqref{mod:scenarioTree} is that \eqref{mod:aggregated} enforces some integer variables of \eqref{mod:scenarioTree} to take the same values. However, the reverse is not true in general, and hence an optimal solution of \eqref{mod:aggregated} might be sub-optimal for \eqref{mod:scenarioTree}.
Although the aggregated problem~\eqref{mod:aggregated} potentially has much fewer integer variables than~\eqref{mod:reformulation}, depending on the transformation $\maptmarkov$, it may remain computationally challenging for an exact solution approach due to the existence of (aggregated) first-stage integer variables and the continuous state variables defined at each node of the scenario tree. We next introduce an exact solution approach based on a variant of the SDDP algorithm, and then present alternative approximation approaches to generate feasible decision policies and dual bounds that are computationally efficient.
\section{Solution Methodology} \label{sec:methodology}
We present two solution methods for the aggregated MSILP formulation~\eqref{mod:aggregated}. Both are decomposition methods implemented as B\&C algorithms, taking advantage of the fact that all the integer variables in~\eqref{mod:aggregated} are in the first stage. The first one is an exact method that employs a variant of the SDDP algorithm adapted to our setting as a subroutine. The second method employs 2SLDR to impose additional solution structures that lead to a two-stage approximation to the MSILP.
\subsection{SDDP Integrated B\&C Algorithm}
\label{sec:sddp}
We now present an exact algorithm for solving formulation~\eqref{mod:aggregated} by integrating the SDDP algorithm within a B\&C framework. The high-level idea is to decompose the problem into a master problem and a set of subproblems, where the master problem is concerned about the first-stage decisions including all the integer variables of~\eqref{mod:aggregated}, and the subproblems deal with the remaining stages, which involve continuous state and local variables.
The algorithm performs a branch-and-bound (B\&B) procedure for the master problem and enters a cut generation procedure every time an integer relaxation solution is encountered (e.g., done through the so-called callback functions in modern IP solvers such as CPLEX). To generate a cut, following the basic idea of Benders decomposition, we need to solve a subproblem associated with each child node of $\textsf{r}$ in the scenario tree, which deals with the remaining stages of~\eqref{mod:aggregated} via a variant of the SDDP algorithm for a given candidate solution $(\widehat{x}_{\rootnode}, \widehat{z}^A, \widehat{\theta})$ from the master problem. Note that we adapt the standard SDDP algorithm---which is applicable when we have stage-wise independence---to our aggregated MSILP, taking advantage of the MC structure in the stochastic process (representing stagewise-dependent uncertainty). In particular, we consider the so-called multi-cut version, where a variable $\theta_n$ is introduced for each $n \in {\mathcal C}(\textsf{r})$ to represent the value of the
outer approximation of the cost-to-go function at node $n$. Thus, the master problem is given by:
\begin{subequations}\label{SDDP-master}
\begin{align}
\overline{Q}^A_{\textsf{r}} = \min& \sum_{n\in {\mathcal N}}p_n c_n^\top \za_{\phi_t(n)} + d_\textsf{r}^\top x_{\rootnode} + h_\textsf{r}^\top y_{\rootnode} + \sum_{n \in {\mathcal C}(\textsf{r})}\bar{p}_{\textsf{r} n} \theta_{n} \\
\text{s.t.}\ & \eqref{aggre:root}-\eqref{aggre:vars}, \nonumber \\
& \theta_n \geq \alpha_n^\top x_{\rootnode} + \beta_n^\top z^A + \gamma_n, \quad \forall (\alpha_n,\beta_n,\gamma_n)\in \mathcal{B}_n, \ n \in {\mathcal C}(\textsf{r}),
\end{align}
\end{subequations}
where $\mathcal{B}_n$ stores the coefficients associated with all the Benders cuts constructed to approximate $Q^A_n(x_{\rootnode},z^A)$ during the solution procedure.
We refer the reader to Appendix \ref{app:bc_sddp} for a detailed description of the overall B\&C procedure, including a pseudo-code shown in Algorithm \ref{alg:bc-sddp}.
We next explain the implementation details of this B\&C framework integrated with SDDP. We first describe the SDDP subproblem structure and then present the entire SDDP sub-routine.
\subsubsection{SDDP Subproblems.}
A key feature of the SDDP algorithm is its capability to leverage the structure of the underlying stochastic process in defining the cost-to-go functions and the corresponding SDDP subproblems. For example, we only need to create one SDDP subproblem per stage in the case of stage-wise independence \citep{pereira1991multi}, and one subproblem per stage and MC state in the case when the stage-wise dependence structure is modeled by an MC \citep{bonnans2012energy,philpott2012dynamic}. In what follows, we describe the SDDP subproblems for formulation~\eqref{mod:aggregated} based on the MC structure and the employed transformation.
First, we observe that if the first-stage integer variables $z^A$ do not exist, instead of defining one subproblem for each node $n\in {\mathcal N}$, we can have one subproblem defined for each stage $t\in [\periods]$ and MC state $m_t\in {\mathcal M}_t$, that is, all nodes $n \in {\mathcal N}_t$ with $m_t(n)= m_t$ share the same cost-to-go function \citep{philpott2012dynamic}, thanks to the Markovian nature of the stochastic process. However, this might not be the case when we consider the aggregated variables $z^A$ because
cost-to-go functions with the same MC state might be associated with different aggregated variables depending on the chosen transformation. In such a case, the cost-to-go functions are defined over two different sets of variables. To see this, consider two nodes $n,n'\in {\mathcal N}_t$ with the same MC state (i.e., $m_t(n) = m_t(n') = m_t$) but with different sets of (aggregated) integer variables $z^A$ (i.e., $\phi_t(n)\neq \phi_t(n')$), then we need to define two separate SDDP subproblems for $n$ and $n'$.
In other words, we can only define the same SDDP subproblem for two nodes $n,n'\in {\mathcal N}_t$ if they correspond to the same MC state (i.e., $m_t(n)=m_t(n')$) \emph{and} the same aggregated variables (i.e., $\phi_t(n) = \phi_t(n')$). In the following example we use a policy graph---a concept introduced by \cite{dowson2020policy}---that represents the relationship between SDDP subproblems. Note that the policy graphs for \eqref{mod:aggregated} are constructed based on the MC transition graph and the chosen transformation.
\begin{example}\label{exa:sddp_subproblems}
Figure~\ref{fig:sddp_sub} illustrates different policy graphs for the \texttt{HN}, \texttt{MA}, and \texttt{MM}\ transformations based on the illustrative example shown in Figure~\ref{fig:example_aggregation}.
Recall that the underlying stochastic process is defined by an MC with two states (i.e., light \mcLight{1}\ and dark \mcDark{1}). Since at a minimum we should have one SDDP subproblem defined for each MC state, we have at least two SDDP subproblems defined in each stage $t\geq 2$. In the case of \texttt{MA}, since nodes with the same MC state share the same set of aggregated integer state variables, we have exactly two SDDP subproblems per stage (i.e., one per MC state) and the policy graph only depends on the MC transitions (i.e., a Markovian policy graph). The same applies for \texttt{HN}, although we only have one set of aggregated integer state variables per stage. Since their corresponding policy graphs are identical, we use \texttt{HN}/\texttt{MA}\ to denote the first policy graph in Figure~\ref{fig:sddp_sub}. On the other hand, the SDDP subproblem structure for \texttt{MM}\ is quite different: nodes with the same MC state can be associated with different aggregated integer state variables, because the \texttt{MM}\ transformation depends not only on the MC state in the current node but also its parent node. For example, nodes (3,\mcLight{1}\mcDark{1}\mcDark{1}) and (3,\mcLight{1}\mcLight{1}\mcDark{1}) are associated with the same stage and MC state ($m_{3}(\mcLight{1}\mcDark{1}\mcDark{1}) = m_3(\mcLight{1}\mcLight{1}\mcDark{1}) = \mcDark{1}$, see Figure~\ref{fig:example_aggregation}), but since the MC states of their parent nodes differ, they have different sets of aggregated integer state variables (i.e., $z^A_{3,\tikz \node[node-polygon5, violet!80!black, fill=violet!80!black] {};}$ and $z^A_{3,\tikz \node[node-polygon5,violet!40!white, ,fill=violet!40!white] {};}$, respectively). Therefore, \texttt{MM}\ has one SDDP subproblem for each pair of MC states, corresponding to both the current node and the parent node for stages $t\geq 3$. 
The policy graph for \texttt{MM}\ can be seen as an aggregated version of its scenario tree (see Figure \ref{fig:example_aggregation}) with one copy for each type of nodes.
$\square$
\end{example}
\begin{figure}
\caption{SDDP policy graphs and subproblems for various types of transformations, \texttt{HN}/\texttt{MA}\ and \texttt{MM}.}
\label{fig:sddp_sub}
\end{figure}
To formally define the SDDP subproblem for \eqref{mod:aggregated}, we represent the SDDP subproblem corresponding to node $n\in {\mathcal N}_t$ in the scenario tree by parameterizing it with mapping $\sub(n) := (t, m_{t}(n), \phi_t(n))$. Consider $\subSett$ to be the set of all SDDP subproblems defined for stage $t\geq 2$. The number of subproblems in stage $t=2$ is always the same (i.e., $|\subSet_2| = |{\mathcal N}_2|$) since each node in that stage is associated to a different MC state, whereas the number of subproblems in stages $t > 2$ depends on the transformation employed. For a given SDDP subproblem $\sub\in \subSett$, we define $\subnodest(\sub)$ as the set of nodes associated with $\sub$, i.e., all nodes $n \in {\mathcal N}_t$ such that $\sub(n) = \sub$.
To represent the relationship between SDDP subproblems illustrated by a policy graph, let $\Cub(\sub)$ be the set of children subproblems for each subproblem $\sub$ in the policy graph. Specifically, $\sub'$ is a child of $\sub$ if $\sub'\in \subSet_{t+1}$ and there exists a node $n' \in \subnodes_{t+1}(\sub')$ such that its parent is associated with $\sub$ (e.g., the children set of \squareDark{1} in the \texttt{MM}\ policy graph is $\Cub(\squareDark{1})=\{\tikz \node[node-polygon5, violet!80!black, fill=violet!80!black] {};, \pentaDark{1}\}$, as shown in Figure \ref{fig:sddp_sub}). The transition probability from $\sub\in \subSett$ to one of its children $\sub' \in \Cub(\sub)$ is given by the MC transition probability, that is, $\bar{p}_{\sub\sub'}= p_{a(n')n'}$ for any node $n' \in \subnodes_{t+1}(\sub')$ with parent $a(n') \in \subnodest(\sub)$. Lastly, since there is at least one SDDP subproblem defined for every MC state, the random coefficients involved in the SDDP subproblems can be parameterized by the corresponding MC states, for example, the random cost vector in $\sub\in \subSett$ is defined as $d_\sub = d_n$ for any $n \in \subnodest(\sub)$.
To preserve the stage-wise dependency structure between SDDP subproblems in two consecutive stages, we introduce local copies of the first-stage integer state variables $z^A$ in the subproblems. In other words, we copy the first-stage integer solution from an SDDP subproblem to its children subproblems to ensure that each subproblem has the needed information given by $z^A$. We note that the use of local copies is common in other SDDP variants to maintain the stage-wise decomposition structure of the problem (see, e.g.,~\cite{zou2019stochastic}). Then, the SDDP subproblem $\subn:=\sub(n)$ for any node $n\in {\mathcal N}_t$ with $t>1$ is given by:
\begin{subequations}
\begin{align}
\overline{Q}^A_{\subn}(x_nparentsub, \zetaparent) = \min\; & d_{\subn}^\top x_nsub + h_{\subn}^\top y_nsub + \sum_{\subn' \in \Cub(\subn)}\bar{p}_{\subn \subn'} \theta_{\subn'} \nonumber \\
\mbox{s.t.} \; & J_\sub x_nsub \geq F_\sub x_nparentsub + f_\sub, \\
&\zetan = \zetaparent, \label{eq:sub_zcopy} \\
& C_\sub x_nsub + D_\sub \zetaphin + E_\sub y_nsub \geq A_\sub x_nparentsub + B_\sub \zetaphiparent + b_\sub, \label{eq:sub_allvars} \\
& \theta_{\subn'} \geq \alpha_{\subn'}^\top x_nsub + \beta_{\subn'}^\top \zetan + \gamma_{\subn'}, \qquad \forall (\alpha_{\subn'},\beta_{\subn'},\gamma_{\subn'})\in \mathcal{B}_{\subn'}, \ \subn' \in \Cub(\subn), \label{eq:sddp_cut}
\end{align}
\end{subequations}
where $\zetan$ denotes the continuous local copy of the first-stage integer state variables $z^A$, which comes from its parent subproblem $\zetaparent$ via inequality \eqref{eq:sub_zcopy}. We note that since $\overline{Q}^A_{\textsf{r}}$ contains the original first-stage integer variables $z^A$, the SDDP subproblems for stage $t = 2$ directly receive these variables instead of making local copies from the root (i.e., $\overline{Q}^A_{\sub}(x_nparentsub, z^A)$ for each $\sub \in \Cub(\textsf{r})$, see Figure \ref{fig:sddp_b&c}). A variable $\theta_{\sub}$ is introduced for each SDDP subproblem $\sub$ to represent the outer approximation of the cost-to-go function, and a set $\mathcal{B}_{\subn}$ is introduced to store the coefficients associated with all the cutting planes constructed in this approximation during the solution procedure.
Finally, $\overline{Q}^A_{\subn}$ requires information from the parent of $\subn$ (i.e., $a(\sub)$), however, an SDDP subproblem can have multiple parents in the policy graph, (e.g., both \tikz \node[node-polygon5,violet!40!white, ,fill=violet!40!white] {};\ and \tikz \node[node-polygon5, violet!80!black, fill=violet!80!black] {};\ are parents of \tikz \node[node-polygon6, draw=cyan!70!black, fill=cyan!70!black] {};, see Figure \ref{fig:sddp_sub}). This ambiguity is not an issue for our SDDP implementation as we only consider one scenario at a time in the SDDP forward pass. So each time $\overline{Q}^A_{\subn}$ is solved, a specific node $n \in \subnodest(\sub)$ is considered as its parent node, and, thus, the parent of the SDDP subproblem would be $a(\subn) = \sub(a(n))$.
\subsubsection{SDDP Algorithm.}
Algorithm \ref{alg:sddp} describes the SDDP sub-routine implemented in our B\&C algorithm. The procedure takes a candidate solution $(x_{\rootnode}sol,z^Asol,\widehat{\theta})$ from the master problem~\eqref{SDDP-master} and one child node of the root $n'\in {\mathcal C}(\textsf{r})$, and runs the SDDP algorithm until we find a valid inequality that cuts off $(x_{\rootnode}sol,z^Asol,\widehat{\theta})$ or show that no violated cuts can be found. We use the multi-cut version of SDDP (i.e., one variable to approximate the objective value of each SDDP subproblem \citep{philpott2012dynamic}) and the single-scenario iteration scheme (i.e., sample one scenario at a time from the scenario tree during the SDDP forward pass \citep{philpott2008convergence}). Lastly, our algorithm considers the quick pass version of SDDP (i.e., we evaluate all the subproblems in a sample path before generating cuts in the backward pass \citep{morton1996enhanced}). We refer the reader to \cite{fullner2021sddpvariants} for further details on this and other SDDP variants.
\begin{algorithm}[tb]
\small
\linespread{1.2}\selectfont
\caption{SDDP procedure} \label{alg:sddp}
\begin{algorithmic}[1]
\Procedure{\texttt{SDDP}}{$(x_{\rootnode}sol,z^Asol,\widehat{\theta}), n', \epsilon, K, \texttt{exact}$}
\Repeat
\State Sample $K$ paths from the sub-tree rooted at $n'$ (i.e., ${\mathcal T}(n')$) and store indices in ${\mathcal K}$.
\State \texttt{cutAdded} := \textbf{false}.
\For{$k \in {\mathcal K}$}
\For{$t \in \{2,...,T\}$} {
\color{gray} \%\% Forward pass sub-routines}
\State Select node of scenario $k$ and stage $t$, $n:=n_{t,k}$, and its subproblem $\sub: =\sub(n)$.
\State Solve subproblem $\sub$, let $\overline{Q}^Asol_{\sub} :=\overline{Q}^A_{\sub}(\widehat{x}_{a(\sub)},\zetaparentsol)$. Save primal and dual solutions.
\EndFor
\For{$t \in \{T,...,2\}$} {
\color{gray} \%\% Backward pass sub-routines}
\State Select node of scenario $k$ and stage $t$, $n:=n_{t,k}$, and its subproblem $\sub: =\sub(n)$.
\If{$|\widehat{\theta}_{\sub} - \overline{Q}^Asol_{\sub}| \geq \epsilon|\overline{Q}^Asol_{\sub}|$}
\State Add inequality \eqref{eq:sddp_cut} to all subproblems with variables $\theta_{\sub}$, including $\overline{Q}^A_{a(\sub)}$.
\State \texttt{cutAdded} := \textbf{true}.
\If {$t = 2$} \textbf{return}\ $(\alpha_{\sub(n')}, \beta_{\sub(n')}, \gamma_{\sub(n')})$. {
\color{gray} \%\% Add cut to master problem}
\EndIf
\EndIf
\EndFor
\EndFor
\Until{\texttt{SDDPConverge}($K$, \texttt{cutAdded}, \texttt{exact}).} {
\color{gray} \%\% Termination criteria}
\EndProcedure
\end{algorithmic}
\end{algorithm}
We now provide a detailed explanation of the main components of the algorithm \texttt{SDDP}. We first sample (without replacement) $K$ scenarios (sample paths) from the sub-tree rooted at node $n'\in {\mathcal N}_2$ to generate cuts for its associated variable $\theta_{n'}$ in the master problem. For each scenario, we then perform the forward and backward pass of the SDDP algorithm (lines 5-14). The forward pass iterates over all stages from $t=2$ to $t = T$ on a randomly sampled scenario (sample path), selects the associated node at each stage, and solves the corresponding SDDP subproblem (lines 6-8). The backward pass iterates over all stages from $t = T$ to $t = 2$ (lines 9-14) and checks the relative difference between the approximated value $\widehat{\theta}_{\sub}$ and $\overline{Q}^Asol_{\sub}$ (line 11). If this difference is larger than a given tolerance $\epsilon$, we generate a Benders cut as represented in \eqref{eq:sddp_cut}. In particular, the $(\alpha_{\sub'}, \beta_{\sub'}, \gamma_{\sub'})$ coefficients for any $\subn' \in \Cub(\subn)$ are given by:
\begin{align*}
\alpha_{\subn'} & = {\pi^\eqref{eq:st_onlyx}}^\top F_{\subn'} + {\pi^\eqref{eq:sub_allvars}}^\top A_{\subn'}, \qquad \beta_{\subn'} = {\pi^\eqref{eq:sub_allvars}}^\top B_{\subn'}, \qquad
\gamma_{\subn'} = \overline{Q}^Asol_{\subn'} - \left( \alpha_{\subn'}^\top \widehat{x}_{\subn} + \beta_{\subn'}^\top \zetaphisol \right),
\end{align*}
where ${\pi^\eqref{eq:st_onlyx}}$ and $\pi^\eqref{eq:sub_allvars}$ are the dual solutions associated with inequalities \eqref{eq:st_onlyx} and \eqref{eq:sub_allvars} of subproblem $\sub'$, respectively. This cut is added to the parent subproblem (i.e., $\overline{Q}^A_{\sub(a(n))}$) but it is also valid for any subproblem in stage $t-1$ involving variable $\theta_{\sub(a(n))}$ \citep{philpott2012dynamic}. We note that while the forward pass randomly samples scenarios rooted at one particular child node of the root node, the backward pass can add cuts to subproblems in subtrees rooted at other children nodes of the root node. Also, we consider feasibility cuts during the forward pass if one of the subproblems is infeasible.
Lastly, we terminate the current SDDP iterations for scenarios rooted at $n'$ if a cut is added to the first-stage (master) problem (i.e., $t=2$), otherwise, we continue iterating over the remaining scenarios until the termination criterion is met.
Procedure \texttt{SDDPConverge}\ ensures that we terminate the SDDP algorithms when we can guarantee that no cut can be added to the master problem. To do so, we increase the number of sampled scenarios $K$ to be equal to the total number of scenarios (i.e., $K=|{\mathcal N}_T|$) when no cut was added using the original sample size. Once $K=|{\mathcal N}_T|$, we continue iterating until no violated cut can be added to any subproblem (i.e., \texttt{cutAdded}=\textbf{false}). We note that this termination criterion can lead to several iterations over the full set of scenarios of the scenario tree, which is computationally expensive. However, this scenario increment is necessary to ensure an optimal solution to~\eqref{mod:aggregated}.
\begin{example}\label{exa:sddp_b&c}
Figure \ref{fig:sddp_b&c} illustrates the main components of our SDDP integrated B\&C algorithm for the \texttt{MA}\ transformation. Note that in this case an SDDP subproblem is defined for each MC state in each stage $t > 1$. The two illustrations show the decomposition structure over the policy graph: the root node \mcLight{1}\ corresponds to the master problem while other nodes correspond to the SDDP subproblems. The drawing on the left depicts the forward pass of Algorithm \ref{alg:sddp} for scenario $\mcLight{1} \rightarrow \mcDark{1} \rightarrow \mcDark{1} \rightarrow \mcDark{1}$. Note that the solution of the SDDP master problem, $(z^Asol, \widehat{x}_{\mcLight{0.6}})$, is used as part of the input parameters to \squareDark{1}\ and, similarly, $(\widehat{\zeta}_{\squareDark{0.6}}, \widehat{x}_{\squareDark{0.6}})$ is used as part of the input to \pentaDark{1}. Recall that the $\zeta$ variables are just the copies of $z^A$, so $z^Asol=\widehat{\zeta}_{\squareDark{0.6}} = \widehat{\zeta}_{\pentaDark{0.6}}$. The drawing on the right illustrates the backward pass where the dual optimal solution of an SDDP subproblem is used to generate cuts to improve the outer-approximation of the cost-to-go function defined at its parent subproblems. For example, the optimal dual solution of \pentaDark{1}\ is used to generate cuts for subproblems \squareDark{1}\ and \squareLight{1}.
$\square$
\end{example}
\begin{figure}
\caption{An illustrative example of the SDDP integrated B\&C algorithm. It depicts decomposition structure and the two main SDDP subroutines (i.e., forward and backward pass).}
\label{fig:sddp_b&c}
\end{figure}
\subsubsection{Lower Bound Computation.}\label{sec:sddp_lb}
Algorithm \ref{alg:bc-sddp} guarantees an optimal solution to~\eqref{mod:aggregated}, but the procedure can be computationally expensive because the number of SDDP subproblems depends on the underlying MC structure and the chosen transformation. In addition, we may need to call the SDDP sub-routine multiple times to correctly evaluate each candidate integer relaxation solution encountered during the B\&C procedure. With this in mind, we propose an alternative to the aforementioned procedure to considerably reduce its computation time and return valuable information about the problem in the form of a lower bound. This lower bound can be used, for example, to provide an overestimate of the optimality gap associated with a known feasible solution. We utilize this lower bound to evaluate the quality of solutions obtained by our proposed 2SLDR approximations (see Section~\ref{sec:ldr} for details).
The overall idea for this lower-bounding technique is to apply a limited number of iterations of the SDDP sub-routine. First, we increase the cut violation tolerance parameter $\epsilon$ only to add cuts that have a significant relative difference to its true value (e.g., using $\epsilon=0.1$ instead of a conservative value $\epsilon=0.0001$). Also, we limit the number of rounds in the \texttt{SDDPConverge}\ procedure in Algorithm~\ref{alg:sddp}. Specifically, when Boolean variable $\texttt{exact}= \textbf{false}$, we limit the number of rounds to only three and avoid increasing the number of sampled scenarios $K$. This is in contrast with the $\texttt{exact}= \textbf{true}$ version (see the previous subsection), where we iterate until no more cuts are added to any subproblem and the number of sampled scenarios increases to cover all possibilities.
These modifications to the termination criteria considerably reduce the computational time of the SDDP procedure. However, we only obtain lower-approximations to the cost-to-go functions as we might be missing cuts in the SDDP master and subproblems, resulting in a lower bound for \eqref{mod:aggregated}.
\subsection{Two-stage Linear Decision Rule Approximation} \label{sec:ldr}
In this section, we describe an approximation scheme to create feasible solutions for \eqref{mod:aggregated} by imposing additional structures to the decision policies in the form of 2SLDR \citep{bodur2018two}. Imposing additional structures in such a way transforms the multi-stage stochastic program into a two-stage stochastic program, which requires significantly less computational effort than the B\&C framework integrated with the SDDP algorithm that we presented in the previous section. Moreover, the 2SLDR can be customized to leverage the structure of the underlying stochastic process, and we provide new schemes for the MC case.
Let $\xi_t$ be the vector of random variables at stage $t\in [\periods]$, and $\xi_{t,n}$ be the realization of these random variables at node $n\in {\mathcal N}_t$, i.e.,
$ \xi_{t,n} = \{A_n,B_n,C_n,D_n,E_n,F_n,J_n,b_n,c_n,d_n,f_n,h_n\}. $ Note that $\xi_\textsf{r}$, the data associated with the root node of the scenario tree, is assumed to be a deterministic vector. In addition, $\xi^t=\{\xi_1,...,\xi_t\}$ and $\xi^t_n= \{\xi_{1,\textsf{r}},...,\xi_{t-1, a(n)}, \xi_{t,n}\}$ represent the trajectory of the random variables up to stage $t$ and their realizations for a particular node $n$ in the scenario tree, respectively. We consider $\xi_t$ and $\xi^t$ as vectors of random variables with dimensions $l_t$ and $ l_tt =\sum_{i = 1}^t l_i$ for all $t\in [\periods]$, respectively, and the same applies to the realizations of these random variables.
The general idea behind 2SLDRs is to replace the original \Merve{continuous} state variables $x_n$ with a new set of variables for each node $n \in {\mathcal N}$ by means of a linear relation of node-specific data. By doing so, we impose additional (linear relation) structure over the state variables, and in return transform the multi-stage stochastic program into a two-stage stochastic program \citep{bodur2018two}. Specifically, 2SLDRs employ basis functions $\MApldr(\cdot)$ to map $\xi^t_n$ into values that are then used to create a linear representation for the state variables $x_n$, i.e., $x_n = \mu_t^\top \MApldr(\xi^t_n) \in \mathbb{R}^k$, where $\mu_t$ is a vector of the so-called LDR variables at stage $t$. Although the basis functions $\MApldr(\cdot)$ can be any form of functions that map the node-specific data $\xi^t_n$ to certain values, for ease of exposition, we consider simple basis functions that return the actual random variable realizations, that is, $\MApldr(\xi^t_n) = \xi^t_n$. In this case, the LDR is given by:
\begin{equation}\label{eq:ldr}
x_n = \mu_t^\top \xi^t_n, \ \forall n \in {\mathcal N}_t, t \in [\periods].
\end{equation}
We observe that these simple basis functions return high-quality decision policies for our case study (see Section \ref{sec:experiments}), although more complex basis functions can potentially lead to better decision policies \citep{chen2008linear,bampou2011scenario}.
\omegaubsubsection{Aggregation Framework for 2SLDR.}
Similar to the aggregation framework introduced in Section~\ref{sec:framework}, we create different aggregation schemes for the LDR variables $\mu$, and as a result, obtain approximate 2SLDR-based decision policies. Table \ref{tab:ldr} summarizes a few LDR variable structures that we consider. The first two LDR variants (\texttt{LDR-TH}\ and \texttt{LDR-T}) are common in the literature \citep{shapiro2005complexity,kuhn2011primal}, while \texttt{LDR-M}\ is a new LDR variant based on the MC structure of the stochastic process.
\begin{table}[htb]
\setlength\extrarowheight{4pt}
\centering
\begin{tabular}{c|l|l|l}
\toprule
Name & LDR variables for $t\in [\periods]$ & Standard basis function & Resulting LDR \\
\midrule
\texttt{LDR-TH} & $\mu_t \in \mathbb{R}^{k\times l_tt}$ & $\MApldr(\xi^t_n) = \xi^t_n$ & $x_n = \mu_t^\top \xi^t_n$ \\
\texttt{LDR-T} & $\mu_{t} \in \mathbb{R}^{k\times l_t}$ & $\MApldr(\xi^t_n) = \xi_{t,n}$ & $x_n = \mu_t^\top \xi_{t,n}$ \\
\texttt{LDR-M} & $\mu_{t,m}\in \mathbb{R}^{k\times l_t}, \quad \forall\; m \in {\mathcal M}_t$ & $\MApldr(\xi^t_n) = \xi_{t,n}$ & $x_n = \mu_{t, m(n)}^\top \xi_{t,n}$ \\
\bottomrule
\end{tabular}
\caption{Examples of LDR variable structures and basis functions for each $n\in {\mathcal N}_t$ and $t\in [\periods]$.}
\label{tab:ldr}
\end{table}
The first LDR variant, \texttt{LDR-TH}, which we refer to as a stage-history LDR, constructs the linear relation based on the trajectory of realizations of the random variables up to the current stage. The \texttt{LDR-TH}\ variant can lead to high-quality decision policies (thanks to the amount of information incorporated) but may result in a large number of LDR variables, especially for problems with a large number of stages. An alternative is \texttt{LDR-T}\ which constructs the linear relation only based on the realization of the random vector at the current stage, which leads to a smaller number of LDR variables at the expense of lower solution quality~\citep{bodur2018two}.
In addition to these LDR variants, we propose \texttt{LDR-M}\ which takes advantage of the MC structure of the underlying stochastic process. The idea is to have one copy of LDR variables $\mu_{t,m}$ for each stage $t$ and MC state $m$. It is clear that \texttt{LDR-M}\ will lead to better policies compared to \texttt{LDR-T}\ because both LDR variants use the same information (realizations of random vectors), but the latter has a single copy of the LDR variables for all nodes in the same stage. We note that, however, \texttt{LDR-M}\ and \texttt{LDR-TH}\ are incomparable in general, as shown in our empirical results.
\subsubsection{2SLDR Models.}
We now present the resulting model after applying the LDR transformation \eqref{eq:ldr} to \eqref{mod:aggregated}. For ease of exposition, we only present the 2SLDR variant \texttt{LDR-TH}\ and the other two alternatives are similar. The overall idea is to replace the state variables $x_n$ with the LDR variables using linear function \eqref{eq:ldr}. The resulting model for the root node is as follows:
\begin{subequations}
\begin{align}
{Q}^L_{\textsf{r}} = \min\; &
\sum_{n\in {\mathcal N}}p_n \left( c_n^\top \za_{\phi_t(n)} + d_n^\top(\mu_{t(n)}^\top\xi^{t(n)}_n) \right) + h_\textsf{r}^\top y_{\rootnode} + \sum_{n \in \Nnoroot }p_{n} {Q}^L_n(\mu,z^A) \tag{$P^L$} \label{mod:ldr} \\
\text{s.t.}\; & H_{\textsf{r}} z_{\rootnode} \geq g_{\textsf{r}}, \;
J_{\textsf{r}} \mu_{1}^\top\xi^{1}_\textsf{r} \geq f_{\textsf{r}}, \;
C_{\textsf{r}} \mu_{1}^\top\xi^{1}_\textsf{r} + D_{\textsf{r}} z_{\rootnode} + E_{\textsf{r}} y_{\rootnode} \geq b_{\textsf{r}}, \label{eq:ldr-constr0} \\
& H_n \za_{\phi_t(n)} \geq G_n \za_{\phi_t(n)}parent + g_n, & \forall n \in \Nnoroot, \label{eq:ldr-constr1} \\
& J_n \mu_{t(n)}^\top\xi^{t(n)}_n \geq F_n \mu_{t(a(n)) }^\top\xi^{t(a(n))}_{a(n)}, & \forall n \in \Nnoroot, \label{eq:ldr-constr2} \\
& (y_{\rootnode}, \mu, z^A) \in \mathbb{R}^r \times \mathbb{R}^{k\cdot \sum_{t \in [\periods]}l_tt} \times \Z^{\ell \cdot \sum_{t \in [\periods]} q_t}. \nonumber
\end{align}
\end{subequations}
Model \eqref{mod:ldr} contains all the root node variables and constraints of \eqref{mod:aggregated} and includes all the LDR variables and its associated constraints \eqref{eq:ldr-constr2} from all nodes in the scenario tree. We note that there is no need to define LDR variables for the root node since the $x_{\rootnode}$ variables are already first-stage variables, but we include them in the model for ease of exposition. Lastly, the cost-to-go function for node $n \in \Nnoroot$ only depends on the first-stage variables $\mu$ and $z^A$ and is given by:
\begin{equation*}
{Q}^L_n(\mu, z^A) = \min_{y_n \in \mathbb{R}^r} \left\{ h_n^\top y_n \mid C_n \mu_{t(n)}^\top\xi^{t(n)}_n + D_n \za_{\phi_t(n)} + E_n y_n \geq A_n \mu_{t(n) -1 }^\top\xi^{t(n)-1}_{a(n) } + B_n \za_{\phi_t(n)}parent + b_n\right\}.
\end{equation*}
Note that \eqref{mod:ldr} is indeed a two-stage stochastic program. Applying \eqref{eq:ldr}
we remove the dependency between parent and child nodes, thus, all cost-to-go functions become functions of only the first-stage decisions. This two-stage stochastic program is easier to solve than its multi-stage
counterpart since it eliminates the nested dependency between the cost-to-go functions in different stages.
Formulation \eqref{mod:ldr} considers one cost-to-go function for each node in the scenario tree. However, the number of cost-to-go functions can be considerably smaller depending on the LDR variant and transformation $\Phi_t$. For example, \texttt{LDR-TH}\ requires one cost-to-go function for each node in the scenario tree because it considers the entire trajectory of realizations of random variables. Therefore, the cost-to-go functions for \texttt{LDR-TH}\ are unique for each node even if they share the same stage and MC state. On the contrary, \texttt{LDR-T}\ and \texttt{LDR-M}\ only employ local information, so the cost-to-go functions for two nodes $n, n'\in {\mathcal N}_t$ are the same if $\sub(n) = \sub(n')$, that is, if they share the same stage $t$, MC state $m(n)=m(n')$, and aggregated variables $\phi_t(n)= \phi_t(n')$. Therefore, we have the same number of distinct cost-to-go functions as SDDP subproblems, which can be significantly smaller than the size of the scenario tree. In conclusion, \texttt{LDR-T}\ and \texttt{LDR-M}\ have a significant computational advantage over \texttt{LDR-TH}\ due to the fewer LDR variables and cost-to-go functions employed (along with all the variables and constraints necessary to define them).
We solve the resulting two-stage stochastic programming model using Benders decomposition, a standard approach in the stochastic programming literature (see, e.g.,~\cite{zverovich2012computational}). Our specific implementation details can be found in Appendix \ref{app:2sldr_benders}.
\section{Case Study: A Hurricane Disaster Relief Planning Problem} \label{sec:application}
In this section, we present a case study to exemplify the aggregated framework and evaluate the proposed solution methodologies. We consider a class of multi-period hurricane disaster relief logistics planning (HDR) problem, where a number of contingency modality options can be activated during the planning horizon to increase the capacities at the distribution centers (DCs).
The HDR problems are well-studied in the operations research literature because of their practical impacts on reducing economic loss and human suffering~\citep{graumann2006hurricane,seraphin2019natural}. The operational challenges in HDR problems arise from hurricanes' stochastic nature \citep{sabbaghtorkan2020prepositioning}. To address these challenges, most of the HDR literature focuses on two-stage stochastic programming models, where the first-stage model makes strategic facility location and resource pre-positioning decisions in terms of shelter preparation and resource allocation, while the second stage deals with disaster relief logistics decisions once the damage and demand information are revealed after the hurricane's landfall (see, e.g., \cite{duran2011pre,lodree2012pre,davis2013inventory,alem2016stochastic}, and a survey paper by \cite{sabbaghtorkan2020prepositioning}).
Recent works have also considered the multi-stage HDR variants where sequential logistics decisions are made in a rolling horizon (RH) fashion~\citep{pacheco2016forecast,siddig2022multi,yang2022optimizing}. For example, \cite{pacheco2016forecast} consider a relief supply preposition and reposition problem starting from the time when the hurricane is first detected, with updated forecast information on the hurricane's attributes every six hours. The authors proposed a forecast-driven dynamic model for the problem, and a solution approach combining scenario analysis and RH. Similarly, \cite{siddig2022multi} consider a multi-period preposition problem where the demand is realized at landfall (i.e., last period). The authors proposed an MSLP formulation and showed the value of multi-stage stochastic programming solutions compared to solutions given by an RH approach and a two-stage approximation.
Despite their wide range of problem settings, the HDR problems have several assumptions in common. For example, the evolution of the hurricane and intensity are commonly modeled using an MC \citep{taskin2010inventory,pacheco2016forecast} and the estimated demand at each shelter can be modeled using a deterministic mapping from the current hurricane state (i.e., hurricane location and intensity) \citep{siddig2022multi}. Similarly, the HDR problem that we study follows these assumptions as well. One unique feature of our HDR problem is that we consider a set of contingency modality options that can be activated during the planning horizon to increase the inventory capacities of the DCs. We introduce more details about the problem next.
\subsection{Problem Description and Model Formulation}
Our multi-stage HDR problem is concerned with the logistics decisions of producing and distributing relief commodities to shelters prior to the landfall of a hurricane.
The relief commodities are used to fulfill the demand for civilians that are evacuated to shelters. As in the related problems, we consider that demand can be estimated according to the location and intensity of the hurricane, updated every six hours, which is the frequency of the hurricane forecast update by the National Hurricane Center~\citep{regnier2019hurricane}. We assume that shelters, DCs, and possible positions of the hurricane are located in a two-dimensional grid with a discrete set of $x$- and $y$-coordinates. The
goal is to produce and transport relief commodities from the DCs to the shelters at a minimum (expected) cost, which consists of the penalty cost for unsatisfied demand and operational costs incurred in all time periods.
In addition, we consider a set of contingency modalities that can be activated to increase the DCs' capacities. These modalities make the disaster relief logistics system more resilient to extreme hurricane events that may lead to unexpectedly high demand at the price of higher logistics costs. We make a practically relevant assumption that these modalities can be activated at most once during the planning horizon, and once activated, the modality will stay the same until the end of the planning horizon. An active modality represents an incremental capacity increase at every stage on the planning horizon. These considerations are consistent with the practical constraints associated with these large-scale critical logistics operations (e.g., the phased activation of national guards for disaster relief efforts), which have this ``all or nothing'' feature and have to be implemented in small increments. Lastly, we consider a one-leg delay for these modalities (i.e., the capacity starts to increase from the stage immediately after the stage when a modality activation decision is made), which partially captures the logistical challenges of modality changes during disaster relief.
We assume that the hurricane's evolution can be characterized by an MC ${\mathcal M}$. Each MC state corresponds to two attributes of the hurricane: location and intensity, that is, $m=(m^x, m^y, m^i)\in {\mathcal M}$ where $m^x$ and $m^y$ are the $x$- and $y$-coordinates of the hurricane's location, respectively, and $m^i$ represents the hurricane's intensity level. We consider independent probability transition matrices for each hurricane attribute. For the hurricane intensity, we use the transition probability matrix described in~\cite{pacheco2016forecast} in our test instances. For the hurricane movement, we assume that the hurricane originates from the bottom row of the grid (i.e., $m^y = 0$ for the initial MC state) and that $m^y$ increases by one in each period (i.e., the hurricane advances upwards by one step in each period) and will reach land in exactly $T$ periods (see Appendix \ref{app:hdr} for additional details). We note that our approach can be easily extended to the case when the number of stages until landfall is random, as long as the underlying stochastic process is modeled by an MC.
Given the stochastic behavior of the hurricane, we consider an MSILP model with a stage defined for each time period when new information is realized (i.e., the hurricane moves to a new MC state). We construct a scenario tree ${\mathcal T}$ by considering an initial MC state and all possible MC transitions for up to $T$ stages (i.e., until landfall). We consider a set of contingency modalities ${\mathcal L}$, a set of shelters $\shelters$, and a set of DCs $\mathcal{J}$, where each DC $j\in \mathcal{J}$ has an initial capacity $C_j$ and inventory $I_j$. At each node $n\in {\mathcal N}$ of the scenario tree ${\mathcal T}$, one needs to decide the amount of relief commodities $v_{j}\geq 0$ to produce for each DC $j \in \mathcal{J}$, the amount of relief commodities $y_{ij}\geq 0$ to transport from each DC $j\in \mathcal{J}$ to each shelter $i \in \shelters$, and the amount of unsatisfied demand $w_i \geq 0$ at each shelter $i\in \shelters$. We introduce two continuous state variables $x_n = (x^{\texttt{I}}_{n}, x^{\texttt{C}}_{n})$ to represent the inventory and capacity for each DC $j\in \mathcal{J}$ at node $n\in {\mathcal N}$, respectively. Lastly, we introduce a binary state variable $z_{n\ell} \in \{0,1\}$ to represent whether or not a contingency modality $\ell \in {\mathcal L}$ is active at node $n \in {\mathcal N}$.
Each node $n\in {\mathcal N}$ is associated with realizations of random variables according to the corresponding MC state of the hurricane. Specifically, for a given node $n \in \mathcal{N}_t$, $t \in [\periods]$, the demand of a shelter $i\in \shelters$, $d_{ni}$, the production cost at DC $j\in \mathcal{J}$, $q_{nj}$, and the transportation cost from DC $j$ to shelter $i$, $f_{nij}$, depend solely on the MC state $m_t(n)$ associated to node $n$. Thus, two nodes $n,n'\in \mathcal{N}_t$ with the same MC state at stage $t$, $m_t(n)=m_t(n')$, share the same realizations of random variables. The remaining model parameters are assumed to be deterministic, which include the unit inventory cost $g_j$ at $j\in \mathcal{J}$, the contingency modality cost $c_\ell$ for $\ell\in {\mathcal L}$, the penalty cost for each unit of unsatisfied demand $b_i$ for $i\in \shelters$, and the capacity increase $K_{j\ell}$ at DC $j$ in each stage when modality $\ell$ is active. Further details on the problem instance generation can be found in Appendix \ref{app:hdr}. The MSILP model for the HDR problem is given by:
\begin{subequations}
\begin{align}
\min \quad & \sum_{j \in \mathcal{J}}\left( g_{j}x^{\texttt{I}}_{\textsf{r} j} + q_{\textsf{r} j} v_{j} +\sum_{i \in \shelters} f_{\textsf{r} ij} y_{ij} \right) + \sum_{i \in \shelters} b_{i}w_{i} + \sum_{\ell \in {\mathcal L}} z_{\textsf{r} \ell}c_{\ell} + \sum_{n \in {\mathcal C}(\textsf{r})} \lefteqn{\bar{p}_{\textsf{r} n} Q_n(x_{\textsf{r}}, z_{\textsf{r}}) } \tag{$\textit{HDR}$} \label{dr_root} \\
\text{s.t.} \quad & \sum_{j \in \mathcal{J}} y_{ij} + w_{i} \geq d_{\textsf{r} i}, & \forall i \in \shelters, \label{dr_root:demand} \\
& x^{\texttt{I}}_{\textsf{r} j} = I_{j} - \sum_{i \in \shelters} y_{ij} + v_{j}, & \forall j \in \mathcal{J}, \label{dr_root:inventory} \\
& v_{j} \leq x^{\texttt{C}}_{\textsf{r} j}, & \forall j \in \mathcal{J}, \label{dr_root:production}\\
& x^{\texttt{C}}_{\textsf{r} j} = C_j, & \forall j \in \mathcal{J}, \label{dr_root:capacity} \\
& \sum_{\ell \in {\mathcal L}} z_{\textsf{r} \ell} \leq 1, \label{dr_root:modality} \\
& x^{\texttt{I}}_{\textsf{r} j}, x^{\texttt{C}}_{\textsf{r} j}, v_{j}, w_{i}, y_{ij} \geq 0, \; z_{\textsf{r} \ell} \in \{0,1\}, & \forall j \in \mathcal{J}, \; i \in \shelters, \; \ell \in {\mathcal L}. \nonumber
\end{align}
\end{subequations}
Constraints \eqref{dr_root:demand} and \eqref{dr_root:inventory} represent the demand and inventory constraints, respectively. Constraints \eqref{dr_root:production} enforce the production capacity and constraints \eqref{dr_root:capacity} represent the initial capacity of each DC. Constraint \eqref{dr_root:modality} enforces that at most one modality can be activated at the root node of the scenario tree. Lastly, the cost-to-go function for a node $n\in \mathcal{N} \setminus \{\textsf{r}\}$ is given by:
\begin{subequations}
\begin{align}
Q_n(x_{a(n)}, z_{a(n)}) & = \min \sum_{j \in \mathcal{J}}\left( g_{j}x^{\texttt{I}}_{n j} + q_{n j} v_{j} + \sum_{i \in \shelters}f_{nij} y_{ij} \right) + \sum_{i \in \shelters} b_{i}w_{i} + \lefteqn{\sum_{\ell \in {\mathcal L}} z_{n \ell}c_{\ell}
+\!\! \sum_{n' \in {\mathcal C}(n)}\!\!\bar{p}_{n n'} Q_{n'}(x_n, z_n) } \quad
\nonumber \\
\text{s.t.} \quad & \sum_{j \in \mathcal{J}} y_{ij} + w_{i} \geq d_{ni}, & \forall i \in \shelters, \label{dr_node:demand} \\
& x^{\texttt{I}}_{nj} = x^{\texttt{I}}_{a(n)j} - \sum_{i \in \shelters} y_{ij} + v_{j}, & \forall j \in \mathcal{J}, \label{dr_node:inventory} \\
& v_{j} \leq x^{\texttt{C}}_{nj}, & \forall j \in \mathcal{J},\label{dr_node:production} \\
& x^{\texttt{C}}_{nj} = x^{\texttt{C}}_{a(n)j} + \sum_{\ell \in {\mathcal L}} K_{j \ell}z_{a(n) \ell}, & \forall j \in \mathcal{J}, \label{dr_node:capacity}\\
& \sum_{\ell \in {\mathcal L}} z_{n\ell} \leq 1, & \label{dr_node:modality}\\
& z_{a(n)\ell} \leq z_{n\ell}, & \forall \ell \in {\mathcal L}, \label{dr_node:modality2}\\
& x^{\texttt{I}}_{n j}, x^{\texttt{C}}_{n j}, v_{j}, w_{i}, y_{ij} \geq 0, \; z_{n\ell} \in \{0,1\}, & \forall j \in \mathcal{J}, i \in \shelters, \ell \in {\mathcal L}. \nonumber
\end{align}
\end{subequations}
The meanings of constraints \eqref{dr_node:demand}-\eqref{dr_node:production} and \eqref{dr_node:modality} are identical to their counterparts in~\eqref{dr_root}. Constraints \eqref{dr_node:capacity} represent the capacity increase at a DC depending on whether or not a contingency modality is active in the previous stage. Constraints \eqref{dr_node:modality2} enforce that an active contingency modality in the previous stage will remain active in the current stage.
\subsection{Aggregation Framework}
We consider the four MC-based transformations introduced in Table \ref{tab:transformations} of Section \ref{sec:framework}, i.e., \texttt{HN}, \texttt{MA}, \texttt{MM}, and \texttt{PM}, for the aggregated model. The mathematical formulation of the aggregated model~\eqref{dr_aggre} can be found in Appendix \ref{app:hdr_aggregated}, which is a straightforward application of our proposed framework.
In the context of our HDR problem, aggregation is done for the contingency modality decisions.
\texttt{HN}\ considers the same decisions for all nodes in a given stage; \texttt{MA}\ aggregates variables based on the hurricane state information in a given stage, which corresponds to the location and intensity of the hurricane; and \texttt{MM}\ incorporates the hurricane states at the current node and its parent node.
The transformation \texttt{PM}\ can be customized depending on how one defines the partial MC state information. In our experiments, we consider the full hurricane state information (i.e., both location and intensity) at the current node and only the hurricane intensity information at its parent node, that is, the difference between \texttt{MM}\ and \texttt{PM}\ for the HDR problem is that the former incorporates the hurricane location information at the parent node into the aggregation, while the latter does not.
\subsection{Two-stage LDR approximation}
As explained in Section \ref{sec:ldr}, we can apply a further restriction to the aggregated MSILP model \eqref{dr_aggre} to obtain a two-stage model by applying LDR to the continuous state variables (i.e., inventory variables $x^{\texttt{I}}_n$). We present three LDR variants similar to the ones introduced in Table \ref{tab:ldr}.
First, we introduce notation $\xi_{t,n}=(d_n,q_n,f_n)$ to represent the vector of realizations of random variables for each node $n \in \mathcal{N}_t$ in the scenario tree, including the demand $d_n$, production cost $q_n$, and transportation cost $f_n$. Since the demand is one of the most important parameters for the HDR problem, in our experiment we choose to use basis functions (for constructing LDRs) that are only based on either the demand realization at the current node $\mapldr(\xi_{t,n}) = d_n$ or the entire history of demand realizations up to the current node $\mapldr(\xi_{t,n}) = (d_\textsf{r},...,d_{a(n)}, d_n)$. Then, the LDR variants for the inventory variables for a given node $n\in \mathcal{N}_t$ and DC $j \in \mathcal{J}$ are defined as:
\[
\texttt{LDR-TH}: \; x^{\texttt{I}}_{n j} = \!\!\sum_{n' \in {\mathcal P}(n)}\sum_{i \in \shelters}\mu_{t(n')tji}d_{n'i} \quad\;
\texttt{LDR-T}: \; x^{\texttt{I}}_{n j} = \sum_{i \in \shelters}\mu_{t(n)ji}d_{ni} \quad\;
\texttt{LDR-M}: \; x^{\texttt{I}}_{n j} = \sum_{i \in \shelters}\mu_{t m_t(n) ji}d_{ni}
\]
where $\mu_{t't} \in \mathbb{R}^{|\mathcal{J}|\times |\shelters|}$ for $t' \in [t]$, $\mu_{t} \in \mathbb{R}^{|\mathcal{J}|\times |\shelters|}$, and $\mu_{tm}\in \mathbb{R}^{|\mathcal{J}|\times|\shelters|}$ for $m \in {\mathcal M}_t$ are the LDR variables associated to each variant, respectively.
\texttt{LDR-TH}\ considers the full demand history, while \texttt{LDR-T}\ and \texttt{LDR-M}\ only consider the demand realization at the current node.
\texttt{LDR-M}\ has one set of variables for each hurricane state, while \texttt{LDR-T}\ and \texttt{LDR-TH}\ employ one set of variables per stage. We discuss the implications of these differences in Section \ref{sec:ldr}, also in our numerical results in Section \ref{sec:experiments-approx}.
These LDR variants lead to three different two-stage approximations of \eqref{dr_aggre}, which are solved via Benders decomposition as discussed in Section \ref{sec:ldr} and Appendix \ref{app:2sldr_benders}. Note that while the MSILP problem has the relatively complete recourse property, these two-stage approximations do not necessarily have this property. For instance, LDR variables $\mu$ can take negative values, which may lead to an infeasible solution with negative inventory values. We could force $\mu\geq 0$ to avoid this issue, but this further limits the approximation and can lead to a worse solution, as observed during preliminary experimentation.
Thus, we incorporate Benders feasibility cuts in this regard.
\section{Numerical Experiment Results} \label{sec:experiments}
We now present the numerical results for the proposed aggregation framework
and solution approaches.
We test the models and approaches over various instances of the described HDR problem.
In what follows, we first provide details on the test instances and the experimental setup. We then evaluate the computational performances of the proposed methodologies, focusing on the approximation methods (i.e., 2SLDR solutions with SDDP-based lower bounds) which perform best in practice. Lastly, we analyze the obtained decision policies and provide managerial insights.
\subsection{Experimental Setup}
We generate instances for our MSILP HDR problem for two grid sizes, $4\times5$ and $5\times6$, which correspond to small-size and large-size instances, respectively (see Appendix \ref{app:model_size} for more details on the sizes of these instances). We experiment with different levels of initial capacities for the DCs (i.e., $C_j$ for $j \in \mathcal{J}$) to analyze the impact of decision policies derived from the aggregation framework to systems with different capacities to address demand fluctuations.
Specifically, we consider instances where the initial capacities across all DCs are 20\%, 25\%, or 30\% of the maximum demand across all shelters (the maximum demand is a predefined parameter for instance generation, see Appendix \ref{app:hdr} for details). We also consider two types of contingency modality options: conservative (Type-1) and aggressive (Type-2). For Type-1 modality, DC capacity expansion options are 10\%, 20\%, 30\%, or 40\% per stage, while for Type-2 modality, the options are 15\%, 30\%, 45\%, or 60\% per stage. We randomly generate a number of instances for each instance configuration and eliminate the ones where the optimal decision policies under transformations \texttt{HN}\ and \texttt{FH}\ are identical to make a meaningful comparison between different transformations. Overall, we obtain ten random instances for each of the six different instance configurations on two grid sizes.
The code for the decomposition algorithms was implemented in \texttt{C++} using solver IBM ILOG CPLEX 20.1 with callback functions. All experiments were run using a single core/thread and a six-hour time limit. The experiments were run in the University of Toronto SciNet server Niagara, using cores with 16GB RAM each\footnote{See \url{https://docs.scinet.utoronto.ca/index.php/Niagara_Quickstart} for further server specifications.}. We will make our code and instances available upon publication.
\subsection{Computational Performance of the Proposed Methodologies} \label{sec:experiments-approx}
We now present the empirical performance of the proposed methodologies to solve the aggregated MSILP models. We focus on the best performing procedures which could be more valuable for practitioners, that is, the 2SLDR variants described in Section \ref{sec:application} (i.e., \texttt{LDR-TH}, \texttt{LDR-T}, and \texttt{LDR-M}) and the SDDP-based methods to obtain a lower bound (\sddplb) and an upper bound (\sddpub). We briefly discuss the results of the exact methods at the end of this section, that is, the B\&C SDDP approach (\sddp) and the extensive model (\texttt{Ex}), and refer to Appendix \ref{app:experiments-exact} for further details. For brevity, we only show results for the \texttt{PM}\ transformation since it achieves the best trade-off between policy quality and computational effort. See Appendix \ref{app:approx-additional-resutls} for the results of the other transformations.
We first present results over small-size instances in Table \ref{tab:2sldr-pm-smallsize}. Columns ``Average Time (sec)'' report each approach's
time to obtain their respective optimal solution (all techniques obtained optimal solutions within the time limit), averaged over instances with the same configuration. Columns ``Relative Difference (\%)'' compare the solution found by each approach and the (true) optimal solution obtained from \texttt{Ex}, that is, $(|Obj_{\texttt{Ex}} - Obj_{i}|)/Obj_{\texttt{Ex}}$ for all $i \in \{\texttt{LDR-TH},\texttt{LDR-T},\texttt{LDR-M},\sddpub,\sddplb\}$. We observe that \texttt{LDR-T}\ and \texttt{LDR-M}\ take the least
time on average. Moreover, they yield the smallest relative difference (
$<$ 0.3\% on average in all configurations), indicating the high quality of their corresponding solutions. In particular, the {\mathcal M}erve{newly} proposed LDR variant, \texttt{LDR-M}, provides the best approximation and takes the least time in most configurations. This shows the value of developing decision policies that leverage the structure of the underlying stochastic process.
\begin{table}[tb]
\def\arraystretch{\stretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{PM}$ over small-size instances.}
\scalebox{\scaleTableResults}{
\begin{tabular}{cc|rrr|r|r|rrrr|r}
\toprule
& & \multicolumn{5}{c|}{Average Time (sec)} & \multicolumn{5}{c}{Relative Difference (\%)} \\
\midrule
Modality & \multicolumn{1}{l|}{Cap.} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c|}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\sddplb} & \multicolumn{1}{c|}{\texttt{Ex}} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\sddpub} & \multicolumn{1}{c}{\sddplb} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 813.0 & 164.4 & \textbf{157.9} & 549.1 & 961.3 & 0.12 & 0.26 & 0.11 & \textbf{0.01} & 0.36 \\
& \multicolumn{1}{l|}{25\%} & 380.1 & 84.3 & \textbf{77.7} & 347.0 & 487.2 & 0.08 & 0.11 & \textbf{0.03} & 0.26 & 0.71 \\
& \multicolumn{1}{l|}{30\%} & 408.2 & \textbf{79.0} & 85.9 & 400.7 & 222.0 & 0.12 & 0.25 & \textbf{0.00} & 0.55 & 1.24 \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 1406.0 & \textbf{247.2} & 265.2 & 940.5 & 2047.3 & 0.13 & 0.26 & 0.13 & \textbf{0.00} & 0.28 \\
& \multicolumn{1}{l|}{25\%} & 670.5 & 108.2 & \textbf{98.1} & 534.4 & 651.0 & 0.09 & 0.13 & \textbf{0.04} & 0.23 & 0.49 \\
& \multicolumn{1}{l|}{30\%} & 532.6 & 90.0 & \textbf{89.5} & 434.8 & 270.6 & 0.12 & 0.26 & \textbf{0.01} & 0.98 & 1.29 \\
\midrule
\multicolumn{2}{c|}{Average} & 701.7 & \textbf{128.9} & 129.1 & 534.4 & 773.2 & 0.11 & 0.21 & \textbf{0.05} & 0.34 & 0.73 \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-pm-smallsize}
\end{table}
Table \ref{tab:2sldr-pm-smallsize} also shows the performance of the SDDP lower (\sddplb) and upper (\sddpub) bound techniques. Both provide high-quality bounds with small relative differences (i.e., less than 1.5\%). Also, \sddplb\ is computationally more efficient compared to \texttt{Ex}\ in most cases. Nonetheless,
\sddpub\ (evaluating the incumbent solution from \sddplb) can require significant computational effort due to the additional SDDP calls:
around 1-2 hours for small-size instances and up to $12$ hours for large ones.
\begin{table}[tbp]
\def\arraystretch{\stretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{PM}$ over large-size instances.}
\scalebox{\scaleTableResults}{
\begin{tabular}{cc|rrrl|rrr}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{3}{c}{Opt. Gap (\%)} \\
\midrule
Modality & Cap. & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\sddplb} & (opt) & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\sddpub} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 5,353 & \textbf{ 3,732} & 13,564 & (9) & 0.62 & 0.39 & \textbf{0.24} \\
& \multicolumn{1}{l|}{25\%} & \textbf{ 4,235} & 4,716 & 13,557 & (6) & 2.56 & 2.06 & \textbf{1.91} \\
& \multicolumn{1}{l|}{30\%} & \textbf{ 1,555} & 1,675 & 2,067 & (10) & 0.80 & 0.63 & \textbf{0.27} \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 7,489 & \textbf{ 4,855} & 18,323 & (5) & 9.00 & \textbf{7.33} & 9.01 \\
& \multicolumn{1}{l|}{25\%} & 6,664 & \textbf{ 5,560} & 14,790 & (4) & 4.98 & 4.38 & \textbf{4.28} \\
& \multicolumn{1}{l|}{30\%} & 1,717 & \textbf{ 1,640} & 6,971 & (10) & 0.81 & 0.63 & \textbf{0.30} \\
\midrule
\multicolumn{2}{c|}{Av. (Total)} & 4,502 & \textbf{ 3,696} & 11,545 & (44) & 3.13 & \textbf{2.57} & 2.67 \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-pm-largesize}
\end{table}
Table \ref{tab:2sldr-pm-largesize} presents results obtained by these approximation methods over large-size instances. The LDR variant \texttt{LDR-TH}\ is not included here as the corresponding formulation cannot be loaded into CPLEX due to the large number of first-stage LDR variables and constraints. Also, due to the lack of exact optimal solutions for large-size instances, the optimality gaps are conservative estimations computed using the best lower bound found by \sddplb, that is, we replace $Obj_\texttt{Ex}$ with $Obj_\sddplb$.
As in the small-size instances, the 2SLDR variants find high-quality solutions with relatively low computational effort. In fact, \texttt{LDR-T}\ and \texttt{LDR-M}\ find their respective optimal solutions within the time limit for all the instances, while \sddplb\
proves optimality in 44 of the 60 tested instances (as seen in the ``(opt)'' column). Nonetheless, \sddplb\ finds high-quality lower bounds and \sddpub\ yields marginally higher-quality solutions in most cases.
The results suggest that 2SLDR, specifically \texttt{LDR-M}, is the best
for finding high-quality solutions with limited computational effort, while the SDDP-based methods can yield high-quality lower and upper bounds given sufficient
time.
\begin{remark}
Appendix \ref{app:experiments-exact} analyzes the results of using exact methodologies (i.e., \texttt{Ex}\ and \sddp) to solve the aggregated models. To summarize, \texttt{Ex}\ is faster and solves more instances than \sddp\ in small-size instances. However, \texttt{Ex}\ cannot be solved for large-size problems due to memory requirements (the models use $\sim$16GB RAM after the pre-solve phase of CPLEX), while \sddp\ can solve several instances to optimality and find feasible solutions with small optimality gaps. The performance of \sddp\ is mostly explained by the large branching factor of the MC process and, consequently, the large number of SDDP subproblems (up to 593 for large-size instances). We believe that \sddp\ may be competitive in applications where the MC has fewer reachable states per stage (e.g., ten or fewer).
\end{remark}
\subsection{Decision Policies and Managerial Insights}
\label{sec:experiments-policy}
We now analyze the quality and structure of the decision policies (in activating contingency modalities) associated with transformations \texttt{HN}, \texttt{MA}, \texttt{PM}\ and \texttt{MM}, and the original non-aggregated model (\texttt{FH}). The analysis considers small-size instances (i.e., $4\times5$ grids) since its purpose is to provide insights from the provably optimal solutions, and the non-aggregated model \texttt{FH}\ can only be solved by \texttt{Ex}, which cannot handle the large-size instances due to computational limitations.
Table \ref{tab:policy-quality} summarizes the performances of the considered models in terms of the resulting objective values. Columns ``\% Gap closed'' present the percentage of the gap between \texttt{HN}\ and \texttt{FH}\ that is closed by each transformation averaged over all the instances, where the gap closed for each instance is calculated by $(Obj_{\texttt{HN}} - Obj_{i})/(Obj_{\texttt{HN}}- Obj_{\texttt{FH}}), \forall i \in \{\texttt{MA}, \texttt{PM}, \texttt{MM}\}$. Thus, $0\%$ corresponds to the same objective value given by \texttt{HN}\, and 100\% corresponds to the objective value given by \texttt{FH}.
\begin{table}[htbp]
\def\arraystretch{\stretchTableResults}
\centering
\caption{Policy quality comparison for all transformations in our aggregation framework.}
\scalebox{\scaleTableResults}{
\begin{tabular}{cl|rrrrr|ccc}
\toprule
& & \multicolumn{5}{c|}{\textbf{Average Objective Value}} & \multicolumn{3}{c}{\textbf{\% Gap closed}
} \\
\midrule
Modality & Cap. & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \texttt{MA} & \texttt{PM} & \texttt{MM} \\
\midrule
\multirow{3}[1]{*}{Type-1} & 20\% & 104,162 & 102,572 & 92,924 & 92,924 & 82,193 & 7.5 & 51.9 & 51.9 \\
& 25\% & 73,879 & 73,442 & 66,650 & 66,238 & 62,972 & 3.2 & 69.4 & 73.1 \\
& 30\% & 48,970 & 48,951 & 47,425 & 47,314 & 47,117 & 0.3 & 89.5 & 96.7 \\
\midrule
\multirow{3}[1]{*}{Type-2} & 20\% & 104,135 & 102,625 & 92,612 & 92,612 & 81,182 & 6.7 & 50.9 & 50.9 \\
& 25\% & 73,919 & 73,487 & 66,573 & 65,958 & 63,654 & 3.3 & 75.6 & 81.2 \\
& 30\% & 48,970 & 48,968 & 47,540 & 47,349 & 47,253 & 0.0 & 80.3 & 97.0 \\
\bottomrule
\end{tabular}
}
\label{tab:policy-quality}
\end{table}
We first observe that the difference between the objective values given by \texttt{HN}\ and \texttt{FH}\ is smaller when the initial DC capacities are higher---about $4\%$ when the initial capacity is 30\%, and about $28\%$ when the initial capacity is $20\%$. This suggests that even the most restrictive transformation \texttt{HN}\ can lead to high-quality policies when the DCs are capable of addressing random fluctuating demand without much adaptability. Conversely, less restrictive transformations, which yield more adaptive contingency modality activation plans, become valuable when the initial capacity is small.
Table \ref{tab:policy-quality} also shows that the solution quality of \texttt{MA}\ and \texttt{HN}\ are similar (with $< 10\%$ difference) in all instances, indicating that information in the current stage is insufficient to obtain high-quality policies for our test instances.
This effect is
mitigated in \texttt{PM}\ and \texttt{MM}, since
both can lead to significant performance improvements---closing more than 50\% of the gap between \texttt{FH}\ and \texttt{HN}\ in all cases. In particular, the objective values of \texttt{PM}\ and \texttt{MM}\ are very close to those obtained by \texttt{FH}\ when the initial capacity is 30\%, but this is not the case when the initial capacity is 20\%. \texttt{PM}\ and \texttt{MM}\ perform similarly, suggesting that the hurricane intensity state captures most of the valuable information
from the previous stage. In Section \ref{model-implication} we further explore the reasons behind these.
We now take a closer look at the underlying contingency modality activation policies associated with different transformations. Specifically, we characterize the contingency modality activation by defining some key metrics and report them in Table \ref{tab:solution-structure-type1} for Type-1 instances (see Appendix \ref{app:policy-managerial} for similar results on Type-2 instances). Columns ``Nodes (\%)'' correspond to the average percentage of nodes in the scenario tree with an active contingency modality, and columns ``\# of Contingencies'' provide the average number of contingency modalities used by a policy among all instances. Columns ``Aggressiveness (\%)'' refer to the percentage of capacity increase (between 10\% and 40\%) resulting from the contingency modality activation, and ``Intensity'' corresponds to the hurricane's intensity when a contingency modality is first activated. We note that the average values in the second and third columns are computed only over nodes with active modalities. For example,
\texttt{PM}\ only activates one modality for the instance with 20\% initial capacity, which is an aggressive modality (i.e., 30\%) activated when the hurricane reaches intensity three or higher.
\begin{table}[tbp]
\def\tilde{a}rraystretch{\omegatretchTableResults}
\centering
\caption{Solution structures for different policies and initial capacities for Type-1 instances.}
\omegacalebox{\omegacaleTableResults}{
\begin{tabular}{l|rrrrr|rrrrr|rrrrr|rrrrr}
\toprule
& \multicolumn{5}{c|}{Nodes (\%) } & \multicolumn{5}{c|}{\# of Contingencies } & \multicolumn{5}{c|}{Aggressiveness (\%) } & \multicolumn{5}{c}{Intensity} \\
\midrule
Cap. & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c}{\texttt{FH}} \\
\midrule
20\% & 100 & 93 & 72 & 72 & 53 & 1.0 & 1.0 & 1.0 & 1.0 & 3.5 & 16 & 38 & 30 & 30 & 35 & 2.3 & 2.5 & 2.9 & 2.9 & 2.9 \\
25\% & 10 & 50 & 22 & 22 & 47 & 1.0 & 1.0 & 1.5 & 2.1 & 4.4 & 10 & 15 & 35 & 36 & 21 & 2.3 & 2.9 & 4.0 & 4.0 & 3.0 \\
30\% & 0 & 7 & 16 & 15 & 19 & - & 1.0 & 2.0 & 4.3 & 4.6 & - & 9 & 17 & 18 & 15 & - & 3.0 & 4.1 & 4.1 & 4.0 \\
\bottomrule
\end{tabular}
}
\label{tab:solution-structure-type1}
\end{table}
From Table \ref{tab:solution-structure-type1}, we see that
a lower initial capacity typically leads to more contingency modality activation and modality choices that are more aggressive. Comparing \texttt{HN}\ with other transformations, we see that it has more active nodes when the initial capacity is lower, and has fewer active nodes when the initial capacity is higher. This is attributed to the structure of the policy under \texttt{HN}\, which resembles an ``all-or-nothing'' feature due to the lack of adaptability. The behaviors of \texttt{HN}\ and \texttt{MA}\ are quite similar, although the activation timing of \texttt{MA}\ is later, resulting in a different number of nodes with an active modality. We attribute this similarity to the ``propagation effect'' in MC-based aggregation policies, which we discuss in Section~\ref{model-implication}. In contrast, \texttt{PM}\ and \texttt{MM}\ result in more adaptive policies that resemble a ``wait-and-activate'' type of behavior, similar to \texttt{FH}. In these more adaptive policies, contingency modalities tend to be activated when the hurricane reaches a higher intensity state. In addition, the more adaptive policies lead to a more diverse set of modality choices and a better balance between aggressiveness and the number of active nodes depending on the initial capacity.
Comparing the MC-based transformations \texttt{PM}\ and \texttt{MM}\ with \texttt{FH}, which has full adaptability, we see the value of additional adaptability in activating fewer nodes throughout the scenario tree with more diverse contingency modalities and appropriate aggressiveness.
\omegaubsection{Implications of the MC-based Aggregation in HDR}\label{model-implication}
We now analyze impacts of the MC-based aggregation on the resulting HDR solution policies.
In particular, we study why the number of possible contingency modalities is low for some of the transformations (e.g., at most one for \texttt{MA}) in our test instances. First, recall that binary variables $z_{n\ell}$ represent whether contingency modality $\ell \in {\mathcal L}$ is active in node $n \in {\mathcal T}$. In addition, the model imposes that only one modality is active in each state \eqref{dr_node:modality} and every active modality remains active in the following stages \eqref{dr_node:modality2}. Thus, these constraints restrict the number of different modalities that can be active in any solution (including \texttt{FH}), especially
when an aggregation is applied.
To understand the implications of an MC-based aggregation over nodes in the scenario tree with active modalities, Figure \ref{fig:solution_restricction} illustrates solutions of different transformations in an abstract scenario tree with two states
(from Example~\ref{example:scenario_tree}).
Specifically, \omegaquareRed{1} are nodes where a modality is \textit{initially activated} (as opposed to passively activated
due to propagation, as explained below), \pentaGreen{1} are nodes that have active modalities because of prior activation at their parent nodes, and \hexaOrange{1} are nodes where the modality is \textit{passively activated} (i.e., the activation is a consequence of the propagation caused by the transformation that forces certain nodes to have the same $z$ values). For example, we can see that the \texttt{MA}\ policy
initially activates a modality in $t=2$ at the dark node. However, this causes the two \hexaOrange{1} nodes to be passively activated because nodes with the same MC state must have the same $z$ values according to transformation \texttt{MA}. This propagation, as a result of the MC-based aggregation, may cause an initial activation to have ``unintended consequences'' of passively activating nodes that may not necessarily benefit from the modality activation. This explains the similarity between \texttt{HN}\ and \texttt{MA}\ that we observe in Section~\ref{sec:experiments-policy}. We can also observe such propagation behaviors with \texttt{PM}\ and \texttt{MM}\, but they occur less often since the aggregation depends on the MC state of the current stage and the previous one. In contrast, the activation when using \texttt{FH}\ does not have any such propagation effect since no aggregation is imposed (i.e., no \hexaOrange{1} can be observed from \texttt{FH}).
\begin{figure}
\caption{Activation solutions over an abstract scenario tree for different MC-based transformations.}
\label{fig:solution_restricction}
\end{figure}
Lastly, Figure \ref{fig:heatmap} illustrates the resulting policies for a single instance in a series of images corresponding to the grid of the problem where the hurricane originates at $y=0$ (i.e., bottom row) and lands at $y=5$ (i.e., top row). The top grids show the percentage of nodes in the scenario tree with an active contingency modality for each cell. The bottom grids are heat-maps showing the proportion of nodes in a cell where a contingency modality is the first activated (either initially activated or passively activated), and the color scheme corresponds to the hurricane intensity level (i.e., darker colors for higher intensity). Each row is associated with a symbol \omegaquareRed{1} or \hexaOrange{1}, which distinguishes if the nodes are initially or passively activated, respectively. We observe that \texttt{MA}, \texttt{PM}, and \texttt{MM}\ lead to a wait-and-activate type behavior by activating modalities in high-intensity nodes at later stages to reduce the number of active modalities in the last stage, a similar strategy to \texttt{FH}. In contrast, \texttt{HN}\ activates a modality in the first stage even though it may not be necessary for many nodes in later stages. We also observe that the intensity levels of \hexaOrange{1} nodes tend to be much smaller than the ones associated with \omegaquareRed{1} nodes. This reflects the ``unintended consequence'' of forcing nodes of low intensity to be passively activated due to the restrictions imposed by the transformations.
\begin{figure}
\caption{Graphical representation of the solutions of an instance of Type-1 with 20\% initial capacity.}
\label{fig:heatmap}
\end{figure}
\omegaection{Conclusions} \label{sec:conclusions}
We present an aggregation framework to address MSILP problems with mixed-integer state variables and continuous local variables. The main idea of the framework is to reformulate the original MSILP such that all integer state variables are first-stage and then apply a suitable transformation to aggregate these variables to reduce the problem size while not sacrificing the solution quality. We describe a number of transformations that rely on the underlying structure of the stochastic process, which we assume is given by an MC. We demonstrate an exact solution approach based on a B\&C framework integrated with the SDDP algorithm to solve the resulting aggregated MSILP. Also, we propose a computationally more tractable alternative based on the 2SLDR approximations and introduce new MC-based decision rules. These approaches are tested on a hurricane disaster relief logistics application with contingency modality decisions. Empirical results show the trade-off between different transformations regarding the solution quality and computational effort, and the best trade-off is yielded by transformations that consider information from current and previous MC states (i.e., \texttt{PM}). The SDDP lower and upper bounding procedures have the best performance in terms of solution quality for large-scale instances, while the MC-based 2SLDR approach returns high-quality solutions with significantly less computational effort.
As shown by our empirical results, the performance of our aggregation framework largely depends on the chosen transformation. With this in mind, one future research direction is to investigate how to choose an appropriate transformation for a given problem instance. An alternative research direction is to develop a dynamic extension of the current approach to iteratively refine the aggregation over time, e.g., in a rolling horizon procedure.
{
}
\begin{APPENDICES}
\omegaection{B\&C Integrated with the SDDP algorithm}
\label{app:bc_sddp}
Algorithm \ref{alg:bc-sddp} describes the B\&C procedure. The procedure receives as input a few parameters used by an SDDP sub-routine to be explained in detail later. Note that lines 4--7 correspond to the standard B\&B algorithm of commercial solvers.
\begin{algorithm}[H]
\omegamall
\linespread{1.2}\omegaelectfont
\caption{A B\&C framework integrated with the SDDP algorithm} \label{alg:bc-sddp}
\begin{algorithmic}[1]
{\mathbb{P}}ocedure{\texttt{SDDP-B\&C}}{$\texttt{exact}, K, \epsilon$}
{\mathcal S}tate Initialize B\&B search for $\overlineline{Q}^A_\textsf{r}$ (master problem) and the \texttt{SDDP}\ environment.
{\mathcal S}tate Initialize $\mathcal{B}_n :=\emptyset$ for all $n \in {\mathcal C}(\textsf{r})$ and $\texttt{cutAdded} := \textbf{false}$.
\mathbb{R}epeat
{\mathcal S}tate Choose a B\&B node from its list of open nodes. Solve the associated node relaxation problem.
\mathbb{I}f {Current B\&B node is infeasible or the relaxation bound is worse than the bound given by an incumbent solution}
{\mathcal S}tate Prune this B\&B node.
{\mathbb E}ndIf
\mathbb{I}f {Values $z^Asol$ of the current solution $(x_{\rootnode}sol, z^Asol, \widehat{\theta})$ are integer}
\mathbb{R}epeat {
\color{gray} \%\% Cutting plane subroutine}
{\mathcal S}tate $\texttt{cutAdded} := \textbf{false}$.
{\mathcal F}or {$n' \in {\mathcal C}(\textsf{r})$} {
\color{gray} \%\% Find cuts parameters for each child subproblem}
{\mathcal S}tate Get cut coefficients $(\tilde{a}lpha_{n'},\beta_{n'},\gamma_{n'})$ := \texttt{SDDP}($(x_{\rootnode}sol, z^Asol, \widehat{\theta}), n',\epsilon,K,\texttt{exact}$).
\mathbb{I}f{Cut with coefficients $(\tilde{a}lpha_{n'},\beta_{n'},\gamma_{n'})$ is violated by $(x_{\rootnode}sol, z^Asol, \widehat{\theta})$}
{\mathcal S}tate $\mathcal{B}_{n'}:= \mathcal{B}_{n'}\cup(\tilde{a}lpha_{n'},\beta_{n'},\gamma_{n'})$, $\texttt{cutAdded}:= \textbf{true}$.
{\mathbb E}ndIf
{\mathbb E}ndFor
{\mathcal U}ntil{$\texttt{cutAdded} = \textbf{false}$.}
{\mathbb E}lse
{\mathcal S}tate Branch on the current B\&B node and update the list of open nodes.
{\mathbb E}ndIf
{\mathcal U}ntil{The list of open B\&B nodes is empty.}
{\mathbb E}ndProcedure
\end{algorithmic}
\end{algorithm}
There are a few important points to mention about this procedure. First, we call the SDDP sub-routine for each child of the root node $\textsf{r}$, in which we perform the SDDP forward pass only considering sample paths that contain that specific child. Also, we look for cuts for all children nodes of the root node $\textsf{r}$ before re-solving the current B\&B node relaxation. An alternative would be to re-solve the B\&B node relaxation whenever we find a violated cut, but this option showed worse performance in our preliminary experiments. Lastly, all cuts added to enhance the outer approximation of the cost-to-go functions associated with the SDDP subproblems in the previous iterations are automatically carried over to the current iteration.
\omegaection{Benders Decomposition for the 2SLDR Model} \label{app:2sldr_benders}
In this section we present a Benders decomposition approach to solve \eqref{mod:ldr}. We consider a Benders master problem that handles the first-stage decisions and decompose the second-stage problem into one Benders subproblem per node. Because the number of subproblems can be exponentially many (e.g., when \texttt{LDR-TH}\ is used), we consider an aggregated approach where we have one variable $\theta_{t,m}^L$ that approximates all the cost-to-go functions associated with nodes in stage $t$ and with MC state $m \in {\mathcal M}_t$. This variant can be seen as a hybrid between the single-cut version (i.e., using a single $\theta$ variable) and the multi-cut version (i.e., using one $\theta_{t,n}^L$ variable for each subproblem) of Benders decomposition \citep{van1969shaped,birge1988multicut}. Our preliminary experiments show that our Benders variant performs the best on our test instances, however, other variants of Benders decomposition and computational enhancements can also be considered (see, e.g., \cite{zverovich2012computational} and \cite{bodur2017strengthened}).
In what follows we present the details of our Benders decomposition for \texttt{LDR-TH}\ (the other two cases, \texttt{LDR-T}\ and \texttt{LDR-M}\, are similar). The Benders master problem is given by
\begin{align}
{Q}^Lsub_{\textsf{r}} = \min\; &
\omegaum_{n\in {\mathcal N}}p_n \left( c_n^\top \za_{\phi_t(n)} + d_n^\top(\mu_{t(n)}^\top\xi^{t(n)}_n) \right) + h_\textsf{r}^\top y_{\rootnode} + \omegaum_{t = 2}^T\omegaum_{m \in {\mathcal M}_t}\theta_{t,m}^L \nonumber\\
\text{s.t.}\; & \eqref{eq:ldr-constr0}-\eqref{eq:ldr-constr2}, \nonumber \\
& \theta^L_{t,m} \geq \tilde{a}lpha^L_{t,m}{}^\top \mu + \beta^L_{t,m}{}^\top z^A + \gamma^L_{t,m}, \quad \forall (\tilde{a}lpha^L_{t,m},\beta^L_{t,m},\gamma^L_{t,m})\in \mathcal{B}ldr_{t,m}, \ m \in {\mathcal M}_t, \ t \in \{2,...,T\}, \label{eq:ldr_benderscut}\\
& (y_{\rootnode}, \mu, z^A) \in \mathbb{R}^r \times \mathbb{R}^{k\times \omegaum_{t\in [\periods]}l_tt} \times \Z^{\ell \cdot \omegaum_{t \in [\periods]} q_t}, \nonumber
\end{align}
where $\theta^L \in \mathbb{R}^{\omegaum_{t=2}^{T}|{\mathcal M}_t|}$ is introduced to represent the cost-to-go function approximation for each Markovian state in all remaining stages (except for the first stage) via constraints~\eqref{eq:ldr_benderscut}. Here $\mathcal{B}ldr_{t,m}$ stores the coefficients associated with the Benders cuts for each MC state $m \in {\mathcal M}_t$.
\begin{algorithm}[htb]
\omegamall
\linespread{1.2}\omegaelectfont
\caption{Benders Decomposition} \label{alg:benders}
\begin{algorithmic}[1]
{\mathbb{P}}ocedure{\texttt{Benders}}{$\epsilon$}
{\mathcal S}tate Initialize B\&B search for ${Q}^Lsub_\textsf{r}$ (master problem)
\mathbb{R}epeat
{\mathcal S}tate Choose a node B\&B from its node list. Solve the associated node relaxation problem.
\mathbb{I}f {Current B\&B node is infeasible or the relaxation bound is worse than the bound given by an incumbent solution}
{\mathcal S}tate Prune this B\&B node.
{\mathbb E}ndIf
\mathbb{I}f {Current solution $(\musol, z^Asol, \widehat{\theta}^L)$ is integer}
\mathbb{R}epeat {
\color{gray} \%\% Cutting plane subroutine}
{\mathcal F}or {$m \in {\mathcal M}_t$ and $t \in \{2,...,T\}$}
{\mathcal F}or {$n \in {\mathcal N}t(m)$}
{\mathcal S}tate Solve ${Q}^Lsol_n:={Q}^L_n(\musol, z^Asol)$ and save dual solutions ${\pi^{n}}$.
{\mathbb E}ndFor
{\mathcal S}tate Save value ${Q}^Lsol_{t,m}:=\omegaum_{n \in {\mathcal N}t(m)} p_n{Q}^Lsol_n$.
\mathbb{I}f{$|{Q}^Lsol_{t,m} - \widehat{\theta}^L_{t,m}| \geq \epsilon |{Q}^Lsol_{t,m}|$} {
\color{gray} \%\% Add cut if needed}
{\mathcal S}tate Add inequality \eqref{eq:ldr_benderscut} and exit the for loop.
{\mathbb E}ndIf
{\mathbb E}ndFor
{\mathcal U}ntil{No new inequality is added to the master problem ${Q}^Lsub_\textsf{r}$.}
{\mathbb E}lse
{\mathcal S}tate Branch on the current B\&B node and update list of nodes.
{\mathbb E}ndIf
{\mathcal U}ntil{No more nodes in the B\&B node list.}
{\mathbb E}ndProcedure
\end{algorithmic}
\end{algorithm}
Algorithm \ref{alg:benders} describes our Benders decomposition for \eqref{mod:ldr}. The procedure is a B\&C algorithm similar to the one described in Algorithm \ref{alg:bc-sddp}. Once an integer solution is found during B\&B search, we enter the cutting plane subroutine. The algorithm iterates over each stage and MC state and its corresponding nodes, where ${\mathcal N}t(m)$ is the set of all nodes in stage $t$ associated with MC state $m \in {\mathcal M}_t$. It then evaluates the Benders subproblems for all the nodes associated with a particular $m$ and $t$ and checks if the current approximation $\widehat{\theta}^L_{t,m}$ is close enough to the actual value ${Q}^Lsol_{t,m}$ for a given tolerance $\epsilon$. If the approximation is not good enough, then the algorithm returns a cut and stops evaluating other subproblems. Similar to our SDDP variant, we add cuts to each candidate integer solution until no more cuts are found. The complete procedure ends when there are no more nodes to explore in the B\&B search tree, i.e., we have found an optimal solution or proven infeasibility.
The cuts added to the master problem \eqref{eq:ldr_benderscut} are Benders cuts over all the nodes associated to a given stage and MC state. Specifically, the cut coefficients for $t \in \{2,...,T\}$ and $m \in {\mathcal M}_t$ are
\begin{align*}
\tilde{a}lpha_m^L & = \omegaum_{n \in {\mathcal N}t(m)}p_n {\pi^{n}}^\top A_{n}, \qquad \beta_m = \omegaum_{n \in {\mathcal N}t(m)} p_n {\pi^{n}}^\top B_{n}, \\
\gamma_m &= \omegaum_{n \in {\mathcal N}t(m)} p_n \left( {Q}^Lsol_n - ({\pi^{n}}^\top A_{n} )^\top \widehat{\mu}_{t(n) -1}- ({\pi^{n}}^\top B_{n})^\top z^Asol_{\phi_t(a(n))} \right),
\end{align*}
where ${\pi^{n}}$ are the dual solutions associated with the cost-to-go function ${Q}^L(n)$. Note that each coefficient here corresponds to a weighted sum of the associated coefficients in the subproblems. Also, the algorithm generates Benders feasibility cuts when necessary, which is common in 2SLDR approximations \citep{bodur2018two}.
\omegaection{Aggregated Model for HDR Applicaton} \label{app:hdr_aggregated}
We now present how to transform the MSILP model \eqref{dr_root} with integer state variables at each node of the scenario tree to an MSILP model with integer variables only at the root node, following the ideas presented in Section~\ref{sec:framework} and Section~\ref{sec:methodology}. The general idea is to consider all integer state variables as first-stage variables and apply a suitable transformation $\texttt{MA}ptmarkov$ to aggregate the number of variables to a manageable size. In what follows, we present the resulting aggregated model \eqref{dr_aggre} for a generic transformation and then show four alternatives given the specific structure of \eqref{dr_root}.
Following the notation introduced in Section \ref{sec:framework}, we consider $z^A$ to be the set of aggregated integer state variables for each node. There are two aspects to consider when applying this transformation. First, constraints \eqref{dr_node:modality} and \eqref{dr_node:modality2} are now first-stage constraints. Second, we can omit state variables $x^{\texttt{C}}_n$ because these variables are only used to model the capacity dependency from one stage to the next, which is fully dictated by the first-stage variables $z^A$. The resulting aggregated model is:
\begin{subequations}\label{aggregated-HDR}
\begin{align}
\min \ & \omegaum_{j \in \texttt{MA}thcal{J}}\left( g_{j}x^{\texttt{I}}_{\textsf{r} j} + q_{\textsf{r} j} v_{j} + \omegaum_{i \in \omegahelters}f_{\textsf{r} i j} y_{ij} \right) + \omegaum_{i \in \omegahelters} b_{i}w_{i} + \omegaum_{n \in {\mathcal N}}\omegaum_{\ell \in {\mathcal L}} \lefteqn{
z^A_{\phi_t(n) \ell} c_{\ell} + \omegaum_{n \in {\mathcal C}(\textsf{r})}\bar{p}_{\textsf{r} n} Q^A_n(x_{\rootnode}^I, z^A) } \tag{$\textit{HDR}^A$} \label{dr_aggre} \\
\text{s.t.} \ & \eqref{dr_root:demand}-\eqref{dr_root:inventory} \nonumber \\
& v_{j} \leq C_j, & \forall j \in \texttt{MA}thcal{J}, \label{dr_aggre:capacity}\\
& \omegaum_{\ell \in {\mathcal L}} z^A_{\phi_t(n) \ell} \leq 1, & \forall n\in {\mathcal N}, \label{dr_aggre:modality1} \\
& z^A_{\phi_{t-1}(a(n)) \ell} \leq z^A_{\phi_t(n) \ell}, & \forall \ell \in {\mathcal L}, n \in {\mathcal N}noroot, \label{dr_aggre:modality2} \\
& x^{\texttt{I}}_{\textsf{r} j}, v_{j}, w_{i}, y_{ij} \geq 0, \; z^A_{\phi_t(n) \ell} \in \{0,1\}, & \forall j \in \texttt{MA}thcal{J}, \; i \in \omegahelters, \ell \in {\mathcal L}, n \in {\mathcal N}noroot, \nonumber
\end{align}
\end{subequations}
where the cost-to-go function for a node $n\in {\mathcal N}noroot$ is given by:
\begin{subequations}
\begin{align}
Q^A_n(x_nparent^I, z^A) = \min \ & \omegaum_{j \in \texttt{MA}thcal{J}}\left( g_{j}x^{\texttt{I}}_{n j} + q_{n j} v_{j} + \omegaum_{i \in \omegahelters} f_{nij}y_{ij} \right) + \omegaum_{i \in \omegahelters} b_{i}w_{i} + \lefteqn{\omegaum_{n' \in {\mathcal C}(n)}\bar{p}_{n n'} Q^A_{n'}(x_n^I, z^A)} \nonumber \\
\text{s.t.}\; & \eqref{dr_node:demand}-\eqref{dr_node:inventory} \nonumber \\
& v_{j} \leq C_j + \omegaum_{n' \in {\mathcal P}(n)}\omegaum_{\ell \in {\mathcal L}} K_{j \ell}z^A_{\phi_t(n') \ell}, & \forall j \in \texttt{MA}thcal{J}, \label{dr_aggre:capacity2}\\
& x^{\texttt{I}}_{n j}, v_{j}, w_{i}, y_{ij} \geq 0, & \forall j \in \texttt{MA}thcal{J}, \; i \in \omegahelters. \nonumber
\end{align}
\end{subequations}
Note that the right-hand side of constraint \eqref{dr_aggre:capacity2} represents the current capacity of a DC by considering the modality activation and capacity increases at previous stages. Also, although constraints \eqref{dr_aggre:modality1} and \eqref{dr_aggre:modality2} are imposed for each node, the fact is that some of them might be identical because they share the same problem parameters and aggregated variables. For example, transformation \texttt{HN}\ only needs one set of constraints \eqref{dr_aggre:modality1} per stage because of the stage-based variable aggregation.
\omegaection{Problem Description and Instance Generation for HDR} \label{app:hdr}
We now describe the main components of our HDR problem and present details of the instance generation procedure. We focus on the grid representation for the problem, the MC, and three main parameters (i.e., demand, modalities, and capacity). All other details about instance generation (e.g., cost parameters) can be found in our instance generation code. We will make the instance generation code and the set of instances publicly available upon publication.
\omegaubsection{Grid Representation and Locations of Shelters and DCs}
Similar to previous works in the literature, we use a grid network to represent the potential locations of shelters and DCs, as well as possible locations of the hurricane (see Figure~\ref{fig:hurricane_grid}). The top row on the grid represents the land, where each cell contains a subset of shelters and DCs. The remaining cells in the grid correspond to the sea and are used to represent possible locations of the hurricane at any stage during the planning horizon. For simplicity, we assume that any shelter can be supplied by any DC in the network.
\begin{figure}
\caption{A $6\times 4$ grid example that shows the initial location of the hurricane $(m^x,m^y)=(5,0)$ and all possible locations in the following stages (i.e., shaded region).}
\label{fig:hurricane_grid}
\end{figure}
Land and sea cells have pre-defined dimensions: land cells have a size of $50\times100$, while sea cells have a size of $20\times100$. We randomly generate 3 to 7 shelters and 2 to 4 DCs in each land cell. Shelters and DCs are positioned uniformly at random inside their corresponding cells. The coordinates of the hurricane located in a cell are assumed to be the coordinates of the center of the cell.
\omegaubsection{MC description}
As described in the main text, each MC state corresponds to two attributes of the hurricane: location and intensity, that is, $m=(m^x, m^y, m^i)\in {\mathcal M}$ where $m^x$ and $m^y$ are the $x$- and $y$-coordinates of the hurricane's location, respectively, and $m^i$ represents the hurricane's intensity level. The MC is given by two independent transition probability matrices: one for the hurricane intensity level and one for its location. The intensity level transition matrix is identical to the one used by \cite{pacheco2016forecast}, which considers six levels of intensity $\{0,1,\dots,5\}$ where level 5 corresponds to the maximum hurricane intensity and level 0 corresponds to the case when the hurricane dissipates. The following matrix shows the transition probabilities where $P_{ij}$ represents the probability of transitioning from intensity level $i$ to $j$.
\[
P = \bordermatrix{
~ & 0 & 1 & 2 & 3 & 4 & 5 \cr
0 & 1 & 0 & 0 & 0 & 0 & 0 \cr
1 & 0.11& 0.83& 0.06& 0 & 0 & 0 \cr
2 & 0 & 0.15& 0.6 & 0.25& 0 & 0 \cr
3 & 0 & 0 & 0.04& 0.68& 0.28& 0 \cr
4 & 0 & 0 & 0 & 0.18& 0.79& 0.03 \cr
5 & 0 & 0 & 0 & 0 & 0.5 & 0.5 \cr
}
\]
For the hurricane movement, we assume that the hurricane originates at the bottom row of the grid (i.e., $m^y = 0$ for the initial MC state) and that $m^y$ increases by one in each period (i.e., the hurricane advances upwards by one step in each period) and will reach land in exactly $T$ periods. The hurricane can also move on the x-axis by either staying in the same x-coordinate, moving one cell to the left or one cell to the right. To determine the transition probabilities of each movement, we assign random weights to each movement and normalize them. In particular, we assign a uniform weight between 30 and 40 for staying in the same x-coordinate and a weight between 20 and 40 for moving to the right and left cells. If the current x-coordinate is at one of the grid borders (e.g., $m^x=0$), then the probability of stepping out of the grid is zero. As an example, consider a $4\times 5$ grid, current hurricane location $(m^x,m^y)=(1,1)$, and weights 25, 36, and 22, for moving to locations $(0,2)$, $(1,2)$, $(2,2)$, respectively. Then, the transition probability of moving from location $(1,1)$ to $(0,2)$ is $\frac{25}{83}\tilde{a}pprox 0.3$.
The hurricane movement transition probability matrix is generated at random for each instance. The initial $x$-coordinate is chosen uniformly at random considering all possible options. The initial intensity level is also generated with a uniform random distribution considering hurricane intensity levels of two or higher.
\omegaubsection{Demand Generation}
The demand for each shelter depends on the current MC state of the hurricane (i.e., location and intensity). We consider a maximum demand parameter $d^{\texttt{MA}x}$ and a maximum distance parameter $\delta^{\texttt{MA}x}$ to model this dependency. The maximum demand $d^{\texttt{MA}x}$ is chosen at random between values 1000 and 1500 for each instance and represents the maximum demand for all shelters in a cell. The maximum demand at a cell is randomly distributed across all shelters in that cell. Therefore, each shelter $j\in \omegahelters$ has a maximum demand $d^{\texttt{MA}x}_j$ and the total maximum demand of all shelters in the same cell is equal to $d^{\texttt{MA}x}$.
The maximum distance $\delta^{\texttt{MA}x}=150$ represents the distance of influence of the hurricane, i.e., all shelters which are farther than $\delta^{\texttt{MA}x}$ from the hurricane have zero demand. If the distance between a shelter $j \in \omegahelters$ and the hurricane is less than $\delta^{\texttt{MA}x}$, then the shelters demand for MC state $m=(m^x, m^y, m^i) \in {\mathcal M}$ is given by:
\[ d_{j,m} = d^{\texttt{MA}x}_j \left(1 - \frac{\delta_{m,j}}{\delta^{\texttt{MA}x}}\right)\cdot \left(\frac{m^i}{5}\right)^2, \]
where $\delta_{m,j}$ is the Euclidean distance between the current location of the hurricane and the shelter. Thus, the demand generation is linear with respect to the hurricane location and quadratic with respect to the hurricane intensity. Note that the demand is zero if the intensity level is 0 and that the maximum demand is achieved when the hurricane is on top of the shelter and has the highest intensity level (i.e., $\delta_{m,j} = 0$ and $m^i=5$).
\omegaubsection{Initial Capacity, Contingency Modality Options, and Capacity Expansion}
The initial capacity of a DC depends on both $d^{\texttt{MA}x}$ and the initial capacity percentage defined in Section \ref{sec:experiments}. The total initial capacity over all DCs in a cell is given by $C^\text{ini}= d^{\texttt{MA}x}\cdot p$, where $p \in \{0.2, 0.25, 0.3\}$ is the initial capacity percentage. The total initial capacity of a cell $C^\text{ini}$ is then distributed uniformly at random across all DCs in that cell. Thus, the initial capacity $C_j$ of a particular DC $j \in \texttt{MA}thcal{J}$ is given by a portion of $C^\text{ini}$.
We generate different contingency modality options depending on the grid size (i.e., number of land cells) and the modality Type (i.e., either Type-1 or Type-2, see Section \ref{sec:experiments}). Our modality set ${\mathcal L}$ considers options that increase DCs capacity in adjacent cells and options that increase the capacity for all DCs. For example, in a $4\times5$ grid we consider modality options that increase capacity for DCs in cells 0 and 1, 1 and 2, 2 and 3, and options that increase the capacity in all DCs, that is, a total of four different contingency modality options based on location.
In addition, we consider capacity increments of Type-1 or Type-2, each one with four different options: Type-1 has options $\{10\%, 20\%, 30\%, 40\%\}$ and Type-2 has options $\{15\%, 30\%, 45\%, 60\%\}$. Then each modality option in ${\mathcal L}$ corresponds to a combination of the affected DCs based on location and the capacity increase. Considering a $4\times5$ grid example with Type-1 modalities, a possible modality option is to increase the capacity of DCs in cells 2 and 3 by 20\% increments. In this example, we have a total of 16 modality options (i.e., four location options and four increments options).
The capacity increments for each modality are based on the initial capacity of the DCs. For example, consider the $4\times5$ grid with Type-1 modalities and active modality $\ell \in {\mathcal L}$ affecting cells 2 and 3 by 20\% increments. Then, a DC $j \in \mathcal{J}$ in cell 0 has no capacity increments given the current active modality (i.e., $K_{j\ell} =0$), but a DC $j' \in \mathcal{J}$ in cell 2 with initial capacity $C_{j'}=100$ has capacity increments $K_{j'\ell}= 20$.
\section{Model Size for HDR Instances}
\label{app:model_size}
Tables \ref{tab:model_size_small} and \ref{tab:model_size_large} show the average number of variables and constraints for both small-size ($4\times5$ grid) and large-size ($5\times6$ grid) instances.
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Average number of variables and constraints for different aggregations over small-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{l|rrrrr}
\toprule
& \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c}{\texttt{FH}} \\
\midrule
Continuous Variables & 566,370 & 566,370 & 566,370 & 566,370 & 566,370 \\
Integer Variables & 80 & 1,120 & 6,720 & 16,528 & 41,760 \\
Total Variables & 566,450 & 567,490 & 573,090 & 582,898 & 608,130 \\
Constraints & 120,129 & 124,834 & 129,373 & 137,171 & 164,414 \\
\bottomrule
\end{tabular}
}
\label{tab:model_size_small}
\end{table}
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Average number of variables and constraints for different aggregations over large-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{l|rrrrr}
\toprule
& \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c}{\texttt{FH}} \\
\midrule
Continuous Variables & 12,281,770 & 12,281,770 & 12,281,770 & 12,281,770 & 12,281,770 \\
Integer Variables & 120 & 2,300 & 13,800 & 46,800 & 733,240 \\
Total Variables & 12,281,890 & 12,284,070 & 12,295,570 & 12,328,570 & 13,015,010 \\
Constraints & 2,163,164 & 2,175,033 & 2,188,428 & 2,224,132 & 2,932,940 \\
\bottomrule
\end{tabular}
}
\label{tab:model_size_large}
\end{table}
\section{Computational Effort for Exact Methods}
\label{app:experiments-exact}
We now compare two solution methods for the aggregated models with four types of transformations, that is, solving the extensive form model directly with CPLEX (\texttt{Ex}) versus the B\&C procedure integrated with the SDDP algorithm proposed in Section~\ref{sec:methodology} (\omegaddp). Table~\ref{tab:exact_small} shows the average solution time (for instances where optimal solutions are obtained within the time limit) and the average optimality gaps (for unsolved instances) for the small-size instances. We also include the results of solving \texttt{FH}\ with \texttt{Ex}\ as a point of reference. We note that \texttt{Ex}\ solves to optimality all the instances for all transformations except for 33 when solving \texttt{FH}, while \omegaddp\ fails to solve one instance for \texttt{PM}\ and solves only one instance to optimality for \texttt{MM}.
From Table~\ref{tab:exact_small} we see a clear advantage of \texttt{Ex}\ over \omegaddp\ in both computational time and number of instances solved within the time limit across all transformations and instance configurations. These results are mostly explained by the fact that approximately 90\% of the computational time is spent inside the SDDP sub-routine. One main factor is the number of SDDP subproblems for each transformation: \texttt{HN}\ and \texttt{MA}\ have on average $69$ SDDP subproblems, \texttt{PM}\ has $144$, and \texttt{MM}\ has $294$, which explains the relatively poor performance of \texttt{MM}.
\begin{table}[tb]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Performance comparison between \omegaddp\ and \texttt{Ex}\ for small-size instances ($4\times5$ grid size).}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rr|rr|rr|rr|r|rr|r}
\toprule
& & \multicolumn{9}{c|}{\textbf{Average Time (sec)}} & \multicolumn{3}{c}{\textbf{Gap (\%)}} \\
\midrule
& & \multicolumn{2}{c|}{\texttt{HN}} & \multicolumn{2}{c|}{\texttt{MA}} & \multicolumn{2}{c|}{\texttt{PM}} & \multicolumn{2}{c|}{\texttt{MM}} & \multicolumn{1}{l|}{\Margarita{\texttt{FH}}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c}{\Margarita{\texttt{FH}}} \\
\midrule
Modality & \multicolumn{1}{l|}{Cap.} & \multicolumn{1}{c}{\texttt{Ex}} & \multicolumn{1}{c|}{\omegaddp} & \multicolumn{1}{c}{\texttt{Ex}} & \multicolumn{1}{c|}{\omegaddp} & \multicolumn{1}{c}{\texttt{Ex}} & \multicolumn{1}{c|}{\omegaddp} & \multicolumn{1}{c}{\texttt{Ex}} & \multicolumn{1}{c|}{\omegaddp} & \multicolumn{1}{c|}{\texttt{Ex}} & \multicolumn{1}{c}{\omegaddp} & \multicolumn{1}{c}{\omegaddp} & \multicolumn{1}{c}{\texttt{Ex}} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & \textbf{ 260 } & 1,221 & \textbf{ 712 } & 5,969 & \textbf{ 961 } & 4,553 & \textbf{ 1,853 } & - & 1,402 & - & 14.9 & - \\
& \multicolumn{1}{l|}{25\%} & \textbf{ 132 } & 195 & \textbf{ 361 } & 1,313 & \textbf{ 487 } & 2,760 & \textbf{ 626 } & - & 3,974 & - & 3.5 & 0.8 \\
& \multicolumn{1}{l|}{30\%} & \textbf{ 73 } & 102 & \textbf{ 109 } & 272 & \textbf{ 222 } & 3,380 & \textbf{ 367 } & - & - & - & 2.1 & 0.4 \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & \textbf{ 253 } & 1,138 & \textbf{ 919 } & 5,880 & \textbf{ 2,047 } & 13,224 & \textbf{ 2,951 } & 20,699 & 2,546 & 19.9 & 17.8 & 0.9 \\
& \multicolumn{1}{l|}{25\%} & \textbf{ 167 } & 140 & \textbf{ 393 } & 1,324 & \textbf{ 651 } & 3,635 & \textbf{ 817 } & - & - & - & 6.2 & 0.8 \\
& \multicolumn{1}{l|}{30\%} & \textbf{ 82 } & 102 & \textbf{ 119 } & 205 & \textbf{ 271 } & 4,347 & \textbf{ 556 } & - & - & - & 2.3 & 0.6 \\
\midrule
\multicolumn{2}{c|}{Average} & \textbf{ 161 } & 483 & \textbf{ 436 } & 2,494 & \textbf{ 773 } & 5,316 & \textbf{ 1,195 } & 20,699 & 2,641 & 19.9 & 7.8 & 0.7 \\
\bottomrule
\end{tabular}
}
\label{tab:exact_small}
\end{table}
The previous results may suggest that \texttt{Ex}\ requires less computational effort for small-size instances than \omegaddp. However, large-size instances cannot even be loaded into the solver due to the large number of variables and constraints in the formulation, that is, approximately 16GB of memory after the presolved phase (see Appendix \ref{app:model_size} for further model size information), so \omegaddp\ is our only resort for these instances. Table \ref{tab:exact_sddp_large} shows the number of instances solved to optimality, the number of instances where the algorithm found a feasible solution (but could not prove optimality), and the average optimality gap over instances with a feasible solution for \omegaddp\ over large-size instances. We see that \omegaddp\ performs quite well for transformations \texttt{HN}\ and \texttt{MA}\, solving several instances to optimality; however, it fails to even find any feasible solution for \texttt{MM}. As before, these results are explained mainly by a large number of SDDP subproblems, and for these large-size instances, \texttt{HN}\ and \texttt{MA}\ have on average $114$ subproblems, \texttt{PM}\ has $249$, and \texttt{MM}\ has $593$.
\begin{table}[tb]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Performance of the SDDP integrated B\&C algorithm in large-size instances ($5\times6$ grid size).}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrrr|rrrr|rrrr}
\toprule
& & \multicolumn{4}{c|}{\textbf{\# Optimal}} & \multicolumn{4}{c|}{\textbf{\# Feasible}} & \multicolumn{4}{c}{\textbf{Opt. Gaps (\%)}} \\
\midrule
Modality & \multicolumn{1}{l|}{Cap.} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c|}{\texttt{MM}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c|}{\texttt{MM}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 2 & 0 & 0 & 0 & 8 & 10 & 7 & 1 & 53.4 & 51.8 & 56.1 & 82.0 \\
& \multicolumn{1}{l|}{25\%} & 5 & 2 & 0 & 0 & 5 & 8 & 6 & 0 & 35.7 & 26.2 & 27.9 & - \\
& \multicolumn{1}{l|}{30\%} & 10 & 8 & 0 & 0 & 0 & 1 & 4 & 0 & - & 15.4 & 4.3 & - \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 3 & 0 & 0 & 0 & 7 & 10 & 7 & 1 & 43.5 & 52.9 & 61.4 & 86.0 \\
& \multicolumn{1}{l|}{25\%} & 6 & 3 & 0 & 0 & 4 & 7 & 6 & 0 & 37.2 & 24.9 & 32.8 & - \\
& \multicolumn{1}{l|}{30\%} & 9 & 8 & 0 & 0 & 1 & 1 & 6 & 0 & 12.0 & 13.3 & 6.4 & - \\
\midrule
\multicolumn{2}{c|}{Total/Av.} & 35 & 21 & 0 & 0 & 25 & 37 & 36 & 2 & 36.4 & 30.7 & 31.5 & 84.0 \\
\bottomrule
\end{tabular}
}
\label{tab:exact_sddp_large}
\end{table}
\section{Additional Results for Approximation Methods}
\label{app:approx-additional-resutls}
Tables \ref{tab:2sldr-hn-smallsize} to \ref{tab:2sldr-mm-largesize} show the performance of the approximated methods based on 2SLDR and SDDP for small-size and large-size instances, and transformations \texttt{HN}, \texttt{PM}, and \texttt{MM}.
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{HN}$ over small-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrr|r|rrrr|r}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{5}{c}{Relative Difference (\%)} \\
\midrule
Modality & \multicolumn{1}{l|}{Cap.} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{l}{\texttt{LDR-T}} & \multicolumn{1}{l|}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\omegaddplb} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{l}{\texttt{LDR-T}} & \multicolumn{1}{l}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\omegaddpub} & \multicolumn{1}{c}{\omegaddplb} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 193.7 & 103.6 & \textbf{71.7} & 76.5 & 0.04 & 0.05 & \textbf{0.00} & 0.06 & 0.44 \\
& \multicolumn{1}{l|}{25\%} & 132.2 & \textbf{31.3} & 33.1 & 15.2 & 0.05 & 0.07 & \textbf{0.00} & \textbf{0.00} & 0.63 \\
& \multicolumn{1}{l|}{30\%} & 82.5 & 20.0 & \textbf{19.0} & 9.9 & 0.10 & 0.21 & \textbf{0.00} & \textbf{0.00} & 1.36 \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 262.8 & \textbf{64.3} & 76.0 & 61.6 & 0.03 & 0.05 & \textbf{0.00} & 0.03 & 0.46 \\
& \multicolumn{1}{l|}{25\%} & 131.7 & \textbf{29.3} & 35.8 & 12.4 & 0.05 & 0.07 & \textbf{0.00} & 0.01 & 0.62 \\
& \multicolumn{1}{l|}{30\%} & 80.4 & 19.7 & \textbf{19.0} & 10.0 & 0.10 & 0.21 & \textbf{0.00} & \textbf{0.00} & 1.36 \\
\midrule
\multicolumn{2}{c|}{Average} & 147.2 & 44.7 & \textbf{42.4} & 30.9 & 0.06 & 0.11 & \textbf{0.00} & 0.02 & 0.81 \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-hn-smallsize}
\end{table}
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{HN}$ over large-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrrl|rrr}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{3}{c}{Opt. Gap (\%)} \\
\midrule
Modality & Cap. & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\omegaddplb} & (opt) & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\omegaddpub} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 2,100.9 & 1,961.8 & \textbf{ 449.3} & (10) & 0.51 & 0.33 & \textbf{0.13} \\
& \multicolumn{1}{l|}{25\%} & 1,679.4 & 1,628.6 & \textbf{ 288.0} & (10) & 0.78 & 0.39 & \textbf{0.15} \\
& \multicolumn{1}{l|}{30\%} & 965.1 & 864.2 & \textbf{ 56.6} & (10) & 0.61 & 0.48 & \textbf{0.18} \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 2,501.3 & 1,794.3 & \textbf{ 730.9} & (10) & 0.56 & 0.31 & \textbf{0.12} \\
& \multicolumn{1}{l|}{25\%} & 1,660.5 & 1,764.6 & \textbf{ 203.7} & (10) & 0.78 & 0.40 & \textbf{0.13} \\
& \multicolumn{1}{l|}{30\%} & 950.1 & 1,035.3 & \textbf{ 57.9} & (10) & 0.62 & 0.49 & \textbf{0.18} \\
\midrule
\multicolumn{2}{c|}{Av. (Total)} & 1,642.9 & 1,508.1 & \textbf{ 297.7} & (60) & 0.64 & 0.40 & \textbf{0.15} \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-hn-largesize}
\end{table}
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{MA}$ over small-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrr|r|rrrr|r}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{5}{c}{Relative Difference (\%)} \\
\midrule
Modality & \multicolumn{1}{l|}{Cap.} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{l}{\texttt{LDR-T}} & \multicolumn{1}{l|}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\omegaddplb} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{l}{\texttt{LDR-T}} & \multicolumn{1}{l}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\omegaddpub} & \multicolumn{1}{c}{\omegaddplb} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 431.1 & 104.2 & \textbf{95.2} & 367.3 & 0.26 & 0.29 & 0.23 & \textbf{0.08} & 0.19 \\
& \multicolumn{1}{l|}{25\%} & 368.3 & 58.4 & \textbf{49.2} & 167.4 & 0.14 & 0.18 & \textbf{0.11} & 0.22 & 1.11 \\
& \multicolumn{1}{l|}{30\%} & 119.3 & 25.7 & \textbf{25.4} & 17.1 & 0.11 & 0.22 & 0.02 & \textbf{0.00} & 1.56 \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 630.6 & 119.4 & \textbf{117.1} & 439.4 & 0.12 & 0.14 & 0.10 & \textbf{0.09} & 0.26 \\
& \multicolumn{1}{l|}{25\%} & 464.3 & 49.3 & \textbf{46.9} & 111.2 & 0.15 & 0.18 & \textbf{0.11} & 0.18 & 1.15 \\
& \multicolumn{1}{l|}{30\%} & 113.8 & \textbf{22.7} & 23.1 & 16.4 & 0.10 & 0.21 & 0.01 & \textbf{0.00} & 1.58 \\
\midrule
\multicolumn{2}{c|}{Average} & 354.5 & 63.3 & \textbf{59.5} & 186.5 & 0.15 & 0.21 & \textbf{0.09} & 0.10 & 0.97 \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-ma-smallsize}
\end{table}
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{MA}$ over large-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrrl|rrr}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{3}{c}{Opt. Gap (\%)} \\
\midrule
Modality & Cap. & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\omegaddplb} & (opt) & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\omegaddpub} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 2,266.5 & 2,855.8 & \textbf{ 1,913.0} & (10) & 0.50 & 0.32 & \textbf{0.13} \\
& \multicolumn{1}{l|}{25\%} & 3,067.1 & 2,458.6 & \textbf{ 1,295.8} & (10) & 0.84 & 0.46 & \textbf{0.19} \\
& \multicolumn{1}{l|}{30\%} & 1,051.1 & 1,028.4 & \textbf{ 133.1} & (10) & 0.62 & 0.49 & \textbf{0.19} \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 4,065.5 & \textbf{ 3,488.9} & 3,500.0 & (10) & 0.59 & 0.34 & \textbf{0.14} \\
& \multicolumn{1}{l|}{25\%} & 4,295.9 & 4,107.4 & \textbf{ 1,960.6} & (10) & 0.89 & 0.40 & \textbf{0.18} \\
& \multicolumn{1}{l|}{30\%} & 1,272.7 & 1,252.9 & \textbf{ 150.3} & (10) & 0.65 & 0.52 & \textbf{0.20} \\
\midrule
\multicolumn{2}{c|}{Av. (Total)} & 2,669.8 & 2,532.0 & \textbf{ 1,492.1} & (60) & 0.68 & 0.42 & \textbf{0.17} \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-ma-largesize}
\end{table}
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{MM}$ over small-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrr|r|rrrr|r}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{5}{c}{Relative Difference (\%)} \\
\midrule
Modality & \multicolumn{1}{l|}{Cap.} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{l}{\texttt{LDR-T}} & \multicolumn{1}{l|}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\omegaddplb} & \multicolumn{1}{c}{\texttt{LDR-TH}} & \multicolumn{1}{l}{\texttt{LDR-T}} & \multicolumn{1}{l}{\texttt{LDR-M}} & \multicolumn{1}{c|}{\omegaddpub} & \multicolumn{1}{c}{\omegaddplb} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 2766.2 & \textbf{262.2} & 277.0 & 3521.5 & 0.12 & 0.26 & 0.11 & \textbf{0.00} & 0.39 \\
& \multicolumn{1}{l|}{25\%} & 1272.7 & 176.6 & \textbf{154.9} & 12447.8 & 0.09 & 0.13 & \textbf{0.04} & 0.25 & 0.98 \\
& \multicolumn{1}{l|}{30\%} & 4049.5 & \textbf{235.3} & 310.3 & 14746.8 & 0.13 & 0.25 & \textbf{0.00} & 1.37 & 1.77 \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 3916.3 & 504.6 & \textbf{396.0} & 11703.3 & 0.13 & 0.26 & 0.13 & \textbf{0.02} & 0.22 \\
& \multicolumn{1}{l|}{25\%} & 1486.7 & 226.3 & \textbf{164.9} & 21226.9 & 0.09 & 0.12 & \textbf{0.01} & 0.38 & 2.30 \\
& \multicolumn{1}{l|}{30\%} & 8042.7 & \textbf{431.0} & 720.4 & 15323.7 & 0.13 & 0.24 & \textbf{0.01} & 0.95 & 1.80 \\
\midrule
\multicolumn{2}{c|}{Average} & 3589.0 & \textbf{306.0} & 337.2 & 13161.7 & 0.11 & 0.21 & \textbf{0.05} & 0.50 & 1.24 \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-mm-smallsize}
\end{table}
\begin{table}[htbp]
\def\arraystretch{\omegatretchTableResults}
\centering
\caption{Solution time and quality of 2SLDR and SDDP bounds. Results for $\texttt{MM}$ over large-size instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{cc|rrrl|rrr}
\toprule
& & \multicolumn{4}{c|}{Average Time (sec)} & \multicolumn{3}{c}{Opt. Gap (\%)} \\
\midrule
Modality & Cap. & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\omegaddplb} & (opt) & \multicolumn{1}{c}{\texttt{LDR-T}} & \multicolumn{1}{c}{\texttt{LDR-M}} & \multicolumn{1}{c}{\omegaddpub} \\
\midrule
\multirow{3}[2]{*}{Type-1} & \multicolumn{1}{l|}{20\%} & 8,288.1 & \textbf{ 4,776.9} & 20,125.0 & (1) & 39.71 & \textbf{33.00} & 39.39 \\
& \multicolumn{1}{l|}{25\%} & 14,398.1 & \textbf{ 12,203.6} & 21,601.1 & (0) & 15.73 & \textbf{15.08} & 17.53 \\
& \multicolumn{1}{l|}{30\%} & \textbf{ 1,911.4} & 2,076.6 & 21,601.6 & (0) & 2.41 & \textbf{0.67} & 2.09 \\
\midrule
\multirow{3}[2]{*}{Type-2} & \multicolumn{1}{l|}{20\%} & 14,668.7 & \textbf{ 13,135.3} & 21,600.7 & (0) & 39.55 & \textbf{37.42} & 43.68 \\
& \multicolumn{1}{l|}{25\%} & 16,280.7 & \textbf{ 16,051.0} & 21,601.6 & (0) & 23.98 & \textbf{23.22} & 26.70 \\
& \multicolumn{1}{l|}{30\%} & 2,391.2 & \textbf{ 1,925.4} & 21,601.4 & (0) & 3.63 & \textbf{1.51} & 3.50 \\
\midrule
\multicolumn{2}{c|}{Av. (Total)} & 9,656.4 & \textbf{ 8,361.5} & 21,355.2 & (1) & 20.84 & \textbf{18.48} & 22.15 \\
\bottomrule
\end{tabular}
}
\label{tab:2sldr-mm-largesize}
\end{table}
\section{Additional Results for Policy Structures and Managerial Insights}
\label{app:policy-managerial}
Table \ref{tab:solution-structure-type2} presents the same metrics shown in Table \ref{tab:solution-structure-type1} but for Type-2 small-size instances.
\begin{table}[htbp]
\centering
\caption{Solution structures for different policies and initial capacities for Type-2 instances.}
\scalebox{\omegacaleTableResults}{
\begin{tabular}{l|rrrrr|rrrrr|rrrrr|rrrrr}
\toprule
& \multicolumn{5}{c|}{Nodes (\%) } & \multicolumn{5}{c|}{\# of Contingencies } & \multicolumn{5}{c|}{Aggressiveness (\%) } & \multicolumn{5}{c}{Intensity} \\
\midrule
Cap. & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c|}{\texttt{FH}} & \multicolumn{1}{c}{\texttt{HN}} & \multicolumn{1}{c}{\texttt{MA}} & \multicolumn{1}{c}{\texttt{PM}} & \multicolumn{1}{c}{\texttt{MM}} & \multicolumn{1}{c}{\texttt{FH}} \\
\midrule
20\% & 100 & 92 & 72 & 72 & 53 & 1.0 & 1.0 & 1.0 & 1.0 & 4.1 & 14 & 39 & 52 & 52 & 46 & 2.3 & 2.5 & 2.9 & 2.9 & 2.9 \\
25\% & 10 & 38 & 22 & 16 & 31 & 1.0 & 1.0 & 1.9 & 3.8 & 5.3 & 15 & 20 & 51 & 55 & 33 & 2.3 & 2.9 & 4.0 & 4.1 & 3.5 \\
30\% & 0 & 7 & 15 & 14 & 17 & - & 1.0 & 1.9 & 4.8 & 5.7 & - & 14 & 29 & 27 & 23 & - & 3.0 & 4.2 & 4.1 & 4.0 \\
\bottomrule
\end{tabular}
}
\label{tab:solution-structure-type2}
\end{table}
\end{APPENDICES}
\end{document} |
\begin{document}
\title{Universal dynamics of superradiant phase transition in the anisotropic quantum Rabi model}
\author{Xunda Jiang$^{1,2}$}
\author{Bo Lu$^{1}$}
\author{Chengyin Han$^{1}$}
\author{Ruihuan Fang$^{1,2}$}
\author{Minhua Zhao$^{1,2}$}
\author{Zhu Ma$^{1,2}$}
\author{Tian Guo$^{1,2}$}
\author{Chaohong Lee$^{1,2,}$}
\email{lichaoh2@mail.sysu.edu.cn; chleecn@gmail.com}
\address{$^1$Guangdong Provincial Key Laboratory of Quantum Metrology and Sensing $\&$ School of Physics and Astronomy, Sun Yat-Sen University (Zhuhai Campus), Zhuhai 519082, China}
\address{$^2$State Key Laboratory of Optoelectronic Materials and Technologies, Sun Yat-Sen University (Guangzhou Campus), Guangzhou 510275, China}
\begin{abstract}
We investigate the universal non-equilibrium dynamics of the superradiant phase transition in the anisotropic quantum Rabi model.
By introducing position and momentum operators, we obtain the ground states and their excitation gaps for both normal and superradiant phases via perturbation theory.
We analytically extract the critical exponents from the excitation gap and the diverging length scale near the critical point, and find that the critical exponents are independent of the anisotropy ratio.
Moreover, by simulating the real-time dynamics across the critical point, we numerically extract the critical exponents from the phase transition delay and the diverging length scale, which are well consistent with the analytical ones.
Our study provides a dynamic way to explore universal critical behaviors in the quantum Rabi model.
\end{abstract}
\maketitle
\section{INTRODUCTION}
Spontaneous symmetry breaking and quantum phase transitions (QPTs) are two fundamental and important concepts in physics.
The second-order QPTs always associate with spontaneous symmetry breaking~\cite{Sachdev2011,Morikawa1995,Kibble1980}, in which gapless energy spectra and degenerate ground states appear in the thermodynamical limit.
Due to the gapless excitations at the critical point, the adiabaticity breaks down when a system goes through a continuous phase transition.
As a consequence, nontrivial excitations such as domains~\cite{Kibble1976,Lee2009,Davis2011,Davis2012,Swislocki2013,Hofmann2014,Wu2017,Xu2016,Navon2015,Ye2018,Jiang2019}, vortices~\cite{Anderson2008,Su2013,Wu2016} and solitons~\cite{Damski2010,Witkowska2011,Zurek2009} appear spontaneously and obey the well-known Kibble-Zurek mechanism (KZM)~\cite{Kibble1976,Kibble1980,Zurek1985,Zurek1996,JDziarmaga2000,Polkovnikov2011,Bloch2008}.
The KZM has been extensively studied in various systems, from the early universe~\cite{Kibble1976,Kibble1980}, condensed matter systems~\cite{Ruutu1996,Bauerle1996,Monaco2009}, trapped ions~\cite{Campo2010,Ulm2013,Pyka2013,Ejtemaee2013,Lv2018}, to ultracold atomic gases~\cite{Anderson2008,Zurek2009,Lee2009,Damski2010,Witkowska2011,Davis2011,Davis2012,Navon2015,Lamporesi2013,Anquez2016,Clark2016,Feng2018,Swislocki2013,Hofmann2014,Wu2017,Xu2016,Ye2018,Ye2018,Jiang2019}.
The quantum Rabi model (QRM), a paradigmatic model in quantum optics, describes the fundamental interaction between quantized fields and two-level quantum systems~\cite{Forn2019,Kockum2019,Rabi1936,Rabi1937,Zhong2013,Zhong2014,Xie2017}.
In the thermodynamic limit, the QRM exhibits normal-superradiant phase transition, which provides an excellent platform for exploring universal behavior in both equilibrium~\cite{Ashhab2013,Bishop1996,Larson2017,Hwang2010} and non-equilibrium dynamics~\cite{Hwang2018,Puebla2017,Hwang2015}.
The anisotropic QRM, whose rotating and counter-rotating interactions have different coupling strengths~\cite{Fan2014,Zhang2017,WangMY2018}, is a generalized QRM.
Recently, QPTs in the anisotropic QRM and their universality have been studied~\cite{Lin2017}.
However, the corresponding non-equilibrium universal dynamics is still unclear; it is worthwhile to clarify whether the anisotropy ratio affects the universality.
In this work, we investigate the non-equilibrium universal dynamics in the anisotropic QRM.
Under the description of position and momentum operators, making use of the Schrieffer-Wolff (SW) transformation, we obtain the ground states and their excitation gaps with the second-order perturbation theory.
Then, we analytically extract the critical exponents from the excitation gap and the diverging length scale, which reveal that the anisotropic QRM shares the same critical exponents for different anisotropy ratios between the rotating and counter-rotating terms.
Furthermore, we numerically simulate the real-time dynamics of the anisotropic QRM whose coupling strength is linearly swept across the critical point.
With the non-equilibrium dynamics, we numerically extract two universal scalings from the phase transition delay and the diverging length scale with respect to the quench time.
The critical exponents extracted from the numerical simulation are well consistent with the analytical ones.
The paper is organized as follows.
In Sec.~\uppercase\expandafter{\romannumeral2}, we introduce the anisotropic QRM and give its ground states and excitation gaps.
In Sec.~\uppercase\expandafter{\romannumeral3}, we briefly review the KZM, and analytically extract the critical exponents from the excitation gap and the variance of the position and momentum operators.
In Sec.~\uppercase\expandafter{\romannumeral4}, we present the real-time non-equilibrium universal dynamics, and extract the critical exponents from the phase transition delay and the diverging length scale.
Finally, we give a brief summary and discussion in Sec.~\uppercase\expandafter{\romannumeral5}.
\section{The Anisotropic Quantum Rabi Model: Ground states and Excitation gaps}
In the units of $\hbar=1$, the anisotropic QRM can be described by the full-quantum Hamiltonian,
\begin{equation}
H = \omega {a^\dag }a + \frac{\Omega }{2}{\sigma _x} + g\left[ {\left( {{\sigma _ + }a + {\sigma _ - }{a^\dag }} \right) + \lambda \left( {{\sigma _ + }{a^\dag } + {\sigma _ - }a} \right)} \right], \label{Hamiltonian0}
\end{equation}
where $a^{\dagger}(a)$ are the creation (annihilation) operators of the phonons with frequency $\omega$, $g$ is the coupling strength and $\lambda$ denotes the anisotropic ratio between rotating and counter-rotating terms.
Given the Pauli matrices $\sigma_{x,y,z}$ and ${\sigma _ \pm } = \left( {{\sigma _z} \mp i{\sigma _y}} \right)/2$, the second term describes a two-level system with a transition frequency $\Omega$.
Defining the dimensionless position and momentum operators $x = \left( {a + {a^\dag }} \right){\rm{ }}/{\sqrt 2 }$ and $p ={i}\left( {{a^\dag } - a} \right)/{\sqrt 2 }$, the Hamiltonian reads
\begin{small}
\begin{equation} \label{Hamiltonian1}
H = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \frac{\Omega }{2}{\sigma _x} + \tilde g\sqrt {\frac{{\Omega \omega }}{8}} \left[ {\left( {1 + \lambda } \right){\sigma _z}x + \left( {1 - \lambda } \right){\sigma _y}p} \right],
\end{equation}
\end{small}
where $\tilde{g}=2g/\sqrt{\Omega\omega}$.
The Hamiltonian becomes the QRM when $\lambda=1$, while it is the JC model when $\lambda=0$.
The second term becomes dominant in the limit $\Omega/\omega \to \infty$, thus the relevant low-energy states have $\langle \sigma_{x} \rangle \simeq -1$.
Within this subspace, the ground states can be determined by the competition between the first term (a conventional oscillator) and the last term (the coupling between the phonon field and the two-level system)~\cite{Luo2015,Lin2017}.
In Fig.~\ref{GS_1}, we show the typical ground state of Eq.~(\ref{Hamiltonian1}) for different coupling strengths.
It clearly shows that the ground state undergoes spontaneous symmetry breaking from symmetric to asymmetric as $\tilde{g}$ increases, see Fig.~\ref{GS_1}(a).
\begin{figure}
\caption{(Color online) Density distributions of the ground states of the quantum Rabi model. (a) The total density distribution $\left|\psi_{1}
\label{GS_1}
\end{figure}
In the weak coupling region, $\tilde{g}<\tilde{g}_{c}$, the ground state is dominated by the oscillator term in the normal phase as shown in Fig.~\ref{GS_1}(b), which is the vacuum of the phonon field and atom in the low energy space.
However, as the field-matter coupling is increased to the deep strong coupling regime $\tilde{g}>\tilde{g}_{c}$, the ground state turns from the normal phase to the superradiant phase, in which both the atom and the phonon field become excited [see Fig.~\ref{GS_1}(c,d)].
To describe the superradiant phase transition, the excitation of the atom and the phonon field can serve as an order parameter~\cite{Larson2017,Hwang2015}.
\subsection{Normal phase}
Below we briefly review the derivation of ground states and their excitation gaps from a low-energy effective Hamiltonian~\cite{Lin2017}.
To obtain the low-energy effective Hamiltonian, one may apply the SW transformation.
The Hamiltonian~(\ref{Hamiltonian1}) includes an unperturbed Hamiltonian $H_{0}$ and an off-diagonal perturbation $H_{V}$ as follows
\begin{equation}
\begin{array}{l}
{H_0} = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \frac{\Omega }{2}{\sigma _x}, \\
{H_V} = \tilde g\sqrt {\frac{{\Omega \omega }}{8}} \left[ {\left( {1 + \lambda } \right){\sigma _z}x + \left( {1 - \lambda } \right){\sigma _y}p} \right].
\end{array}
\end{equation}
We introduce a unitary operator $S_{0}$~\cite{Lin2017}, which is the generator of the SW transformation and is an anti-Hermitian operator,
\begin{equation}
{S_{0}} = i\tilde g\sqrt {\frac{\omega }{{8\Omega }}} \left[ {\left( {1 - \lambda } \right){\sigma _z}p - \left( {1 + \lambda } \right){\sigma _y}x} \right].
\end{equation}
Then, the second-order low-energy effective Hamiltonian is given as
\begin{equation}\label{Eff_Ham1}
H_{eff}^{(2)} = \left\langle - \right|H_{eff}^{\left( 2 \right)}\left| - \right\rangle \simeq \frac{\omega }{2}\left( {1 - {\xi ^{'}}^2} \right){p^2} + \frac{\omega }{2}\left( {1 - {\xi ^2}} \right){x^2},
\end{equation}
where $\left| \pm \right\rangle$ are the eigenstates of $\sigma_{x}$, $ \xi = \tilde g\left( {1 + \lambda } \right)/2$ and ${\xi ^{'}} = \tilde g\left( {1 - \lambda } \right)/2$.
In the weak coupling region, the effective Hamiltonian behaves as the conventional harmonic oscillator as shown in Fig.~\ref{GS_1}(b), which corresponds to the normal phase.
The normal phase is the vacuum of atom and phonon excitations.
The excitation gap is given as
\begin{equation}\label{Energy_Gap_0}
\varpi_{0} =\omega \sqrt {\left( {1 - {\xi ^2}} \right)\left( {1 - {\xi ^{'}}^2} \right)}.
\end{equation}
The normal-to-superradiant phase transition occurs at $\varpi _{0}=0$, which gives $\xi_{c}=1$ or $\xi^{'}_{c}=1$, that is,
\begin{equation}
{{\tilde g}_c} = \frac{2}{{1 + \left| \lambda \right|}}.
\end{equation}
In the region of $\tilde{g} \le \tilde{g}_{c}$, the ground state is a normal phase ${\psi _0}\left( x,\alpha_{0} \right) = {e^{ - {S_{0}}}}{\phi _0}\left( x,\alpha_{0} \right)\left| - \right\rangle$ with
\begin{equation}\label{Ground_state}
{\phi _0}\left( x,\alpha_{0} \right) = \frac{{\sqrt \alpha_{0} }}{{{\pi ^{1/4}}}}\exp \left( { - \frac{1}{2}{\alpha_{0} ^2}{x^2}} \right)
\end{equation}
denoting the ground state of the harmonic oscillator.
Here the effective mass $m_{0} = 1/\left[\omega\left( {1 - {\xi ^{'}}^2} \right)\right]$ and the wavepacket width $\alpha_{0} = \sqrt{m_{0} \varpi_{0}}$.
\subsection{Superradiant phase}
We now discuss the ground states and the corresponding excitation gaps for the superradiant phase.
In the region of $\tilde{g}>\tilde{g}_{c}$, the system enters into the superradiant phase and the effective Hamiltonian~(\ref{Eff_Ham1}) for the normal phase breaks down.
This means that $P = \left| - \right\rangle \left\langle - \right|$ is no longer the suitable low-energy subspace.
Making use of the SW transformation, we introduce new generators to diagonalize the Hamiltonian for both $\lambda>0$ and $\lambda<0$.
Then, one may obtain an effective Hamiltonian and give its ground states and excitation gaps.
In the case of $\lambda>0$, we introduce a new displaced operator ${\cal D}_{1}\left[ {{\alpha }} \right] = {e^{ - i{\alpha}p}}=e^{-\alpha\frac{\partial}{\partial x}}$ with the parameter $\alpha$ to be determined, thus the Hamiltonian~(\ref{Hamiltonian1}) is transformed as
\begin{equation} \label{Dis_Ham_1}
\begin{array}{l}
H\left( \alpha \right) = {{\cal D}_{1}^\dag }\left( \alpha \right)H{\cal D}_{1}\left( \alpha \right) = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \frac{\Omega }{2}{\sigma _x}\\
+ \sqrt {\frac{{\Omega \omega }}{2}} \left( {\xi {\sigma _z}x + {\xi ^{'}}{\sigma _y}p} \right) + \omega \alpha x + \frac{{\alpha \delta_{1} }}{2}{\sigma _z} + \frac{{\omega {\alpha ^2}}}{2},
\end{array}
\end{equation}
where $\delta_{1} ={\sqrt {2\Omega \omega } \xi }$.
The eigenstates of the atomic part $H_a=\frac{\Omega }{2}{\sigma _x} + \frac{{\alpha \delta_{1} }}{2}{\sigma _z}$ are
\begin{equation}
\left| \uparrow \right\rangle = \cos \theta \left| + \right\rangle + \sin \theta \left| - \right\rangle ,{\kern 6pt} \left| \downarrow \right\rangle = - \sin \theta \left| + \right\rangle + \cos \theta \left| - \right\rangle,
\end{equation}
with $\sin 2\theta = \alpha \delta_{1} /\widetilde \Omega $, $ \cos 2\theta = \Omega /\widetilde \Omega$ and the new atomic transition frequency $\widetilde \Omega = \sqrt {{\Omega ^2} + {{\left( {\delta_{1} \alpha } \right)}^2}}$.
In terms of Pauli matrices $\tau_{x,y,z}$ associated with $\left\{ {\left| \uparrow \right\rangle ,\left| \downarrow \right\rangle } \right\}$, the Hamiltonian~(\ref{Dis_Ham_1}) becomes
\begin{equation}
\begin{array}{l}
H\left( \alpha \right) = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \sqrt {\frac{{\Omega \omega }}{2}} \left( {\xi \cos 2\theta x{\tau _x} - {\xi ^{'}}p{\tau _y}} \right)\\
{\kern 40pt}{\rm{ }} + \left( {\omega \alpha + \sqrt {\frac{{\Omega \omega }}{2}} \sin 2\theta \xi {\tau _z}} \right)x + \frac{{\widetilde \Omega }}{2}{\tau _z} + \frac{{\omega {\alpha ^2}}}{2}.
\end{array}
\end{equation}
To eliminate the perturbation term, $\left( {\omega \alpha + \sqrt {\Omega \omega /2} \sin 2\theta \xi {\tau _z}} \right)x$, we choose the parameter $\alpha$ such that $\omega \alpha - \sqrt {\Omega \omega /2} \sin 2\theta \xi = 0$, which gives the nontrivial solutions
\begin{equation}
\alpha = \pm {\alpha _g} = \pm \sqrt {\left( {\Omega /2\omega {\xi ^2}} \right)\left( {{\xi ^4} - 1} \right)}.
\end{equation}
Given $\alpha = \pm\alpha_{g}$, the Hamiltonian reads
\begin{equation}\label{Super_Ham_1}
\widetilde H\left( { \pm {\alpha _g}} \right) = {\widetilde H_{\rm{0}}} + {\widetilde H_V},
\end{equation}
with
\begin{equation}
\begin{array}{l}
{\widetilde H_0} = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \frac{{\widetilde \Omega }}{2}{\tau _z},\\
{\widetilde H_V} = \sqrt {\frac{{\Omega \omega }}{2}} \left( {\xi \cos 2\theta x{\tau _x} - {\xi ^{'}}p{\tau _y}} \right).
\end{array}
\end{equation}
Making use of the SW transformation, we find a new generator,
\begin{equation}
{S_1} = i\sqrt {\frac{{\Omega \omega }}{{2{{\widetilde \Omega }^2}}}} \left( {{\xi ^{'}}p{\tau _x} + \xi \cos 2\theta x{\tau _y}} \right),
\end{equation}
for diagonalizing the Hamiltonian~(\ref{Super_Ham_1}) under the condition of $\tilde{\Omega}/\omega \gg 1$.
Thus the second-order low-energy effective Hamiltonian reads,
\begin{small}
\begin{equation}
\widetilde H_{eff}^{\left( 2 \right)} \simeq \frac{\omega }{2}\left( {1- \frac{\Omega }{{\widetilde \Omega }}{\xi ^{'}}^2} \right){p^2} + \frac{\omega }{2}\left( {1 - \frac{\Omega }{{\widetilde \Omega }}{\xi ^2}{{\cos }^2}2\theta } \right){x^2}.
\end{equation}
\end{small}
Comparing with the simple harmonic oscillator, the excitation gap (see Fig.~\ref{Energy_Gap_Two_Phase_1}) is given as
\begin{equation} \label{Energy_Gap_1}
\varpi_{1} {\rm{ = }}\omega \sqrt {\left( {{\rm{1}} - \frac{1}{{{\xi ^4}}}} \right)\left( {{\rm{1}}{\kern 1pt} {\kern 1pt} - \frac{{{\xi ^{'}}^{\rm{2}}}}{{{\xi ^2}}}} \right)}.
\end{equation}
The excitation gap recovers the previous result when $\lambda=1$~\cite{Hwang2015}. Obviously, the excitation gap $\varpi_{1}$ vanishes at $\xi_{c}=1$, which gives the critical point
\begin{equation}
\tilde{g}_{c}=\frac{2}{1+\lambda}, (\lambda>0).
\end{equation}
The corresponding ground-state for $\tilde{g}>\tilde{g}_{c}$ is ${\psi _1}\left( x,\alpha_{1} \right) = {{\cal D}_1}\left( {{\alpha _g}} \right){e^{ - {S_1}}}{\phi _0}\left( {x,{\alpha _1}} \right)\left| \downarrow \right\rangle$, where $\phi_{0}\left( {x,{\alpha _1}} \right)$ is the ground state of the harmonic oscillator with $\alpha_{1}=\sqrt{m_{1}\varpi_{1}}$ and the effective mass $m_{1}=1/\left[{{\omega \left( {{\rm{1}} - {\xi ^{'}}^{\rm{2}}/{\xi ^2}} \right)}}\right]$.
In the case of $\lambda<0$, we introduce another displaced operator ${{\cal D}_2}\left( \beta \right) = {e^{ - i\beta x}} = {e^{ - \beta \frac{\partial }{{\partial p}}}}$ with the parameter $\beta$ to be determined, thus the Hamiltonian~(\ref{Hamiltonian1}) is transformed as
\begin{equation}\label{Dis_Ham_2}
\begin{array}{l}
H\left( \beta \right) = {{\cal D}_2}^\dag \left( \beta \right)H{{\cal D}_2}\left( \beta \right) = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \frac{\Omega }{2}{\sigma _x}\\
+ \sqrt {\frac{{\Omega \omega }}{2}} \left( {\xi {\sigma _z}x + {\xi ^{'}}{\sigma _y}p} \right) + \omega \beta p + \frac{{\beta {\delta _2}}}{2}{\sigma _y} + \frac{{\omega {\beta ^2}}}{2},
\end{array}
\end{equation}
where $\delta_{2}={\sqrt {{\rm{2}}\Omega \omega } {\xi ^{'}}}$.
The eigenstates of the atomic part $H_a=\frac{\Omega }{2}{\sigma _x} + \frac{{\beta \delta_{2} }}{2}{\sigma _y}$ are
\begin{equation}
\left| {\widetilde \uparrow } \right\rangle = \cos {\theta ^{'}}\left| + \right\rangle - i\sin {\theta ^{'}}\left| - \right\rangle ,{\kern 1pt} \left| {\widetilde \downarrow } \right\rangle = \sin {\theta ^{'}}\left| + \right\rangle + i\cos {\theta ^{'}}\left| - \right\rangle,
\end{equation}
with $\sin 2\theta^{'} = \beta \delta_{2} /\widetilde{\Omega}^{'}$, $ \cos 2\theta^{'} = \Omega /\widetilde{\Omega}^{'}$, and the new atomic transition frequency $\widetilde \Omega^{'} = \sqrt {{\Omega ^2} + {{\left( {\delta_{2} \beta } \right)}^2}}$.
In terms of Pauli matrices $\tau^{'}_{x,y,z}$ associated with $\left\{ {\left| \tilde{\uparrow} \right\rangle ,\left| \tilde{\downarrow} \right\rangle } \right\}$, the new Hamiltonian reads
\begin{equation}
\begin{array}{l}
H\left( \beta \right) = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) - \sqrt {\frac{{\Omega \omega }}{2}} \left( {\xi x{\tau^{'} _y} + {\xi ^{'}}\cos 2\theta p{\tau^{'} _x}} \right)\\
{\kern 35pt} + \left( {\omega \beta + \sqrt {\frac{{\Omega \omega }}{2}} \sin 2\theta {\xi ^{'}}{\tau^{'} _z}} \right)p + \frac{{\widetilde \Omega }}{2}{\tau^{'} _z} + \frac{{\omega {\beta ^2}}}{2}.
\end{array}
\end{equation}
To eliminate the perturbation term, $\left( {\omega \beta + \sqrt {\Omega \omega /2} \sin 2\theta {\xi ^{'}}{\tau^{'} _z}} \right)p$, we choose $\omega \beta - \sqrt {\Omega \omega /2} \sin 2\theta \xi^{'} = 0$, which gives
\begin{equation}
\beta = \pm {\beta _g} = \pm \sqrt {\left( {\Omega /2\omega {\xi ^{'}}^2} \right)\left( {{\xi ^{'}}^4 - 1} \right)}.
\end{equation}
Given $\beta=\pm\beta_{g}$, the Hamiltonian becomes
\begin{equation}\label{Super_Ham_2}
\widetilde H^{'}\left( { \pm {\beta _g}} \right) = {\widetilde H_{\rm{0}}}^{'} + {\widetilde H_V}^{'},
\end{equation}
with
\begin{small}
\begin{equation}
\widetilde H_0^{'} = \frac{\omega }{2}\left( {{p^2} + {x^2}} \right) + \frac{{\widetilde \Omega }}{2}{\tau^{'} _z}, \widetilde H_V^{'} = - \sqrt {\frac{{\Omega \omega }}{2}} \left( {\xi x{\tau^{'}_y} + {\xi ^{'}}\cos 2\theta p{\tau^{'}_x}} \right).
\end{equation}
\end{small}
Through performing SW transformation, under the condition of $\tilde{\Omega}^{'}/\omega \gg 1$, we diagonalize the Hamiltonian~(\ref{Super_Ham_2}) with the generator
\begin{equation}
S_{2}= i\sqrt {\frac{{\Omega \omega }}{{2{{\widetilde \Omega }^2}}}} \left( {\xi x{\tau^{'}_x} - {\xi ^{'}}\cos 2\theta p{\tau^{'}_y}}\right).
\end{equation}
Then we obtain the second-order low-energy effective Hamiltonian,
\begin{equation}
{\widetilde{H^{'}}}_{eff}^{(2)} \simeq \frac{\omega }{2}\left( {1 - \frac{\Omega }{{\widetilde \Omega }}{\xi ^{'2}}{{\cos }^2}2\theta } \right){p^2} + \frac{\omega }{2}\left( {1 - \frac{\Omega }{{\widetilde \Omega }}{\xi ^2}} \right){x^2},
\end{equation}
and the excitation gap (see Fig.~\ref{Energy_Gap_Two_Phase_1})
\begin{equation} \label{Energy_Gap_2}
\varpi_{2}{\rm{ = }}\omega \sqrt {\left( {1 - \frac{1}{{{\xi ^{'}}^4}}} \right)\left( {1 - \frac{{{\xi ^2}}}{{{\xi ^{'}}^2}}} \right)}.
\end{equation}
Obviously, the excitation gap $\varpi_{2}$ vanishes at the critical point $\xi^{'}_{c}=1$, that is,
\begin{equation}
\tilde{g}_{c}=\frac{2}{1-\lambda}, (\lambda<0).
\end{equation}
The corresponding ground state is ${\psi _2}\left( x,\alpha_{2} \right) = {{\cal D}_2}\left( {{\beta _g}} \right){e^{ - {S_2}}}{\phi _0}\left( {x,{\alpha _2}} \right)\left| \tilde{\downarrow} \right\rangle$, where $\phi_{0}\left( {x,{\alpha _2}} \right)$ is the ground state of the simple harmonic oscillator with $\alpha_{2}=\sqrt{m_{2}\varpi_{2}}$ and the effective mass $m_{2}=1/\left[\omega \left( {1 - 1/{\xi ^{'4}}} \right)\right]$.
\begin{figure}
\caption{(Color online) The excitation gap obtained from the second-order perturbative Hamiltonian.
Insets: the universal scalings of the energy gaps near the critical region labelled by the dashed-line rectangle, where the left and right insets respectively correspond to normal and superradiant phases.}
\label{Energy_Gap_Two_Phase_1}
\end{figure}
\section{Universal critical dynamics across superradiant phase transition}
\subsection{Analytical Kibble-Zurek scalings}
In the following, we briefly introduce the KZM and analytically derive the universal critical exponents.
Near the quantum critical point, due to the vanishing of the energy gap, the
correlation (or healing) length $\zeta$ and relaxation time $\tau$ diverge as
\begin{equation}\label{Healing_Relaxation_Scaling}
\tau \sim {\left| \epsilon \right|^{-vz}},{\kern 10pt} \zeta \sim {\left| \epsilon \right|^{ - v}},
\end{equation}
where $\epsilon$ is the dimensionless distance from the critical point, and $(v,z)$ are the critical exponents.
To drive system from normal to superradiant phase, we linearly quench the coupling strength according to
\begin{equation}\label{Dim_parameter}
\epsilon\left(t\right)=\frac{|\tilde{g}\left(t\right)-\tilde{g}_{c}|}{\tilde{g}_{c}}=\frac{t}{\tau_{Q}},
\end{equation}
where $\tau_{Q}$ is the quench time. In a QPT, the relaxation time is defined by the inverse of the
gap between the ground state and the first relevant excited state, i.e. $ \tau \simeq {\varpi ^{ - 1}}$.
However, the relaxation time is divergent in the vicinity of the critical point, in which the gap vanishes as
\begin{equation}\label{Energy_Scaling}
\varpi \sim {\left| \epsilon \right|^{vz}}.
\end{equation}
When the transition rate $\left| \dot{\epsilon}/\epsilon\right|=1/\left|t\right|$ equals the gap $\varpi \sim {\left| \epsilon \right|^{vz}}={\left| t/\tau_{Q} \right|^{vz}}$, the adiabaticity fails near an instant $t=\hat{t}$,
\begin{equation}\label{Freeze_time}
\hat{t}\sim \tau_{Q}^{\frac{vz}{1+vz}}, \quad \hat{\epsilon}\sim\tau_{Q}^{-\frac{1}{1+vz}},
\end{equation}
and the corresponding correlation length becomes
\begin{equation}
\hat{\zeta}\sim\hat{\epsilon}^{-v}\sim\tau_{Q}^{\frac{v}{1+vz}}.
\end{equation}
In the region of $\tilde{g}\le \tilde{g}_{c}$, the excitation gap $\varpi_{0}$ near the critical point vanishes as
\begin{equation} \label{Rabi_energy_scale}
\varpi_{0}\propto f_{{\varpi }}\left( \lambda \right){\left| \epsilon \right|^{1/2}}, {\kern 10pt} \lambda \neq0,
\end{equation}
where
\begin{equation}
f_{{\varpi }}\left( \lambda \right) = \omega \left[ {1 - {{\left( {\frac{{1 -\left| \lambda \right| }}{{1 + \left| \lambda \right| }}} \right)}^{\rm{2}}}} \right]^{1/2}.
\end{equation}
Comparing with Eq.~(\ref{Energy_Scaling}), we analytically obtain $vz=1/2$.
When $\lambda=0$, the excitation gap becomes
\begin{equation}
\varpi_{0} \propto \left| \epsilon \right|^{1}.
\end{equation}
Given $vz=1$, the critical exponents for $\lambda=0$ belong to a different universality class~\cite{Hwang2016}, which will not be discussed below.
For the anisotropic QRM, the energy gap $\varpi_{0}$ vanishes near the critical point (see the left insets of Fig.~\ref{Energy_Gap_Two_Phase_1}), which clearly reveals that the anisotropic QRM belongs to the same universality class.
To extract the critical exponents, we introduce the position variance $\Delta x$ and the momentum variance $\Delta p$.
In the normal phase, $\Delta x$ and $\Delta p$ are obtained via the ground state $\psi_{0}$:
\begin{equation}
\Delta x =\left[\frac{1}{2}\left( {1 - \frac{\omega }{\Omega }\xi {\xi ^{'}}} \right)\sqrt {\frac{{ {1 - {\xi ^{'}}^2} }}{{{1 - {\xi ^2}} }}} + \frac{{\omega {\xi ^{'}}^2}}{{2\Omega }}\right]^{\frac{1}{2}},
\end{equation}
\begin{equation}
\Delta p = \left[\frac{1}{2}\left( {1 - \frac{\omega }{\Omega }{\xi ^{'}}\xi } \right)\sqrt {\frac{{ {1 - {\xi ^2}} }}{{ {1 - {\xi ^{'}}^2} }}} + \frac{{\omega {\xi ^2}}}{{2\Omega }}\right]^{\frac{1}{2}}.
\end{equation}
Near the neighborhood of the phase transition, the length scale $\Delta x$ behaves as
\begin{equation}
\begin{array}{l}
\Delta x \propto f\left( \lambda \right){\left| \epsilon \right|^{ - 1/4}}, {\kern 10pt}\lambda > 0,\\
\Delta x \propto {f^{ - 1}}\left( \lambda \right){\left| \epsilon \right|^{1/4}}, {\kern 6pt}\lambda < 0,
\end{array}
\end{equation}
where
\begin{equation}
f\left( \lambda \right) = {\left[ {1 - {{\left( {\frac{{1 - \left| \lambda \right|}}{{1 + \left| \lambda \right|}}} \right)}^{\rm{2}}}} \right]^{1/4}}.
\end{equation}
The critical behavior of $\Delta x$ shows that it is divergent when $\lambda>0$, while it vanishes when $\lambda<0$.
It is worth noting that $\Delta x$ plays a role analogous to the diverging length scale when $\lambda>0$~\cite{Sachdev2011,Hwang2015}.
Comparing with Eq.~(\ref{Healing_Relaxation_Scaling}), we obtain the static correlation length critical exponent $v=1/4$ and the dynamic critical exponent $z=2$.
For the momentum variance $\Delta p$, its critical behavior obeys
\begin{equation}
\begin{array}{l}
\Delta p \propto {f^{ - 1}}\left( \lambda \right){\left| \epsilon \right|^{1/4}},{\kern 7pt} \lambda > 0,\\
\Delta p \propto f\left( \lambda \right){\left| \epsilon \right|^{ - 1/4}},{\kern 12pt} \lambda < 0.
\end{array}
\end{equation}
In contrast to $\Delta x$, $\Delta p$ becomes divergent when $\lambda<0$, while it vanishes when $\lambda>0$.
In the case of $\lambda<0$, the diverging length scale $\Delta p$ gives the critical exponent $v=1/4$ according to KZM, and so that we have the critical exponent $z=2$.
In the region of $\tilde{g} > \tilde{g}_{c}$, we divide the superradiant phase into two parts, which we label as the $x$-type ($p$-type) superradiant phase when $\lambda > 0$ ($\lambda < 0$)~\cite{Lin2017}, respectively.
In the superradiant phase, the excitation gap $\varpi_{1,2}$ near the critical point vanishes as
\begin{equation}
\varpi_{1,2} \propto f_{{\varpi }}\left( \lambda \right){\left| \epsilon \right|^{1/2}},
{\kern 10pt} \lambda \neq0.
\end{equation}
The critical behaviors of the excitation gap are shown in the right insets of Fig.~\ref{Energy_Gap_Two_Phase_1}, which gives $vz=1/2$.
In the $x$-type superradiant phase, $\Delta{x}$ and $\Delta{p}$ are obtained via
the ground state ${\psi _1}\left( x,\alpha_{1} \right)$:
\begin{small}
\begin{equation}
\Delta x = {\left[ {\frac{1}{2}\left( {1 - \frac{{\omega {\xi ^{'}}}}{{\Omega {\xi ^5}}}} \right)\sqrt {\frac{{{\xi ^2} - {\xi ^{'2}}}}{{{\xi ^2} - {\xi ^{ - 2}}}}} + \frac{{\omega {\xi ^{'2}}}}{{2\Omega {\xi ^4}}} - {\frac{{{\xi ^{'}}}}{{2{\xi ^3}}} + \frac{{{\xi ^{'}}}}{{2{\xi ^7}}}} } \right]^{\frac{1}{2}}},
\end{equation}
\end{small}
\begin{equation}
\Delta p = \left[\frac{1}{2}\left( {1 - \frac{{\omega {\xi ^{'}}}}{{\Omega {\xi ^5}}}} \right)
\sqrt {\frac{{{\xi ^2} - {\xi ^{ - 2}}}}{{{\xi ^2} - {\xi ^{'}}^{\rm{2}}}}} + \frac{\omega }{{2\Omega {\xi ^6}}}\right]^{\frac{1}{2}}.
\end{equation}
Near the critical point, the critical behavior gives
\begin{equation}
\begin{array}{l}
\Delta x \propto f\left( \lambda \right){\left| \epsilon \right|^{ - 1/4}}, \quad
\Delta p \propto {f^{ - 1}}\left( \lambda \right){\left| \epsilon \right|^{1/4}}.
\end{array}
\end{equation}
For $p$-type superradiant phase, $\Delta x$ and $\Delta p$ are obtained
via the ground state ${\psi _2}\left( x,\alpha_{2} \right)$:
\begin{equation}
\Delta x = {\left[ {\frac{1}{2}\left( {1 - \frac{{\omega \xi }}{{\Omega {\xi ^{'}}^5}}} \right)\sqrt {\frac{{{\xi ^{'}}^2 - {\xi ^{'}}^{ - 2}}}{{{\xi ^{'}}^2 - {\xi ^2}}}} + \frac{\omega }{{2\Omega {\xi ^{'}}^6}}} \right]^{\frac{1}{2}}},
\end{equation}
\begin{small}
\begin{equation}
\begin{split}
\Delta p = \Bigg[\frac{1}{2}\left( {1 - \frac{{\omega \xi }}{{\Omega {\xi ^{'}}^5}}} \right)\sqrt {\frac{{{\xi ^{'}}^2 - {\xi ^2}}}{{{\xi ^{'}}^2 - {\xi ^{'}}^{ - 2}}}} + \frac{{\omega {\xi ^2}}}{{2\Omega {\xi ^{'}}^4}} \\
- \frac{\xi }{{2{\xi ^{'}}^3}} + \frac{\xi }{{2{\xi ^{'}}^7}} \Bigg]^{\frac{1}{2}}.
\end{split}
\end{equation}
\end{small}
Near the critical point, the critical behavior gives
\begin{equation}
\begin{array}{l}
\Delta x \propto f^{-1}\left( \lambda \right){\left| \epsilon \right|^{ 1/4}}, \quad
\Delta p \propto {f}\left( \lambda \right){\left| \epsilon \right|^{-1/4}}.
\end{array}
\end{equation}
In the $x$-type superradiant phase, $\Delta x$ acts as the diverging length scale, while
in the $p$-type superradiant phase, $\Delta p$ is the diverging length scale.
According to Eq.~(\ref{Healing_Relaxation_Scaling}), the diverging length scale gives the critical exponent $v=1/4$ and the dynamical critical exponent $z=2$.
\begin{figure*}
\caption{(Color online) The time-evolution of density distribution for different quench times $\tau_{Q}$.}
\label{State_Evolution_1}
\end{figure*}
\subsection{Numerical scalings}
Below we show how to numerically extract the Kibble-Zurek scalings from the non-equilibrium dynamics.
We perform the numerical simulations based on the Hamiltonian~(\ref{Hamiltonian1}).
To study the non-equilibrium dynamics, we prepare the initial ground state deeply in the normal phase, in order to drive the system cross the superradiant phase transition, the coupling strength $\tilde{g}$ is linearly quenched according to
\begin{equation}\label{quench_way}
\tilde{g}(t)=\tilde{g}_{c}\left(1-t/\tau_{Q}\right),
\end{equation}
where $\tilde{g}_{c}$ is the critical point, and $\tau_{Q}$ is the quench time.
The typical total density distributions for different quench times are shown in Fig.~\ref{State_Evolution_1}.
When the system is quenched at a fast rate (which corresponds to a small $\tau_{Q}$), the state may retain the information of the normal phase even in the deep superradiant region, see Fig.~\ref{State_Evolution_1}(a).
However, the state evolves more adiabatically as the quench time becomes larger, in which the oscillation amplitude of the state becomes smaller, see Fig.~\ref{State_Evolution_1}(c).
When $\tau_{Q}\to\infty$, the quench dynamics return to the equilibrium case, see Fig.~\ref{GS_1}(a).
Based on the aforementioned description, the phonon number $n_{c}=\langle \omega (p^{2}+x^{2})/2 \rangle$ serves as the order parameter.
When the coupling strength $\tilde{g}$ is quenched across the phase transition $\tilde{g}_{c}$, the time-evolution of $n_{c}$ are shown in Fig.~\ref{bd_vs_time_tauq_1}(a).
In the case of an equilibrium phase transition, the phonon number becomes nonzero when the system sweeps through the critical point.
However, in the case of the non-equilibrium dynamics, the phonon number delays to increase until the system crosses the freeze time $\hat{t}$, where the state restarts to evolve.
\begin{figure}
\caption{(Color online) (a) The time evolution of the phonon number $n_{c}$.}
\label{bd_vs_time_tauq_1}
\end{figure}
To study the phase transition delay $b_{d}$, we define the $b_{d}$ as
\begin{equation}\label{Phase_transition_delay}
b_{d}\sim\left|\epsilon\right|\sim\left|\tilde{g}(\hat{t})-\tilde{g}_{c}\right|\sim\tau_{Q}^{-\frac{1}{1+vz}},
\end{equation}
where $\hat{t}$ is the freeze time.
In our calculation, $\hat{t}$ is determined when the phonon number $n_{c}$ reaches a fixed value $n_{c}^{fix}$.
According to the KZM, the instantaneous state freezes at $-\hat{t}$ but with global phase evolution during the impulse region.
Hence, the order parameter remains zero in the first adiabatic region and the impulse region.
When the system crosses over the freeze time $\hat{t}$, the instantaneous state restarts to evolve again, but the states at this moment are no longer the eigenstates of the Hamiltonian.
Therefore, the order parameter becomes nonzero after the freeze time $\hat{t}$.
We determine the freeze time $\hat{t}$ when the phonon number $n_{c}$ satisfies $n_{c}^{fix}=5$.
In Fig.~\ref{bd_vs_time_tauq_1}(b), we show the universal scaling of the phase transition delay $b_{d}$ with respect to the quench time $\tau_{Q}$; the numerical scalings for different $\lambda$ are well consistent with the analytical result $b_{d}\sim\tau_{Q}^{-\frac{1}{1+vz}}\sim\tau_{Q}^{-2/3}$.
\begin{figure}
\caption{(Color online) (a,c) Universal scalings of the diverging length scales $\Delta x$ and $\Delta p$ with respect to the quench time $\tau_{Q}$.}
\label{Deltaxp_vs_tauq}
\end{figure}
Here, we treat the position variance $\Delta x$ (or the momentum variance $\Delta p$) as the diverging length scale when $\lambda>0$ (or $\lambda<0$).
In Fig.~\ref{Deltaxp_vs_tauq}(a,b), we demonstrate the universal scaling of the $\Delta x$ with respect to the quench time $\tau_{Q}$.
The power laws agree well with the analytical result $\Delta x \sim \tau_{Q}^{\frac{v}{1+vz}}\sim\tau_{Q}^{1/6}$.
In the case of $\lambda<0$, the momentum variance $\Delta p$ serves as the diverging length scale.
Similarly, $\Delta p$ shows universal scaling with respect to the quench time $\tau_{Q}$ as shown in Fig.~\ref{Deltaxp_vs_tauq}(c,d); the power law is well consistent with the analytical result $\Delta p \sim \tau_{Q}^{\frac{v}{1+vz}}\sim\tau_{Q}^{1/6}$.
Combining the scalings of the phase transition delay and the diverging length scale, we finally give the numerical critical exponents of the anisotropic QRM for different $\lambda$ in Table~\ref{Critical_Exponent_Table}.
\begin{table}
\normalsize
\begin{center}
\begin{tabular}{|l|l|l|l|l|}
\hline
Anisotropic ratio $\lambda$ & -0.5 & -1.0 & -1.5 & -2 \\ \hline
z(Critical exponent) & 1.994 & 1.989 & 2.017 & 2.009 \\ \hline
$\nu$(Critical exponent) & 0.2511 & 0.2501 & 0.2507 & 0.2486 \\ \hline
Anisotropic ratio $\lambda$ & 0.5 & 1.0 & 1.5 & 2 \\ \hline
z(Critical exponent) & 1.998 & 1.991& 2.019 & 2.013 \\ \hline
$\nu$(Critical exponent) & 0.2506 & 0.2497 & 0.2504 & 0.2480 \\ \hline
\end{tabular}
\caption{The numerical critical exponents ($z$, $\nu$) of the anisotropic QRM for different $\lambda$, which are well consistent with the analytical ones $(z=2,\nu=1/4)$.}
\label{Critical_Exponent_Table}
\end{center}
\end{table}
\section{Summary and Discussions}
In summary, we have investigated the non-equilibrium dynamics across a normal-to-superradiant phase transition in the anisotropic QRM.
Through performing the SW transformation, the Hamiltonian can be diagonalized so that the ground states and their excitation gaps can be analytically obtained.
By analyzing the excitation gap and the diverging length scale, we give the critical exponents ($z=2,\nu=1/4$).
Meanwhile, we also simulate the real-time slow dynamics across the normal-to-superradiant phase transition.
To extract the critical exponents, we study the phase transition delay and diverging length scale near the critical point, which show universal scalings with respect to the quench time.
By introducing position and momentum operators, we clearly show the spontaneous symmetry breaking in the anisotropic QRM, which manifests the total density distribution spontaneously varies from a single-peak to double-peak shape.
Moreover, we reveal that the anisotropic QRM belongs to the same universality class (i.e. has identical critical exponents) regardless of the anisotropic ratio.
It is possible to realize the QRM in the ultrastrong coupling regime and the deep strong coupling regime via superconducting circuits~\cite{Langford2017,Braumuller2017,Leroux2018}, cold atoms~\cite{Felicetti2017} and trapped ions~\cite{Pedernales2015,Lv2018}.
The realization of the anisotropic QRM is more challenging; some attempts have been proposed via quantum wells~\cite{Schliemann2003,Wang2016}, circuit QED systems~\cite{Baksic2014,Yang2017}, and superconducting flux qubits~\cite{WangMY2018}.
The ratio between the atomic transition frequency $\Omega$ and the phonon field frequency $\omega$ can be tuned by adjusting the frequency detuning of the time-dependent magnetic fields, the qubits frequency and the LC oscillator frequency.
The coupling interaction strength $g$ and the anisotropic ratio $\lambda$ can be tuned by adjusting the phase of the time-dependent magnetic fields.
\begin{acknowledgments}
This work is supported by the Key-Area Research and Development Program of GuangDong Province under Grants No. 2019B030330001, the National Natural Science Foundation of China (NNSFC) under Grants No. 11874434 and No. 11574405, and the Science and Technology Program of Guangzhou (China) under Grants No. 201904020024.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{The joy of implications, aka pure Horn formulas: mainly a survey}
\author{Marcel Wild}
\date{}
\maketitle
\begin{quote}
A{\scriptsize BSTRACT}: Pure Horn clauses have also been called (among others) functional dependencies, strong association rules, or simply implications. We survey the mathematical theory of implications with an emphasis on the progress made in the last 30 years.
\end{quote}
{\bf Key words:}
pure Horn functions and their minimization, Boolean logic, association rule, lattice theory, Formal Concept Analysis, closure system, convex geometry, prime implicates, meet-irreducibles, universal algebra.
\section{Extended introduction}
This article is devoted to the mathematics and (to lesser extent) algorithmics of implications; it is mainly a survey of results obtained in the past thirty years but features a few novelties as well.
The theory of implications mainly developed, often under mutual ignorance, in these five fields:
Boolean Function Theory, Formal Concept Analysis, Lattice Theory, Relational Database Theory, Learning Theory.
As standard text-books in these fields we recommend [CH], [GW], [Bi] $+$ [G], [MR2] $+$ [M], and [RN, ch.VI] $+$ [FD] respectively. Broadly speaking we collect from each field only those major results that concern (or can be rephrased in terms of) ``abstract implications'', and {\it not} the substance matter of the field itself. There are three minor exceptions to this rule. First, there will be two detours (Subsections 4.1, 4.2) into lattice theory; among the five fields mentioned this is the one the author is most acquainted with. Second, in Subsection 1.1 just below, in order to motivate the theory to come, we glance at three ``real life'' occurrences of implications in these areas:
Relational Databases,
Formal Concept Analysis, and Learning Spaces. The third exception concerns 3.6; more on that later.
The second part (1.2) of our extended introduction gives the detailed section break up of the article.
{\bf 1.1} We shall only give very rudimentary outlines of three areas mentioned above; more detailed accounts of 1.1.1 to 1.1.3 are found in [MR2], [GW], [FD]. The sole purpose here is to convey a feeling for the many meanings that a statement ``$A$ implies $B$'' can have. This will contrast with the uniform mathematical treatment that all ``abstract'' implications $A \rightarrow B$ obey.
\begin{center}
\includegraphics[scale=0.5]{JoyOfImplicFig1}
\end{center}
{\bf 1.1.1} As to relational database (RDB), imagine this as a large array in which every row (called record) corresponds to a particular object $t_i$, and in which the columns correspond to the various attributes $a_j$
that apply. See Figure 1. Each attribute has a domain which is the set of values that it may assume. Following an example of J. Ullman, take a relational database whose records match the ``teaching events'' occuring at a university in a given semester. The attributes are $C=$ course, $T=$ teacher, $H=$ hour, $R =$ room, $S=$ student. The domain of $C$ may be $\{$algebra, analysis, lattice theory, $\cdots \}$, the domain of $T$ could be $\{$Breuer, Howell, Janelidze, $ \cdots \}$, and so forth. If $A, B$ are sets of attributes then the validity of $A \rightarrow B$ means that {\it any two objects which have identical values for all attributes in $A$, also have identical values for all attributes in $B$.} Examples of implications $A \rightarrow B$ (also called {\it functional dependencies}) that likely hold in a well designed database include the following: $\{C\} \rightarrow \{T\}$ (each course has one teacher), $\{H,R\} \rightarrow \{C\}$ (only one course meets in a room at one time), $\{H, S\} \rightarrow \{R\}$ (a student can be in only one room at a given time).
{\bf 1.1.2} Let now $G$ and $M$ be any sets and $I \subseteq G \times M$ be a binary relation. In Formal Concept Analysis (FCA) one calls the triple $(G, M, I)$ a {\it context}, and $gIm$ is interpreted as the {\it object} $g \in G$ having the {\it attribute} $m \in M$. If $A, B \subseteq M$ then the validity of $A \rightarrow B$ has a different\footnote{One may view a context as a RBD all of whose attribute domains are Boolean, thus $\{\mbox{True}, \mbox{False}\}$ or $\{1,0\}$. But depending on viewing it as RBD or context, different implications hold.} ring from before: {\it Any object that has all attributes in $A$, also has all attributes in $B$} (see also 2.1.2 and 2.2.3).
Let us focus on particular contexts of type $(G, M, \ni)$. Thus the objects $g \in G$ become {\it subsets} $X$ of some set $M$ of {\it items}. Saying that $g \in G$ ``has attribute'' $m \in M$ now just means $X \ni m$. Often the sets $X$ are called {\it transactions}, and the elements $m \in M$ are called {\it items}.
If $A, B \subseteq M$ then $A \rightarrow B$ is a valid implication iff {\it every transaction $X$ that contains the itemset $A$, also contains the itemset $B$}. For instance, each transaction can contain the items a customer bought at a supermarket on a particular day. In this scenario a plausible implication e.g. is $\{$butter, bread$\} \rightarrow \{$milk$\}$. Notice that $A \rightarrow B$ may be a valid implication simply because many transactions do not contain $A$ at all. To exclude this possibility one often strengthens the previous definition of ``valid implication'' by additionally demanding that say 70\% of all transactions must contain the itemset $A$. The terminology ``transaction'' and ``itemset'' is borrowed from Frequent Set Mining (FSM), a paradigm that developed in parallel to FCA for a long time, despite of close ties. See also 3.6.3.4.
{\bf 1.1.3} As to Learning Spaces [FD], these are mathematical structures applied in mathematical modeling of education. In this framework (closer in spirit to [GW] than to [RN] type learning theory) the validity of an implication $A \rightarrow B$ means the following: {\it Every student mastering the (types of) problems in set $A$ also masters the problems in set $B$.} See also Expansion 16.
{\bf 1.2} Some readers may have guessed that this zoo of implications fits the common hat of pure Horn functions, i.e. Boolean functions like $(x_1 \wedge x_2 \wedge x_3) \rightarrow x_4$ and conjunctions thereof. While this is true, the author, like others, has opted for a more {\it stripped down formalism}, using elements and sets rather than literals and truth value assignments, etc. Nevertheless, discarding pure Horn function terminology altogether would be short-sighted; certain aspects can only be treated, in any sensible way, in a framework that provides immediate access to the empire of general Boolean function theory that e.g. houses prime implicates and the consensus algorithm.
Without further mention, all structures considered in this article will be {\bf finite}. Thus we won't point out which concepts extend or can be adapted to the infinite case. A word on [CH, chapter 6, 56 pages] is in order. It is a survey on Horn functions to which the present article (PA) compares as follows. Briefly put, the intersection $CH \cap PA$ is sizeable (though not notation-wise), and so are $CH\backslash PA$ (e.g. applications, dualization, special classes), as well as $PA \backslash CH$ (e.g. 3.6 and 4.1 to 4.4). We note that 4.1 also features special classes but {\it others}.
Here comes the section break up. Section 2 recalls the basic connections between closure operators $c$ and closure systems ${\cal F}$ (2.1), and then turns to implications ``lite'' in 2.2. Crucially, each family $\Sigma$ of implications $A \rightarrow B$ gives rise to a closure operator $c(\Sigma, -)$ and whence to a closure system ${\cal F} = {\cal F}(\Sigma)$. Furthermore, {\it each} closure operator $c$ is of type $c= c(\Sigma, -)$ for suitable $\Sigma$. Section 3 is devoted to the finer theory of implications. Centerpieces are the Duquenne-Guigues implicational base (3.2) and the canonical direct base in 3.3. Subsection 3.4 is about mentioned pure Horn functions, 3.5 is about acyclic and related closure operators, and 3.6 surveys the connections between two devices to grasp closure systems ${\cal F}$. One device is any implicational base, the other is the subset $M({\cal F}) \subseteq {\cal F}$ of meet-irreducible closed sets.
Section 4 has the title ``Selected topics''.
In 4.1 the attention turns from meet to join-irreducibles, i.e. we show that {\it every} lattice ${\cal L}$ gives rise to a closure system ${\cal F}_J$ on its set $J = J({\cal L})$ of join irreducibles. Consequently it makes sense to ask about optimum implicational bases $\Sigma$ for various types of lattices. We have a closer look at modular, geometric and meet-distributive lattices. The other topics in brief are: an excursion into universal algebra (4.2), {\it ordered} direct implicational bases (4.3), an algorithm for generating ${\cal F}(\Sigma)$ in compact form (4.4), and general (impure) Horn functions in 4.5. According to Theorem 6 implications ``almost'' suffice to capture even impure Horn functions.
In order to have full proofs of some results without interrupting the story line, we store these proofs in little ``boxes'' (called Expansion 1 to Expansion 20) in Section 5. Most of these results are standard; nevertheless we found it worthwhile to give proofs fitting our framework. Some Expansions simply contain further material. Due to space limitations the full versions of some Expansions are only available in the preliminary draft [W7].
Recall that this article attempts to survey the {\it mathematical theory} of pure Horn functions ($=$ implications), and apart from the mentioned exceptions {\it not} their applications. Our survey also includes a couple of new results, mainly in 2.2.5, 3.3.2, 3.4.3, 4.1.6, in Expansion 8 and in (33). Further, Theorems 3 and 6 are new. In order to stimulate research four {\it Open Problems} are dispersed throughout the text (in 3.6.2, \ Expansion 5, \ Expansion 15).
\section{The bare essentials of closure systems and implications}
Everything in Section 2 apart from 2.2.5 is standard material.
Because of the sporadic appearance of contexts (1.1.2) a good reference among many is [GW].
\subsection{Closure systems and closure operators}
A {\it closure system} ${\cal F}$ with universe $E$ is a subset of the powerset ${\cal P}(E)$ with the property that
(1) \quad $\bigcap {\cal G} \in {\cal F}$ for all ${\cal G} \subseteq {\cal F}$.
Here $\bigcap {\cal G}$ denotes the intersection of all sets contained in ${\cal G}$. Its smallest element is $\bigcap {\cal F}$ and, crucially, it has a largest element as well. Namely, as a matter of taste, one may either postulate that $E$ belongs to ${\cal F}$, or one may argue that $\emptyset \subseteq {\cal F}$ implies $\bigcap \emptyset \in {\cal F}$, and that $\bigcap \emptyset = E$. Thus ${\cal F}: = {\cal P}(E)$ is the largest closure system with universe $E$, and ${\cal F}: = \{E\}$ is the smallest. The members $X\in {\cal F}$ are called {\it closed} sets, and $X \in {\cal F} \backslash \{E\}$ is
{\it meet-irreducible} if there are no strict closed supersets $A$ and $B$ of $X$ with $A \cap B = X$. We write $M({\cal F})$ for the set of meet irreducibles of ${\cal F}$. It is clear that
(2) \quad $(\forall X \in {\cal F}) \ \ ({\cal F} \backslash \{X\}$ is closure system $\Leftrightarrow X \in M({\cal F})$)
{\bf 2.1.1} Closure systems are linked to closure operators\footnote{We recommend [BM, sec.6] for a historic account of the origins of these two concepts.}. (The link to lattices is postponed to 4.1.) Namely, {\it closure operators} are maps $c: {\cal P}(E) \rightarrow {\cal P}(E)$ which are extensive ($U \subseteq c(U)$), idempotent ($c(c(U)) = c(U))$ and monotone ($U \subseteq U' \Rightarrow c(U) \subseteq c(U'))$. In this situation (see Expansion 1)
(3) \quad ${\cal F}_c : = \{X \in {\cal P}(E) : c(X) = X \}$ is a closure system.
As to the reverse direction,
if ${\cal F} \subseteq {\cal P}(E)$ is a closure system then
$c_{\cal F}(U) : = \bigcap \{S \in {\cal F}: \ S \supseteq U \}$
yields a closure operator $c_{\cal F} : {\cal P}(E) \rightarrow {\cal P}(E)$.
One can show [GW, Theorem 1] that ${\cal F}_{(c_{\cal F})} = {\cal F}$ and $c_{({\cal F}_c)} = c$. One calls $U$ a {\it generating set} of $X \in {\cal F}$ if $c_{\cal F} (U) = X$. On a higher level ${\cal H} \subseteq {\cal P}(E)$ is a {\it generating set} of ${\cal F}$ if ${\cal F} ({\cal H}) : = \{\bigcap {\cal G}: {\cal G} \subseteq {\cal H}\}$ equals ${\cal F}$. It is easy to see that ${\cal H}$ is a generating set of ${\cal F}$ iff ${\cal H} \supseteq M({\cal F})$. In this case
$c_{\cal F}(U)$ can also be calculated as
(4) \quad $c_{\cal H}(U) = \bigcap \{S \in{\cal H}: S \supseteq U\}$.
The first idea that springs to mind to calculate ${\cal F}({\cal H})$ from ${\cal H}_1 : = {\cal H}$ is to keep on calculating ${\cal H}_{k+1} = {\cal H}_k \ast {\cal H}_1 : = \{X \cap Y : X \in {\cal H}_k, Y \in {\cal H}_1\} \ (k = 1, 2, \ldots)$ until ${\cal H}_{k+1} = {\cal H}_k = {\cal F}({\cal H})$. Unfortunately the approach is doomed by the frequent recalculation of closed sets, and the need to keep large chunks of ${\cal F}({\cal H})$ in central memory. A clever idea of C.E. Dowling [FD, p.50] avoids the recalculations, but not the space problem; see also Expansion 4.
{\bf 2.1.2.} Here comes a frequent source of closure operators. Let $E_1, E_2$ be sets and let $R \subseteq E_1 \times E_2$ be a binary relation.
For all $X \subseteq E_1$ and $Y \subseteq E_2$ put
$$\begin{array}{lllll}
X^\dagger & : = & f(X) & : = & \{y \in E_2: (\forall x \in X) (x,y) \in R\}\\
\\
Y^\ast & : = & g(Y) & : =& \{x \in E_1: (\forall y \in Y) (x,y) \in R\} \end{array}$$
Then the pair $(f, g)$ yields a {\it Galois connection}. It is easy to see that $X \subseteq Y^\ast$ iff $X^\dagger \supseteq Y$. Furthermore, it holds [GW, Section 0.4] that $c_1 : = g \circ f$ is a closure operator ${\cal P}(E_1) \rightarrow {\cal P}(E_1)$, and $c_2 : =f \circ g$ is a closure operator ${\cal P}(E_2) \rightarrow {\cal P}(E_2)$.
For instance, let $(G, M, I)$ be a context in Formal Concept Analysis as glimpsed in 1.1.2. If $A \subseteq M$ is any set of attributes then $c_2(A) = A{^{\ast \dagger}}$ is the set of attributes $m$ enjoyed by every object $g \in A^\ast$, i.e. by every object $g$ that has all attributes of $A$. Put another way, $A \rightarrow c_2(A)$ is a ``valid'' implication in the sense that whenever $g$ has all attributes in $A$, then $g$ has all attributes in $c_2(A)$. This matches our discussion of ``implications'' $A \rightarrow B$ in 1.1.2. See [PKID1] for a survey of 1072 papers dedicated to applications of FCA.
\subsection{Implications ``lite''}
A pair of subsets $(A, B) \in {\cal P}(E) \times {\cal P}(E)$ will be called an {\it implication}. Both $A = \emptyset$ or $B = \emptyset$ are allowed. (See 3.4.2 for the full picture). We shall henceforth write $A \rightarrow B$ instead of $(A,B)$ and call $A$ the {\it premise} and $B$ the {\it conclusion} of the implication. Any family
(5) \quad $\Sigma : = \{A_1 \rightarrow B_1, A_2 \rightarrow B_2, \cdots, A_n \rightarrow B_n\}$
of implications gives rise to a closure operator as follows. Putting $[n] : = \{1, 2, \ldots, n\}$ for any set $S \subseteq E$ we define
(6) \quad $S': = S \cup \bigcup \{B_i: \ i \in [n], A_i \subseteq S \}$.
By finiteness the chain $S \subseteq S'\subseteq (S')'\subseteq \cdots$ stabilizes at some set $c(\Sigma, S)$. This algorithm matches {\it forward chaining} in [CH, 6.2.4]. We call $c(\Sigma, S)$ the $\Sigma$-{\it closure} of $S$. It is clear that the function $c(\Sigma, -)$ is a closure operator on ${\cal P}(E)$. As to speeding up the calculation of $c(\Sigma, X)$ see Expansion 2.
It is evident that $\Sigma \subseteq \Sigma'$ implies $c(\Sigma, U) \subseteq c(\Sigma', U)$ for all $U \subseteq E$, but say $\Sigma = \Sigma_1 \cup \Sigma_2$ does not entail $c(\Sigma, U) = c(\Sigma_2, c(\Sigma_1, U))$.
By (3) the closure operator $c(\Sigma, -)$ induces a closure system ${\cal F}(\Sigma)$. Hence for all $X \subseteq E$ it holds that
(7) \quad $X \in {\cal F}(\Sigma) \ \Leftrightarrow \ X = c(\Sigma, X) \ \Leftrightarrow \ \forall (A \rightarrow B) \in \Sigma: \ A \not\subseteq X$ \ or \ $B \subseteq X$
Skipping $c(\Sigma, -)$, it is easy to show directly that for any given family $\Sigma$ of implications the sets $X \subseteq E$ with $(A \subseteq X \Rightarrow B \subseteq X$, for all $(A \rightarrow B) \in \Sigma)$ constitute a closure system.
{\bf 2.2.1} We say that $\Sigma$ is {\it equivalent} to $\Sigma'$ (written $\Sigma \equiv \Sigma'$) if the closure operators $c(\Sigma, -)$ and $c(\Sigma', -)$ coincide. There are three obvious (and others in 3.4) notions of ``smallness'' for families $\Sigma$ of implications as in (5):
\begin{itemize}
\item $\Sigma$ is {\it nonredundant} if $\Sigma \backslash \{A_i \rightarrow B_i\}$ is not equivalent to $\Sigma$ for all $1 \leq i \leq n$.
\item $\Sigma$ is {\it minimum} if $ca (\Sigma):=|\Sigma|$ equals $\min \{ |\Sigma'|: \Sigma' \equiv \Sigma \}$.
\item $\Sigma$ is {\it optimum} if $s(\Sigma) : = |A_1 | + \cdots +|A_n| + |B_1| + \cdots + |B_n|$ equals\\
$\min \{s(\Sigma') : \Sigma' \equiv \Sigma \}$.
\end{itemize}
For instance, $\Sigma_1 : = \{\{1\} \rightarrow \{2\}, \ \{1 \} \rightarrow \{3\}, \ \{1\} \rightarrow \{2,3\} \}$ is {\it redundant} ($=$ not nonredundant) because say $\{1\} \rightarrow \{2,3\}$ can be dropped.
Both $\Sigma_2 : = \{\{1\} \rightarrow \{2\}, \{1\} \rightarrow \{3\} \}$ and $\Sigma_3: = \{\{1\} \rightarrow \{2, 3\} \}$ are equivalent to $\Sigma_1$, and are clearly nonredundant. The latter is minimum, in fact optimum. Generally each minimum family is nonredundant. Less obvious, each optimum family is minimum as proven in Theorem 1.
{\bf 2.2.2} From $\{1\} \rightarrow \{2\}$ and $\{2\} \rightarrow \{3\}$ ``somehow follows'' $\{1\} \rightarrow \{3\}$, but this notion needs to be formalized. We thus say that $A \rightarrow B$ {\it follows} from (or: is a {\it consequence} of) a family $\Sigma$ of implications, and write $\Sigma \vDash (A \rightarrow B)$, if $\Sigma \cup \{A \rightarrow B\}$ is equivalent to $\Sigma$. The following fact is often useful:
(8) \quad $\Sigma \vDash (A \rightarrow B)$ if and only if $B \subseteq c(\Sigma, A)$
{\it Proof of (8)}. As to $\Rightarrow$, by assumption the two closure operators $c(\Sigma, -)$ and $c(\Sigma \cup \{A \rightarrow B\}, -)$ coincide. Thus in particular $B \subseteq c(\Sigma \cup \{A \rightarrow B\}, A) = c(\Sigma, A)$. As to $\Leftarrow$, it suffices to show that $c(\Sigma \cup \{A \rightarrow B\}, U)$ which clearly coincides with $c(\Sigma \cup \{A \rightarrow B\}, c(\Sigma, U))$, is contained in $c(\Sigma, U)$ for $U \subseteq E$. {\it Case 1:} $A \not\subseteq c(\Sigma, U)$. Then $c(\Sigma \cup \{A \rightarrow B\}, c(\Sigma, U)) = c(\Sigma, U)$ by the very definition of the closure operator $c(\Sigma \cup \{A \rightarrow B\}, -)$. {\it Case 2:} $A \subseteq c(\Sigma, U)$. Then by assumption $B \subseteq c(\Sigma, A) \subseteq c(\Sigma, U)$, and so again $c(\Sigma \cup \{A \rightarrow B\}, c(\Sigma, U)) = c(\Sigma, U)$. \quad $\square$
In Expansion 3 we introduce among other things a ``syntactic'' notion $\vdash$ of {\it derivability} and show that $\Sigma \vdash (A \rightarrow B)$ is equivalent to $\Sigma \vDash (A \rightarrow B)$.
{\bf 2.2.3} Conversely, let us {\it start out} with any closure operator $c: {\cal P}(E) \rightarrow {\cal P}(E)$. Then a family $\Sigma$ of implications is called an {\it implicational base} or simply {\it base} of $c$ if $c(S) = c(\Sigma, S)$ for all $S \subseteq E$. Each closure operator $c$ {\it has} an implicational base, in fact $\Sigma_c : = \{X \rightarrow c(X) : X \subseteq E\}$ does the job\footnote{This is slightly less trivial than it first appears. Clearly $c(Y) \subseteqq c(\Sigma_c, Y)$, but why not $\subsetneqq$?}. Unfortunately, $\Sigma_c$ is too large to be useful. How to find smaller ones is the theme of Section 3.
{\bf 2.2.4} Putting $B = c(\Sigma, A)$ in (8) we see that $A \rightarrow c(\Sigma, A)$ is a consequence of $\Sigma$.
Thus for any closure operator $c$ the implication $A \rightarrow c(A)$ is a consequence of any $\Sigma$ that happens to be an implicational base of $c$. But implications $A \rightarrow c(A)$ often carry a natural meaning ``on their own'', such as $A \rightarrow c_2(A)$ in 2.1.2.
{\bf 2.2.5} Streamlining the proof of [KN, Theorem 20] here comes an example of a visually appealing closure operator $c$, all of whose optimum bases can be determined ``ad hoc'', i.e. without the theory to be developed in Section 3.2. Namely, $c$ arises from an affine point configuration $E \subseteq \mathbb{R}^2$ by setting $c(A) : = E\cap ch(A)$ where $ch(A)$ is the ordinary (infinite) convex hull of $A$. For instance, if $E = [8]$ is as in Figure 2, then $c(\{1,2,4\}) = \{1, 2, 4, 5, 8\}$.
\begin{center}
\includegraphics[scale=0.8]{JoyOfImplicFig2}
\end{center}
From the deliberations below (which generalize to point sets in $\mathbb{R}^n$ without $n+1$ points in a hyperplane) it will readily follow that $c$ has exactly $144$ optimum bases.
Let ${\cal T}$ be the set of all 3-element subsets $T \subseteq E$ with $ch(T) \cap (E \backslash T)\neq \emptyset$.
Let $\Sigma$ be any base of $c$ and let $T \in {\cal T}$ be arbitrary. From $c(\Sigma, T) = c(T) \varsupsetneqq T$, and the fact that all proper subsets of $T$ are closed, follows that $\Sigma$ must contain an implication with premise $T$. Now consider a set $\Sigma_{op}$ of implications $T \rightarrow \{e_T\}$ where $T$ scans ${\cal T}$ and where $e_T \in ch(T) \cap (E \backslash T)$ is arbitrary. Obviously, $c(\Sigma_{op}, S) \subseteq c(S)$ for all $S \subseteq E$. If we can show that $\Sigma_{op}$ is a base at all, then it must be optimum by the above. By way of contradiction assume that $\Sigma_{op}$ is no base, and fix a set $S \subseteq E$ with $c(\Sigma_{op}, S) \varsubsetneqq c(S)$ for which $ch(S)$ is minimal. From $S \varsubsetneqq c(S)$ follows\footnote{This follows from the well-known fact that convex hulls like $ch(S)$ can be obtained by repeatedly taking closures of $3$-element sets.} that $T \subseteq S$ for at least one $T \in {\cal T}$, and thus $e_T \in c(\Sigma_{op}, S)$. Consider the unique triangulation of $ch(S)$ into triangles $ch(T_i)(i \in I)$ all of whose (3-element) vertex sets $T_i$ contain $e_T$. Then $T_i \subseteq c(\Sigma_{op}, S)$, and so $c(\Sigma_{op}, T_i) \subseteq c(\Sigma_{op}, S)$. Furthermore from $ch(T_i)\varsubsetneqq ch(S)$ follows $c(\Sigma_{op}, T_i) = c(T_i)$, and so
$$c(\Sigma_{op}, S) \supseteq \displaystyle\bigcup_{i\in I} c(\Sigma_{op},T_i) \ = \ \displaystyle\bigcup_{i \in I} c(T_i) \ \stackrel{4}{=} \ c(S),$$
which contradicts $c(\Sigma_{op}, S) \varsubsetneqq c(S)$. The mentioned number $144$ arises as $2^4 \cdot 3^2$ in view of the fact that exactly four $T \in {\cal T}$ have $|c(T) \backslash T| =2$ (namely $T = 123, 124, 134, 234)$, and exactly two $T \in {\cal T}$ have $|c(T) \backslash T|=3$ (namely $T = 127, 345$). Here we e.g. wrote 124 instead of $\{1, 2, 4\}$. This kind of shorthand will be used frequently.
\section{The finer theory of implications}
In 3.1 we couple to each closure operator $c$ some quasiclosure operator $S \mapsto S^\bullet$ which will be crucial in the sequel. In [W3] it is shown that certain minimization results independently obtained by Guigues-Duquenne [GD] and Maier [M] are equivalent. By now the formalisation of Guigues-Duquenne has prevailed (mainly due to the beneficial use of closure operators), and also is adopted in Section 3.2. Section 3.3 introduces the canonical direct implication base. Section 3.4 finally introduces pure Horn functions, and 3.5 addresses the acyclic case.
It seems that the link between implications and the meet-irreducibles of the induced closure system (Section 3.6) must be credited to Mannila and R\"{a}ih\"{a} [MR1]. As indicated in the introduction, in 3.6 we also shed some light on {\it why} it is important to go from $\Sigma$ to $M({\cal F})$ and vice versa.
\subsection{Quasiclosed and pseudoclosed sets}
Given any closure operator $c: {\cal P}(E) \rightarrow {\cal P}(E)$ and $S \subseteq E$ we put
(9) \quad $S^\circ : = S \cup \bigcup \{c(U): \ U \subseteq S,\quad c(U) \neq c(S) \}$.
Because $E$ is finite the chain $S \subseteq S^\circ \subseteq (S^\circ)^\circ \subseteq \cdots$ will stabilize at some set $S^\bullet$. It is clear that $S \mapsto S^\bullet$ is a closure operator and that $S^\bullet \subseteq c(S)$ for all $S \subseteq E$. We call $S \mapsto S^\bullet$ the $c${\it -quasiclosure}, or simply {\it quasiclosure} operator when $c$ is clear from the context.
\includegraphics[scale=0.6]{JoyOfImplicFig3}
As an example, consider the $4 \times 5$ grid $E$ in Figure 3 and the closure system ${\cal F} \subseteq {\cal P}(E)$ of all contiguous rectangles $I \times J$ (thus $I \subseteq [4]$ and $J \subseteq [5]$ are {\it intervals}). Let $c: = c_{\cal F}$ be the coupled closure operator. For $S: = \{(2,1), (2,4), (4,4)\}$ (matching the three gray squares on the left in Figure 3) all singleton subsets are closed, and for the 2-element subsets we have
$$\begin{array}{lll}
c(\{(2,1), (2,4))\}) & =& \{(2,1), (2,2),(2,3), (2,4)\} = : S_1 \neq c(S),\\
\\
c(\{(2,4), (4,4)\}) &= & \{(2,4), (3,4), (4,4) \} = : S_2 \neq c(S),\\
\\
c(\{(2,1), (4,4)\}) & =& \{2,3,4\} \times \{1,2,3,4\} = c(S). \end{array}$$
Hence $S^\circ = S_1 \cup S_2$. If $T \subseteq S^\circ$ is any set with $(4,1) \in c(T)$ then necessarily $(2,1), (4,4) \in T$ (why?), whence $c(T) = c(S)$. Hence $S^{\circ \circ} \subseteq c(S) \backslash \{(4,1)\}$. Jointly with
$$c(\{(2,2), (4,4)\}) \cup c(\{(2,1), (3,4)\}) = c(S) \backslash \{(4,1)\}$$
follows that $S^{\circ \circ} =c(S) \backslash \{(4,1)\}$. Finally $S^{\circ \circ \circ} = S^\bullet = c(S)$ because e.g. $(4,1) \in c(\{(3,1), (4,2)\}) \neq c(S)$.
We call\footnote{Unfortunately no standard terminology exists. It holds that $Y \subseteq X^\bullet$ iff $X$ {\it directly determines} $Y$ (modulo some ``cover of functional dependencies'') in the sense of [M, Def.5.9]. Do not confuse this notion of ``direct'' with the one in Section 3.3.} a subset {\it properly quasiclosed} if we like to emphasize that it is quasiclosed but {\it not} closed.
For instance the set $S = \{(2,1), (2,4)\}$ in Fig.3 is properly quasiclosed.
\includegraphics[scale=0.5]{JoyOfImplicFig4}
{\bf 3.1.1} As another example take $E = [7]$ and let ${\cal F} \subseteq {\cal P}(E)$ be the closure system of Figure 4(a) with associated closure operator $c: = c_{\cal F}$. For our $c$ at hand the properly quasiclosed generating sets for each closed set are these:
$\begin{array}{cll}
12 & : & \hbox{\boldmath$\emptyset$}, 1,2\\
1234 & : & {\bf 123}, {\bf 124} \ (\mbox{why not 13?})\\
125 & : & \mbox{none}\\
1234567 & : & {\bf 126}, {\bf 127}, 1256, 1257, 1267, {\bf 12345}, 12346, 12347,
12567, 123456, 123457, 123467
\end{array}$
Let ${\cal F} \subseteq {\cal P}(E)$ be a closure system. As opposed to (2) one can show that
(10) \quad $(\forall Q \subseteq E) \ \ {\cal F} \cup \{Q\}$ is a closure system \ $\Leftrightarrow \ Q$ is quasiclosed
See Figure 4(b) where $Q : = \{1, 2, 7\}$ was added to ${\cal F}$. One checks that indeed $Q \cap X \in {\cal F}$ for all $X \in {\cal F}$.
\subsection{The canonical Guigues-Duquenne base}
For closure operators $c: \ {\cal P}(E) \rightarrow {\cal P}(E)$ we define the equivalence relation $\theta \subseteq {\cal P}(E) \times {\cal P}(E)$ by
(11) \quad $(U, U') \in \theta \ : \ \Leftrightarrow \ c(U) = c(U')$.
For any implicational base $\Sigma$ of $c$ and for any $X \subseteq E$ let $\Sigma (X)$ be the set of those implications $A \rightarrow B$ in $\Sigma$ for which $c(A) = c(X)$. It holds that
(12) \quad $Y^\bullet = c(\Sigma \backslash \Sigma (Y), Y)$ \quad for all \quad $Y \subseteq E$,
where $Y \mapsto Y^\bullet$ is the $c$-quasiclosure operator. Being a key ingredient for establishing Theorem 1 below let us repeat and slightly amend the proof of (12) given in [W5, Lemma 4]. For starters we replace $\Sigma$ by the equivalent family $\overline{\Sigma}$ of implications which has each $U \rightarrow V$ from $\Sigma (Y)$ replaced by the {\it full} implication $U \rightarrow c(\Sigma, U)$. Because $\Sigma \setminus \Sigma (Y)$ equals $\overline{\Sigma} \setminus \overline{\Sigma}(Y)$ it suffices to prove that
$(\overline{12})$ \quad $Y^\bullet = c(\overline{\Sigma} \setminus \overline{\Sigma} (Y), Y)$ for all $Y \subseteq E$.
The inclusion $\supseteq$ being obvious it suffices to show that $B \subseteq c(\overline{\Sigma} \setminus \overline{\Sigma}(Y), Y)$ implies $B^\circ \subseteq c(\overline{\Sigma} \setminus \overline{\Sigma}(Y), Y)$. Since $B^\circ= B \cup \bigcup \{c(\overline{\Sigma}, X): X \subseteq B$ and $c (\overline{\Sigma}, X) \subsetneqq c(\overline{\Sigma}, B)\}$ this further reduces to show that $c(\overline{\Sigma}, X) \subsetneqq c(\overline{\Sigma}, Y)$ implies that $c(\overline{\Sigma}, X) = c(\overline{\Sigma} \setminus \overline{\Sigma}(Y), X)$. But this holds since by construction all implications from $\overline{\Sigma} (Y)$ are of type $(U \rightarrow c(\overline{\Sigma}, U)) = (U \rightarrow c(\overline{\Sigma}, Y))$, and thus cannot be used in the generating process of $c(\overline{\Sigma}, X)$. This proves $(\overline{12})$ and hence (12).
A properly quasiclosed set $P$ is {\it pseudoclosed}\footnote{From an algorithmic point of view this equivalent definition is more appropriate: $P$ is pseudoclosed iff $P \neq c(P)$ and $c(P_0) \subseteq P$ for all pseudoclosed sets $P_0$ strictly contained in $P$. Another name for pseudoclosed is {\it critical} (not to be confused with ``critical'' in 4.1.5).} if it is minimal among the properly quasiclosed sets in its $\theta$-class. (In the set listing of 3.1.1 these are the boldface sets.) Consider now the family of implications
(13) \quad $\Sigma_{GD} : = \{P \rightarrow c(P): \ P \subseteq E$ is pseudoclosed$\}$,
where $GD$ stands for Guigues-Duquenne. Clearly $c(\Sigma_{GD}, Y) \subseteq c(Y)$ for all $Y \subseteq E$, and so $\Sigma_{GD}$ will be an implicational base of $c$ if we can show that $c(\Sigma_{GD}, Y) \supseteq c(Y)$ for all $Y \subseteq E$. Indeed, by (12) applying the implications from $\Sigma_{GD}\backslash \Sigma_{GD}(Y)$ blows up $Y$ to $Y^\bullet$. If $Y^\bullet \neq c(Y)$ then by definition there is a pseudoclosed set $P\subseteq Y^\bullet$ with $c(P) = c(Y^\bullet) = c(Y)$. Applying the implication $(P \rightarrow c(P))\in \Sigma_{GD}$ to $Y^\bullet$ shows that $c(\Sigma_{GD}, Y) \supseteq c(Y)$.
This establishes part (a) of Theorem 1 below. For the remainder see [W3, Thm.5] which draws on [GD] and again uses (12). Two more concepts are in order.
One calls $X \in {\cal F}_c$ {\it essential} if $X$ contains a properly quasiclosed generating set. Thus the essential sets coincide with the closures of the pseudoclosed sets.
The {\it core} [D] of a closure operator $c: {\cal P}(E) \rightarrow {\cal P}(E)$ is
(14) \quad $\mbox{core}(c) =\, \mbox{core}({\cal F}_c): = \{X \in {\cal F}_c : \ X \ \mbox{is essential} \}$.
\begin{tabular}{|l|} \hline \\
{\bf Theorem 1:} Let $c: \ {\cal P}(E) \rightarrow {\cal P}(E)$ be a closure operator.\\
\\
(a) The family of implications $\Sigma_{GD}$ is an implicational base of $c$.\\
\\
(b) If $\Sigma$ is any implicational base then $|\Sigma| \geq |\Sigma_{GD}|$. More specifically, for each pseudoclosed\\
\hspace*{.5cm} $P \subseteq E$ there is some $(A_P \rightarrow B_P) \in \Sigma$ with $A_P \subseteq P$ and $A^\bullet_P = P$.\\
\\
(c) If $\Sigma$ is a nonredundant implicational base then $\{c(A): (A \rightarrow B) \in \Sigma\}$ equals $\mbox{core} ({\cal F}_c)$.\\
\\
(d) If $\Sigma$ is a nonredundant implicational base which moreover consists of {\it full} implications\\
\hspace*{.5cm} $A \rightarrow c(A)$ then $\Sigma$ is minimum.\\
\\
(e) If $\Sigma$ is optimum then $\Sigma$ is minimum. Furthermore for each of the implications $A_P \rightarrow B_P$\\
\hspace*{.5cm} defined in (b) the cardinality of $A_P$ is uniquely determined by $P$ as\\
\hspace*{.5cm} $\min \{|X|: X \subseteq P, \quad c(X) = c(P)\}.$\\ \\ \hline
\end{tabular}
Because of (b) the Guigues-Duquenne base is often called {\it canonical}\footnote{Some authors as [GW] speak of the {\it stem base} but for us ``stem'' has another meaning (see 3.3).}.
Those families $\Sigma$ of implications that are of type $\Sigma = \Sigma_{GD}$ for some closure operator $c$ were {\it inherently} characterized by Caspard [C]. The whole of Theorem 1 can be raised to the level of semilattice congruencies\footnote{For a glimpse on semilattice congruences in another but related context see 4.2.1.} [D2] but this further abstraction hasn't flourished yet. For practical purposes any minimum base $\Sigma$ is as good as $\Sigma_{GD}$. For instance, a trivial way to shorten $\Sigma_{GD}$ to $\Sigma'_{GD}$ is to replace each $P \rightarrow c(P)$ in $\Sigma_{GD}$ by $P \rightarrow (c(P) \backslash P)$. The extra benefit of $\Sigma_{GD}$ is its beauty on a theoretical level as testified by Theorem 1.
{\bf 3.2.1} To illustrate Theorem 1 we consider $c: = c_{\cal F}$ where ${\cal F}$ is the closure system from 3.1.1. Hence the canonical base of $c$ is
$\Sigma_{GD} = \{\emptyset \rightarrow 12, \ \ 123 \rightarrow 1234, \ \ 124 \rightarrow 1234, \ \ 126 \rightarrow [7], \ \ 127 \rightarrow [7], \ \ 12345 \rightarrow [7]\}$.
It happens that all premises (apart from $12345$ which has $35$ and $45$) contain {\it unique} minimal generating sets of the conclusions, and so by Theorem 1(e) each optimum base of $c$ must be of type
$\Sigma_{op} = \{\emptyset \rightarrow B_1, \ \ 3 \rightarrow B_2, \ \ 4 \rightarrow B_3, \ \ 6 \rightarrow B_4, \ \ 7 \rightarrow B_5, \ \ 35 \rightarrow B_6 \ \ (\mbox{or} \ 45 \rightarrow B_6)\}$.
It turns out that e.g.
$\Sigma_1 = \{\emptyset \rightarrow 12,\ \ 3 \rightarrow 4, \ \ 4 \rightarrow 3, \ \ 6 \rightarrow 357, \ \ 7 \rightarrow 6, \ 35 \rightarrow 6\}$
is optimum. To prove it one must (a) show that $\Sigma_1$ is a base at all, and (b) show that the sum $2+1+1+3+1+1=9$ of the sizes of the conclusions is minimum. We omit the argument. See also Problem 4 in Expansion 15.
{\bf 3.2.2} In this section (and only here), $[n]$ denotes the strong component of $n$, i.e. not $\{1, 2, \cdots, n\}$. As a less random application of Theorem 1 consider the case where $c$ admits a base $\Sigma$ of {\it singleton} premise implications\footnote{We disallow $\emptyset$ as premise in order to avoid distracting trivial cases. Further we point to 4.1.2 for the connection to lattice distributivity.}. Such a situation can be captured by a directed graph. For instance
(15) \quad $\Sigma : = \{ 1 \rightarrow 6, \ 2 \rightarrow 56, \ 3 \rightarrow 2, \ 4 \rightarrow 3689, \ 5 \rightarrow 347, \ 6 \rightarrow 9, \ 7 \rightarrow 8, \ 8 \rightarrow 7 \}$
matches the arcs in the directed graph $G(\Sigma)$ in Figure 5(a). What, then, do $\Sigma_{GD}$ and the optimal bases $\Sigma_0$ look like? Being singletons, and because of $c(\emptyset) =\emptyset$, all premises of implications in $\Sigma$ are pseudoclosed (note $\{9\}$ is closed), and so Theorem 1(b) implies that these are {\it all} pseudoclosed sets of $c$. From this and Figure 5(a) it follows that
$$\begin{array}{lll} \Sigma_{GD} & = & \{1 \rightarrow 169, \ 2 \rightarrow 23456789, \ 3 \rightarrow 23456789, \ 4 \rightarrow 23456789, \\
\\
& & \ \ \ 5 \rightarrow 23456789, \ 6 \rightarrow 69, \ 7 \rightarrow 78, \ 8 \rightarrow 78 \} \end{array}$$
\begin{center}
\includegraphics[scale=0.6]{JoyOfImplicFig5}
\end{center}
The strong components of $G(\Sigma)$ are $\{1\}, \{2,3,4, 5\}, \{6\}$, $\{7,8\}, \{9\}$ and the resulting factor poset $(P, \leq)$ is depicted in Figure 5(b). We claim that the optimal bases $\Sigma_0$ look like this: The elements in each strong component $K$ are set up, in arbitrary circle formation such as $2 \rightarrow 5 \rightarrow 4 \rightarrow 3 \rightarrow 2$ for $K = [2]$. (For $|K| =1$ the circle formation reduces to a point.) Furthermore, for any non-minimal $K$ choose any minimal transversal $T$ of the lower covers of $K$ in $(P, \leq)$ and distribute $T$ to the circle formation of $K$ in arbitrary fashion. Thus $K = [2]$ admits $T_1 = \{6,7\}$ and $T_2 = \{6,8\}$. Choosing $T_1$ one can e.g. pad up $\{2 \rightarrow 5, 4 \rightarrow 3\}$ to $\{2 \rightarrow 56, 4 \rightarrow 37\}$ or alternatively $\{5 \rightarrow 4\}$ to $\{5 \rightarrow 467\}$. Choosing $T_2$ one can e.g. pad up $\{2 \rightarrow 5, 3 \rightarrow 2\}$ to $\{2 \rightarrow 56, 3 \rightarrow 28\}$. The latter choice yields an optimum base
$$\Sigma_0 = \{2 \rightarrow 56, \ 5 \rightarrow 4, \ 4 \rightarrow 3, \ 3 \rightarrow 28; \ 1 \rightarrow 6; \ 6 \rightarrow 9; \ 7 \rightarrow 8, \ 8 \rightarrow 7\}.$$
To prove the claim, first note that families of type $\Sigma_0$ obviously {\it are} implicational bases. We next show that {\it each} family $\Sigma'$ equivalent to $\Sigma$ in (15) must contain implications that link [2] to both lower covers [6] and [7]. Indeed, suppose each $\{\alpha \} \rightarrow B$ in $\Sigma'$ with $\alpha \in [2]$ has $B \cap [6] = \emptyset$. Then we get the contradiction that $[2] \cup [7] \cup [9]$ is $\Sigma'$-closed but not $\Sigma$-closed. From this it readily follows that the bases of type $\Sigma_0$ have minimum size $s(\Sigma_0)$. This kind of argument carries over to the optimization of all families $\Sigma$ with merely singleton premises.
Calculating $\Sigma_{GD}$ depends in which way $c$ is given. The two most prominent cases are $c = c_{\cal H}$ and $c = c(\Sigma, -)$.
The first is hard (3.6.3), the second easy (Expansion 11).
\subsection{The canonical direct implicational base}
An implicational base $\Sigma$ of $c$ is {\it direct} if $c(\Sigma, X) = X'$ for all $X \subseteq E$ (see (6)).
Analogous to Theorem 1 each closure operator again admits a {\it canonical} direct implicational base $\Sigma_{cd}$. In order to state this in Theorem 2 we need a few definitions.
Let $U \cup \{e\} \subseteq E$ with $e \not\in U$. Following [KN] we call $U$ a {\it stem for} $e$, and $e$ a {\it root for} $U$, if $U$ is minimal with the property that $e \in c(U)$. (Other names have been used by other authors.) Further $U \subseteq E$ is a {\it stem} if it is a stem for some $e$, and $e \in E$ is a {\it root} if it is a root for some $U$. If $U$ is a stem, we put
(16) \quad roots$(U) : = \{ e \in E: \ e \ \mbox{is a root for} \ U\}$.
For instance, if $c(\emptyset) \neq \emptyset$ then roots$(\emptyset) = c(\emptyset)$. Dually, if $e$ is a root, we put
(17) \quad stems$(e): = \{U \subseteq E: \ U \ \mbox{is a stem for} \ e \}$.
Note that $e \in E$ is {\it not} a root iff $E \backslash \{e\}$ is closed. Vice versa, a subset $S$ does {\it not} contain a stem iff all subsets of $S$ (including $S$ itself) are closed. Such sets $S$ are called\footnote{An equivalent definition occurs in 3.3.1. Note that in [W3] the meaning of ``free'' is ``independent''.} {\it free}.
\begin{tabular}{|l|} \hline \\
{\bf Theorem 2:} Let $c : {\cal P}(E) \rightarrow {\cal P}(E)$ be a closure operator. Then\\
\\
\hspace*{3cm} $\Sigma_{cd}: = \{X \rightarrow \ \mbox{roots}(X) : \ X \subseteq E \ \mbox{is a stem} \}$\\
\\
is a direct implicational base of $c$ of minimum cardinality.\\ \\ \hline \end{tabular}
{\it Proof.} Let $Y \subseteq E$. We first show that $Y' = c(Y)$. We may assume that $c(Y) \neq Y$ and pick any $e \in c(Y) \backslash Y$. Obviously there is $X \in \, \mbox{stems}(e)$ with $X \subseteq Y$. From $(X \rightarrow \, \mbox{roots}(X)) \in \Sigma_{cd}$ it follows that $e \in Y'$. Thus $\Sigma_{cd}$ is a direct implicational base of $c$.
To show that $|\Sigma| \geq |\Sigma_{cd}|$ for any direct base $\Sigma$ of $c$ we fix any stem $X$ (say with root $e$). It suffices to show that at least one implication in $\Sigma$ has the premise $X$. Consider the $\Sigma$-closure
$$c(X) = c(X, \Sigma)=X' = X \cup \{B_i : \ (A_i \rightarrow B_i) \in \Sigma, \ A_i \subseteq X\}.$$
Suppose we had $A_i \neq X$ for all premises $A_i$ occurring in $\Sigma$. Then each $A_i$ contained in $X$ is a {\it proper} subset of $X$, and so the minimality of $X$ forces $e \not\in c(A_i)$, whence $e \not\in B_i \subseteq c(A_i)$, whence $e \not\in X'$. The contradiction $e \not\in c(X)$ shows that at least one $A_i$ equals $X$. \quad $\square$
We stress that ``minimum'' in Theorem 2 concerns only the {\it directness} of $\Sigma_{cd}$; as will be seen, small subsets of $\Sigma_{cd}$ can remain (non-direct but otherwise appealing) bases of $c$. The base $\Sigma_{cd}$ has been rediscovered in various guises by various authors; see [BM] for a survey. We may add that in the context of FCA and the terminology of ``proper premises'' $\Sigma_{cd}$ seemingly was first introduced in [DHO]. In the relational database world $\Sigma_{cd}$ is called a ``canonical cover'' [M, 5.4] and (according to D. Maier) first appeared in Paredaens [P].
We shall relate $\Sigma_{cd}$ to prime implicates of pure Horn functions in 3.4, and to $M({\cal F})$ in 3.6, and we consider {\it ordered} direct bases in 4.3. Other aspects related to $\Sigma_{cd}$ are discussed in Expansions 5 and 6. Furthermore, the following concept will be more closely investigated in the framework of 4.1.5. We define it here because it is of wider interest. Namely, a stem $X$ is {\it closure-minimal} with respect to its root $e$ if $c(X)$ is a minimal member of $\{c(U): U \in stems(e)\}$.
{\bf 3.3.1} If $c: {\cal P}(E) \rightarrow {\cal P}(E)$ is a closure operator then $X \subseteq E$ is called {\it independent} if $x \not\in c(X \backslash \{x\})$ for all $x \in X$. A closed independent set is {\it free}. Further, a minimal generating set $X$ of $S \in {\cal F}_c$ is a {\it minimal key for} $S$, or simply a {\it minimal key} (if $S$ is irrelevant). Recall that a {\it set ideal} is a set system ${\cal S} \subseteq {\cal P}(E)$ such that $Y \in {\cal S}$ and $X \subseteq Y$ jointly imply $X \in {\cal S}$. The maximal members of ${\cal S}$ are its {\it facets}. The following facts are easy to prove:
\begin{enumerate}
\item [(a)] A subset is independent iff it is a minimal key.
\item[(b)] The family Indep$(c)$ of all independent (e.g. free) sets is a set ideal.
\item[(c)] Each stem is independent but not conversely.
\end{enumerate}
Since each $S \in {\cal F}_c$ contains at least one minimal key for $S$, it follows that $|{\cal F}_c| \leq |\mbox{Indep}(c)|$.
Instead of ``minimal key'' other names such as ``minimal generator'' are often used, and ``minimal key'' sometimes means ``minimal key of $E$''. Generating all minimal keys has many applications and many algorithms have been proposed for the task. See [PKID1, Section 5.1.1] for a survey focusing on FCA applications.
{\bf 3.3.2} Let us indicate an apparently new method to get all minimal keys; details will appear elsewhere. The facets $S_1, S_2, \cdots S_t$ of Indep$(c)$ can be calculated with the {\it Dualize and Advance} algorithm (google that). It is then clear that the minimal keys of any closed set $X \in {\cal F}_c$ are {\it among} the (often few) maximal members of $\{S_1 \cap X, \cdots, S_t \cap X\}$. For special types of closure operators more can be said (see 4.1.4 and 4.1.5).
\subsection{Pure Horn functions, prime implicates, and various concepts of minimization}
We recall some facts about Boolean functions with which we assume a basic familiarity; e.g. consult [CH] as reference. Having dealt with the consensus method and prime implicates on a general level in 3.4.1, we zoom in to pure Horn functions in 3.4.2 and link them to implications. (Impure Horn functions appear in 4.5.) In 3.4.3 we show that the canonical direct base $\Sigma_{cd}$ in effect is the same as the set of all prime implicates. Subsection 3.4.4 is devoted to various ways of measuring the ``size'' of an implicational base, respectively pure Horn function.
{\bf 3.4.1} Recall that a function $f: \{0,1\}^n \rightarrow \{0,1\}$ is called a {\it Boolean function}. A {\it bitstring} $a \in \{0,1\}^n$ is called a {\it model} of $f$ if $f(a) =1$. We write Mod$(f)$ for the set of all models of $f$. For instance, $f$ is a {\it negative} (or {\it antimonotone}) Boolean function if $x \leq y$ implies $f(x) \geq f(y)$.
Thus, if we identify $\{0,1\}^n$ with the powerset ${\cal P}[n]: = {\cal P}([n])$ as we henceforth silently do, then Mod$(f)$ is a set ideal in ${\cal P}[n]$ iff $f$ is a negative Boolean function. Using {\it Boolean variables} $x_1, \cdots, x_n$ one can represent each Boolean function $f$ (in many ways) by a {\it Boolean formula} $F(x) = F(x_1, \cdots, x_n)$. We then say that $F$ {\it induces} $f$. A {\it literal} is either a Boolean variable or its negation; thus $x_2$ and $\overline{x}_5$ are literals. A {\it clause} is a disjunction of literals, such as $x_1 \vee \overline{x}_3 \vee \overline{x}_4 \vee x_7$. A {\it conjunctive normal form} (CNF) is a conjunction of clauses. The CNF is {\it irredundant} if dropping any clause changes the represented Boolean function. Let $f$ be a Boolean function and let $C$ be a clause. Then $C$ is an {\it implicate} of $f$ if every model of $f$ is a model of $C$. We emphasize that ``implicate'' should not be confused with ``implication'' $A \rightarrow B$, but there are connections as we shall see. One calls $C$ a {\it prime implicate} if dropping any literal from $C$ results in a clause which is no longer an implicate of $f$. In Expansion 7 we show how {\it all} prime implicates of $f$ can be generated from an arbitrary CNF of $f$. A {\it prime} CNF is a CNF all of whose clauses are prime implicates.
{\bf 3.4.2} A Boolean function $f: {\cal P}[n] \rightarrow \{0,1\}$ is a {\it pure Horn function} if Mod$(f) \subseteq {\cal P}[n]$ is a closure system\footnote{Some authors, e.g. [CH, chapter 6], use a different but dual definition, i.e. that $\{a \in \{0,1\}^n: f(a) =0 \}$ must be a closure system. Each theorem in one framework immediately translates to the dual one. Do not confuse this kind of duality with the kind of duality in [CH, 6.8].}.
The induced closure operator ${\cal P}[n] \rightarrow {\cal P}[n]$ we shall denote by $c_f$. Conversely, each closure operator $c: {\cal P}[n] \rightarrow {\cal P}[n]$ induces the pure Horn function $f_c: {\cal P}[n] \rightarrow \{0,1\}$ defined by $f_c^{-1} (1) = {\cal F}_c$. Similar to 2.1.1 one has $f_{(c_f)} =f$ and $c_{(f_c)} = c$. As mentioned in 3.4.1 many distinct formulas $F$ induce any given\footnote{For instance, using concatenation instead of $\wedge$, {\it one} formula $F$ for the Horn function $f$ induced by the closure system in Figure 4(a) is $F(x_1, \cdots, x_7) = x_1 x_2 x_3 x_4 x_5 x_6 x_7 \vee x_1 x_2 x_3 x_4 \overline{x}_5 \overline{x}_6 \overline{x}_7 \vee x_1 x_2 \overline{x}_3 \overline{x}_4 x_5 \overline{x}_6 \overline{x}_7 \vee x_1 x_2 \overline{x}_3 \overline{x}_4 \overline{x}_5 \overline{x}_6 \overline{x}_7$.} pure Horn function $f$.
As is common, we shall focus on the most ``handy'' kind of formula $F$, for which the letter $H$ will be reserved.
In order to define $H$ we first define a {\it pure} (or {\it definite}) {\it Horn clause} as a clause with exactly one positive literal.
Thus $\overline{x}_1 \vee \overline{x}_2 \vee \overline{x}_3 \vee x_4$ is a pure Horn clause $C$. Accordingly consider the implication $\{1,2,3\} \rightarrow \{4\}$. One checks that the Boolean function induced by formula $C$ is a Horn function $f: {\cal P}[n] \rightarrow \{0,1\}$ (for any fixed $n \geq 4$). In fact $\mbox{Mod}(f) = {\cal F} (\{123 \rightarrow 4\})$. However, this doesn't extrapolate to the implication $12 \rightarrow 34$ which doesn't match $\overline{x}_1 \vee \overline{x}_2 \vee x_3 \vee x_4$! Rather $\{12 \rightarrow 34\}$ is equivalent to $\{12 \rightarrow 3, \ 12 \rightarrow 4\}$ and whence\footnote{This is a good place to address a source of confusion. The formula $x_1 \wedge x_2$ {\it also} is the conjunction of two pure Horn clauses; it matches the implication $\emptyset \rightarrow \{1,2\}$. The formula $x_1 \wedge x_2 \rightarrow {\tt True}$ is a {\it tautology} which matches the implication $\{1,2\} \rightarrow \emptyset$. But $x_1 \wedge x_2 \rightarrow {\tt False}$ matches {\it no implication}. Rather it amounts to the {\it impure} Horn clause $\overline{x}_1 \vee \overline{x}_2$, the topic of Section 4.5.} matches the conjunction $(\overline{x}_1 \vee \overline{x}_2 \vee x_3) \wedge (\overline{x}_1 \vee \overline{x}_2 \vee x_4)$ of {\it two} pure Horn clauses. Generally, a {\it pure Horn CNF} $H$ is defined as a conjunction of pure Horn clauses. Thus $H$ matches a family $\Sigma_H$ of unit implications. In particular, this shows that the Boolean function $f$ induced by $H$ really {\it is} a pure Horn function: $\mbox{Mod}(f)$ equals ${\cal F}(\Sigma_H)$, which we know to be closure system (2.2). Conversely, starting with any family $\Sigma$ of implications, the {\it unit expansion} $\Sigma^u$ is obtained by replacing each $(A \rightarrow B) \in \Sigma$ by the unit implications $A \rightarrow \{b\} \ (b \in B)$. By definition $H_\Sigma$ is the pure Horn CNF whose clauses match the members of $\Sigma^u$. 
Notice that special features of $\Sigma$ need not be mirrored in $H_\Sigma$, and vice versa for $H$ and $\Sigma_H$.
For instance, if $\Sigma$ is optimum then the pure Horn clauses in $H_\Sigma$ need not be prime. See also 3.4.4.1.
{\bf 3.4.3} It is evident from the definitions of stem, root and prime implicate, and from Theorem 2, that each implication in $(\Sigma_{cd})^u$ yields a prime implicate of the pure Horn function $f : {\cal P}[n] \rightarrow \{0,1\}$ determined by $\Sigma_{cd}$. Do we get {\it all} prime implicates (Horn or not) of $f$ in this way? Yes. The traditional proof is e.g. in [CH, p.271], and a fresh one goes like this.
Suppose $f$ had a prime implicate $C$ which is not a Horn clause, say without loss of generality $C$ is $\overline{x}_1 \vee \overline{x}_2 \vee x_3 \vee x_4$. Then both $\overline{x}_1 \vee \overline{x}_2 \vee x_3$ and $\overline{x}_1 \vee \overline{x}_2 \vee x_4$ are no implicates of $f$. Hence there are $S, T \in \, \mbox{Mod}(f)$ such that $\{1, 2\} \subseteq S$ but $3 \not\in S$, and such that $\{1,2\} \subseteq T$ but $4 \not\in T$. Thus $\{1,2\} \subseteq S \cap T \in \, \mbox{Mod}(f)$ but both $3,4 \not\in S \cap T$. Hence $S \cap T$ is a model of $f$ but not of $C$, contradicting the assumption that $C$ is an implicate of $f$. \quad $\square$
Thus the members of $\Sigma_{cd}^u$ are in bijection with the prime implicates of $f$. Any (usually non-direct) base of implications $\Sigma \subseteq \Sigma_{cd}^u$ will henceforth be called a {\it base of prime implicates}. In other words, bases of prime implicates match prime pure Horn CNF's.
{\bf 3.4.4} We now drop pure Horn functions until 3.4.4.1. Apart from $ca(\Sigma )$ and $s(\Sigma)$ introduced in 2.2 there are other ways to measure families of implications. If say
(18) \qquad $\Sigma = \{\, \{a,b\} \rightarrow \{c,d\},\quad \{a, c, e\} \rightarrow \{b\},\quad \{d\} \rightarrow \{b,f\}\,\}$
then $ca(\Sigma) = 3$ and $s(\Sigma) = 11$. Further the {\it left hand size} is defined as the sum of the cardinalities of the premises, thus $lhs (\Sigma) : = 2+3+1 = 6$. Similarly the {\it right hand size} is $rhs(\Sigma) : = 2+1+2=5$.
What are the relations between ``usual'' optimality ($op$ as defined in 2.2) and the new kinds of optimality lhs-op and rhs-op? Suppose first $\Sigma_0$ is simultaneously lhs-op and rhs-op. If $\Sigma$ is any other base of ${\cal F}(\Sigma_0)$ then
$$s(\Sigma_0) = lhs(\Sigma_0 ) + rhs(\Sigma_0) \leq lhs (\Sigma) + rhs (\Sigma) = s(\Sigma),$$
and so $\Sigma_0$ is optimal. This was observed in [AN1] and likely elsewhere before. Conversely, it follows at once from Theorem 1(e) that op $\Rightarrow$ lhs-op. In [ADS] it is shown (see Figure 6) that also op $\Rightarrow$ rhs-op. For instance, it is impossible that a closure operator has two optimum bases with implications $\ast \ast \rightarrow \ast \ast \ast, \ \ \ast \ast \rightarrow \ast \ast$ and
$\ast \ast \ast \rightarrow \ast, \ \ast \ast \ast \rightarrow \ast \ast$ respectively. To summarize:
(19) \qquad op \ $\Leftrightarrow$ \ lhs-op and rhs-op
A slightly less natural parameter is (ca$+$rhs)$(\Sigma) : = |\Sigma| + rhs(\Sigma)$. According to [ADS] these implications (and their consequences, but no others) take place:
\begin{center}
\includegraphics{JoyOfImplicFig6}
\end{center}
{\bf 3.4.4.1} Let us stick with the measures above and re-enter pure Horn functions to the picture. For starters, when $\Sigma$ in (18) is translated in a pure Horn CNF we get
$(18')$ \qquad $H_\Sigma \ \ = \ \ (\overline{a} \vee \overline{b} \vee c) \quad \wedge \quad (\overline{a} \vee \overline{b} \vee d) \quad \wedge \quad (\overline{a} \vee \overline{c} \vee \overline{e} \vee b) \quad \wedge \quad (\overline{d} \vee b) \quad \wedge \quad (\overline{d} \vee f)$
Notice that $rhs (\Sigma) = 5$ and $5$ is the number of clauses of $H_\Sigma$. Generally, for a fixed pure Horn function $f: {\cal P}[n] \rightarrow \{0,1\}$ put
$$rhs(f) : = \min \{rhs(\Sigma) : \ \Sigma \ \mbox{is a base of} \ \mbox{Mod}(f)\}.$$
Thus $rhs(f)$ is the minimum number\footnote{Many other acronyms for this measure are dispersed throughout the literature. For instance, [CH, p.297] uses $\tau (f)$ for $rhs(f)$. On the side of uniformity, our notation $\lambda$ above matches the one in [CH, p.297].}
of pure Horn clauses needed to represent $f$. Rephrasing the [ADS] result above (which is reproven in [AN1, Thm.10]) one can say: If $\Sigma$ is any optimum base of $c$ then $H_\Sigma$ has rhs$(f_c)$ many clauses. The ``inverse'' operation of unit expansion is {\it aggregation}. Thus if $\Sigma = \{12 \rightarrow 3, 12 \rightarrow 4, 35 \rightarrow 4, 35 \rightarrow 1, 45 \rightarrow 2\}$ then $\Sigma^{ag}: = \{12 \rightarrow 34, 35 \rightarrow 14, 45 \rightarrow 2\}$.
If similarly to $rhs(f)$ we define
$$ca(f) : = \min \{ca (\Sigma ) : \Sigma \ \mbox{is a base of Mod}(f)\},$$
then $ca(f)$ is not so succinctly expressed in terms of Horn clauses (but is e.g. useful in 4.5.2). Similarly the likewise defined parameters $lhs(f)$ and $s(f)$ are clumsier than their counterparts $lhs(\Sigma)$ and $s(\Sigma)$. Apart from $rhs(f)$, the most natural measure for pure Horn functions is the minimum number $\lambda(f)$ of literals appearing in any pure Horn CNF representation of $f$. One calls $\lambda$ the {\it number of literals} measure. Clearly $\lambda (f) \geq s(f)$. For instance, if $H_\Sigma$ from $(18')$ induces $f$, then $\lambda (f) \leq 14$. Similarly $s(f) \leq 11$ in view of (18).
Both rhs-optimization and $\lambda$-optimization are NP-hard, and even {\it approximation} remains hard [BG].
\subsection{Acyclic closure operators and generalizations}
To any family $\Sigma$ of implications on a set $E$ we can associate its {\it implication-graph}\footnote{The terminology is from [BCKK], while $G(\Sigma)$ itself was independently introduced in [W3, p.137] and [HK, p.755].} $G(\Sigma)$. It has vertex set $E$ and arcs $a \rightarrow b$ whenever there is an implication $A \rightarrow B$ in $\Sigma$ with $a \in A$ and $b \in B$. What happens when $\Sigma$ merely has singleton-premise implications was dealt with in 3.2.2. Another natural question is: If $G(\Sigma)$ is acyclic, i.e. has no directed cycles, what does this entail for the closure operator $X \mapsto c(\Sigma, X)$?
The first problem is that for equivalent families $\Sigma$ and $\Sigma'$ it may occur that $G(\Sigma)$ is acyclic but $G(\Sigma')$ isn't. For instance, in the example from [HK, p.755] one checks that $\Sigma = \{1 \rightarrow 2, 2 \rightarrow 3\}$ and $\Sigma'= \{1 \rightarrow 3, 2 \rightarrow 3, 13 \rightarrow 2 \}$ are equivalent. While $G(\Sigma)$ is acyclic, $G(\Sigma')$ is not because it has the cycle $2 \rightarrow 3 \rightarrow 2$. Observe that $13 \rightarrow 2$ is no prime implicate because
it follows from $1 \rightarrow 2$.
Indeed, the problem evaporates if one restricts attention to the prime implicates. More precisely, call\footnote{In [HK] the authors talk about the acyclicity of pure Horn formulas (or functions). Recall from 3.4.2 the equivalence between closure operators and pure Horn functions.} a closure operator $c$ {\it acyclic} if there is a base $\Sigma$ of $c$ which has an acyclic implication-graph $G(\Sigma)$. As shown in [HK, Cor.V.3] a closure operator $c$ is acyclic iff $G(\Sigma)$ is acyclic for each base $\Sigma$ of prime implicates. Hence (consensus method, Expansion 7) for an arbitrary family $\Sigma$ of implications it can be checked in quadratic time whether $c(\Sigma, -)$ is an acyclic closure operator.
{\bf 3.5.1} Let $(E, \leq)$ be any poset and let $c: {\cal P}(E) \rightarrow {\cal P}(E)$ be a closure operator with $c(\emptyset) = \emptyset$ and such that for all $Z \subseteq E$ and $y \in c(Z)$ it follows that $y \in c(\{z \in Z: \ z \geq y \})$. Put another way, $c(Z)$ is always a {\it subset} of the order ideal $Z \downarrow$ generated by $Z$. Following\footnote{This terminology is more telling than ``$G$-geometry'' used in [W3].} [SW] we call such an operator of {\it poset type}.
\begin{tabular}{|l|} \hline \\
{\bf Theorem 3}: A closure operator $c$ is acyclic if and only if it is of poset type.\\ \\ \hline \end{tabular}
{\it Proof.} We shall trim the argument of [W3, Cor.15]. So let $c: {\cal P}(E) \rightarrow {\cal P}(E)$ be acyclic and let $\Sigma$ be any base of $c$ for which $G(\Sigma)$ is acyclic. On $E$ we define a transitive binary relation $>$ by setting $b > a$ iff there is a directed path from $b$ to $a$ in $G(\Sigma)$. By the acyclicity of $G(\Sigma)$ this yields a poset $(E,\leq)$. Consider $Z \subseteq E$ and $y \in E$ such that $y \in c(Z)$. Then $c(Z) = c(\Sigma, Z)$ because $\Sigma$ is a base of $c$. To fix ideas suppose $c(\Sigma, Z) = Z''$ where $Z'$ is as defined in (6), and say that $Z' = Z \cup \{3,4\}$ because $(\{1,2\} \rightarrow \{3,4\}) \in \Sigma$ and $\{1,2\} \subseteq Z$. Further let $Z'' = Z'\cup \{6,y\}$ in view of $(\{3,5\} \rightarrow \{6,y\}) \in \Sigma$ and $3,5 \in Z'$. Then $1, 2, 5 \in Z$ and all of them are $> y$ because $G(\Sigma)$ has directed paths $1 \rightarrow 3 \rightarrow y$ and $2 \rightarrow 3 \rightarrow y$ and $5 \rightarrow y$. Hence $y \in c(\Sigma, \{1,2,5\}) \subseteq c(\{z\in Z: z \geq y\})$. Thus $c$ is of poset type.
Conversely let $c$ be of poset type with underlying poset $(E, \leq)$. Let $\Sigma$ be a base of $c$ whose unit expansion yields a {\it prime} Horn CNF. It suffices to show that $G(\Sigma)$ is acyclic. Suppose to the contrary $G(\Sigma)$ contains a directed cycle, say $1 \rightarrow 2 \rightarrow 3 \rightarrow 4 \rightarrow 1$. By definition of $G(\Sigma)$ there is $(A \rightarrow B) \in \Sigma$ with $4 \in A$ and $1 \in B$, and so $1 \in c(A)$. By assumption $1 \in c(A_0)$ where $A_0 : = \{z \in A: z \geq 1\}$. If we had $4 \not\in A_0$ then $A_0 \rightarrow \{1\}$ would be an implicate of $\Sigma$, which cannot be since $A \rightarrow \{1\}$ is a prime implicate. It follows that $4 \in A_0$, whence $4 > 1$. By the same token one argues that $3 > 4$, and eventually $1 > 2 > 3 > 4 > 1$, which is the desired contradiction.
$\square$
According to [HK, p.756] each acyclic closure operator $c$ admits a unique nonredundant base $\Sigma_{acyc}$ of prime implicates. Consequently (why?) $\Sigma_{acyc}$ is rhs-optimal and $\lambda$-optimal. Starting out with any family $\Sigma$ of unit implications for which $G(\Sigma)$ is acyclic (and whence $c : = c(\Sigma, -)$ is acyclic), it is easy to calculate $\Sigma_{acyc}$.
To fix ideas, one checks that
$$\Sigma:= \{4 \rightarrow 5, \ \ 6 \rightarrow 1, \ \ 23 \rightarrow 4, \ \ 23 \rightarrow 1, \ \ 35 \rightarrow 6, \ \ 34 \rightarrow 6, \ \ 234 \rightarrow 5\}$$
has $G(\Sigma)$ acyclic. Any $A \rightarrow \{b\}$ in $\Sigma$ which is not a prime implicate, can only fail to be one because some $A_0 \varsubsetneqq A$ satisfies $b \in c(\Sigma \backslash \{A \rightarrow \{b\}\}, A_0)$, and so $A \rightarrow \{b\}$ is redundant. Here only $234 \rightarrow 5$ isn't a prime implicate (take $A_0 = \{2,3\}$). But also prime implicates in $\Sigma$ may be redundant. In our case $34 \rightarrow 6$ is a consequence of $4 \rightarrow 5$ and $35 \rightarrow 6$. One checks that $\Sigma \backslash \{234 \rightarrow 5, 34 \rightarrow 6\}$ consists of prime implicates and is nonredundant. Hence it must be $\Sigma_{acyc}$. Obviously $\Sigma_{acyc}$ is not minimum among {\it all} bases of $c$ since $23 \rightarrow 1$ and $23 \rightarrow 4$ can be aggregated to $23 \rightarrow 14$.
{\bf 3.5.2} As to generalizations, two variables $x$ and $y$ of a Boolean formula $F = F(u_1, \cdots, u_n)$ are {\it logically equivalent} if they have the same truth value in every model of (the function induced by) $F$. This amounts to say that both $x\rightarrow y$ and $y \rightarrow x$ are (prime) implicates of $f$. A closure operator $c$ is {\it quasi-acyclic} if there is a base $\Sigma$ of prime implicates such that all elements within a strong component of $G(\Sigma)$ are logically equivalent. Each acyclic closure operator is quasi-acyclic because all components of $G(\Sigma)$ are singletons. Also the kind of closure operators $c = c(\Sigma, -)$ considered in 3.2.2 are evidently quasi-acyclic.
A closure operator $c$ is {\it component-wise quadratic} $(CQ)$ if there is a base $\Sigma$ of prime implicates such that $G(\Sigma)$ has the following property. For each prime implicate $A \rightarrow \{y\}$ of $c$ and each strong component $K$ of $G(\Sigma)$ it follows from $y \in K$ that $|A \cap K| \leq 1$. Thus for each component $K$ of $G(\Sigma)$ the ``traces'' of the prime implicates on $K$ are ``quadratic'' in the sense of having cardinality $\leq 2$. Here comes the argument of why quasi-acyclic entails $CQ$. Suppose $A \rightarrow \{y\}$ is a prime implicate of $c$ such that $y \in K$ and $A \cap K \neq \emptyset$. Take $x \in A \cap K$. Because $\{x\} \rightarrow \{y\}$ is an implicate of $c$ by quasi-acyclicity, we must have $A = \{x\}$ (which implies $|A\cap K|=1$). In a tour de force it is shown in [BCKK] that for each $CQ$ closure operator an rhs-optimum base (i.e. minimizing the number of clauses) can be calculated in polynomial time; many auxiliary graphs beyond $G(\Sigma)$ appear in [BCKK]. The quasi-acyclic case had been dealt with in [HK]. Another way to generalize ``acyclic'' is to forbid so-called $D$-cycles, see Expansion 18.
\subsection{Implications and meet-irreducibles}
First some prerequisites about hypergraphs.
A {\it hypergraph} is an ordered pair $(E, {\cal H})$ consisting of a {\it vertex set} $E$ and a set of {\it hyperedges} ${\cal H}$. The hypergraph is {\it simple} if $X \not\subseteq Y$ for all distinct $X,Y \in {\cal H}$. (An ordinary simple graph is the special case where $|X| = 2$ for all $X \in {\cal H}$). A {\it transversal} of ${\cal H}$ is a set $Y \subseteq E$ such that $Y \cap X \neq \emptyset$ for all $X \in {\cal H}$. We write ${\cal T}r({\cal H})$ for the set of all transversals. Furthermore, the {\it transversal hypergraph} $mtr({\cal H})$ consists of all {\it minimal} members of ${\cal T}r({\cal H})$. It is easy to see that ${\cal H} \subseteq mtr(mtr({\cal H}))$. Arguably the single most important fact about general simple hypergraphs is [S, p.1377] that equality takes place:
(20) \quad $mtr(mtr({\cal H})) = {\cal H}$
The {\it transversal hypergraph problem} (or {\it hypergraph dualization}), i.e. the problem of calculating $mtr({\cal H})$ from ${\cal H}$ has many applications and has been investigated thoroughly. See [EMG] for a survey and [MU] for a cutting edge implementation of hypergraph dualization.
Let ${\cal F} \subseteq {\cal P}(E)$ be a closure system and let $M({\cal F}) \subseteq {\cal F}$ be its set of meet-irreducibles (see 2.1).
Clearly the set max$({\cal F})$ of all maximal members of ${\cal F} \backslash \{E\}$ is a subset of $M({\cal F})$. Adopting matroid terminology (4.1.4) we refer to the members of $\max({\cal F})$ as {\it hyperplanes}. More generally, for any $e \in E$ let
\begin{center}
${\bf \hbox{\boldmath$\max$}({\cal F},e)}$ be the set of all $Y \in {\cal F}$ that are maximal with the property that $e \not\in Y$.
\end{center}
If $\bigcap {\cal F} = \emptyset$ (which we assume to avoid trivial cases) then $\max ({\cal F},e) \neq \emptyset$ for all $ e \in E$. In fact each $Y \in \max ({\cal F}, e)$ is meet-irreducible.
Conversely, every $Y \in M({\cal F})$ belongs to some $\max ({\cal F}, e)$. (See Expansion 12.)
Therefore:
(21) \quad $M({\cal F}) \quad = \quad \bigcup \{\max ({\cal F}, e): \ e \in E\}$.
It is convenient that the sets $\max ({\cal F}, e)$ can be retrieved from any generating set ${\cal H}$ of ${\cal F}$, i.e. not the whole of ${\cal F}$ is required:
(22) \quad $\max ({\cal F}, e) = \max \{Y \in {\cal H}: e \not\in Y\}$.
The proof is given in Expansion 10.
The smaller ${\cal H}$, the faster we can calculate the simple hypergraphs
(23) \quad ${\bf cmax ({\cal F}, e)} \ \ : = \ \ \{E \backslash X: \ X \in \max({\cal F}, e)\} \quad (e\in E)$.
\begin{center}
\includegraphics{JoyOfImplicFig7}
\end{center}
The next result is crucial for traveling the right hand side of the triangle in Figure 7.
\begin{tabular}{|l|} \hline \\
{\bf Theorem 4:} For any closure system ${\cal F} \subseteq {\cal P}(E)$ with $\bigcap {\cal F} = \emptyset$ one has\\
\\
(a) \ stems$(e) \cup \{e\} = mtr(\mbox{cmax}({\cal F},e)) \quad (e \in E)$\\
\\
(b) \ cmax$({\cal F},e) = mtr(\mbox{stems}(e) \cup \{e\}) \quad (e \in E)$\\
\\
\hline \end{tabular}
{\it Proof.} We draw on [MR2, Lemma 13.3 and Cor.13.1]. We first show that for any fixed $e\in E$ it holds for all $Y \subseteq E$ that:
(24) \quad $e \in c(Y) \ \Leftrightarrow \ Y \in {\cal T}r (\mbox{cmax}({\cal F}, e))$.
{\it Proof of (24)}. Suppose $Y$ is such that $e \in c(Y) = \cap \{X \in M({\cal F}): X \supseteq Y\}$; see (4). Thus from $X \in M({\cal F})$ and $X \supseteq Y$ follows $e \in X$. For each $X \in \mbox{max}({\cal F}, e) \subseteq M({\cal F})$ (see (21)) we have $e \not\in X$, hence $X \not\supseteq Y$, hence $Y \cap (E \backslash X) \neq \emptyset$, hence $Y \in {\cal T}r(cmax({\cal F}, e))$. Conversely, let $Y$ be such that $e \not\in c (Y)$. Then, because of $c(Y) = \cap \{X \in M({\cal F}) : X \supseteq Y\}$, there is $X \in M({\cal F})$ with $e \not\in X \supseteq Y$. We may assume that $X$ is {\it maximal} within $M({\cal F})$ with respect to $e \not\in X$. It then follows from (22) (put ${\cal H} : = M({\cal F})$) that $X \in \max ({\cal F}, e)$. From $X \supseteq Y$ it follows that $Y \cap (E \backslash X) = \emptyset$, and so $Y \not\in {\cal T}r(cmax ({\cal F}, e))$. This proves (24).
Let $e \in E$ be fixed. Then the family of minimal $Y$'s satisfying $e \in c(Y)$ is stems$(e) \cup \{e\}$. Likewise the family of minimal $Y$'s satisfying $Y \in {\cal T}r(cmax ({\cal F}, e))$ is $mtr(cmax({\cal F}, e))$. By (24) these two set families coincide, which proves (a).
As to (b), it follows from (a) and (20) that
$mtr(\mbox{stems}(e) \cup \{e\}) = mtr(mtr(\mbox{cmax}({\cal F}, e))) = \mbox{cmax}({\cal F},e)$.
$\square$
As was independently done in [BDVG], let us discuss the six directions in the triangle of Figure 7. Notice that matters don't change much if instead of $M({\cal F})$ we substitute any ``small'' (informal notion) generating set ${\cal H}$ of ${\cal F}$ in Figure 7, and instead of $\Sigma_{GD}$ we sometimes consider any ``small'' (w.r.t. $\Sigma_{GD}$) base $\Sigma$ of ${\cal F}$.
Both practical algorithms illustrated by examples, and theoretic complexity will be discussed. As to going from $\Sigma_{cd}$ to a minimum base $\Sigma$, the most elegant and only slightly sub-optimal method is the one of Shock [Sh]; see Expansion 11. The way from $\Sigma$ to $\Sigma_{cd}$ can be handled by the consensus method (Expansion 7); for another method see [RCEM].
In Subsections 3.6.1 to 3.6.3 we outline how to travel the remaining four directions, with more details provided in Expansions.
{\bf 3.6.1} Recall from Theorem 2 that knowing the canonical direct base $\Sigma_{cd}$ means knowing the members of $\bigcup \{\mbox{stems}(e): e \in E\}$. Likewise, by (21) and (23), knowing $M({\cal F})$ amounts to knowing the set collections cmax$({\cal F},e)\, (e \in E)$. Therefore Theorem 4 says that getting $\Sigma_{cd}$ from $M({\cal F})$ or vice versa is as difficult as calculating all minimal transversals of a hypergraph. To fix ideas let us carry out the way from $M({\cal F})$ to $\Sigma_{cd}$ on a toy example. Suppose that $E = [6]$ and ${\cal F}$ is such that
(25) \quad $M({\cal F}) = \{12, 12345, 124, 1245, 13456, 245, 25, 3456, 356\}$.
From (21) and (22) we get
(26) \quad $M({\cal F}) \ \ = \ \ \max ({\cal F},1) \cup \cdots \cup \max ({\cal F},6)$
\hspace*{2.2cm} $= \{245, 3456\} \cup \{13456\} \cup \{1245\} \cup \{25,12, 356\} \cup \{124\} \cup \{12345\}$.
The set union in (26) happens to be disjoint. Generally the union in (21) is disjoint iff $|X^\ast \backslash X| =1$ for all $X \in M({\cal F})$. Here $X^\ast$ is the unique upper cover of $X$ in ${\cal F}$. From say $\max ({\cal F}, 4) = \{25, 12, 356\}$ we get $\mbox{cmax}({\cal F}, 4) = \{1346, 3456, 124\}$, and by Theorem 4(a) we have stems$(4) \cup \{4\} = mtr(\{1346, 3456, 124\})$ which turns out to be $\{4, 13, 16, 23, 26, 15\}$. Dropping $\{4\}$ yields stems$(4)$. Likewise one calculates stems$(1) = \{23, 26\}$, stems$(3) = \{6\}$, stems$(5) = \{3, 6\}$, stems$(2) =$ stems$(6) = \emptyset$. By definition of $\Sigma_{cd}$ in Theorem 2 we conclude that
(27) \quad $\Sigma_{cd} = \{13 \rightarrow 4, \ 16 \rightarrow 4, \ 23 \rightarrow 14, \ 26 \rightarrow 14, \ 15 \rightarrow 4, \ 6 \rightarrow 35, \ 3 \rightarrow 5 \}$.
Let us mention a natural enough alternative [W1, Algorithm 3] for $M({\cal F}) \rightarrow \Sigma_{cd}$.
By processing the members of $M({\cal F})$ one-by-one it updates a corresponding direct base. While its worst-case complexity is poor, its average behaviour still awaits proper evaluation.
{\bf 3.6.2} How to get $M({\cal F})$ from an {\it arbitrary} (non-direct) implication base $\Sigma$? One of the first methods was [MR2, Algorithm 13.2], which was improved in [W1, Sec.9].
In brief, in view of (21) both methods proceed as follows. For $\Sigma = \{A_1 \rightarrow B_1, A_2 \rightarrow B_2, \cdots, A_n \rightarrow B_n\}$ let $\Sigma_i: = \{A_1 \rightarrow B_1,\cdots, A_i \rightarrow B_i \}$. Then $\max (i, e) : = \max ({\cal F}(\Sigma_i),e)$ can be expressed in terms of the set families $\max (i -1, e)$ and $\max (i-1, a)$ where $a$ ranges over $A_i$. Another idea for $\Sigma \rightarrow M({\cal F})$ in [BMN] features an interesting fixed-parameter-tractability result.
Expansion 8 exhibits a fourth way.
{\bf 3.6.2.1} Unfortunately it is shown in [KKS] that $|M({\cal F})|$ can be exponential with respect to $|\Sigma|$, and vice versa. Furthermore, according to [K] both transitions $\Sigma \rightarrow M({\cal F})$ and $M({\cal F}) \rightarrow \Sigma$ are at least as hard as the transversal hypergraph problem. What's more, whatever the complexity of these transitions, they are equivalent under polynomial reductions.
Along the way a fifth algorithm [K, p.360-361] to get the {\it characteristic models} (i.e. $M({\cal F})$) from $\Sigma$ is offered. (Some of these results extend to the {\it arbitrary} Horn functions in 4.5.)
{\bf Open Problem 1}: Compare, on a common platform and in a careful manner akin to [KuO1], the five methods mentioned above (and possibly others) for calculating $M({\cal F})$ from $\Sigma$.
{\bf 3.6.2.2} What is the point of calculating $M({\cal F})$ from $\Sigma$? This problem first arose in the context of finding an {\it Armstrong Relation} ($=$ short example database) for a given set of functional dependencies. Although an Armstrong Relation is not quite the same as $M({\cal F})$, the number of its records is $|M({\cal F})| +1$, see [MR2, Thm.14.4]. Having $M({\cal F})$ enables a ``model-based'' approach to reasoning. For instance, deciding whether $\Sigma \vDash (A \rightarrow B)$ holds, reduces to checking whether $A \subseteq X$ entails $B \subseteq X$ for all $X \in M({\cal F})$. This beats the test in (8) when $|M({\cal F})| \ll |\Sigma|$. With an eye on using model-based reasoning in Knowledge Bases, the article [KR] extends (as well as possible) the concept of characteristic models from Horn functions to arbitrary Boolean functions. Observe that $|M({\cal F})| \ll |\Sigma|$ also occurs in the context of Cayley multiplication tables (4.2.2). Furthermore, many combinatorial problems (e.g. calculating all minimal cutsets of a graph) amount to calculating the subset max$({\cal F}) \subseteq M({\cal F})$ from $\Sigma$.
{\bf 3.6.3} How can one conversely get a small or minimum base $\Sigma$ from $M({\cal F})$ (or from another generating set ${\cal H} \subseteq {\cal F}$)?
This process is nowadays known as {\it Strong Association Rule Mining} (applications follow in 3.6.3.4). For succinctness, suppose we want $\Sigma = \Sigma_{GD}$. Unfortunately, as shown in [KuO2], not only can $|\Sigma_{GD}|$ be exponential in the input size $|M({\cal F})|\times |E|$, but also calculating the {\it number} $|\Sigma_{GD}| $ is \#$P$-hard. Despite the exponentiality of $|\Sigma_{GD}|$ one could imagine (in view of (36)) that $\Sigma_{GD}$ can at least be generated in output-polynomial time, given $M({\cal F})$. As shown in [DS], this problem is at least as hard as generating all minimal transversals. Given $M({\cal F})$, the pseudoclosed sets cannot be enumerated in lexicographic order [DS], or reverse lexicographic order [BK], with polynomial delay unless $NP = P$. Several related results are shown in [BK]. For instance, given ${\cal H} \subseteq {\cal P}(E)$ and $A \subseteq E$, it is $coNP$-complete to decide whether any minimum base $\Sigma$ of ${\cal F}({\cal H})$ (see 2.1.1) contains an implication of type $A \rightarrow B$. (Conversely, ${\cal F}$ can also be ``large'' with respect to $\Sigma_{GD}$, see Expansion 4.)
{\bf 3.6.3.1} A different approach to go from ${\cal H}$ to a small base $\Sigma$ of ${\cal F} = {\cal F}({\cal H})$ was hinted at in [W1, p.118] and developed in [RDB]. It essentially amounts to a detour ${\cal H} \rightarrow M({\cal F})$ and then $M({\cal F}) \rightarrow \Sigma_{cd} \rightarrow \Sigma$, but in a clever way that avoids generating large chunks of $\Sigma_{cd}$. It is argued that even if the resulting base $\Sigma$ is considerably larger than $\Sigma_{GD}$, this is more than offset by the short time to obtain $\Sigma$. A similar approach is taken in [AN2], but instead of $\Sigma_{cd}$ the $D$-basis of 4.3 (a subset of $\Sigma_{cd}$) is targeted. Furthermore the likely superior [MU] subroutine for hypergraph dualization is used.
{\bf 3.6.3.2} In another vein, it was recently observed in [R] that for given ${\cal H} \subseteq {\cal P}(E)$ one can readily exhibit a set $\Sigma'$ of implications based on a superset $E'\supseteq E$ such that ${\cal F}': = {\cal F} (\Sigma')$ satisfies ${\cal F}'[E] = {\cal F}({\cal H})$. Here ${\cal F}'[E] : = \{X \cap E: X \in {\cal F}'\}$ is the {\it projection} of ${\cal F}'$ upon $E$. Furthermore, $|E'| = |E| + |{\cal H}|$ and $\Sigma'$ has a mere $2|E|$ implications. What also is appealing: If ${\cal F}'$ is given by $012n$-rows as in 4.4 then ${\cal F}'[E]$ is smoothly calculated by setting to $0$ all components with indices from $E'\setminus E$, and adapting the other components accordingly.
{\bf 3.6.3.3} A natural variation of the ${\cal H} \rightarrow \Sigma$ theme is as follows. For any ${\cal H} \subseteq {\cal P}[n]$ call a family $\Sigma$ of implications a {\it Horn approximation} of ${\cal H}$ if ${\cal H} \subseteq {\cal F}(\Sigma)$. The intersection of all these ${\cal F}(\Sigma)$ is the smallest closure system ${\cal F}({\cal H})$ that contains ${\cal H}$. Given ${\cal H} \subseteq {\cal P}[n]$ and any $\varepsilon, \delta \in (0,1]$ there is by [KKS, Thm.15] a randomized polynomial algorithm that calculates a family $\Sigma$ of implications which is a Horn approximation of ${\cal H}$ with probability $1 - \delta$ and moreover satisfies $2^{-n}(|{\cal F}(\Sigma)| - |{\cal F}({\cal H})|) < \varepsilon$.
{\bf 3.6.3.4} It should be emphasized that current efforts in data mining do however concern ``approximations'' that involve parameters different from $\varepsilon$ and $\delta$ above. These approximations are called {\it association rules} and they involve a support-parameter $\sigma$ and a confidence-parameter $\gamma$ taking values in the interval $(0,1]$. The association rule $A \rightarrow B$ has {\it confidence} $\gamma = 0.57$ if in 57\% of all situations $A \subseteq X \in {\cal F}$ one has $B \subseteq X$. Our ordinary implications $A \rightarrow B$ coincide with the {\it strong} association rules, i.e. having $\gamma =1$. Even ordinary implications like $\{$butter, bread$\} \rightarrow \{$milk$\}$ in 1.1.2 can have a small {\it support} like $\sigma = 0.15$. Namely, when merely 15\% of all transactions actually feature {\it both} butter and bread, whereas in the other 85\% the implication ``trivially'' holds. See [B] for an introduction to Association Rule Mining that focuses on the underlying mathematics. See also [PKID2, Section 5.1].
\section{Selected topics}
See the introduction (1.2) for a listing of the five selected topics. More detailed outlooks will be provided at the beginning of each Subsection 4.1 to 4.5.
\subsection{Optimum implicational bases for specific closure operators and lattices}
We first show (4.1.1) that {\it each} lattice ${\cal L}$ is isomorphic to a closure system ${\cal F}_J$ on the set $J({\cal L})$ of its join-irreducibles. It thus makes sense to speak of implicational bases of lattices, and we shall investigate special classes of lattices in this regard. Actually, for some lattices ${\cal L}$ it is more natural to start out with a suitable closure operator $c$ and turn to ${\cal L} \simeq {\cal F}_c$ later. For us these ${\cal F}_c$'s are distributive (4.1.2), geometric (4.1.4) and meet-distributive (4.1.5) lattices respectively.
{\bf 4.1.1} We use a basic familiarity with posets, semilattices and lattices, see e.g. [G]. We denote by $\top$ the largest element of a join semilattice, and by $\bot$ the smallest element of a meet semilattice. Recall that a lattice is a poset $({\cal L}, \leq)$ which is both a join and meet semilattice with respect to the ordering $\leq$. In this case some relevant interplay between the sets $J({\cal L})$ and $M({\cal L})$ of join respectively meet-irreducibles occurs (see Expansion 12).
Each closure system ${\cal F} \subseteq {\cal P}(E)$ yields an example of a meet semilattice: The meet of $A, B \in {\cal F}$ (i.e. the largest common lower bound) obviously is $A \cap B$. The smallest element is $\bot = \bigcap {\cal F}$, and ${\cal F}$ has a largest element $\top = E$ as well. Whenever a meet semilattice happens to have $\top$ then it automatically becomes a lattice. The most important instance of this phenomenon concerns closure systems:
(28) \quad Each closure system ${\cal F} \subseteq {\cal P}(E)$ is a lattice $({\cal F}, \wedge, \vee)$ with meets and joins given by\\
\hspace*{1cm} $X \wedge Y = X \cap Y$ and $X \vee Y = \bigcap \{Z \in {\cal F}: Z \supseteq X \cup Y\}=c_{\cal F} (X \cup Y)$.
\begin{center}
\includegraphics[scale=0.4]{JoyOfImplicFig8}
\end{center}
Let us show that conversely {\it every} lattice ${\cal L}$ arises in this way. What's more, the set $E$ can often be chosen much smaller than ${\cal L}$. Thus for a lattice ${\cal L}$ and any $x \in J := J({\cal L})$ we put
$$J(x) : = \{p\in J : p \leq x\}.$$
We claim that $J(x) \cap J(y) = J(x \wedge y)$. As to $\supseteq$, from $x \wedge y \leq x$ follows $J(x \wedge y) \subseteq J(x)$. Similarly $J(x \wedge y) \subseteq J(y)$, and so $J(x \wedge y) \subseteq J(x) \cap J(y)$. As to $\subseteq$, take $p \in J(x) \cap J(y)$. Then $p \leq x$ and $p \leq y$ which (by the very definition of $\wedge$) implies that $p \leq x \wedge y$, and so $p \in J(x \wedge y)$. If $x \leq y$ then $J(x) \subseteq J(y)$. If $x \not\leq y$ then each $p \in {\cal L}$ minimal with the property that $p \leq x, p \not\leq y$ is easily seen to be join irreducible. Hence $x \not\leq y$ implies $J(x) \not\subseteq J(y)$. Summarizing we see\footnote{Switching from ${\cal F}_J$ to the dually defined ${\cal F}_M$ (see 4.1.6) is sometimes more beneficial.} that:
(29) \quad For each lattice ${\cal L}$ the set system ${\cal F}_J: = \{J(x): x \in {\cal L}\}$ is a closure system and $x \mapsto J(x)$\\
\hspace*{1cm} is a lattice isomorphism from $({\cal L},\wedge, \vee)$ onto $({\cal F}_J, \cap, \vee)$.
Following [AN1] we call ${\cal F}_J$ the {\it standard} closure system coupled to the lattice ${\cal L}$ (recall $J = J({\cal L})$). The standard closure system ${\cal F}_J$ of ${\cal L}$ in Fig.8(a) is shown in Fig.8(b). Now let $c_J: {\cal P}(J) \rightarrow {\cal P}(J)$ be the {\it standard} closure operator coupled to ${\cal F}_J \subseteq {\cal P}(J)$. Explicitly
(30) \quad $c_J(\{ p_1, \cdots, p_n\}) = J(p_1 \vee p_2 \vee \cdots \vee p_n)$
for all subsets $\{p_1, \cdots, p_n\} \subseteq J$. For instance $c_J(\{p_2, p_5\}) = J(u) = \{p_1, p_2, p_3, p_5, p_6\}$ in Fig.8(a).
We emphasize that not every closure operator $c$ is ``isomorphic'' to one of type $c_J$, see Expansion 14. Each $c_J$-quasiclosed subset of $J$ clearly is an order ideal of $(J, \leq)$. This invites us to replace each implication $P \rightarrow (c_J (P) \setminus P)$ in $\Sigma'_{GD}$ by $\max (P) \rightarrow \max (c_J(P))$. Along these lines one can associate with each standard closure system ${\cal F}_J$ a (generally not unique) {\it $K$-base} $\Sigma_K$ which stays minimum but satisfies $s(\Sigma_K) \leq s (\Sigma'_{GD})$. See [AN1, Sec.5]. By definition the {\it binary part} of a family $\Sigma$ of implications is $\Sigma^b: = \{(A \rightarrow B) \in \Sigma : |A| = 1\}$. As shown in [AN1, Sec.4], for standard closure spaces the binary parts of implication bases can be ``optimized independently'' to some extent. That relates to Open Problem 3 in Expansion 15.
We now discuss four types of lattices or closure operators for which the structure of the optimum implicational bases is known. These are in turn
all distributive, all modular, some geometric, and some meet-distributive lattices.
{\bf 4.1.2} A closure operator $c: {\cal P}(E) \rightarrow {\cal P}(E)$ is {\it topological} if $c(X \cup Y) = c(X) \cup c(Y)$ for all $X, Y \in {\cal P}(E)$. For instance, if $\Sigma$ consists of {\it singleton-premise} implications as in 3.2.2 then $c(\Sigma, -)$ is easily seen to be topological. Conversely, if $c$ is topological then by iteration $c(\{x_1, \cdots, x_n\}) = c(\{x_1 \}) \cup \cdots \cup c(\{x_n\})$, and so $\Sigma = \{\{x\} \rightarrow c(\{x\}): x \in E\}$ is a
base for $c$.
Furthermore, for $X = c(X)$ and $Y = c(Y)$ in ${\cal F}_c$ it follows from (28) that
$X \vee Y \ = \ c (X \cup Y) = c(X) \cup c(Y) = X \cup Y.$
By (28) always $X \wedge Y = X \cap Y$, and so ${\cal F}_c$ is a sublattice of the distributive lattice $({\cal P}(E), \cup, \cap)$, which thus must be distributive itself.
In Expansion 15 we show that conversely {\it every} distributive lattice ${\cal L}$ is isomorphic to a sublattice of ${\cal P}(J)$, and we determine the unique optimum base $\Sigma_J$ of ${\cal L}$.
{\bf 4.1.3} A lattice ${\cal L}$ is {\it modular} if it follows from $x \leq z$ that $(x \vee y) \wedge z = x \vee (y \wedge z)$. For instance the lattice of all submodules of an $R$-module is modular. Furthermore, each distributive lattice is modular.
The $(n+2)$-element lattice consisting of $n$ atoms and $\bot, \top$ will be denoted by $M_n$. It is modular but not distributive for $n \geq 3$. In fact every modular but nondistributive lattice has $M_3$ as a sublattice. For any lattice ${\cal L}$ and any $x \in {\cal L}\backslash \{\bot\}$ we define $x_\ast$ as the meet of all lower covers of $x$. We call $x \in {\cal L}$ an $M_n${\it -element} if the interval $[x_\ast, x]$ is isomorphic to $M_n$ for some $n \geq 3$. According to [W2] each optimum base $\Sigma$ of a modular lattice is of type $\Sigma = \Sigma_J \cup \Sigma_{HW}$ where $\Sigma_J$ is as in Expansion 15, and the implications constituting $\Sigma_{HW}$ are as follows. Coupled to each $M_n$-element $x$ choose ${n \choose 2}$ suitable implications of type $\{p,q\} \rightarrow \{v\}$. They are not uniquely determined by $x$ but they all satisfy $p \vee q = x$ among other restrictions. To fix ideas, the lattice ${\cal L}_0$ in Fig. 8(a) is modular and one possible optimum base is $\Sigma = \Sigma_J \cup \Sigma_{HW}$ where $\Sigma_{HW}$ contains the nine implications
$$\begin{array}{lll}
\{p_2, p_5\} \rightarrow \{p_6\}, & \{p_2, p_6\} \rightarrow \{p_5 \}, & \{p_5, p_6\} \rightarrow \{p_2\},\\
\\
\{p_3, p_7\} \rightarrow \{p_8\}, & \{p_3, p_8\} \rightarrow \{p_7 \}, & \{p_7, p_8\} \rightarrow \{p_3\},\\
\\
\{p_6, p_8\} \rightarrow \{p_9\}, & \{p_6, p_9\} \rightarrow \{p_8 \}, & \{p_8, p_9\} \rightarrow \{p_6\}.\end{array}$$
It is convenient to think of the $n$ join-irreducibles underlying the ${n \choose 2}$ implications coupled to a fixed $M_n$-element as a {\it line} $\ell$. These lines have properties akin to the lines occurring in projective geometry (see also 4.1.4). Modular lattices which are freely generated by a poset (in a sense akin to 4.2) are economically computed by combining Theorem 5 with the technique of 4.4. A preliminary version of this work in progress is in [arXiv: 1007.1643.v1].
{\bf 4.1.4} A closure operator $c: {\cal P}(E) \rightarrow {\cal P}(E)$ is a {\it matroid} (operator) if it satisfies this {\it exchange axiom} for all $X \subseteq E$ and $x,y \in E$:
(31) \quad $(y \in c(X \cup \{x\})$ and $y \not\in c(X) ) \quad \Rightarrow \quad x \in c(X \cup \{y\})$
As a consequence each minimal generating set of $E$ (or $X = c(X)$) is maximal independent. Thus for matroids the word ``among'' in 3.3.2 can be replaced by ``exactly''. The edge set $E$ of any graph yields a ``graphic'' matroid $c: \ {\cal P}(E) \rightarrow {\cal P}(E)$ whose circuits in the sense of Expansion 5 coincide with the circuits in the usual graph theoretic sense. As another example, let $F$ be any field and let $E \subseteq F^n$ be any (finite) subset which need not be a subspace. If for $X \subseteq E$ we define $c(X) : = \mbox{span}(X) \cap E$, then the restriction $(E, c)$ is an {\it $F$-linear} matroid. The particular features of $(E,c)$ depend on the kind of subset $E$ chosen. For instance, if $E$ is a linearly independent set then $c(X) = X$ for all $X \subseteq E$. Another extreme case is $E = F^n$. Then
$\Sigma: = \{ \{x,y\} \rightarrow \mbox{span}(\{x,y\}): \ x, y \in F^n \}$
is a base of $c$ and ${\cal F}(\Sigma)$ is the {\it complemented} modular lattice\footnote{In fact, for {\it any} matroid $c$ the coupled lattice is complemented but usually only {\it semi}-modular. Such lattices are also called {\it geometric}.} of all subspaces of $F^n$, thus a special case of 4.1.3. In fact, the $M_n$-elements of ${\cal F}(\Sigma)$ are the rank two subspaces ($=$ projective lines). The features of an $F$-linear matroid also depend on the field of scalars $F$. For $F = \mathbb{Z}_2$ one speaks of {\it binary matroids}, in which case the family $\Sigma$ of implications $(K\backslash \{x\}) \rightarrow \{x\}$, where $K$ ranges over all {\it closed} circuits $K$ and $x$ ranges over $K$, is the unique optimum implication base of $(E, c)$, see [W3]. It is well known that each graphic matroid is binary, but not conversely. For the many facets of matroids see [S, Part IV]. We mention in passing that [S] arguably is the most comprehensive, and likely the most readable book on combinatorial optimization around.
{\bf 4.1.5} A closure operator $c: {\cal P}(E) \rightarrow {\cal P}(E)$ is a {\it convex geometry} (operator) if it satisfies this {\it anti-exchange axiom}:
(32) \quad If $x \neq y$ \ and \ $x,y \not\in c(X)$ \ and \ $y \in c(X \cup \{x\}) \ \mbox{then} \ x \not\in c(X \cup \{y\})$.
The kind of operator $c$ in 2.2.5 is the name-giving example of a convex geometry.
As to another example, it was observed by Bernhard Ganter (around 1990, unpublished) and also follows from [SW, Lemma 7.7] that each closure operator $c$ of poset type (see 3.5.1) is a convex geometry.
One deduces from (32) that each $X \subseteq E$ contains the {\it unique} minimal generating set $ex(X)$ of $c(X)$. In particular $|{\cal F}_c| = |\mbox{Indep}(c)|$ in 3.3.1. The elements of $ex(X)$ are the {\it extreme} points of $X$. If $X$ is closed then so is $X \backslash \{x\}$ for all $x \in ex (X)$.
Each circuit $K$ of $c$ (Expansion 5) has a {\it unique} root $e$. If one needs to emphasize $e$, one speaks of the {\it rooted circuit} $(K, e)$. Other than for arbitrary closure operators, if $U$ is a stem of $e$ in a convex geometry then $(U \cup \{e\}, e)$ is a rooted circuit. It follows [W3, Cor.13] that the family of all rooted circuits matches the family $\Sigma_{cd}^u$ of all prime implicates. A rooted circuit $(K, e)$ is {\it critical} if $c(K) \backslash \{e, x\}$ is closed for all $x \in K \backslash \{e\}$. Recall the definition of closure-minimal in 3.3. As we show in Expansion 16, for each rooted circuit $(K, e)$ it holds that:
(33) \quad $(K, e)$ is critical $\Leftrightarrow \ c(K)\backslash \{e\}$ is quasiclosed $\Leftrightarrow$ the stem $K \backslash \{e\}$ of $e$ is closure-minimal
As opposed to the antimatroid side of the coin (Expansion 16), note that the subfamily
$$\Sigma_{crci} : = \{(K \backslash \{e\}) \rightarrow \{e\} : (K, e) \ \mbox{is critical rooted circuit of} \ c\}$$
of $\Sigma_{cd}^u$ usually is {\it no} implicational base of $c$. For instance, the set $\Sigma_{cd}^u$ of prime implicates of the convex geometry $c$ in 2.2.5 is the union of all sets $\{T \rightarrow \{e\} : e \in c(T) \backslash T\}$ where $T$ ranges over ${\cal T}$. If such a rooted circuit $(T,e)$ has $c(T) = T \cup \{e\}$ then $c(T) \setminus \{e\}$ is quasiclosed. Conversely, assume $c(T)$ contains a point $f \neq e$. By considering the triangulation of $ch(T)$ induced by $f$ (as in 2.2.5) one sees that $e \in (c(T) \setminus \{e\})^\bullet$, and so $c(T) \setminus \{e\}$ is {\it not} quasiclosed. It follows from (33) that $\Sigma_{crci} = \{T\rightarrow \{e\} : T \in {\cal T}, c(T) = T \cup \{e\} \}$. Hence $\Sigma_{crci}$ is contained in every base of prime implicates but is not itself a base (unless the point configuration in $\mathbb{R}^2$ is rather trivial).
We mention that closure-minimality of (order-minimal) stems also features in the so-called $E$-basis of [A] and [AN1]. The convex geometries of type 2.2.5 and 3.5.1 can be generalized (Expansion 18) but the results and proofs become quite technical. This is one reason for dualizing (29) in 4.1.6.
{\bf 4.1.6} For any lattice ${\cal L}$ and $x \in {\cal L}$ put $M(x) : = \{m \in M({\cal L}) : m \geq x\}$. Dually to (29), ${\cal F}_M : = \{M(x) : x \in {\cal L}\}$ is a closure system which is bijective to ${\cal L}$ under the map $x \mapsto M(x)$. In particular, if ${\cal L}$ is meet-distributive (thus ${\cal L}$ ``is'' a convex geometry according to Expansion 16) then a crisp implication base $\Sigma = \Sigma_M \cup \Sigma_{JNW}$ of ${\cal F}_M$ is obtained as follows\footnote{Mutatis mutandis, this is Theorem 2 in [W4]. The acronym JNW means Janssen-Nourine-Wild.}. First $\Sigma_M$ is the dual of $\Sigma_J$ from Expansion 15. Second, each doubleton $\{m, m_0\} \subseteq M({\cal L})$ which admits a (unique if existing) $p\in J({\cal L})$ with $p \updownarrow m$ and $p \updownarrow m_0$, induces two implications. One is $\{m\} \cup \mbox{ucov}(m_0) \rightarrow \{m_0\}$, the other $\{m_0\} \cup \mbox{ucov}(m) \rightarrow \{m\}$. Here $\updownarrow$ is as in Expansion 12, and say $\mbox{ucov}(m)$ is the set of upper covers of $m$ in the poset $(M({\cal L}), \leq)$. All these implications make up $\Sigma_{JNW}$. In view of $|M({\cal L})| \geq |J({\cal L})|$ the philosophy in 4.1.6 is similar to 3.6.3.2 which also trades a larger universe for a smaller implication base.
\subsection{Excursion to universal algebra: Finitely presented semilattices and subalgebra lattices}
First we show (4.2.1) that finding an implicational base for a lattice ${\cal L}$ in the sense of 4.1 means finding a presentation for ${\cal L}$, viewed as $\vee$-semilattice, in the sense of universal algebra. Afterwards we show (4.2.2) how subalgebra lattices and homomorphisms between algebras can be calculated by setting up appropriate implications.
{\bf 4.2.1} For starters imagine a $\vee$-semilattice that has a set $J$ of (not necessarily distinct) generators $p_1, \ldots, p_6$ that satisfies this set ${\cal R}$ of (inequality) relations:
(34) \quad $p_3 \geq p_5, \quad p_1 \vee p_5 \geq p_4, \quad p_6 \geq p_3, \quad p_2 \vee p_3 \geq p_1$
An example of such a semilattice $S_1$ (with say $p_2$ replaced by $2'$) is given in Figure 9 on the left. Notice that all relations hold; e.g. $p_2 \vee p_3 \geq p_1$ holds because $p_2 \vee p_3 = p_2 > p_1$.
It isn't a priori clear whether there is a {\it largest} such semilattice, but universal algebra tells us it must exist.
It is the so-called {\it relatively free} $\vee$-semilattice $F = FS(J, {\cal R})$ with set of generators $J$ and subject to the relations in ${\cal R}$, shown on the right in Figure 9 (discard $\emptyset$). Every other $\vee$-semilattice satisfying ${\cal R}$ must be an epimorphic image of $F$; in our case the definition of the epimorphism $f: \ F \rightarrow S_1$ is that $\circ$ on the right maps to $\circ$ on the left, $\bullet$ maps to $\bullet$, and so forth.
Each ($\vee$-semilattice) inequality, like $p_1 \vee p_5 \geq p_4$, can be recast as an identity $p_1 \vee p_5 = p_1 \vee p_5 \vee p_4$. Conversely each identity can be replaced by two inequalities. If in turn inequalities $a_1 \vee \cdots \vee a_s \geq b_1 \vee \cdots \vee b_t$ are viewed as implications $\{a_1, \cdots, a_s \} \rightarrow \{b_1, \cdots, b_t\}$ then we can state the following.
\begin{center}
\includegraphics[scale=0.6]{JoyOfImplicFig9}
\end{center}
\begin{tabular}{|l|} \hline \\
{\bf Theorem 5 :} The relatively free $\vee$-semilattice $FS(J, {\cal R})$ is isomorphic to the\\
$\vee$-semilattice ${\cal F}(\Sigma) \backslash \{\emptyset\}$. Here the family $\Sigma$ is obtained from ${\cal R}$ by replacing
each \\
inequality in ${\cal R}$ by the matching implication, and each identity in ${\cal R}$ by two\\
implications $A \rightarrow B$ and $B \rightarrow A$.\\
\\ \\ \hline \end{tabular}
The proof of Theorem 5 is given in [W5, Thm.5]. The closure system ${\cal F}(\Sigma)$ can be calculated from $\Sigma$ in compressed form as explained in 4.4. Specifically for the $\Sigma$ matching the inequalities in (34), thus $\Sigma = \{3 \rightarrow 5, 15 \rightarrow 4, 6 \rightarrow 3, 23 \rightarrow 1\}$, one gets ${\cal F}(\Sigma)$ as $r_9 \cup r_{10} \cup r_{11} \cup r_{12}$ for certain set systems $r_9$ to $r_{12}$ in Table 1 of 4.4. We mention that $FS(J, {\cal R})$ is also isomorphic to the semilattice $({\cal P}(E) \backslash \{\emptyset\}, \cup )$ modulo a congruence relation $\theta$. Here $E = [6]$ and $\theta$ is as in (11) where $c$ is $c(\Sigma, -)$ with $\Sigma$ from Theorem 5.
See also Expansion 17.
{\bf 4.2.2} As to subalgebra lattices, we only peek at semigroups but the ideas carry over to general algebraic structures (and what concerns homomorphisms, also to graphs). Suppose we know the multiplication table (Cayley table) of a semigroup $(S, \ast)$ where $S = \{a_1, a_2, \cdots, a_n\}$. Obviously the subsets of $S$ closed with respect to the $n^2$ implications $\{a_i, a_j\} \rightarrow \{a_i \ast a_j\}$ are exactly the subsemigroups of $S$.
The algorithm from 4.4 can thus be invoked to give a compressed representation of all subsemigroups.
In another vein, sticking again to semigroups $(S, \ast)$ and $(S', \bullet)$ for simplicity, the same algorithm also achieves the enumeration of all homomorphisms $f: S \rightarrow S'$. Namely, these $f$'s are exactly the functions\footnote{More precisely, imposing these $n^4$ implications yields the closure system ${\cal F}$ of all homomorphic {\it relations} $f \subseteq S \times S'$ in output-polynomial time. True, one needs to sieve the functions among them, but this is often feasible. As to the large cardinality $n^4$ of our family $\Sigma$ of implications, instead of calculating ${\cal F}$ as ${\cal F}(\Sigma)$ one may directly target $M({\cal F})$, see 3.6.2. All of this is work in progress.} $f \subseteq S \times S'$ which are closed with respect to all $n^4$ implications of type $\{(a,x), (b,y)\} \rightarrow \{(a \ast b, x \bullet y)\}$. How these ideas compete with other computational tools in algebra (e.g. consult the Magma Handbook) remains to be seen. They will fare the better the fewer structural properties of the algebras at hand can be exploited. Put another way, there are greener pastures for our approach than e.g. the beautiful theory of subgroup lattices of Abelian groups [Bu].
\subsection{Ordered direct implicational bases}
We start by introducing order-minimal prime implicates, thus a third kind besides the closure-minimal ones in 3.3 and the strong ones in Expansion 6. To minimize technicalities we focus on the case of standard closure operators $c_J$. Then the prime implicates of $c_J$ are the nonredundant join covers in the lattice ${\cal L}$ that underlies $c_J$. Specifically, $\{2, 5\}$ in Figure 10 (taken from [ANR]) is a {\it join cover} of 6 since $2 \vee 5 \geq 6$. It is nonredundant since $2 \not\geq 6$ and $5 \not\geq 6$. (Generally, {\it nonredundant} means that no proper subset is a join cover.) Correspondingly $\{2,5\} \rightarrow \{6\}$ is a prime implicate of $c_J$. However $\{2, 5\} \rightarrow \{6\}$ is not {\it order-minimal} since $4 < 5$ and still $\{2, 4\} \rightarrow \{6\}$ is a prime implicate. The general definition of ``order minimal'' is the obvious one. The relevance of this concept was first observed in [N, p.525]. Notice that $\{2, 4\} \rightarrow \{6\}$ is not closure-minimal since $\{2,3\} \rightarrow \{6\}$ is a prime implicate with $2 \vee 3 < 2 \vee 4$. Conversely a closure-minimal prime implicate need not be order-minimal.
We are now in a position to address the topic in the title.
Recall from 3.3 that the {\it direct} basis $\Sigma_{cd}$ of a closure operator $c$ has the advantage that $c(\Sigma_{cd}, X) = X'$ as opposed to $c(\Sigma, X) = X^{'' \cdots '}$ (as to $X'$, see (6)). However the drawback of $\Sigma_{cd}$ is its usually large cardinality. As a kind of compromise we present {\it ordered direct} implicational bases $\Sigma$. The key is a specific {\it ordering} in which the implications of $\Sigma$ must be applied exactly once: For given $X \subseteq E$ applying the first implication $A_1 \rightarrow B_1$ of $\Sigma$ to $X$ yields $X_1 \supseteq X$. Applying $A_2 \rightarrow B_2$ to $X_1$ yields $X_2 \supseteq X_1$. And so forth until applying the last implication $A_n \rightarrow B_n$ to $X_{n-1}$ yields $X_n \supseteq X_{n-1}$ which is the correct closure of $X$. Of course such a $\Sigma$ is also an implication base in the ordinary sense.
Listing (in any order) all\footnote{In certain circumstances, one or both ``all'' in this sentence can be weakened (by restricting ``any order'').} {\it binary} prime implicates $x \rightarrow y$ (thus $x > y$), and then listing (in any order) all order-minimal prime implicates, yields a particular ordered direct implicational base $\Sigma_D$ which is called a $D${\it -basis}. The ``$D$'' derives from the so-called $D$-relation discussed in Expansion 18.
\begin{center}
\includegraphics[scale=0.6]{JoyOfImplicFig10}
\end{center}
In our example one possibility is
(35) \quad $\Sigma_D = (2 \rightarrow 1, \ 6 \rightarrow 3, \ 6 \rightarrow 1, \ 5 \rightarrow 4, \ 3 \rightarrow 1, \ 14 \rightarrow 3, \ 24 \rightarrow 5, \ 15 \rightarrow 6, \ 24 \rightarrow 6, \ 23 \rightarrow 6)$.
Applying $\Sigma_D = (A_1 \rightarrow B_1, \cdots, A_{10} \rightarrow B_{10})$ in this order to say $X = \{2,5\}$ yields
$$X_1=X_2 = X_3 = 251, \quad X_4=X_5 = 2514, \quad X_6=X_7 = 25143, \quad X_8 = X_9 = X_{10} = 251436$$
In contrast, ordinary forward chaining (2.2) needs three runs to find the closure:
$$X'= 2514, \quad X'' = 251436, \quad X''' = 251436 = X'' = c(X)$$
Notice that the underlying unordered set of any $D$-basis coincides with $\Sigma_{cd}^u$ if $J({\cal L})$ is an antichain: Then there is no binary part, and so each member of $\Sigma_{cd}^u$ is trivially order-minimal. There is actually no need to stick to bases of prime implicates. Given any basis $\Sigma$ of $c_J$ one can aim for an ordered direct base by suitably ordering $\Sigma$, and perhaps repeat some implications. Unfortunately the canonical base $\Sigma_{GD}$ need not be orderable in this sense [ANR, p.719].
\subsection{Generating ${\cal F}(\Sigma)$ in compact form}
Calculating ${\cal F}(\Sigma)$ amounts to generating the model set Mod$(f)$ of a pure Horn function $f$ given in CNF (see 3.4). As glimpsed this has applications in Formal Concept Analysis, Learning Theory, and Universal Algebra. One could be tempted to calculate ${\cal F}(\Sigma)$ from $\Sigma$ with NextClosure (Expansion 4). But this yields the closed sets {\it one-by-one} which is infeasible when ${\cal F}(\Sigma)$ is large.
In 4.4.1 we thus outline an algorithm for {\it compactly} generating ${\cal F}(\Sigma)$ from $\Sigma$. In 4.4.2 we discuss how to get a compact representation of ${\cal F}$ not from $\Sigma$, but from a generating set ${\cal H} \subseteq {\cal F}$.
{\bf 4.4.1} A $012$-{\it row} like $(0,2,1,1,2,2)$ is a succinct representation for the interval $\{U \subseteq {\cal P}[6]: \{3,4\} \subseteq U \subseteq \{3,4,2,5,6\}\}$, which thus has cardinality $2^3$. Each ``2'' in $(0,2,1,1,2,2)$ is used as a {\it don't care} symbol (other texts use ``$\ast$'') which indicates that both 0 and 1 can be chosen. For instance, if the clause $\overline{x}_1 \vee x_4 \vee \overline{x}_5$ (thus $15 \rightarrow 4$) is viewed as a Boolean function of $x_1, \cdots, x_6$, then Mod$(\overline{x}_1 \vee x_4 \vee \overline{x}_5)$ clearly is the disjoint union of these four $012$-rows:
\begin{tabular}{|c|c|c|c|c|c|}
1 & 2 & 3& 4 & 5 & 6\\ \hline \hline
0 & 2 & 2 & 2& 0 & 2 \\ \hline
0 & 2 & 2 & 2 & 1 & 2 \\ \hline
1 & 2 & 2& 2& 0 & 2\\ \hline
1 & 2 & 2 & 1 & 1& 2 \\ \hline \end{tabular}
If we let the $n$-{\it bubble} $(n, n, \cdots, n)$ mean ``{\it at least one 0 here}'' then the first three rows can be compressed to the $012n${\it -row} $r_1$ in Table 1. It thus follows that Mod$(\overline{x}_1 \vee x_4 \vee \overline{x}_5)$ is the disjoint union of $r_1$ and $r_2$ in Table 1. Consider the pure Horn function $f: \{0,1\}^6 \rightarrow \{0,1\}$ given by
$$f(x) : = (\overline{x}_1 \vee x_4 \vee \overline{x}_5) \wedge (x_1 \vee \overline{x}_2 \vee \overline{x}_3) \wedge (\overline{x}_3 \vee x_5) \wedge (x_3 \vee \overline{x}_6).$$
In order to calculate Mod$(f)$ we
need to ``sieve'' from $r_1$, and then from $r_2$, those bitstrings which also satisfy $x_1 \vee \overline{x}_2 \vee \overline{x}_3$. It is evident that this shrinks $r_1$ to $r_3 \cup r_4$ and does nothing to $r_2 =: r_5$. In $r_3$ the two $n$-bubbles are independent of each other and distinguished by subscripts.
\begin{tabular}{c|c|c|c|c|c|c|}
& 1 & 2 & 3 & 4 & 5 & 6 \\ \hline
& & & & & &\\ \hline
$r_1=$ & $n$ & 2 & 2 & 2& $n$ & 2\\ \hline
$r_2=$ & 1 & 2 & 2 & 1 & 1& 2\\ \hline
& & & & & & \\ \hline
$r_3=$ & $n_1$ & ${\bf n_2}$ & ${\bf n_2}$ & 2 & $n_1$ & 2\\ \hline
$r_4=$ & 1 & ${\bf 1}$ & ${\bf 1}$ & 2 & 0 & 2 \\ \hline
$r_5=$ & 1 & 2& 2 & 1 & 1& 2 \\ \hline
& & & & & & \\ \hline
$r_6=$ & $n$ & 2 & ${\bf 0}$ & 2 & $n$ & 2 \\ \hline
$r_7=$ & 0 & 0 & ${\bf 1}$ & 2 & 1 & 2\\ \hline
$r_8=$ & 1 & 2& 2& 1 & 1& 2 \\ \hline
& & & & & & \\ \hline
$r_9=$ & $n$ & 2 & 0 & 2 & $n$ & 0\\ \hline
$r_{10}=$ & 0 & 0 & 1 & 2& 1 & 2\\ \hline
$r_{11}=$ & 1& 2 & 2 & 1& 1& ${\bf 0}$ \\ \hline
$r_{12}=$ & 1 & 2& 1 & 1 & 1& ${\bf 1}$ \\ \hline \end{tabular}
Table 1: Using $012n$-rows to compress a closure system
Note that forcing the first component of $n_1 n_1$ to 1 in $r_4$ (due to $23 \rightarrow 1$) forces the second to 0. Imposing the constraint $\overline{x}_3 \vee x_5$ (i.e. $3 \rightarrow 5$) upon $r_3 \cup r_4 \cup r_5$ replaces $r_3$ by $r_6 \cup r_7$, deletes $r_4$, and leaves $r_5= r_8$ unscathed. Imposing the implication $6 \rightarrow 3$ upon $r_6 \cup r_7 \cup r_8$ yields $r_9 \cup r_{10} \cup r_{11} \cup r_{12} = \, \mbox{Mod}(f)$. We were lucky that $n_2 n_2$ didn't clash with $n_1n_1$, otherwise things would get uglier. Concerning the deletion of $r_4$, with some precautions the deletion of rows can be avoided, which is the main reason making the implication $n$-algorithm output-polynomial [W6]. The implication $n$-algorithm easily extends to a {\it Horn $n$-algorithm} which can handle impure Horn functions in the sense of 4.5. Concerning a speed-up for singleton-premise implications, see Expansion 19. As to connections to $M({\cal F})$ and CNF $\rightarrow$ DNF conversion, see Expansion 8 and 9 respectively.
{\bf 4.4.2} As to calculating ${\cal F}$ from a {\it generating} set ${\cal H} \subseteq {\cal F}$, the first idea that springs to mind is to use NextClosure or some other algorithm discussed in [KuO1]. However, this as before yields the closed sets one-by-one which is infeasible when ${\cal F}$ is large. Alternatively, one may calculate a base $\Sigma$ of ${\cal F}$ by either proceeding as in 3.6.3.1 or 3.6.3.2. Feeding $\Sigma$ to the implication $n$-algorithm yields a compact representation of ${\cal F}(\Sigma) = {\cal F}$. An analysis of the pro's and con's of these ways to enumerate ${\cal F}$ is pending.
\subsection{General Horn functions}
We discuss negative functions in 4.5.1 and then use them to define general Horn functions in 4.5.2. Theorem 6 says, in essence, that good old implications suffice to economically capture any impure Horn function; only {\it one} additional impure Horn clause is necessary.
{\bf 4.5.1} For any nonempty ${\cal H} \subseteq {\cal P}(E)$ the set ideal {\it generated} by ${\cal H}$ is ${\cal H}\downarrow \ : = \{U \subseteq E: (\exists U'\in {\cal H}) \ U \subseteq U'\}$. By 3.4.1 a Boolean function $g$ is negative if and only if Mod$(g)$ is a set ideal. Dually one defines {\it set filters}. Consider an arbitrary family $\Gamma$ of sets $A \subseteq E$ which we refer to as {\it complications}\footnote{This is handy ad hoc terminology which conveys a link to ``implications''.}.
Call $X \subseteq E$ a {\it noncover} (of $\Gamma$) if it doesn't cover any complication, i.e. $X \not\supseteq A$ for all $A \in \Gamma$. It is evident that the set ${\cal N}{\cal C}(\Gamma)$ of all noncovers is a set ideal ${\cal G}$.
Among all families $\Gamma'$ with ${\cal N}{\cal C}(\Gamma') = {\cal G}$ there is a smallest one; it obviously is the family $\Gamma_0$ of all minimal members of the set filter ${\cal P}(E)\backslash {\cal G}$. In particular $\Gamma_0$ is an antichain (no two distinct members of $\Gamma_0$ are comparable). Conversely, {\it each} set ideal ${\cal G}$ admits a unique antichain $\Gamma_0 \subseteq {\cal P}(E)$ of complications $A$ that yields ${\cal G}= {\cal N}{\cal C}(\Gamma_0)$. Put another way, each negative Boolean function $g: \{0,1\}^n \rightarrow \{0,1\}$ admits a {\it unique} irredundant CNF of {\it negative clauses}. For instance if $E = [7]$ and by definition the model set of $g: {\cal P}(E) \rightarrow \{0,1\}$ is the set ideal ${\cal N} {\cal C}(\{\{2,3,5\}, \{2, 4\}\})$, then $g = g(x_1, \ldots, x_7)$ has the unique irredundant CNF $(\overline{x}_2 \vee \overline{x}_3 \vee \overline{x}_5) \wedge (\overline{x}_2 \vee \overline{x}_4)$. We see that the ``representation theory'' of negative Boolean functions $g$ via complications ($=$ negative clauses) is much simpler than the representation theory of pure Horn functions $f$ via implications ($=$ pure Horn clauses).
{\bf 4.5.2} This leads us to the definition of a {\it Horn function} $h: \{0,1\}^n \rightarrow \{0,1\}$ as one that can be represented as a conjunction $h = f \wedge g$ of a pure Horn function $f$ with a negative function $g$. One checks that pure Horn functions and negative functions are special cases of Horn functions. It is evident that Mod$(h) = {\cal N}{\cal C}(\Gamma) \cap {\cal F}(\Sigma)$ where $\Sigma$ and $\Gamma$ are such that ${\cal F}(\Sigma)= \mbox{Mod}(f)$ and ${\cal N}{\cal C}(\Gamma) = \, \mbox{Mod}(g)$.
We call $\Sigma \cup \Gamma$ a {\it base} of $h$. Thus our previous bases $\Sigma$ become the special case where $\Gamma = \emptyset$. With Mod$(f)$ and Mod$(g)$ also Mod$(h)$ is a subsemilattice\footnote{The only difference between subsemilattices ${\cal S} \subseteq {\cal P}(E)$ and closure systems ${\cal F} \subseteq {\cal P}(E)$ is that subsemilattices need not contain $E$. The usefulness of meet-irreducible sets, also in the impure case, remains.} of $({\cal P}[n], \cap )$. But Mod$(h)$ can be empty, and so different from 3.4 a general Horn function $h$ need not be satisfiable. The good news is, because ${\cal F}(\Sigma)$ has a {\it smallest} member $\bigcap {\cal F}(\Sigma)$, it follows that Mod$(h) = \emptyset$ iff $\bigcap {\cal F}(\Sigma)$ contains some $A \in \Gamma$. Since $\bigcap {\cal F}(\Sigma)$ can be calculated from $\Sigma$ as $c(\Sigma, \emptyset)$, satisfiability can be tested in linear time. (In plenty of texts this simple state of affairs is veiled by clumsy notation.)
Observe that the above representation $h= f\wedge g$ is not unique since the subsemilattice ${\cal S} = \,\mbox{Mod}(h)$ can be written as an intersection ${\cal F} \cap {\cal G}$ of a closure system ${\cal F}$ with a set ideal ${\cal G}$ in many ways. The most obvious way is ${\cal S} = \bot \cap ({\cal S} \downarrow )$ where $\bot$ is the closure system ${\cal S} \cup \{E\}$. (The notation $\bot$ foreshadows the framework (39) in Expansion 20.)
The parameters defined for pure Horn functions $f$ in 3.4.4.1 carry over to general Horn functions $h$. Here we are only interested in
$$ca(h): = \min \{|\Sigma \cup \Gamma |: \ \Sigma \cup \Gamma \ \mbox{is a base of} \ h\}.$$
Note that $ca(h) = \sigma (h)$ in [CH, p.297], i.e. the minimum number of ``source sides'' possible.
\begin{tabular}{|l|} \hline \\
{\bf Theorem 6:} Let $h : {\cal P}(E) \rightarrow \{0,1\}$ be any Horn function, and let $f_\bot$ be the {\it pure} Horn function\\
defined by Mod$(f_\bot) := \, \mbox{Mod}(h) \cup \{E\}$. Then $ca(f_\bot) \leq ca(h) \leq ca(f_\bot) +1$.\\ \\ \hline \end{tabular}
{\it Proof.} Since Mod$(h) \subseteq {\cal P}(E)$ is a subsemilattice, Mod$(h) \cup \{E\}$ is indeed a closure system. Let $f_\bot$ be the induced pure Horn function, and let $\Sigma_0$ be a base of implications for Mod$(h) \cup \{E\}$ of minimum cardinality $ca(f_\bot)$. We claim that $\Sigma_0 \cup \{E\}$ is a base of $h$: Indeed, if say $E=[n]$ then spelling out the complication $E$ gives $\overline{x}_1 \vee \cdots \vee \overline{x}_n$. It kills exactly one $\Sigma_0$-closed set, namely $E$. Therefore $ca(h) \leq ca(f_\bot) +1$.
Conversely, let $\Sigma \cup \Gamma$ be a base of $h$ of cardinality $ca(h)$. Putting $\Sigma' : = \{A \rightarrow E: A \in \Gamma \}$, it suffices to show that $\Sigma \cup \Sigma'$ is a base of $f_\bot$; then $ca(f_\bot) \leq |\Sigma \cup \Sigma'| = ca(h)$ as claimed. First, each model $X \subseteq E$ of $\Sigma \cup \Gamma$ remains a model of $\Sigma \cup \Sigma'$ because $A \not\subseteq X$ for all $(A \rightarrow E) \in \Sigma'$. Second, let $X \subseteq E$ be a model of $\Sigma \cup \Sigma'$ which is not a model of $\Sigma \cup \Gamma$. Then $A \subseteq X$ for some $A \in \Gamma$, and so $X = E$ in view of $(A \rightarrow E) \in \Sigma'$. \quad $\square$
Theorem 6 suggests a simple procedure to ``almost minimize'' a given base $\Sigma \cup \Gamma$ of $h$: Take the base $\Sigma \cup \Sigma'$ of $f_\bot$ and replace it by a minimum base $\Sigma_0$ e.g. by using Shock's algorithm (Expansion 11). Then $\Sigma_0 \cup \{E\}$ is a base of $h$ of cardinality at most $ca(h) +1$.
In Expansion 20 we indicate that calculating the precise value of $ca(h)$ is comparatively tedious.
{\bf 4.5.3} An analogue of the Guigues-Duquenne base (3.2) is introduced in [AB] for general Horn functions $h$. It is shown that a well-known query learning algorithm of Angluin et al. in fact always produces this base, independently of the counterexamples it receives.
\section{Omitted proofs and various expansions}
{\bf Expansion 1}. We note that ${\cal F}_c$ as defined in (3) is a closure system even when $c$ is not idempotent. See [W7, Expansion 1] for details.
{\bf Expansion 2}. As to the algorithmic complexity of calculating $c(\Sigma, S)$, let us merely look at the partial problem of calculating $S'$ from $S$. If $|E| = m$ then it costs time $O(m)$ to check whether or not $A_i \subseteq S$ for some fixed index $i$. Thus for $\Sigma$ as in (5) it costs $O(nm)$ to get $S'$ from $S$ in the ``naive way'' suggested by definition (6). If we think of the premises $A_i$ as the rows of an $n \times m$ matrix $M$ with entries $0$ and $1$, then the naive way amounts to process $M$ row-wise. It isn't hard to see [W1, p.114] how a {\it column-wise} processing of $M$ also yields $S'$. The theoretic cost is the same, i.e. $O(mn) = O(nm)$, but in practice the column-wise way performs better the larger $n/m$ is. For instance, it takes more time to process a million sets of cardinality 100 (since they need to be ``fetched'' individually) than to process only 100 sets albeit each of cardinality a million. This trick, known as {\it vertical layout} in the Frequent Set Mining community (also observed in [W1]), often works when many but small sets need to be manipulated. In the Relational Database community the algorithm {\it LinClosure} [MR2] to calculate $c(\Sigma, S)$ has become the standard. Whether LinClosure or vertical layout or something else is best, depends on the shape of $\Sigma$ and a smart implementation of vertical layout.
{\bf Expansion 3.} Recall from Boolean logic (or other logic frameworks) that a formula $\psi$ is a ``consequence'' of a formula $\phi$ (written $\phi \vDash \psi$) if every ``structure'' that satisfies $\phi$ also satisfies $\psi$. This is the {\it semantic} level. It contrasts with the {\it syntactic} level where a formula $\psi$ is ``derivable'' from a formula $\phi$ (written $\phi \vdash \psi$) if $\psi$ can be obtained from $\phi$ with certain ``inference rules'' in a step-by-step manner. Two pages of details can be found in [W7, Expansion 3].
{\bf Expansion 4}. One algorithm for enumerating all closed sets, called NextClosure, was devised by B. Ganter in 1984 and became a cornerstone of FCA. Its key idea is to generate the closed sets in lexicographic order. See [GW, Thm.5], from which one also readily deduces the following:
(36) \quad Suppose the closure operator $c: {\cal P}[n] \rightarrow {\cal P}[n]$ is such that calculating $c(X)$ takes time at\\
\hspace*{.9cm} most $T$ for any $X \subseteq [n]$. Then NextClosure enumerates all $N = |{\cal F}_c|$ many closed sets in\\
\hspace*{.9cm} output polynomial time $O(NTn)$.
One benefit of NextClosure is that it doesn't matter in which way the closure operator $c$ is provided. Thus $c$ could be given as $c(U) = \bigcap \{S \in {\cal H}: \ S \supseteq U\}$ where ${\cal H}$ is a $\cap$-generating set of ${\cal F}$ (first way), or $c(U) = c(\Sigma, U)$ where $\Sigma$ is an implication base (second way), or any other way. In fact $c$ itself can be a certain selfmap of ${\cal P}(E)$ more general than a closure operator, see [GR].
As to the first way, apart from NextClosure and Dowling's algorithm (2.1.1), many other methods to construct ${\cal F}({\cal H})$ from ${\cal H}$ are evaluated in [KuO1]. As to the second way, it usually cannot compete with the compressed calculation of ${\cal F}(\Sigma)$ in Section 4.4. However, the issue (3.6.3) is often how to find an implication base $\Sigma$ of ${\cal F}$ in the first place. Another popular application of NextClosure is {\it attribute exploration} [GW, p.85]. This particular kind of Query Learning strives to compute the canonical base $\Sigma_{GD}$ of some hidden closure system ${\cal F}$. Unfortunately, as a not always welcome side product, the whole of ${\cal F}$ gets calculated one by one along the way. Impressive strides to avoid this succeed for the kind of ``modern'' attribute exploration proposed in [RDB] and [AN2].
{\bf Expansion 5} A non-independent set is {\it dependent}, and minimal dependent sets are {\it circuits}. This terminology [W3] is motivated by the established use of ``circuit'' for matroids (4.1.4) and convex geometries (4.1.5). Let now $K$ be a circuit of $c$. Since $K$ is dependent there is at least one $e \in K$ with $e \in c(K\backslash \{e\})$. The minimality of $K$ implies that $U : = K\backslash \{e\}$ is a {\it stem} with root $e$. Thus if
$$\mbox{roots}(K) : = \{e \in K: \ e \in c(K \backslash \{e\})\, \},$$
then $|\mbox{roots}(K) | \geq 1$ and each $e \in \, \mbox{roots}(K)$ induces a {\it root-stem-partition} $K = \{e\} \cup U$. Observe that an arbitrary root $e$ with stem $U$ need {\it not} yield a circuit $K = U \cup \{e\}$. For instance, let $c$ be the closure operator induced by the implications $\{1,2\} \rightarrow \{3\}$ and $\{3\} \rightarrow \{2\}$. Then $\{1,2\}$ is a stem for the root $3$ but $\{1,2,3\}$ is no circuit because it contains the proper dependent subset $\{2,3\}$.
{\bf Open Problem 2}: Develop a theory for those closure operators (e.g. their optimum bases), for which each root-stem-partition $U \cup \{e\}$ is a circuit.
Most prominently, matroids and convex geometries belong to this class of closure operators. In the first case each circuit $K$ has roots$(K) = K$, in the second case $|\mbox{roots}(K)| =1$.
{\bf Expansion 6} It is easy to see that neither a properly quasiclosed set $Q$ needs to contain a $\theta$-equivalent stem $X$, nor is a stem $X$ necessarily contained in a $\theta$-equivalent proper quasiclosed set. Nevertheless, those stems $X$ that {\it coincide} with a properly quasiclosed set can be characterized neatly. For starters, since each stem $X$ is independent and a proper subset of an independent set has a strictly smaller closure, we see that:
(37) \quad Each stem which is properly quasiclosed is in fact pseudoclosed.
This raises the problem to grasp the ``pcst-sets'' which by definition are pseudoclosed and a stem (i.e. belong to $\Sigma_{GD}$ {\it and} $\Sigma_{cd}$). If $P$ is pseudoclosed then one can decide whether $P$ is pcst as follows: For all $e \in c(P)\backslash P$ check whether $P$ is {\it minimal} with the property that $e \in c(P)$. No better description of the pcst-sets {\it within the family of all pseudoclosed sets} seems to be known. In contrast, the pcst-sets look neat {\it within the family of all stems}:
\begin{tabular}{|l|} \hline \\
{\bf Theorem 7:} For each stem $X$ of a closure operator $c: \ {\cal P}(E) \rightarrow {\cal P}(E)$ the \\
following properties are equivalent:\\
\\
(i) \ $X$ is pseudoclosed.\\
\\
(ii) $X$ is inclusion-minimal among all stems of $c$.\\
\\
(iii) $X$ is a {\it strong} stem in the sense that roots$(X) = c(X) \backslash X$.\\ \\ \hline \end{tabular}
{\it Proof of Theorem 7.}
As to (i) \ $\Leftrightarrow$ \ (ii), we show that $\neg$(i) \ $\Leftrightarrow \ \neg$(ii), i.e. that
$$X \varsubsetneqq X^\circ \Leftrightarrow Y \varsubsetneqq X \ \mbox{for some stem} \ Y.$$
As to ``$\Rightarrow$'', take $e \in X^\circ \backslash X$. By the definition of $X^\circ$ there is a $Y_o \varsubsetneqq X$ with $e \in c(Y_o) \varsubsetneqq c(X)$. We can shrink $Y_o$ to a stem $Y$ of $e$. As to ``$\Leftarrow$'', because $Y \varsubsetneqq X$ is a stem we can be sure that $c(Y) \backslash Y \neq \emptyset$. If $e \in c(Y) \backslash Y$ then $e \in c(Y) \varsubsetneqq c(X)$, where $\varsubsetneqq$ is due to the independence of $X$. Thus $e \in X^\circ \backslash X$.
As to (i) $\Rightarrow$ (iii), if $Y \varsubsetneqq X$ then again $c(Y) \varsubsetneqq c(X)$ since $X$ (being a stem) is independent. Hence $c(Y) \subseteq X^\bullet = X$. So for {\it each} $e \in c(X) \backslash X$ the set $X$ is minimal w.r.t. the property that its closure captures $e$. As to (iii) $\Rightarrow$ (ii), suppose $Y \varsubsetneqq X$ was a stem, say $Y \in \, \mbox{stems}(e)$. Necessarily $e \in c(X) \backslash X$ since $X$ is independent. But then $e \in \, \mbox{roots}(X)$ by assumption, and so $e \in c(Y)$ is impossible. This contradiction shows that $X$ is inclusion-minimal. \quad $\square$
Theorem 7 draws on [KN]. We changed ``prime stem'' in [KN] to ``strong stem'' in order to avoid confusion with the prime implicates in 3.4.3.
{\bf Expansion 7.} If $f$ is given as a CNF then the well-known {\it consensus method} [CH, 2.7] is applicable to generate all prime implicates of $f$. For instance let $f: \{0,1\}^6 \rightarrow \{0,1\}$ be the conjunction of the four clauses at level $L1$ in Table 2 below (where e.g. $\overline{3} \vee 5$ abbreviates $\overline{x}_3 \vee x_5$). The clauses $C_1= \overline{3} \vee 5$ and $C_2 = \overline{1} \vee 4 \vee \overline{5}$ are such that there is exactly {\it one} literal $x_i$ which appears in one clause and its negation in the other; namely $x_i = x_5$. In this situation we add (while keeping $C_1, C_2$) the {\it consensus} clause $\overline{1} \vee \overline{3} \vee 4$ which is thus obtained by dropping $5$ and $\overline{5}$ from the disjunction $C_1 \vee C_2$. All consensi obtained from level $L1$ are listed in level $L2$. One continues by building consensi between $L1$ and $L2$, and then between $L2$ and $L2$. All of these are listed in $L3$. The list $L1 \cup L2 \cup L3$ is long enough that some of its members get unveiled as redundant; such as $\overline{2} \vee \overline{3} \vee 4 \vee \overline{5}$ which is implied by $\overline{2} \vee \overline{3} \vee 4$. Level $L4$ contains the pruned list. Building consensi within $L4$ (more precisely between the first and second line of $L4$) yields $L5$. Pruning $L4 \cup L5$ yields $L6$.
$\begin{array}{ll}
L1, \ \mbox{start} : & \overline{3} \vee 5, \quad \overline{1} \vee 4 \vee \overline{5}, \quad 3 \vee \overline{6}, \quad 1 \vee \overline{2} \vee \overline{3}\\
\\
L2, \ \mbox{consensus}: & \overline{1} \vee \overline{3} \vee 4, \quad 5 \vee \overline{6}, \quad \overline{2} \vee \overline{3} \vee 4 \vee \overline{5}, \quad 1 \vee \overline{2} \vee \overline{6}\\
\\
L3, \ \mbox{consensus}: & \overline{2} \vee \overline{3} \vee 4, \quad \overline{1} \vee 4 \vee \overline{6}, \quad \overline{2} \vee 4 \vee \overline{5} \vee \overline{6}, \quad 1 \vee 4 \vee \overline{6}, \quad 2 \vee 4 \vee \overline{5} \vee \overline{6}, \\
& \overline{2} \vee \overline{3} \vee 4; \quad \overline{2} \vee \overline{3} \vee 4 \vee \overline{6}, \quad \overline{2} \vee \overline{3} \vee 4 \vee \overline{6}\\
\\
L4, \ \mbox{pruning}: & \overline{3} \vee 5, \quad \overline{1} \vee 4 \vee \overline{5}, \quad 3 \vee \overline{6}, \quad 1 \vee \overline{2} \vee \overline{3}, \quad \overline{1} \vee \overline{3} \vee 4, \quad 5 \vee \overline{6}, \\
& 1 \vee \overline{2} \vee \overline{6}, \quad \overline{2} \vee \overline{3} \vee 4, \quad \overline{1} \vee 4 \vee \overline{6}, \quad \overline{2} \vee 4 \vee \overline{5} \vee \overline{6}\\
\\
L5, \mbox{consensus} : & \overline{2} \vee \overline{3} \vee 4 \vee \overline{6}, \quad \overline{2} \vee \overline{4} \vee \overline{5} \vee \overline{6}, \quad \overline{2} \vee 4 \vee \overline{6}, \quad \overline{2} \vee \overline{3} \vee 4 \vee \overline{6}, \quad \overline{2} \vee 4 \vee \overline{6}\\
\\
L6, \mbox{pruning}: & \overline{3} \vee 5, \quad \overline{1} \vee 4 \vee \overline{5}, \quad 3 \vee \overline{6}, \quad 1 \vee \overline{2} \vee \overline{3}, \quad \overline{1} \vee \overline{3} \vee 4, \quad 5 \vee \overline{6}, \\
& 1 \vee \overline{2} \vee \overline{6}, \quad \overline{2} \vee \overline{3} \vee 4,\quad \overline{1} \vee 4 \vee \overline{6}, \quad \overline{2} \vee 4 \vee \overline{6}
\end{array}$
Table 2: The consensus algorithm (simple version)
Now $L6$ yields no new consensi. According to a famous 1959 theorem of Quine [Q] the members in $L6$ hence constitute {\it all} prime implicates of $f(x_1, \cdots, x_6)$. We mention that $L6$ matches $\Sigma_{cd}$ in (27). See [CH, chapter 6.5] for a consensus method working for all Boolean functions $f$, and running in polynomial incremental time in the case of Horn functions $f$. The consensus method can be viewed as a special case of an algorithm [AACFHS] that generates all maximal bicliques ($=$ complete bipartite subgraphs) of a graph $G$. If $G$ itself is bipartite, say with shores $E_1, E_2$ this problem amounts to generate all closed sets of a Galois connection (2.1.2).
{\bf Expansion 8}. We present a novel way for the direction $\Sigma \rightarrow M({\cal F})$.
Suppose that
(38) \quad $\Sigma := \{\{3\} \rightarrow \{5\}, \quad \{1, 5\} \rightarrow \{4\}, \quad \{6\} \rightarrow \{3\}, \quad \{2, 3\} \rightarrow \{1\}\}.$
Observe that $\Sigma$ is equivalent to $L1$ in Expansion 7 and whence to the family of implications in (27). Hence, if our method is correct, we will wind up with $M({\cal F})$ as in (26). As shown in
Section 4.4 by running the implication $n$-algorithm one can represent ${\cal F} := {\cal F}(\Sigma)$ as a disjoint union of eight $012$-rows, i.e. subcubes of ${\cal P}[6]$, as shown in Table 3. Let us argue that such a representation readily yields $M({\cal F})$ as a side product.
\begin{tabular}{c|c|c|c|c|c|c|}
& 1 & 2& 3& 4& 5& 6 \\ \hline
$r'_1=$ & 0 & 2& 0 &2 & 2& 0 \\ \hline
$r'_2 =$ & 1 &2 & 0 & 2 & 0 & 0 \\ \hline
$r'_3=$ & 1 & 2& 0 & 1& 1& 0 \\ \hline
$r'_4=$ & 0 & 0 & 1 & 2 & 1 & 1 \\ \hline
$r'_5 =$ & 1 & 0 & 1 & 1 & 1 &1 \\ \hline
$r'_6=$ & 1 & 1 & 1& 1 & 1& 2 \\ \hline
$r'_7=$ & 0 & 0 & 1 & 2& 1& 0 \\ \hline
$r'_8=$ & 1 & 0 & 1 & 1 & 1 &0 \\ \hline \end{tabular}
Table 3: Getting $M({\cal F})$ by column-wise processing a compressed representation of ${\cal F}$
By (21) it suffices to show how to get $\max ({\cal F}, e)$ for any particular $e \in E = [6]$. Say $e=4$. If $r'_i$ has its fourth component equal to 1 then $r'_i$ cannot contain a member of $\max({\cal F},4)$. This e.g. happens for $r'_3$. If the fourth component of $r'_i$ is 0 or 2 then at most the unique row-maximal set $\max (r'_i, 4) \in r'_i$ {\it may} belong to $\max ({\cal F}, 4)$. Hence the collection of all maximal row-maximal sets is $\max ({\cal F}, 4)$. Thus
$$\begin{array}{lll}
\max({\cal F}, 4) & =& \max \{\max (r'_1, 4), \max (r'_2, 4), \max (r'_4, 4), \max (r'_7, 4) \} \\
\\
& =& \max \{\{2, 5\}, \{1, 2\}, \{3, 5, 6\}, \{3, 5\}\}\\
\\
& = & \{\{2, 5\}, \{1, 2\}, \{3, 5, 6\} \}. \end{array}$$
Likewise the other collections $\max ({\cal F}, e)$ are obtained, and so we get (matching (26)) that
$$\begin{array}{lll}
M({\cal F}) & =& \max ({\cal F},1) \cup \cdots \cup \max ({\cal F}, 6)\\
\\
& =& \{245, 3456\} \cup \{13456\} \cup \{1245\} \cup \{25, 12, 356\} \cup \{124\} \cup \{12345\}. \end{array}$$
Let $\max ({\cal F}) = \{H_1, \cdots, H_s\}$ be the set of hyperplanes of $c$. Obviously the minimal keys of $E$ are exactly the minimal transversals of ${\cal H} = \{E \backslash H_1, \cdots, E \backslash H_s\}$, and so any good algorithm for mtr$({\cal H})$ yields them, provided the hyperplanes are known. In particular, the $H_i$'s can be gleaned from a table like Table 3 since $\max ({\cal F}) \subseteq M({\cal F})$.
{\bf Expansion 9.} Here we present another view of Table 3 in Expansion 8. But first we need to dualize some concepts from 3.4.1.
Thus a conjunction of literals is called a {\it term}. The model set of a term $T$, viewed as a Boolean function $T: \{0,1\}^n \rightarrow \{0,1\}$, is an interval in the lattice $\{0,1\}^n = {\cal P}[n]$. (It is also common, although less precise, to speak of ``subcubes'' instead of intervals.) For instance if $T$ is $x_1 \wedge \overline{x}_3 \wedge \overline{x}_5 \wedge \overline{x}_6$ then Mod$(T) = (1,2, 0, 2, 0,0)$. This $012$-{\it row} is a succinct notation for the interval $\{U\subseteq {\cal P}[6] : \{1\} \subseteq U \subseteq \{1,2,4\}\}$. A {\it disjunctive normal form} (DNF) is any disjunction of terms.
Now back to Table 3. The pure Horn function matching $\Sigma$ in (38) is
$$f(x_1, \cdots, x_6) = (\overline{x}_3 \vee x_5) \wedge (\overline{x}_1 \vee \overline{x}_5 \vee x_4) \wedge (\overline{x}_6 \vee x_3) \wedge (\overline{x}_2 \vee \overline{x}_3 \vee x_1).$$
We aim to convert this CNF into a DNF. Because Mod$(f) = {\cal F}(\Sigma)$ is represented as the union of the $012$-rows $r'_i$ in Table 3, and because $r'_i = \, \mbox{Mod}(T_i)$ for obvious terms $T_i$, one DNF for $f(x_1, \cdots, x_6)$ is
$$T_1 \vee \cdots \vee T_8 : = (\overline{x}_1 \wedge \overline{x}_3 \wedge \overline{x}_6) \vee (x_1 \wedge \overline{x}_3 \wedge \overline{x}_5 \wedge \overline{x}_6) \vee \cdots \vee (x_1 \wedge \overline{x}_2 \wedge x_3 \wedge x_4 \wedge x_5 \wedge \overline{x}_6).$$
The above DNF is {\it orthogonal} [CH, chapter 7] in the sense that Mod$(T_i) \cap \, \mbox{Mod}(T_j)=\emptyset$ for $i \neq j$. It would be interesting to know how to exploit the orthogonality of a DNF in a (dual) consensus method.
{\bf Expansion 10}. {\it Proof of (22)}. As to $\subseteq$, from $X \in \max ({\cal F}, e)$ follows that $X$ is maximal within ${\cal F}$ w.r.to $e \not\in X$. A fortiori $X$ is maximal within ${\cal H} \subseteq {\cal F}$ w.r.to $e \not\in X$, {\it provided} $X$ belongs to ${\cal H}$ at all. But this follows from (21) and $M({\cal F}) \subseteq {\cal H}$. As to $\supseteq$, let $X \in {\cal H}$ be maximal w.r.to $e \not\in X$. Then there is $Y \in {\cal F}$ which is maximal w.r.to $Y \supseteq X$ and $e \not\in Y$. Hence $Y \in \max ({\cal F}, e)$ by definition of the latter, and so $Y \in M({\cal F}) \subseteq {\cal H}$ by (21). By the maximality property of $X$, we have $X = Y \in \max ({\cal F}, e)$.
{\bf Expansion 11}. As to going from $\Sigma_{cd}$ (or in fact from any base) to a minimum base $\Sigma_0$, we illustrate the method of Shock [Sh], which first demands to replace, for each $A \rightarrow B$ in $\Sigma_{cd}$, the conclusion $B$ by $c(B)$ where $c$ is the closure operator induced by $\Sigma_{cd}$. For $\Sigma_{cd}$ in (27) we get an equivalent family of full implications
$$\Sigma^\ast_{cd} = \{13 \rightarrow 1345, \ 16 \rightarrow 16435, \ 23 \rightarrow 23145, \ 26 \rightarrow 261435, \ 15 \rightarrow 154, \ 6 \rightarrow 635,\ 3 \rightarrow 35 \}.$$
Recall from (8) that $(A \rightarrow B) \in \Sigma^\ast_{cd}$ is redundant iff $B$ is contained in the $(\Sigma^\ast_{cd} \backslash \{A \rightarrow B\})$-closure of $A$. Incidentally $\Sigma (A)$, as defined before (12), is $\{A \rightarrow B\}$ for all$(A \rightarrow B) \in \Sigma^\ast_{cd}$, and so the $(\Sigma^\ast_{cd} \backslash \{A \rightarrow B\})$-closure of $A$ is $A^\bullet$ by (12). Because of $1345 \subseteq 13^\bullet = 1354$ we can thus drop $13 \rightarrow 1345$ from $\Sigma^\ast_{cd}$. Further $16 \rightarrow 16435$ can be dropped because of $16435 \subseteq 16^\bullet = 16354$, and $26 \rightarrow 261435$ can be dropped because of $261435 \subseteq 26^\bullet = 263514$. The resulting base
$$\Sigma_0 = \{23 \rightarrow 23145, \ 15 \rightarrow 154, \ 6 \rightarrow 635, \ 3 \rightarrow 35\}$$
is nonredundant and whence minimum by Theorem 1(d). The kind of minimum base $\Sigma_0$ obtained by Shock can by Theorem 1 easily be ``blown up'' to $\Sigma_{GD}$.
{\bf Expansion 12}. In [W7, Expansion 13] it is shown how $\max ({\cal F}, e)$ relates to lattice theory, in particular to the relations $\uparrow, \downarrow, \updownarrow$ which originated in [D1] and are akin to the ones in [GW, p.31]. Coupled to each lattice ${\cal L}$ there is an important bipartite graph with shores $J({\cal L})$ and $M({\cal L})$.
{\bf Expansion 13}. In [W7, Expansion 14] we show the well known fact [CM] that the collection ${\cal C}$ of all closure systems ${\cal F} \subseteq {\cal P}(E)$ is itself a closure system, in fact (viewed as a lattice) it is meet-distributive. Furthermore the technical proof of property (39) in Expansion 20 features there.
{\bf Expansion 14.} For any closure operator $c : {\cal P}(E) \rightarrow {\cal P}(E)$ consider these properties:
\begin{tabbing}
123456\=\kill
$(T 0)$ \> $(\forall p, q \in E) \ \ (p \neq q \ \Rightarrow \ c(\{p\}) \neq c(\{q\}))$\\
\\
$(T \frac{1}{2})$ \> $(\forall p \in E) \ \ \ c(\{p\}) \backslash \{p\}$ \ is closed\\
\\
$(T 1)$ \> $(\forall p \in E) \ \ \ c(\{p\}) = \{p\}$\\
\end{tabbing}
The properties $(T 0)$ and $(T 1)$ are well known ``separation axioms'' from topology. For instance ${\cal F}$ in Figure 4(a) violates $(T0)$.
The notation $(T \frac{1}{2})$ stems from [W5] but the property was previously considered. All three axioms make sense for non-topological operators $c$. It is an exercise to verify $(T 1) \Rightarrow (T \frac{1}{2}) \Rightarrow (T 0)$; furthermore $c(\emptyset) = \emptyset$ when $(T \frac{1}{2})$ holds. In fact, as shown in [W5, Thm.8], $c$ is isomorphic to a standard operator $c_J$ as in (30) iff $c$ satisfies $(T \frac{1}{2})$. It is easy to ``boil down'' any closure operator $c$ on a set $E$ to an operator $\overline{c}$ of type $(T 0)$ on a smaller set $\overline{E}$, and $\overline{c}$ to $c_J$ of type $(T \frac{1}{2})$ on a still smaller set $J$, in such a way that the lattices ${\cal F}_c$ and ${\cal F}_J$ are isomorphic.
See [W5, p.165] or [GW, ch.1.1, 1.2] for details.
Albeit the lattices ${\cal F}_c$ and ${\cal F}_J$ are isomorphic, this may be of little help to get a good base of $c$ from one of $c_J$. For instance it takes some effort to find an optimum base for the closure system ${\cal F} ={\cal F}_c$ in Figure 4(a). In contrast ${\cal F}_J$ is a Boolean lattice and whence has the empty set as an optimum base! (See also Open Problem 4 in Expansion 15.)
{\bf Expansion 15}. Recall from (29) that $x \mapsto {\cal J}(x)$ is a lattice isomorphism from ${\cal L}$ onto ${\cal F}_J = \{{\cal J}(x) : x \in {\cal L}\}$ and that ${\cal J}(x\wedge y) = {\cal J}(x) \cap {\cal J}(y)$ but usually ${\cal J}(x \vee y) \varsupsetneqq {\cal J}(x) \cup {\cal J}(y)$. To see that ``$=$'' takes place in the distributive case, fix $p \in {\cal J}(x \vee y)$. Then $p \leq x \vee y$ and distributivity imply that $p = p\wedge (x \vee y) = (p \wedge x) \vee (p \wedge y)$. Since $p$ is join irreducible this forces $p = p \wedge x$ or $p =p \wedge y$, whence $p \leq x$ or $p \leq y$, whence $p \in {\cal J}(x) \cup {\cal J}(y)$. Hence ${\cal F}_J$ is a sublattice of $({\cal P}(J), \cap , \cup)$. Consequently the closure operator $c_J$ from (30) is topological, in fact $c_J(\{p_1, \cdots, p_t\}) = J(p_1) \cup \cdots \cup J(p_t)$.
Therefore ${\cal F}_J$ is the lattice ${\cal L}({\cal J}, \leq)$ of all order ideals of the poset $({\cal J}, \leq)$.
In particular, since ${\cal L} \simeq {\cal F}_J$ by (29), we have ${\cal L} \simeq {\cal L}(J, \leq)$. This is Birkhoff's Theorem, see [Bi, p.59].
As to implicational bases, for any lattices ${\cal L} \simeq {\cal F}_J$ it is natural to consider the set
of implications
$$\Sigma_J : = \{\{p\} \rightarrow \ell cov(p): \ p \in J^\ast \},$$
where $\ell cov(p)$ is the set of lower covers of $p$ within $(J, \leq)$ and $J^\ast$ is the set of all non-minimal members of $(J, \leq)$. It is clear that ${\cal F}(\Sigma_J)$ is the collection of all order ideals of $(J, \leq)$. Hence $\Sigma_J$ is a base of ${\cal L}$ iff ${\cal L}$ is distributive. Actually $\Sigma_J$ is the unique {\it optimum} base for each distributive lattice ${\cal L}$. That follows immediately from 3.2.2 (all circle formations are points here). Note that $\Sigma_J = \emptyset$ when ${\cal L} \simeq {\cal P}(J)$ is Boolean. For nondistributive lattices $\Sigma_J$ may constitute a relevant part of larger bases. Most prominently, according to 4.1.3 each optimum base of a modular lattice includes $\Sigma_J$. On the downside, $\Sigma_J$ needs {\it not} be part of every optimum base of a lattice. For instance the lattice ${\cal L}_0$ in Figure 11 has $\Sigma_J = \{\top \rightarrow 23, 2 \rightarrow 4\}$ whereas one optimum base of ${\cal L}_0$ is $\{\top \rightarrow 34, 2 \rightarrow 4, 34 \rightarrow 2\}$.
{\bf Open Problem 3}: Determine the class ${\cal K}$ of lattices ${\cal L}$ (among which all modular ones) for which $\Sigma_J$ in Expansion 15 is part of every optimum base of ${\cal L}$.
\begin{center}
\includegraphics[scale=0.7]{JoyOfImplicFig11}
\end{center}
As seen above, for topological operators $c$ the lattice ${\cal F}_c$ is a sublattice of ${\cal P}(E)$, and whence distributive. However as seen in 3.1, ${\cal F}_c$ can be distributive without being a sublattice of ${\cal P}(E)$.
{\bf Open Problem 4:} Let $c : {\cal P}(E) \rightarrow {\cal P}(E)$ have a distributive lattice ${\cal F}_c$ which is {\it not} a sublattice of ${\cal P}(E)$. Can one find an optimum base of $c$ in polynomial time?
{\bf Expansion 16.} We start by proving (33) in 4.1.5. So let $(K, e)$ be critical, i.e. $c(K)\backslash \{e, x\}$ is closed for all $x \in K \backslash \{e\}$. In order to show that $S: = c(K) \backslash \{e\}$ is quasiclosed\footnote{Notice that when $S$ is quasiclosed then it is properly quasiclosed since $c(S) =c(K) \neq S$.} we take (in view of (9)) $U \subseteq S$ with $c(U) \neq c(S)$ and aim to show that $c(U) \subseteq S$. There must be an $x \in K\backslash \{e\}$ with $x \not\in U$ (otherwise $K \backslash \{e\} \subseteq U$ yields the contradiction $c(U) = c(K)$). But then $U \subseteq c(K) \backslash \{e, x\}$, and so by assumption $c(U) \subseteq c(K) \backslash \{e, x\} \subseteq S$.
Next, assuming $S = c(K) \backslash \{e\}$ is quasiclosed, we show that $K \backslash \{e\}$ is a closure-minimal stem of $e$ in the sense of Expansion 6. Suppose to the contrary there was a stem $U$ of $e$ with $c(U) \varsubsetneqq c(K \backslash \{e\}) = c(K)$. From $U \subseteq S$ and $c(U) \neq c(S)$ follows (since $S^\bullet = S$) that $c(U) \subseteq S$. This is impossible since $e \in c(U)$ (by the definition of stem).
Finally, letting $K \backslash \{e\}$ be closure-minimal, assume by way of contradiction that $Y : = c(K) \backslash \{e, x\}$ is not closed for some $x \in K \backslash \{e\}$. First, $c(K) \backslash \{x\} = Y\cup \{e\}$ is closed because $x \in ex(c(K)) = ex (K)$. Hence $c(Y) = Y \cup \{e\}$, and so there is a stem $U \subseteq Y$ of $e$. It satisfies $c(U) \subseteq Y \cup \{e\} \varsubsetneqq c(K) = c(K \backslash \{e\})$, and thus $K \backslash \{e\}$ is not closure-minimal. This proves (33). \quad $\square$
Yet another (equivalent) definition of ``critical'' is given in [W3, p.136]. Furthermore $(K, e)$ is called {\it extra-critical} in [W3] if the quasiclosed set $c(K) \backslash \{e\}$ in
(33) coincides with $(K \backslash \{e\})^\bullet$.
If $c: {\cal P}(E) \rightarrow {\cal P}(E)$ is a convex geometry, then the set system ${\cal A}_c : = \{E \setminus X: X \in {\cal F}_c\}$ is a so called {\it antimatroid}. One can define antimatroids independent of $c$ as union-closed set systems ${\cal A} \subseteq {\cal P}(E)$ which are hereditary in the sense that for each nonempty $A \in {\cal A}$ there is some $x \in A$ with $A \setminus \{x\} \in {\cal A}$. What we defined as a rooted circuit $(K, e)$ in 4.1.5 relates as follows to ${\cal A}_c$: Whenever $e \in A \in {\cal A}$ then $(K \setminus \{e\}) \cap A \neq \emptyset$; and $K$ is minimal with this property. In fact this is the {\it original} definition of a rooted circuit [KLS, p.28]. Apart from rooted circuits our definition of a critical circuit $(K, e)$ in 4.1.5 similarly matches the definition given in [KLS, p.31]. Each antimatroid ${\cal A}$ can (apart from the set system view) equivalently be rendered as a certain {\it hereditary language}. From this perspective the critical circuits provide an optimal representation of ${\cal A}$, see [KLS, Thm.3.11]. This contrasts with the fact that $\Sigma_{crci}$ usually is {\it no} implicational base (see 4.1.5). Antimatroids and convex geometries arise in many contexts, often related to combinatorial optimization, see [KLS, III.2].
A lattice ${\cal L}$ is {\it meet-distributive} if the interval $[x_\ast, x] \subseteq {\cal L}$ is Boolean for all $x \in {\cal L} \backslash \{\bot\}$. (Many equivalent characterizations exist.) Every convex geometry $c$ has a meet-distributive lattice ${\cal F}_c$. Conversely, if ${\cal L}$ is meet-distributive then $c_J$ is a convex geometry.
The dual concept of meet-distributivity is {\it join-distributivity}, i.e. when
$[x,x^\ast]$ is a Boolean interval for all $x \in {\cal L} \backslash \{\top\}$. A lattice which is both meet and join-distributive must be distributive, and conversely.
A lattice ${\cal L}$ is {\it join-semidistributive} $(SD_\vee)$ if for all $x, y, z \in {\cal L}$ it follows from $x \vee y = x \vee z$ that $x \vee z = x \vee (y \wedge z)$.
In such a lattice $|J({\cal L})| \leq |M({\cal L})|$. See also [W7, Expansion 13]. Notice that ``meet-distributive $\Rightarrow$ join-semidistributive''. In fact, the $SD_\vee$ lattices ${\cal L}$ of length $d({\cal L}) =
|J({\cal L})|$ are exactly the meet-distributive lattices. If ${\cal L}$ is $SD_\vee$ then by [AN1, prop.49] every essential set $X$ of $c_J$ has a {\it unique} quasi-closed generating set $Q$ (which equals $ex(X)$ in the meet distributive case). Conversely such a {\it unique-criticals} lattice need not be $SD_\vee$. See Figure 12. Further topics in [AN1] include the uniqueness of the $K$-basis (see 4.1.1) for $SD_\vee$ standard closure systems ${\cal L}$, and the fact that such ${\cal L}$ generally don't belong to the class ${\cal K}$ in Open Problem 3 of Expansion 15. Dually to $SD_\vee$ one defines {\it meet-semidistributivity} $(SD_\wedge)$. It comes as no surprise that ``join-distributive $\Rightarrow$
meet-semidistributive''. Results about bases of $SD_\wedge$-lattices are given in [JN], and exploited in [W4]. See also Expansion 18.
{\bf Expansion 17.} As a variation of Theorem 5, $\vee$-semilattices (in particular lattices ${\cal L}$) can also be described as systems of {\it restricted} order ideals of a poset. This generalizes the representation of distributive lattices, for which {\it all} order ideals are used (Expansion 15). The restriction imposed on the order ideals is governed by core$({\cal L}) : = \, \mbox{core}(c_J)$ where $c_J$ is as in (30) and core$(c)$ as in (14). We mention that in [D] core$({\cal L})$ is determined for many types of lattices ${\cal L}$. Notice that $|\Sigma_{GD}| \geqq |\mbox{core}({\cal L})|$ and that from core$({\cal L})$ alone one cannot obtain $\Sigma_{GD}$. See [W7, Expansion 18] for more details.
{\bf Expansion 18.} The $D$-relation, which is of importance in the study of free lattices, is defined as follows. For $p, q \in J({\cal L})$ put $pDq$ if $q$ appears in some order-minimal join cover $A$ of $p$. A {\it $D$-cycle} is a configuration of type $p_1Dp_2D \cdots p_n Dp_1$. For instance the convex geometry in 2.2.5 has the $D$-cycle $6D 8D6$ because 146 is a minimal join cover of 8 and 238 is a minimal join cover of 6.
Each $D$-cycle induces a cycle in $G(\Sigma)$ for each base $\Sigma$ of $c_J$, but not conversely. Hence closure operators without $D$-cycles are strictly more general than acyclic operators.
Indeed, the former have $SD_\vee$ closure systems by [FJN], the latter meet-distributive ones by Theorem 3 (see also Expansion 16). While the presence of $D$-cycles can be decided from $\Sigma_{GD}$ in polynomial time [AN1, Thm.43], this is unknown for checking $SD_\vee$.
Likewise the {\it affine} convex geometries (as 2.2.5 but in $\mathbb{R}^n$, not just $\mathbb{R}^2$) can be generalized, i.e. to convex geometries satisfying the so-called $n$-{\it Carousel Property}. This property was crucial in article [AW] that dealt with the realizability (in $\mathbb{R}^2$) of convex geometries. Implication bases of convex geometries with the $n$-Carousel Property can be optimized in polynomial time [A, Thm.12], but the arguments get ``uglier'' than the deliberations in 2.2.5. Notice that checking the $n$-Carousel property ($n$ fixed), as opposed to checking realizability, is ``straightforward'' albeit tedious. Furthermore, optimization of implication bases of order-convex\footnote{By definition the closed sets of an {\it order-convex} geometry are all intervals of some poset.} geometries is polynomial-time [A, sec.6].
\begin{center}
\includegraphics{JoyOfImplicFig12}
\end{center}
{\bf Expansion 19}. It is easy to replace a $012n$-row by a couple of disjoint $012$-rows. For instance $(n,n,n)$ is the same as $(0,2,2) \cup (1,0,2) \cup (1,1,0)$.
Sometimes $012$-rows are easier to handle, if only for pedagogical reasons as in Table 3 of Expansion 8. Conversely, a random collection of $012$-rows usually cannot be compressed to fewer $012n$-rows. As seen in 4.4 the $n$-algorithm produces its rows ``from scratch'' without an intermediate state of $012$-rows.
Further fine-tuning is possible. For instance, instead of replacing $r_8$ by $r_{11} \cup r_{12}$ in Table 1 we could have replaced it by the single row $(1, 2, b, 1, 1, a)$ where generally the wildcard $abb\cdots b$ signifies that either ${\bf 0}22\cdots 2$ or ${\bf 1}11\cdots 1$ must take place.
The author exploited this idea in the special case where all $(A\rightarrow B) \in \Sigma$ are of type $\{a\} \rightarrow B$ in the first place; this essentially amounts to enumerating all order ideals of a poset.
In a similar manner all anticliques ($=$ independent vertex sets) of a graph can be enumerated in a compact manner (work in progress).
{\bf Expansion 20.} Let us sketch how to (a) get a $ca$-minimum base of a Horn function $h$, and (b) how to merely calculate $ca(h)$.
As to (a), it relates to [W7, Expansion 14] where we showed that for any $\cap$-subsemilattice ${\cal S} \subseteq {\cal P}(E)$ the collection
(39) \quad ${\cal C}({\cal S}): = \{{\cal F} \subseteq {\cal P}(E) \ \mbox{closure system}\ | \ \ (\exists \ \mbox{set ideal} \ {\cal G} \subseteq {\cal P}(E))\ {\cal F} \cap {\cal G} = {\cal S} \}$
is a sublattice of the lattice ${\cal C}$ of all closure systems on $E$. Clearly $\bot = {\cal S} \cup \{E\}$ is the smallest element of ${\cal C}({\cal S})$. Let $f_\top$ be the pure Horn function matching the {\it largest} element $\top$ of ${\cal C}({\cal S})$. Albeit $\top$ as a subset of ${\cal P}(E)$ cannot be described as simply as $\bot$, it is shown in [HK, Lemma 4.2] that $f_\top$ must be the conjunction of all {\it pure} prime implicates of $h$. Once calculated (consensus method), this {\it pure Horn part} $f_\top$ of $h$ can be used as follows to minimize $h$. Compute all {\it negative} prime implicates ($=$ complications) $A_1, A_2, \cdots$ of $h$. Take them as the vertices of a graph $G(h)$ which has an arc from $A_i$ to $A_j$ iff $A_j$ is a consequence of $A_i \wedge f_\top$. Let $P_1, \cdots, P_s$ be the strong components of $G(h)$ that have in-degree $0$ when viewed as elements of the induced factor poset. Now let $\Gamma$ be any transversal of $\{P_1, \cdots, P_s\}$ and let $\Sigma_\top$ be any minimum base of $f_\top$. Then $\Sigma_\top \cup \Gamma$ is a minimum base of $h$ [HK, Theorem 6.2].
As to (b), up to duality in [CH, 6.7.3] one associates with an impure Horn function $h$ in $n$ variables a {\it pure} Horn function $h'$ in $n+1$ variables as follows. Take any base $\Sigma \cup \Gamma$ of $h$ and let $h'$ be the function induced by $\Sigma \cup \Sigma^\ast$ where $\Sigma^\ast := \{A \rightarrow \{x_{n+1} \}: A \in \Gamma \}$. According to [CH, Lemma 6.8, Thm.6.15] this is well-defined, i.e. independent of the chosen base $\Sigma \cup \Gamma$ of $h$. Furthermore $ca(h') = ca (h)$. The intricacies of proving $ca(h') =ca(h)$ are not mirrored on the algorithmic side: Switching from $\Sigma \cup \Gamma$ to $\Sigma \cup \Sigma^\ast$ is trivial, and minimizing $\Sigma \cup \Sigma^\ast$ to $\Sigma_0$ works in quadratic time (Expansion 11) and yields $ca(h) = ca(h') = |\Sigma_0|$.
\section*{Acknowledgement:} I am grateful for comments from Kira Adaricheva, Roni Khardon, Sergei Kuznetsov, Jos\'{e} Balc\'{a}zar, Ron Fagin, Gert Stumme, Sergei Obiedkov, Sebastian Rudolph, Hiroshi Hirai, Giorgio Ausiello, Bernard Monjardet.
\section*{References}
\begin{enumerate}
\item[{[A]}] K. Adaricheva, Optimum basis of finite convex geometry, to appear in Disc. Appl. Mathematics.
\item[{[AN1]}] K. Adaricheva, J.B. Nation, On implicational basis of closure systems with unique critical sets, Discrete Appl. Math. 162 (2014) 51-69.
\item[{[AN2]}] K. Adaricheva, J.B. Nation, Discovery of the $D$-basis in binary tables based on hypergraph dualization, arXiv:1504.02875v2.
\item[{[ANR]}] K. Adaricheva, J.B. Nation, R. Rand, Ordered direct implicational basis of a finite closure system, Discrete Appl. Math. 161 (2013) 707-723.
\item[{[AACFHS]}] G. Alexe, S. Alexe, Y. Crama, S. Foldes, P.L. Hammer, B. Simeone, Consensus algorithms for the generation of all maximal bicliques, Disc. Appl. Math. 145 (2004) 11-21.
\item [{[AB]}] M. Arias, J.L. Balcazar, Canonical Horn representations and Query Learning, Lecture Notes in Computer Science 5809 (2009) 156-170.
\item[{[ADS]}] G. Ausiello, A. D'Atri, D. Sacca, Minimal representation of directed hypergraphs, SIAM J. Comput. 15 (1986) 418-431.
\item[{[AW]}] K. Adaricheva, M. Wild, Realization of abstract convex geometries by point configurations, Europ. J. Comb. 31 (2010) 379-400.
\item[{[B]}] J. L. Balc\'{a}zar, Redundancy, deduction schemes and minimum-size bases for association rules, Logical Methods in Computer Science 6 (2010) 1 - 33.
\item[{[Bi]}] G. Birkhoff, Lattice Theory, AMS 1984.
\item[{[BCKK]}] E. Boros, O. Cepek, A. Kogan, P. Kucera, A subclass of Horn CNFs optimally compressible in polynomial time, Annals Math. Artif. Intelligence (2009) 249-291.
\item[{[BDVG]}] K. Bertet, C. Demko, J.F. Viaud, C. Gu\'{e}rin, Lattices, closure systems and implication bases: a survey of structural aspects and algorithms, arXiv.
\item[{[BG]}] E. Boros, A. Gruber, Hardness results for approximate pure Horn CNF Formulae minimization, Ann. Math. Artif. Intell. 71 (2014) 327-363.
\item[{[BK]}] M.A. Babin, S.O. Kuznetsov, Computing premises of a minimal cover of functional dependencies is intractable, Disc. Applied Math. 161 (2013) 742-749.
\item[{[BM]}] K. Bertet, B. Monjardet, The multiple facets of the canonical direct unit implicational basis, Theoretical Computer Science 411 (2010) 2155-2166.
\item[{[BMN]}] L. Beaudou, A. Mary, L. Nourine, Algorithms for $k$-meet semidistributive lattices, arXiv.
\item[{[Bu]}] L.M. Butler, Subgroup lattices and symmetric functions, Memoirs AMS 539 (1994).
\item[{[C]}] N. Caspard, A characterization theorem for the canonical basis of a closure operator, Order 16 (1999) 227-230.
\item[{[CH]}] Y. Crama, P.L. Hammer, Boolean Functions, Encyc. of Math. and Appl. 142, Cambridge Univ. Press 2011.
\item[{[CM]}] N. Caspard, B. Monjardet, The lattices of closure systems, closure operators, and implicational systems on a finite set: a survey. Discrete Applied Mathematics 127 (2003) 241-269.
\item[{[D]}] V. Duquenne, The core of finite lattices, Discrete Mathematics 88 (1991) 133-147.
\item[{[D1]}] A. Day, Characterization of finite lattices that are bounded-homomorphic images or sublattices of free lattices, Can. J. Math. 31 (1979) 69-78.
\item[{[D2]}] A. Day, The lattice theory of functional dependencies and normal decompositions, International Journal of Algebra and Computation 2 (1992) 409-431.
\item[{[DHO]}] P.O. Degens, H.J. Hermes, O. Opitz (eds), Die Klassifikation und ihr Umfeld, Indeks Verlag, Frankfurt 1986.
\item[{[DS]}] F. Distel, B. Sertkaya, On the complexity of enumerating pseudo-intents, Disc. Appl. Math. 159 (2011) 450-466.
\item[{[EMG]}] T. Eiter, K. Makino, G. Gottlob, Computational aspects of monotone dualization: A brief survey, Discrete Appl. Math. 156 (2008) 2035-2049.
\item[{[F]}] R. Fagin, Functional dependencies in a relational data-base and propositional logic, IBM. J. Res. Develop. 21 (1977) 534-544. (Cited in [W7].)
\item[{[FV]}] R. Fagin, M.Y. Vardi, The theory of database dependencies - a survey. Mathematics of Information Processing, Proceedings of Symposia in Applied Mathematics 34 (1986) 19-71. (Cited in [W7].)
\item[{[FD]}] J.C. Falmagne, J.P. Doignon, Learning Spaces, Springer-Verlag Berlin Heidelberg 2011.
\item[{[FJN]}] R. Freese, J. Jezek, J.B. Nation, Free lattices, Math. Surveys and Monographs 42, Amer. Math. Soc. 1995.
\item[{[G]}] G. Gr\"{a}tzer, Lattice Theory: Foundation, Birkh\"{a}user 2011.
\item[{[GD]}] J.L. Guigues, V. Duquenne, Familles minimales d'implications informatives r\'{e}sultant d'une table de donn\'{e}es binaires, Math. Sci. Hum. 95 (1986) 5-18.
\item[{[GW]}] B. Ganter, R. Wille, Formal Concept Analysis, Springer 1999.
\item[{[GR]}] B. Ganter, K. Reuter, Finding all closed sets: A general approach, Order 8 (1991) 283-290.
\item[{[HK]}] P.L. Hammer, A. Kogan, Quasi-acyclic propositional Horn knowledge bases: Optimal compression, IEEE Trans. on knowledge and data engineering 7 (1995) 751-762.
\item[{[JN]}] P. Jansen, L. Nourine, Minimum implicational bases for $\wedge$-semidistributive lattices, Inf. Proc. Letters 99 (2006) 199-202.
\item[{[KKS]}] H. Kautz, M. Kearns, B. Selman, Horn approximations of empirical data, Artificial Intelligence 74 (1995) 129-145.
\item[{[K]}] R. Khardon, Translating between Horn Representations and their characteristic models, Journal of Artificial Intelligence Research 3 (1995) 349-372.
\item[{[KLS]}] B. Korte, L. Lov\'{a}sz, R. Schrader, Greedoids, Springer-Verlag 1991.
\item[{[KN]}] K. Kashiwabara, M. Nakamura, The prime stems of rooted circuits of closure spaces, The electronic journal of combinatorics 20 (2013), Paper 22, 13 pages.
\item[{[KR]}] R. Khardon, D. Roth, Reasoning with models, Artificial Intelligence 87 (1996) 187-213.
\item[{[KuO1]}] S.O. Kuznetsov, S. Obiedkov, Comparing performance of algorithms for generating concept lattices, J. Expt. Theor. Art. Intelligence 14 (2002) 189-216.
\item[{[KuO2]}] S.O. Kuznetsov, S.A. Obiedkov, Some Decision and Counting Problems of the Duquenne-Guigues Basis of Implications. Discrete Applied Mathematics 156 (2008) 1994-2003.
\item[{[M]}] D. Maier, The Theory of Relational Databases, Computer Science Press 1983.
\item[{[Ma]}] D. Marker, Model Theory: An Introduction, Springer Verlag 2002. (Cited in [W7].)
\item[{[MR1]}] H. Mannila, K-J. R\"{a}ih\"{a}, Design by example: An application of Armstrong Relations, Journal of Computer and System Sciences 33 (1986) 126-141.
\item[{[MR2]}] H. Mannila, K.J. R\"{a}ih\"{a}, The design of relational databases, Addison-Wesley 1992.
\item[{[MU]}] K.Murakami, T. Uno, Efficient algorithms for dualizing large scale hypergraphs, Disc. Appl. Math. 170 (2014) 83-94.
\item[{[N]}] J.B. Nation, An approach to lattice varieties of finite height, Algebra Universalis 27 (1990) 521-543.
\item[{[P]}] J. Paredaens, About functional dependencies in a database structure and their coverings, Philips MBLE Lab. Report 342, Brussels 1977.
\item[{[PKID1]}] J. Poelmans, S.O. Kuznetsov, D.I. Ignatov, G. Dedene, Formal Concept Analysis in Knowledge Processing: A survey on models and techniques.
\item[{[PKID2]}] J. Poelmans, S.O. Kuznetsov, D.I. Ignatov, G. Dedene, Formal Concept Analysis in Knowledge Processing: A survey on applications, Expert Systems with Applications 40 (2013) 6538-6560.
\item[{[Q]}] WV. Quine, On cores and prime implicants of truth functions, Amer. Math. Monthly 66 (1959) 755-760.
\item[{[R]}] S. Rudolph, Succinctness and tractability of closure operator representations, arXiv.
\item[{[RCEM]}] E. Rodriguez-Lorenzo, P. Cordero, M. Enciso, A. Mora, A logical approach for direct-optimal basis of implications, Bull. Eur. Assoc. Theor. Comp. Sci. 116 (2015) 204-211.
\item[{[RDB]}] U. Ryssel, F. Distel, D. Borchmann, Fast algorithms for implication bases and attribute exploration using proper premises, Ann Math Artif Intell 70 (2014) 25-53.
\item[{[RN]}] S. Russell, P. Norvig, Artificial Intelligence: A modern approach, Prentice Hall 2003.
\item[{[S]}] A. Schrijver, Combinatorial Optimization (three volumes), Springer 2003.
\item[{[Sh]}] R.C. Shock, Computing the minimum cover of functional dependencies, Inf. Proc. Letters 22 (1986) 157-159.
\item[{[SW]}] L. Santocanale, F. Wehrung, Lattices of regular closed subsets of closure spaces, Internat. J. Algebra Comput. 24 (2014) 969-1030.
\item[{[W1]}] M. Wild, Computations with finite closure systems and implications, Lecture Notes in Computer Science 959 (1995) 111-120. (An extended version, available as pdf, is the Tech. Hochschule Darmstadt Preprint Nr. 1708 from 1994.)
\item[{[W2]}] M. Wild, Optimal implicational bases for finite modular lattices, Quaestiones Mathematicae 23 (2000) 153-161.
\item[{[W3]}] M. Wild, A theory of finite closure spaces based on implications, Advances in Mathematics 108 (1994) 118-139.
\item[{[W4]}] M. Wild, Compressed representation of Learning Spaces. To appear in the Journal of Mathematical Psychology.
\item[{[W5]}] M. Wild, Implicational bases for finite closure systems, {\it Arbeitstagung, Begriffsanalyse und K\"{u}nstliche Intelligenz}, Informatik-Bericht 89/3 (1989), pp.147-169, Institut f\"{u}r Informatik, Clausthal. (The article is downloadable from the ResearchGate.)
\item[{[W6]}] M. Wild, Compactly generating all satisfying truth assignments of a Horn formula, Journal on Satisfiability, Boolean Modeling and Computation 8 (2012) 63-82.
\item[{[W7]}] M. Wild, The joy of implications, aka pure Horn formulas: mainly a survey. This is a preliminary version (arXiv: 1411.6432v2) of the present article. It features the full versions of Expansions 1, 3, 12, 13, 17.
\item[{[Wi]}] R. Wille, Subdirect decomposition of concept lattices, Algebra Universalis 17 (1983) 275-287. (Cited in [W7].)
\end{enumerate}
\end{document} |
\begin{document}
\title{Generating permutations with a given major index}
\large
\begin{abstract}
In [S. Effler, F. Ruskey, A CAT algorithm for listing permutations with
a given number of inversions, {\it I.P.L.}, 86/2 (2003)]
the authors give an algorithm, which appears to be CAT, for
generating permutations with a given major index.
In the present paper we give a new algorithm for generating
a Gray code for subexcedant sequences. We show that this
algorithm is CAT and
derive it into a CAT generating algorithm for
a Gray code for permutations
with a given major index.
\end{abstract}
\section{Introduction}
We present the first guaranteed constant average time generating algorithm for
permutations with a fixed index.
First we give a co-lex order generating algorithm
for bounded compositions. Changing its generating order and
specializing it for particular classes of compositions
we derive a generating algorithms for
a Gray code for fixed weight subexcedant sequences;
and after some improvements we obtain an efficient
version of this last algorithm.
The generated Gray code has the remarkable property that
two consecutive sequences differ in at most
three adjacent positions and by a bounded amount in these positions.
Finally applying a bijection introduced in \cite{Vaj_11} between subexcedant sequences and
permutations with a given index we derive the desired algorithm,
where consecutive generated permutations differ by at most three
transpositions.
Often, Gray code generating algorithms can be re-expressed simpler
as algorithms with the same time complexity and generating the same class of objects,
but in different ({\em e.g.} lexicographical) order.
This is not the case in our construction: the {\em Grayness} of the
generated subexcedant sequences is critical in the construction of
the efficient algorithm generating permutations with a fixed
index.
A {\em statistic} on the set $\frak S_n$ of length $n$ permutations
is an association of an element of $\mathbb{N}$
to each permutation in $\frak S_n$.
For $\pi\in\frak S_n$ the {\em major index}, ${\scriptstyle \mathsf{MAJ}}$, is a statistic
defined by (see, for example, \cite[Section 10.6]{Lothaire_83})
$$
\displaystyle {\scriptstyle \mathsf{MAJ}}\, \pi = \mathop{\sum_{1\leq i <n}}_{\pi_i>\pi_{i+1}} i.
$$
\begin{De}
For two integers $n$ and $k$, an {\it $n$-composition of $k$}
is an $n$-sequence $\bsb{c}=c_1c_2\ldots c_n$ of non-negative integers
with $\sum_{i=1}^n c_i=k$.
For an $n$-sequence $\bsb{b}=b_1b_2\ldots b_n$, $\bsb{c}$ is said
{\it $\bsb{b}$-bounded if $0\leq c_i\leq b_i$}, for all $i$, $1\leq i\leq n$.
\end{De}
In this context $b_1b_2\ldots b_n$ is called {\it bounding sequence}
and we will consider only bounding sequences
with either $b_i>0$ or $b_i=b_{i-1}=\ldots =b_1=0$
for all $i$, $1\leq i\leq n$. Clearly, $b_i=0$ is equivalent
to fix $c_i=0$.
We denote by $C(k,n)$ the set of all $n$-compositions of $k$,
and by $C^{\bsb{b}}(k,n)$ the set of $\bsb{b}$-bounded $n$-compositions of $k$;
and if $b_i\geq k$ for all $i$, then $C^{\bsb{b}}(k,n)=C(k,n)$.
\begin{De}
A {\it subexcedant sequence} $\bsb{c}=c_1c_2\ldots c_n$ is an $n$-sequence with
$0\leq c_i\leq i-1$, for all $i$; and $\sum_{i=1}^nc_i$ is called
the {\it weight} of $\bsb{c}$.
\end{De}
We denote by $S(k,n)$ the set
of length $n$ and weight $k$ subexcedant sequences,
and clearly $S(k,n)=C^{\bsb{b}}(k,n)$ with $\bsb{b}=0\,1\,2\,\ldots \,(n-1)$.
\section{Generating fixed weight subexcedant sequences}
We give three generating algorithms, and the third one generates
efficiently combinatorial objects in bijection with
permutations having fixed index :
\begin{itemize}
\item {\tt Gen\_Colex} generates the set $C^{\bsb{b}}(k,n)$
of bounded compositions in co-lex order (defined later).
\item {\tt Gen1\_Gray} which is obtained from {\tt Gen\_Colex} by:
\begin{itemize}
\item changing its generating order, and
\item restricting it to the bounding sequence $\bsb{b}=01\ldots (n-1)$.
\end{itemize}
It produces a Gray code for the set $S(k,n)$,
and it can be seen as the definition of this Gray code.
\item {\tt Gen2\_Gray} is an efficient version of {\tt Gen1\_Gray}.
\end{itemize}
Finally, in Section~\ref{gen_perms}, regarding the subexcedant
sequences in $S(k,n)$ as McMahon permutation codes
(defined in Section~\ref{sec_Mc_code}), a constant average time
generating algorithm for a Gray code for the set of permutations of length $n$
with the major index equals $k$ is obtained.
\subsection{Algorithm {\tt Gen\_Colex}}
This algorithm generates
$C^{\bsb{b}}(k,n)$ in {\it co-lex order}, which is defined as:
$c_1c_2\ldots c_n$ precedes $d_1d_2\ldots d_n$ in co-lex order
if $c_nc_{n-1}\ldots c_1$ precedes $d_nd_{n-1}\ldots d_1$ in lexicographical
order. Its worst case time complexity is $O(k)$ per composition.
For a set of bounded compositions $C^{\bsb{b}}(k,n)$,
an {\it increasable position} (with respect to $C^{\bsb{b}}(k,n)$) in
a sequence $c_1c_2\ldots c_n\notin C^{\bsb{b}}(k,n)$
is an index $i$ such that:
\begin{itemize}
\item $c_1=c_2=\ldots =c_{i-1}=0$, and
\item there is a composition $d_1d_2\ldots d_n\in C^{\bsb{b}}(k,n)$
with $c_i<d_i$ and $c_{i+1}=d_{i+1}$, $c_{i+2}=d_{i+2}$, \dots,
$c_n=d_n$.
\end{itemize}
For example, for $C^{01233}(3,5)$ the increasable positions
are underlined in the following sequences: $0\,0\,\underline{0}\,\underline{1}\,0$
and $0\,\underline{0}\,2\,0\,0$.
Indeed, the first two positions in $0\,0\,0\,1\,0$ are not
increasable since there
is no composition in $C^{01233}(3,5)$ with the suffix $0\,1\,0$;
and the third position in $0\,0\,2\,0\,0$ is not increasable
because $2$ is the maximal value
in this position.
Clearly, if $\ell<r$ are two increasable positions in $\bsb{c}$,
then each $i$, $\ell<i<r$, is still an increasable position in $\bsb{c}$
(unless $b_i=0$).
Here is the sketch of the co-lex order
generating procedure for $C^{\bsb{b}}(k,n)$:
\begin{itemize}
\item[$\bullet$] initialize $\bsb{c}$ by the length $n$ sequence $0\,0\,\ldots\, 0$;
\item[$\bullet$] for each increasable position $i$ in $\bsb{c}$, increase
$c_i$ by one and call recursively the generating
procedure if the obtained sequence $\bsb{c}$ is not a composition in $C^{\bsb{b}}(k,n)$,
and output it otherwise.
\end{itemize}
The complexity of the obtained algorithm is $O(k)$ per generated composition
and so inefficient. Indeed, too many nodes in the generating tree induced by
this algorithm have degree one. Algorithm {\tt Gen\_Colex} in Figure
\ref{algo_colex} avoids some of these nodes.
We will identify a node in a generating tree by the corresponding
value of the sequence $\bsb{c}$; and
a {\it redundant node} in a generating tree
induced by the previous sketched algorithm is a node with
a unique successor and which differs in the same position
from its ancestor and its successor.
For example, in Figure \ref{two_tree} (a) redundant nodes are:
$0\,0\,0\,1$, $0\,0\,0\,2$, $0\,0\,1\,3$, $0\,0\,2\,3$ and $0\,1\,3\,3$.
These nodes occur when, for a given suffix, the
smallest value allowed in an increasable position in the
current sequence $\bsb{c}$ is not $1$, and this position is
necessarily $\ell$, the leftmost increasable one. Algorithm {\tt Gen\_Colex}
avoids redundant nodes by setting $c_{\ell}$ to its minimal value $e=k-\sum_{j=1}^{\ell-1}b_j$
(and $\sum_{j=1}^{i}b_j$ can be computed for each $i$, $1\leq i\leq n$,
in a pre-processing step).
For example, in Figure \ref{two_tree} (b) there are no redundant nodes.
However, in the generating tree induced by {\tt Gen\_Colex}
there still remain arbitrary length sequences of successive nodes
with a unique successor; they are avoided in procedure {\tt Gen2\_Gray}.
Algorithm {\tt Gen\_Colex} is given in Figure \ref{algo_colex}
where $\ell$ is the leftmost increasable position in the
current sequence $\bsb{c}$, and $r$ the leftmost non-zero position in $\bsb{c}$,
and thus the rightmost increasable
position in $\bsb{c}$ is $r$ if $c_r<b_r$ and $r-1$ otherwise ($b_1b_2\ldots b_n$ being the
bounding sequence).
The main call is {\tt Gen\_Colex($k$,$n$)}
and initially $\bsb{c}$ is $0\,0\,\ldots\, 0$.
(As previously, in this algorithm the function
$k\mapsto \min\{s\,|\,\sum_{j=1}^s b_j\geq k\}$
can be computed and stored in an array, in a pre-processing step.)
The induced generating tree for the call {\tt Gen\_Colex($4$,$5$)} is given
in Figure \ref{fig_2_trees} (a).
\begin{figure}
\caption{\label{two_tree}%
(a) A generating tree induced by the sketched co-lex generating procedure,
containing redundant nodes; (b) the corresponding tree induced by
algorithm {\tt Gen\_Colex}, with no redundant nodes.}
\end{figure}
\begin{figure}
\caption{\label{algo_colex}%
Algorithm {\tt Gen\_Colex}, generating the set of bounded compositions
$C^{\bsb{b}}(k,n)$ in co-lex order.}
\end{figure}
\begin{figure}
\caption{\label{fig_2_trees}%
(a) The generating tree induced by the call {\tt Gen\_Colex($4$,$5$)};
(b) the generating tree induced by algorithm {\tt Gen1\_Gray}.}
\end{figure}
\subsection{Algorithm {\tt Gen1\_Gray}}
This algorithm is defined in Figure \ref{algos_Gen1_Gray}
and is derived from {\tt Gen\_Colex}: the order of recursive calls
is changed according to a direction (parameter $dir$), and it is specialized for
bounding sequences $\bsb{b}=0\,1\,2\,\ldots\, (n-1)$, and so
it produces subexcedant sequences.
It has the same time complexity as {\tt Gen\_Colex} and
we will show that it produces a Gray code.
The call of {\tt Gen1\_Gray} with $dir=0$ produces, in order,
a recursive call with $dir=0$, then $r-\ell$ calls in the {\tt for} statement
with $dir$ equals successively:
\begin{itemize}
\item $0,1,\ldots, 0,1$, if $r-\ell$ is even, and
\item $1,0,\ldots, 1,0,1$, if $r-\ell$ is odd.
\end{itemize}
In any case, the value of $dir$ corresponding to the last call is $1$.
The call of {\tt Gen1\_Gray} with $dir=1$ produces the same
operations as previously but in reverse order,
and in each recursive call the value of $dir$ is replaced by $1-dir$.
Thus, the call of {\tt Gen1\_Gray} with $dir=1$ produces,
in order, $r-\ell$ calls in the {\tt for} statement
with $dir$ equals alternatively $0,1,0,\ldots$, then a last call with $dir=1$.
See Figure \ref{fig_2_trees} (b) for an example of generating tree
induced by this procedure.
Let $\mathcal{S}(k,n)$ be the {\it ordered list}
for $S(k,n)$ generated by the call {\tt Gen1\_Gray($k$,$n$,$0$)};
it is easy to see that $\mathcal{S}(k,n)$ is suffix partitioned,
that is, sequences with the same suffix are contiguous;
and Theorem \ref{main_th} shows that $\mathcal{S}(k,n)$ is a Gray code.
For a sequence $\bsb{c}$, a $k\geq 1$ and $dir\in \{0,1\}$
we denote by $\mathrm{first}(k;dir;\bsb{c})$ and $\mathrm{last}(k;dir;\bsb{c})$,
the first and last subexcedant sequence produced by the
call of {\tt Gen1\_Gray$(k,r,dir)$} if the current
sequence is $\bsb{c}$, and $r$ the position of the leftmost non-zero value
in $\bsb{c}$.
In particular, if $\bsb{c}=0\,0\,\ldots\,0$, then
$\mathrm{first}(k;0;\bsb{c})$ is the first sequence in $\mathcal{S}(k,n)$,
and $\mathrm{last}(k;0;\bsb{c})$ the last one.
\begin{Rem}$ $
\label{rem_2_points}
\begin{enumerate}
\label{rev_01_rem_reverse}
\item For a sequence $\bsb{c}$, the list produced by the call
{\tt Gen1\_Gray$(k,r,0)$} is the reverse of the list
produced by the call {\tt Gen1\_Gray$(k,r,1)$}, and with the
previous notations we have
\begin{eqnarray*}
\mathrm{last}(k;dir;\bsb{c})=\mathrm{first}(k;1-dir;\bsb{c}),
\end{eqnarray*}
for $dir\in\{0,1\}$.
\item Since the bounding sequence is $\bsb{b}=0\,1\,\ldots\, (n-1)$
it follows that, for $\bsb{c}=0\,0\,\ldots\, 0\,c_ic_{i+1}\ldots c_n$,
$c_i\neq 0$, $\mathrm{first}(k;0;\bsb{c})$ is
\begin{itemize}
\item $a_1a_2\ldots a_{i-1}c_ic_{i+1}\ldots c_n$ if
$k\leq\sum_{j=1}^{i-1}(j-1)=\frac{(i-1)\cdot(i-2)}{2}$,
where $a_1a_2\ldots a_{i-1}$ is the smallest sequence, in co-lex order, in
$S(k,i-1)$,
\item $a_1a_2\ldots a_ic_{i+1}\ldots c_n$ if $k>\frac{(i-1)\cdot(i-2)}{2}$,
where $a_1a_2\ldots a_i$ is the smallest sequence, in co-lex order, in
$S(k+c_i,i)$.
\end{itemize}
\end{enumerate}
\end{Rem}
\begin{figure}
\caption{\label{algos_Gen1_Gray}%
Algorithm {\tt Gen1\_Gray}, generating the set of subexcedant sequences
$S(k,n)$ in Gray code order.}
\end{figure}
Now we introduce the notion of close sequences.
Roughly speaking, two sequences are close if they differ in at most
three adjacent positions and by a bounded amount in these positions.
Definition \ref{3_tuple} below defines formally this notion, and
Theorem \ref{main_th} shows that consecutive subexcedant
sequences generated by {\tt Gen1\_Gray}
are close.
Let $\bsb{s}=s_1s_2\ldots s_n$ and $\bsb{t}=t_1t_2\ldots t_n$ be two subexcedant sequences
of the same weight which differ in at most three adjacent positions, and let $p$
be the rightmost of them (notice that necessarily $p\geq 3$).
The {\it difference} between $\bsb{s}$ and $\bsb{t}$
is the $3$-tuple
$$
(a_1,a_2,a_3)=(s_{p-2}-t_{p-2},s_{p-1}-t_{p-1},s_p-t_p).
$$
Since $\bsb{s}$ and $\bsb{t}$ have the same weight
it follows that $a_1+a_2+a_3=0$; and we denote by $-(a_1,a_2,a_3)$
the tuple $(-a_1,-a_2,-a_3)$.
\begin{De}
\label{3_tuple}
Two sequences $\bsb{s}$ and $\bsb{t}$ in $S(k,n)$ are
{\it close} if:
\begin{itemize}
\item $\bsb{s}$ and $\bsb{t}$ differ in at most three adjacent positions, and
\item if $(a_1,a_2,a_3)$ is the difference between
$\bsb{s}$ and $\bsb{t}$, then
$$
(a_1,a_2,a_3)\in \{\pm(0,1,-1),\pm(0,2,-2),\pm(1,-2,1),\pm(1,-3,2),\pm(1,1,-2),\pm(1,0,-1)\}.
$$
\end{itemize}
\end{De}
Even if the second point of this definition sounds somewhat
arbitrary, it turns out that consecutive sequences generated by
algorithm {\tt Gen1\_Gray} are close under this definition,
and our generating algorithm for permutations with a given
index in Section \ref{gen_perms} is based on it.
\begin{Exa}
The following sequences are close:
$0\underline{12}01$ and $0\underline{03}01$;
$010\underline{03}$ and $010\underline{21}$;
$0\underline{020}1$ and $0\underline{101}1$;
$01\underline{132}$ and $01\underline{204}$;
the positions where the sequences differ are underlined.
Whereas the following sequences are not close:
$0\underline{0211}$ and $0\underline{1030}$ (they differ in more than
three positions);
$01\underline{201}$ and $01\underline{030}$ (the difference $3$-tuple
is not a specified one).
\end{Exa}
\begin{Rem}
\label{rem_inter}
If $\bsb{s}$ and $\bsb{t}$ are two close
subexcedant sequences in $S(k,n)$, then there are at most two `intermediate'
subexcedant sequences $\bsb{s'}$, $\bsb{s''}$ in $S(k,n)$
such that the differences
between $\bsb{s}$ and $\bsb{s'}$,
between $\bsb{s'}$ and $\bsb{s''}$, and
$\bsb{s''}$ and $\bsb{t}$ are
$\pm(1,-1,0)$.
\end{Rem}
\begin{Exa}
\label{un_example}
Let $\bsb{s}=0\,1\,0\,1\,1\,1$ and $\bsb{t}=0\,0\,2\,0\,1\,1$
be two sequences in $S(4,6)$. Then $\bsb{s}$ and $\bsb{t}$ are close since their
difference is $(1,-2,1)$, and there is one `intermediate' sequence
$\bsb{s'}=0\,0\,1\,1\,1\,1$ in $S(4,6)$ with
\begin{itemize}
\item the difference between $\bsb{s}$ and $\bsb{s'}$ is $(1,-1,0)$,
\item the difference between $\bsb{s'}$ and $\bsb{t}$ is $(-1,1,0)$.
\end{itemize}
\end{Exa}
A consequence of Remark \ref{rev_01_rem_reverse}.2 is:
\begin{Rem}$ $
\label{heredit}
If $\bsb{s}$ and $\bsb{t}$ are close subexcedant sequences
and $m$ is an integer such that both $\bsb{u}=\mathrm{first}(m;0;\bsb{s})$
and $\bsb{v}=\mathrm{first}(m;0;\bsb{t})$ exist,
then $\bsb{u}$ and $\bsb{v}$ are also close.
\end{Rem}
\begin{The}
\label{main_th}
Two consecutive sequences in $S(k,n)$ generated by the algorithm
{\tt Gen1\_Gray} are close.
\end{The}
\begin{proof}
Let $\bsb{s}$ and $\bsb{t}$ be two consecutive sequences
generated by the call of {\tt Gen1\_Gray($k$,$n$,$0$)}.
Then there is a sequence $\bsb{c}=c_1c_2\ldots c_n$ and a
recursive call of {\tt Gen1\_Gray} acting on $\bsb{c}$
(referred later as the {\it root call} for $\bsb{s}$ and $\bsb{t}$) which produces,
in the {\tt for} statement, two calls so that $\bsb{s}$ is the
last sequence produced by the first of them and $\bsb{t}$
the first produced by the second of them.
By Remark \ref{rev_01_rem_reverse}.1 it is enough to prove that $\bsb{s}$
and $\bsb{t}$ are close when
their root call has direction $0$.
Let $\ell$ and $r$, $\ell\neq r$, be the leftmost and the rightmost increasable positions
in $\bsb{c}$ (and so $c_1=c_2=\ldots =c_{r-1}=0$, and possibly $c_r=0$);
and $i$ and $i+1$ be the positions where
$\bsb{c}$ is modified by the root call in order to produce
eventually $\bsb{s}$ and $\bsb{t}$. Also we denote $m=k-\sum_{j=1}^n c_j$ and
$e=m-\frac{\ell\cdot (\ell-1)}{2}$.
We will give the shape of $\bsb{s}$ and $\bsb{t}$ according to
the following four cases.
\begin{enumerate}
\item $i=\ell$ and $r-\ell$ is even,
\item $i=\ell$ and $r-\ell$ is odd,
\item $i\neq\ell$ and the call corresponding to $i$ in the {\tt for}
statement of the root call has direction $0$
(and so that corresponding to $i+1$ has direction $1$),
\item $i\neq\ell$ and the call corresponding to $i$ in the {\tt for}
statement of the root call has direction $1$
(and so that corresponding to $i+1$ has direction $0$).
\end{enumerate}
\noindent
Case 1.
\begin{eqnarray*}
\bsb{s} & = & \mathrm{last}(m-e;0;00\ldots e c_{\ell+1}\ldots c_n)\\
& = & \mathrm{first}(m-e;1;00\ldots e c_{\ell+1}\ldots c_n)\\
& = & \left\{ \begin {array}{lcc}
\mathrm{first} (m-e-(\ell-2);0;00\ldots (\ell-2)ec_{\ell+1} \ldots c_n) & {\rm if} & e=\ell-1\\
\mathrm{first} (m-e-(\ell-2);0;00\ldots
(\ell-3)(e+1)c_{\ell+1}\ldots c_n) & {\rm if} &
e<\ell-1,
\end {array}
\right.
\end{eqnarray*}
and
\begin{eqnarray*}
\bsb{t} & = & \mathrm{first} (m-1;0;00\ldots (c_{\ell+1}+1)\ldots c_n)\\
& = & \mathrm{first} (m-e;0;00\ldots (e-1)(c_{\ell+1}+1)\ldots c_n)\\
& = & \mathrm{first} (m-e-(\ell-2);0;00\ldots (\ell-2)(e-1)(c_{\ell+1}+1)\ldots c_n).
\end{eqnarray*}
\noindent
Case 2. In this case $\bsb{s}$ is the same as in the previous case and
\begin{eqnarray*}
\bsb{t} &
= & \mathrm{first}(m-1;1;00\ldots 0(c_{\ell+1}+1)\ldots c_n) \\
&
= & \left\{ \begin {array}{lcc}
\mathrm{first} (m-2;0;00\ldots 0 (c_{\ell+1}+2)\ldots c_n) & {\rm if} & c_{\ell+1}+2\leq \ell\\
\mathrm{first} (m-e;0;00\ldots 0(e-1)(c_{\ell+1}+1)\ldots c_n) & {\rm if} & c_{\ell+1}+2>\ell
\end {array}
\right.\\
&
= & \left\{ \begin {array}{lcc}
\mathrm{first} (m-e-(\ell-2);0;00\ldots 0(\ell-2)(e-2)(c_{\ell+1}+2)\ldots c_n) & {\rm if} & c_{\ell+1}+2\leq \ell\\
\mathrm{first} (m-e-(\ell-2);0;00\ldots (\ell-2)(e-1)(c_{\ell+1}+1)\ldots c_n) &
{\rm if} & c_{\ell+1}+2>\ell.
\end {array}
\right.
\end{eqnarray*}
\noindent
Case 3. In this case $c_i=0$ and
\begin{eqnarray*}
\bsb{s} & = & \mathrm{last} (m-1;0;00\ldots 01c_{i+1}\ldots c_n)\\
& = & \mathrm{last} (m-2;1;00\ldots 02c_{i+1}\ldots c_n)\\
& = & \mathrm{first} (m-2;0;00\ldots 02c_{i+1}\ldots c_n),
\end{eqnarray*}
and
\begin{eqnarray*}
\bsb{t} & = & \mathrm{first} (m-1;1;00\ldots 0(c_{i+1}+1)\ldots c_n)\\
& = &
\left\{ \begin {array}{lcc}
\mathrm{first} (m-2;0;00\ldots 0(c_{i+1}+2)\ldots c_n) & {\rm if} & c_{i+1}+2\leq i\\
\mathrm{first} (m-2;0;00\ldots 1(c_{i+1}+1)\ldots c_n) & {\rm if} & c_{i+1}+2> i.
\end {array}
\right.
\end{eqnarray*}
\noindent
Case 4. As previously, $c_i=0$ and
\begin{eqnarray*}
\bsb{s} & = & \mathrm{last} (m-1;1;00\ldots 01c_{i+1}\ldots c_n)\\
& = & \mathrm{first} (m-1;0;00\ldots 01c_{i+1}\ldots c_n),
\end{eqnarray*}
and
$$\bsb{t}=\mathrm{first} (m-1;0;00\ldots 00(c_{i+1}+1)\ldots c_n).
$$
Finally, by Remark \ref{heredit} it follows that in each of the
four cases $\bsb{s}$ and $\bsb{t}$
are close, and the statement holds.
\end{proof}
As a byproduct of the previous theorem and Remark \ref{rem_2_points}.2 we have
\begin{Rem}
\label{boure}
If $\bsb{s}=s_1s_2\ldots s_n$ and
$\bsb{t}=t_1t_2\ldots t_n$ are two consecutive sequences generated by
{\tt Gen1\_Gray} and $p$ is the rightmost position where
they differ, then
$s_1s_2\ldots s_{p-2}$ and $t_1t_2\ldots t_{p-2}$ are the smallest,
in co-lex order, sequences in $S(x,p-2)$ and $S(y,p-2)$,
respectively, with $x=s_1+s_2+\ldots +s_{p-2}$ and
$y=t_1+t_2+\ldots +t_{p-2}$.
Remark that $s_1s_2\ldots s_{p-2}=t_1t_2\ldots t_{p-2}$,
and so $x=y$, if $\bsb{s}$ and $\bsb{t}$ differ in two (adjacent) positions.
\end{Rem}
\subsection{Algorithm {\tt Gen2\_Gray}}
\begin{figure}
\caption{\label{q_terminal_n}%
An example of q-terminal nodes in the generating tree induced by
algorithm {\tt Gen1\_Gray}.}
\end{figure}
Since the generating tree induced by the call of {\tt Gen1\_Gray}
contains still arbitrary length branches of
nodes of degree one it has a poor time complexity.
Here we show how some of these nodes can be avoided in order to obtain the
efficient generating algorithm {\tt Gen2\_Gray}
presented in Figure \ref{Algo_Gen2Gray}.
A {\it quasi-terminal node} ({\it q-terminal node} for short)
in the tree induced by a generating algorithm
is defined recursively as:
a q-terminal node is either a terminal node
(node with no successor) or a node with only
one successor which in turn is a q-terminal node.
The q-terminal nodes occur for the calls of {\tt Gen1\_Gray($k,r,dir$)}
when $k=\frac{r(r-1)}{2}$.
See Figure~\ref{q_terminal_n} for an example.
The key improvement made by {\tt Gen2\_Gray} consists in its last
parameter $p$, which gives
the rightmost position where the current sequence differs from its previous
one in the list $\mathcal{S}(k,n)$, and {\tt Gen2\_Gray}
stops the recursive calls of more than three
successive q-terminal calls.
Thus, {\tt Gen2\_Gray} generates
only suffixes of the form $c_{p-2}c_{p-1}c_{p}\ldots c_n$;
see Table \ref{list_pref} for an example.
Since two consecutive sequences in the Gray code $\mathcal{S}(k,n)$
differ in at most three adjacent positions, these suffixes
are enough to generate efficiently $\mathcal{S}(k,n)$,
and to generate (in Section \ref{gen_perms}) a Gray code for the set of length
$n$ permutations having the major index equal to $k$.
Now we explain how the parameter $p$ propagates through recursive calls.
A non terminal call of {\tt Gen2\_Gray} produces one or several calls.
The first of them (corresponding to a left child
in the generating tree) inherits the value of the parameter
$p$ from its parent call; in the other calls the value of this parameter
is the rightmost position where the current sequence
differs from its previous generated one; this value is $i$ if $dir=0$ and
$i+1$ if $dir=1$. So, each call keeps in the last
parameter $p$ the rightmost position where the current generated sequence
differs from its previous one in the list ${\mathcal S}(k,n)$.
Procedure {\tt Gen2\_Gray} prevents to produce more than three successive
q-terminal calls. For convenience, initially $p=0$.
The last two parameters $p$ and $u$ of procedure {\tt Gen2\_Gray} and
output by it are used by procedure
{\tt Update\_Perm} in Section \ref{gen_perms} in order to generate
permutations with a given major index; $u$ keeps the value
of $c_1+c_2+\ldots +c_p$, and for convenience, initially $u=0$.
Even though we will not make use of it later, we
sketch below an algorithm for efficiently
generating the list ${\mathcal S}(k,n)$:
\begin{itemize}
\item initialize $\bsb{d}$ by the first sequence in $\mathcal{S}(k,n)$,
i.e., the smallest sequence in $S(k,n)$
in co-lex order, or equivalently, the largest one in lexicographical order,
and $\bsb{c}$ by $0\,0\,\ldots\, 0$,
\item run {\tt Gen2\_Gray($k,n,0,0,0)$} and for each $p$ output by it
update $\bsb{d}$ as: $d[p-2]:=c[p-2]$, $d[p-1]:=c[p-1]$, $d[p]:=c[p]$.
\end{itemize}
\begin{figure}
\caption{\label{Algo_Gen2Gray}%
Algorithm {\tt Gen2\_Gray}, an efficient version of algorithm {\tt Gen1\_Gray}.}
\end{figure}
\begin{table}
\begin{center}
\begin{tabular}{|r|c|c||r|c|c|}
\hline
sequence & $p$ & permutation & sequence & $p$ & permutation\\
\hline
$0\,1\,2\,1\,0\,0$ & & $2\,1\,4\,3\,5\,6$ & $0\,1\,\underline{0\,0\,1}\,2$ & $5$ & $5\,3\,6\,1\,2\,4$ \\
$0\,\underline{1\,0\,3}\,0\,0$ & $4$ & $3\,2\,4\,1\,5\,6$ & $\underline{0\,0\,1}\,0\,1\,2$ & $3$ & $6\,3\,5\,1\,2\,4$ \\
$\underline{0\,0\,1}\,3\,0\,0$ & $3$ & $4\,2\,3\,1\,5\,6$ & $0\,\underline{0\,0\,1}\,1\,2$ & $4$ & $1\,3\,5\,6\,2\,4$ \\
$0\,\underline{0\,2\,2}\,0\,0$ & $4$ & $4\,1\,3\,2\,5\,6$ & $0\,0\,\underline{0\,0\,2}\,2$ & $5$ & $2\,3\,5\,6\,1\,4$ \\
$\underline{0\,1\,1}\,2\,0\,0$ & $3$ & $3\,1\,4\,2\,5\,6$ & $0\,0\,0\,\underline{0\,0\,4}$ & $6$ & $3\,4\,5\,6\,1\,2$ \\
$0\,1\,\underline{2\,0\,1}\,0$ & $5$ & $2\,1\,5\,3\,4\,6$ & $0\,0\,0\,\underline{0\,1\,3}$ & $6$ & $2\,4\,5\,6\,1\,3$\\
$0\,\underline{1\,1\,1}\,1\,0$ & $4$ & $3\,1\,5\,2\,4\,6$ & $0\,0\,\underline{0\,1\,0}\,3$ & $5$ & $1\,4\,5\,6\,2\,3$ \\
$\underline{0\,0\,2}\,1\,1\,0$ & $3$ & $5\,1\,3\,2\,4\,6$ & $0\,\underline{0\,1\,0}\,0\,3$ & $4$ & $6\,4\,5\,1\,2\,3$ \\
$0\,\underline{0\,0\,3}\,1\,0$ & $4$ & $1\,2\,3\,5\,4\,6$ & $\underline{0\,1\,0}\,0\,0\,3$ & $3$ & $5\,4\,6\,1\,2\,3$ \\
$0\,\underline{0\,1\,2}\,1\,0$ & $4$ & $5\,2\,3\,1\,4\,6$ & $0\,1\,0\,\underline{0\,2\,1}$ & $6$ & $4\,3\,6\,1\,2\,5$ \\
$\underline{0\,1\,0}\,2\,1\,0$ & $3$ & $3\,2\,5\,1\,4\,6$ & $\underline{0\,0\,1}\,0\,2\,1$ & $3$ & $6\,3\,4\,1\,2\,5$ \\
$0\,1\,\underline{0\,0\,3}\,0$ & $5$ & $4\,3\,5\,1\,2\,6$ & $0\,\underline{0\,0\,1}\,2\,1$ & $4$ & $1\,3\,4\,6\,2\,5$ \\
$\underline{0\,0\,1}\,0\,3\,0$ & $3$ & $5\,3\,4\,1\,2\,6$ & $0\,0\,\underline{0\,0\,3}\,1$ & $5$ & $2\,3\,4\,6\,1\,5$ \\
$0\,\underline{0\,0\,1}\,3\,0$ & $4$ & $1\,3\,4\,5\,2\,6$ & $0\,0\,\underline{0\,2\,1}\,1$ & $5$ & $1\,2\,4\,6\,3\,5$ \\
$0\,0\,\underline{0\,0\,4}\,0$ & $5$ & $2\,3\,4\,5\,1\,6$ & $0\,\underline{0\,1\,1}\,1\,1$ & $4$ & $6\,2\,4\,1\,3\,5$ \\
$0\,0\,\underline{0\,2\,2}\,0$ & $5$ & $1\,2\,4\,5\,3\,6$ & $\underline{0\,1\,0}\,1\,1\,1$ & $3$ & $4\,2\,6\,1\,3\,5$ \\
$0\,\underline{0\,1\,1}\,2\,0$ & $4$ & $5\,2\,4\,1\,3\,6$ & $0\,\underline{0\,2\,0}\,1\,1$ & $4$ & $6\,1\,4\,2\,3\,5$ \\
$\underline{0\,1\,0}\,1\,2\,0$ & $3$ & $4\,2\,5\,1\,3\,6$ & $\underline{0\,1\,1}\,0\,1\,1$ & $3$ & $4\,1\,6\,2\,3\,5$\\
$0\,\underline{0\,2\,0}\,2\,0$ & $4$ & $5\,1\,4\,2\,3\,6$ & $0\,1\,\underline{1\,1\,0}\,1$ & $5$ & $3\,1\,6\,2\,4\,5$ \\
$\underline{0\,1\,1}\,0\,2\,0$ & $3$ & $4\,1\,5\,2\,3\,6$ & $\underline{0\,0\,2}\,1\,0\,1$ & $3$ & $6\,1\,3\,2\,4\,5$ \\
$0\,1\,1\,\underline{0\,0\,2}$ & $6$ & $5\,1\,6\,2\,3\,4$ & $0\,\underline{0\,0\,3}\,0\,1$ & $4$ & $1\,2\,3\,6\,4\,5$\\
$\underline{0\,0\,2}\,0\,0\,2$ & $3$ & $6\,1\,5\,2\,3\,4$ & $0\,\underline{0\,1\,2}\,0\,1$ & $4$ & $6\,2\,3\,1\,4\,5$\\
$0\,\underline{0\,0\,2}\,0\,2$ & $4$ & $1\,2\,5\,6\,3\,4$ & $\underline{0\,1\,0}\,2\,0\,1$ & $3$ & $3\,2\,6\,1\,4\,5$ \\
$0\,\underline{0\,1\,1}\,0\,2$ & $4$ & $6\,2\,5\,1\,3\,4$ & $0\,\underline{1\,2\,0}\,0\,1$ & $4$ & $2\,1\,6\,3\,4\,5$ \\
$\underline{0\,1\,0}\,1\,0\,2$ & $3$ & $5\,2\,6\,1\,3\,4$ & & & \\
\hline
\end{tabular}
\end{center}
\caption{
\label{list_pref}The subexcedant sequences generated by the call of {\tt Gen1\_Gray($4,6,0$)}
and their corresponding length $6$ permutations with major index equal to $4$;
the descent set of these permutations is either $\{1,3\}$ or $\{4\}$.
The three leftmost entries ($c_{p-2}$,$c_{p-1}$,$c_p$) updated by the call of
{\tt Gen2\_Gray($4,6,0,0,0$)} are underlined, where
$p$ is the rightmost position where a subexcedant
sequence differs from its predecessor.
}
\end{table}
\subsubsection*{Analyze of {\tt Gen2\_Gray}}
For a call of {\tt Gen2\_Gray($k$,$r$,$dir$,$p$,$u$)}
necessarily $k\leq\frac{r(r-1)}{2}$, and
if $k>0$ and
\begin{itemize}
\item $k\leq \frac{(r-1)(r-2)}{2}$, then this call
produces at least two recursive calls,
\item $\frac{(r-1)(r-2)}{2}<k<\frac{r(r-1)}{2}$,
then this call produces a unique recursive call
(of the form {\tt Gen2\_Gray($k'$,$r$,$\cdot$,$\cdot$,$\cdot$)},
with $k'=k-\frac{(r-1)(r-2)}{2}$),
which in turn produces two calls,
\item $k=\frac{r(r-1)}{2}$, then this call is q-terminal call.
\end{itemize}
Since the procedure {\tt Gen2\_Gray} stops after three successive
q-terminal calls, with a slight modification of Ruskey and van Baronaigien's \cite{Roe_93}
{\it `CAT'} principle (see also \cite{Rus_00}) it follows that {\tt Gen2\_Gray} runs in constant
amortized time.
\section{The McMahon code of a permutation}
\label{sec_Mc_code}
Here we present the bijection
$\psi:S(n)\rightarrow \frak S_n$, introduced in \cite{Vaj_11}, which has the following properties:
\begin{itemize}
\item the image through $\psi$ of $S(k,n)$ is the set of permutations in $\frak S_n$ with
major index $k$,
\item $\psi$ is a `Gray code preserving bijection' (see Theorem \ref{sigma_t_a}),
\item $\tau$ is easily computed from $\sigma$ and from the difference between $\bsb{s}$ and $\bsb{t}$,
the McMahon codes of $\sigma$ and $\tau$, if $\bsb{s}$ and $\bsb{t}$ are close.
\end{itemize}
In the next section we apply $\psi$ in order
to construct a list for the permutations in $\frak S_n$ with major index
equal to $k$ from the Gray code list $\mathcal{S}(k,n)$.
Let permutations act on indices, i.e., for
$\sigma=\sigma_1\,\sigma_2\, \ldots \,\sigma_n$ and
$\tau=\tau_1\,\tau_2\, \ldots \,\tau_n$
two permutations in $\frak S_n$,
$\sigma\cdot\tau=\sigma_{\tau_1}\,\sigma_{\tau_2}\, \ldots \,\sigma_{\tau_n}$.
For a fixed integer $n$, let $k$ and $u$ be two integers, $0\leq k<u\leq n$,
and define
$[\hspace{-0.3mm}[ u,k ]\hspace{-0.3mm}]\in\frak S_n$ as the permutation obtained after
$k$ right circular shifts of the length-$u$ prefix of the identity
in $\frak S_n$.
In two line notation
$$
[\hspace{-0.3mm}[ u,k ]\hspace{-0.3mm}]=
\left(
\begin{array}{cccccccccc}
1 & 2 & \cdots & k & k+1 & \cdots & u & u+1 &\cdots & n \\
u-k+1 & u-k+2 & \cdots & u & 1 &\cdots & u-k & u+1 &\cdots & n
\end{array}
\right).
$$
For example, in $\frak S_5$ we have:
$[\hspace{-0.3mm}[ 3,1]\hspace{-0.3mm}]=\underline{3\,1\,2}\,4\,5$,
$[\hspace{-0.3mm}[ 3,2]\hspace{-0.3mm}]=\underline{2\,3\,1}\,4\,5$ and
$[\hspace{-0.3mm}[ 5,3]\hspace{-0.3mm}]=\underline{3\,4\,5\,1\,2}$ (the rotated elements are underlined).
Let
$\psi:S(n)\rightarrow \frak S_n
$
be the function defined by
\begin{equation}
\label{def_psi}
\begin{array}{ccl}
\psi(t_1t_2\ldots t_n)
& = & [\hspace{-0.3mm}[ n,t_n]\hspace{-0.3mm}]\cdot [\hspace{-0.3mm}[ n-1,t_{n-1}]\hspace{-0.3mm}]\cdot\ldots\cdot [\hspace{-0.3mm}[ i,t_i]\hspace{-0.3mm}]\cdot
\ldots \cdot[\hspace{-0.3mm}[ 2,t_2]\hspace{-0.3mm}]\cdot [\hspace{-0.3mm}[ 1,t_1 ]\hspace{-0.3mm}] \\
& = & \displaystyle \prod_{i=n}^1[\hspace{-0.3mm}[ i,t_i]\hspace{-0.3mm}].
\end{array}
\end{equation}
\begin{Lem}[\cite{Vaj_11}]$ $
\begin{enumerate}
\item
The function $\psi$ defined above is a bijection.
\item
For every $\bsb{t}=t_1t_2\ldots t_n\in S(n)$, we have
${\scriptstyle \mathsf{MAJ}} \prod_{i=n}^1[\hspace{-0.3mm}[ i,t_i]\hspace{-0.3mm}]=\sum_{i=1}^nt_i$.
\end{enumerate}
\end{Lem}
The first point of the previous lemma says that every permutation
$\pi\in\frak S_n$ can be uniquely written as
$\prod_{i=n}^1[\hspace{-0.3mm}[ i,t_i]\hspace{-0.3mm}]$ for some $t_i$'s, and
the subexcedant sequence $t_1t_2\ldots t_n$ is called the
{\it McMahon code} of $\pi$.
As a consequence of the second point of this lemma we have:
\begin{Rem}
The restriction of $\psi$ to $S(k,n)$ maps bijectively the sequences in
$S(k,n)$ onto the permutations in $\frak S_n$ with major index equal to $k$.
\end{Rem}
\begin{Exa}
The permutation $\pi =5\,2\,1\,6\,4\,3\in \frak S_6$ can be obtained from
the identity by the following prefix rotations:
$$1\,2\,3\,4\,5\,6
\overset{[\hspace{-0.3mm}[ 6,3 ]\hspace{-0.3mm}]}{\longrightarrow}
4\,5\,6\,1\,2\,3
\overset{[\hspace{-0.3mm}[ 5,4 ]\hspace{-0.3mm}]}{\longrightarrow}
5\,6\,1\,2\,4\,3
\overset{[\hspace{-0.3mm}[ 4,2 ]\hspace{-0.3mm}]}{\longrightarrow}
1\,2\,5\,6\,4\,3
\overset{[\hspace{-0.3mm}[ 3,2 ]\hspace{-0.3mm}]}{\longrightarrow}
2\,5\,1\,6\,4\,3
\overset{[\hspace{-0.3mm}[ 2,1 ]\hspace{-0.3mm}]}{\longrightarrow}
5\,2\,1\,6\,4\,3
\overset{[\hspace{-0.3mm}[ 1,0 ]\hspace{-0.3mm}]}{\longrightarrow}
5\,2\,1\,6\,4\,3,
$$
so
$$
\pi=
[\hspace{-0.3mm}[ 6,3]\hspace{-0.3mm}]\cdot[\hspace{-0.3mm}[ 5,4 ]\hspace{-0.3mm}]\cdot[\hspace{-0.3mm}[ 4,2 ]\hspace{-0.3mm}]\cdot[\hspace{-0.3mm}[ 3,2
]\hspace{-0.3mm}]\cdot[\hspace{-0.3mm}[ 2,1 ]\hspace{-0.3mm}]\cdot[\hspace{-0.3mm}[ 1,0 ]\hspace{-0.3mm}],$$
and thus
$$
{\scriptstyle \mathsf{MAJ}}\ \pi =3+4+2+2+1+0=12.
$$
\end{Exa}
Theorem \ref{sigma_t_a} below states that if two permutations have
their McMahon code differing in two adjacent positions, and by
$1$ and $-1$ in these positions, then these permutations differ by the transposition
of two entries.
Before proving this theorem we need the following
two propositions,
where the transposition $\langle u, v\rangle$ denotes the permutation
$\pi$ (of convenient length) with $\pi(i)=i$ for all $i$, except $\pi(u)=v$
and $\pi(v)=u$.
\begin{Pro}
\label{first_trans}
Let $n,u$ and $v$ be three integers,
$n\geq 3$, $0\leq u\leq n-2$, $1\leq v\leq n-2$, and
$\sigma,\tau\in\frak S_n$ defined by:
\begin{itemize}
\item $\sigma= [\hspace{-0.3mm}[ n,u]\hspace{-0.3mm}]\ \cdot [\hspace{-0.3mm}[ n-1,v]\hspace{-0.3mm}]$, and
\item $\tau = [\hspace{-0.3mm}[ n,u+1]\hspace{-0.3mm}] \cdot [\hspace{-0.3mm}[ n-1,v-1]\hspace{-0.3mm}]$.
\end{itemize}
Then
$$
\tau=\sigma\cdot\langle n, v\rangle.
$$
\end{Pro}
\begin{proof}
First, remark that:
\begin{itemize}
\item $[\hspace{-0.3mm}[ n,u+1]\hspace{-0.3mm}]$, is a right circular shift of $[\hspace{-0.3mm}[ n,u]\hspace{-0.3mm}]$, and
\item $[\hspace{-0.3mm}[ n-1,v-1]\hspace{-0.3mm}]$ is a left circular shift of
the first $(n-1)$ entries of $[\hspace{-0.3mm}[ n-1,v]\hspace{-0.3mm}]$,
\end{itemize}
and so $\sigma(i)=\tau(i)$ for all $i$, $1\leq i\leq n$, except for
$i=n$ and $i=v$.
\end{proof}
\begin{Exa}
For $n=7$, $u=4$ and $v=3$ we have
\begin{itemize}
\item $\sigma=[\hspace{-0.3mm}[ n,u ]\hspace{-0.3mm}]\cdot [\hspace{-0.3mm}[ n-1,v ]\hspace{-0.3mm}]=[\hspace{-0.3mm}[ 7,4]\hspace{-0.3mm}]\cdot [\hspace{-0.3mm}[
6,3]\hspace{-0.3mm}]=7\,1\,2\,4\,5\,6\,3$,
\item $\tau= [\hspace{-0.3mm}[ n,u+1]\hspace{-0.3mm}]\cdot [\hspace{-0.3mm}[ n-1,v-1]\hspace{-0.3mm}]=[\hspace{-0.3mm}[ 7,5]\hspace{-0.3mm}]\cdot [\hspace{-0.3mm}[ 6,2]\hspace{-0.3mm}]=
7\,1\,3\,4\,5\,6\,2$,
\item $\langle n,v\rangle=\langle 7,3\rangle$,
\end{itemize}
and $\tau=\sigma\cdot \langle n,v\rangle$.
\end{Exa}
\begin{Pro}
\label{before_th}
If $\pi\in\frak S_n$ and $\langle u,v\rangle$ is a transposition in
$\frak S_n$, then
$$\pi^{-1}\cdot \langle u,v\rangle\cdot\pi=
\langle\pi^{-1}(u),\pi^{-1}(v)\rangle.$$
\end{Pro}
\begin{proof}
Indeed, $(\pi^{-1}\cdot \langle u,v\rangle\cdot\pi)(i)=i$,
for all $i$, except for $i=\pi^{-1}(u)$ and $i=\pi^{-1}(v)$.
\end{proof}
\begin{The}
\label{sigma_t_a}
Let $\sigma$ and $\tau$ be two permutations in $\frak S_n $, $n\geq 3$, and
$\bsb{s}=s_1s_2\ldots s_n$ and $\bsb{t}=t_1t_2\ldots t_n$
their McMahon codes. If there is a $f$, $2\leq f\leq n-1$
such that $t_i=s_i$ for all $i$, except $t_f=s_f-1$
and $t_{f+1}=s_{f+1}+1$, then $\tau$ and $\sigma$ differ by a
transposition. More precisely,
$$
\tau=\sigma \cdot \langle \alpha^{-1}(u), \alpha^{-1}(v)\rangle
$$
where
$$
\alpha=\prod_{i=f-1}^{1}[\hspace{-0.3mm}[ i,s_i ]\hspace{-0.3mm}]=\prod_{i=f-1}^{1}[\hspace{-0.3mm}[ i,t_i ]\hspace{-0.3mm}],
$$
and $u=f+1$, $v=s_f$.
\end{The}
\begin{proof} $ $
\begin{itemize}
\item
$\tau=\prod_{i=n}^{1}[\hspace{-0.3mm}[ i,t_i ]\hspace{-0.3mm}]$, and so
$\tau\cdot\alpha^{-1}=\prod_{i=n}^{f}[\hspace{-0.3mm}[ i,t_i ]\hspace{-0.3mm}]$, and
\item
$\sigma=\prod_{i=n}^{1}[\hspace{-0.3mm}[ i,s_i ]\hspace{-0.3mm}]$, and
$\sigma\cdot\alpha^{-1}=\prod_{i=n}^{f}[\hspace{-0.3mm}[ i,s_i ]\hspace{-0.3mm}]$.
\end{itemize}
But, by Proposition \ref{first_trans},
$$
\prod_{i=n}^{f}[\hspace{-0.3mm}[ i,t_i ]\hspace{-0.3mm}]=
\prod_{i=n}^{f}[\hspace{-0.3mm}[ i,s_i ]\hspace{-0.3mm}]\cdot
\langle f+1,s_f\rangle
$$
or, equivalently
$$
\tau\cdot\alpha^{-1}=\sigma\cdot\alpha^{-1}\cdot\langle f+1,s_f\rangle,
$$
and by Proposition \ref{before_th}, the result holds.
\end{proof}
The previous theorem says that $\sigma$ and $\tau$
`have a small difference' provided that their McMahon code,
$\bsb{s}$ and $\bsb{t}$, do so. Actually, we need that
$\bsb{s}$ and $\bsb{t}$ are consecutive sequences
in the list $\mathcal{S}(k,n)$
and they have a more particular shape (see Remark \ref{boure}).
In this context, permutations having minimal
McMahon code play a particular role.
It is routine to check the following proposition (see Figure \ref{Fig_3}
for an example).
\begin{Pro}
\label{alpha_n_k}
Let $n$ and $k$ be two integers, $0<k\leq\frac{n(n-1)}{2}$;
$\bsb{a}=a_1a_2\ldots a_n$ be the smallest subexcedant sequence in co-lex order
with $\sum_{i=1}^n a_i=k$, and
$\alpha=\alpha_{n,k}=\psi(\bsb{a})$ be the permutation in $\frak S_n$ having
its McMahon code $\bsb{a}$.
Let $j=\max\, \{ i : a_i\neq 0\}$, that is, $\bsb{a}$ has the form
$$ 012\ldots (j-3)(j-2)a_j00\ldots 0.$$
Then
\begin{equation}
\label{def_alpha}
\alpha(i)=\left\{ \begin {array}{ccc}
j-a_j-i & {\rm if} & 1\leq i\leq j-(a_j+1), \\
2j-a_j-i & {\rm if} & j-(a_j+1)<i\leq j, \\
i & {\rm if} & i>j.
\end {array}
\right.
\end{equation}
\end{Pro}
\begin{figure}
\caption{\label{Fig_3}%
The permutation $\alpha=2\,1\,6\,5\,4\,3\,8\,7$ with the McMahon code
$\bsb{a}$.}
\end{figure}
\begin{Rem}
\label{rem_inv}
The permutation $\alpha$ defined in Proposition \ref{alpha_n_k}
is an involution, that is $\alpha^{-1}=\alpha$.
\end{Rem}
Combining Proposition \ref{alpha_n_k} and Remark \ref{rem_inv},
Theorem \ref{sigma_t_a} becomes in particular
\begin{Pro}
\label{combi}
Let $\sigma$, $\tau$, $\bsb{s}$ and $\bsb{t}$ be as in Theorem \ref{sigma_t_a}.
In addition, suppose that there is a $j$, $0\leq j\leq f-1$, such that
\begin{itemize}
\item[1.] $s_i=t_i=0$ for $j<i\leq f-1$, and
\item[2.] if $j>0$, then
\begin{itemize}
\item $s_j=t_j\neq 0$, and
\item $s_i=t_i=i-1$ for $1\leq i<j$.
\end{itemize}
\end{itemize}
Then
$$
\tau=\sigma \cdot \langle \phi_j(f+1), \phi_j(s_f)\rangle
$$
with
\begin{equation}
\label{phi}
\phi_j(i)=\left\{ \begin {array}{ccc}
j-s_j-i & {\rm if} & 1\leq i\leq j-(s_j+1), \\
2j-s_j-i & {\rm if} & j-(s_j+1)<i\leq j, \\
i & {\rm if} & i>j.
\end {array}
\right.
\end{equation}
\end{Pro}
\noindent
Notice that, the conditions 1 and 2 in the previous proposition
require that $s_1s_2\ldots s_{f-1}=t_1t_2\ldots t_{f-1}$
be the smallest subexcedant sequence, in co-lex order, in $S(f-1)$ with
fixed value for $\sum_{i=1}^{f-1}s_i=\sum_{i=1}^{f-1}t_i$.
Also, for point 2, necessarily $j\geq 2$.
\section{Generating permutations with a given major index}
\label{gen_perms}
Let $\sigma$ and $\tau$ be two permutations
with their McMahon code $\bsb{s}=s_1s_2\ldots s_n$
and $\bsb{t}=t_1t_2\ldots t_n$ belonging to $S(k,n)$, and
differing in positions $f$ and $f+1$ by $1$ and $-1$ in
these positions.
Let
\begin{itemize}
\item $v=s_f-t_f\in \{-1,1\}$, and
\item $x=\sum_{i=1}^{f-1}s_i=\sum_{i=1}^{f-1}t_i$.
\end{itemize}
If $s_1s_2\ldots s_{f-1}$ is the smallest sequence in
$S(x,f-1)$, in co-lex order, then applying Proposition \ref{combi}
it follows that the run of the procedure {\tt transp($v,f,x$)} defined in
Figure~\ref{algos_transp} transforms $\sigma$ into $\tau$ and $\bsb{s}$ into $\bsb{t}$.
\begin{figure}
\caption{\label{algos_transp}%
Procedure {\tt transp}.}
\end{figure}
Let now $f$ be the leftmost position where two consecutive
sequences $\bsb{s}$ and $\bsb{t}$ in the list $\mathcal{S}(k,n)$
differ, and $\sigma$ and $\tau$ be the permutations
having their McMahon codes $\bsb{s}$ and $\bsb{t}$.
By Remarks \ref{rem_inter} and \ref{boure} we have that,
repeated calls of {\tt transp}
transform $\bsb{s}$ into $\bsb{t}$, and $\sigma$ into $\tau$.
This is true for each possible
$3$-tuples given in Definition~\ref{3_tuple} and
corresponding to two consecutive
subexcedant sequences in $\mathcal{S}(k,n)$, and
algorithm {\tt Update\_Perm} in Figure \ref{algos_update}
exhausts all these $3$-tuples.
For example, if $\bsb{s}$ and $\bsb{t}$ are the two sequences in
Example \ref{un_example} with their difference $(1,-2,1)$, $f=2$ and
$x=0$, then the calls
{\tt transp($1,f,x$)}; \\
\indent {\tt transp($-1,f+1,x+s[f]$)};\\
\noindent
transform $\bsb{s}$ into $\bsb{t}$ and
$\sigma$ into $\tau$.
Algorithm {\tt Gen2\_Gray} provides $p$,
the rightmost position where the current sequence $\bsb{c}$ differs
from the previous generated one, and $u=\sum_{i=1}^p c_i$.
Algorithm {\tt Update\_Perm} uses $f$, the leftmost position where
$\bsb{c}$ differs from the previous generated sequence, and
$x=\sum_{i=1}^{f-1}c_i$.
\begin{figure}
\caption{The algorithm {\tt Update\_Perm}.}
\label{algos_update}
\end{figure}
Now, we sketch the generating algorithm for the set
of permutations in $\frak S_n$ having index $k$.
\begin{itemize}
\item initialize $\bsb{s}$ by the smallest, in co-lex order, sequence
in $S(k,n)$ and $\sigma$ by the permutation in $\frak S_n$
having its McMahon code $\bsb{s}$,
\item run {\tt Gen2\_Gray($k,n,0,0,0$)} where
{\tt output($p,u$)} is replaced by {\tt Update\_Perm($p,u$)}.
\end{itemize}
The obtained list of permutations is the image of the Gray code
$\mathcal{S}(k,n)$ through the bijection $\psi$ defined in relation
(\ref{def_psi}); it consists of all permutations in $\frak S_n$ with
major index equal to $k$, and two consecutive permutations differ
by at most three transpositions.
See Table \ref{list_pref} for the list of permutations in $\frak S_6$
with major index $4$.
\section{Final remarks}
\label{Conc}
Numerical evidence shows that if we change the generating order
of algorithm {\tt Gen\_Colex} as for
{\tt Gen1\_Gray}, but without restricting it to subexcedant
sequences, then the obtained list for bounded compositions
is still a Gray code with the closeness definition slightly relaxed:
two consecutive compositions differ in at most four adjacent positions.
Also, T. Walsh gives in \cite{Walsh} an efficient
generating algorithm for a Gray code
for bounded compositions of an
integer, and in particular for subexcedant sequences.
In this Gray code two consecutive sequences
differ in two positions and by $1$ and $-1$ in these positions;
but these positions can be arbitrarily far,
and so the image of this Gray code through the bijection
$\psi$ defined by relation (\ref{def_psi}) in Section \ref{sec_Mc_code}
does not give a Gray code for
permutations with a fixed index.
\end{document} |
\begin{document}
\title{$\mathbb{Z}_3\times \mathbb{Z}_3$ crossed products}
\author{Eliyahu Matzri}
\thanks{This work was supported by the U.S.-Israel Binational Science Foundation (grant no. 2010149).}
\thanks{The author also thanks Daniel Krashen and UGA for hosting him while this work was done.}
\maketitle
\begin{abstract}
Let $A$ be the generic abelian crossed product with respect to $\mathbb{Z}_3\times \mathbb{Z}_3$, in this note we show that $A$ is similar to the tensor product of 4 symbol algebras (3 of degree 9 and one of degree 3) and if $A$ is of exponent $3$ it is similar to the product of 31 symbol algebras of degree $3$. We then use \cite{RS} to prove that if $A$ is any algebra of degree $9$ then $A$ is similar to the product of $35840$ symbol algebras ($8960$ of degree $3$ and $26880$ of degree $9$) and if $A$ is of exponent $3$ it is similar to the product of $277760$ symbol algebras of degree $3$. We then show that the essential $3$-dimension of the class of $A$ is at most $6$.
\end{abstract}
\section{Introduction}
Throughout this note we let $F$ be a field containing all necessary roots of unity, denoted $\rho_n$.
The well known Merkurjev-Suslin theorem says that: assuming $F$ contains a primitive $n$-th root of $1$, there is an isomorphism
$\psi : K_2(F)/nK_2(F)\longrightarrow \Br(F)_n$ sending the symbol $\{a,b\}$ to the symbol algebra $(a,b)_{n,F}$.
In particular the $n$-th torsion part of the Brauer group is generated by symbol algebras of degree $n$.
This means every $A\in \Br(F)_n$ is similar (denoted $\sim$) to the tensor product of symbol algebras of degree $n$.
However, their proof is not constructive. It thus raises the following questions. Let $A$ be an algebra of degree $n$ and exponent $m$. Can one explicitly write $A$ as the tensor product of degree $m$ symbol algebras? Also, what is the smallest number of factors needed to express $A$ as the tensor product of degree $m$ symbol algebras? This number is sometimes called the Merkurjev-Suslin number.
These questions turn out to be quite hard in general and not much is known. Here is a short summary of some known results.
\begin{enumerate}
\item Every degree $2$ algebra is isomorphic to a quaternion algebra.
\item Every degree $3$ algebra is cyclic thus if $\rho_3\in F$ it is isomorphic to a symbol algebra (Wedderburn \cite{W}).
\item Every degree $4$ algebra of exponent $2$ is isomorphic to a product of two quaternion algebras (Albert \cite{Al}).
\item Every degree $p^n$ symbol algebra of exponent $p^m$ is similar to the product of $p^{n-m}$ symbol algebras of degree $p^m$(Tignol \cite{T1}).
\item Every degree $8$ algebra of exponent $2$ is similar to the product of four quaternion algebras (Tignol \cite{T2}).
\item Every abelian crossed product with respect to $\mathbb{Z}_n\times \mathbb{Z}_2$ is similar to the product of a symbol algebra of degree $2n$ and a quaternion algebra, in particular, due to Albert \cite{Al}, every degree $4$ algebra is similar to the product of a degree $4$ symbol algebra and a quaternion algebra (Lorenz, Rowen, Reichstein, Saltman \cite{LRRS}).
\item Every abelian crossed product with respect to $(\mathbb{Z}_2)^4$ of exponent $2$ is similar to the product of $18$ quaternion algebras (Sivatski \cite{SV}).
\item Every $p$-algebra of degree $p^n$ and exponent $p^m$ is similar to the product of $p^n-1$ cyclic algebras of degree $p^m$ (Florence \cite{MF}).
\end{enumerate}
In this paper we prove theorems \ref{MT2} and \ref{MT3} stating:
\textit{Let $A$ be an abelian crossed product with respect to $\mathbb{Z}_3\times \mathbb{Z}_3$. Then
\begin{enumerate}
\item $A$ is similar to the product of $4$ symbol algebras ($3$ of degree $9$ and one of degree $3$).
\item If $A$ is of exponent $3$ then $A$ is similar to the product of $31$ symbol algebras of degree $3$.
\end{enumerate}
}
We then use \cite{RS} to deduce the general case of an algebra of degree $9$ to get theorem \ref{MT4} stating:
\textit{
Let $A$ be an $F$-central simple algebra of degree $9$. Then
\begin{enumerate}
\item $A$ is similar to the product of $35840$ symbol algebras, ($8960$ of degree $3$ and $26880$ of degree $9$).
\item If $A$ is of exponent $3$ then $A$ is similar to the product of $277760$ symbol algebras of degree $3$.
\end{enumerate}}
\section{$\mathbb{Z}_p\times \mathbb{Z}_p$ abelian crossed products}
Let $A$ be the generic abelian crossed product with respect to $G=\mathbb{Z}_p\times \mathbb{Z}_p$ over $F$, where $p$ is an odd prime. In the notation of \cite{AS} this means:
$A=(E,G,b_1,b_2,u)=E[z_1,z_2| z_i e z_i^{-1}=\sigma_i(e); z_1^p=b_1; z_2^p=b_2; z_2z_1=uz_1z_2; b_i\in E_i= E^{<\sigma_i>}; u\in E^{\times} \ s.t. \N_{E/F}(u)=1]$ where ${\operatorname{Gal}}(E/F)=<\sigma_1,\sigma_2>\cong G$.
Let $A$ be as above.
Write $E=E_1E_2$ where $E_1=F[t_1| \ t_1^{p}=f_1\in F^{\times}]$ and $E_2=F[t_2| \ t_2^{p}=f_2\in F^{\times}]$ thus we have $z_i t_i z_i^{-1}=\sigma_i(t_i)=t_i$ and $z_1 t_2=\rho_pt_2z_1; \ z_2 t_1=\rho_pt_1z_2$.
Since $b_i\in E_i$ we can write $b_1=c_0+c_1t_1+...+c_{p-1}t_1^{p-1}; \ b_2=a_0+a_1t_2+...+a_{p-1}t_2^{p-1}$ where $a_i,c_i\in F^{\times}$.
\begin{prop}\label{1}
Define $v=e_1z_1+e_2z_2$ for $e_i\in E$. If $v\neq 0$, then $[F[v^p]:F]=p$.
\end{prop}
\begin{proof}
First we compute $vt_1t_2=(e_1z_1+e_2z_2)t_1t_2=e_1z_1t_1t_2+ e_2z_2t_1t_2=\rho_pt_1t_2e_1z_1+ \rho_pt_1t_2e_2z_2=\rho_pt_1t_2(e_1z_1+e_2z_2)=\rho_pt_1t_2v$.
Thus $v^p$ commutes with $t_1t_2$ where $v$ does not, implying $[F[v]:F[v^p]]=p$. By the definition of $v$ we have $v\notin F$. Thus $\deg(A)=p^2$ imply $[F[v]:F]\in \{p,p^2\}$. If $[F[v]:F]=p$ we get that $A$ contains the sub-algebra
generated by $t_1t_2, v$ which is a degree $p$ symbol over $F$ and by the double centralizer this will imply that $A$ is decomposable which is not true in the generic case. Thus
$[F[v]:F]=p^2$ implying $[F[v^p]:F]=p$ and we are done.
\end{proof}
The first step we take is to find a $v$ satisfying $\Tr(v^p)=0$.
In order to achieve that we will tensor $A$ with an $F$-symbol of degree $p$.
Define $B=(E_1,\sigma_2, \frac{-c_0}{a_0})\sim (E,G,1,\frac{-c_0}{a_0},1)$. Now by \cite{AS} $ A\otimes B$ is similar to $C=(E,G,b_1,\frac{-c_0}{a_0}b_2,u)$.
Abusing notation we write $z_1,z_2$ for the new ones in $C$.
\begin{prop}
Defining $v=z_1+z_2$ in $C$ we have $\Tr(v^p)=0$.
\end{prop}
\begin{proof}
First notice that $C=\sum_{i,j=0}^{p-1} Ez_1^iz_2^j$. Thus $C_0=\{d\in C | \ \Tr(d)=0\}=E_0 +\sum_{i,j=0;(i,j)\neq(0,0)}^{p-1} Ez_1^iz_2^j$ where $E_0=\sum_{i,j=0;(i,j)\neq(0,0)}^{p-1} Ft_1^it_2^j$ is the set of trace zero elements of $E$.
Now computing we see $v^p=z_1^p+e_{p-1,1}z_1^{p-1}z_2+....+e_{1,p-1}z_2^{p-1}z_1+z_2^p=b_1+e_{p-1,1}z_1^{p-1}z_2+....+e_{1,p-1}z_2^{p-1}z_1+b_2$ where $e_{i,j}\in E$. Define $r=v^p-(b_1+b_2)$. Clearly $\Tr(r)=0$, since the powers of $z_1,z_2$ in all monomial appearing in $r$ are less then $p$ and at least one is greater than zero. Thus, $v^p=b_1+b_2+r=c_0+c_1t_1+...+c_{p-1}t_1^{p-1}+(-c_0+\frac{-c_0a_1}{a_0}t_2+...+\frac{-c_0a_{p-1}}{a_0}t_2^{p-1})+r\in C_0$, and we are done.
\end{proof}
\begin{prop}
$K\doteqdot F[t_1t_2,v^p]$ is a maximal subfield of $C$.
\end{prop}
\begin{proof}
First, notice $C$ is a division algebra of degree $p^2$. To see this assume it is not, then it is similar to a degree $p$ algebra, $D$.
Thus $A\otimes B$ is similar to $D$, which implies $A$ is isomorphic to $D\otimes B^{op}$. But then $A$ has exponent $p$ which is false.
In the proof of \ref{1} we saw that $[v^p,t_1t_2]=0$ so we are left with showing $[K:F]=p^2$. Assuming $[K:F]=p$, we have $v^p\in F[t_1t_2]$. Let $\sigma$ be a generator of ${\operatorname{Gal}}(F[t_1t_2]/F)=<\sigma>$. Clearly $z_ix=\sigma(x)z_i$ for $i=1,2$ and $x\in F[t_1t_2]$, hence $vx=\sigma(x)v$, that is $\sigma(x)=vxv^{-1}$. In particular, $\sigma(v^p)=vv^pv^{-1}=v^p$, implying $v^p\in F$. But then $C$ contains the sub algebra $F[t_1t_2,v]$ which is an F-csa of degree $p$, thus by the double centralizer $C$ would decompose into two degree $p$ algebras. This will imply that $A$ has exponent $p$, which is false.
\end{proof}
The next step is to make $K$ Galois. Let $T$ be the Galois closure of $F[v^p]$.
Its Galois group is a subgroup of $S_p$ so has a cyclic $p$-Sylow subgroup, define $L$ to be the fixed subfield.
Clearly $F[v^p]\otimes L$ is Galois, with group $\mathbb{Z}_p$.
Thus in $C_L$ we have $K_L$ as a maximal Galois subfield with group $\mathbb{Z}_p\times \mathbb{Z}_p$.
Now writing $C_L$ as an abelian crossed product we have $C_L=(K,G,b_1,b_2,u)$ where this time we have $\Tr(b_2)=0$. Thus we can write $K_L=L[t_1t_2,t_3 | (t_1t_2)^p=f_1f_2; t_3^p=l\in L],$ \ $b_1\in L[t_1t_2]$ and $b_2=l_1t_3+...+l_{p-1}t_3^{p-1}$.
Now we change things even more. Define $D=(f_1f_2,(-\frac{f_1f_2}{l_1})^pl^{-1})_{p^2,L}=(K_L,G,t_1t_2,-\frac{f_1f_2}{l_1}(t_3)^{-1},\rho_{p^2})$ and again by \cite{AS} we have
$R\doteqdot C_L\otimes D=(K_L,G,t_1t_2b_1,-f_1f_2-\frac{f_1f_2l_2}{l_1}t_3-...-\frac{f_1f_2l_{p-2}}{l_1}t_3^{p-2},\rho_{p^2} u)$.
\section{Generic $\mathbb{Z}_3\times \mathbb{Z}_3$ abelian crossed products}
From now we specialize to $p=3$.
\begin{prop}
$R$ from the end of the previous section is a symbol algebra of degree $9$.
\end{prop}
\begin{proof}
This proof is just as in \cite{LRRS}.
Since we assume $\rho_9\in F$ it is enough to find a $9$-central element.
Notice that in $R$ we have $z_2t_1t_2=\rho_3 t_1t_2z_2$; $z_2^3=-f_1f_2-\frac{f_1f_2l_2}{l_1}t_3$ and $(t_1t_2)^3=f_1f_2$.
Thus defining $x=t_1t_2+z_2$ we get $x^3=(t_1t_2+z_2)^3=(t_1t_2)^3+z_2^3=-\frac{f_1f_2l_2}{l_1}t_3$ implying $x^9=-(\frac{f_1f_2l_2}{l_1})^3 l\in L$.
Thus $R=(l_3,-(\frac{f_1f_2l_2}{l_1})^3 l)_{9,L}$ for some $l_3\in L$ and we are done.
\end{proof}
All of the above gives the following theorem:
\begin{thm}\label{MT1}
Let $A$ be a generic abelian crossed product with respect to $\mathbb{Z}_3\times \mathbb{Z}_3$. Then after a quadratic extension $L/F$ we have
$A_L$ is similar to $R\otimes D^{-1}\otimes B^{-1}$ where $R,D,B$ are symbols as above.
\end{thm}
In order to go down to $F$ we take corestriction. Using Rosset-Tate and the projection formula, (\cite{GT} 7.4.11 and 7.2.7), we get:
\begin{thm}\label{MT2}
Let $A$ be a generic abelian crossed product with respect to $\mathbb{Z}_3\times \mathbb{Z}_3$. Then
$A=\sum_{i=1}^{4}C_i$ where $C_1,C_2,C_3$ are symbols of degree 9 and $C_4$ is a symbol of degree 3.
\end{thm}
\begin{proof}
One gets $C_1,C_2$ from the corestriction of $R$ using R.T. $C_3$ from the corestriction of $D$ using the projection formula and $C_4$ comes from $B$.
\end{proof}
\section{The exponent $3$ case}
In this section we will consider the case were $\exp(A)=3$.
Notice that from \ref{MT1} $A_L \sim R\otimes D^{-1}\otimes B^{-1} = (a,b)_{9,L}\otimes(\gamma,c)_{9,L} \otimes (\alpha,\beta)_{3,L}$ where $\alpha,\beta,\gamma \in F^{\times}$ and $a,b,c\in L^{\times}$.
\begin{thm}\label{MT3}
Assume $A$ has exponent $3$, then $A$ is similar to the sum of $16$ degree $3$ symbols over a quadratic extension and $31$ degree $3$ symbols over $F$.
\end{thm}
\begin{proof}
The idea for this proof is credited to L.H. Rowen, U. Vishne and E. Matzri.
Since $\exp(A)=3$ we have $F\sim A^3\sim R^3\otimes D^{-3}\otimes B^{-3}\sim R^3\otimes D^{-3}\sim (a,b)_{3,L}\otimes(\gamma,c)_{3,L}$.
Thus we get $(a,b)_{3,L}=(\gamma,c^{-1})_{3,L}$. Now by the chain lemma for degree 3 symbols in \cite{Rost} or \cite{MV} we have $x_{1,2,3} \in L^{\times}$ such that: $$(a,b)_{3,L}=(a,x_1)_{3,L}= (x_2,x_1)_{3,L}= (x_2,x_3)_{3,L}= (\gamma,x_3)_{3,L}=(\gamma,c^{-1})_{3,L}$$
Now we write $$(a,\frac{b}{x_1})_{9,L}\otimes(\frac{a}{x_2},x_1)_{9,L}\otimes(x_2,\frac{x_1}{x_3})_{9,L}\otimes(\frac{x_2}{\gamma},x_3)_{9,L}\otimes(\gamma,x_3c)_{9,L}\sim (a,b)_{9,L}\otimes(\gamma,c)_{9,L}$$
Thus $A\sim (a,b)_{9,L}\otimes(\gamma,c)_{9,L} \otimes (\alpha,\beta)_{3,L}\sim (a,\frac{b}{x_1})_{9,L}\otimes(\frac{a}{x_2},x_1)_{9,L}\otimes(x_2,\frac{x_1}{x_3})_{9,L}\otimes(\frac{x_2}{\gamma},x_3)_{9,L}\otimes(\gamma,x_3c)_{9,L}\otimes (\alpha,\beta)_{3,L}$ where now all the degree $9$ symbols are of exponent $3$.
But by a theorem of Tignol, \cite{T1}, each of these symbols is similar to the product of three degree $3$ symbols. Thus we have that $A_L$ is similar to the product of $16$ degree $3$ symbols and over $F$ to the product of $31$ symbols of degree $3$ and we are done.
\end{proof}
\section{The general case of a degree $9$ algebra}
In this section we combine the results of sections $2$ and $3$ with \cite{RS} to handle the general case of a degree $9$ algebra of exponent $9$ and $3$.
Let $A$ be a $F$-central simple algebra of degree $9$.
The first step would be to follow \cite{RS} to find a field extension $P/F$
such that $A_P$ is an abelian crossed product with respect to $\mathbb{Z}_3\times \mathbb{Z}_3$ and $[P:F]$ is prime to $3$.
The argument in \cite{RS} basically goes as follows:
Let $K\subset A$ be a maximal subfield, i.e. $[K:F]=9$.
Now let $F\subset K \subset E$ be the normal closure of $K$ over $F$.
Since we know nothing about $K$ we have to assume $G={\operatorname{Gal}}(E/F)=S_9$.
Let $H<G$ be a $3$-Sylow subgroup and $L=E^H$, then $[L:F]=4480$.
Now extend scalars to $L$, then $KL\subset A_L$ as a maximal subfield. By Galois correspondence $KL=E^{H_1}$ for some subgroup $H_1<H$ and $[H:H_1]=[KL:L]=9$.
Since $H$ is a $3$-group we can find $H_1\triangleleft H_2\triangleleft H$ such that $[H:H_2]=3$ thus we have $L=E^H\subset E^{H_2} \subset KL=E^{H_1}\subset E$ and since $H_2 \triangleleft H$ we know the extension $E^{H_2}/L$ is Galois with group~${\operatorname{Gal}}(E^{H_2}/L)=<\sigma>\cong H/H_2\cong C_3$.
Thus in $A_L$ we have the subfield $E^{H_2}$ which has a nontrivial $L$-automorphism $\sigma$. Now let $z\in A$ be an element inducing $\sigma$ (such $z$ exists by Skolem-Noether). Consider the subfield $L[z]/L$, since $z^3$ commutes with $E^{H_2}$ and $z$ does not $[L[z]:L[z^3]]=3$.
In the best case scenario we have $L[z^3]=L$ which will imply $A_L$ decomposes into the tensor product of two symbols of degree $3$ and we are done. In the general case we will have $[L[z^3]:L]=3$. If $L[z^3]/L$ is Galois we are done since $E^{H_2}[z^3]$ will be a maximal subfield Galois over $L$ with group isomorphic to $\mathbb{Z}_3\times \mathbb{Z}_3$, but again in general this should not be the case. However we can extend scalars to make $L[z^3]/L$ Galois, in particular consider $P=L[\disc(L[z^3])]$ then, $[P:L]=2$ and $P[z^3]/P$ is Galois and we are done.
To summarize we have found an extension $P=L[\disc(L[z^3])]$ with $[P:F]=4480\cdot 2=8960$ such that $A_P$ contains a maximal subfield $PE^{H_2}[z^3]/P$ Galois over $P$ with group isomorphic to $\mathbb{Z}_3\times \mathbb{Z}_3$.
Combining the above with the results of sections $2,3$ and using Rosset-Tate we get the following theorem.
\begin{thm}\label{MT4}
Let $A$ be an $F$-central simple algebra of degree $9$. Then
\begin{enumerate}
\item $A$ is similar to the product of $35840$ symbol algebras, ($8960$ of degree $3$ and $26880$ of degree $9$).
\item If $A$ is of exponent $3$ then $A$ is similar to the product of $ 277760$ symbol algebras of degree $3$.
\end{enumerate}
\end{thm}
\section{Application to essential dimension}
In \cite{M} Merkurjev computes the essential $p$-dimension of $PGL_{p^2}$ relative to a fixed field $k$ to be $p^2+1$.
One can interpret this result as follows:
Let $F$ be a field of definition (relative to a base field $k$) for the generic division algebra of degree $p^2$. Let $E/F$ be the prime to $p$ closure of $F$. Let $l$, $k\subset l \subset E$, be a subfield of $E$ over which $A$ is defined. Then $l/k$ has transcendence degree at least $p^2+1$ (and such $l$ exists with transcendence degree exactly $p^2+1$).
It makes sense to define the essential dimension and the essential $p$-dimension of the class of an algebra $A$ (with respect to a fixed base field $k$).
\begin{defn}
Let $A\in \Br(F)$. Define the essential dimension and the essential $p$-dimension of the class of $A$ (with respect to a fixed base field $k$) as:
$$\edc(A)=\min\{\ed(B) | B\sigmaim A\}$$
$$\edc_p(A)=\min\{\ed_p(B) | B\sigmaim A\}$$
\end{defn}
Notice that \cite{M} for p=2 gives $\ed_2(PGL_{2^2})=5$ and for $p=3$ it gives $\ed_3(PGL_{3^2})=10$.
Now assume $F$ is prime to $p$ closed. Then as proved in \cite{RS} every $F$-csa of degree $p^2$ is actually an abelian crossed product with respect to $\mathbb{Z}_p \times \mathbb{Z}_p$.
Thus, in this language, in \cite{LRRS} they prove:
\begin{thm}
Let $A$ be a generic division algebra of degree $4$, then
$\edc(A)=\edc_2(A)=4$
\end{thm}
For $p=3$ Theorem \ref{MT2} says:
\begin{thm}
Let $A$ be a generic division algebra of degree $9$, then
$\edc_3(A)\leq 6$
\end{thm}
\end{document} |
\begin{document}
\title{The relation between the counting function $N\left( \lambda\right) $ and the
heat kernel $K\left( t\right) $}
\author{BY WU-SHENG DAI and MI XIE \thanks{We are very indebted to Dr G. Zeitrauman
for his encouragement. This work is supported in part by NSF of China, under
Project No.10605013 and No.10675088.}}
\date{}
\maketitle
\begin{abstract}
For a given spectrum $\left\{ \lambda_{n}\right\} $ of the Laplace operator
on a Riemannian manifold, in this paper, we present a relation between the
counting function $N\left( \lambda\right) $, the\ number of eigenvalues
(with multiplicity) smaller than $\lambda$, and the heat kernel $K\left(
t\right) $, defined by $K\left( t\right) =\sum_{n}e^{-\lambda_{n}t}$.
Moreover, we also give an asymptotic formula for $N\left( \lambda\right) $
and discuss when $\lambda\rightarrow\infty$ in what cases $N\left(
\lambda\right) =K\left( 1/\lambda\right) $.
\end{abstract}
The relation between the spectrum of the Laplace operator on a Riemannian
manifold and the geometry of this Riemannian manifold is an important subject
\cite{Berard,Berger,Milnor,GWW}, and the problem of spectral asymptotics is
one of the central problems in the theory of partial differential operators
\cite{Ivrii}. For a given spectrum $\left\{ \lambda_{n}\right\} $ of the
Laplace operator on a Riemannian manifold, one can in principle obtain the
counting function $N\left( \lambda\right) $, defined to be
\begin{equation}
N\left( \lambda\right) =\text{the\ number of eigenvalues (with multiplicity)
of the Laplace operator smaller than }\lambda\text{,}
\end{equation}
and the heat kernel, defined to be
\begin{equation}
K\left( t\right) =\sum_{n}e^{-\lambda_{n}t}.
\end{equation}
One of the main problems is to seek the asymptotic expansions of $N\left(
\lambda\right) $ and $K\left( t\right) $. Usually, it is relatively easy to
obtain the asymptotic expansion of the heat kernel $K\left( t\right) $.
Nevertheless, it is difficult to calculate the asymptotic expansion of the
counting function $N\left( \lambda\right) $ \cite{Berger}. The
Hardy-Littlewood-Karamata Tauberian theorem gives the first term of the
asymptotic expansion of $N\left( \lambda\right) $ \cite{Kac}, but does not
provide any information beyond the first-order term. In this paper, we point
out a relation between $N\left( \lambda\right) $ and $K\left( t\right) $.
\begin{theorem}
\begin{equation}
K\left( t\right) =t\int_{0}^{\infty}N\left( \lambda\right) e^{-\lambda
t}d\lambda. \label{035}
\end{equation}
\end{theorem}
\begin{proof}
The generalized Abel partial summation formula reads
\begin{equation}
\sum_{u_{1}<\lambda_{n}\leq u_{2}}b\left( n\right) f\left( \lambda
_{n}\right) =B\left( u_{2}\right) f\left( u_{2}\right) -B\left(
u_{1}\right) f\left( u_{1}\right) -\int_{u_{1}}^{u_{2}}B\left( u\right)
f^{\prime}\left( u\right) du,\label{040}
\end{equation}
where $\lambda_{i}\in\mathbb{R}$, $\lambda_{1}\leq\lambda_{2}\leq\cdots
\leq\lambda_{n}\leq\cdots$, and $\lim_{n\rightarrow\infty}\lambda_{n}=\infty$.
$f\left( u\right) $ is a continuously differentiable function on $\left[
u_{1},u_{2}\right] $ $\left( 0\leq u_{1}<u_{2}\text{, }\lambda_{1}\leq
u_{2}\right) $, $b\left( n\right) $ $\left( n=1,2,3,\cdots\right) $ are
arbitrary complex numbers, and $B\left( u\right) =\sum_{\lambda_{n}\leq
u}b\left( n\right) $. We apply the generalized Abel partial summation
formula, Eq. (\ref{040}), with $f\left( u\right) =e^{-u\left(
s-s_{0}\right) }$ and $b\left( n\right) =a_{n}e^{-\lambda_{n}s_{0}}$, where
$s$, $s_{0}\in\mathbb{C}$. Then
\begin{equation}
A\left( u_{2},s\right) -A\left( u_{1},s\right) =A\left( u_{2}
,s_{0}\right) e^{-u_{2}\left( s-s_{0}\right) }-A\left( u_{1},s_{0}\right)
e^{-u_{1}\left( s-s_{0}\right) }+\left( s-s_{0}\right) \int_{u_{1}}
^{u_{2}}A\left( u,s_{0}\right) e^{-u\left( s-s_{0}\right) }du,\label{050}
\end{equation}
where
\begin{equation}
A\left( u,s\right) =\sum_{\lambda_{n}\leq u}a_{n}e^{-\lambda_{n}
s}.\label{055}
\end{equation}
Setting $a_{n}=1$ in Eq. (\ref{055}), we find
\[
A\left( \lambda,0\right) =\sum_{\lambda_{n}\leq\lambda}1=N\left(
\lambda\right) ,
\]
the counting function, and
\[
A\left( \infty,t\right) =\sum_{n}e^{-\lambda_{n}t}=K\left( t\right) ,
\]
the heat kernel. By Eq. (\ref{055}), we also have $A\left( 0,t\right) =0$.
Then, by Eq. (\ref{050}), we have
\begin{equation}
K\left( t\right) =A\left( \infty,t\right) -A\left( 0,t\right) =t\int
_{0}^{\infty}N\left( \lambda\right) e^{-\lambda t}d\lambda. \label{060}
\end{equation}
This is just Eq. (\ref{035}).
\end{proof}
Furthermore we can also obtain the following theorem.
\begin{theorem}
\begin{equation}
N\left( \lambda\right) =\frac{1}{2\pi i}\int_{c-i\infty}^{c+i\infty}K\left(
t\right) \frac{e^{\lambda t}}{t}dt,\text{ \ \ }c>\lim_{n\rightarrow\infty
}\frac{\ln n}{\lambda_{n}}. \label{065}
\end{equation}
\end{theorem}
\begin{proof}
By the Perron formula, we have
\begin{equation}
\sum_{\mu_{n}<x}a_{n}=\frac{1}{2\pi i}\int_{c-i\infty}^{c+i\infty}f\left(
t\right) \frac{x^{t}}{t}dt,\label{080}
\end{equation}
where
\begin{equation}
f\left( s\right) =\sum_{n=1}^{\infty}\frac{a_{n}}{\mu_{n}^{s}},\label{070}
\end{equation}
and $c$ is a constant which is greater than the abscissa of absolute
convergence of the Dirichlet series $f\left( s\right) $. Setting
\[
a_{n}=1\text{ \ and }\mu_{n}=e^{\lambda_{n}}
\]
in Eq. (\ref{070}), we obtain the heat kernel,
\[
f\left( t\right) =\sum_{n=1}^{\infty}e^{-\lambda_{n}t}=K\left( t\right) .
\]
The abscissa of absolute convergence of $f\left( t\right) $ equals its
abscissa of convergence, equaling $\overline{\lim}_{n\rightarrow\infty}\ln
n/\lambda_{n}=\lim_{n\rightarrow\infty}\ln n/\lambda_{n}$. Thus, by Eq.
(\ref{080}), we have
\[
N\left( \lambda\right) =\sum_{\lambda_{n}<\lambda}1=\frac{1}{2\pi i}
\int_{c-i\infty}^{c+i\infty}K\left( t\right) \frac{e^{\lambda t}}{t}dt,
\]
and $c>\lim_{n\rightarrow\infty}\ln n/\lambda_{n}$. This proves the theorem.
\end{proof}
The above two theorems give the relation between the counting function
$N\left( \lambda\right) $ and the heat kernel $K\left( t\right) $.
One of the reasons why the counting function $N\left( \lambda\right)
=\sum_{\lambda_{n}<\lambda}1$ is very difficult to calculate is that one often
encounters some unsolved problems in number theory when calculating $N\left(
\lambda\right) $. For example, when calculating the counting function for the
spectrum of the Laplace operator on a torus, one encounters the Gauss circle
problem in number theory \cite{Berger}. In the following we will give an
asymptotic formula for $N\left( \lambda\right) $.
\begin{theorem}
\begin{equation}
N\left( \lambda\right) =\sum_{n}\frac{1}{e^{\beta\left( \lambda_{n}
-\lambda\right) }+1},\text{ \ \ }\left( \beta\rightarrow\infty\right) .
\label{100}
\end{equation}
\end{theorem}
\begin{proof}
Observing that
\[
\lim_{\beta\rightarrow\infty}\frac{1}{e^{\beta\left( \lambda_{n}
-\lambda\right) }+1}=\left\{
\begin{array}
[c]{cc}
1, & \text{ when }\lambda_{n}<\lambda,\\
0, & \text{ when }\lambda_{n}>\lambda,
\end{array}
\right.
\]
we have
\[
\lim_{\beta\rightarrow\infty}\sum_{n}\frac{1}{e^{\beta\left( \lambda
_{n}-\lambda\right) }+1}=\sum_{\lambda_{n}<\lambda}1=N\left( \lambda\right)
.
\]
\end{proof}
\begin{remark}
The asymptotic formula for $N\left( \lambda\right) $ given by Eq.
(\ref{100}) converts a partial sum ($\sum_{\lambda_{n}<\lambda}$) into a sum
over all possible values ($\sum_{\lambda_{n}<\infty}$). This will make the
calculation somewhat easy.
\end{remark}
In some cases the counting function approximately equals the heat kernel.
\begin{theorem}
Let $\rho\left( \lambda\right) $ be the number of eigenstates per unit
interval (the density of eigenstates). In the limit $\lambda\rightarrow\infty$
or $t\rightarrow0$,
\begin{equation}
N\left( \lambda\right) =K\left( \frac{1}{\lambda}\right) \text{\ or
\ }N\left( \frac{1}{t}\right) =K\left( t\right) , \label{090}
\end{equation}
when $\rho\left( \lambda\right) $ is a constant.
\end{theorem}
\begin{proof}
In the limit $\lambda\rightarrow\infty$ or $t\rightarrow0$, the summations can
be converted into integrals:
\begin{align*}
N\left( \lambda\right) & =\sum_{\lambda_{n}\leq\lambda}1=\int_{0}
^{\lambda}\rho\left( \lambda^{\prime}\right) d\lambda^{\prime},\\
K\left( t\right) & =\sum_{n}e^{-\lambda_{n}t}=\int_{0}^{\infty}\rho\left(
\lambda^{\prime}\right) e^{-\lambda^{\prime}t}d\lambda^{\prime}.
\end{align*}
If $\rho\left( \lambda\right) =C$, where $C$ is a constant, then
\begin{align*}
N\left( \lambda\right) & =C\lambda,\\
K\left( t\right) & =\frac{C}{t}.
\end{align*}
This proves the theorem.
\end{proof}
This is just the case that Weyl \cite{Weyl}, Pleijel \cite{Pleijel}, and Kac
\cite{Kac} discussed.
\vskip 1cm
{\footnotesize SCHOOL OF SCIENCE, TIANJIN UNIVERSITY, TIANJIN, P. R. CHINA }
{\footnotesize LIUHUI CENTER FOR APPLIED MATHEMATICS, NANKAI UNIVERSITY \&
TIANJIN UNIVERSITY, TIANJIN, P. R. CHINA }
{\footnotesize E-\textit{mail address}: daiwusheng@tju.edu.cn\newline }
{\footnotesize SCHOOL OF SCIENCE, TIANJIN UNIVERSITY, TIANJIN, P. R. CHINA }
{\footnotesize LIUHUI CENTER FOR APPLIED MATHEMATICS, NANKAI UNIVERSITY \&
TIANJIN UNIVERSITY, TIANJIN, P. R. CHINA }
{\footnotesize E-\textit{mail address}: xiemi@tju.edu.cn }
\end{document} |
\begin{document}
\title{Uniform test of algorithmic randomness over a general space}
\author{Peter G\'acs}
\address{Boston University}
\email{gacs@bu.edu}
\date{\today}
\begin{abstract}
The algorithmic theory of randomness is well developed when the underlying
space is the set of finite or infinite sequences and the underlying
probability distribution is the uniform distribution or a computable
distribution.
These restrictions seem artificial.
Some progress has been
made to extend the theory to arbitrary Bernoulli distributions (by
Martin-L\"of), and to arbitrary distributions (by Levin). We recall the main
ideas and problems of Levin's theory, and report further progress in the
same framework.
The issues are the following:
{
\settowidth\leftmarginii{\hspace{0.5pc}--}
\begin{enumerate}[--]
\item Allow non-compact spaces (like the space of continuous functions,
underlying the Brown\-ian motion).
\item The uniform test (deficiency of randomness) $\mathbf{d}_{P}(x)$
(depending both on the outcome $x$ and the measure $P$) should be defined in
a general and natural way.
\item See which of the old results survive:
existence of universal tests,
conservation of randomness,
expression of tests in terms of description complexity,
existence of a universal measure,
expression of mutual information as "deficiency of independence".
\item The negative of the new randomness test is shown to be a generalization of
complexity in continuous spaces; we show that the addition theorem
survives.
\end{enumerate}
The paper's main contribution is introducing an appropriate framework for
studying these questions and related ones (like statistics for a general
family of distributions).
}
\end{abstract}
\keywords{algorithmic information theory, algorithmic entropy,
randomness test, Kolmogorov complexity, description complexity}
\subjclass{60A99; 68Q30}
\maketitle
\section{Introduction}
\subsection{Problem statement}
The algorithmic theory of randomness is well developed when the underlying
space is the set of finite or infinite sequences and the underlying
probability distribution is the uniform distribution or a computable
distribution.
These restrictions seem artificial.
Some progress has been
made to extend the theory to arbitrary Bernoulli distributions by
Martin-L\"of in~\cite{MLof66art}, and to arbitrary distributions, by Levin
in~\cite{LevinRand73,LevinUnif76,LevinRandCons84}.
The paper~\cite{HertlingWeihrauchRand98} by Hertling and Weihrauch
also works in general spaces, but it is restricted to computable
measures.
Similarly, Asarin's thesis~\cite{Asarin88} defines randomness for sample
paths of the Brownian motion: a fixed random process with computable
distribution.
The present paper has been inspired mainly by Levin's early
paper~\cite{LevinUnif76} (and the much more elaborate~\cite{LevinRandCons84}
that uses different definitions): let us summarize part of the content
of~\cite{LevinUnif76}.
The notion of a constructive topological space $\mathbf{X}$ and the space of
measures over $\mathbf{X}$ is introduced.
Then the paper defines the notion of a uniform test.
Each test is a lower semicomputable function $(\mu,x) \mapsto f_{\mu}(x)$,
satisfying $\int f_{\mu}(x) \mu(dx) \leqslant 1$ for each measure $\mu$.
There are also some additional conditions.
The main claims are the following.
\begin{enumerate}[\upshape (a)]
\item There is a universal test $\mathbf{t}_{\mu}(x)$, a test
such that for each other test $f$ there is a constant $c > 0$ with
$f_{\mu}(x) \leqslant c\cdot \mathbf{t}_{\mu}(x)$.
The \emph{deficiency of randomness} is defined as
$\mathbf{d}_{\mu}(x) = \log\mathbf{t}_{\mu}(x)$.
\item The universal test has some strong properties of ``randomness
conservation'': these say, essentially,
that a computable mapping or a computable
randomized transition does not decrease randomness.
\item There is a measure $M$ with the property that for
every outcome $x$ we have $\mathbf{t}_{M}(x) \leqslant 1$.
In the present paper, we will call such measures \mathbf{d}f{neutral}.
\item\label{i.Levin.semimeasure}
Semimeasures (semi-additive measures)
are introduced and it is shown that there is a lower semicomputable
semimeasure that is neutral (so we can assume that the $M$ introduced above
is lower semicomputable).
\item Mutual information $I(x : y)$
is defined with the help of (an appropriate
version of) Kolmogorov complexity, between outcomes $x$ and $y$.
It is shown that $I(x : y)$ is essentially equal to $\mathbf{d}_{M \mathbf{t}imes M}(x,y)$.
This interprets mutual information as a kind of ``deficiency of independence''.
\end{enumerate}
This impressive theory leaves a number of issues unresolved:
\begin{enumerate}[\upshape 1.]
\item The space of outcomes is restricted to be a compact topological
space, moreover, a particular compact space: the set of
sequences over a finite alphabet (or, implicitly in~\cite{LevinRandCons84},
a compactified infinite alphabet).
However, a good deal of modern probability theory happens over
spaces that are not even locally compact:
for example, in case of the Brownian motion, over the
space of continuous functions.
\item The definition of a uniform randomness test includes some conditions
(different ones in~\cite{LevinUnif76} and
in~\cite{LevinRandCons84}) that seem somewhat arbitrary.
\item No simple expression is known for the general
universal test in terms of description complexity.
Such expressions are nice to have if they are available.
\end{enumerate}
\subsection{Content of the paper}
The present paper intends to carry out as much of Levin's program as seems
possible after removing the restrictions.
It leaves a number of questions open, but we feel that they are worth
to be at least formulated.
A fairly large part of the paper
is devoted to the necessary conceptual machinery.
Eventually, this
will also allow to carry further some other initiatives started in the
works~\cite{MLof66art} and~\cite{LevinRand73}: the study of tests that test
nonrandomness with respect to a whole class of measures (like the Bernoulli
measures).
Constructive analysis has been developed by several authors,
converging approximately on the same concepts.
We will make use of a simplified version of the theory introduced
in~\cite{WeihrauchComputAnal00}.
As we have not found a constructive measure theory in the
literature fitting our purposes, we will develop this theory here,
over (constructive) complete separable metric spaces.
This generality is well supported by standard results in measure
theoretical probability, and is
sufficient for constructivizing a large part of current probability
theory.
The appendix recalls some of the needed topology, measure theory
and constructive analysis.
Constructive measure theory is introduced in Section~\ref{s.constr-meas}.
Section~\ref{s.unif-test} introduces uniform randomness tests.
It proves the existence of universal uniform tests, under a reasonable
assumption about the topology (``recognizable Boolean inclusions'').
Then it proves conservation of randomness.
Section~\ref{s.complexity} explores the relation between description
(Kolmogorov) complexity and uniform randomness tests.
After extending randomness tests
over non-normalized measures, its negative logarithm will be seen
as a generalized description complexity.
The rest of the section explores the extent to which the old results
characterizing a random infinite string by the description complexity of
its segments can be extended to the new setting.
We will see that the simple formula working for computable measures over
infinite sequences does not generalize.
However, still rather simple formulas are available in some cases: namely,
the discrete case with general measures, and a space allowing a certain
natural cell decomposition, in case of computable measures.
Section~\ref{s.neutral} proves Levin's theorem about the existence of a
neutral measure, for compact spaces.
Then it shows that the result does not generalize to non-compact spaces,
not even to the discrete space.
It also shows that with our definition of tests, the neutral measure cannot
be chosen semicomputable, even in the case of the discrete space
with one-point compactification.
Section~\ref{s.rel-entr} takes up the idea of viewing
the negative logarithm of a randomness test as generalized description
complexity.
Calling this notion \mathbf{d}f{algorithmic entropy}, this section explores its
information-theoretical properties.
The main result is a (nontrivial)
generalization of the addition theorem of prefix
complexity (and, of course, classical entropy) to the new setting.
\subsection{Some history}
Attempts to define randomness rigorously have
a long but rather sparse history starting with von Mises and continuing
with Wald, Church, Ville.
Kolmogorov's work in this area inspired Martin-L\"of whose
paper~\cite{MLof66art} introduces the notion of randomness used here.
Description complexity has been introduced independently by
Solomonoff, Kolmogorov and Chaitin.
Prefix complexity has been introduced independently by Levin and Chaitin.
See~\cite{LiViBook97} for a discussion of priorities and contributions.
The addition theorem (whose generalization is given here) has been proved
first for Kolmogorov complexity, with a logarithmic error term, by
Kolmogorov and Levin.
For the prefix complexity its present form has been proved
jointly by Levin and G\'acs in~\cite{GacsSymm74}, and
independently by Chaitin in~\cite{Chaitin75}.
In his PhD thesis, Martin-L\"of also characterized randomness of finite
sequences via their complexity.
For infinite sequences, complete characterizations of their
randomness via the complexity of their segments were given
by Levin in~\cite{LevinRand73}, by Schnorr
in~\cite{Schnorr73} and in~\cite{Chaitin75} (attributed).
Of these, only Levin's result is formulated for general computable measures:
the others apply only to coin-tossing.
Each of these works uses a different variant of description complexity.
Levin uses monotone complexity and the logarithm of the universal
semicomputable measure (see~\cite{GacsRel83} for the difficult proof that
these two complexities are different).
Schnorr uses ``process complexity'' (similar to monotone
complexity) and prefix complexity.
The work~\cite{GacsExact80} by the present author gives
characterizations using the original Kolmogorov
complexity (for general computable measures).
Uniform tests over the space of infinite sequences,
randomness conservation and neutral measures
were introduced in Levin's work~\cite{LevinUnif76}.
The present author could not verify every result in that paper (which
contains no proofs); he reproduced most of them with a changed definition
in~\cite{GacsExact80}.
A universal uniform test with yet another definiton appeared
in~\cite{LevinRandCons84}.
In this latter work, ``information conservation'' is a central tool
used to derive several results in logic.
In the constellation of Levin's concepts, information conservation
becomes a special case of randomness conservation.
We have not been able to reproduce this exact relation with our definition
here.
The work~\cite{GacsBoltzmann94} is based on the observation that
Zurek's idea on ``physical'' entropy and the ``cell volume'' approach of
physicists to the definition of entropy can be unified: Zurek's entropy
can be seen as an approximation of the limit arising in a
characterization of a randomness test by complexity.
The author discovered in this same paper that the negative logarithm of a
general randomness test can be seen as a generalization of complexity.
He felt encouraged by the discovery of the generalized addition theorem
presented here.
The appearence of other papers in the meantime
(including~\cite{HertlingWeihrauchRand98}) convinced the author that
there is no accessible and detailed reference work on
algorithmic randomness for general measures and general spaces,
and a paper like the present one, developing the foundations, is needed.
(Asarin's thesis~\cite{Asarin88} does develop the theory of randomness for
the Brownian motion.
It is a step in our direction in the sense that the space is not compact,
but it is all done for a single explicitly given computable measure.)
We do not advocate the uniform randomness test proposed here
as necessarily the ``definitive'' test concept.
Perhaps a good argument can be found for some additional conditions,
similar to the ones introduced by Levin, providing additional structure
(like a semicomputable neutral measure) while preserving naturalness
and the attractive properties presented here.
\subsection{Notation for the paper}
(Nothing to do with the formal concept of ``notation'', introduced
later in the section on constructive analysis.)
The sets of natural numbers, integers, rational numbers, real numbers
and complex numbers
will be denoted respectively by
$\mathbb{N}, \mathbb{Z}, \mathbb{Q}, \mathbb{R}, \mathbb{C}$.
The set of nonnegative real numbers will be denoted by $\mathbb{R}_{+}$.
The set of real numbers with $-\infty,\infty$ added (with the appropriate
topology making it compact) will be denoted by $\ol\mathbb{R}$.
We use $\et$ and $\V$ to denote $\min$ and $\max$, further
\[
|x|^{+} = x\V 0,\quad |x|^{-} = |-x|^{+}
\]
for real numbers $x$.
We partially follow~\cite{WeihrauchComputAnal00},
\cite{BrattkaComputTopStruct03} and~\cite{HertlingWeihrauchRand98}.
In particular, adopting the notation of~\cite{WeihrauchComputAnal00},
we denote intervals of the real line as follows, (to avoid the conflict
with the notation of a pair $(a,b)$ of objects).
\[
\clint{a}{b} = \setof{x : a \leqslant x \leqslant b}, \quad
\opint{a}{b} = \setof{x : a < x < b},
\quad \lint{a}{b} = \setof{x : a \leqslant x < b}.
\]
If $X$ is a set then $X^{*}$ is
the set of all finite strings made up of elements of $X$, including the
``empty string'' $\Lg$.
We denote by $X^{\og}$ the set of all infinite sequences of elements of
$X$.
If $A$ is a set then $1_{A}(x)$ is its indicator function, defined to
be 1 if $x\in A$ and to 0 otherwise.
For a string $x$, its length is $|x|$, and
\[
x^{\leqslant n} = (x(1),\mathbf{d}ots,x(n)).
\]
The relations
\[
f \leqslanta g,\quad f\leqslantm g
\]
mean inequality to within an additive constant and multiplicative constant
respectively.
The first is equivalent to $f \leqslant g + O(1)$, the second to $f = O(g)$.
The relation $f\eqm g$ means $f \leqslantm g$ and $f \geqslantm g$.
Borrowing from~\cite{PollardUsers01}, for
a function $f$ and a measure $\mu$, we will use the notation
\[
\mu f = \int f(x)\mu(dx),
\quad \mu^{y} f(x, y) = \int f(x,y)\mu(dy).
\]
\section{Constructive measure theory}\label{s.constr-meas}
The basic concepts and results of measure theory are recalled in
Section~\ref{s.measures}.
For the theory of measures over metric spaces, see
Subsection~\ref{ss.measure-metric}.
We introduce a certain fixed, enumerated sequence of Lipschitz functions
that will be used frequently.
Let $\mathcal{F}_{0}$ be the set of functions
of the form $g_{u,r,1/n}$ where $u \in D$, $r\in \mathbb{Q}$, $n = 1, 2, \dots$,
and
\begin{equation*}
g_{u,r,\eps}(x) = |1 - |d(x, u) - r|^{+}/\eps|^{+}
\end{equation*}
is a continuous function that is $1$ in the ball
$B(u,r)$, it is 0 outside $B(u, r+\eps)$, and takes intermediate
values in between.
Let
\begin{equation}\label{e.bd-Lip-seq}
\mathcal{E} = \{g_{1}, g_{2}, \dots \}
\end{equation}
be the smallest set of functions containing $\mathcal{F}_{0}$
and the constant 1, and closed under $\V$, $\et$ and rational linear
combinations.
The following construction will prove useful later.
\begin{proposition}\label{p.bd-Lip-set}
All bounded continuous functions can be
obtained as the limit of an increasing sequence of functions from
the enumerated countable set $\mathcal{E}$ of bounded computable
Lip\-schitz functions introduced in~\eqref{e.bd-Lip-seq}.
\end{proposition}
The proof is routine.
\subsection{Space of measures}
Let $\mathbf{X} = (X, d, D, \ag)$ be a computable metric space.
In Subsection~\ref{ss.measure-metric}, the space
$\mathcal{M}(\mathbf{X})$ of measures over $\mathbf{X}$ is defined, along with a natural
enumeration $\nu = \nu_{\mathcal{M}}$ for a subbase $\sg = \sg_{\mathcal{M}}$
of the weak topology.
This is a constructive topological space $\mathbf{M}$ which can be metrized by
introducing, as in~\ref{sss.Prokh}, the
\df{Prokhorov distance} $p(\mu, \nu)$:
the infimum of all those $\eps$ for which, for all Borel sets $A$ we have
$\mu(A) \leqslant \nu(A^{\eps}) + \eps$,
where $A^{\eps} = \setof{x : \exists y\in A\; d(x, y) < \eps}$.
Let $D_{\mathbf{M}}$ be the set of
those probability measures that are concentrated on finitely many points of
$D$ and assign rational values to them.
Let $\ag_{\mathbf{M}}$ be a natural enumeration of $D_{\mathbf{M}}$.
Then
\begin{equation}\label{e.metric-measures}
(\mathcal{M}, p, D_{\mathbf{M}}, \ag_{\mathbf{M}})
\end{equation}
is a computable metric space whose constructive topology is equivalent to
$\mathbf{M}$.
Let $U=B(x, r)$ be one of the balls in $\mathbf{X}$,
where $x\in D_{\mathbf{X}}$, $r \in \mathbb{Q}$.
The function $\mu \mapsto \mu(U)$ is typically not computable,
not even continuous.
For example, if $\mathbf{X}=\mathbb{R}$ and $U$ is the open interval $\opint{0}{1}$,
the sequence of probability measures $\dg_{1/n}$ (concentrated on $1/n$)
converges to $\dg_{0}$, but $\dg_{1/n}(U)=1$, and $\dg_{0}(U)=0$.
The following theorem shows that the situation is better with
$\mu \mapsto \mu f$ for computable $f$:
\begin{proposition}\label{p.computable-integral}
Let $\mathbf{X} = (X, d, D, \ag)$ be a computable metric space, and let
$\mathbf{M} = (\mathcal{M}(\mathbf{X}), \sg, \nu)$ be the effective topological space of
probability measures over $\mathbf{X}$.
If function $f : \mathbf{X} \to \mathbb{R}$ is bounded and computable
then $\mu \mapsto \mu f$ is computable.
\end{proposition}
\begin{proof}[Proof sketch]
To prove the theorem for bounded Lip\-schitz functions, we can invoke
the Strassen coupling theorem~\ref{p.coupling}.
The function $f$ can be obtained as a limit of a computable
monotone decreasing sequence of computable Lip\-schitz functions $f^{>}_{n}$,
and also as a limit of a computable monotone increasing
sequence of computable Lip\-schitz functions $f^{<}_{n}$.
In step $n$ of our computation of $\mu f$,
we can approximate $\mu f^{>}_{n}$ from above
to within $1/n$, and $\mu f^{<}_{n}$ from below to within $1/n$.
Let these bounds be $a^{>}_{n}$ and $a^{<}_{n}$.
To approximate $\mu f$ to within $\eps$,
find a stage $n$ with $a^{>}_{n} - a^{<}_{n} +2/n < \eps$.
\end{proof}
\subsection{Computable measures and random
transitions}\label{ss.computable-trans}
A measure $\mu$ is called \df{computable} if it is a computable element of the
space of measures.
Let $\{g_{i}\}$ be the set of bounded Lip\-schitz functions over $X$
introduced in~\eqref{e.bd-Lip-seq}.
\begin{proposition}\label{p.computable-meas-crit}
Measure $\mu$ is computable if and only if so is the function
$i \mapsto \mu g_{i}$.
\end{proposition}
\begin{proof}
The ``only if'' part follows from Proposition~\ref{p.computable-integral}.
For the ``if'' part, note that in order to
trap $\mu$ within some Prokhorov neighborhood of size $\eps$,
it is sufficient to compute $\mu g_{i}$ within a small
enough $\dg$, for all $i\leqslant n$ for a large enough $n$.
\end{proof}
\begin{example}
Let our probability space be the set $\mathbb{R}$ of real numbers with its
standard topology.
Let $a < b$ be two computable real numbers.
Let $\mu$ be the probability
distribution with density function
$f(x) = \frac{1}{b-a}1_{\clint{a}{b}}(x)$
(the uniform distribution over the interval $\clint{a}{b}$).
Function $f(x)$ is not computable, since it is not even continuous.
However, the measure $\mu$ is computable: indeed,
$\mu g_{i} = \frac{1}{b-a} \int_{a}^{b} g_{i}(x) dx$ is a computable
sequence, hence
Proposition~\ref{p.computable-meas-crit} implies that $\mu$ is computable.
\end{example}
The following theorem compensates somewhat for the fact
mentioned earlier, that the
function $\mathbf{m}u \mathbf{m}apsto \mathbf{m}u(U)$ is generally not computable.
\begin{proposition}
Let $\mathbf{m}u$ be a finite computable measure.
Then there is a computable map $h$ with the property that for every
bounded computable function $f$ with $|f| \leqslant 1$ with
the property $\mathbf{m}u(f^{-1}(0))=0$,
if $w$ is the name of $f$ then $h(w)$ is the
name of a program computing the value $\mathbf{m}u\setof{x: f(x) < 0}$.
\end{proposition}
\begin{proof}
Straightforward.
\end{proof}
\begin{remark}
Suppose that there is a computable function that for each $i$ computes a
Cauchy sequence $j \mathbf{m}apsto m_{i}(j)$ with the property that for
$i < j_{1} < j_{2}$ we have $|m_{i}(j_{1})-m_{i}(j_{2})| < 2^{-j_{1}}$, and
that for all $n$, there is a measure $\nu$ with the property that
for all $i \leqslant n$, $\nu g_{i} = m_{i}(n)$.
Is there a measure $\mathbf{m}u$ with the property that for each $i$ we have
$\lim_{j} m_{i}(j) = \mathbf{m}u g_{i}$?
Not necessarily, if the space is not compact.
For example, let $X = \{1,2,3,\mathbf{d}ots\}$ with the discrete topology.
The sequences $m_{i}(j) = 0$ for $j > i$ satisfy these conditions, but
they converge to the measure 0, not to a probability measure.
To guarantee that the sequences $m_{i}(j)$ indeed define a probability
measure, progress must be made, for example, in terms of the narrowing of
Prokhorov neighborhoods.
\end{remark}
Let now $\mathbf{m}athbf{X},\mathbf{m}athbf{Y}$ be computable metric spaces.
They give rise to measurable spaces with $\sg$-algebras $\mathbf{m}athcal{A}, \mathbf{m}athcal{B}$
respectively.
Let $\Lg = \setof{\lg_{x} : x \in X}$ be a probability kernel from $X$ to
$Y$ (as defined in Subsection~\ref{ss.transitions}).
Let $\{g_{i}\}$ be the set of bounded Lip\-schitz functions over $Y$
introduced in~\eqref{e.bd-Lip-seq}.
To each $g_{i}$, the kernel assigns a (bounded) measurable function
\[
f_{i}(x) = (\Lg g_{i})(x) = \lg_{x}^{y} g_{i}(y).
\]
We will call $\Lg$ \mathbf{d}f{computable} if so is the assignment
$(i, x) \mathbf{m}apsto f_{i}(x)$.
In this case, of course, each function $f_{i}(x)$ is continuous.
The measure $\Lg^{*}\mathbf{m}u$ is determined by the values
$\Lg^{*} g_{i} = \mathbf{m}u (\Lg g_{i})$, which are computable from $(i, \mathbf{m}u)$ and
so the function $\mathbf{m}u \mathbf{m}apsto \Lg^{*}\mathbf{m}u$ is computable.
\begin{example}\label{x.computable-determ-trans}
A computable function $h : X \mathbf{t}o Y$ defines an operator
$\Lg_{h}$ with $\Lg_{h} g = g \circ h$ (as in Example~\ref{x.determ-trans}).
This is a deterministic computable transition, in which
$f_{i}(x) = (\Lg_{h} g_{i})(x) = g_{i}(h(x))$ is,
of course, computable from $(i,x)$.
We define $h^{*}\mathbf{m}u = \Lg_{h}^{*}\mathbf{m}u$.
\end{example}
\subsection{Cells}\label{ss.cells}
As pointed out earlier, it is not convenient to define
a measure $\mathbf{m}u$ constructively starting from $\mathbf{m}u(\Gg)$ for open cells
$\Gg$.
The reason is that no matter how we fix $\Gg$, the function
$\mathbf{m}u \mathbf{m}apsto \mathbf{m}u(\Gg)$ is typically not computable.
It is better to work with bounded computable functions, since for such
a function $f$, the correspondence $\mathbf{m}u \mathbf{m}apsto \mathbf{m}u f$ is computable.
Under some special conditions, we will still get ``sharp'' cells.
Let $f$ be a bounded computable function over $\mathbf{X}$, let
$\ag_{1}<\dots<\ag_{k}$ be rational numbers,
and let $\mu$ be a computable measure with the property
that $\mu f^{-1}(\ag_{j})=0$ for all $j$.
In this case, we will say that $\ag_{j}$ are \df{regular points} of $f$
with respect to $\mu$.
Let $\ag_{0}=-\infty$, $\ag_{k+1}=\infty$, and for $j=0,\dots,k$,
let $U_{j} = f^{-1}(\opint{\ag_{j}}{\ag_{j+1}})$.
The sequence of disjoint
r.e.~open sets $(U_{0},\dots,U_{k})$ will be called the
\df{partition generated by} $f,\ag_{1},\dots,\ag_{k}$.
(Note that this sequence is not a partition in the sense of
$\bigcup_{j}U_{j}=\mathbf{X}$, since the boundaries of the sets are left out.)
If we have several partitions $(U_{i0},\dots,U_{i,k})$,
generated by different functions $f_{i}$ ($i=1,\dots,m$)
and different regular cutoff sequences $(\ag_{ij}: j=1,\dots,k_{i})$,
then we can form a new partition generated by all possible intersections
\[
V_{j_{1},\dots,j_{m}} = U_{1,j_{1}}\cap \dots \cap U_{m,j_{m}}.
\]
A partition of this kind will be called a \df{regular partition}.
The sets $V_{j_{1},\dots,j_{m}}$ will be called the \df{cells} of this
partition.
\begin{proposition}\label{p.reg-partit-meas-cptable}
In a regular partition as given above,
the values $\mu V_{j_{1},\dots,j_{m}}$ are computable
from the names of the functions $f_{i}$ and the cutoff points $\ag_{ij}$.
\end{proposition}
\begin{proof}
Straightforward.
\end{proof}
Assume that a computable sequence of functions
$b_{1}(x),b_{2}(x),\mathbf{d}ots$ over $X$ is given,
with the property that for every pair
$x_{1},x_{2}\in X$ with $x_{1}\ne x_{2}$, there is a $j$
with $b_{j}(x_{1})\cdot b_{j}(x_{2}) < 0$.
Such a sequence will be called a \mathbf{d}f{separating sequence}.
Let us give the correspondence between the set $\mathbf{m}athbb{B}^{\og}$
of infinite binary sequences and elements of the set
\[
X^{0} = \setof{x\in X: b_{j}(x)\ne 0,\;j=1,2,\mathbf{d}ots}.
\]
For a binary string
$s_{1}\mathbf{d}otsm s_{n} = s\in\mathbf{m}athbb{B}^{*}$, let
\[
\Gg_{s}
\]
be the set of elements of $X$ with the property that for
$j=1,\mathbf{d}ots,n$, if $s_{j}=0$ then $b_{j}(\og) < 0$, otherwise
$b_{j}(\og)>0$.
This correspondence has the following properties.
\begin{enumerate}[(a)]
\item $\Gg_{\Lg}=X$.
\item For each $s\in \mathbf{m}athbb{B}$, the sets $\Gg_{s0}$ and
$\Gg_{s1}$ are disjoint and their union is contained in $\Gg_{s}$.
\item For $x\in X^0$, we have $\{x\} = \bigcap_{x\in\Gg_{s}} \Gg_{s}$.
\end{enumerate}
If $s$ has length $n$ then $\Gg_{s}$ will be called a \mathbf{d}f{canonical
$n$-cell}, or simply canonical cell, or $n$-cell.
From now on, whenever $\Gg$ denotes a subset of $X$, it means a
canonical cell.
We will also use the notation
\[
l(\Gg_{s})=l(s).
\]
The three properties above say that if we restrict ourselves to the set
$X^0$ then the canonical cells behave somewhat like binary subintervals:
they divide $X^0$ in half, then each half again in half, etc.
Moreover, around each point, these canonical cells become ``arbitrarily
small'', in some sense (though, they may not be a basis of neighborhoods).
It is easy to see that if $\Gg_{s_1},\Gg_{s_2}$ are two canonical
cells then they either are disjoint or one of them contains the other.
If $\Gg_{s_1}\sbs\Gg_{s_2}$ then $s_2$ is a prefix of $s_1$.
If, for a moment, we write $\Gg^0_s=\Gg_s\cap X^0$ then we have the
disjoint union $\Gg^0_s=\Gg^0_{s0}\cup\Gg^0_{s1}$.
For an $n$-element binary string $s$, for $x \in \Gg_{s}$, we will write
\[
\mathbf{m}u(s) = \mathbf{m}u(\Gg_{s}).
\]
Thus, for elements of $X^0$, we can talk about the $n$-th bit $x_n$
of the description of $x$: it is uniquely determined.
The $2^n$ cells (some of them possibly empty)
of the form $\Gg_s$ for $l(s)=n$ form a partition
\[
\mathbf{m}athcal{P}_n
\]
of $X^0$.
\begin{examples}\label{x.cells}\
\begin{enumerate}[\upshape 1.]
\item If $\mathbf{X}$ is the set of infinite binary sequences with its usual
topology, the functions $b_{n}(x) = x_{n}-1/2$ generate the usual
cells, and $\mathbf{X}^{0}=\mathbf{X}$.
\item If $\mathbf{X}$ is the interval $\clint{0}{1}$, let
$b_{n}(x) = -\sin(2^{n}\pi x)$.
Then cells are open
intervals of the form $\opint{k\cdot 2^{-n}}{(k+1)\cdot 2^{-n}}$,
the correspondence between infinite binary strings
and elements of $X^0$ is just the usual representation of $x$ as the
binary decimal string $0.x_{1}x_{2}\dots$.
\end{enumerate}
\end{examples}
\end{examples}
When we fix canonical cells,
we will generally assume that the partition chosen is also
``natural''.
The bits $x_1,x_2,\ldots$ could contain information about the
point $x$ in decreasing order of importance from a macroscopic
point of view.
For example, for a container of gas, the first few bits may
describe, to a reasonable degree of precision, the amount of gas in
the left half of the container, the next few bits may describe the
amounts in each quarter, the next few bits may describe the
temperature in each half, the next few bits may describe again the
amount of gas in each half, but now to more precision, etc.
From now on, whenever $\Gg$ denotes a subset of $X$, it means a
canonical cell.
From now on, for elements of $X^0$, we can talk about the $n$-th
bit $x_n$ of the description of $x$: it is uniquely determined.
The following observation will prove useful.
\begin{proposition}\label{p.compact-cell-basis}
Suppose that the space $\mathbf{m}athbf{X}$ is compact and we have a separating
sequence $b_{i}(x)$ as given above.
Then the cells $\Gg_{s}$ form a basis of the space $\mathbf{m}athbf{X}$.
\end{proposition}
\begin{proof}
We need to prove that for every ball $B(x,r)$, there is a cell
$x\in\Gg_{s}\sbs B(x,r)$.
Let $C$ be the complement of $B(x,r)$.
For each point $y$ of $C$, there is an $i$ such that
$b_{i}(x)\cdot b_{i}(y) < 0$.
In this case, let $J^{0} = \setof{z: b_{i}(z) < 0}$,
$J^{1} = \setof{z: b_{i}(z) > 0}$.
Let $J(y)=J^{p}$ such that $y\in J^{p}$.
Then $C \sbs \bigcup_{y} J(y)$, and compactness implies that there is a
finite sequence $y_{1},\mathbf{d}ots,y_{k}$ with
$C \sbs \bigcup_{j=1}^{k} J(y_{j})$.
Clearly, there is a cell
$x \in \Gg_{s} \sbs B(x,r) \xcpt \bigcup_{j=1}^{k} J(y_{j})$.
\end{proof}
\section{Uniform tests}\label{s.unif-test}
\subsection{Universal uniform test}
Let $\mathbf{X} = (X, d, D, \ag)$ be a computable metric space, and let
$\mathbf{M} = (\mathcal{M}(\mathbf{X}), \sg, \nu)$ be the constructive topological space of
probability measures over $\mathbf{X}$.
A \df{randomness test} is a function $f : \mathbf{M} \times \mathbf{X} \to \ol\mathbb{R}$
with the following two properties.
\begin{condition}\label{cnd.test}\
\begin{enumerate}[\upshape 1.]
\item The function $(\mu, x) \mapsto f_{\mu}(x)$ is lower
semicomputable.
(Then for each $\mu$, the integral $\mu f_{\mu} = \mu^{x} f_{\mu}(x)$
exists.)
\item\label{i.test.integr} $\mu f_{\mu} \leqslant 1$.
\end{enumerate}
\end{condition}
The value $f_{\mu}(x)$ is intended to quantify
the nonrandomness of the outcome $x$ with respect to the probability
measure $\mu$.
The larger the values the less random is $x$.
Condition~\ref{cnd.test}.\ref{i.test.integr} guarantees that the
probability of those outcomes whose randomness is $\geqslant m$ is at most
$1/m$.
The definition of tests is in the spirit of Martin-L\"of's tests.
The important difference is in the semicomputability condition:
instead of restricting the measure $\mathbf{m}u$ to be computable, we require the
test to be lower semicomputable also in its argument $\mathbf{m}u$.
Just as with Martin-L\"of's tests, we want to find a universal test;
however, we seem to need a condition on the space $\mathbf{m}athbf{X}$.
Let us say that a sequence $i \mapsto U_{i}$ of sets has \df{recognizable
Boolean inclusions} if the set
\[
\setof{ (S, T) : S, T \txt{ are finite, }
\bigcap_{i \in S} U_{i} \sbs \bigcup_{j \in T} U_{j}}
\]
is recursively enumerable.
We will say that a computable metric space has recognizable Boolean inclusions
if this is true of the enumerated basis consisting of balls of the form
$B(x, r)$ where $x \in D$ and $r > 0$ is a rational number.
It is our conviction that the important metric spaces studied in
probability theory have recognizable Boolean inclusions,
and that proving this in each
individual case should not be too difficult.
For example, it does not seem difficult to prove this for the space
$C\clint{0}{1}$ of Example~\ref{x.cptable-metric-{0}{1}}, with the set of
rational piecewise-linear functions chosen as $D$.
But, we have not carried out any of this work!
\begin{theorem}\label{t.univ-unif}
Suppose that the metric space $\mathbf{X}$ has recognizable
Boolean inclusions.
Then there is a universal test, that is a test $\mathbf{t}_{\mu}(x)$ with the
property
that for every other test $f_{\mu}(x)$ there is a constant $c_{f} > 0$ with
$c_{f} f_{\mu}(x) \leqslant \mathbf{t}_{\mu}(x)$.
\end{theorem}
\begin{proof}\
\begin{enumerate}[1.]
\item\label{univ-unif.g'}
We will show that there is a mapping that to each name $u$ of a lower
semicomputable function $(\mathbf{m}u, x) \mathbf{m}apsto g(\mathbf{m}u, x)$ assigns the name of a
lower semicomputable function
$g'(\mathbf{m}u, x)$ such that $\mathbf{m}u^{x} g'(\mathbf{m}u,x) \leqslant 1$, and
if $g$ is a test then $g'=g$.
To prove the statement, let us represent the space $\mathbf{m}athbf{M}$ rather as
\begin{equation}\label{e.metric-measures-again}
\mathbf{m}athbf{M} = (\mathbf{m}athcal{M}(\mathbf{m}athbf{X}), p, D, \ag_{\mathbf{m}athbf{M}}),
\end{equation}
as in~\eqref{e.metric-measures}.
Since $g(\mathbf{m}u, x)$ is lower semicomputable, there is a computable sequence of
basis elements $U_{i} \sbs \mathbf{m}athbf{M}$ and $V_{i} \sbs \mathbf{m}athbf{X}$
and rational bounds $r_{i}$ such that
\[
g(\mathbf{m}u, x) = \sup_{i}\; r_{i} 1_{U_{i}}(\mathbf{m}u)1_{V_{i}}(x).
\]
Let $h_{n}(\mathbf{m}u, x) = \mathbf{m}ax_{i \leqslant n} r_{i} 1_{U_{i}}(\mathbf{m}u)1_{V_{i}}(x)$.
Let us also set $h_{0}(\mathbf{m}u, x) = 0$.
Our goal is to show that
the condition $\forall \mathbf{m}u\; \mathbf{m}u^{x} h_{n}(\mathbf{m}u, x) \leqslant 1$ is decidable.
If this is the case then we will be done.
Indeed, we can define $h'_{n}(\mathbf{m}u,x)$ recursively as follows.
Let $h'_{0}(\mathbf{m}u, x)=0$.
Assume that $h'_{n}(\mathbf{m}u,x)$ has been defined already.
If $\forall\mathbf{m}u\; \mathbf{m}u^{x} h_{n+1}(\mathbf{m}u,x) \leqslant 1$ then
$h'_{n+1}(\mathbf{m}u,x) = h_{n+1}(\mathbf{m}u,x)$; otherwise, it is $h'_{n}(\mathbf{m}u,x)$.
The function $g'(\mathbf{m}u, x) = \sup_{n} h'_{n}(\mathbf{m}u,x)$ clearly satisfies our
requirements.
We proceed to prove the decidability of the condition
\begin{equation}\label{e.finite-test-cond}
\forall \mathbf{m}u\; \mathbf{m}u^{x} h_{n}(\mathbf{m}u, x) \leqslant 1.
\end{equation}
The basis elements $V_{i}$ can be taken as
balls $B(q_{i},\mathbf{d}g_{i})$ for a computable sequence $q_{i} \in D$ and
computable sequence of rational numbers $\mathbf{d}g_{i}>0$.
Similarly, the basis element $U_{i}$ is the set of measures that is a ball
$B(\sg_{i}, \eps_{i})$, in the metric
space~\eqref{e.metric-measures-again}.
Here, using notation~\eqref{e.finite-Prokhorov},
$\sg_{i}$ is a measure concentrated on a finite set $S_{i}$.
According to Proposition~\ref{p.simple-Prokhorov-ball},
the ball $U_{i}$ is the set of measures $\mathbf{m}u$ satisfying the inequalities
\[
\mathbf{m}u(A^{\eps_{i}}) > \sg_{i}(A) - \eps_{i}
\]
for all $A \sbs S_{i}$.
For each $n$, consider the finite set of balls
\[
\mathbf{m}athcal{B}_{n} = \setof{B(q_{i},\mathbf{d}g_{i}) : i \leqslant n} \cup
\setof{B(s, \eps_{i}) : i \leqslant n,\; s \in S_{i}}.
\]
Consider all sets of the form
\[
U_{A,B} = \bigcap_{U \in A} U \xcpt \bigcup_{U \in B} U
\]
for all pairs of sets $A, B \sbs \mathbf{m}athcal{B}_{n}$.
These sets are all finite intersections of balls or complements of
balls from the finite set $\mathbf{m}athcal{B}_{n}$ of balls.
The space $\mathbf{m}athbf{X}$ has
recognizable Boolean inclusions, so it is decidable which of these sets
$U_{A,B}$ are nonempty.
The condition~\eqref{e.finite-test-cond} can be formulated as a
Boolean formula involving linear inequalities with rational coefficients,
for the variables $\mathbf{m}u_{A,B}=\mathbf{m}u(U_{A,B})$, for those $A,B$ with
$U_{A,B}\ne\emptyset$.
The solvability of such a Boolean condition can always be decided.
\item\label{univ-unif.end}
Let us enumerate all lower semicomputable functions $g_{u}(\mathbf{m}u, x)$ for
all the names $u$.
Without loss of generality, assume these names to be natural numbers,
and form the functions $g'_{u}(\mathbf{m}u,x)$ according to the
assertion~\ref{univ-unif.g'} above.
The function $t = \sum_{u} 2^{-u-1} g'_{u}$ will be the desired universal
test.
\end{enumerate}
\end{proof}
From now on, when referring to randomness tests, we will always assume that
our space $\mathbf{X}$ has recognizable Boolean inclusions and hence has a
universal test.
We fix a universal test $\mathbf{t}_{\mu}(x)$, and call the function
\[
\mathbf{d}_{\mu}(x) = \log \mathbf{t}_{\mu}(x)
\]
the \df{deficiency of randomness} of $x$ with respect to $\mu$.
We call an element $x\in X$ \df{random} with respect to $\mu$
if $\mathbf{d}_{\mu}(x) < \infty$.
\begin{remark}\label{r.cond-test}
Tests can be generalized to include an arbitrary parameter $y$:
we can talk about the universal test
\[
\mathbf{t}_{\mu}(x \mid y),
\]
where $y$ comes from some constructive topological space $\mathbf{Y}$.
This is a maximal (within a multiplicative constant) lower semicomputable
function $(x,y,\mu) \mapsto f(x,y,\mu)$ with the property
$\mu^{x} f(x,y,\mu) \leqslant 1$.
\end{remark}
\subsection{Conservation of randomness}
For $i=1,0$, let
$\mathbf{m}athbf{X}_{i} = (X_{i}, d_{i}, D_{i}, \ag_{i})$ be computable metric spaces,
and let $\mathbf{m}athbf{M}_{i} = (\mathbf{m}athcal{M}(\mathbf{m}athbf{X}_{i}), \sg_{i}, \nu_{i})$
be the effective topological space of
probability measures over $\mathbf{m}athbf{X}_{i}$.
Let $\Lg$ be a computable probability kernel from $\mathbf{m}athbf{X}_{1}$ to $\mathbf{m}athbf{X}_{0}$ as
defined in Subsection~\ref{ss.computable-trans}.
In the following theorem, the same notation $\mathbf{d}_{\mathbf{m}u}(x)$ will
refer to the deficiency of randomness with respect to two different spaces,
$\mathbf{m}athbf{X}_{1}$ and $\mathbf{m}athbf{X}_{0}$, but this should not cause confusion.
Let us first spell out the conservation theorem before interpreting it.
\begin{theorem}\label{p.conservation}
For a computable probability kernel $\Lg$ from $\mathbf{m}athbf{X}_{1}$ to $\mathbf{m}athbf{X}_{0}$,
we have
\begin{equation}\label{e.conservation}
\lg_{x}^{y} \mathbf{t}_{\Lg^{*}\mathbf{m}u}(y) \leqslantm \mathbf{t}_{\mathbf{m}u}(x).
\end{equation}
\end{theorem}
\begin{proof}
Let $\mathbf{t}_{\nu}(x)$ be the universal test over $\mathbf{m}athbf{X}_{0}$.
The left-hand side of~\eqref{e.conservation} can be written as
\[
u_{\mathbf{m}u} = \Lg \mathbf{t}_{\Lg^{*}\mathbf{m}u}.
\]
According to~\eqref{e.Lg-Lg*}, we have
$\mathbf{m}u u_{\mathbf{m}u} = (\Lg^{*}\mathbf{m}u) \mathbf{t}_{\Lg^{*}\mathbf{m}u}$ which is $\leqslant 1$ since $\mathbf{t}$ is
a test.
If we show that $(\mathbf{m}u,x) \mathbf{m}apsto u_{\mathbf{m}u}(x)$ is lower semicomputable
then the universality of $\mathbf{t}_{\mathbf{m}u}$ will imply $u_{\mathbf{m}u} \leqslantm \mathbf{t}_{\mathbf{m}u}$.
According to Proposition~\ref{p.lower-semi-as-limit}, as
a lower semicomputable function, $\mathbf{t}_{\nu}(y)$ can be written as
$\sup_{n} g_{n}(\nu, y)$, where $(g_{n}(\nu, y))$ is a computable sequence
of computable functions.
We pointed out in Subsection~\ref{ss.computable-trans} that the function
$\mathbf{m}u \mathbf{m}apsto \Lg^{*}\mathbf{m}u$ is computable.
Therefore the function $(n, \mu, x) \mapsto g_{n}(\Lg^{*}\mu, f(x))$
is also computable.
So, $u_{\mathbf{m}u}(x)$ is the supremum of a computable sequence of computable
functions and as such, lower semicomputable.
\end{proof}
It is easier to interpret the theorem first in the special case when
$\Lg = \Lg_{h}$ for a computable function $h : X_{1} \mathbf{t}o X_{0}$,
as in Example~\ref{x.computable-determ-trans}.
Then the theorem simplifies to the following.
\begin{corollary}\label{c.conservation-determ}
For a computable function $h : X_{1} \mathbf{t}o X_{0}$, we have
$\mathbf{d}_{h^{*}\mathbf{m}u}(h(x)) \leqslanta \mathbf{d}_{\mathbf{m}u}(x)$.
\end{corollary}
Informally, this says that if $x$ is random with respect to $\mathbf{m}u$ in
$\mathbf{m}athbf{X}_{1}$ then $h(x)$ is essentially at least as random with respect to the
output distribution $h^{*}\mathbf{m}u$ in $\mathbf{m}athbf{X}_{0}$.
Decrease in randomness
can only be caused by complexity in the definition of the function $h$.
It is even easier to interpret the theorem when $\mathbf{m}u$ is defined over a product
space $\mathbf{m}athbf{X}_{1}\mathbf{t}imes \mathbf{m}athbf{X}_{2}$, and $h(x_{1},x_{2}) = x_{1}$ is the projection.
The theorem then says, informally, that if the pair $(x_{1},x_{2})$ is
random with respect to $\mathbf{m}u$ then $x_{1}$ is random with respect to the
marginal $\mathbf{m}u_{1} = h^{*}\mathbf{m}u$ of $\mathbf{m}u$.
This is a very natural requirement: why would the throwing-away of the
information about $x_{2}$ affect the plausibility of the
hypothesis that the outcome $x_{1}$ arose from the distribution
$\mathbf{m}u_{1}$?
In the general case of the theorem, concerning random transitions,
we cannot bound the randomness of each outcome uniformly.
The theorem asserts that the average nonrandomness, as measured by
the universal test with respect to the output distribution, does not
increase.
In logarithmic notation:
$\lg_{x}^{y} 2^{\mathbf{d}_{\Lg^{*}\mathbf{m}u}(y)} \leqslanta \mathbf{d}_{\mathbf{m}u}(x)$,
or equivalently,
$\int 2^{\mathbf{d}_{\Lg^{*}\mathbf{m}u}(y)} \lg_{x}(dy) \leqslanta \mathbf{d}_{\mathbf{m}u}(x)$.
\begin{corollary}
Let $\Lg$ be a computable probability kernel from $\mathbf{m}athbf{X}_{1}$ to $\mathbf{m}athbf{X}_{0}$.
There is a constant $c$ such that
for every $x\in\mathbf{X}_{1}$, and integer $m > 0$ we have
\[
\lg_{x}\setof{y : \mathbf{d}_{\Lg^{*}\mathbf{m}u}(y) > \mathbf{d}_{\mathbf{m}u}(x) + m + c} \leqslant 2^{-m}.
\]
\end{corollary}
Thus, in a computable random transition,
the probability of an increase of randomness deficiency by
$m$ units (plus a constant $c$) is less than $2^{-m}$.
The constant $c$ comes from the description complexity
of the transition $\Lg$.
A randomness conservation result related to
Corollary~\ref{c.conservation-determ} was proved
in~\cite{HertlingWeihrauchRand98}.
There, the measure over the space $\mathbf{m}athbf{X}_{0}$ is not the output
measure of the transformation, but is assumed to
obey certain inequalities related to the transformation.
\section{Tests and complexity}\label{s.complexity}
\subsection{Description complexity}
\subsubsection{Complexity, semimeasures, algorithmic entropy}
Let $X=\Sg^{*}$.
For $x \in \Sg^{*}$ for some finite alphabet $\Sg$,
let $H(x)$ denote the prefix-free description complexity of the finite
sequence $x$ as defined, for example, in~\cite{LiViBook97} (where it is
denoted by $K(x)$).
For completeness, we give its definition here.
Let $A : \{0,1\}^{*} \mathbf{t}imes \Sg^{*} \mathbf{t}o \Sg^{*}$ be a computable
(possibly partial) function with the property that if $A(p_{1},y)$ and
$A(p_{2},y)$ are defined for two different strings $p_{1}, p_{2}$, then
$p_{1}$ is not the prefix of $p_{2}$.
Such a function is called a (prefix-free) \mathbf{d}f{interpreter}.
We denote
\[
H^{A}(x \mathbf{m}id y) = \mathbf{m}in_{A(p,y)=x} |p|.
\]
One of the most important theorems of description complexity is the
following:
\begin{proposition}[Invariance Theorem, see for
example~\protect\cite{LiViBook97}]
There is an optimal interpreter $T$ with the above property: with it, for
every interpreter $A$ there is a constant $c_{A}$ with
\[
H^{T}(x \mathbf{m}id y) \leqslant H^{A}(x \mathbf{m}id y) + c_{A}.
\]
\end{proposition}
We fix an optimal interpreter $T$ and write
$H(x \mathbf{m}id y) = H^{T}(x \mathbf{m}id y)$, calling it
the conditional complexity of a string $x$ with respect to string $y$.
We denote $H(x) = H(x \mathbf{m}id \Lg)$.
Let
\[
\mathbf{m}(x) = 2^{-H(x)}.
\]
The function $\mathbf{m}(x)$ is lower semicomputable with
$\sum_{x} \mathbf{m}(x) \leqslant 1$.
Let us call any real function $f(x) \geqslant 0$ over $\Sg^{*}$ with
$\sum_{x} f(x) \leqslant 1$ a \mathbf{d}f{semimeasure}.
The following theorem, known as the Coding Theorem, is
an important tool.
\begin{proposition}[Coding Theorem]\label{t.coding}
For every lower semicomputable semimeasure $f$ there is a constant $c>0$
with $\mathbf{m}(x) \geqslant c\cdot f(x)$.
\end{proposition}
Because of this theorem, we will say that $\mathbf{m}(x)$ is a
\mathbf{d}f{universal lower semicomputable semimeasure}.
It is possible to turn $\mathbf{m}(x)$ into a measure, by compactifying the
discrete space $\Sg^{*}$ into
\[
\ol{\Sg^{*}}=\Sg^{*}\cup\{\infty\}
\]
(as in part~\ref{i.compact.compactify} of Example~\ref{x.compact};
this process makes sense also for a constructive discrete space),
and setting $\mathbf{m}(\infty) = 1-\sum_{x\in\Sg^{*}} \mathbf{m}(x)$.
The extended measure $\mathbf{m}$ is not quite lower semicomputable since
the number $\mathbf{m}(\ol{\Sg^{*}} \xcpt \{0\})$ is not necessarily
lower semicomputable.
\begin{remark}
A measure $\mathbf{m}u$ is computable over $\ol{\Sg^{*}}$ if and only if the
function $x \mathbf{m}apsto \mathbf{m}u(x)$ is computable for $x \in \Sg^{*}$.
This property does not imply that the number
\[
1 - \mathbf{m}u(\infty) = \mathbf{m}u(\Sg^{*}) = \sum_{x\in\Sg^{*}} \mathbf{m}u(x)
\]
is computable.
\end{remark}
Let us allow, for a moment, measures $\mathbf{m}u$ that are not probability
measures: they may not even be finite.
Metric and computability can be extended to this case
(see~\cite{Topsoe70}), the universal test
$\mathbf{t}_{\mathbf{m}u}(x)$ can also be generalized.
The Coding Theorem and other considerations suggest the introduction of
the following notation, for an arbitrary measure $\mathbf{m}u$:
\begin{equation}\label{e.alg-ent}
H_{\mathbf{m}u}(x) = -\mathbf{d}_{\mathbf{m}u}(x) = -\log\mathbf{t}_{\mathbf{m}u}(x).
\end{equation}
Then, with $\#$ defined as the counting measure over the discrete set
$\Sg^{*}$ (that is, $\#(S) = |S|$), we have
\[
H(x) \eqa H_{\#}(x).
\]
This allows viewing $H_{\mathbf{m}u}(x)$ as a generalization of description
complexity: we will call this quantity the \mathbf{d}f{algorithmic entropy} of $x$
relative to the measure $\mathbf{m}u$.
Generalization to conditional complexity is done using
Remark~\ref{r.cond-test}.
A reformulation of the definition of tests says that $H_{\mathbf{m}u}(x)$
is minimal (within an additive constant) among the upper semicomputable
functions $(\mathbf{m}u, x) \mathbf{m}apsto f_{\mathbf{m}u}(x)$ with $\mathbf{m}u^{x} 2^{-f_{\mathbf{m}u}(x)} \leqslant 1$.
The following identity is immediate from the definitions:
\begin{equation}\label{e.Hmu-to-cond}
H_{\mathbf{m}u}(x) = H_{\mathbf{m}u}(x \mathbf{m}id \mathbf{m}u).
\end{equation}
\subsubsection{Computable measures and complexity}
It is known that for computable $\mathbf{m}u$, the test $\mathbf{d}_{\mathbf{m}u}(x)$ can be
expressed in terms of the description complexity of $x$
(we will prove these expressions below).
Assume that $\mathbf{m}athbf{X}$ is the (discrete) space of all binary strings.
Then we have
\begin{equation}\label{e.test-charac-fin-cpt}
\mathbf{d}_{\mathbf{m}u}(x) = -\log \mathbf{m}u(x) - H(x) + O(H(\mathbf{m}u)).
\end{equation}
The meaning of this equation is the following.
Due to maximality property of the semimeasure $\mathbf{m}$ following from
the Coding Theorem~\ref{t.coding} above, the expression
$-\log\mathbf{m}u(x)$ is an upper bound (within $O(H(\mathbf{m}u))$) of the
complexity $H(x)$, and nonrandomness of $x$ is measured by
the difference between the complexity and this upper bound.
See~\cite{ZvLe70} for a first formulation of this general upper bound
relation.
As a simple example, consider the uniform distribution $\mathbf{m}u$ over the set of
binary sequences of length $n$.
Conditioning everything on $n$, we obtain
\[
\mathbf{d}_{\mathbf{m}u}(x\mathbf{m}id n) \eqa n - H(x\mathbf{m}id n),
\]
that is the more the description complexity $H(x\mathbf{m}id n)$ of a binary
sequence of length $n$ differs from its upper bound $n$ the less random is
$x$.
Assume that $\mathbf{m}athbf{X}$ is the space of infinite binary sequences.
Then equation~\eqref{e.test-charac-fin-cpt} must be replaced with
\begin{equation}\label{e.test-charac-infin-cpt}
\mathbf{d}_{\mu}(x) =
\sup_{n}\left(-\log \mu(x^{\leqslant n}) - H(x^{\leqslant n})\right) + O(H(\mu)).
\end{equation}
For the coin-tossing distribution $\mu$, this characterization was first
proved by Schnorr, and published in~\cite{Chaitin75}.
\begin{remark}\label{r.monot-compl}
It is possible to obtain similar natural characterizations of randomness,
using some other natural definitions of description complexity.
A universal semicomputable semimeasure
$\mathbf{m}_{\Og}$ over the set $\Og$ of infinite sequences was introduced,
and a complexity $\KM(x) = -\log\mathbf{m}_{\Og}(x)$ defined in~\cite{ZvLe70}.
A so-called ``monotonic complexity'', $\operatorname{Km}(x)$ was introduced, using
Turing machines with one-way input and output,
in~\cite{LevinRand73}, and a closely
related quantity called ``process complexity'' was introduced
in~\cite{Schnorr73}.
These quantities can also be used in a characterization of randomness
similar to~\eqref{e.test-charac-fin-cpt}.
The nontrivial fact that the complexities $\KM$ and $\operatorname{Km}$ differ by an
unbounded amount was shown in~\cite{GacsRel83}.
\end{remark}
For noncomputable measures, we cannot replace $O(H(\mu))$ in
these relations with anything finite, as shown in
the following example.
Therefore, however attractive and simple,
$\exp(-\log \mu(x) - H(x))$ is not a universal uniform test of randomness.
\begin{proposition}\label{p.test-charac-counterexample}
There is a measure $\mathbf{m}u$ over the discrete space $\mathbf{m}athbf{X}$ of binary strings
such that for each $n$, there is an $x$ with
$\mathbf{d}_{\mathbf{m}u}(x) = n - H(n)$ and $-\log \mathbf{m}u(x) - H(x) \leqslanta 0$.
\end{proposition}
\begin{proof}
Let us treat the domain of our measure $\mathbf{m}u$ as a set of pairs $(x,y)$.
Let $x_{n} = 0^{n}$, for $n=1,2,\mathbf{d}otsc$.
For each $n$, let $y_{n}$ be some binary string of length $n$
with the property $H(x_{n}, y_{n}) > n$.
Let $\mathbf{m}u(x_{n},y_{n})=2^{-n}$.
Then $- \log \mathbf{m}u(x_{n},y_{n}) - H(x_{n},y_{n}) \leqslant n - n = 0$.
On the other hand, let $t_{\mu}(x,y)$ be the test that is nonzero only on strings
$x$ of the form $x_{n}$:
\[
t_{\mu}(x_{n}, y) = \frac{\mathbf{m}(n)}{\sum_{z \in \mathbb{B}^{n}} \mu(x_{n}, z)}.
\]
The form of the definition ensures semicomputability and we also have
\[
\sum_{x,y} \mathbf{m}u(x,y) t_{\mathbf{m}u}(x,y) \leqslant \sum_{n} \mathbf{m}(n) < 1,
\]
therefore $t_{\mathbf{m}u}$ is indeed a test.
Hence $\mathbf{t}_{\mathbf{m}u}(x,y) \geqslantm t_{\mathbf{m}u}(x,y)$.
Taking logarithms, $\mathbf{d}_{\mathbf{m}u}(x_{n}, y_{n}) \geqslanta n - H(n)$.
\end{proof}
The same example implies that it is also not an option, even over discrete
sets, to replace the definition of uniform tests with the \emph{ad hoc}
formula $\exp(-\log\mathbf{m}u(x) - H(x))$:
\begin{proposition}
The test defined as $f_{\mathbf{m}u}(x) = \exp(-\log\mathbf{m}u(x) - H(x))$
over discrete spaces $\mathbf{m}athbf{X}$ does
not obey the conservation of randomness.
\end{proposition}
\begin{proof}
Let us use the example of Proposition~\ref{p.test-charac-counterexample}.
Consider the function $\pi : (x,y) \mathbf{m}apsto x$.
The image of the measure $\mathbf{m}u$ under the projection is
$(\pi\mathbf{m}u)(x) = \sum_{y} \mathbf{m}u(x,y)$.
Thus, $(\pi\mathbf{m}u)(x_{n}) = \mathbf{m}u(x_{n},y_{n}) = 2^{-n}$.
We have seen $\log f_{\mathbf{m}u}(x_{n},y_{n}) \leqslant 0$.
On the other hand,
\[
\log f_{\pi\mathbf{m}u} (\pi(x_{n},y_{n})) = -\log(\pi\mathbf{m}u)(x_{n}) - H(x_{n})
\eqa n - H(n).
\]
Thus, the projection $\pi$ takes a random pair $(x_{n},y_{n})$ into
an object $x_{n}$ that is very nonrandom (when randomness is measured using
the tests $f_{\mathbf{m}u}$).
\end{proof}
In the example, we have the abnormal situation that a pair is random but
one of its elements is nonrandom.
Therefore even if we would not insist on universality, the test
$\exp(-\log\mathbf{m}u(x) - H(x))$ is unsatisfactory.
Looking into the reasons of the nonconservation in the example,
we will notice that it could only have happened because the
test $f_{\mathbf{m}u}$ is too special.
The fact that $-\log (\pi\mathbf{m}u)(x_{n}) - H(x_{n})$ is large should show that
the pair $(x_{n},y_{n})$ can be enclosed into the ``simple'' set
$\{x_{n}\} \mathbf{t}imes \mathbf{m}athbf{Y}$ of small probability; unfortunately,
this observation does not reflect on $-\log\mathbf{m}u(x,y) - H(x,y)$
when the measure $\mathbf{m}u$ is non-computable (it does for computable $\mathbf{m}u$).
\subsubsection{Expressing the uniform test in terms of complexity}
It is a natural idea to modify equation~\eqref{e.test-charac-fin-cpt}
in such a way that the complexity $H(x)$ is replaced with $H(x \mathbf{m}id \mathbf{m}u)$.
However, this expression must be understood properly.
The measure $\mathbf{m}u$
(especially, when it is not computable) cannot be described by
a finite string; on the other hand, it can be described by infinite strings
in many different ways.
Clearly, irrelevant information in these infinite strings should be
ignored.
The notion of representation in computable analysis (see
Subsection~\ref{ss.notation-repr}) will solve the problem.
An interpreter function should have the property that its output depends
only on $\mathbf{m}u$ and not on the sequence representing it.
Recall the topological space $\mathbf{m}athbf{M}$
of computable measures over our space $\mathbf{m}athbf{X}$.
An interpreter $A : \{0,1\}^{*} \mathbf{t}imes \mathbf{m}athbf{M} \mathbf{t}o \Sg^{*}$ is
a computable function that is prefix-free in its first argument.
The complexity
\[
H(x \mathbf{m}id \mathbf{m}u)
\]
can now be defined in terms of such
interpreters, noting that the Invariance Theorem holds as before.
To define this complexity in terms of representations,
let $\gm_{\mathbf{m}athbf{M}}$ be our chosen representation for the space $\mathbf{m}athbf{M}$
(thus, each measure $\mathbf{m}u$ is represented via all of its
Cauchy sequences in the Prokhorov distance).
Then we can say that $A$ is an interpreter if it is
$(\operatorname{id}, \gm_{\mathbf{m}athbf{M}}, \operatorname{id})$-computable, that is a certain computable function
$B : \{0,1\}^{*} \mathbf{t}imes \Sg^{\og} \mathbf{t}o \Sg^{*}$ realizes $A$
for every $p\in\{0,1\}^{*}$,
and for every sequence $z$ that is a $\gm_{\mathbf{m}athbf{M}}$-name of a measure $\mathbf{m}u$, we
have $B(p, z) = A(p, \mathbf{m}u)$.
\begin{remark}
The notion of oracle computation and reducibility in
the new sense (where the result is
required to be independent of which representation of an
object is used) may be worth investigating in other settings as well.
\end{remark}
Let us mention the following easy fact:
\begin{proposition}\label{p.H/mu-computable}
If $\mathbf{m}u$ is a computable measure then $H(x \mathbf{m}id \mathbf{m}u) \eqa H(x)$.
The constant in $\eqa$ depends on the description complexity of $\mathbf{m}u$.
\end{proposition}
\begin{theorem}\label{t.test-charac-discr}
If $\mathbf{m}athbf{X}$ is the discrete space $\Sg^{*}$ then we have
\begin{equation}\label{e.test-charac-discr}
\mathbf{d}_{\mathbf{m}u}(x) \eqa -\log\mathbf{m}u(x) - H(x \mathbf{m}id \mathbf{m}u).
\end{equation}
\end{theorem}
Note that in terms of the algorithmic entropy notation introduced
in~\eqref{e.alg-ent}, this theorem can be expressed as
\begin{equation}\label{e.alg-entr-charac}
H_{\mathbf{m}u}(x) \eqa H(x \mathbf{m}id \mathbf{m}u) + \log\mathbf{m}u(x).
\end{equation}
\begin{proof}
In exponential notation, equation~\eqref{e.test-charac-discr} can be
written as $\mathbf{t}_{\mathbf{m}u}(x) \eqm \mathbf{m}(x \mathbf{m}id \mathbf{m}u)/\mathbf{m}u(x)$.
Let us prove $\geqslantm$ first.
We will show that the right-hand side of this inequality is a test, and
hence $\leqslantm \mathbf{t}_{\mathbf{m}u}(x)$.
However,
the right-hand side is clearly lower semicomputable in $(x, \mathbf{m}u)$ and
when we ``integrate'' it (multiply it by $\mathbf{m}u(x)$ and sum it), its sum is
$\leqslant 1$; thus, it is a test.
Let us prove $\leqslantm$ now.
The expression $\mathbf{t}_{\mathbf{m}u}(x)\mathbf{m}u(x)$ is clearly lower semicomputable in
$(x,\mathbf{m}u)$, and its sum is $\leqslant 1$.
Hence, it is $\leqslanta \mathbf{m}(x \mathbf{m}id \mathbf{m}u)$.
\end{proof}
\begin{remark}
As mentioned earlier, our theory generalizes to measures that are not
probability measures.
In this case, equation~\eqref{e.alg-entr-charac} has interesting relations to
the quantity called ``physical entropy'' by Zurek in~\cite{ZurekPhR89};
it justifies calling $H_{\mathbf{m}u}(x)$
``fine-grained algorithmic Boltzmann entropy'' by this author
in~\cite{GacsBoltzmann94}.
\end{remark}
For non-discrete spaces, unfortunately, we can only provide less
intuitive expressions.
\begin{proposition}\label{t.test-charac}
Let $\mathbf{X}=(X, d, D, \ag)$ be a complete
computable metric space, and let $\mathcal{E}$ be the enumerated set of bounded
Lip\-schitz functions introduced in~\eqref{e.bd-Lip-seq},
but for the space $\mathbf{M}(\mathbf{X}) \times \mathbf{X}$.
The uniform test of randomness $\mathbf{t}_{\mathbf{m}u}(x)$ can be expressed as
\begin{equation}\label{e.test-charac}
\mathbf{t}_{\mathbf{m}u}(x) \eqm
\sum_{f \in \mathbf{m}athcal{E}}f(\mathbf{m}u,x)\frac{\mathbf{m}(f \mathbf{m}id \mathbf{m}u)}{\mathbf{m}u^{y} f(\mathbf{m}u, y)}.
\end{equation}
\end{proposition}
\begin{proof}
For $\geqslantm$,
we will show that the right-hand side of the inequality is a test, and
hence $\leqslantm \mathbf{t}_{\mathbf{m}u}(x)$.
For simplicity, we skip the notation for the enumeration of $\mathbf{m}athcal{E}$ and
treat each element $f$ as its own name.
Each term of the sum is clearly lower semicomputable in
$(f, x, \mathbf{m}u)$, hence the sum is lower semicomputable in $(x, \mathbf{m}u)$.
It remains to show that the $\mathbf{m}u$-integral of the sum is $\leqslant 1$.
But, the $\mathbf{m}u$-integral of the generic term is $\leqslant \mathbf{m}(f \mathbf{m}id \mathbf{m}u)$, and
the sum of these terms is $\leqslant 1$ by the definition of the function
$\mathbf{m}(\cdot \mathbf{m}id \cdot)$.
Thus, the sum is a test.
For $\leqslantm$, note that $(\mathbf{m}u,x) \mathbf{m}apsto \mathbf{t}_{\mathbf{m}u}(x)$,
as a lower semicomputable function, is the supremum of functions in $\mathbf{m}athcal{E}$.
Denoting their differences by $f_{i}(\mathbf{m}u,x)$,
we have $\mathbf{t}_{\mathbf{m}u}(x) = \sum_{i} f_{i}(\mathbf{m}u,x)$.
The test property implies $\sum_{i} \mathbf{m}u^{x} f_{i}(\mathbf{m}u,x) \leqslant 1$.
Since the function $(\mathbf{m}u,i) \mathbf{m}apsto \mathbf{m}u^{x} f_{i}(\mathbf{m}u,x)$ is lower
semicomputable, this implies $\mathbf{m}u^{x} f_{i}(\mathbf{m}u,x) \leqslantm \mathbf{m}(i \mathbf{m}id \mathbf{m}u)$, and
hence
\[
f_{i}(\mathbf{m}u,x) \leqslantm f_{i}(\mathbf{m}u,x) \frac{\mathbf{m}(i \mathbf{m}id \mathbf{m}u)}{\mathbf{m}u^{x} f_{i}(\mathbf{m}u,x)}.
\]
It is easy to see that for each $f\in\mathcal{E}$ we have
\[
\sum_{i : f_{i} = f} \mathbf{m}(i \mid \mu) \leqslant \mathbf{m}(f \mid \mu),
\]
which leads to~\eqref{e.test-charac}.
\end{proof}
\begin{remark}\label{r.test-charac-lb}
If we only want the $\geqslantm$ part of the result, then $\mathbf{m}athcal{E}$ can be
replaced with any enumerated computable sequence of bounded computable
functions.
\end{remark}
\subsection{Infinite sequences}
In this section, we get a nicer characterization of randomness tests in
terms of complexity, in special cases.
Let $\mathbf{m}athcal{M}_{R}(X)$ be the set of measures $\mathbf{m}u$ with $\mathbf{m}u(X)=R$.
\begin{theorem}\label{t.defic-charac-cpt-seqs}
Let $\mathbf{m}athbf{X}=\mathbf{m}athbb{N}^{\og}$ be the set of infinite sequences of natural numbers,
with the product topology.
For all computable measures $\mathbf{m}u\in\mathbf{m}athcal{M}_{R}(X)$,
for the deficiency of randomness $\mathbf{d}_{\mathbf{m}u}(x)$, we have
\begin{equation}\label{e.defic-charac-seq}
\mathbf{d}_{\mu}(x) \eqa \sup_{n}\left(-\log \mu(x^{\leqslant n}) - H(x^{\leqslant n})\right).
\end{equation}
Here, the constant in $\eqa$ depends on the computable measure $\mathbf{m}u$.
\end{theorem}
We will be able to prove the $\geqslanta$ part of the statement in a more general
space, and without assuming computability.
Assume that a separating sequence $b_{1},b_{2},\mathbf{d}ots$ is given as defined
in Subsection~\ref{ss.cells}, along with the set $X^{0}$.
For each $x \in X^{0}$, the binary sequence $x_{1},x_{2},\mathbf{d}ots$
has been defined.
Let
\begin{align*}
\ol\mathbf{m}u(\Gg_{s}) &= R - \sum\setof{\mathbf{m}u(\Gg_{s'}) : l(s)=l(s'),\;s'\ne s}.
\end{align*}
Then $(s,\mathbf{m}u)\mathbf{m}apsto \mathbf{m}u(\Gg_{s})$ is lower semicomputable, and
$(s,\mathbf{m}u)\mathbf{m}apsto \ol\mathbf{m}u(\Gg_{s})$ is upper semicomputable.
And, every time that the functions $b_{i}(x)$ form a regular partition for
$\mathbf{m}u$, we have $\ol\mathbf{m}u(\Gg_{s})=\mathbf{m}u(\Gg_{s})$ for all $s$.
Let $\mathbf{m}athcal{M}_{R}^{0}(X)$ be the set of those measures $\mathbf{m}u$
in $\mathbf{m}athcal{M}_{R}(X)$ for which $\mathbf{m}u(X \xcpt X^{0})=0$.
\begin{theorem}\label{t.defic-charac-compact}
Suppose that the space $\mathbf{m}athbf{X}$ is compact.
Then for all computable measures $\mathbf{m}u\in\mathbf{m}athcal{M}_{R}^{0}(\mathbf{m}athbf{X})$,
for the deficiency of randomness $\mathbf{d}_{\mathbf{m}u}(x)$,
the characterization~\eqref{e.defic-charac-seq} holds.
\end{theorem}
For arbitrary measures and spaces, we can say a little less:
\begin{proposition}\label{p.test-charac-seq-lb}
For all measures $\mathbf{m}u\in\mathbf{m}athcal{M}_{R}(X)$,
for the deficiency of randomness $\mathbf{d}_{\mathbf{m}u}(x)$, we have
\begin{equation}\label{e.defic-ineq-seq}
\mathbf{d}_{\mathbf{m}u}(x) \geqslanta \sup_{n}\operatorname{P}aren{-\log \ol\mathbf{m}u(x^{\leqslant n}) - H(x^{\leqslant n} \mathbf{m}id \mathbf{m}u)}.
\end{equation}
\end{proposition}
\begin{proof}
Consider the function
\[
f_{\mathbf{m}u}(x) = \sum_{s} 1_{\Gg_{s}}(x) \frac{\mathbf{m}(s \mathbf{m}id \mathbf{m}u)}{\ol\mathbf{m}u(\Gg_{s})}
= \sum_{n} \frac{\mathbf{m}(x^{\leqslant n} \mathbf{m}id \mathbf{m}u)}{\ol\mathbf{m}u(x^{\leqslant n})}
\geqslant \sup_{n} \frac{\mathbf{m}(x^{\leqslant n} \mathbf{m}id \mathbf{m}u)}{\ol\mathbf{m}u(x^{\leqslant n})}.
\]
The function $(\mathbf{m}u,x) \mathbf{m}apsto f_{\mathbf{m}u}(x)$
is clearly lower semicomputable and satisfies
$\mathbf{m}u^{x} f_{\mathbf{m}u}(x) \leqslant 1$, and hence
\[
\mathbf{d}_{\mathbf{m}u}(x) \geqslanta \log f(x) \geqslanta
\sup_{n}\operatorname{P}aren{-\log\ol\mathbf{m}u(x^{\leqslant n}) - H(x^{\leqslant n} \mathbf{m}id \mathbf{m}u)}.
\]
\end{proof}
\begin{proof}[Proof of Theorem~\protect\ref{t.defic-charac-cpt-seqs}]
For binary sequences instead of sequences of natural numbers, the part
$\geqslanta$ of the inequality follows directly from
Proposition~\ref{p.test-charac-seq-lb}: indeed, look at
Examples~\ref{x.cells}.
For sequences of natural numbers, the proof is completely analogous.
The proof of $\leqslanta$ reproduces the proof of Theorem 5.2
of~\cite{GacsExact80}.
The computability of $\mathbf{m}u$ implies that $t(x)=\mathbf{t}_{\mathbf{m}u}(x)$ is lower
semicomputable.
Let us first replace $t(x)$ with a rougher version:
\[
t'(x) = \mathbf{m}ax \setof{2^{n} : 2^{n} < \mathbf{t}_{\mathbf{m}u}(x)}.
\]
Then $t'(x) \eqm t(x)$, and it takes only values of the form $2^{n}$.
It is also lower semicomputable.
Let us abbreviate:
\[
1_{y}(x) = 1_{y\mathbb{N}^{\og}}(x),\quad \mu(y) = \mu(y\mathbb{N}^{\og}).
\]
For every lower semicomputable function $f$ over $\mathbf{m}athbb{N}^{\og}$,
there are computable sequences $y_{i}\in\mathbf{m}athbb{N}^{*}$ and
$r_{i}\in\mathbf{m}athbb{Q}$ with $f(x) = \sup_{i} r_{i} 1_{y_{i}}(x)$, with the
additional property that if $i<j$ and $1_{y_{i}}(x)=1_{y_{j}}(x)=1$ then
$r_{i}<r_{j}$.
Since $t'(x)$ only takes values of the form $2^{n}$, there are computable
sequences $y_{i} \in \mathbb{N}^{*}$ and $k_{i} \in \mathbb{N}$ with
\[
t'(x) = \sup_{i}\; 2^{k_{i}} 1_{y_{i}}(x)
\eqm \sum_{i} 2^{k_{i}} 1_{y_{i}}(x),
\]
with the property that if $i<j$ and $1_{y_{i}}(x)=1_{y_{j}}(x)=1$ then
$k_{i}<k_{j}$.
The equality $\eqm$ follows easily from the fact that for any finite
sequence
$n_{1}<n_{2}<\mathbf{d}ots$, $\sum_{j} 2^{n_{j}} \leqslant 2 \mathbf{m}ax_{j} 2^{n_{j}}$.
Since $\mu t' \leqslantm 1$, we have $\sum_{i} 2^{k_{i}}\mu(y_{i}) \leqslantm 1$.
Since the function $i \mapsto 2^{k_{i}}\mu(y_{i})$ is computable, this implies
$2^{k_{i}}\mu(y_{i}) \leqslantm \mathbf{m}(i)$, hence $2^{k_{i}} \leqslantm \mathbf{m}(i)/\mu(y_{i})$.
Thus,
\[
t(x) \leqslantm \sup_{i}\; 1_{y_{i}}(x)\frac{\mathbf{m}(i)}{\mathbf{m}u(y_{i})}.
\]
For $y \in \mathbb{N}^{*}$ we certainly have
$H(y) \leqslanta \inf_{y = y_{i}} H(i)$, which implies
$\sup_{y = y_{i}} \mathbf{m}(i) \leqslantm \mathbf{m}(y)$.
It follows that
\[
t(x) \leqslantm \sup_{y \in \mathbb{N}^{*}}\; 1_{y}(x)\frac{\mathbf{m}(y)}{\mu(y)}
= \sup_{n} \frac{\mathbf{m}(x^{\leqslant n})}{\mu(x^{\leqslant n})}.
\]
Taking logarithms, we obtain the $\leqslanta$ part of the theorem.
\end{proof}
\begin{proof}[Proof of Theorem~\protect\ref{t.defic-charac-compact}]
The proof of part $\geqslanta$ of the inequality follows directly from
Proposition~\ref{p.test-charac-seq-lb}, just as in the proof of
Theorem~\ref{t.defic-charac-cpt-seqs}.
The proof of $\leqslanta$ is also similar to the proof of that theorem.
The only part that needs to be reproved is the statement that
for every lower semicomputable function $f$ over $X$,
there are computable sequences $y_{i}\in\mathbf{m}athbb{B}^{*}$ and
$q_{i}\in\mathbf{m}athbb{Q}$ with $f(x) = \sup_{i} q_{i} 1_{y_{i}}(x)$.
This follows now, since according to
Proposition~\ref{p.compact-cell-basis}, the cells $\Gg_{y}$ form a basis of
the space $\mathbf{m}athbf{X}$.
\end{proof}
\section{Neutral measure}\label{s.neutral}
Let $\mathbf{t}_{\mathbf{m}u}(x)$ be our universal uniform randomness test.
We call a measure $M$ \mathbf{d}f{neutral} if
$\mathbf{t}_{M}(x) \leqslant 1$ for all $x$.
If $M$ is neutral then no experimental outcome $x$ could
refute the theory (hypothesis, model)
that $M$ is the underlying measure to our experiments.
It can be used as ``apriori probability'', in a Bayesian approach to
statistics.
Levin's theorem says the following:
\begin{theorem}\label{t.neutral-meas}
If the space $\mathbf{m}athbf{X}$ is compact then there is a neutral measure over $\mathbf{m}athbf{X}$.
\end{theorem}
The proof relies on a nontrivial combinatorial fact,
Sperner's Lemma, which also underlies the proof of the Brouwer fixpoint
theorem.
Here is a version of Sperner's Lemma, spelled out in continuous form:
\begin{proposition}[see for
example~\protect\cite{SpanierAlgTop71}]\label{p.Sperner}
Let $p_{1},\mathbf{d}ots,p_{k}$ be points of some finite-dimensional space
$\mathbf{m}athbb{R}^{n}$.
Suppose that there are closed sets $F_{1},\mathbf{d}ots,F_{k}$
with the property that for every subset $1 \leqslant i_{1} < \mathbf{d}ots < i_{j} \leqslant k$
of the indices, the simplex $S(p_{i_{1}},\mathbf{d}ots,p_{i_{j}})$ spanned by
$p_{i_{1}},\mathbf{d}ots,p_{i_{j}}$ is covered by
the union $F_{i_{1}} \cup \mathbf{d}ots \cup F_{i_{j}}$.
Then the intersection $\bigcap_{i} F_{i}$ of all these sets is not empty.
\end{proposition}
The following lemma will also be needed.
\begin{lemma}\label{l.concentr}
For every closed set $A \sbs \mathbf{m}athbf{X}$ and measure $\mathbf{m}u$, if $\mathbf{m}u(A)=1$ then
there is a point $x\in A$ with $\mathbf{t}_{\mathbf{m}u}(x) \leqslant 1$.
\end{lemma}
\begin{proof}
This follows easily from
$\mu\, \mathbf{t}_{\mu} = \mu^{x} 1_{A}(x)\,\mathbf{t}_{\mu}(x) \leqslant 1$.
\end{proof}
\begin{proof}[Proof of Theorem~\protect\ref{t.neutral-meas}]
For every point $x \in \mathbf{X}$, let $F_{x}$ be the set of measures for which
$\mathbf{t}_{\mu}(x) \leqslant 1$.
If we show that for every finite set of points $x_{1},\dots,x_{k}$, we
have
\begin{equation}\label{e.finite-inters}
F_{x_{1}}\cap\dots\cap F_{x_{k}} \ne \emptyset,
\end{equation}
then we will be done.
Indeed, according to Proposition~\ref{p.measures-compact}, the compactness
of $\mathbf{X}$ implies the compactness of the space $\mathcal{M}(\mathbf{X})$ of measures.
Therefore if every finite subset of the family $\setof{F_{x} : x \in \mathbf{X}}$
of closed sets has a nonempty intersection, then the whole family has a
nonempty intersection: this intersection consists of the neutral
measures.
To show~\eqref{e.finite-inters}, let $S(x_{1},\dots,x_{k})$ be the set of
probability measures concentrated on $x_{1},\dots,x_{k}$.
Lemma~\ref{l.concentr} implies that each such measure belongs to one of
the sets $F_{x_{i}}$.
Hence $S(x_{1},\dots,x_{k}) \sbs F_{x_{1}} \cup \dots \cup F_{x_{k}}$,
and the same holds for every subset of the indices $\{1,\dots,k\}$.
Sperner's Lemma~\ref{p.Sperner} implies
$F_{x_{1}} \cap \dots \cap F_{x_{k}} \ne\emptyset$.
\end{proof}
When the space is not compact, there are generally no neutral probability
measures, as shown by the following example.
\begin{proposition}\label{t.no-neutral}
Over the discrete space $\mathbf{X} = \mathbb{N}$ of natural numbers,
there is no neutral measure.
\end{proposition}
\begin{proof}
It is sufficient to
construct a randomness test $t_{\mu}(x)$ with the property that for
every measure $\mu$, we have $\sup_{x} t_{\mu}(x) = \infty$.
Let
\begin{equation}\label{e.no-neutral}
t_{\mu}(x) = \sup\setof{ k \in \mathbb{N}: \sum_{y<x}\mu(y) > 1-2^{-k}}.
\end{equation}
By its construction, this is a lower semicomputable function with
$\sup_{x} t_{\mu}(x) = \infty$.
It is a test if $\sum_{x}\mu(x)t_{\mu}(x) \leqslant 1$.
We have
\[
\sum_{x} \mu(x) t_{\mu}(x)
= \sum_{k>0} \sum_{t_{\mu}(x) \geqslant k} \mu(x)
< \sum_{k>0} 2^{-k} \leqslant 1.
\]
\end{proof}
Using a similar construction over the space $\mathbb{N}^{\og}$ of infinite
sequences of natural numbers, we could show that
for every measure $\mu$ there is a sequence $x$ with $\mathbf{t}_{\mu}(x)=\infty$.
Proposition~\ref{t.no-neutral} is a little misleading, since as a locally
compact set, $\mathbb{N}$ can be compactified into $\ol\mathbb{N} = \mathbb{N} \cup \{\infty\}$
(as in Part~\ref{i.compact.compactify} of Example~\ref{x.compact}).
Theorem~\ref{t.neutral-meas} implies that there is a neutral probability
measure $M$ over the compactified space $\ol\mathbb{N}$.
Its restriction to $\mathbb{N}$ is, of course, not a probability measure, since it
satisfies only $\sum_{x < \infty} M(x) \leqslant 1$.
We called these functions \df{semimeasures}.
\begin{remark}\label{r.compactify}\
\begin{enumerate}[\upshape 1.]
\item
It is easy to see that
Theorem~\ref{t.test-charac-discr} characterizing randomness in terms of
complexity holds also for the space $\ol\mathbb{N}$.
\item
The topological space of semimeasures
over $\mathbb{N}$ is not compact, and there is no neutral one among them.
Its topology is not the same as what we get when we restrict the topology
of probability measures over $\ol\mathbb{N}$ to $\mathbb{N}$.
The difference is that over $\mathbb{N}$, for example the set of measures
$\setof{\mu : \mu(\mathbb{N}) \geqslant 1/2}$ is closed, since $\mathbb{N}$ (as the whole space)
is a closed set.
But over $\ol\mathbb{N}$, this set is not closed.
\end{enumerate}
\end{remark}
Neutral measures are not too simple, even over $\ol\mathbb{N}$, as the following
theorem shows.
\begin{theorem}\label{t.no-upper-semi-neutral}
There is no neutral measure over $\ol\mathbb{N}$ that is upper
semicomputable over $\mathbb{N}$ or lower semicomputable over $\mathbb{N}$.
\end{theorem}
\begin{proof}
Let us assume that $\nu$ is a measure that is upper semicomputable over
$\mathbb{N}$.
Then the set
\[
\setof{(x,r) : x \in\mathbb{N},\; r\in\mathbb{Q},\; \nu(x) < r}
\]
is recursively enumerable: let $(x_{i},r_{i})$ be a particular
enumeration.
For each $n$, let $i(n)$ be the first $i$ with $r_{i} < 2^{-n}$, and let
$y_{n} = x_{i(n)}$.
Then $\nu(y_{n}) < 2^{-n}$, and at the same time $H(y_{n}) \leqslanta H(n)$.
As mentioned in Remark~\ref{r.compactify},
Theorem~\ref{t.test-charac-discr} characterizing randomness in terms of
complexity holds also for the space $\ol\mathbb{N}$.
Thus,
\[
\mathbf{d}_{\nu}(y_{n}) \eqa -\log\nu(y_{n}) - H(y_{n} \mid \nu) \geqslanta n - H(n).
\]
Suppose now that $\nu$ is lower semicomputable over $\mathbb{N}$.
The proof for this case is longer.
We know that $\nu$ is the monotonic limit of a recursive sequence
$i\mapsto \nu_{i}(x)$ of recursive semimeasures with rational values
$\nu_{i}(x)$.
For every $k=0,\dots,2^{n}-2$, let
\begin{align*}
V_{n,k} &= \setof{\mu \in \mathcal{M}(\ol\mathbb{N}) :
k\cdot 2^{-n} < \mu(\{0,\dots,2^{n}-1\}) < (k+2)\cdot 2^{-n}},
\\ J &= \setof{(n,k): k\cdot 2^{-n} < \nu(\{0,\dots,2^{n}-1\})}.
\end{align*}
The set $J$ is recursively enumerable.
Let us define the functions $j:J\to\mathbb{N}$ and $x:J\to\{0,\dots,2^{n}-1\}$
as follows: $j(n,k)$ is the smallest $i$ with
$\nu_{i}(\{0,\dots,2^{n}-1\}) > k\cdot 2^{-n}$, and
\[
x_{n,k} = \min\setof{y < 2^{n}: \nu_{j(n,k)}(y) < 2^{-n+1}}.
\]
Let us define the function $f_{\mu}(x,n,k)$ as follows.
We set $f_{\mu}(x,n,k)=2^{n-2}$ if the following conditions hold:
\begin{enumerate}[(a)]
\item\label{i.no-upper-semi-neutral.mu-global} $\mu \in V_{n,k}$;
\item\label{i.no-upper-semi-neutral.mu-upper} $\mu(x) < 2^{-n+2}$;
\item\label{i.no-upper-semi-neutral.unique}
$(n,k) \in J$ and $x=x_{n,k}$.
\end{enumerate}
Otherwise, $f_{\mu}(x,n,k)=0$.
Clearly, the function $(\mu,x,n,k) \mapsto f_{\mu}(x,n,k)$ is lower
semicomputable.
Condition~\eqref{i.no-upper-semi-neutral.mu-upper} implies
\begin{equation}\label{e.no-upper-semi-neutral.n-k-test}
\sum_{y} \mu(y) f_{\mu}(y,n,k) \leqslant
\mu(x_{n,k})f_{\mu}(x_{n,k},n,k) < 2^{-n+2}\cdot 2^{n-2} = 1.
\end{equation}
Let us show that $\nu \in V_{n,k}$ implies
\begin{equation}\label{e.found-bad}
f_{\nu}(x_{n,k},n,k) = 2^{n-2}.
\end{equation}
Consider $x=x_{n,k}$.
Conditions~\eqref{i.no-upper-semi-neutral.mu-global}
and~\eqref{i.no-upper-semi-neutral.unique} are satisfied by definition.
Let us show that condition~\eqref{i.no-upper-semi-neutral.mu-upper} is also
satisfied.
Let $j=j(n,k)$.
By definition, we have $\nu_{j}(x) < 2^{-n+1}$.
Since by definition $\nu_{j}\in V_{n,k}$ and $\nu_{j} \leqslant \nu \in V_{n,k}$,
we have
\[
\nu(x) \leqslant \nu_{j}(x) + 2^{-n+1} < 2^{-n+1} + 2^{-n+1} = 2^{-n+2}.
\]
Since all three conditions~\eqref{i.no-upper-semi-neutral.mu-global},
\eqref{i.no-upper-semi-neutral.mu-upper}
and~\eqref{i.no-upper-semi-neutral.unique} are satisfied, we have
shown~\eqref{e.found-bad}.
Now we define
\[
g_{\mu}(x) = \sum_{n\geqslant 2}\frac{1}{n(n+1)}\sum_{k}f_{\mu}(x,n,k).
\]
Let us prove that $g_{\mu}(x)$ is a uniform test.
It is lower semicomputable by definition, so we only need to prove
$\sum_{x} \mu(x) g_{\mu}(x) \leqslant 1$.
For this, let $I_{n,\mu} = \setof{k: \mu\in V_{n,k}}$.
Clearly by definition, $|I_{n,\mu}|\leqslant 2$.
We have, using this last fact and the test
property~\eqref{e.no-upper-semi-neutral.n-k-test}:
\[
\sum_{x} \mu(x) g_{\mu}(x) =
\sum_{n\geqslant 2}\frac{1}{n(n+1)}
\sum_{k\in I_{n,\mu}} \sum_{x}\mu(x) f_{\mu}(x,n,k)
\leqslant \sum_{n\geqslant 2}\frac{1}{n(n+1)}\cdot 2 \leqslant 1.
\]
Thus, $g_{\mu}(x)$ is a uniform test.
If $\nu\in V_{n,k}$ then we have
\[
\mathbf{t}_{\nu}(x_{n,k})
\geqslantm g_{\nu}(x_{n,k}) \geqslant \frac{1}{n(n+1)}f_{\nu}(x_{n,k},n,k) \geqslant
\frac{2^{n-2}}{n(n+1)}.
\]
Hence $\nu$ is not neutral.
\end{proof}
\begin{remark}
In~\cite{LevinUnif76} and~\cite{LevinRandCons84},
Levin imposed extra conditions on tests which allow one to find a lower
semicomputable neutral semimeasure.
A typical (doubtless reasonable)
consequence of these conditions would be that if outcome $x$ is
random with respect to measures $\mu$ and $\nu$ then it is also random with
respect to $(\mu+\nu)/2$.
\end{remark}
\begin{remark}
The universal lower semicomputable
semimeasure $\mathbf{m}(x)$ has a certain property similar to neutrality.
According to Theorem~\ref{t.test-charac-discr},
for every computable measure $\mu$ we have
$\mathbf{d}_{\mu}(x) \eqa -\log\mu(x) - H(x)$
(where the constant in $\eqa$ depends on $\mu$).
So, for computable measures, the expression
\begin{equation}\label{e.ol-d}
\ol\mathbf{d}_{\mu}(x) = -\log\mu(x) - H(x)
\end{equation}
can serve as a reasonable deficiency of randomness.
(We will also use the test $\ol\mathbf{t} = 2^{\ol\mathbf{d}}$.)
If we substitute $\mathbf{m}$ for $\mu$ in $\ol\mathbf{d}_{\mu}(x)$, we get 0.
This substitution is not justified, of course.
The fact that $\mathbf{m}$ is not a probability
measure can be helped, at least over $\mathbb{N}$, using compactification as
above, and extending the notion of randomness tests.
But the test $\ol\mathbf{d}_{\mu}$ can replace $\mathbf{d}_{\mu}$
only for computable $\mu$, while $\mathbf{m}$ is not computable.
Anyway, this is the sense in which all outcomes might
be considered random with respect to $\mathbf{m}$, and the heuristic
sense in which $\mathbf{m}$ may still be considered ``neutral''.
\end{remark}
\begin{remark}
Solomonoff proposed the use of a universal lower semicomputable semimeasure
(actually, a closely related structure) for inductive inference
in~\cite{Solomonoff64I}.
He proved in~\cite{Solomonoff78} that sequences emitted by any computable
probability distribution can be predicted well by his scheme.
It may be interesting to see whether the same prediction scheme has
stronger properties when used with the truly neutral measure $M$ of the
present paper.
\end{remark}
\section{Relative entropy}\label{s.rel-entr}
Some properties of description complexity make it a good
expression of the idea of individual information content.
\subsection{Entropy}
The entropy of a discrete probability distribution $\mu$ is defined as
\[
\mathcal{H}(\mu) = - \sum_{x} \mu(x) \log \mu(x).
\]
To generalize entropy to continuous distributions the
\df{relative entropy} is defined as follows.
Let $\mu,\nu$ be two measures, where $\mu$ is taken (typically, but not
always) to be a probability measure, and $\nu$ another measure, that can
also be a probability measure but is most frequently not.
We define the \df{relative entropy} $\mathcal{H}_{\nu}(\mu)$ as follows.
If $\mu$ is not absolutely continuous with respect to $\nu$ then
$\mathcal{H}_{\nu}(\mu) = -\infty$.
Otherwise, writing
\[
\frac{d\mu}{d\nu} = \frac{\mu(dx)}{\nu(dx)} =: f(x)
\]
for the (Radon--Nikodym) derivative
(density) of $\mu$ with respect to $\nu$, we define
\[
\mathcal{H}_{\nu}(\mu) = - \int \log\frac{d\mu}{d\nu} d\mu
= - \mu^{x} \log\frac{\mu(dx)}{\nu(dx)} = -\nu^{x} f(x) \log f(x).
\]
Thus, $\mathcal{H}(\mu) = \mathcal{H}_{\#}(\mu)$ is a special case.
\begin{example}
Let $f(x)$ be a probability density function for the distribution
$\mu$ over the real line, and let $\lg$ be the Lebesgue measure there.
Then
\[
\mathcal{H}_{\lg}(\mu) = -\int f(x) \log f(x) d x.
\]
\end{example}
In information theory and statistics, when both $\mu$ and $\nu$ are
probability measures, then $-\mathcal{H}_{\nu}(\mu)$ is also denoted
$D(\mu \parallel \nu)$, and called (after Kullback) the
information divergence of the two measures.
It is frequently used in the role of a distance between $\mu$ and $\nu$.
It is not symmetric and does not obey the triangle inequality, but it
can be shown to be nonnegative.
Let us prove the latter property: in our terms, it says that relative
entropy is nonpositive when both $\mu$ and $\nu$ are probability measures.
\begin{proposition}\label{p.Kullback-pos}
Over a space $\mathbf{X}$, we have
\begin{equation}\label{e.Kullback-pos}
\mathcal{H}_{\nu}(\mu) \leqslant -\mu(X) \log\frac{\mu(X)}{\nu(X)}.
\end{equation}
In particular, if $\mu(X) \geqslant \nu(X)$ then $\mathcal{H}_{\nu}(\mu) \leqslant 0$.
\end{proposition}
\begin{proof}
The inequality $- a \ln a \leqslant -a\ln b + b-a$
expresses the concavity of the logarithm function.
Substituting $a = f(x)$ and $b = \mu(X)/\nu(X)$
and integrating by $\nu$:
\[
(\ln 2) \mathcal{H}_{\nu}(\mu) =
-\nu^{x} f(x) \ln f(x) \leqslant -\mu(X) \ln\frac{\mu(X)}{\nu(X)}
+ \frac{\mu(X)}{\nu(X)} \nu(X) - \mu(X)
= -\mu(X) \ln\frac{\mu(X)}{\nu(X)},
\]
giving~\eqref{e.Kullback-pos}.
\end{proof}
The following theorem generalizes an earlier known theorem stating that
over a discrete space, for a computable measure,
entropy is within an additive constant the same as ``average complexity'':
$\mathcal{H}(\mu) \eqa \mu^{x} H(x)$.
\begin{theorem}
Let $\mu$ be a probability measure.
Then we have
\begin{equation}\label{e.entropy-less-avg-algentr}
\mathcal{H}_{\nu}(\mu) \leqslant \mu^{x} H_{\nu}(x \mid \mu).
\end{equation}
If $X$ is a discrete space then the following estimate also holds:
\begin{equation}\label{e.entropy-gea-avg-algentr}
\mathcal{H}_{\nu}(\mu) \geqslanta \mu^{x} H_{\nu}(x \mid \mu).
\end{equation}
\end{theorem}
\begin{proof}
Let
$\dg$ be the measure with density $\mathbf{t}_{\nu}(x \mid \mu)$ with respect to
$\nu$: $\mathbf{t}_{\nu}(x \mid \mu) = \frac{\dg(dx)}{\nu(dx)}$.
Then $\dg(X) \leqslant 1$.
It is easy to see from the maximality property of $\mathbf{t}_{\nu}(x \mid \mu)$
that $\mathbf{t}_{\nu}(x \mid \mu) > 0$, therefore according to
Proposition~\ref{p.density-props}, we have
$\frac{\nu(dx)}{\dg(dx)} = \left(\frac{\dg(dx)}{\nu(dx)}\right)^{-1}$.
Using Propositions~\ref{p.density-props} and~\ref{p.Kullback-pos}:
\begin{align*}
\mathcal{H}_{\nu}(\mu) &= - \mu^{x} \log\frac{\mu(dx)}{\nu(dx)},
\\ - \mu^{x} H_{\nu}(x \mid \mu) &= \mu^{x} \log \frac{\dg(dx)}{\nu(dx)}
= - \mu^{x} \log \frac{\nu(dx)}{\dg(dx)},
\\ \mathcal{H}_{\nu}(\mu) - \mu^{x} H_{\nu}(x \mid \mu)
&= - \mu^{x} \log \frac{\mu(dx)}{\dg(dx)}
\leqslant -\mu(X) \log\frac{\mu(X)}{\dg(X)} \leqslant 0.
\end{align*}
This proves~\eqref{e.entropy-less-avg-algentr}.
Over a discrete space $\mathbf{X}$, the function
$(x,\mu,\nu) \mapsto \frac{\mu(dx)}{\nu(dx)} = \frac{\mu(x)}{\nu(x)}$
is computable, therefore by the maximality property of
$H_{\nu}(x \mid \mu)$ we have
$\frac{\mu(dx)}{\nu(dx)} \leqslantm \mathbf{t}_{\nu}(x \mid \mu)$,
hence $\mathcal{H}_{\nu}(\mu) = -\mu^{x} \log \frac{\mu(dx)}{\nu(dx)}
\geqslanta \mu^{x} H_{\nu}(x \mid \mu)$.
\end{proof}
\subsection{Addition theorem}
The most important information-theoretical property of description
complexity is the following theorem (see for example~\cite{LiViBook97}):
\begin{proposition}[Addition Theorem]\label{p.addition}
We have $H(x,y) \eqa H(x) + H(y \mid x, H(x))$.
\end{proposition}
Mutual information is defined as $I(x : y) = H(x) + H(y) - H(x,y)$.
By the Addition theorem, we have
$I(x:y) \eqa H(y) - H(y \mid x,\, H(x)) \eqa H(x) - H(x \mid y,\,H(y))$.
The two latter expressions show that in some sense, $I(x:y)$ is the
information held in $x$ about $y$ as well as the information held in $y$
about $x$.
(The terms $H(x)$, $H(y)$ in the conditions are
logarithmic-sized corrections to this idea.)
Using~\eqref{e.ol-d}, it is interesting to view
mutual information $I(x : y)$ as a deficiency of randomness of
the pair $(x,y)$
in terms of the expression $\ol\mathbf{d}_{\mu}$, with respect to $\mathbf{m} \times \mathbf{m}$:
\[
I(x : y) = H(x) + H(y) - H(x,y) = \ol\mathbf{d}_{\mathbf{m} \times \mathbf{m}}(x, y).
\]
Taking $\mathbf{m}$ as a kind of ``neutral'' probability, even if it is not quite
such, allows us to view $I(x:y)$ as a ``deficiency of independence''.
Is it also true that $I(x:y) \eqa \mathbf{d}_{\mathbf{m} \times \mathbf{m}}(x)$?
This would allow us to deduce, as Levin did, ``information conservation''
laws from randomness conservation laws.\footnote{We cannot use the
test $\ol\mathbf{t}_{\mu}$ for this, since---as it can be shown easily---it
does not obey randomness conservation.}
Expression $\mathbf{d}_{\mathbf{m} \times \mathbf{m}}(x)$ must be understood
again in the sense of compactification, as in
Section~\ref{s.neutral}.
There seem to be two reasonable ways to compactify the space
$\mathbb{N}\times\mathbb{N}$: we either compactify it directly, by adding a symbol
$\infty$, or we form the product $\ol\mathbb{N} \times \ol\mathbb{N}$.
With either of them, preserving
Theorem~\ref{t.test-charac-discr}, we would
have to check whether $H(x,y \mid \mathbf{m} \times \mathbf{m}) \eqa H(x,y)$.
But, knowing the function $\mathbf{m}(x)\times\mathbf{m}(y)$ we
know the function $x \mapsto \mathbf{m}(x) \eqm \mathbf{m}(x) \times \mathbf{m}(0)$,
hence also the function $(x,y)\mapsto\mathbf{m}(x,y) = \mathbf{m}(\ang{x,y})$, where
$\ang{x,y}$ is any fixed computable pairing function.
Using this knowledge, it is possible to develop an argument similar to
the proof of Theorem~\ref{t.no-upper-semi-neutral}, showing that
$H(x,y \mid \mathbf{m} \times \mathbf{m}) \eqa H(x,y)$ does not hold.
\begin{question}
Is there a neutral measure $M$ with the property
$I(x:y) = \mathbf{d}_{M\times M}(x,y)$?
Is this true maybe for all neutral measures $M$?
If not, how far apart are the expressions $\mathbf{d}_{M\times M}(x,y)$ and
$I(x:y)$ from each other?
\end{question}
The Addition Theorem (Proposition~\ref{p.addition})
can be generalized to the algorithmic entropy $H_{\mu}(x)$
introduced in~\eqref{e.alg-ent} (a somewhat similar generalization appeared
in~\cite{VovkVyugin93}).
The generalization, defining $H_{\mu,\nu} = H_{\mu\times\nu}$, is
\begin{equation}\label{e.addition-general}
H_{\mu,\nu}(x,y)\eqa
H_\mu(x \mid \nu)+ H_\nu(y \mid x,\; H_\mu(x \mid \nu),\; \mu).
\end{equation}
Before proving the general addition theorem, we establish a few useful
facts.
\begin{proposition}\label{p.int.H.of.xy}
We have
\[
H_{\mu}(x \mid \nu) \leqslanta -\log \nu^{y} 2^{-H_{\mu,\nu}(x, y)}.
\]
\end{proposition}
\begin{proof}
The function $f(x,\mu,\nu)$ that is the right-hand side is upper
semicomputable by definition, and obeys $\mu^{x}2^{-f(x,\mu,\nu)} \leqslant 1$.
Therefore the inequality follows from the minimum property of
$H_{\mu}(x)$.
\end{proof}
Let us generalize the minimum property of $H_{\mu}(x)$.
\begin{proposition}\label{p.univ-test-gener}
Let $(x,y,\nu) \mapsto f_{\nu}(x,y)$ be a nonnegative lower semicomputable
function with $F_{\nu}(x) = \log \nu^{y} f_{\nu}(x,y)$.
Then for all $x$ with $F_{\nu}(x) > -\infty$ we have
\[
H_{\nu}(y \mid x, \flo{F_{\nu}(x)}) \leqslanta -\log f_{\nu}(x,y) + F_{\nu}(x).
\]
\end{proposition}
\begin{proof}
Let us construct a lower semicomputable function
$(x,y,m,\nu) \mapsto g_{\nu}(x,y,m)$ for
integers $m$ with the property that $\nu^{y} g_{\nu}(x,y,m) \leqslant 2^{-m}$,
and for all $x$ with $F_{\nu}(x) \leqslant -m$ we have
$g_{\nu}(x,y,m) = f_{\nu}(x,y)$.
Such a $g$ can be constructed by watching the approximation
of $f$ grow and cutting it off as soon as it would give $F_{\nu}(x) > -m$.
Now $(x,y,m,\nu) \mapsto 2^{m} g_{\nu}(x,y,m)$ is a uniform
conditional test of $y$ and hence it is $\leqslantm 2^{-H_{\nu}(y \mid x, m)}$.
To finish the proof, substitute $-\flo{F_{\nu}(x)}$ for $m$ and rearrange.
\end{proof}
Let $z \in \mathbb{N}$; then the inequality
\begin{equation}\label{e.H.x.cond.z}
H_{\mu}(x) \leqslanta H(z) + H_{\mu}(x \mid z)
\end{equation}
will be a simple consequence of the general addition theorem.
The following lemma, needed in the proof of the theorem,
generalizes this inequality somewhat:
\begin{lemma}\label{l.H.x.cond.z}
For a com\-put\-able func\-tion $(y,z) \mapsto f(y,z)$ over $\mathbb{N}$,
we have
\[
H_{\mu}(x \mid y) \leqslanta H(z) + H_{\mu}(x \mid f(y,z)).
\]
\end{lemma}
\begin{proof}
The function
\[
(x,y,\mu) \mapsto g_{\mu}(x, y)=\sum_{z} 2^{-H_{\mu}(x \mid f(y,z))-H(z)}
\]
is lower semicomputable,
and $\mu^{x} g_{\mu}(x, y) \leqslant \sum_{z} 2^{-H(z)} \leqslant 1$.
Hence $g_{\mu}(x, y) \leqslantm 2^{-H_{\mu}(x \mid y)}$.
The left-hand side is a sum, hence the inequality holds for each
element of the sum: just what we had to prove.
\end{proof}
As mentioned above, the
theory generalizes to measures that are not probability measures.
Taking $f_{\mu}(x,y)=1$ in Proposition~\ref{p.univ-test-gener} gives
the inequality
\[
H_{\mu}(x \mid \flo{\log \mu(X)}) \leqslanta \log\mu(X),
\]
with a physical meaning when $\mu$ is the phase space measure.
Using~\eqref{e.H.x.cond.z}, this implies
\begin{equation}\label{e.unif.ub}
H_\mu(x)\leqslanta \log\mu(X) + H(\flo{\log\mu(X)}).
\end{equation}
The following simple monotonicity property will be needed:
\begin{lemma}\label{l.mon}
For $i < j$ we have
\[
i + H_\mu(x\mid i) \leqslanta j + H_\mu(x\mid j) .
\]
\end{lemma}
\begin{proof}
From Lemma~\ref{l.H.x.cond.z}, with $f(i, n)=i + n$ we have
\[
H_{\mu}(x \mid i) - H_{\mu}(x \mid j) \leqslanta H(j-i) \leqslanta j-i.
\]
\end{proof}
\begin{theorem}[General addition]\label{t.addition-general}
The following inequality holds:
\[
H_{\mu,\nu}(x,y) \eqa
H_{\mu}(x \mid \nu)+ H_{\nu}(y \mid x,\; H_\mu(x \mid \nu),\; \mu).
\]
\end{theorem}
\begin{proof}
To prove the inequality $\leqslanta$, let us define
\[
G_{\mu,\nu}(x,y,m) =\min_{i\geqslant m}\;i + H_{\nu}(y \mid x, i, \mu).
\]
Function $G_{\mu,\nu}(x,y,m)$ is upper semicomputable and decreasing
in $m$.
Therefore
\[
G_{\mu,\nu}(x,y) = G_{\mu,\nu}(x, y, H_{\mu}(x \mid \nu))
\]
is also upper semicomputable since it is obtained by substituting an
upper semicomputable function for $m$ in $G_{\mu,\nu}(x,y,m)$.
Lemma~\ref{l.mon} implies
\begin{align*}
G_{\mu,\nu}(x,y,m) &\eqa m + H_{\nu}(y \mid x, m, \mu),
\\ G_{\mu,\nu}(x,y) &\eqa H_{\mu}(x \mid \nu) +
H_{\nu}(y \mid x, H_{\mu}(x \mid \nu), \mu).
\end{align*}
Now, we have
\begin{align*}
\nu^{y} 2^{-m - H_{\nu}(y \mid x, m, \mu)} &\leqslant 2^{-m},
\\ \nu^{y} 2^{-G_{\mu,\nu}(x,y)} &\leqslantm 2^{-H_{\mu}(x \mid \nu)}.
\end{align*}
Therefore $\mu^{x}\nu^{y} 2^{-G} \leqslantm 1$, implying
$H_{\mu,\nu}(x,y) \leqslanta G_{\mu,\nu}(x,y)$
by the minimality property of $H_{\mu,\nu}(x,y)$.
This proves the $\leqslanta$ half of our theorem.
To prove the inequality $\geqslanta$, let
\begin{align*}
f_{\nu}(x,y,\mu) &= 2^{-H_{\mu,\nu}(x,y)},
\\ F_{\nu}(x, \mu) &= \log \nu^{y} f_{\nu}(x,y,\mu).
\end{align*}
According to Proposition~\ref{p.univ-test-gener},
\begin{align*}
H_{\nu}(y \mid x,\flo{F},\mu)
&\leqslanta -\log f_{\nu}(x,y,\mu) + F_{\nu}(x, \mu),
\\ H_{\mu,\nu}(x,y) &\geqslanta -F + H_{\nu}(y \mid x, \cei{-F}, \mu).
\end{align*}
Proposition~\ref{p.int.H.of.xy} implies
$-F_{\nu}(x, \mu) \geqslanta H_{\mu}(x \mid \nu)$.
The monotonicity Lemma~\ref{l.mon} implies from here the $\geqslanta$ half of the
theorem.
\end{proof}
\subsection{Some special cases of the addition theorem; information}
The function $H_{\mu}(\cdot)$ behaves quite differently for different
kinds of measures $\mu$.
Recall the following property of complexity:
\begin{equation} \label{e.compl.of.fun}
H(f(x)\mid y)\leqslanta H(x\mid g(y)) \leqslanta H(x)
\end{equation}
for any computable functions $f,g$.
This implies
\[
H(y)\leqslanta H(x,y).
\]
In contrast, if $\mu$ is a probability measure then
\[
H_{\nu}(y) \geqslanta H_{\mu,\nu}(x, y).
\]
This comes from the fact that $2^{-H_{\nu}(y)}$ is a test for
$\mu\times\nu$.
Let us explore some of the consequences and meanings of the additivity
property.
As noted in~\eqref{e.Hmu-to-cond}, the
subscript $\mu$ can always be added to the condition:
$H_{\mu}(x) \eqa H_{\mu}(x \mid \mu)$.
Similarly, we have
\[
H_{\mu,\nu}(x,y) := H_{\mu\times\nu}(x,y)
\eqa H_{\mu\times\nu}(x,y\mid \mu\times\nu)
\eqa H_{\mu\times\nu}(x,y\mid \mu,\nu)
=: H_{\mu,\nu}(x,y\mid \mu,\nu),
\]
where only the before-last relation requires new (easy) consideration.
Let us assume that $X=Y=\Sg^{*}$, the discrete space of all strings.
With
general $\mu,\nu$ such that $\mu(x),\nu(x) \ne 0$ for all $x$,
using~\eqref{e.alg-entr-charac}, the addition theorem specializes to the
ordinary addition theorem, conditioned on $\mu,\nu$:
\[
H(x,y\mid \mu,\nu) \eqa
H(x \mid \mu,\nu)+ H(y \mid x,\; H(x \mid \mu,\nu),\; \mu,\nu).
\]
In particular, whenever $\mu,\nu$ are computable, this is just the regular
addition theorem.
Just as we defined mutual information above as
$I(x : y) = H(x) + H(y) - H(x,y)$, the new addition theorem
suggests a more general definition
\[
I_{\mu,\nu}(x : y) = H_{\mu}(x \mid \nu) +
H_{\nu}(y\mid \mu) - H_{\mu,\nu}(x,y).
\]
In the discrete case $X=Y=\Sg^{*}$ with everywhere positive
$\mu(x),\nu(x)$, this simplifies to
\[
I_{\mu,\nu}(x : y) = H(x \mid \mu,\nu) + H(y\mid \mu,\nu)
- H(x,y \mid \mu,\nu),
\]
which is $\eqa I(x:y)$ in case of computable $\mu,\nu$.
How different can it be for non-computable $\mu,\nu$?
In the general case, even for computable $\mu,\nu$, it seems
worth finding out how much this expression depends on the choice of
$\mu,\nu$.
Can one arrive at a general, natural definition of mutual information along
this path?
\section{Conclusion}
When uniform randomness tests are defined in as general a
form as they were here, the theory of information conservation does not fit
nicely into the theory of randomness conservation as it did
with~\cite{LevinUnif76} and~\cite{LevinRandCons84}.
Still, it is worth laying the theory onto broad
foundations that, we hope, can serve as a basis for further development.
\appendix
\section{Topological spaces}\label{s.top}
Given two sets $X,Y$, a \df{partial function} $f$
from $X$ to $Y$, defined on a subset of $X$, will be denoted as
\[
f:\sbsq X \to Y.
\]
\subsection{Topology}\label{ss.top}
A \df{topology} on a set $X$ is defined by a class $\tau$
of its subsets called \df{open sets}.
It is required that the empty set and $X$ are open, and that arbitrary
union and finite intersection of open sets is open.
The pair $(X, \tau)$ is called a \df{topological space}.
A topology $\tau'$ on $X$ is called \df{larger}, or \df{finer} than $\tau$
if $\tau' \spsq \tau$.
A set is called \df{closed} if its complement is open.
A set $B$ is called a \df{neighborhood} of a set $A$ if $B$ contains an
open set that contains $A$.
We denote by
$\Cl{A}, \Int{A}$
the closure (the intersection of all closed sets containing $A$)
and the interior of $A$ (the union of all open sets in $A$) respectively.
Let
\[
\partial A = \Cl{A} \xcpt \Int{A}
\]
denote the boundary of set $A$.
A \df{base} is a subset $\bg$ of $\tau$ such that every open set is the
union of some elements of $\bg$.
A \df{neighborhood} of a point is a base element containing it.
A \df{base of neighborhoods of a point} $x$ is a set $N$ of neighborhoods
of $x$ with the property that each neighborhood of $x$ contains an element
of $N$.
A \df{subbase} is a subset $\sg$ of $\tau$ such that every open set is the
union of finite intersections from $\sg$.
\begin{examples}\label{x.topol}\
\begin{enumerate}[\upshape 1.]
\item\label{i.x.topol.discr}
Let $X$ be a set, and let $\bg$ be the set of all singletons $\{x\}$,
$x \in X$.
The topology with base $\bg$ is the \df{discrete topology} of the
set $X$.
In this topology, every subset of $X$ is open (and closed).
\item\label{i.x.topol.real}
Let $X$ be the real line $\mathbb{R}$, and let $\bg_{\mathbb{R}}$ be the set of
all open intervals $\opint{a}{b}$.
The topology $\tau_{\mathbb{R}}$ obtained from this base is the usual topology of
the real line.
When we refer to $\mathbb{R}$ as a topological space without qualification,
this is the topology we will always have in mind.
\item Let $X = \ol\mathbb{R} = \mathbb{R} \cup \{-\infty,\infty\}$,
and let $\bg_{\ol\mathbb{R}}$ consist of all open intervals $\opint{a}{b}$
and in addition of all intervals of the forms $\lint{-\infty}{a}$ and
$\rint{a}{\infty}$.
It is clear how the space $\ol\mathbb{R}_{+}$ is defined.
\item\label{i.x.topol.real-upper-converg}
Let $X$ be the real line $\mathbb{R}$.
Let $\bg_{\mathbb{R}}^{>}$
be the set of all open intervals $\opint{-\infty}{b}$.
The topology with base $\bg_{\mathbb{R}}^{>}$ is also a topology of the real
line, different from the usual one.
Similarly, let $\bg_{\mathbb{R}}^{<}$
be the set of all open intervals $\opint{b}{\infty}$.
\item\label{i.x.topol.Cantor}
On the space $\Sg^{\og}$, let
$\tg_{C} = \setof{A\Sg^{\og} : A \sbsq \Sg^{*}}$ be called the topology of
the \df{Cantor space} (over $\Sg$).
\end{enumerate}
\end{examples}
A set is called a $G_{\dg}$ set if it is a countable intersection of open
sets, and it is an $F_{\sg}$ set if it is a countable union of closed sets.
For two topologies $\tau_{1},\tau_{2}$ over the same set $X$, we define
the topology $\tau_{1}\V \tau_{2} = \tau_{1} \cap \tau_{2}$, and
$\tau_{1} \et \tau_{2}$ as the smallest topology containing
$\tau_{1} \cup \tau_{2}$.
In the example topologies of the real numbers above, we have
$\tau_{\mathbb{R}} = \tau_{\mathbb{R}}^{<} \et \tau_{\mathbb{R}}^{>}$.
We will always require the topology to have at least the $T_{0}$ property:
every point is determined by the class of open sets containing it.
This is the weakest one of a number of other possible separation
properties: both topologies of the real line in the example above have it.
A stronger such property would be the $T_{2}$ property:
a space is called a \df{Hausdorff} space, or $T_{2}$ space,
if for every pair of different
points $x,y$ there is a pair of disjoint open sets
$A,B$ with $x\in A$, $y\in B$.
The real line with topology $\tau_{\mathbb{R}}^{>}$ in
Example~\ref{x.topol}.\ref{i.x.topol.real-upper-converg} above is
not a Hausdorff space.
A space is Hausdorff if and only if every open set is the union of closed
neighborhoods.
Given two topological spaces $(X_{i}, \tau_{i})$ ($i=1,2$),
a function $f :\sbsq X_{1} \to X_{2}$ is
called \df{continuous} if for every open set $G \sbs X_{2}$ its inverse
image $f^{-1}(G)$ is also open.
If the topologies $\tau_{1},\tau_{2}$ are not clear from the context then
we will call the function $(\tau_{1}, \tau_{2})$-continuous.
Clearly, the property remains the same if we require it only for all
elements $G$ of a subbase of $X_{2}$.
If there are two continuous functions between $X$ and $Y$ that
are inverses of each other then the two spaces are called
\df{homeomorphic}.
We say that $f$ is continuous \df{at point} $x$ if for every neighborhood
$V$ of $f(x)$ there is a neighborhood $U$ of $x$ with $f(U) \sbsq V$.
Clearly, $f$ is continuous if and only if it is continuous in each point.
A \df{subspace} of a topological space $(X, \tau)$
is defined by a subset $Y \sbsq X$, and the topology
$\tau_{Y} = \setof{G \cap Y : G \in \tau}$, called the \df{induced}
topology on $Y$.
This is the smallest topology on $Y$
making the identity mapping $x \mapsto x$ continuous.
A partial function $f :\sbsq X \to Z$ with $\operatorname{dom}(f) = Y$
is continuous iff $f : Y \to Z$ is continuous.
For two topological spaces $(X_{i}, \tau_{i})$ ($i=1,2$),
we define the \df{product topology}
on their product $X \times Y$: this is the
topology defined by the subbase
consisting of all sets $G_{1} \times X_{2}$ and all sets
$X_{1} \times G_{2}$ with $G_{i} \in \tau_{i}$.
The product topology is the smallest topology making the projection
functions $(x,y) \mapsto x$, $(x,y) \mapsto y$ continuous.
Given topological spaces $X,Y,Z$ we
call a two-argument function $f: X \mathbf{t}imes Y \mathbf{m}apsto Z$ continuous if
it is continuous as a function from $X \mathbf{t}imes Y$ to $Z$.
The product topology is defined similarly
over the product $\prod_{i \in I} X_{i}$ of an arbitrary number of
spaces, indexed by some index set $I$.
We say that a function is
$(\mathbf{t}au_{1},\mathbf{d}ots,\mathbf{t}au_{n},\mathbf{m}u)$-continuous if it is
$(\mathbf{t}au_{1} \mathbf{t}imes\mathbf{d}ots\mathbf{t}imes\mathbf{t}au_{n},\mathbf{m}u)$-continuous.
\begin{examples}\label{x.prod}\
\begin{enumerate}[\upshape 1.]
\item\label{i.x.prod.real}
The space $\mathbf{m}athbb{R} \mathbf{t}imes \mathbf{m}athbb{R}$ with the product
topology has the usual topology of the Euclidean plane.
\item\label{i.x.top-inf-seq}
Let $X$ be a set with the discrete topology,
and $X^{\og}$ the set of infinite sequences with elements from $X$,
with the product topology.
A base of this topology is provided by all sets of the form $uX^{\og}$
where $u \in X^{*}$.
The elements of this base are closed as well as open.
When $X = \{0,1\}$ then this topology
is the usual topology of infinite binary sequences.
\end{enumerate}
\end{examples}
A real function $f : X_{1} \mathbf{t}o \mathbf{m}athbb{R}$ is called continuous if it is
$(\mathbf{t}au_{1}, \mathbf{t}au_{\mathbf{m}athbb{R}})$-continuous.
It is called \mathbf{d}f{lower semicontinuous} if it is
$(\mathbf{t}au_{1}, \mathbf{t}au_{\mathbf{m}athbb{R}}^{<})$-continuous.
The definition of upper semicontinuity is similar.
Clearly, $f$ is continuous if and only if it is both lower and upper
semicontinuous.
The requirement of lower semicontinuity of $f$ is that
for each $r \in \mathbf{m}athbb{R}$, the set $\setof{x: f(x) > r}$ is open.
This can be seen to be equivalent to the requirement that the single set
$\setof{(x,r): f(x) > r}$ is open.
It is easy to see that the supremum of any set of lower semicontinuous
functions is lower semicontinuous.
Let $(X, \mathbf{t}au)$ be a topological space, and $B$ a subset of $X$.
An \mathbf{d}f{open cover} of $B$ is a family of open sets whose union contains
$B$.
A subset $K$ of $X$ is said to be \mathbf{d}f{compact} if every open cover of $K$
has a finite subcover.
Compact sets have many important properties: for example, a continuous
function over a compact set is bounded.
\begin{example}\label{x.compact}\
\begin{enumerate}[\upshape 1.]
\item\label{i.compact.compactify}
Every finite discrete space is compact.
An infinite discrete space $\mathbf{m}athbf{X} = (X, \mathbf{t}au)$ is not compact,
but it can be turned into a compact space $\ol\mathbf{m}athbf{X}$
by adding a new element called $\infty$: let
$\ol X = X\cup\{\infty\}$, and
$\ol\mathbf{t}au = \mathbf{t}au\cup\setof{\ol X \xcpt A: A \sbs X\mathbf{t}xt{ compact }}$.
More generally, this simple operation can be performed with every space
that is \mathbf{d}f{locally compact}, that is, each of its points has a compact
neighborhood.
\item In a finite-dimensional Euclidean space, every bounded closed set
is compact.
\item It is known that if $(\mathbf{m}athbf{X}_{i})_{i\in I}$ is a family of compact
spaces then their direct product is also compact.
\end{enumerate}
\end{example}
A subset $K$ of $X$ is said to be \mathbf{d}f{sequentially compact}
if every sequence in $K$ has a convergent subsequence with limit in $K$.
The space is \mathbf{d}f{locally compact} if every point has a compact
neighborhood.
\subsection{Metric spaces}\label{ss.metric}
In our examples for metric spaces, and later in our treatment of the space
of probability measures, we refer to~\cite{BillingsleyConverg68}.
A \mathbf{d}f{metric space} is given by a set $X$ and a distance function
$d : X\mathbf{t}imes X \mathbf{t}o \mathbf{m}athbb{R}_{+}$ satisfying the
\mathbf{d}f{triangle inequality} $d(x, z) \leqslant d(x, y) + d(y, z)$,
the symmetry property $d(x, y) = d(y, x)$, and also
the property that $d(x,y) = 0$ implies $x = y$.
For $r \in\mathbf{m}athbb{R}_{+}$, the sets
\[
B(x,r) = \setof{y : d(x, y) < r},\quad
\ol B(x,r) = \setof{y : d(x, y) \leqslant r}
\]
are called the open and closed \mathbf{d}f{balls} with radius $r$ and center $x$.
A metric space is also a topological space, with the base that is the set
of all open balls.
Over this space, the distance function $d(x,y)$ is obviously continuous.
Each metric space is a Hausdorff space; moreover, it has the
following stronger property.
For every pair of different points $x,y$ there is a continuous function
$f : X \mathbf{t}o \mathbf{m}athbb{R}$ with $f(x) \ne f(y)$.
(To see this, take $f(z) = d(x, z)$.)
This is called the $T_{3}$ property.
A metric space is \mathbf{d}f{bounded} when $d(x,y)$ has an upper bound on $X$.
A topological space is called \mathbf{d}f{metrizable} if its topology can be
derived from some metric space.
\begin{notation}
For an arbitrary set $A$ and point $x$ let
\begin{align}\nonumber
d(x, A) &= \inf_{y \in A} d(x,y),
\\\label{e.Aeps}
A^{\eps} &= \setof{x : d(x, A) < \eps}.
\end{align}
\end{notation}
\begin{examples}\label{x.metric}\
\begin{enumerate}[\upshape 1.]
\item The real line with the distance $d(x,y) = |x - y|$ is a metric
space.
The topology of this space is the usual topology $\mathbf{t}au_{\mathbf{m}athbb{R}}$ of the real
line.
\item The space $\mathbf{m}athbb{R} \mathbf{t}imes \mathbf{m}athbb{R}$ with the Euclidean distance gives the
same topology as the product topology of $\mathbf{m}athbb{R} \mathbf{t}imes \mathbf{m}athbb{R}$.
\item An arbitrary set $X$ with the distance $d(x,y)=1$ for all pairs
$x,y$ of different elements, is a metric space that induces the discrete
topology on $X$.
\item\label{i.x.metric-inf-seq}
Let $X$ be a bounded metric space, and let
$Y = X^{\og}$ be the set of infinite sequences
$x = (x_{1}, x_{2}, \mathbf{d}otsc)$
with distance function $d^{\og}(x,y) = \sum_{i} 2^{-i} d(x_{i},y_{i})$.
The topology of this space is the same as the product topology defined
in Example~\ref{x.prod}.\ref{i.x.top-inf-seq}.
\item\label{i.x.metric-nonsep}
Let $X$ be a metric space, and let
$Y = X^{\og}$ be the set of infinite bounded sequences
$x = (x_{1}, x_{2}, \mathbf{d}otsc)$
with distance function $d(x, y) = \sup_{i} d(x_{i}, y_{i})$.
\item\label{i.x.C(X)}
Let $X$ be a metric space, and let
$C(X)$ be the set of bounded continuous functions over $X$ with
distance function $d'(f, g) = \sup_{x} d(f(x), g(x))$.
A special case is $C\clint{0}{1}$ where the interval $\clint{0}{1}$ of real
numbers has the usual metric.
\item\label{i.x.l2}
Let $l_{2}$ be the set of infinite sequences $x = (x_{1}, x_{2}, \mathbf{d}otsc)$
of real numbers with the property that $\sum_{i} x_{i}^{2} < \infty$.
The metric is given by
the distance $d(x, y) = (\sum_{i} |x_{i} - y_{i}|^{2})^{1/2}$.
\end{enumerate}
\end{examples}
A topological space has the \mathbf{d}f{first countability property} if each point
has a countable base of neighborhoods.
Every metric space has the first countability property since we can
restrict ourselves to balls with rational radius.
Given a topological space $(X, \mathbf{t}au)$ and a sequence $x = (x_{1}, x_{2},
\mathbf{d}otsc)$ of elements of $X$, we say that $x$ \mathbf{d}f{converges} to a point $y$
if for every neighborhood $G$ of $y$ there is a $k$ such that for all
$m > k$ we have $x_{m} \in G$.
We will write $y = \lim_{n \mathbf{t}o \infty} x_{n}$.
It is easy to show that if spaces $(X_{i}, \mathbf{t}au_{i})$ $(i=1,2)$
have the first countability property then a function $f : X_{1} \mathbf{t}o X_{2}$ is
continuous if and only if for every convergent sequence $(x_{n})$ we have
$f(\lim_{n} x_{n}) = \lim_{n} f(x_{n})$.
A topological
space has the \mathbf{d}f{second countability property} if the whole space
has a countable base.
For example, the space $\mathbf{m}athbb{R}$ has the second countability property
for all three topologies $\mathbf{t}au_{\mathbf{m}athbb{R}}$, $\mathbf{t}au_{\mathbf{m}athbb{R}}^{<}$, $\mathbf{t}au_{\mathbf{m}athbb{R}}^{>}$.
Indeed, we also
get a base if instead of taking all intervals, we only take
intervals with rational endpoints.
On the other hand, the metric space of
Example~\ref{x.metric}.\ref{i.x.metric-nonsep} does not have
the second countability property.
In a topological space $(X, \mathbf{t}au)$, a set $B$ of points is called
\mathbf{d}f{dense} at a point $x$ if it intersects every neighborhood of $x$.
It is called \mathbf{d}f{everywhere dense}, or \mathbf{d}f{dense}, if it is dense at every
point.
A metric space is called \mathbf{d}f{separable} if it has a countable everywhere
dense subset.
This property holds if and only if the space as a topological
space has the second countability property.
\begin{example}\label{x.Cclint{0}{1}}
In Example~\ref{x.metric}.\ref{i.x.C(X)}, for $X=\clint{0}{1}$, we can
choose as our everywhere dense set the set of all polynomials with rational
coefficients, or alternatively,
the set of all piecewise linear functions whose graph has
finitely many nodes at rational points.
\end{example}
Let $X$ be a metric space, and let
$C(X)$ be the set of bounded continuous functions over $X$ with
distance function $d'(f, g) = \sup_{x} d(f(x), g(x))$.
A special case is $C\clint{0}{1}$ where the interval $\clint{0}{1}$ of real
numbers has the usual metric.
Let $(X, d)$ be a metric space, and $a = (a_{1}, a_{2},\mathbf{d}otsc)$ an infinite
sequence.
A metric space is called \mathbf{d}f{complete} if every Cauchy sequence in it has a
limit.
It is well-known that every metric space can be embedded (as an everywhere
dense subspace) into a complete space.
It is easy to see that in a metric space, every closed set is a
$G_{\mathbf{d}g}$ set (and every open set is an $F_{\sg}$ set).
\begin{example}
Consider the set $D\clint{0}{1}$ of functions over
$\clint{0}{1}$ that are right continuous and have left limits everywhere.
The book~\cite{BillingsleyConverg68} introduces two different metrics for
them: the Skorohod metric $d$ and the $d_{0}$ metric.
In both metrics, two functions are close if a slight monotonic continuous
deformation of the coordinate makes them uniformly close.
But in the $d_{0}$ metric, the slope of the deformation must be close to 1.
It is shown that the two metrics give rise to the same topology;
however, the space with metric $d$ is not complete, and the
space with metric $d_{0}$ is.
\end{example}
Let $(X, d)$ be a metric space.
It can be shown that a subset $K$ of $X$ is compact if and only
if it is sequentially compact.
Also, $K$ is compact if and only if it is closed and
for every $\eps$, there is a finite set of
$\eps$-balls (balls of radius $\eps$) covering it.
We will develop the theory of randomness over separable
complete metric spaces.
This is a wide class of spaces encompassing most spaces of practical
interest.
The theory would be simpler if we restricted it to compact or locally
compact spaces; however, some important spaces like $C\clint{0}{1}$
(the set of continuous functions over the interval $\clint{0}{1}$, with
the maximum difference as their distance) are not locally compact.
Given a function $f: X \mathbf{t}o Y$ between metric spaces and $\bg > 0$,
let $\operatorname{Lip}_{\bg}(X,Y)$ denote the
set of functions (called the Lip\-schitz$(\bg)$ functions, or simply
Lip\-schitz functions) satisfying
\begin{equation}\label{e.Lipschitz}
d_{Y}(f(x), f(y)) \leqslant \bg d_{X}(x, y).
\end{equation}
All these functions are uniformly continuous.
Let $\operatorname{Lip}(X) = \operatorname{Lip}(X,\mathbf{m}athbb{R}) = \bigcup_{\bg} \operatorname{Lip}_{\bg}$
be the set of real Lip\-schitz functions over $X$.
\section{Measures}\label{s.measures}
For a survey of measure theory, see for example~\cite{PollardUsers01}.
\subsection{Set algebras}
A (Boolean set-) \mathbf{d}f{algebra} is a set of subsets of some set $X$
closed under intersection and complement (and then, of course, under
union).
It is a \mathbf{d}f{$\sg$-algebra} if it is also closed
under countable intersection
(and then, of course, under countable union).
A \mathbf{d}f{semialgebra} is a set $\mathbf{m}athcal{L}$
of subsets of some set $X$ closed under
intersection, with the property that the complement of every element
of $\mathbf{m}athcal{L}$ is the disjoint union of a finite number of elements of $\mathbf{m}athcal{L}$.
If $\mathbf{m}athcal{L}$ is a semialgebra then the set of finite unions of elements of
$\mathbf{m}athcal{L}$ is an algebra.
\begin{examples}\label{x.algebras}\
\begin{enumerate}[\upshape 1.]
\item\label{i.x.algebras.l-closed}
The set $\mathbf{m}athcal{L}_{1}$ of left-closed intervals of the line (including intervals
of the form $\opint{-\infty}{a}$) is a semialgebra.
\item
The set $\mathbf{m}athcal{L}_{2}$ of all intervals of the line
(which can be open, closed, left-closed or
right-closed), is a semialgebra.
\item\label{i.x.algebras.inf-seq}
In the set $\{0,1\}^{\og}$ of infinite 0-1-sequences, the
set $\mathbf{m}athcal{L}_{3}$ of all subsets of the form $u\{0,1\}^{\og}$ with
$u\in\{0,1\}^{*}$, is a semialgebra.
\item
The $\sg$-algebra $\mathbf{m}athcal{B}$ generated by $\mathbf{m}athcal{L}_{1}$, is the same as the one
generated by $\mathbf{m}athcal{L}_{2}$, and is also the same as the one generated by the
set of all open sets: it is called the family of \mathbf{d}f{Borel sets} of the
line.
The Borel sets of the extended real line $\ol\mathbf{m}athbb{R}$ are defined similarly.
\item
Given $\sg$-algebras $\mathbf{m}athcal{A},\mathbf{m}athcal{B}$ in sets $X,Y$, the product $\sg$-algebra
$\mathbf{m}athcal{A}\mathbf{t}imes\mathbf{m}athcal{B}$ in the space $X \mathbf{t}imes Y$ is the one generated by all
elements $A \mathbf{t}imes Y$ and $X \mathbf{t}imes B$ for $A\in\mathbf{m}athcal{A}$ and $B\in\mathbf{m}athcal{B}$.
\end{enumerate}
\end{examples}
\subsection{Measures}\label{ss.measures}
A \mathbf{d}f{measurable space} is a pair $(X, \mathbf{m}athcal{S})$ where $\mathbf{m}athcal{S}$ is a $\sg$-algebra
of sets of $X$.
A \mathbf{d}f{measure} on a measurable space $(X, \mathbf{m}athcal{S})$ is a function
$\mathbf{m}u : \mathbf{m}athcal{S} \mathbf{t}o \ol\mathbf{m}athbb{R}_{+}$ that is \mathbf{d}f{$\sg$-additive}:
this means that for every countable family $A_{1}, A_{2},\mathbf{d}otsc$ of
disjoint elements of $\mathbf{m}athcal{S}$ we have
$\mathbf{m}u(\bigcup_{i} A_{i}) = \sum_{i} \mathbf{m}u(A_{i})$.
A measure $\mathbf{m}u$ is \mathbf{d}f{$\sg$-finite} if the whole space is the union of a
countable set of subsets whose measure is finite.
It is \mathbf{d}f{finite} if $\mathbf{m}u(X) < \infty$.
It is a \mathbf{d}f{probability measure} if $\mathbf{m}u(X) = 1$.
It is important to understand how a measure can be defined in practice.
Algebras are generally simpler to grasp constructively
than $\sg$-algebras; semialgebras are yet simpler.
Suppose that $\mathbf{m}u$ is defined over a semialgebra $\mathbf{m}athcal{L}$ and is additive.
Then it can always be uniquely extended to an additive function over
the algebra generated by $\mathbf{m}athcal{L}$.
The following is an important theorem of measure theory.
\begin{proposition}\label{p.Caratheo-extension}
Suppose that a nonnegative set function defined over a semialgebra $\mathbf{m}athcal{L}$
is $\sg$-additive.
Then it can be extended uniquely to the $\sg$-algebra generated by $\mathbf{m}athcal{L}$.
\end{proposition}
\begin{examples}\label{x.semialgebra}\
\begin{enumerate}[\upshape 1.]
\item Let $x$ be point and let $\mathbf{m}u(A) = 1$ if $x \in A$ and $0$
otherwise.
In this case, we say that $\mathbf{m}u$ is \mathbf{d}f{concentrated} on the point $x$.
\item\label{i.left-cont} Consider the line $\mathbf{m}athbb{R}$, and the
semialgebra $\mathbf{m}athcal{L}_{1}$ defined
in Example~\ref{x.algebras}.\ref{i.x.algebras.l-closed}.
Let $f : \mathbf{m}athbb{R} \mathbf{t}o \mathbf{m}athbb{R}$ be a monotonic real function.
We define a set function over $\mathbf{m}athcal{L}_{1}$ as follows.
Let $\lint{a_{i}}{b_{i}}$, ($i=1,\mathbf{d}ots,n$) be a set of disjoint left-closed
intervals.
Then $\mathbf{m}u(\bigcup_{i} \lint{a_{i}}{b_{i}}) = \sum_{i} f(b_{i}) - f(a_{i})$.
It is easy to see that $\mathbf{m}u$ is additive.
It is $\sg$-additive if and only if $f$ is left-continuous.
\item\label{i.measure-Cantor} Let $B = \{0,1\}$, and consider the set
$B^{\og}$ of infinite 0-1-sequences, and the
semialgebra $\mathbf{m}athcal{L}_{3}$ of
Example~\ref{x.algebras}.\ref{i.x.algebras.inf-seq}.
Let $\mathbf{m}u : B^{*} \mathbf{t}o \mathbf{m}athbb{R}^{+}$ be a function.
Let us write $\mathbf{m}u(uB^{\og}) = \mathbf{m}u(u)$ for all $u \in B^{*}$.
Then it can be shown that the following conditions are equivalent:
$\mathbf{m}u$ is $\sg$-additive over $\mathbf{m}athcal{L}_{3}$; it is
additive over $\mathbf{m}athcal{L}_{3}$; the equation $\mathbf{m}u(u) = \mathbf{m}u(u0) + \mathbf{m}u(u1)$
holds for all $u \in B^{*}$.
\item The nonnegative linear combination of any finite number of measures
is also a measure.
In this way, it is easy to construct arbitrary measures concentrated on a
finite number of points.
\item Given two measure spaces $(X,\mathbf{m}athcal{A},\mathbf{m}u)$ and $(Y,\mathbf{m}athcal{B},\nu)$ it is
possible to
define the product measure $\mathbf{m}u\mathbf{t}imes\nu$ over the measureable space
$(X\mathbf{t}imes Y, \mathbf{m}athcal{A}\mathbf{t}imes\mathbf{m}athcal{B})$.
The definition is required to satisfy
$\mathbf{m}u\mathbf{t}imes\nu(A\mathbf{t}imes B) = \mathbf{m}u(A)\nu(B)$, and is determined uniquely
by this condition.
If $\nu$ is a probability measure then, of course,
$\mathbf{m}u(A) = \mathbf{m}u\mathbf{t}imes\nu(A \mathbf{t}imes Y)$.
\end{enumerate}
\end{examples}
\begin{remark}\label{r.measure.step-by-step}
Example~\ref{x.semialgebra}.\ref{i.measure-Cantor} shows a particularly
attractive way to define measures.
Keep splitting the values $\mathbf{m}u(u)$ in an arbitrary way into
$\mathbf{m}u(u0)$ and $\mathbf{m}u(u1)$, and the resulting values on the semialgebra define
a measure.
Example~\ref{x.semialgebra}.\ref{i.left-cont} is less attractive,
since in the process of defining $\mathbf{m}u$ on all intervals and only keeping
track of finite additivity, we may end up with
a monotonic function that is not left continuous, and thus with a measure
that is not $\sg$-additive.
In the subsection on probability measures over a metric space, we will find
that even on the real line, there is a way to define measures in a
step-by-step manner, and only checking for consistency along the way.
\end{remark}
A \mathbf{d}f{probability space} is a triple $(X, \mathbf{m}athcal{S}, P)$ where $(X, \mathbf{m}athcal{S})$ is a
measurable space and $P$ is a probability measure over it.
Let $(X_{i}, \mathbf{m}athcal{S}_{i})$ ($i=1,2$) be measurable spaces, and let
$f : X_{1} \mathbf{t}o X_{2}$ be a mapping.
Then $f$ is \mathbf{d}f{measurable} if and only if for each element $B$ of
$\mathbf{m}athcal{S}_{2}$, its inverse image $f^{-1}(B)$ is in $\mathbf{m}athcal{S}_{1}$.
If $\mathbf{m}u_{1}$ is a measure over $(X_{1}, \mathbf{m}athcal{S}_{1})$ then
$\mathbf{m}u_{2}$ defined by $\mathbf{m}u_{2}(A) = \mathbf{m}u_{1}(f^{-1}(A))$ is a measure over
$X_{2}$ called the measure \mathbf{d}f{induced} by $f$.
\subsection{Integral}\label{ss.integral}
A measurable function $f : X \mathbf{t}o \mathbf{m}athbb{R}$ is called a \mathbf{d}f{step function} if
its range is finite.
The set of step functions is closed with respect to linear combinations and
also with respect to the operations $\et,\V$.
Such a set of functions is called a \mathbf{d}f{Riesz space}.
Given a step function which takes values $x_{i}$ on sets $A_{i}$, and a
finite measure $\mathbf{m}u$, we define
\[
\mathbf{m}u(f) = \mathbf{m}u f = \int f\,d\mathbf{m}u = \int f(x) \mathbf{m}u(d x)
= \sum_{i} x_{i} \mathbf{m}u(A_{i}).
\]
This is a linear positive functional on the set of step functions.
Moreover, it can be shown that it
is continuous on monotonic sequences: if $f_{i} \searrow 0$
then $\mathbf{m}u f_{i} \searrow 0$.
The converse can also be shown:
Let $\mathbf{m}u$ be a linear positive functional on step functions
that is continuous on monotonic sequences.
Then the set function $\mathbf{m}u(A) = \mathbf{m}u(1_{A})$ is a finite measure.
\begin{proposition}\label{p.Riesz-extension}
Let $\mathbf{m}athcal{E}$ be any Riesz space of functions with the property that
$1 \in \mathbf{m}athcal{E}$.
Let $\mathbf{m}u$ be a positive linear functional on $\mathbf{m}athcal{E}$ continuous on monotonic
sequences, with $\mathbf{m}u 1 = 1$.
The functional $\mathbf{m}u$ can be extended to the set
$\mathbf{m}athcal{E}_{+}$ of monotonic limits of nonnegative elements of $\mathbf{m}athcal{E}$, by
continuity.
In case when $\mathbf{m}athcal{E}$ is the set of all step functions, the set $\mathbf{m}athcal{E}_{+}$ is
the set of all nonnegative measurable functions.
\end{proposition}
Let us fix a finite measure $\mathbf{m}u$ over a measurable space $(X, \mathbf{m}athcal{S})$.
A measurable function $f$ is called \mathbf{d}f{integrable} with respect to $\mathbf{m}u$
if $\mathbf{m}u |f|^{+} < \infty$ and $\mathbf{m}u |f|^{-} < \infty$.
In this case, we define $\mathbf{m}u f = \mathbf{m}u |f|^{+} - \mathbf{m}u |f|^{-}$.
The set of integrable functions is a Riesz space, and the positive linear
functional $\mathbf{m}u$ on it is continuous with respect to monotonic sequences.
The continuity over monotonic sequences also implies the following
\mathbf{d}f{bounded convergence theorem}.
\begin{proposition}
Suppose that functions $f_{n}$ are integrable and
$|f_{n}| < g$ for some integrable function $g$.
Then $f = \lim_{n} f_{n}$ is integrable and $\mathbf{m}u f = \lim_{n} \mathbf{m}u f_{n}$.
\end{proposition}
Two measurable functions $f,g$ are called \mathbf{d}f{equivalent} with respect to
$\mathbf{m}u$ if $\mathbf{m}u |f - g| = 0$.
For two-dimensional integration, the following theorem holds.
\begin{proposition}
Suppose that function $f(\cdot,\cdot)$ is integrable over
the space $(X\mathbf{t}imes Y, \mathbf{m}athcal{A}\mathbf{t}imes\mathbf{m}athcal{B}, \mathbf{m}u\mathbf{t}imes\nu)$.
Then for $\mathbf{m}u$-almost all $x$, the function $f(x,\cdot)$ is integrable over
$(Y,\mathbf{m}athcal{B},\nu)$, and the function $x\mathbf{m}apsto\nu^{y}f(x,y)$
is integrable over $(X,\mathbf{m}athcal{A},\mathbf{m}u)$
with $(\mathbf{m}u\mathbf{t}imes\nu) f = \mathbf{m}u^{x}\nu^{y}f(x,y)$.
\end{proposition}
\subsection{Density}
Let $\mathbf{m}u, \nu$ be two measures over the same measurable space.
We say that $\nu$ is \mathbf{d}f{absolutely continuous} with respect to $\mathbf{m}u$, or
that $\mathbf{m}u$ \mathbf{d}f{dominates} $\nu$, if
for each set $A$, $\mathbf{m}u(A) = 0$ implies $\nu(A) = 0$.
It can be proved that for finite measures this condition is equivalent to
the condition that for every $\eps$ there is a $\delta$ such that
$\mathbf{m}u(A) < \delta$ implies $\nu(A) < \eps$.
Every nonnegative integrable function $f$ defines a new measure $\nu$ via
the formula $\nu(A) = \mathbf{m}u(f\cdot 1_{A})$.
This measure $\nu$ is absolutely continuous with respect to $\mathbf{m}u$.
The Radon-Nikodym theorem says that the converse is also true.
\begin{proposition}[Radon-Nikodym theorem]
If $\nu$ is dominated by $\mathbf{m}u$ then there is a nonnegative integrable
function $f$ such that $\nu(A) = \mathbf{m}u(f \cdot 1_{A})$ for all measurable
sets $A$.
The function $f$ is defined uniquely to within equivalence with respect to
$\mathbf{m}u$.
\end{proposition}
The function $f$ of the Radon-Nikodym Theorem above
is called the \mathbf{d}f{density} of $\nu$ with respect to $\mathbf{m}u$.
We will denote it by
\[
f(x) = \frac{\nu(dx)}{\mathbf{m}u(dx)} = \frac{d\nu}{d\mathbf{m}u}.
\]
The following theorem is also standard.
\begin{proposition}\label{p.density-props}\
\begin{enumerate}[(a)]
\item
Let $\mathbf{m}u, \nu, \eta$ be measures such that $\eta$ is absolutely continuous
with respect to $\mathbf{m}u$ and $\mathbf{m}u$ is absolutely continuous with respect to
$\nu$.
Then the ``chain rule'' holds:
\begin{equation}\label{e.chain-rule}
\frac{d\eta}{d\nu} = \frac{d\eta}{d\mathbf{m}u} \frac{d\mathbf{m}u}{d\nu}.
\end{equation}
\item
If $\frac{\nu(dx)}{\mathbf{m}u(dx)} > 0$ for all $x$ then
$\mathbf{m}u$ is also absolutely continuous with respect to $\nu$ and
$\frac{\mathbf{m}u(dx)}{\nu(dx)} = \left(\frac{\nu(dx)}{\mathbf{m}u(dx)}\right)^{-1}$.
\end{enumerate}
\end{proposition}
Let $\mathbf{m}u, \nu$ be two measures, then both are dominated by some measure
$\eta$ (for example by $\eta = \mathbf{m}u + \nu$).
Let their densities with respect to $\eta$ be $f$ and $g$.
Then we define the \mathbf{d}f{total variation distance} of the two measures
as
\[
D(\mathbf{m}u, \nu)=\eta(|f - g|).
\]
It is independent of the dominating measure $\eta$.
\begin{example}
Suppose that the space $X$ can be partitioned into
disjoint sets $A,B$ such that $\nu(A)=\mathbf{m}u(B) = 0$.
Then $D(\mathbf{m}u, \nu) = \mathbf{m}u(A) + \nu(B) = \mathbf{m}u(X) + \nu(X)$.
\end{example}
\subsection{Random transitions}\label{ss.transitions}
Let $(X, \mathbf{m}athcal{A})$, $(Y, \mathbf{m}athcal{B})$ be two measureable spaces (defined in
Subsection~\ref{ss.measures}).
We follow the definition given in~\cite{PollardUsers01}.
Suppose that a family of probability
measures $\Lg = \setof{\lg_{x} : x \in X}$ on $\mathbf{m}athcal{B}$ is given.
We call it a \mathbf{d}f{probability kernel}, (or Markov kernel, or conditional
distribution) if the map $x \mathbf{m}apsto \lg_{x} B$ is measurable for each
$B \in \mathbf{m}athcal{B}$.
When $X,Y$ are finite sets then $\lg$ is a Markov transition matrix.
The following theorem shows that $\lg$ assigns a joint distribution over
the space $(X \mathbf{t}imes Y, \mathbf{m}athcal{A}\mathbf{t}imes\mathbf{m}athcal{B})$ to each input distribution $\mathbf{m}u$.
\begin{proposition} For each nonnegative $\mathbf{m}athcal{A}\mathbf{t}imes \mathbf{m}athcal{B}$-measureable
function $f$ over $X \mathbf{t}imes Y$,
\begin{enumerate}[\upshape 1.]
\item the function $y \mathbf{m}apsto f(x,y)$ is $\mathbf{m}athcal{B}$-measurable for each fixed $x$;
\item $x \mathbf{m}apsto \lg_{x}^{y} f(x, y)$ is $\mathbf{m}athcal{A}$-measurable;
\item the integral $f \mathbf{m}apsto \mathbf{m}u^{x} \lg_{x}^{y} f(x, y)$ defines
a measure on $\mathbf{m}athcal{A} \mathbf{t}imes \mathbf{m}athcal{B}$.
\end{enumerate}
\end{proposition}
According to this proposition, given a probability kernel $\Lg$,
to each measure $\mathbf{m}u$ over $\mathbf{m}athcal{A}$ corresponds a measure over
$\mathbf{m}athcal{A} \mathbf{t}imes \mathbf{m}athcal{B}$.
We will denote its marginal over $\mathbf{m}athcal{B}$ as
\begin{equation}\label{e.Markov-op-meas}
\Lg^{*} \mathbf{m}u.
\end{equation}
For every measurable function $g(y)$ over $Y$, we can define the measurable
function $f(x) = \lg_{x} g = \lg_{x}^{y} g(y)$: we write
\begin{equation}\label{e.Markov-op-fun}
f = \Lg g.
\end{equation}
The operator $\Lg$ is linear, and monotone with $\Lg 1 = 1$.
By these definitions, we have
\begin{equation}\label{e.Lg-Lg*}
\mathbf{m}u(\Lg g) = (\Lg^{*}\mathbf{m}u) g.
\end{equation}
\begin{example}\label{x.determ-trans}
Let $h : X \to Y$ be a measurable function, and
let $\lg_{x}$ be the measure $\dg_{h(x)}$ concentrated on the
point $h(x)$.
This operator, denoted $\Lg_{h}$, is, in fact, a deterministic transition,
and we have $\Lg_{h} g = g \circ h$.
In this case, we will simplify the notation as follows:
\[
h^{*}\mu = \Lg_{h}^{*}\mu.
\]
\end{example}
\subsection{Probability measures over a metric
space}\label{ss.measure-metric}
We follow the exposition of~\cite{BillingsleyConverg68}.
Whenever we deal with probability measures on a metric space, we will
assume that our metric space is complete and separable (Polish).
Let $\mathbf{m}athbf{X} = (X, d)$ be a complete separable metric space.
It gives rise to a measurable space, where the measurable sets are the
Borel sets of $\mathbf{m}athbf{X}$.
It can be shown that, if $A$ is a Borel set and $\mathbf{m}u$ is a finite measure
then there are sets
$F \sbsq A \sbsq G$ where $F$ is an $F_{\sg}$ set, $G$ is a $G_{\mathbf{d}g}$ set,
and $\mathbf{m}u(F) = \mathbf{m}u(G)$.
Let $\mathbf{m}athcal{B}$ be a base of open sets closed under intersections.
Then it can be shown that $\mathbf{m}u$ is determined by its values on elements of
$\mathbf{m}athcal{B}$.
The following proposition follows then essentially from
Proposition~\ref{p.Caratheo-extension}.
\begin{proposition}\label{p.Caratheo-topol}
Let $\mathbf{m}athcal{B}^{*}$ be the set algebra generated by the above base
$\mathbf{m}athcal{B}$, and let $\mathbf{m}u$ be any
$\sigma$-additive set function on $\mathbf{m}athcal{B}^{*}$ with $\mathbf{m}u(X)=1$.
Then $\mathbf{m}u$ can be extended uniquely to a probability measure.
\end{proposition}
We say that a set $A$ is a \mathbf{d}f{continuity set} of measure $\mathbf{m}u$ if
$\mathbf{m}u(\partial A) = 0$: the boundary of $A$ has measure 0.
\subsubsection{Weak topology}
Let
\[
\mathbf{m}athcal{M}(\mathbf{m}athbf{X})
\]
be the set of probability measures on the metric space $\mathbf{m}athbf{X}$.
Let
\[
\mathbf{d}g_{x}
\]
be a probability measure concentrated on point $x$.
Let $x_{n}$ be a sequence of points converging to point $x$ but with
$x_{n} \ne x$.
We would like to say that $\mathbf{d}g_{x_{n}}$ converges to $\mathbf{d}g_{x}$.
But the total variation distance $D(\mathbf{d}g_{x_{n}}, \mathbf{d}g_{x})$ is 2
for all $n$.
This suggests that the total variation distance is not generally the best
way to compare probability measures over a metric space.
We say that a sequence of probability
measures $\mathbf{m}u_{n}$ over a metric space $(X, d)$
\mathbf{d}f{weakly converges} to measure $\mathbf{m}u$ if for all bounded continuous
real functions $f$ over $X$ we have $\mathbf{m}u_{n} f \mathbf{t}o \mathbf{m}u f$.
This \mathbf{d}f{topology of weak convergence} $(\mathbf{m}athcal{M}, \mathbf{t}au_{w})$
can be defined using a number of different subbases.
The one used in the original definition
is the subbase consisting of all sets of the form
\[
A_{f,c} = \setof{\mathbf{m}u : \mathbf{m}u f < c}
\]
for bounded continuous functions $f$ and real numbers $c$.
We also get a subbase (see for example~\cite{PollardUsers01})
if we restrict ourselves to the set
$\operatorname{Lip}(X)$ of Lip\-schitz functions defined in~\eqref{e.Lipschitz}.
Another possible subbase giving rise to the same topology
consists of all sets of the form
\begin{equation}\label{e.measure-on-open}
B_{G,c} = \setof{\mathbf{m}u : \mathbf{m}u(G) > c}
\end{equation}
for open sets $G$ and real numbers $c$.
Let us find some countable subbases.
Since the space $\mathbf{m}athbf{X}$ is separable, there is a sequence $U_{1}, U_{2},
\mathbf{d}otsc$ of open sets that forms a base.
We can restrict the subbase of the space of measures to those sets
$B_{G, c}$ where $G$ is the union of a finite number of base elements
$U_{i}$ and $c$ is rational.
Thus, the space $(\mathbf{m}athcal{M}, \mathbf{t}au_{w})$ itself has the second countability
property.
It is more convenient to define a countable subbase using bounded
continuous functions $f$, since
$\mathbf{m}u \mathbf{m}apsto \mathbf{m}u f$ is continuous on such functions, while
$\mathbf{m}u \mathbf{m}apsto \mathbf{m}u U$ is typically not continuous when $U$ is an open set.
Let $\mathbf{m}athcal{F}_{0}$ be the set of functions introduced before~\eqref{e.bd-Lip-seq}.
Let
\[
\mathbf{m}athcal{F}_{1}
\]
be the set of functions $f$ with the property that $f$ is the
minimum of a finite number of elements of $\mathbf{m}athcal{F}_{0}$.
Note that each element $f$ of $\mathbf{m}athcal{F}_{1}$ is bounded between 0 and 1, and
from
its definition, we can compute a bound $\bg$ such that $f\in\operatorname{Lip}_{\bg}$.
\begin{proposition}\label{p.Portmanteau}
The following conditions are equivalent:
\begin{enumerate}[\upshape 1.]
\item $\mathbf{m}u_{n}$ weakly converges to $\mathbf{m}u$.
\item $\mathbf{m}u_{n} f \mathbf{t}o \mathbf{m}u f$ for all $f \in \mathbf{m}athcal{F}_{1}$.
\item For every Borel set $A$, that is a continuity set of $\mathbf{m}u$, we have
$\mathbf{m}u_{n}(A) \mathbf{t}o \mathbf{m}u(A)$.
\item For every closed set $F$, $\limsup_{n} \mu_{n}(F) \leqslant \mu(F)$.
\item For every open set $G$, $\liminf_{n} \mu_{n}(G) \geqslant \mu(G)$.
\end{enumerate}
\end{proposition}
As a subbase
\begin{equation}\label{e.metric-measure-subbase}
\sg_{\mathbf{m}athcal{M}}
\end{equation}
for $\mathcal{M}(\mathbf{X})$, we choose the sets
$\setof{\mu : \mu f < r}$ and $\setof{\mu : \mu f > r}$ for all
$f \in \mathcal{F}_{1}$ and $r \in \mathbb{Q}$.
Let $\mathbf{m}athcal{E}$
be the set of functions introduced in~\eqref{e.bd-Lip-seq}.
It is a Riesz space as defined in Subsection~\ref{ss.integral}.
A reasoning combining Propositions~\ref{p.Caratheo-extension}
and~\ref{p.Riesz-extension} gives the following.
\begin{proposition}\label{p.metric-Riesz-extension}
Suppose that a positive linear functional $\mathbf{m}u$ with $\mathbf{m}u 1 = 1$ is defined
on $\mathbf{m}athcal{E}$
that is continuous with respect to monotone convergence.
Then $\mathbf{m}u$ can be extended uniquely to a probability
measure over $\mathbf{m}athbf{X}$ with $\mathbf{m}u f = \int f(x) \mathbf{m}u(dx)$ for all $f \in \mathbf{m}athcal{E}$.
\end{proposition}
\subsubsection{Prokhorov distance}\label{sss.Prokh}
The definition of measures in the style of
Proposition~\ref{p.metric-Riesz-extension}
is not sufficiently constructive.
Consider a gradual definition of the measure $\mathbf{m}u$, extending it
to more and more elements of $\mathbf{m}athcal{E}$, while keeping the positivity and
linearity property.
It can happen that the function $\mathbf{m}u$ we end up with in the limit, is not
continuous with respect to monotone convergence.
Let us therefore metrize the space of
measures: then an arbitrary measure can be defined as the limit
of a Cauchy sequence of simple measures.
One metric that generates the topology of weak convergence is
the \mathbf{d}f{Prokhorov distance} $p(\mathbf{m}u, \nu)$:
the infimum of all those $\eps$ for which, for all Borel sets $A$ we
have (using the notation~\eqref{e.Aeps})
\[
\mathbf{m}u(A) \leqslant \nu(A^{\eps}) + \eps.
\]
It can be shown that this is a distance and it generates the weak
topology.
The following result helps visualize this distance:
\begin{proposition}[Coupling Theorem, see~\protect\cite{Strassen65}]
\label{p.coupling}
Let $\mathbf{m}u,\nu$ be two probability measures over a complete separable metric
space $\mathbf{m}athbf{X}$ with $p(\mathbf{m}u, \nu) \leqslant\eps$.
Then there is a probability measure $P$ on the space $\mathbf{m}athbf{X} \mathbf{t}imes \mathbf{m}athbf{X}$
with marginals $\mathbf{m}u$ and $\nu$ such that for a pair of random variables
$(\xi,\eta)$
having joint distribution $P$ we have
\[
P\setof{d(\xi,\eta) > \eps} \leqslant \eps.
\]
\end{proposition}
Since this topology has the second countability property,
the metric space defined by the distance $p(\cdot,\cdot)$ is separable.
This can also be seen directly.
Let $S$ be a countable everywhere dense set of points in $X$.
Consider the set of $\mathbf{m}athcal{M}_{0}(X)$ of
those probability measures that are concentrated on finitely many points of
$S$ and assign rational values to them.
It can be shown that $\mathbf{m}athcal{M}_{0}(X)$ is everywhere dense in the metric space
$(\mathbf{m}athcal{M}(X), p)$; so, this space is separable.
It can also be shown that $(\mathbf{m}athcal{M}(X), p)$ is complete.
Thus, a measure can be given as the limit of a sequence of elements
$\mathbf{m}u_{1},\mathbf{m}u_{2},\mathbf{d}ots$ of
$\mathbf{m}athcal{M}_{0}(X)$, where $p(\mathbf{m}u_{i},\mathbf{m}u_{i+1}) < 2^{-i}$.
The definition of the Prokhorov distance quantifies over all Borel sets.
However, in an important simple case, it can be handled efficiently.
\begin{proposition}\label{p.simple-Prokhorov-ball}
Assume that measure $\nu$ is concentrated on
a finite set of points $S \sbs X$.
Then the condition $p(\nu,\mathbf{m}u) < \eps$ is equivalent to
the finite set of conditions
\begin{equation}\label{e.finite-Prokhorov}
\mathbf{m}u(A^{\eps}) > \nu(A) - \eps
\end{equation}
for all $A \sbs S$.
\end{proposition}
\subsubsection{Relative compactness}
A set $\operatorname{P}i$
of measures in $(\mathbf{m}athcal{M}(X), p)$ is called \mathbf{d}f{relatively compact} if every
sequence of elements of $\operatorname{P}i$ contains a convergent subsequence.
Relative compactness is an important property for proving convergence of
measures.
It has a useful characterization.
A set of $\operatorname{P}i$ of measures is called \mathbf{d}f{tight} if for every $\eps$ there
is a compact set $K$ such that $\mathbf{m}u(K) > 1 - \eps$ for all $\mathbf{m}u$ in $\operatorname{P}i$.
Prokhorov's theorem states (under our assumptions of the separability and
completeness of $(X, d)$) that a set of measures is relatively compact if
and only if it is tight and if and only if its closure is compact
in $(\mathbf{m}athcal{M}(X), p)$.
In particular, the following fact is known.
\begin{proposition}\label{p.measures-compact}
The space $(\mathbf{m}athcal{M}(\mathbf{m}athbf{X}), p)$ of measures is compact if and
only if the space $(X, d)$ is compact.
\end{proposition}
So, if $(X, d)$ is not compact then the set of measures is not compact.
But still, each measure $\mathbf{m}u$
is ``almost'' concentrated on a compact set.
Indeed, the one-element set $\{\mathbf{m}u\}$ is compact and therefore
by Prokhorov's theorem tight.
Tightness says that for each $\eps$ a mass of size $1-\eps$ of $\mathbf{m}u$ is
concentrated on some compact set.
\section{Computable analysis}
If for some finite or infinite sequences $x,y,z,w$, we have
$z = wxy$ then we write $w \sqsubseteq z$ ($w$ is a \mathbf{d}f{prefix} of $z$) and
$x \mathbf{t}riangleleft z$.
For integers, we will use the tupling functions
\[
\ang{i, j} = \frac{1}{2} (i+j)(i+j+1) + j,
\quad \ang{n_{1},\dots,n_{k+1}} = \ang{\ang{n_{1},\dots,n_{k}},n_{k+1}}.
\]
Inverses: $\pi_{i}^{k}(n)$.
Unless said otherwise, the alphabet $\Sg$ is always assumed to contain the
symbols 0 and 1.
After~\cite{WeihrauchComputAnal00},
let us define the \mathbf{d}f{wrapping function} $\ig : \Sg^{*} \mathbf{t}o \Sg^{*}$ by
\begin{equation}\label{e.ig}
\ig(a_{1}a_{2}\mathbf{d}otsm a_{n}) = 110a_{1}0a_{2}0\mathbf{d}otsm a_{n}011.
\end{equation}
Note that
\begin{equation}\label{e.ig-len}
|\ig(x)| = (2 |x| + 5)\V 6.
\end{equation}
For strings $x,x_{i} \in \Sg^{*}$, $p, p_{i} \in \Sg^{\og}$, $k \geqslant 1$,
appropriate tupling functions $\ang{x_{1},\mathbf{d}ots,x_{k}}$,
$\ang{x,p}$, $\ang{p,x}$, etc.~can be defined with the help of
$\ang{\cdot,\cdot}$ and $\ig(\cdot)$.
\subsection{Notation and representation}\label{ss.notation-repr}
The concepts of notation and representation, as defined
in~\cite{WeihrauchComputAnal00}, allow us to transfer
computability properties from some standard spaces to many others.
Given a countable set $C$, a \mathbf{d}f{notation} of $C$ is a surjective
partial mapping $\mathbf{d}g :\sbsq \mathbf{m}athbb{N} \mathbf{t}o C$.
Given some finite alphabet $\Sg \spsq \{0,1\}$ and an arbitrary set $S$,
a \mathbf{d}f{representation} of $S$ is a surjective mapping
$\chi :\sbsq \Sg^{\og} \mathbf{t}o S$.
A \mathbf{d}f{naming system} is a notation or a representation.
Here are some standard naming systems:
\begin{enumerate}[\upshape 1.]
\item $\operatorname{id}$, the identity over $\Sg^{*}$ or $\Sg^{\og}$.
\item $\nu_{\mathbf{m}athbb{N}}$, $\nu_{\mathbf{m}athbb{Z}}$, $\nu_{\mathbf{m}athbb{Q}}$ for the set of natural
numbers, integers and rational numbers.
\item $\operatorname{Cf} : \Sg^{\og} \mathbf{t}o 2^{\mathbf{m}athbb{N}}$, the \mathbf{d}f{characteristic function
representation} of sets of natural numbers, is defined by
$\operatorname{Cf}(p) = \setof{i : p(i) = 1}$.
\item $\operatorname{En} : \Sg^{\og} \to 2^{\mathbb{N}}$, the \df{enumeration representation} of
sets of natural numbers, is defined by
$\operatorname{En}(p) = \setof{n \in \mathbb{N} : 110^{n+1}11 \triangleleft p}$.
\item For $\Dg \sbsq \Sg$, $\operatorname{En}_{\Dg} : \Sg^{\og} \mathbf{t}o 2^{\Dg^{*}}$,
the \mathbf{d}f{enumeration representation} of subsets of $\Dg^{*}$, is defined by
$\operatorname{En}_{\Dg}(p) = \setof{w \in \Sg^{*} : \ig(w) \mathbf{t}riangleleft p}$.
\end{enumerate}
One can define names for all
computable functions between spaces that are Cartesian
products of terms of the kind $\Sg^{*}$ and $\Sg^{\og}$.
Then, the notion of computability can be transferred to other spaces as
follows.
Let $\mathbf{d}g_{i} : Y_{i} \mathbf{t}o X_{i}$, $i=1,0$ be naming systems of the spaces
$X_{i}$.
Let $f : \sbsq X_{1} \mathbf{t}o X_{0}$, $g : \sbsq Y_{1} \mathbf{t}o Y_{0}$.
We say that function $g$ \mathbf{d}f{realizes} function $f$ if
\begin{equation}\label{e.realize}
f(\mathbf{d}g_{1}(y)) = \mathbf{d}g_{0}(g(y))
\end{equation}
holds for all $y$ for which the left-hand side is defined.
Realization of multi-argument functions is defined similarly.
We say that a function $f : X_{1} \mathbf{t}imes X_{2} \mathbf{t}o X_{0}$
is \mathbf{d}f{$(\mathbf{d}g_{1},\mathbf{d}g_{2},\mathbf{d}g_{0})$-computable} if
there is a computable function $g : \sbsq Y_{1} \mathbf{t}imes Y_{2} \mathbf{t}o Y_{0}$
realizing it.
In this case, a name for $f$ is naturally derived from a name of
$g$.\footnote{Any function $g$ realizing $f$ via~\eqref{e.realize}
automatically has a
certain \mathbf{d}f{extensivity} property: if $\mathbf{d}g_{1}(y) = \mathbf{d}g_{1}(y')$ then
$g(y) = g(y')$.}
For representations $\xi,\eta$,
we write $\xi \leqslant \eta$ if there is a computable function
$f :\sbsq \Sg^{\og} \mathbf{t}o \Sg^{\og}$ with $\xi(x) = \eta(f(x))$.
In words, we say that $\xi$ is \mathbf{d}f{reducible} to $\eta$, or that $f$
reduces (translates) $\xi$ to $\eta$.
There is a similar definition of reduction for notations.
We write $\xi \equiv \eta$ if $\xi \leqslant \eta$ and $\eta \leqslant \xi$.
\subsection{Constructive topological space}
\subsubsection{Definitions}
Section~\ref{s.top} gives a review of topological concepts.
A \mathbf{d}f{constructive topological space} $\mathbf{m}athbf{X} = (X, \sg, \nu)$
is a topological space over a set $X$ with a subbase $\sg$ effectively
given as a list $\sg = \{\nu(1),\nu(2),\mathbf{d}ots\}$,
and having the $T_{0}$ property (thus, every point is determined uniquely
by the subset of elements of $\sg$ containing it).
By definition, a constructive topological space satisfies the second
countability axiom.\footnote{A constructive topological space
is an effective topological space as defined
in~\cite{WeihrauchComputAnal00}, but, for simplicity
we require the notation $\nu$ to be a total function.}
We obtain a base
\[
\sg^{\cap}
\]
of the space $\mathbf{m}athbf{X}$ by taking all possible finite
intersections of elements of $\sg$.
It is easy to produce an effective enumeration for $\sg^{\cap}$ from $\nu$.
We will denote this enumeration by $\nu^{\cap}$.
The \mathbf{d}f{product operation} is defined over constructive topological spaces
in the natural way.
\begin{examples}\label{x.constr-topol}\
\begin{enumerate}[\upshape 1.]
\item A discrete topological space, where the underlying
set is finite or countably infinite, with a fixed enumeration.
\item\label{i.constr-topol.real}
The real line, choosing the base to be the open intervals
with rational endpoints with their natural enumeration.
Product spaces can be formed
to give the Euclidean plane a constructive topology.
\item
The real line $\mathbf{m}athbb{R}$, with the subbase $\sg_{\mathbf{m}athbb{R}}^{>}$ defined as
the set of all open intervals $\opint{-\infty}{b}$ with rational endpoints
$b$.
The subbase $\sg_{\mathbf{m}athbb{R}}^{<}$, defined similarly, leads to another topology.
These two topologies differ from each other and from
the usual one on the real line, and they are not Hausdorff spaces.
\item Let $X$ be a set with a constructive discrete topology,
and $X^{\og}$ the set of infinite sequences with elements from $X$,
with the product topology: a natural enumerated basis is also easy to
define.
\end{enumerate}
\end{examples}
Due to the $T_{0}$ property, every point in our space is determined
uniquely by the set of open sets containing it.
Thus, there is a representation $\gm_{\mathbf{m}athbf{X}}$ of $\mathbf{m}athbf{X}$ defined as follows.
We say that $\gm_{\mathbf{m}athbf{X}}(p) = x$ if
$\operatorname{En}_{\Sg}(p) = \setof{w : x \in \nu(w)}$.
If $\gm_{\mathbf{m}athbf{X}}(p) = x$ then we say that the infinite sequence
$p$ is a \mathbf{d}f{complete name} of $x$:
it encodes all names of all subbase elements containing $x$.
From now on, we will call $\gm_{\mathbf{m}athbf{X}}$ the \mathbf{d}f{complete standard
representation of the space $\mathbf{m}athbf{X}$}.\footnote{
The book~\cite{WeihrauchComputAnal00} denotes $\gm_{\mathbf{m}athbf{X}}$ as $\mathbf{d}g'_{\mathbf{m}athbf{X}}$
instead.
We use $\gm_{\mathbf{m}athbf{X}}$ only, dispensing with the
notion of a ``computable'' topological space.}
\subsubsection{Constructive open sets, computable functions}
In a constructive topological space $\mathbf{m}athbf{X} = (X, \sg, \nu)$,
a set $G \sbsq X$ is called \mathbf{d}f{r.e.~open} in set $B$
if there is a r.e.~set $E$ with
$G = \bigcup_{w \in E} \nu^{\cap}(w) \cap B$.
It is r.e.~open if it is r.e.~open in $X$.
In the special kind of spaces in which randomness has been developed until
now, constructive open sets have a nice characterization:
\begin{proposition}\label{p.constr-open-nice-charac}
Assume that the space $\mathbf{m}athbf{X} = (X, \sg, \nu)$
has the form $Y_{1}\mathbf{t}imes \mathbf{d}ots \mathbf{t}imes Y_{n}$ where
each $Y_{i}$ is either $\Sg^{*}$ or $\Sg^{\og}$.
Then a set $G$ is r.e.~open iff it is open and the set
$\setof{(w_{1},\mathbf{d}ots,w_{n}) : \bigcap_{i}\nu(w_{i}) \sbs G}$
is recursively enumerable.
\end{proposition}
\begin{proof}
The proof is not difficult, but it relies on the discrete nature of
the space $\Sg^{*}$ and on the fact that the space $\Sg^{\og}$ is compact
and its base consists of sets that are open and closed at the same time.
\end{proof}
It is easy to see that if two sets are r.e.~open then so is their union.
The above remark implies that in a space having
the form $Y_{1}\times \dots \times Y_{n}$, where
each $Y_{i}$ is either $\Sg^{*}$ or $\Sg^{\og}$, the intersection of
two r.e.~open sets is also r.e.~open.
We will see that this statement holds, more generally, in all computable
metric spaces.
Let $\mathbf{m}athbf{X}_{i} = (X_{i}, \sg_{i}, \nu_{i})$ be constructive topological
spaces, and let $f : \sbsq X_{1} \mathbf{t}o X_{0}$ be a function.
As we know, $f$ is continuous iff the inverse image $f^{-1}(G)$ of each
open set $G$ is open.
Computability is an effective version of continuity:
it requires that the inverse image
of subbase elements is uniformly constructively open.
More precisely, $f :\sbsq X_{1} \mathbf{t}o X_{0}$ is
\mathbf{d}f{computable} if the set
\[
\bigcup_{V \in \sg_{0}^{\cap}} f^{-1}(V) \mathbf{t}imes \{V\}
\]
is a r.e.~open subset of $X_{1} \mathbf{t}imes \sg_{0}^{\cap}$.
Here the base $\sg_{0}^{\cap}$ of $\mathbf{m}athbf{X}_{0}$ is treated as a discrete
constructive topological space, with its natural enumeration.
This definition depends on the enumerations $\nu_{1},\nu_{0}$.
The following theorem (taken from~\cite{WeihrauchComputAnal00})
shows that this computability coincides with the one
obtained by transfer via the representations $\gm_{\mathbf{m}athbf{X}_{i}}$.
\begin{proposition}\label{p.hertling-computable}
For $i=0,1$, let
$\mathbf{m}athbf{X}_{i} = (X_{i}, \sg_{i}, \nu_{i})$ be constructive topological spaces.
Then a function $f :\sbsq X_{1} \mathbf{t}o X_{0}$ is
computable iff it is $(\gm_{\mathbf{m}athbf{X}_{1}},\gm_{\mathbf{m}athbf{X}_{0}})$-computable for the
representations $\gm_{\mathbf{m}athbf{X}_{i}}$ defined above.
\end{proposition}
As a name of a computable function, we can use the name of the enumeration
algorithm derived from the definition of computability, or the name
derivable using this representation theorem.
\begin{remark}
As in Proposition~\ref{p.constr-open-nice-charac},
it would be nice to have the following statement, at least for total
functions:
``Function $f : X_{1} \mathbf{t}o X_{0}$ is computable iff the set
\[
\setof{(v, w) : \nu^{\cap}_{1}(w) \sbs f^{-1}[\nu_{0}(v)] }
\]
is recursively enumerable.''
But such a characterization seems to require compactness and possibly more.
\end{remark}
Let us call two spaces $X_{1}$ and $X_{0}$ \mathbf{d}f{effectively homeomorphic}
if there are computable maps between them that are inverses of each
other.
In the special case when $X_{0}=X_{1}$, we say
that the enumerations of subbases
$\nu_{0},\nu_{1}$ are \df{equivalent} if the identity
mapping is an effective homeomorphism.
This means that there are recursively enumerable sets $F,G$ such that
\[
\nu_{1}(v) = \bigcup_{(v, w) \in F} \nu_{0}^{\cap}(w) \mathbf{t}xt{ for all $v$},
\quad
\nu_{0}(w) = \bigcup_{(w, v) \in G} \nu_{1}^{\cap}(v) \mathbf{t}xt{ for all $w$}.
\]
Lower semicomputability is a constructive version of lower
semicontinuity.
Let $\mathbf{m}athbf{X} = (X, \sg, \nu)$ be a constructive topological space.
A function $f :\sbsq X \mathbf{t}o \ol\mathbf{m}athbb{R}_{+}$ is called \mathbf{d}f{lower semicomputable}
if the set $\setof{(x,r): f(x) > r}$ is r.e.~open.
Let $\mathbf{Y} = (\ol\mathbb{R}_{+}, \sg_{\mathbb{R}}^{>}, \nu_{\mathbb{R}}^{>})$ be the effective
topological space introduced in
Example~\ref{x.constr-topol}.\ref{i.constr-topol.real},
in which $\nu_{\mathbb{R}}^{>}$ is an enumeration of all open intervals of the
form $\rint{r}{\infty}$ with rational $r$.
It can be seen that $f$ is lower semicomputable iff it is
$(\nu,\nu_{\mathbb{R}}^{>})$-computable.
\subsubsection{Computable elements and
sequences}\label{sss.computable-elements}
Let $\mathbf{m}athbf{U} = (\{0\}, \sg_{0}, \nu_{0})$
be the one-element space turned into a trivial constructive
topological space, and let $\mathbf{m}athbf{X} = (X, \sg, \nu)$ be another constructive
topological space.
We say that an element $x \in X$ is \mathbf{d}f{computable} if the function
$0 \mathbf{m}apsto x$ is computable.
It is easy to see that this is equivalent to the requirement that
the set $\setof{u : x \in \nu(u)}$ is recursively enumerable.
Let $\mathbf{X}_{i}= (X_{i}, \sg_{i}, \nu_{i})$,
for $i=0,1$ be constructive topological spaces.
A sequence $f_{i}$, $i=1,2,\mathbf{d}ots$ of functions with
$f_{i} : X_{1} \mathbf{t}o X_{0}$ is a
\mathbf{d}f{computable sequence of computable functions} if
$(i, x) \mathbf{m}apsto f_{i}(x)$ is a computable function.
Using the s-m-n theorem of recursion theory, it is easy to see that this
statement is equivalent to the statement that there is a recursive function
computing from each $i$ a name for the computable function $f_{i}$.
The proof of the following statement is not difficult.
\begin{proposition}\label{p.one-arg-cpt}
Let $\mathbf{m}athbf{X}_{i} = (X_{i}, \sg_{i}, \nu_{i})$
for $i=1,2,0$ be constructive topological spaces, and
let $f: X_{1} \mathbf{t}imes X_{2} \mathbf{t}o X_{0}$, and assume that $x_{1} \in X_{1}$ is
a computable element.
\begin{enumerate}[\upshape 1.]
\item If $f$ is computable
then $x_{2} \mapsto f(x_{1}, x_{2})$ is also computable.
\item If $\mathbf{m}athbf{X}_{0} = \ol\mathbf{m}athbb{R}$, and $f$ is lower semicomputable
then $x_{2} \mathbf{m}apsto f(x_{1}, x_{2})$ is also lower semicomputable.
\end{enumerate}
\end{proposition}
\subsection{Computable metric space}
Following~\cite{BrattkaPresserMetric03}, we define
a computable metric space as a tuple $\mathbf{m}athbf{X} = (X, d, D, \ag)$ where $(X,d)$
is a metric space, with a countable dense subset $D$
and an enumeration $\ag$ of $D$.
It is assumed that the real function $d(\ag(v),\ag(w))$ is computable.
As $x$ runs through elements of $D$ and $r$ through positive rational
numbers, we obtain the enumeration of
a countable basis $\setof{B(x, r) : x \in D, r\in \mathbb{Q}}$ (of balls of radius $r$
and center $x$) of $\mathbf{m}athbf{X}$,
giving rise to a constructive topological space $\mathbf{t}ilde\mathbf{m}athbf{X}$.
Let us call a sequence $x_{1}, x_{2},\mathbf{d}ots$ a \mathbf{d}f{Cauchy} sequence if
for all $i<j$ we have $d(x_{i},x_{j}) \leqslant 2^{-i}$.
To connect to the type-2 theory of computability developed above,
the \mathbf{d}f{Cauchy-representation} $\mathbf{d}g_{\mathbf{m}athbf{X}}$ of the space can be defined in a
natural way.
It can be shown that as a representation of $\mathbf{t}ilde\mathbf{m}athbf{X}$, it is equivalent
to $\gm_{\mathbf{t}ilde\mathbf{m}athbf{X}}$: $\mathbf{d}g_{\mathbf{m}athbf{X}} \equiv \gm_{\mathbf{t}ilde\mathbf{m}athbf{X}}$.
\begin{example}\label{x.cptable-metric-{0}{1}}
Example~\protect\ref{x.Cclint{0}{1}} is a computable
metric space, with either of the two (equivalent)
choices for an enumerated dense set.
\end{example}
Similarly to the definition of a computable sequence of computable
functions in~\ref{sss.computable-elements}, we can
define the notion of a computable sequence of bounded computable functions,
or the computable sequence $f_{i}$ of computable Lip\-schitz functions:
the bound and the Lip\-schitz constant of $f_{i}$ are required to be
computable from $i$.
The following statement shows, in an effective form,
that a function is lower semicomputable if and only if it is the supremum
of a computable sequence of computable functions.
\begin{proposition}\label{p.lower-semi-as-limit}
Let $\mathbf{m}athbf{X}$ be a computable metric space.
There is a computable mapping that to each name of a nonnegative
lower semicomputable
function $f$ assigns a name of a computable sequence of computable
bounded Lip\-schitz functions $f_{i}$ whose supremum is $f$.
\end{proposition}
\begin{proof}[Proof sketch]
Show that $f$ is the supremum of a computable sequence of computable
functions $c_{i} 1_{B(u_{i}, r_{i})}$ where $u_{i}\in D$ and
$c_{i}, r_{i} > 0$ are rational.
Clearly, each indicator function $1_{B(u_{i},r_{i})}$ is the supremum
of a computable sequence of computable functions $g_{i,j}$.
We have $f = \sup_{n} f_{n}$ where $f_{n} = \mathbf{m}ax_{i \leqslant n} c_{i} g_{i,n}$.
It is easy to see that the bounds on the functions $f_{n}$ are computable
from $n$ and that they all are in $\operatorname{Lip}_{\bg_{n}}$ for a
$\bg_{n}$ that is computable from $n$.
\end{proof}
The following is also worth noting.
\begin{proposition}
In a computable metric space, the intersection of two r.e.~open sets is
r.e.~open.
\end{proposition}
\begin{proof}
Let $\bg = \setof{B(x, r) : x \in D, r\in \mathbb{Q}}$ be a basis of our space.
For a pair $(x,r)$ with $x \in D$, $r \in \mathbb{Q}$, let
\[
\Gg(x,r) = \setof{(y,s): y\in D,\;s\in \mathbb{Q},\; d(x,y)+s < r}.
\]
If $U$ is a r.e.~open set, then there is a r.e.~set
$S_{U} \sbs D \times \mathbb{Q}$ with $U = \bigcup_{(x,r) \in S_{U}} B(x,r)$.
Let $S'_{U} = \bigcup\setof{\Gg(x,r) : (x,r) \in S_{U}}$, then we have
$U = \bigcup_{(x,r) \in S'_{U}} B(x,r)$.
Now, it is easy to see
\[
U\cap V = \bigcup_{(x,r) \in S'_{U} \cap S'_{V}} B(x,r).
\]
\end{proof}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\end{document} |
\begin{document}
\sloppy
\title{Partial desingularizations arising from non-commutative algebras}
\begin{abstract}
Let $X$ be a singular affine normal variety with coordinate ring $R$ and assume that there is an $R$-order $\Lambda$ admitting a stability structure $\theta$ such that the scheme of $\theta$-semistable representations is smooth, then we construct a partial desingularization of $X$ with classifiable remaining singularities. In dimension $3$ this explains the omnipresence of conifold singularities in partial desingularizations of quotient singularities. In higher dimensions we have a small list of singularity types generalizing the role of the conifold singularity.
\end{abstract}
\section{Introduction}
In this paper we want to give a ringtheoretical explanation for the omnipresence of conifold singularities in partial desingularizations of three-dimensional quotient singularities coming from physics (see for example \cite{Berenstein} and \cite{BG98}) and to generalize this phenomenon to higher dimensions. For a translation between physics language and the mathematical terms used in this paper, we refer to section $4$ of our previous paper \cite{BLBS}.
If $X=\mathbb{C}^3/G$ is a three-dimensional quotient singularity, one considers the McKay quiver setting $(Q,\alpha)$ of the finite group $G$ and the order over $\mathbb{C}[X]$
\[
\Lambda = \frac{\mathbb{C} Q}{R} \]
obtained by dividing out commuting matrix relations, see for example \cite{CrawNotes}. One then chooses a stability structure $\theta$ such that the moduli space $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ of isomorphism classes of $\theta$-semistable $\alpha$-dimensional $\Lambda$-representations is a partial resolution of $X$. In fact, in most examples, one even has that the scheme $\wis{rep}^{\theta-semist}_{\alpha}~\Lambda$ of $\theta$-semistable $\alpha$-dimensional representations is a smooth variety. In this paper we will show that this condition implies that possible remaining singularities in the (partial) desingularization
\[
\wis{moduli}^{\theta}_{\alpha}~\Lambda \rOnto X \]
must be of conifold type. Moreover, we will extend this setting to higher dimensions.
Let $X$ be an affine normal variety with coordinate ring $R = \mathbb{C}[X]$ and function field $K=\mathbb{C}(X)$. Let $\Lambda$ be an $R$-order in a central simple $K$-algebra $\Sigma$ of dimension $n^2$. We say that $\Lambda$ is a {\em smooth $R$-order} if the scheme $\wis{trep}_n~\Lambda$ of trace preserving $n$-dimensional $\Lambda$-representations is a smooth variety. However, this is a very restrictive condition and usually an order $\Lambda$ will have a non-zero {\em defect} (to be defined in \S 2) to smoothness.
Still, if $\Lambda$ has a complete set of orthogonal idempotents $\{ e_1,\hdots,e_k \}$ we have a well-defined dimension vector $\alpha=(a_1,\hdots,a_k) \in \mathbb{N}^k$ (where $a_i = tr_{\Lambda}(e_i)$) such that
\[
\wis{trep}_n~\Lambda \simeq \wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda \]
Let $\theta \in \mathbb{Z}^k$ such that $\theta.\alpha = 0$ then we define an $\alpha$-dimensional $\Lambda$-representation $V \in \wis{rep}_{\alpha}~\Lambda$ to be {\em $\theta$-semistable} if for all $\Lambda$-subrepresentations $W$ of $V$ we have $\theta.\beta \geq 0$ where $\beta$ is the dimension vector of $W$. The set of all $\alpha$-dimensional $\theta$-semistable representations $\wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ is a Zariski open subset of $\wis{rep}_{\alpha}~\Lambda$.
In favorable situations we can choose a stability structure $\theta$ such that $\wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ is a smooth variety. In such a {\em good setting} we can use universal localization in $\wis{alg@n}$ to construct a sheaf $\mathcal{A}$ of smooth orders over the corresponding moduli space $\wis{moduli}_{\alpha}^{\theta}~\Lambda$ (parametrizing isomorphism classes of semistable $\alpha$-dimensional representations) giving a commutative diagram
\[
\xymatrix@R=40pt@C=45pt{
\wis{spec}~{\mathcal A} \ar[d]_c \ar[rd]^{\phi} \\
\wis{moduli}^{\theta}_{\alpha}~\Lambda \ar@{->>}[r]^{\pi} & X = \wis{spec}~R
}
\]
Here, $\wis{spec}~\mathcal{A}$ is a non-commutative variety obtained by gluing affine non-commutative structure sheaves $(\wis{spec}~\Lambda_D,\mathcal{O}^{nc}_{\Lambda_D})$ together. The map $c$ is defined locally by intersecting a prime ideal with its center and $\pi$ is a projective morphism. As $\mathcal{A}$ is a sheaf of smooth orders, one can view the resulting map $\phi$ as a {\em non-commutative desingularization} of $X$.
A good setting $(\Lambda,\alpha,\theta)$ also limits the types of remaining singularities in the partial desingularization $\pi$. If $\wis{dim}~X = 3$, the moduli space can at worst have conifold singularities, and in dimension $4,5$ resp. $6$ there is a full classification of the possible remaining singularities which consist of $4,10$ resp. $53$ types, see \cite{RBLBVdW}.
In the final section we study the special case of the conifold singularity in great detail. We give several ringtheoretical interpretations of the {\em conifold algebra} $\Lambda_c$ : as a skew-group ring over a polynomial ring and as a Clifford algebra. The latter description allows us to study the prime ideal structure of $\Lambda_c$ in great detail and determine its non-commutative structure sheaf $\mathcal{O}^{nc}_{\Lambda_c}$. We work out its scheme of $2$-dimensional representations, study the corresponding stability structures and work out the resulting desingularizations which are related by the so-called Atiyah flop.
The results contained in this paper were presented at the conference 'Sch\'emas de Hilbert, alg\`ebre non-commutative et correspondance de McKay' at CIRM, Luminy in October 2003, see \cite{LBnotes} for the lecture notes.
\section{Geometry of orders}
Let $X$ be a commutative normal variety with affine coordinate ring the normal domain $R = \mathbb{C}[X]$ and function field $K = \mathbb{C}(X)$. Let $\Sigma$ be a central simple $K$-algebra of dimension $n^2$ and let $\Lambda$ be an $R$-order in $\Sigma$, that is, $\Lambda$ is an $R$-subalgebra of $\Sigma$ which is finitely generated as an $R$-module and such that $\Lambda.K = \Sigma$. Recall that there is a reduced trace map $tr : \Sigma \rTo K$ satisfying $tr(\Lambda) = R$ (because $R$ is integrally closed). Composing $tr$ with the inclusion $R \subset \Lambda$ we get a linear map $tr_{\Lambda} : \Lambda \rTo \Lambda$. In particular, if $\Lambda = M_n(R)$ the usual trace map induces the linear map $tr_{M_n(R)} : M_n(R) \rTo M_n(R)$ sending a matrix $A \in M_n(R)$ to the diagonal matrix $tr(A) 1_n$.
The {\em scheme of trace preserving representations} $\wis{trep}_n~\Lambda$ is the affine scheme representing the functor
$\wis{commalg} \rTo \wis{sets}$ determined by
\[ \wis{trep}_n~\Lambda(\mathbb{C}) = \{ \Lambda \rTo^{\phi} M_n(\mathbb{C})~|~\text{$\phi$ an algebra morphism and~} \phi \circ tr_{\Lambda} = tr_{M_n(\mathbb{C})} \circ \phi~\}.
\]
It is well known, see for example \cite{ProcesiCH} that conjugation of $M_n(\mathbb{C})$ by $\wis{GL}_n(\mathbb{C})$ makes $\wis{trep}_n~\Lambda$ into an affine $\wis{GL}_n$-variety such that the corresponding algebraic quotient map
\[
\wis{trep}_n~\Lambda \rOnto \wis{trep}_n~\Lambda / \wis{GL}_n = \wis{triss}_n~\Lambda \simeq X = \wis{spec}~R \]
recovers the central variety $X$. One can also recover the order $\Lambda$ from the scheme of trace preserving representations as the algebra of $\wis{GL}_n$-equivariant maps from $\wis{trep}_n~\Lambda$ to $M_n(\mathbb{C}) = \mathbb{A}^{n^2}_{\mathbb{C}}$ where the latter variety is a $\wis{GL}_n$-variety under the action by conjugation, see again \cite{ProcesiCH}. The notation $\wis{triss}_n~\Lambda$ is motivated by the fact that the algebraic quotient of $\wis{trep}_n~\Lambda$ by $\wis{GL}_n$ classifies isomorphism classes of $n$-dimensional (trace preserving) semi-simple representations of $\Lambda$. That is, if $\mathfrak{m} \triangleleft R$ is a maximal ideal of $R$ with corresponding geometric point $x_{\mathfrak{m}} \in X$, then $\mathfrak{m}$ determines an $n$-dimensional semi-simple $\Lambda$-module
\[
M_{\mathfrak{m}} = S_1^{\oplus e_1} \oplus \hdots \oplus S_k^{\oplus e_k} ,
\]
where the $S_i$ are simple $\Lambda$-modules of dimension $d_i$ (and occurring in $M_{\mathfrak{m}}$ with multiplicity $e_i$) such that $\sum d_ie_i = n$. Indeed, the geometric point $x_{\mathfrak{m}}$ determines a trace preserving algebra map
\[
\overline{\Lambda}_{\mathfrak{m}} = \Lambda/ \mathfrak{m}\Lambda \rTo M_n(\mathbb{C}) \]
and hence an $n$-dimensional $\Lambda$-module $N_{\mathfrak{m}}$. The semi-simple module $M_{\mathfrak{m}}$ is the semi-simplification of $N_{\mathfrak{m}}$ that is the direct sum of its Jordan-H\"older factors. We say that $\mathfrak{m}$ (or the point $x_{\mathfrak{m}} \in X$) is of representation-type $\tau(\mathfrak{m}) = (e_1,d_1;\hdots;e_k,d_k)$.
To the maximal ideal $\mathfrak{m}$ we will associate a combinatorial tool, a quiver-setting $(Q_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ where $Q_{\mathfrak{m}}$ is the quiver on $k$ vertices (with vertex $v_i$ corresponding to the simple component $S_i$) such that the number of oriented arrows from vertex $v_i$ to vertex $v_j$ is given by
\[
\#~\{ \xymatrix{\vtx{v_i} \ar[r] & \vtx{v_j}} \} = \wis{dim}_{\mathbb{C}}~Ext^1_{\Lambda}(S_i,S_j) \]
and where the dimension vector $\alpha_{\mathfrak{m}} = (e_1,\hdots,e_k)$ is determined by the multiplicities. By this construction we have that the space of $\alpha_{\mathfrak{m}}$-dimensional representations of $Q_{\mathfrak{m}}$, $\wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ can be identified with the self-extension space $Ext^1_{\Lambda}(M_{\mathfrak{m}},M_{\mathfrak{m}})$. Observe that the action of the automorphism group $Aut_{\Lambda}(M_{\mathfrak{m}}) = \wis{GL}_{e_1} \times \hdots \times \wis{GL}_{e_k} = \wis{GL}(\alpha_{\mathfrak{m}})$ on the self-extensions $Ext^1_{\Lambda}(M_{\mathfrak{m}},M_{\mathfrak{m}})$ coincides with the action of $\wis{GL}(\alpha_{\mathfrak{m}})$ on $\wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ by base-change.
By definition of self-extensions every representation $V \in \wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ determines an algebra map
\[
\Lambda \rTo^{\phi_V} M_n(\mathbb{C}[\epsilon]),
\]
where $\mathbb{C}[\epsilon] = \mathbb{C}[x]/(x^2)$ is the algebra of dual numbers. The $\wis{GL}(\alpha_{\mathfrak{m}})$-subspace of $\wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ consisting of all trace preserving extensions, that is such that $tr_{M_n(\mathbb{C}[\epsilon])} \circ \phi_V = \phi_V \circ tr_{\Lambda}$ can again be identified with the representation space of a {\em marked} quiver setting $\wis{rep}_{\alpha_{\mathfrak{m}}} Q^{\dagger}_{\mathfrak{m}}$ where $Q^{\dagger}_{\mathfrak{m}}$ is the same quiver as $Q_{\mathfrak{m}}$ except that certain loops may be removed and that some other loops may acquire a marking by which we mean that a representation of $Q^{\dagger}_{\mathfrak{m}}$ in a marked loop corresponds to a trace zero matrix, see \cite{LBetale} for more details. The whole point of this construction is that the normal space in $M_{\mathfrak{m}}$ to the closed orbit ${\mathcal O}(M_{\mathfrak{m}})$ in the trace preserving representation space $\wis{trep}_n~\Lambda$
\[
\frac{T_{M_{\mathfrak{m}}}~\wis{trep}_n~\Lambda}{T_{M_{\mathfrak{m}}} {\mathcal O}(M_{\mathfrak{m}})} = N_{M_{\mathfrak{m}}} \simeq \wis{rep}_{\alpha_{\mathfrak{m}}} Q^{\dagger}_{\mathfrak{m}}
\]
can be identified with the representation space of the marked quiver and that this isomorphism is one of $\wis{GL}(\alpha_{\mathfrak{m}}) = Stab(M_{\mathfrak{m}})$-modules. This fact allows us to define a numerical {\em defect} measuring the failure of smoothness of $\wis{trep}_n~\Lambda$ over the point $x_{\mathfrak{m}}$.
\begin{definition} The {\em defect} $\wis{def}_{\mathfrak{m}}~\Lambda$ of the $R$-order $\Lambda$ in the maximal ideal $\mathfrak{m}$ is defined to be
\[
\wis{def}_{\mathfrak{m}}~\Lambda = 1-\chi(\alpha_{\mathfrak{m}},\alpha_{\mathfrak{m}}) - \# \{ \text{marked loops in $Q^{\dagger}_{\mathfrak{m}}$} \} - \wis{dim}~X,
\]
where $\chi : \mathbb{Z}^k \times \mathbb{Z}^k \rTo \mathbb{Z}$ is the Euler form of the quiver obtained from $Q^{\dagger}_{\mathfrak{m}}$ by forgetting the markings, that is, the entry $(i,j)$ of the matrix defining $\chi$ is equal to $\delta_{ij} - \# \{ \xymatrix{\vtx{v_i} \ar[r] & \vtx{v_j}} \}$.
\end{definition}
\begin{proposition} With notations as above, $\wis{def}_{\mathfrak{m}}~\Lambda \geq 0$ and the following statements are equivalent
\begin{enumerate}
\item{$\wis{def}_{\mathfrak{m}}~\Lambda = 0 $.}
\item{$\wis{trep}_n~\Lambda$ is a smooth variety in all points lying over $x_{\mathfrak{m}}$.}
\end{enumerate}
\end{proposition}
\begin{proof} As $\Lambda$ is an $R$-order in an $n^2$-dimensional central simple $K$-algebra $\Sigma$, there is a Zariski open subset $\wis{azu}_n~\Lambda$ of $X$ of points $x_{\mathfrak{m}}$ such that $\overline{\Lambda}_{\mathfrak{m}} \simeq M_n(\mathbb{C})$ (the so called Azumaya locus of $\Lambda$). Over $\wis{azu}_n~\Lambda$ the algebraic quotient map $\wis{trep}_n~\Lambda \rOnto X$ is a principal $\wis{PGL}_n$-fiber whence generically the trace preserving representation scheme has dimension
\[
\wis{dim}~\wis{trep}_n~\Lambda = \wis{dim}~X + n^2 - 1.
\]
On the other hand, the dimension of the tangent space to the representation scheme in the semi-simple representation $M_{\mathfrak{m}}$ is equal to
\[
\begin{split}
\wis{dim}~T_{M_{\mathfrak{m}}}~\wis{trep}_n~\Lambda &= \wis{dim}~{\mathcal O}(M_{\mathfrak{m}}) + \wis{dim}~\wis{rep}_{\alpha_{\mathfrak{m}}}~Q^{\dagger}_{\mathfrak{m}} \\
&= (n^2 - \sum_i e_i^2) + (\sum_{\xymatrix{\vtx{v_i} \ar[r] & \vtx{v_j}}} e_ie_j - \# \{ \text{marked loops in $Q^{\dagger}_{\mathfrak{m}}$} \} ) \\
&= n^2 - \chi(\alpha_{\mathfrak{m}},\alpha_{\mathfrak{m}}) - \# \{ \text{marked loops in $Q^{\dagger}_{\mathfrak{m}}$} \}
\end{split}
\]
and as $\wis{dim}~T_{M_{\mathfrak{m}}}~\wis{trep}_n~\Lambda \geq \wis{dim}~\wis{trep}_n~\Lambda$ it follows that $\wis{def}_{\mathfrak{m}}~\Lambda \geq 0$. Moreover, it also follows that $\wis{def}_{\mathfrak{m}}~\Lambda = 0$ if and only if $\wis{trep}_n~\Lambda$ is smooth in $M_{\mathfrak{m}}$. But as the singularities of $\wis{trep}_n~\Lambda$ form a $\wis{GL}_n$-closed subvariety and as ${\mathcal O}(M_{\mathfrak{m}})$ is the unique closed orbit lying over $x_{\mathfrak{m}}$ (recall that closed orbits in $\wis{trep}_n~\Lambda$ are precisely the isomorphism classes of semi-simple representations) the equivalence of the two statements follows.
\end{proof}
\begin{example} Consider the quantum plane of order two $\Lambda = \mathbb{C}_{-1}[x,y]$ determined by the commutation relation $xy+yx=0$. If $u=x^2$ and $v=y^2$ then the center of $\Lambda$ is the polynomial algebra $R=\mathbb{C}[u,v]$ and $\Lambda$ is a free module of rank $4$ over it. In fact, $\Lambda$ is an $R$-order in the quaternion-algebra
\[
\Sigma = \begin{pmatrix} u & & v \\ & \mathbb{C}(u,v) & \end{pmatrix} .
\]
The reduced trace map is determined by its images on a $\mathbb{C}$-basis
\[
tr(x^iy^j) = \begin{cases} 0 & \text{if either $i$ or $j$ is odd} \\
2x^iy^j & \text{if both $i$ and $j$ are even.}
\end{cases}
\]
In the affine plane $\mathbb{A}^2 = \wis{spec}~R$ the Azumaya locus of $\Lambda$ is $\wis{azu}_2~\Lambda = \mathbb{X}(uv)$ the complement of the two coordinate axes. Let $x_{\mathfrak{m}} = (a^2,b) \in \mathbb{X}(uv)$ then the corresponding $2$-dimensional simple representation $M_{\mathfrak{m}}$ is determined by
\[
\Lambda \rOnto^{\phi} M_2(\mathbb{C}) \qquad \text{with} \qquad \phi(x) = \begin{bmatrix} a & 0 \\ 0 & -a \end{bmatrix} \qquad \phi(y) = \begin{bmatrix} 0 & 1 \\ b & 0 \end{bmatrix}.
\]
One verifies that $Ext^1_{\Lambda}(M_{\mathfrak{m}},M_{\mathfrak{m}}) \simeq \mathbb{C}^2$ and that the corresponding algebra map $\Lambda \rTo^{\psi} M_2(\mathbb{C}[\epsilon])$ corresponding to $(\alpha,\beta) \in \mathbb{C}^2$ is given by
\[
\begin{cases}
\psi(x) &= \begin{bmatrix} a + \epsilon \alpha & 0 \\ 0 & -a - \epsilon \alpha \end{bmatrix} \\
\psi(y) &= \begin{bmatrix} 0 & 1 \\ b+\epsilon \beta & 0 \end{bmatrix}
\end{cases}
\]
and hence is trace preserving whence the local (marked) quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is given by
\[
\xymatrix{\vtx{1} \ar@(l,ul) \ar@(ur,r)} \]
whence the defect is equal to $\wis{def}_{\mathfrak{m}}~\Lambda = 1 - (-1) - 0 - 2 = 0$ consistent with the fact that over the Azumaya locus (which is a smooth subvariety of the central scheme in this case) the algebraic quotient map is a principal $\wis{PGL}_2$-fibration whence $\wis{trep}_2~\Lambda$ will be smooth over it. For general orders $\Lambda$, if $x_{\mathfrak{m}}$ is a smooth point of the central variety and lies in the Azumaya locus, then $\wis{def}_{\mathfrak{m}}~\Lambda = 0$.
For $x_{\mathfrak{m}} = (a^2,0) \in \mathbb{A}^2$ with $a \not= 0$ (and by a similar argument for points $(0,b)$ with $b \not= 0$), the corresponding semi-simple representation has two non-isomorphic one-dimensional simple components
\[
M_{\mathfrak{m}} = S_1 \oplus S_2 \qquad \text{with} \qquad S_i = \begin{cases} x \mapsto (-1)^i a \\ y \mapsto 0. \end{cases}
\]
One verifies that $Ext^1_{\Lambda}(S_i,S_i) = \mathbb{C}$ and that $Ext^1_{\Lambda}(S_1,S_2) \simeq Ext^1_{\Lambda}(S_2,S_1) \simeq \mathbb{C}$ whence the quiver-setting $(Q_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is given by
\[
\xymatrix{\vtx{1} \ar@(ul,dl)_{\alpha_1} \ar@/^/[r]^{\beta_1} & \vtx{1} \ar@(ur,dr)^{\alpha_2} \ar@/^/[l]^{\beta_2}} \]
and the corresponding algebra map $\Lambda \rTo M_2(\mathbb{C}[\epsilon])$ is given by
\[
x \mapsto \begin{bmatrix} a + \epsilon \alpha_1 & 0 \\ 0 & -a + \epsilon \alpha_2 \end{bmatrix} \qquad
y \mapsto \begin{bmatrix} 0 & \beta_1 \\ \beta_2 & 0 \end{bmatrix} \]
which is only trace preserving if $\alpha_2 = - \alpha_1$ so we have one linear relation among the representations and therefore the corresponding (marked) quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is equal to
\[
\xymatrix{\vtx{1} \ar@(ul,dl) \ar@/^/[r] & \vtx{1} \ar@/^/[l]} \]
and the defect is equal to $\wis{def}_{\mathfrak{m}}~\Lambda = 1 - (-1) - 0 - 2 = 0$ whence also over these ramified points the trace preserving representation variety $\wis{trep}_2~\Lambda$ is smooth.
Remains the point $x_{\mathfrak{m}} = (0,0)$ where the corresponding semi-simple representation is the zero-representation $M_{\mathfrak{m}} = S_0^{\oplus 2}$ where $S_0$ is determined by $x \mapsto 0$ and $y \mapsto 0$. One verifies that $Ext^1_{\Lambda}(S_0,S_0) \simeq \mathbb{C}^2$ whence the quiver-setting $(Q_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is equal to
\[
\xymatrix{\vtx{2} \ar@(l,ul)^{\begin{bmatrix} \alpha_1 & \alpha_2 \\ \alpha_3 & \alpha_4 \end{bmatrix}} \ar@(ur,r)^{\begin{bmatrix} \beta_1 & \beta_2 \\ \beta_3 & \beta_4 \end{bmatrix}}}
\]
with corresponding algebra map $\Lambda \rTo M_2(\mathbb{C}[\epsilon])$ given by
\[
x \mapsto \epsilon \begin{bmatrix} \alpha_1 & \alpha_2 \\ \alpha_3 & \alpha_4 \end{bmatrix} \qquad y \mapsto \epsilon \begin{bmatrix} \beta_1 & \beta_2 \\ \beta_3 & \beta_4 \end{bmatrix} \]
which is only trace preserving if $\alpha_4 = - \alpha_1$ and $\beta_4 = - \beta_1$. Therefore the marked quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is equal to
\[
\xymatrix{\vtx{2} \ar@(l,ul)|{\ast} \ar@(ur,r)|{\ast}} \]
and the defect is $\wis{def}_{\mathfrak{m}}~\Lambda = 1 -(-4)-2-2 = 1$ whence there must be a singularity of $\wis{trep}_2~\Lambda$ lying over $x_{\mathfrak{m}}$.
This is indeed the case as the geometric points of $\wis{trep}_2~\Lambda$ are determined by couples of $2 \times 2 $ matrices
\[
( \begin{bmatrix} x_1 & x_2 \\ x_3 & -x_1 \end{bmatrix} , \begin{bmatrix} y_1 & y_2 \\ y_3 & -y_1 \end{bmatrix} ) \quad \text{satisfying} \quad tr( \begin{bmatrix} x_1 & x_2 \\ x_3 & -x_1 \end{bmatrix}.\begin{bmatrix} y_1 & y_2 \\ y_3 & -y_1 \end{bmatrix}) = 0.
\]
That is, $\wis{trep}_2~\Lambda$ is the hypersurface in $\mathbb{A}^6$ determined by the equation
\[
\wis{trep}_2~\Lambda = \mathbb{V}(2x_1y_1 + x_2y_3 + x_3y_2) \rInto \mathbb{A}^6 \]
which is an irreducible $5$-dimensional variety having an isolated singularity at $x = (0,0,0,0,0,0)$ (the zero-representation).
\end{example}
\begin{definition} The {\em smooth locus} of an $R$-order $\Lambda$ is defined to be the subset of $X = \wis{spec}~R$
\[
\wis{smooth}_n~\Lambda = \{ x_{\mathfrak{m}} \in X~|~\wis{def}_{\mathfrak{m}}~\Lambda = 0 \}.
\]
We say that the order $\Lambda$ is {\em smooth} if $\wis{smooth}_n~\Lambda = X$, or equivalently, that $\wis{trep}_n~\Lambda$ is a smooth variety.
\end{definition}
If $X^{sm}$ denotes the smooth locus of $X = \wis{spec}~R$ then we have already seen that for any $R$-order $\Lambda$
\[
X^{sm} \cap \wis{azu}_n~\Lambda \rInto \wis{smooth}_n~\Lambda \]
as the algebraic quotient map $\wis{trep}_n~\Lambda \rOnto X$ is a principal $\wis{PGL}_n$-fibration over the Azumaya locus. In fact, for many interesting classes of orders the three loci coincide, that is,
\[
X^{sm} = \wis{azu}_n~\Lambda = \wis{smooth}_n~\Lambda.
\]
This is the case for quantum groups at roots of unity (see \cite{LBquantum}) and for orders associated at (deformed) preprojective algebras (see \cite{LBpreproj}). Later on we will prove a similar result for orders associated to quotient singularities.
If $x_{\mathfrak{m}} \in \wis{smooth}_n~\Lambda$ we know from \cite{LBetale} that the marked quiver setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ contains enough information to describe the \'etale local structure of $X$ near $x_{\mathfrak{m}}$ (that is, the structure of the $\mathfrak{m}$-adic completion $\hat{R}_{\mathfrak{m}}$) as well as the \'etale local structure of $\Lambda$ near $\mathfrak{m}$ (that is, the $\mathfrak{m}$-adic completion $\hat{\Lambda}_{\mathfrak{m}}$). We recall the result and refer to \cite{LBetale} for proof and more details.
\begin{proposition} Let $x_{\mathfrak{m}} \in \wis{smooth}_n~\Lambda$ with associated marked quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ with $\alpha_{\mathfrak{m}} = (a_1,\hdots,a_k)$. Then,
\begin{enumerate}
\item{The $\mathfrak{m}$-adic completion of the center $\hat{R}_{\mathfrak{m}}$ is isomorphic to the completion of the algebra generated by traces along oriented cycles in $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ at the maximal ideal generated by these traces.}
\item{The $\mathfrak{m}$-adic completion of the order $\Lambda$ is of the form
\[
\hat{\Lambda}_{\mathfrak{m}} \simeq \begin{bmatrix} M_{11} & \hdots & M_{1k} \\
\vdots & & \vdots \\
M_{k1} & \hdots & M_{kk} \end{bmatrix} \]
where $M_{ij}$ is a block of size $a_i \times a_j$ with all entries equal to the $\hat{R}_{\mathfrak{m}}$-module generated by all paths in $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ starting at vertex $v_i$ and ending in vertex $v_j$.}
\end{enumerate}
\end{proposition}
In particular, if $x_{\mathfrak{m}} \in \wis{smooth}_n~\Lambda$ we can describe the finite dimensional algebra $\overline{\Lambda}_{\mathfrak{m}} = \Lambda / \mathfrak{m} \Lambda$ to be Morita equivalent to the quotient of the path algebra of the underlying quiver $\mathbb{C} Q^{\dagger}_{\mathfrak{m}}$ by the ideal generated by all cycles in $Q^{\dagger}_{\mathfrak{m}}$.
\begin{definition}
Let $\wis{cat}$ be a category of $\mathbb{C}$-algebras. We say that an algebra $A \in \wis{cat}$ is {\em $\wis{cat}$-smooth} if and only if for every $B \in \wis{cat}$, every quotient $B \rOnto^{\pi} B/I$ in $\wis{cat}$ with $I$ a nilpotent ideal and every algebra morphism $A \rTo^{\phi} B/I$ in $\wis{cat}$ the diagram
\[
\xymatrix@R=45pt@C=45pt{
A \ar[rd]_{\phi} \ar@{.>}[r]^{\tilde{\phi}} & B \ar@{->>}[d]^{\pi} \\
& B/I
}
\]
can be completed by an algebra morphism $A \rTo^{\tilde{\phi}} B$ in $\wis{cat}$.
\end{definition}
Grothendieck proved that an affine commutative $\mathbb{C}$-algebra $R$ is $\wis{commalg}$-smooth if and only if $R$ is regular, that is, if and only if $X = \wis{spec}~R$ is a smooth variety. Cuntz and Quillen \cite{CuntzQuillen} introduced {\em quasi-free algebras} as coordinate rings of non-commutative algebraic manifolds and they are precisely the $\wis{alg}$-smooth algebras. Similarly, smooth orders are $\wis{alg@n}$-smooth algebras where $\wis{alg@n}$ is the category of Cayley-Hamilton algebras of degree $n$ which we will describe briefly and refer to \cite{ProcesiCH} for more details.
If $M \in M_n(R)$ for $R$ a commutative $\mathbb{C}$-algebra, then its characteristic polynomial
\[
\chi_M = det(t1_n-M) = t^n + a_1 t^{n-1} + \hdots + a_n \]
is such that all its coefficients are polynomials with rational coefficients in traces of powers of $M$, that is, $a_i = f_i(Tr(M),Tr(M^2),\hdots,Tr(M^{n-1}))$. Hence, if $A$ is a $\mathbb{C}$-algebra having a trace map $tr_A~:~A \rTo A$ (a linear map satisfying $tr_A(tr_A(a)b)=tr_A(a)tr_A(b)$, $tr_A(ab)=tr_A(ba)$ and $tr_A(a)~b=b~tr_A(a)$ for all $a,b \in A$) then we define a {\em formal characteristic polynomial of degree $n$} for every $a \in A$ by
\[
\chi_a = t^n + f_1(tr_A(a),\hdots,tr_A(a^{n-1})) t^{n-1} + \hdots + f_n(tr_A(a),\hdots,tr_A(a^{n-1})) \]
\begin{definition} An object of $\wis{alg@n}$ is a Cayley-Hamilton algebra of degree $n$, that is, a $\mathbb{C}$-algebra having a trace map $tr_A$ satisfying
\[
\forall a \in A:~\chi_a(a) = 0~ \qquad \text{and} \qquad tr_A(1) = n \]
Morphisms $A \rTo^f B$ in $\wis{alg@n}$ are $\mathbb{C}$-algebra morphisms preserving traces, that is
\[
\xymatrix@R=45pt@C=45pt{
A \ar[r]^f \ar[d]_{tr_A} & B \ar[d]_{tr_B} \\
A \ar[r]^f & B
}
\]
is a commutative diagram.
\end{definition}
We recall from \cite{ProcesiCH} that $A \in \wis{alg@n}$ is $\wis{alg@n}$-smooth if and only if $\wis{trep}_n~A$ is a smooth variety (possibly having several irreducible components). In particular, a smooth order $\Lambda$ in a central simple $K$-algebra $\Sigma$ of dimension $n^2$ equipped with the reduced trace map is $\wis{alg@n}$-smooth.
Having identified smooth orders as a natural generalization of regular commutative algebras to the category of Cayley-Hamilton algebras and having a combinatorial local description of them (as well as their centers), we now turn to the associated {\em non-commutative smooth variety}.
\begin{definition}
Let $\Lambda$ be an $R$-order in a central simple $K$-algebra $\Sigma$ of dimension $n^2$, then the {\em non-commutative spectrum}, $\wis{spec}~\Lambda$ is the set of all twosided prime ideals $P$ of $\Lambda$ (that is, the ideals satisfying $a \Lambda b \subset P \Rightarrow a$ or $b \in P$). This set is equipped with the {\em Zariski topology} with typical open sets
\[
\mathbb{X}(I) = \{ P \in \wis{spec}~\Lambda~|~I \not\subset P \}
\]
for any twosided ideal $I$ of $\Lambda$
(see for example \cite{FVO444} and \cite{FVOAV}). The topological space $\wis{spec}~\Lambda$ comes equipped with a {\em non-commutative structure sheaf} $\mathcal{O}^{nc}_{\Lambda}$ with sections on the open set $\mathbb{X}(I)$
\[
\Gamma(\mathbb{X}(I),\mathcal{O}^{nc}_{\Lambda}) = \{ \delta \in \Sigma~|~\exists l \in \mathbb{N}~:~I^l.\delta \subset \Lambda \} \]
(again see \cite{FVO444} or \cite{FVOAV} for a proof that this defines a sheaf of non-commutative algebras with global sections $\Gamma(\wis{spec}~\Lambda,\mathcal{O}^{nc}_{\Lambda}) = \Lambda$).
Moreover, the {\em stalk} of $\mathcal{O}^{nc}_{\Lambda}$ at a prime ideal $P \in \wis{spec}~\Lambda$ is the symmetric localization
\[
\mathcal{O}^{nc}_{\Lambda,P} = Q_{\Lambda-P}(\Lambda) = \{ \delta \in \Sigma~|~I \delta \subset \Lambda~\text{for some twosided ideal}~I \not\subset P \}.
\]
\end{definition}
Intersecting a twosided prime ideal $P$ of $\Lambda$ with its center gives a prime ideal of $R$ and hence we obtain a continuous map
\[
\wis{spec}~\Lambda \rTo^{\pi_c} \wis{spec}~R \qquad P \mapsto P \cap R \]
and if we denote with ${\mathcal O}_{\Lambda}$ the (usual) sheaf of $R$-algebras on $\wis{spec}~R$ associated to the $R$-order $\Lambda$ then $\pi_c$ induces a morphism of sheaves of algebras
\[
(\wis{spec}~\Lambda, {\mathcal O}^{nc}_{\Lambda}) \rTo^{\pi_c} (\wis{spec}~R, {\mathcal O}_{\Lambda}).
\]
For $\mathfrak{m}$ a maximal ideal of $R$ we can relate the local marked quiver setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ to the fiber $\pi_c^{-1}(\mathfrak{m})$. This quiver setting was determined by the semi-simple $n$-dimensional $\Lambda$-representation
\[
M_{\mathfrak{m}} = S_1^{\oplus e_1} \oplus \hdots \oplus S_k^{\oplus e_k} \]
where $S_i$ is a simple $d_i$-dimensional $\Lambda$-representation. Then, we have that
\[
\pi_c^{-1}(\mathfrak{m}) = \{ P_1,\hdots,P_k \} \qquad \text{with} \qquad \Lambda/P_i \simeq M_{d_i}(\mathbb{C}) \]
so the number of vertices in $Q^{\dagger}_{\mathfrak{m}}$ determines the number of maximal twosided ideals of $\Lambda$ lying over $\mathfrak{m}$ and the dimension vector $\alpha_{\mathfrak{m}} = (e_1,\hdots,e_k)$ determines the so called Bergman-Small data, see \cite{BergmanSmall}. The finitely many maximal twosided ideals $\{ P_1,\hdots,P_k \}$ lying over the central point $\mathfrak{m}$ form a {\em clique} \cite{Jategaonkar} and should be thought of as points lying infinitesimally close together in $\wis{spec}~\Lambda$. The marked quiver $Q^{\dagger}$ encodes this infinitesimal information. If $\mathfrak{m}$ is a central singularity, the hope is that one can use these finitely many infinitesimally close points to separate tangent information in $\mathfrak{m}$ rather than having to resort to the full blown-up of $\mathfrak{m}$. In the next section we will give some examples when this non-commutative approach to desingularization actually works.
\begin{example} Let $X = \mathbb{A}^1$, that is $R=\mathbb{C}[x]$ and consider the order
\[
\Lambda = \begin{bmatrix} R & R \\ \mathfrak{m} & R \end{bmatrix},
\]
where $\mathfrak{m} = (x) \triangleleft R$, that is $x_{\mathfrak{m}} = 0$. For every point $\lambda \not= 0$ there is a unique maximal twosided ideal of $\Lambda$ lying over $\mathfrak{m}_{\lambda} = (x-\lambda)$ with quotient $M_2(\mathbb{C})$. For this reason we say that $X - \{ 0 \}$ is the {\em Azumaya locus} of $\Lambda$. On the other hand, the {\em ramification locus} of $\Lambda$ is the closed subset $\{ 0 \} = \mathbb{V}(x)$ and there are two maximal ideals of $\Lambda$ lying over $\mathfrak{m}$
\[
M_1 = \begin{bmatrix} \mathfrak{m} & R \\ \mathfrak{m} & R \end{bmatrix} \qquad \text{and} \qquad M_2 = \begin{bmatrix} R & R \\ \mathfrak{m} & \mathfrak{m} \end{bmatrix} \]
and the quotients are $\Lambda/M_1 \simeq \mathbb{C} \simeq \Lambda/M_2$ whence they determine both a one-dimensional $\Lambda$-representation. That is, the canonical continuous map
\[
\wis{spec}~\Lambda \rOnto^{\pi_c} \wis{spec}~R \]
is a homeomorphism over $\mathbb{X}(x)$ and there are precisely two (infinitesimally close) points lying over $\mathbb{V}(x)$. The corresponding (marked) quiver setting is
\[
\xymatrix{\vtx{1} \ar@/^/[r] & \vtx{1} \ar@/^/[l]}
\]
and so the defect $\wis{def}_{\mathfrak{m}}~\Lambda = 0$. Remark that in all other maximal ideals $\mathfrak{m}_{\lambda}$ the local (marked) quiver setting is
\[
\xymatrix{\vtx{1} \ar@(ul,ur)} \]
which also has zero defect so $\Lambda$ is a smooth order and hence $\wis{trep}_2~\Lambda$ is a smooth variety. We now turn to the structure sheaves ${\mathcal O}_{\Lambda}$ and ${\mathcal O}_{\Lambda}^{nc}$.
The central structure sheaf is just given by central localization and therefore we find for its stalks
\[
{\mathcal O}_{\Lambda,\mathfrak{m}} = \begin{bmatrix} R_{\mathfrak{m}} & R_{\mathfrak{m}} \\ \mathfrak{m} R_{\mathfrak{m}} & R_{\mathfrak{m}} \end{bmatrix} \qquad {\mathcal O}_{\Lambda,\mathfrak{m}_{\lambda}} \simeq \begin{bmatrix} R_{\mathfrak{m}_{\lambda}} & R_{\mathfrak{m}_{\lambda}} \\ R_{\mathfrak{m}_{\lambda}} & R_{\mathfrak{m}_{\lambda}} \end{bmatrix}.
\]
Over the Azumaya locus the non-commutative structure sheaf ${\mathcal O}_{\Lambda}^{nc}$ coincides with the central structure sheaf. The stalks in the two points lying over $\mathfrak{m}$ can be computed to be
\[
{\mathcal O}_{\Lambda,M_1}^{nc} \simeq \begin{bmatrix} R_{\mathfrak{m}} & R_{\mathfrak{m}} \\ R_{\mathfrak{m}} & R_{\mathfrak{m}} \end{bmatrix} \qquad {\mathcal O}_{\Lambda,M_2}^{nc} \simeq
\begin{bmatrix} R_{\mathfrak{m}} & x^{-1} R_{\mathfrak{m}} \\ x R_{\mathfrak{m}} & R_{\mathfrak{m}} \end{bmatrix}, \]
both of them being Azumaya algebras. Hence, we have the slightly surprising fact that the non-commutative structure sheaf ${\mathcal O}_{\Lambda}^{nc}$ over $\wis{spec}~\Lambda$ is a sheaf of Azumaya algebras whereas $\Lambda$ itself is ramified in $\mathfrak{m}$. Observe that the stalk in $\mathfrak{m}$ of the central structure sheaf is the intersection of the two Azumaya stalks of the non-commutative structure sheaf.
\end{example}
\section{Moduli spaces}
In this section, $\Lambda$ will be an $R$-order in a central simple $K$-algebra of dimension $n^2$ and $\mathfrak{m}$ will be a singularity of $\wis{spec}~R = X$. We want to use $\Lambda$ to resolve the singularity in $\mathfrak{m}$. As we are only interested in the \'etale local structure of the singularity in $\mathfrak{m}$ we may restrict attention to $\hat{\Lambda}_{\mathfrak{m}}$ or more generally it is only the \'etale local structure of $\Lambda$ that is important. Hence, we may assume that $\Lambda$ is split as far as possible, or equivalently, that we have a complete set $\{ e_1,\hdots,e_k \}$ of orthogonal idempotents in $\Lambda$. That is the $e_i$ satisfy
\[
e_i^2 = e_i \qquad e_i.e_j = 0~\text{for $i \not= j$} \qquad \sum_{i=1}^k e_i = 1_{\Lambda} \]
These idempotents allow us to decompose finite dimensional $\Lambda$-representations. If $V \in \wis{rep}_m~\Lambda$ is an $m$-dimensional representation, we say that $V$ is of {\em dimension vector} $\alpha = (a_1,\hdots,a_k)$ for $\sum_{i=1}^k a_i = m$ provided
\[
\wis{dim}_{\mathbb{C}}~e_i.V = a_i \]
We denote this by $\wis{dim}~V = \alpha$.
Because $S = \overbrace{\mathbb{C} \times \hdots \times \mathbb{C}}^k \rInto \Lambda$ we can restrict $m$-dimensional $\Lambda$ representations to the semi-simple subalgebra $S$ to obtain morphisms
\[
\wis{rep}_m~\Lambda \rTo \wis{rep}_m~S = \bigsqcup_{\alpha}~\wis{GL}_m/\wis{GL}(\alpha) \]
where the decomposition is taken over all dimension vectors $\alpha = (a_1,\hdots,a_k)$ such that $\sum_i a_i = m$ and where $\wis{GL}(\alpha) = \wis{GL}_{a_1} \times \hdots \times \wis{GL}_{a_k}$. The component $\wis{GL}_m/\wis{GL}(\alpha)$ is the orbit of the semi-simple $S$-representation $V_{\alpha}$ with action given by the matrices
\[
e_i \mapsto E_{\sum_{j=1}^{i-1} a_j +1,\sum_{j=1}^{i-1} a_j + 1} + E_{\sum_{j=1}^{i-1} a_j +2,\sum_{j=1}^{i-1} a_j + 2} + \hdots + E_{\sum_{j=1}^{i} a_j,\sum_{j=1}^{i} a_j} \]
where $E_{i,j}$ are the standard matrices $(\delta_{iu}\delta_{jv})_{u,v} \in M_m(\mathbb{C})$. As a consequence we can also decompose the representation schemes
\[
\wis{rep}_m~\Lambda = \bigsqcup_{\alpha} \wis{GL}_m \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda \]
where $\wis{rep}_{\alpha}~\Lambda$ is the scheme representing all $m = \sum_i a_i$-dimensional representations of dimension vector $\alpha = (a_1,\hdots,a_k)$ on which the action by the set of idempotents $\{ e_1,\hdots,e_k \}$ is given by the above matrices. Clearly, the reductive group $\wis{GL}(\alpha)$ acts by base-change in the subspaces $e_i.V$ on $\wis{rep}_{\alpha}~\Lambda$ and the corresponding component of $\wis{rep}_m~\Lambda$ is the principal fiber bundle $\wis{GL}_m \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda$.
A {\em character} of the reductive group $\wis{GL}(\alpha)$ is determined by an integral $k$-tuple $\theta = (t_1,\hdots,t_k) \in \mathbb{Z}^k$
\[
\chi_{\theta}~:~\wis{GL}(\alpha) \rTo \mathbb{C}^* \qquad (g_1,\hdots,g_k) \mapsto det(g_1)^{t_1} \hdots det(g_k)^{t_k} \]
As the subgroup $\mathbb{C}^*(1_{a_1},\hdots,1_{a_k})$ acts trivially on $\wis{rep}_{\alpha}~\Lambda$ we are only interested in the characters $\chi_{\theta}$ such that $0 = \theta.\alpha = \sum_{i=1}^k a_it_i$. Remark that a $\Lambda$-subrepresentation $W \subset V$ for $V \in \wis{rep}_{\alpha}~\Lambda$ necessarily satisfies $W \in \wis{rep}_{\beta}~\Lambda$ for some dimension vector $\beta \leq \alpha$.
We will now extend the definition of (semi)stable representations of quivers, due to A. King \cite{King} to the present setting.
\begin{definition} For $\theta \in \mathbb{Z}^k$ satisfying $\theta.\alpha = 0$, a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is said to be
\begin{enumerate}
\item{{\em $\theta$-semistable} if and only if for every proper $\Lambda$-subrepresentation $W \subset V$ we have $\theta.\wis{dim}~W \geq 0$.}
\item{{\em $\theta$-stable} if and only if for every proper $\Lambda$-subrepresentation $W \subset V$ we have $\theta.\wis{dim}~W > 0$.}
\end{enumerate}
\end{definition}
For any setting satisfying $\theta.\alpha = 0$ we have the following inclusions of Zariski open $\wis{GL}(\alpha)$-stable subschemes of $\wis{rep}_{\alpha}~\Lambda$ (with obvious notations)
\[
\wis{rep}_{\alpha}^{simple}~\Lambda \subset \wis{rep}^{\theta-stable}_{\alpha}~\Lambda \subset \wis{rep}_{\alpha}^{\theta-semist}~\Lambda \subset \wis{rep}_{\alpha}~\Lambda \]
but some of these open subsets may actually be empty.
All these definitions carry over to any affine $\mathbb{C}$-algebra $\Lambda$ but if $\Lambda$ is an $R$-order in a central simple $K$-algebra of dimension $n^2$ we have the following link with the material of the previous section
\[
\wis{trep}_n~\Lambda = \wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda \]
for the dimension vector $\alpha = (tr_{\Lambda}(e_1),\hdots,tr_{\Lambda}(e_k))$. Moreover,
\[
R = \mathbb{C}[\wis{triss}_n~\Lambda] = \mathbb{C}[\wis{iss}_{\alpha}~\Lambda] = \mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha)} \]
where $\wis{iss}_{\alpha}~\Lambda$ is the scheme representing semi-simple $\alpha$-dimensional representations of $\Lambda$. Remark that the dimension vector $\alpha$ above is such that there are $\alpha$-dimensional simple representations of $\Lambda$ so that in the above inclusion of $\wis{GL}(\alpha)$-stable subvarieties of $\wis{rep}_{\alpha}~\Lambda$ none of the subschemes is empty. From now on we fix this particular dimension vector $\alpha$ of total dimension $n$.
A polynomial function $f \in \mathbb{C}[\wis{rep}_{\alpha}~\Lambda]$ is said to be a {\em $\theta$-semi-invariant of weight $l$} if and only if we have for all $g \in \wis{GL}(\alpha)$
\[
g.f = \chi_{\theta}(g)^l f \]
where, as before, $\chi_{\theta}$ is the character of $\wis{GL}(\alpha)$ corresponding to $\theta$. It follows from \cite{King} that a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is $\theta$-semistable if and only if there is some $\theta$-semi-invariant $f$ of some weight $l$ such that $f(V) \not= 0$.
Clearly, $\theta$-semi-invariants of weight zero are just polynomial invariants in $\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha)} = R$ and the multiplication of $\theta$-semi-invariants of weights $l$ resp. $l'$ is a $\theta$-semi-invariant of weight $l+l'$. Therefore, the ring of all $\theta$-semi-invariants
\[
\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta} = \bigoplus_{l=0}^{\infty} \{ f \in \mathbb{C}[\wis{rep}_{\alpha}~\Lambda]~|~\forall g \in \wis{GL}(\alpha)~:~g.f = \chi_{\theta}^l f \} \]
is a graded algebra with part of degree zero $R = \mathbb{C}[\wis{iss}_{\alpha}~\Lambda]$. Consequently, we have a projective morphism
\[
\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta} \rOnto^{\pi} X = \wis{spec}~R \]
such that all fibers of $\pi$ are projective varieties. The main properties of $\pi$ are proved as in \cite{King}.
\begin{theorem} There is a one-to-one correspondence between
\begin{enumerate}
\item{points in $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}$, and}
\item{isomorphism classes of direct sums of $\theta$-stable $\Lambda$ representations of total dimension $\alpha$.}
\end{enumerate}
Moreover, as there are simple $\alpha$-dimensional $\Lambda$-representations, the morphism $\pi$ is a birational projective map.
\end{theorem}
\begin{definition} We call $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}$ the {\em moduli space of $\theta$-semistable representations} of $\Lambda$ and will denote it with $\wis{moduli}^{\theta}_{\alpha}~\Lambda$.
\end{definition}
Let us recall some examples of current interest.
\begin{example}[Kleinian singularities]
For a Kleinian singularity, that is, a quotient singularity $\mathbb{C}^2/G$ with $G \subset SL_2(\mathbb{C})$ there is an extended Dynkin diagram $D$ associated.
Let $Q$ be the {\em double quiver} of $D$, that is to each arrow $\xymatrix{\vtx{} \ar[r]^x & \vtx{}}$ in $D$ we adjoin an arrow $\xymatrix{\vtx{} & \vtx{} \ar[l]_{x^*} }$ in $Q$ in the opposite direction and let $\alpha$ be the unique minimal dimension vector such that $\chi_D(\alpha,\alpha) = 0$ (the so called isotropic Schur root of the tame quiver $\vec{D}$ obtained from the graph $D$ by fixing a certain orientation on the edges). Consider the {\em moment element}
\[
m = \sum_{x \in D} [x,x^*] \]
then the skew-group algebra $\Lambda = \mathbb{C}[x,y] \# G$ is an $R$-order with $R = \mathbb{C}[\mathbb{C}^2/G]$ in $M_n(K)$ where $K$ is the field of fractions of $R$ and $n = \#G$. Moreover, $\Lambda$ is Morita equivalent to the {\em preprojective algebra} which is the quotient of the path algebra of $Q$ by the ideal generated by the moment element
\[
\Pi_0 = \mathbb{C} Q/ (\sum [x,x^*] ).
\]
For more details we refer to the lecture notes by W. Crawley-Boevey \cite{CrawleyLectNotes}.
If we take $\theta$ to be a generic character such that $\theta.\alpha = 0$, then the projective map
\[
\wis{moduli}^{\theta}_{\alpha}~\Lambda \rOnto X = \mathbb{C}^2/G \]
is a minimal resolution of singularities. Note that the map is birational as $\alpha$ is the dimension vector of a simple representation of $A = \Pi_0$, see \cite{CrawleyLectNotes}.
For such a stability structure $\theta$ we have that $\wis{rep}^{\theta-semist}_{\alpha}~\Pi_0$ is a smooth variety.
For consider the {\em moment map}
\[
\wis{rep}_{\alpha}~Q \rTo^{\mu} \wis{lie}~\wis{GL}(\alpha) = M_{\alpha}(\mathbb{C}) = M_{e_1}(\mathbb{C}) \oplus \hdots \oplus M_{e_k}(\mathbb{C})
\]
defined by sending $V = (V_a,V_{a^*})$ to
\[
(\sum_{\xymatrix{\vtx{} \ar[r]^a&\vtx{1}}} V_aV_{a^*} - \sum_{\xymatrix{\vtx{1} \ar[r]^a & \vtx{}}} V_{a^*}V_a, \hdots, \sum_{\xymatrix{\vtx{} \ar[r]^a&\vtx{k}} }V_aV_{a^*} - \sum_{\xymatrix{\vtx{k} \ar[r]^a & \vtx{}} }V_{a^*}V_a).\]
The differential $d \mu$ can be verified to be surjective in any representation $V \in \wis{rep}_{\alpha}~Q$ which has stabilizer subgroup $\mathbb{C}^*(1_{e_1},\hdots,1_{e_k})$ (a so called {\em Schur representation}) see for example \cite[lemma 6.5]{CrawleyMoment}.
Further, any $\theta$-stable representation is Schurian. Moreover, for a generic stability structure $\theta \in \mathbb{Z}^k$ we have that every $\theta$-semistable $\alpha$-dimensional representation is $\theta$-stable as $\gcd(\alpha) = 1$.
Combining these facts it follows that $\mu^{-1}(0) = \wis{rep}_{\alpha}~\Pi_0$ is smooth in all $\theta$-stable representations.
\end{example}
\begin{example}
Consider a quotient singularity $X = \mathbb{C}^d/G$ with $G \subset SL_d(\mathbb{C})$ and let $Q$ be the {\em McKay quiver} of $G$ acting on $V=\mathbb{C}^d$.
That is, the vertices $\{ v_1,\hdots,v_k \}$ of $Q$ are in one-to-one correspondence with the irreducible representations $\{ R_1,\hdots,R_k \}$ of $G$ such that $R_1 = \mathbb{C}_{triv}$ is the trivial representation. Decompose the tensorproduct in irreducibles
\[
V \otimes_{\mathbb{C}} R_j = R_1^{\oplus j_1} \oplus \hdots \oplus R_k^{\oplus j_k}, \]
then the number of arrows in $Q$ from $v_i$ to $v_j$
\[
\#~(v_i \rTo v_j ) = j_i \]
is the multiplicity of $R_i$ in $V \otimes R_j$. Let $\alpha = (e_1,\hdots,e_k)$ be the dimension vector where $e_i = \wis{dim}_{\mathbb{C}}~R_i$.
The relevance of this quiver-setting is that
\[
\wis{rep}_{\alpha}~Q = Hom_G(R,R \otimes V) \]
where $R$ is the {\em regular representation}, see for example \cite{CrawNotes}. Consider $Y \subset \wis{rep}_{\alpha}~Q$ the affine subvariety of all $\alpha$-dimensional representations of $Q$ for which the corresponding $G$-equivariant map $B \in Hom_G(R,V \otimes R)$ satisfies
\[
B \wedge B = 0 \in Hom_G(R,\wedge^2 V \otimes R). \]
$Y$ is called the {\em variety of commuting matrices} and its defining relations can be expressed as linear equations between paths in $Q$ evaluated in $\wis{rep}_{\alpha}~Q$, say $(l_1,\hdots,l_z)$. Then, the quiver-order
\[
\Lambda = \frac{\int_{\alpha} \mathbb{C} Q}{(l_1,\hdots,l_z)} \]
is an order with center $R = \mathbb{C}[\mathbb{C}^d/G]$. In fact, $\Lambda$ is just the skew group algebra
\[
A = \mathbb{C}[x_1,\hdots,x_d] \# G. \]
Assume that the first vertex in the McKay quiver corresponds to the trivial representation. Take a character $\theta \in \mathbb{Z}^k$ such that $t_1 < 0$ and all $t_i > 0$ for $i \geq 2$, for example take
\[
\theta = ( - \sum_{i=2}^k \wis{dim} R_i , 1, \hdots, 1 ). \]
Then, the corresponding moduli space is isomorphic to
\[
\wis{moduli}^{\theta}_{\alpha}~A \simeq G-\wis{Hilb}~\mathbb{C}^d \]
the {\em $G$-equivariant Hilbert scheme} which classifies all $\# G$-codimensional ideals $I \triangleleft \mathbb{C}[x_1,\hdots,x_d]$ where
\[
\frac{\mathbb{C}[x_1,\hdots,x_d]}{I} \simeq \mathbb{C} G \]
as $G$-modules, hence in particular $I$ must be stable under the action of $G$. It is well known that the natural map
\[
G-\wis{Hilb}~\mathbb{C}^d \rOnto X = \mathbb{C}^d/G \]
is a minimal resolution if $d=2$ and if $d=3$ it is often a crepant resolution, for example whenever $G$ is Abelian, see \cite{CrawNotes} for more details. In all cases where $G-\wis{Hilb}~\mathbb{C}^d$ is a desingularization we have again that the corresponding open subvariety $\wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ is smooth. For, in this case the quotient map
\[
\wis{rep}_{\alpha}^{\theta-semist}~\Lambda = \wis{rep}_{\alpha}^{\theta-stable}~\Lambda \rOnto \wis{moduli}^{\theta}_{\alpha}~\Lambda = G-\wis{Hilb}~\mathbb{C}^d \]
is a principal $\wis{PGL}(\alpha)$-fibration and as the base space is smooth by assumption so is the top space.
As we didn't find explicit non-Abelian examples for $\mathbb{C}^3$ in the literature, we include the following simplest example.
Let $A_{4}$ be the alternating group of order $12$ acting on three-dimensional space $\mathbb{C}^3$ via the matrices
\[
A_{4} = \langle~s=\begin{bmatrix}1&0&0\\0&-1&0\\0&0&-1\end{bmatrix}, t=\begin{bmatrix}-1&0&0\\0&-1&0\\0&0&1\end{bmatrix},
r=\begin{bmatrix}0&1&0\\0&0&1\\ 1& 0&0\end{bmatrix}~\rangle \]
the corresponding quotient singularity $\mathbb{C}^3/A_{4}$ has coordinate ring
\[
\mathbb{C}[x,y,z]^{A_{4}}=\mathbb{C}[A(x,y,z),B(x,y,z),C(x,y,z),D(x,y,z)]
\]
with
\[
\begin{cases}
A(x,y,z)=xyz,\\
B(x,y,z)=x^2+y^2+z^2,\\
C(x,y,z)=x^2y^2+y^2z^2+z^2x^2,\\
D(x,y,z)=x^4y^2+y^4z^2+z^4x^2.
\end{cases}
\]
$A$, $B$, $C$ and $D$ obey the relation
\[
D^2+C^3-BCD+A^2(3D-6BC+B^3+9A^2)=0,
\]
whence the quotient singularity $\mathbb{C}^3/A_{4}$ is a hypersurface in $\mathbb{C}^4$.
The character table of the group $A_{4}$ is given by
\[
\begin{array}{c|ccccc}
A_{4} & 1 & \left[\begin{smallmatrix}*&0&0\\0&*&0\\0&0&* \end{smallmatrix}\right] & \left[\begin{smallmatrix}0&*&0\\0&0&*\\ *& 0&0 \end{smallmatrix}\right] & \left[\begin{smallmatrix}0&0&*\\ *&0&0\\ 0&*&0 \end{smallmatrix}\right] \\
& & & & \\
\hline
& & & & \\
V_0 & 1 & 1 & 1 & 1 \\
V_1 & 1 & 1 & \rho & \rho^2\\
V_2 & 1 & 1 & \rho^2 & \rho \\
V_3 & 3 & -1 & 0 & 0
\end{array}
\]
where $\rho$ is a primitive $3$-rd root of unity and therefore
the regular representation is $R=V_0\oplus V_1 \oplus V_2 \oplus V_3^{(1)} \oplus V_3^{(2)} \oplus V_3^{(3)}$. From the character table we deduce the isomorphisms of $A_{4}$-representations
\begin{align*}
&V_3\otimes V_0=V_3\otimes V_1=V_3\otimes V_2=V_3\\
&V_3\otimes V_3=V_0\oplus V_1 \oplus V_2 \oplus V_3 \oplus V_3
\end{align*}
whence the McKay quiver is of the following shape
$$
\vcenter{
\xymatrix@=2.8cm{
\vtx{1}\ar@/^/[dr]^{X=\left[\begin{smallmatrix}
X_1\\ X_2\\ X_3
\end{smallmatrix}\right]}
&
&\vtx{1}\ar@/_/[dl]_{\left[\begin{smallmatrix}
Z_1\\ Z_2\\ Z_3
\end{smallmatrix}\right]=Z}
\\
&\vtx{3}\ar@/^/[ul]^{x=\left[\begin{smallmatrix}
x_1 & x_2 & x_3
\end{smallmatrix}\right]}\ar@/^/[d]^{y=\left[\begin{smallmatrix}
y_1 & y_2 & y_3
\end{smallmatrix}\right]}\ar@/_/[ur]_{z=\left[\begin{smallmatrix}
z_1 & z_2 & z_3
\end{smallmatrix}\right]}\ar@(ld,l)^{u=\left[\begin{smallmatrix}
u_{11} & u_{12} & u_{13}\\
u_{21} & u_{22} & u_{23}\\
u_{31} & u_{32} & u_{33}
\end{smallmatrix}\right]}\ar@(r,rd)^{v=\left[\begin{smallmatrix}
v_{11} & v_{12} & v_{13}\\
v_{21} & v_{22} & v_{23}\\
v_{31} & v_{32} & v_{33}
\end{smallmatrix}\right]}
\\
&\vtx{1}\ar@/^/[u]^{Y=\left[\begin{smallmatrix}
Y_1\\ Y_2\\ Y_3
\end{smallmatrix}\right]} }
}
$$
Denoting
$
V_0=\mathbb{C} v_0, V_1=\mathbb{C} v_1, V_2=\mathbb{C} v_2
$
and
$
V_3^{(i)}=\mathbb{C} e_1^{(i)} + \mathbb{C} e_2^{(i)} + \mathbb{C} e_3^{(i)},
$
we construct a $G$-equivariant basis for
\begin{align*}
V \otimes R &= V_3 \oplus V_3 \oplus V_3 \oplus (V_0\oplus V_1\oplus V_2\oplus V_3\oplus V_3)\\ &\quad \oplus (V_0\oplus V_1\oplus V_2\oplus V_3\oplus V_3) \oplus (V_0\oplus V_1\oplus V_2\oplus V_3\oplus V_3)
\end{align*}
determined by
\begin{alignat*}{2}
V \otimes V_0&=\mathbb{C}(e_1 \otimes v_0) + \mathbb{C}(e_2 \otimes v_0) + \mathbb{C}(e_3 \otimes v_0)\\
V \otimes V_1&=\mathbb{C}(\rho^2 e_1 \otimes v_1) + \mathbb{C}(e_2 \otimes v_1) + \mathbb{C}(\rho e_3 \otimes v_1)\\
V \otimes V_2&=\mathbb{C}(\rho e_1 \otimes v_2) + \mathbb{C}(e_2 \otimes v_2) + \mathbb{C}(\rho^2 e_3 \otimes v_2)\\
V \otimes V_3^{(i)}&=\mathbb{C}(e_1 \otimes e_1^{(i)}) + \mathbb{C}(e_2\otimes e_2^{(i)}) + \mathbb{C}(e_3 \otimes e_3^{(i)})&\qquad &(V_0)\\
& \quad +\mathbb{C}(\rho^2 e_1 \otimes e_1^{(i)}) + \mathbb{C}(\rho e_2 \otimes e_2^{(i)}) + \mathbb{C}( e_3 \otimes e_3^{(i)})&\qquad &(V_1)\\
& \quad +\mathbb{C}(\rho e_1 \otimes e_1^{(i)}) + \mathbb{C}(\rho^2 e_2 \otimes e_2^{(i)}) + \mathbb{C}( e_3 \otimes e_3^{(i)})&\qquad &(V_2)\\
& \quad +\mathbb{C}(e_2 \otimes e_3) + \mathbb{C}(e_3 \otimes e_1) + \mathbb{C}(e_1 \otimes e_2)&\qquad &(V_3 \sim u)\\
& \quad +\mathbb{C}(e_1 \otimes v_0) + \mathbb{C}(e_2 \otimes v_0) + \mathbb{C}(e_3 \otimes v_0)&\qquad &(V_3 \sim v)
\end{alignat*}
With respect to this basis we obtain the following three $12 \times 12$ matrices
\[
P=\left[\begin{smallmatrix}
0 & 0& 0 & x_1 & 0 & 0 & x_2 & 0 & 0& x_3 & 0 & 0\\
0 & 0& 0 & \rho^2y_1 & 0 & 0 & \rho^2y_2 & 0 & 0& \rho^2y_3 & 0 & 0\\
0 & 0& 0 & \rho z_1 & 0 & 0 & \rho z_2 & 0 & 0& \rho z_3 & 0 & 0\\
X_1 & \rho^2Y_1& \rho Z_1 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 & 0& 0 &0 & 0& u_{11} &0 & 0& u_{12} &0 & 0& u_{13} \\
0& 0 &0 & 0& v_{11} &0 & 0& v_{12} &0 & 0& v_{13} &0 \\
X_2 & \rho^2Y_2& \rho Z_2 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 & 0& 0 &0 & 0& u_{21} &0 & 0& u_{22} &0 & 0& u_{23} \\
0& 0 &0 & 0& v_{21} &0 & 0& v_{22} &0 & 0& v_{23} &0 \\
X_3 & \rho^2Y_3& \rho Z_3 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 & 0& 0 &0 & 0& u_{31} &0 & 0& u_{32} &0 & 0& u_{33} \\
0& 0 &0 & 0& v_{31} &0 & 0& v_{32} &0 & 0& v_{33} &0
\end{smallmatrix}\right], \]
\[
Q=\left[\begin{smallmatrix}
0 &0 & 0& 0 & x_1 & 0 & 0 & x_2 & 0 & 0& x_3 & 0 \\
0 &0 & 0& 0 & \rho y_1 & 0 & 0 & \rho y_2 & 0 & 0& \rho y_3 & 0\\
0 &0 & 0& 0 & \rho^2 z_1 & 0 & 0 & \rho^2 z_2 & 0 & 0& \rho^2 z_3 & 0 \\
0& 0&0 &0 & 0& v_{11} &0 & 0& v_{12} &0 & 0& v_{13} \\
X_1 & Y_1& Z_1 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 &0 & 0& u_{11} &0 & 0& u_{12} &0 & 0& u_{13} & 0&0\\
0& 0&0 &0 & 0& v_{21} &0 & 0& v_{22} &0 & 0& v_{23} \\
X_2 & Y_2& Z_2 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 &0 & 0& u_{21} &0 & 0& u_{22} &0 & 0& u_{23} &0 &0\\
0& 0 &0&0 & 0& v_{31} &0 & 0& v_{32} &0 & 0& v_{33} \\
X_3 & Y_3& Z_3 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 &0 & 0& u_{31} &0 & 0& u_{32} &0 & 0& u_{33} &0 &0
\end{smallmatrix}\right],
\]
\[
R=\left[\begin{smallmatrix}
0&0&0 & 0& 0 & x_1 & 0 & 0 & x_2 & 0 & 0& x_3\\
0&0&0 & 0& 0 & y_1 & 0 & 0 & y_2 & 0 & 0& y_3\\
0&0&0 & 0& 0 & z_1 & 0 & 0 & z_2 & 0 & 0& z_3 \\
0& 0 &0 & 0& u_{11} &0 & 0& u_{12} &0 & 0& u_{13} &0\\
0 &0 & 0& v_{11} &0 & 0& v_{12} &0 & 0& v_{13} &0 & 0\\
X_1 & \rho Y_1& \rho^2 Z_1 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0& 0 &0 & 0& u_{21} &0 & 0& u_{22} &0 & 0& u_{23} & 0 \\
0 &0 & 0& v_{21} &0 & 0& v_{22} &0 & 0& v_{23} &0 & 0\\
X_2 & \rho Y_2& \rho^2 Z_2 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0& 0 &0 & 0& u_{31} &0 & 0& u_{32} &0 & 0& u_{33} &0\\
0 &0 & 0& v_{31} &0 & 0& v_{32} &0 & 0& v_{33} &0 & 0\\
X_3 & \rho Y_3& \rho^2 Z_3 &0 & 0& 0 &0 & 0&0 &0 & 0& 0
\end{smallmatrix}\right].
\]
Setting the three commutators equal to 0, we obtain the constraints:
\begin{gather*}
x(u-v)=0, \quad y(u-\rho^2 v)=0, \quad z(u-\rho v)=0,\\
(u-v)X=0, \quad (u-\rho^2 v)Y=0, \quad (u-\rho v)Z=0,\\
u^2=Xx+Yy+Zz,\\
v^2=Xx+\rho^2Yy+\rho Zz,
\end{gather*}
recovering the result obtained in \cite{BG98}.
\end{example}
\section{Partial desingularizations}
In the previous section we have seen that in many cases of current interest one associates to a singularity $\mathfrak{m}$ an $R$-order $\Lambda$ and a stability structure $\theta$ for the dimension vector $\alpha$ such that $\wis{trep}_n~\Lambda = \wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda$, such that the Zariski open subset
\[
\wis{rep}_{\alpha}^{\theta-semist}~\Lambda \]
of $\theta$-semistable representations is a smooth variety. If this is the case we will call $(\Lambda,\alpha,\theta)$ a {\em good $\mathfrak{m}$-setting}. In this section we will prove that to a good $\mathfrak{m}$-setting one associates a non-commutative desingularization of $\mathfrak{m}$ and a partial commutative desingularization with excellent control on the remaining singularities. We will sketch the procedure in general and then give an explicit description in case $\Lambda$ is a {\em quiver-order}. That is, if
\[
\Lambda \simeq \int_{\alpha}~\frac{\mathbb{C} Q}{I} \]
for some dimension vector $\alpha$ such that $\wis{rep}_{\alpha}~\mathbb{C} Q/I$ contains (a Zariski open subset of) simple representations and where $\int_{\alpha} \mathbb{C} Q/I$ denotes the algebra of $\wis{GL}_n$-equivariant maps
\[
\wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\frac{\mathbb{C} Q}{I} \rTo M_n(\mathbb{C}) \]
if $n$ is the total dimension of $\alpha$.
If $(\Lambda,\alpha,\theta)$ is a good $\mathfrak{m}$-setting we have the diagram explained in the previous section
\[
\xymatrix@R=45pt@C=45pt{
\wis{rep}_{\alpha}^{\theta-semist}~\Lambda \ar@{->>}[d]_q \ar@{->>}[rd]^{q_c} \\
\wis{moduli}_{\alpha}^{\theta}~\Lambda \ar@{->>}[r]^{\pi} & X = \wis{spec}~R
}
\]
where $q$ is the algebraic quotient map and $\pi$ is a projective birational map. To $q$ we will assign a sheaf of smooth orders $\mathcal{A}$ on $\wis{moduli}^{\theta}_{\alpha}~\Lambda$. Let $\cup_D~X_D$ be a Zariski open covering by affine normal varieties of the moduli space $\wis{moduli}_{\alpha}^{\theta}~\Lambda$, then each $X_D$ determines a smooth order $\Lambda_D$ defined by taking the algebra of $\wis{GL}_n$-equivariant maps
\[
\wis{GL}_n \times^{\wis{GL}(\alpha)} q^{-1}(X_D) \rTo M_n(\mathbb{C}) \]
for which $q^{-1}(X_D) \simeq \wis{rep}_{\alpha}~\Lambda_D$. Remark that as $q^{-1}(X_D)$ is a smooth $\wis{GL}(\alpha)$-affine variety, we have that
\[
\wis{trep}_n~\Lambda_D = \wis{GL}_n \times^{\wis{GL}(\alpha)} q^{-1}(X_D) \]
is a smooth $\wis{GL}_n$-variety and therefore $\Lambda_D$ is indeed a smooth order. Taking as sections
\[
\Gamma(X_D,\mathcal{A}) = \Lambda_D,
\]
we obtain a sheaf of smooth orders on $\wis{moduli}_{\alpha}^{\theta}~\Lambda$. We will construct the orders $\Lambda_D$ explicitly if $\Lambda$ is a quiver-order $\int_{\alpha}~\mathbb{C} Q/I$.
Because $\wis{moduli}^{\theta}_{\alpha}~\Lambda = \wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}$ we need control on the generators of all $\theta$-semi-invariants. Such a generating set was found by Aidan Schofield and Michel Van den Bergh in \cite{SchofVdB}: {\em determinantal semi-invariants}. In order to define them we have to introduce some notation first.
Reorder the vertices in $Q$ such that the entries of $\theta$ are separated in three strings
\[
\theta = (\underbrace{t_1,\hdots,t_i}_{> 0},\underbrace{t_{i+1},\hdots,t_j}_{=0},\underbrace{t_{j+1},\hdots,t_k}_{< 0}) \]
and let $\theta$ be such that $\theta.\alpha = 0$. Fix a nonzero weight $l \in \mathbb{N}$ and take arbitrary natural numbers $\{ l_{i+1},\hdots,l_j \}$.
Consider a rectangular matrix $L$ with
\begin{itemize}
\item{$lt_1+\hdots+lt_i+l_{i+1} + \hdots + l_j$ rows and}
\item{$l_{i+1} + \hdots + l_j - l t_{j+1} - \hdots - l t_k$ columns}
\end{itemize}
\[
L = \quad \begin{array}{cc||c|c|c|c|c|c}
& & \overbrace{}^{l_{i+1}} & \hdots & \overbrace{}^{l_j} & \overbrace{}^{-lt_{j+1}} & \hdots & \overbrace{}^{-lt_k} \\
\hline \hline
lt_1& \{ & L_{1,i+1} & & L_{1,j} & L_{1,j+1} & & L_{1,k} \\
\hline
& \vdots & & & & & \\
\hline
lt_i & \{ & L_{i,i+1} & & L_{i,j} & L_{i,j+1} & & L_{i,k} \\
\hline
l_{i+1} & \{ & L_{i+1,i+1} & & L_{i+1,j} & L_{i+1,j+1} & & L_{i+1,k} \\
\hline
& \vdots & & & & & \\
\hline
l_j & \{ & L_{j,i+1} & & L_{j,j} & L_{j,j+1} & & L_{j,k}
\end{array}
\]
in which each entry of $L_{r,c}$ is a linear combination of oriented paths in the quiver $Q$ with starting vertex $v_c$ and ending vertex $v_r$.
The relevance of this is that we can evaluate $L$ at any representation $V \in \wis{rep}_{\alpha}~\Lambda$ and obtain a {\em square matrix} $L(V)$ as $\theta.\alpha = 0$. More precisely, if $V_i$ is the vertex-space of $V$ at vertex $v_i$ (that is, $V_i$ has dimension $e_i$), then evaluating $L$ at $V$ gives a linear map
\[
\xymatrix@R=40pt@C=45pt{
V_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus V_j^{\oplus l_j} \oplus V_{j+1}^{\oplus -lt_{j+1}} \oplus \hdots \oplus V_k^{\oplus -lt_k} \ar[d]^{L(V)}\\
V_1^{\oplus lt_1} \oplus \hdots \oplus V_i^{\oplus lt_i} \oplus V_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus V_j^{\oplus l_j}
}
\]
and $L(V)$ is a square $N \times N$ matrix where
\[
l_{i+1} + \hdots + l_j - lt_{j+1} - \hdots - lt_k = N = lt_1 + \hdots + lt_i + l_{i+1} + \hdots + l_j. \]
So we can consider $D(V) = \wis{det} L(V)$ and verify that $D$ is a $\wis{GL}(\alpha)$-semi-invariant polynomial on $\wis{rep}_{\alpha}~\Lambda$ of weight $\chi_{\theta}^l$. The result of \cite{SchofVdB} asserts that these {\em determinantal semi-invariants} are algebra generators of the graded algebra
\[
\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}. \]
Observe that this result is to semi-invariants what the result of \cite{LBProcesi} is to invariants. In fact, one can deduce the latter from the first.
We have seen that a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is $\theta$-semistable if and only if some semi-invariant of weight $\chi_{\theta}^l$ for some $l$ is non-zero on it. This proves
\begin{theorem} The Zariski open subset of $\theta$-semistable $\alpha$-dimensional $\Lambda$-representations can be covered by affine $\wis{GL}(\alpha)$-stable open subsets
\[
\wis{rep}^{\theta-semist}_{\alpha}~\Lambda = \bigcup_D \{ V~|~D(V) = \wis{det} L(V) \not= 0 \} \]
and hence the moduli space can also be covered by affine open subsets
\[
\wis{moduli}^{\theta}_{\alpha}~\Lambda = \bigcup_D~X_D \]
where
$
X_D = \{ [V] \in \wis{moduli}^{\theta}_{\alpha}~\Lambda~|~D(V)=\wis{det} L(V) \not= 0 \}
$.
\end{theorem}
Analogous to the rectangular matrix $L$ we define a rectangular matrix $N$ with
\begin{itemize}
\item{$lt_1+\hdots+lt_i+l_{i+1} + \hdots + l_j$ columns and}
\item{$l_{i+1} + \hdots + l_j - l t_{j+1} - \hdots - l t_k$ rows}
\end{itemize}
\[
N = \quad \begin{array}{cc||c|c|c|c|c|c}
& & \overbrace{}^{l t_1} & \hdots & \overbrace{}^{l t_i} & \overbrace{}^{l_{i+1}} & \hdots & \overbrace{}^{l_j} \\
\hline \hline
l_{i+1} & \{ & N_{i+1,1} & & N_{i+1,i} & N_{i+1,i+1} & & N_{i+1,j} \\
\hline
& \vdots & & & & & \\
\hline
l_j & \{ & N_{j,1} & & N_{j,i} & N_{j,i+1} & & N_{j,j} \\
\hline
-lt_{j+1} & \{ & N_{j+1,1} & & N_{j+1,i} & N_{j+1,i+1} & & N_{j+1,j} \\
\hline
& \vdots & & & & & \\
\hline
-l t_k & \{ & N_{k,1} & & N_{k,i} & N_{k,i+1} & & N_{k,j}
\end{array}
\]
filled with new variables and define an {\em extended quiver} $Q_D$ where we adjoin for each entry in $N_{r,c}$ an additional arrow from $v_c$ to $v_r$ and denote it with the corresponding variable from $N$.
Let $I_1$ (resp. $I_2$) be the set of relations in $\mathbb{C} Q_D$ determined from the matrix-equations
{\tiny
\[
N.L = \begin{bmatrix} \boxed{(v_{i+1})_{l_{i+1} } } & & & & & 0 \\
& \ddots & & & & \\
& & \boxed{(v_j)_{l_j}} & & & \\
& & & \boxed{(v_{j+1})_{-lt_{j+1}} }& & \\
& & & & \ddots & \\
0 & & & & & \boxed{(v_k)_{-lt_k}}
\end{bmatrix}
\]}
respectively
{\tiny
\[
L.N = \begin{bmatrix}
\boxed{(v_1)_{lt_1}} & & & & & 0 \\
& \ddots & & & & \\
& & \boxed{(v_i)_{lt_i} }& & & \\
& & & \boxed{(v_{i+1})_{l_{i+1}} }& & \\
& & & & \ddots & \\
0 & & & & & \boxed{(v_j)_{l_j}}
\end{bmatrix}
\]}
where $(v_i)_{n_j}$ is the square $n_j \times n_j$ matrix with $v_i$ on the diagonal and zeroes elsewhere.
Define a new quiver order
\[
\Lambda_D = \int_{\alpha}~\frac{\mathbb{C} Q_D}{(I,I_1,I_2)}
\]
then $\Lambda_D$ is a $\mathbb{C}[X_D]$-order in $\wis{alg@n}$. In fact, the construction of $\Lambda_D$ is nothing but a universal localization in the category $\wis{alg@}\alpha$, which is the subcategory of $\wis{alg@n}$ consisting of all $S = \underbrace{\mathbb{C} \times \hdots \times \mathbb{C}}_k$-algebras with trace map specified by $\alpha$.
That is, take $P_i = v_i \Lambda$ be the projective right ideal associated to vertex $v_i$, then $L$ determines a $\Lambda$-module morphism
\[
P = P_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus P_k^{\oplus -lt_k} \rTo^{L} P_1^{\oplus lt_1} \oplus \hdots \oplus P_j^{\oplus l_j} = Q. \]
The algebra map $\Lambda \rTo^{\phi} \Lambda_D$ is universal in $\wis{alg@}\alpha$ with respect to $L \otimes \phi$ being invertible, that is, if $\Lambda \rTo^{\psi} B$ is a morphism in $\wis{alg@}\alpha$ such that $L \otimes \psi$ is an isomorphism of right $B$-modules, then there is a unique map in $\wis{alg@}\alpha$ $\Lambda_D \rTo^u B$ such that $\psi = u \circ \phi$. We claim to have the following situation
\[
\xymatrix@R=40pt@C=45pt{
\wis{rep}^{\theta-semist}_{\alpha}~\Lambda \ar@{->>}[d]_q & q^{-1}(X_D) \simeq \wis{rep}_{\alpha}~\Lambda_D \ar@{_(->}[l] \ar@{->>}[d]\\
\wis{moduli}^{\theta}_{\alpha}~\Lambda & X_D \ar@{_(->}[l]
}
\]
which follows from the next lemma.
\begin{lemma} The following statements are equivalent
\begin{enumerate}
\item{$V \in \wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ lies in $q^{-1}(X_D)$, and}
\item{There is a unique extension $\tilde{V}$ of $V$ such that $\tilde{V} \in \wis{rep}_{\alpha}~\Lambda_D$.}
\end{enumerate}
\end{lemma}
\begin{proof}
$1 \Rightarrow 2$ : Because $L(V)$ is invertible we can take $N(V)$ to be its inverse and decompose it into blocks corresponding to the new arrows in $Q_D$. This then defines the unique extension $\tilde{V} \in \wis{rep}_{\alpha}~Q_D$ of $V$. As $\tilde{V}$ satisfies $I$ (because $V$ does) and $I_1$ and $I_2$ (because $N(V) = L(V)^{-1}$) we have that $\tilde{V} \in \wis{rep}_{\alpha}~\Lambda_D$.
$2 \Rightarrow 1$ : Restrict $\tilde{V}$ to the arrows of $Q$ to get a $V \in \wis{rep}_{\alpha}~Q$. As $\tilde{V}$ (and hence $V$) satisfies $I$, $V \in \wis{rep}_{\alpha}~\Lambda$. Moreover, $V$ is such that $L(V)$ is invertible (this follows because $\tilde{V}$ satisfies $I_1$ and $I_2$). Hence, $D(V) \not= 0$ and because $D$ is a $\theta$-semi-invariant it follows that $V$ is an $\alpha$-dimensional $\theta$-semistable representation of $\Lambda$. An alternative method to see this is as follows. Assume that $V$ is {\em not} $\theta$-semistable and let $V' \subset V$ be a subrepresentation such that $\theta.\wis{dim} V' < 0$. Consider the restriction of the linear map $L(V)$ to the subrepresentation $V'$ and look at the commuting diagram
\[
\xymatrix@R=40pt@C=45pt{
V_{i+1}^{'\oplus l_{i+1}} \oplus \hdots \oplus V_k^{'\oplus -lt_k} \ar[r]^{L(V)|V'} \ar@{^(->}[d] & V_1^{'\oplus lt_1} \oplus \hdots \oplus V_j^{'\oplus l_j} \ar@{^(->}[d] \\
V_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus V_k^{\oplus -lt_k} \ar[r]^{L(V)} & V_1^{\oplus lt_1} \oplus \hdots \oplus V_j^{\oplus l_j}
}
\]
As $\theta. \wis{dim} V' < 0$ the top-map must have a kernel which is clearly absurd as we know that $L(V)$ is invertible.
\end{proof}
The universal property of the universal localizations $\Lambda_D$ allows us to glue these orders together into a coherent sheaf on $\wis{moduli}_{\alpha}^{\theta}~\Lambda$. Let $\Lambda_{D_1}$ (resp. $\Lambda_{D_2}$) be the order constructed from a rectangular matrix $L_1$ (resp. $L_2$), then we can construct the direct sum map $L = L_1 \oplus L_2$ for which the corresponding semi-invariant $D=D_1D_2$. As $\Lambda \rTo \Lambda_D$ makes the projective module morphisms associated to $L_1$ and $L_2$ into an isomorphism we have uniquely determined maps in $\wis{alg@}\alpha$
\[
\xymatrix{
& \Lambda_D \\
\Lambda_{D_1} \ar[ur]^{i_1} & & \Lambda_{D_2} \ar[ul]_{i_2}
}
\qquad \text{whence}
\qquad
\xymatrix@C=10pt{
& \wis{rep}_{\alpha}~\Lambda_D \ar[dl]_{i_1^*} \ar[dr]^{i_2^*}\\
\wis{rep}_{\alpha}~\Lambda_{D_1} & & \wis{rep}_{\alpha}~\Lambda_{D_2}
}
\]
Because $\wis{rep}_{\alpha}~\Lambda_D = q^{-1}(X_D)$ (and similarly for $D_i$) we have that $i_j^*$ are embeddings as are the $i_j$. This way we can glue the sections $\Gamma(X_{D_1},{\mathcal A}) = \Lambda_{D_1}$ with $\Gamma(X_{D_2},{\mathcal A}) = \Lambda_{D_2}$ over their intersection $X_D = X_{D_1} \cap X_{D_2}$ via the inclusions $i_j$. Hence we get a coherent sheaf of non-commutative algebras ${\mathcal A}$ over $\wis{moduli}^{\theta}_{\alpha}~\Lambda$. Further, by localizing the orders $\Lambda_{D_j}$ at the central element $D$ we have that the algebra morphisms $i_j$ are central extensions, that is satisfying
\[
\Lambda_D = \Lambda_{D_j} Z(\Lambda_D) \]
which implies that we have morphisms between the non-commutative structure sheaves
\[
(\wis{spec}~\Lambda_{D_j},{\mathcal O}^{nc}_{\Lambda_{D_j}}) \rTo (\wis{spec}~\Lambda_D,{\mathcal O}^{nc}_{\Lambda_D}) \]
which allow us to define a non-commutative variety $\wis{spec}~{\mathcal A}$ by gluing the non-commutative structure sheaves of the various $\Lambda_{D_j}$ together. Observe that the central scheme of this non-commutative variety is $\wis{moduli}_{\alpha}^{\theta}~\Lambda$ with its structure sheaf. This concludes the proof of the following result.
\begin{theorem} Let $(\Lambda,\alpha,\theta)$ be a good $\mathfrak{m}$-setting. Then, there is a sheaf of smooth orders ${\mathcal A}$ over the moduli space $\wis{moduli}_{\alpha}^{\theta}~\Lambda$ such that the diagram below is commutative
\[
\xymatrix@R=40pt@C=45pt{
\wis{spec}~{\mathcal A} \ar[d]_c \ar[rd]^{\phi} \\
\wis{moduli}^{\theta}_{\alpha}~\Lambda \ar@{->>}[r]^{\pi} & X = \wis{spec}~R
}
\]
Here, $\wis{spec}~{\mathcal A}$ is a non-commutative variety obtained by gluing affine non-commutative structure sheaves $(\wis{spec}~\Lambda_D,{\mathcal O}^{nc}_{\Lambda_D})$ together and where $c$ is the map which intersects locally a prime ideal of $\Lambda_D$ with its center. Because ${\mathcal A}$ is a sheaf of smooth orders in $\wis{alg@n}$, $\phi$ can be viewed as a {\em non-commutative desingularization} of $X$.
Moreover, if $\theta$ is such that all $\theta$-semistable $\alpha$-dimensional $\Lambda$-representations are actually $\theta$-stable, then ${\mathcal A}$ is a sheaf of Azumaya algebras over $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ and in this case $\pi$ is a commutative desingularization of $X$. If, in addition, also $\alpha$ is an indivisible dimension vector (that is, $\gcd(\alpha) = 1$) then
${\mathcal A} \simeq End~\mathcal{P}$ for some vector bundle $\mathcal{P}$ of rank $n$ over $\wis{moduli}^{\theta}_{\alpha}~\Lambda$.
\end{theorem}
In general, there may remain singularities in $\wis{moduli}^{\theta}_{\alpha}~\Lambda$, but these have been fully classified in dimensions $\leq 6$ and reduction steps exist which prove that in each dimension there is a finite list of such possible remaining singularities. We will recall these steps briefly, the starting point being the local marked quiver setting $(Q^{\dagger},\alpha)$ associated to a point $\mathfrak{n} \in \wis{moduli}^{\theta}_{\alpha}~\Lambda$. Remark that $\mathfrak{n} \in X_D$ for some $D$ and as $\Lambda_D$ is a smooth order in $\wis{alg@n}$ the defect $\wis{def}_{\mathfrak{n}}~\Lambda_D = 0$ so the local marked quiver setting determines the \'etale local structure of $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ near $\mathfrak{n}$.
The reduction steps below were discovered by R. Bocklandt in his Ph.D. thesis \cite{BocklandtThesis} (see also \cite{Bocklandtpaper}) in which he classifies quiver settings having a regular ring of invariants. These steps were slightly extended in \cite{RBLBVdW} in order to classify central singularities of smooth orders. All reductions are made locally around a vertex in the marked quiver. There are three types of allowed moves
\par \vskip 3mm
\noindent
{\bf 1. Vertex removal}
Assume we have a marked quiver setting $(Q^{\dagger},\alpha)$ and a vertex $v$ such that the local structure of $(Q^{\dagger},\alpha)$ near $v$ is indicated by the picture on the left below, that is, inside the vertices we have written the components of the dimension vector and the subscripts of an arrow indicate how many such arrows there are in $Q^{\dagger}$ between the indicated vertices.
Define the new marked quiver setting $(Q^{\dagger}_R,\alpha_R)$ obtained by the operation $R^v_V$ which removes the vertex $v$ and composes all arrows through $v$, the dimensions of the other vertices are unchanged :
\[
\left[ ~\vcenter{
\xymatrix@=1cm{
\vtx{u_1}&\cdots &\vtx{u_k}\\
&\vtx{\alpha_v}\ar[ul]^{b_1}\ar[ur]_{b_k}&\\
\vtx{i_1}\ar[ur]^{a_1}&\cdots &\vtx{i_l}\ar[ul]_{a_l}}}
~\right] \quad
\rTo^{R^v_V} \quad
\left[~\vcenter{
\xymatrix@=1cm{
\vtx{u_1}&\cdots &\vtx{u_k}\\
&&\\
\vtx{i_1}\ar[uu]^{c_{11}}\ar[uurr]_<<{c_{1k}}&\cdots &\vtx{i_l}\ar[uu]|{c_{lk}}\ar[uull]^<<{c_{l1}}}}
~\right].
\]
where $c_{ij} = a_ib_j$ (observe that some of the incoming and outgoing vertices may be the
same so that one obtains loops in the corresponding vertex). One is allowed to make this reduction step provided either of the following conditions is met
\[
\chi_Q(\alpha,\epsilon_v) \geq 0 \quad \Leftrightarrow \quad \alpha_v \geq \sum_{j=1}^l a_j i_j \]
\[
\chi_Q(\epsilon_v,\alpha) \geq 0\quad \Leftrightarrow \quad \alpha_v \geq \sum_{j=1}^k b_j u_j \]
(observe that if we started off from a marked quiver setting $(Q^{\dagger},\alpha)$ coming from an order, then these inequalities must actually be equalities).
\par \vskip 3mm
\noindent
{\bf 2. loop removal}
If $v$ is a vertex with vertex-dimension $\alpha_v = 1$ and having $k \geq 1$ loops, then let $(Q^{\dagger}_R,\alpha_R)$ be the marked quiver setting obtained by the loop removal operation $R^v_l$
\[
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{1}\ar@{..}[r]\ar@{..}[l]\ar@(lu,ru)@{=>}^k&}}
~\right]\quad \rTo^{R^v_l} \quad
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{1}\ar@{..}[r]\ar@{..}[l]\ar@(lu,ru)@{=>}^{k-1}&}}
~\right],\]
removing one loop in $v$ and keeping the same dimension vector.
\par \vskip 3mm
\noindent
{\bf 3. Loop removal}
If the local situation in $v$ is such that there is exactly one (marked) loop in $v$, the dimension vector in $v$ is $k \geq 2$ and there is exactly one arrow leaving $v$ and this to a vertex with dimension vector $1$, then one is allowed to make the reduction $R^v_L$ indicated below
\[
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar[dl]\ar@(lu,ru)|{\bullet}&&\\
\vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}}
~\right]\quad \rTo^{R^v_L} \quad
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar@2[dl]_{k}&&\\
\vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}}
~\right],
\]
\[
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar[dl]\ar@(lu,ru)&&\\
\vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}}
~\right]\quad \rTo^{R^v_L} \quad
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar@2[dl]_k&&\\
\vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}}
~\right].
\]
Similarly, if there is one (marked) loop in $v$ and $\alpha_v = k \geq 2$ and there is only one arrow arriving at $v$ coming from a vertex of dimension vector $1$, then one is allowed to make the reduction $R^v_L$
\[
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar[d]\ar[drr]\ar@(lu,ru)|{\bullet}&&\\
\vtx{1}\ar[ur]&\vtx{u_1}&\cdots &\vtx{u_m}}}
~\right]\quad \rTo^{R^v_L} \quad
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar[d]\ar[drr]&&\\
\vtx{1}\ar@2[ur]^k&\vtx{u_1}&\cdots &\vtx{u_m}}}
~\right], \]
\[
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar[d]\ar[drr]\ar@(lu,ru)&&\\
\vtx{1}\ar[ur]&\vtx{u_1}&\cdots &\vtx{u_m}}}
~\right]\quad \rTo^{R^v_L} \quad
\left[~\vcenter{
\xymatrix@=1cm{
&\vtx{k}\ar[d]\ar[drr]&&\\
\vtx{1}\ar@2[ur]^k&\vtx{u_1}&\cdots &\vtx{u_m}}}
~\right].
\]
The relevance of these reduction rules is that if
\[
(Q^{\dagger}_1,\alpha_1) \rightsquigarrow (Q^{\dagger}_2,\alpha_2) \]
is a sequence of legal reductions, then
\[
\mathbb{C}[\wis{rep}_{\alpha_1}~Q^{\dagger}_1]^{\wis{GL}(\alpha_1)} \simeq \mathbb{C}[\wis{rep}_{\alpha_2}~Q^{\dagger}_2]^{\wis{GL}(\alpha_2)}[y_1,\hdots,y_z] \]
where $z$ is the sum of all loops removed in $R^v_l$ reductions plus the sum of $\alpha_v$ for each reduction step $R^v_L$ involving a genuine loop and the sum of $\alpha_v - 1$ for each reduction step $R^v_L$ involving a marked loop. That is, marked quiver settings which belong to the same reduction tree have smooth equivalent invariant rings.
\begin{theorem} Let $(Q^{\dagger},\alpha)$ be a marked quiver setting, then there is a unique reduced setting (that is, having no further admissible reduction steps)
$(Q^{\dagger}_0,\alpha_0)$ for which there exists a reduction procedure
\[
(Q^{\dagger},\alpha) \rightsquigarrow (Q^{\dagger}_0,\alpha_0). \]
We will denote this unique setting by $Z(Q^{\dagger},\alpha)$.
\end{theorem}
The following result is a slight adaptation of Bocklandt's main result \cite{Bocklandtpaper}.
\begin{theorem} Let $(Q^{\dagger}_{\mathfrak{n}},\alpha_{\mathfrak{n}})$ be the local marked quiver setting of $\mathfrak{n} \in \wis{moduli}^{\theta}_{\alpha}~\Lambda$. Then, $\mathfrak{n}$ is a smooth point if and only if the unique associated reduced setting
\[
Z(Q^{\dagger}_{\mathfrak{n}},\alpha_{\mathfrak{n}}) \in \{~\xymatrix{\vtx{k}} \qquad \xymatrix{\vtx{k} \ar@(ul,ur)} \qquad \xymatrix{\vtx{k} \ar@(ul,ur)|{\bullet}} \qquad \xymatrix{\vtx{2} \ar@(dl,ul) \ar@(dr,ur)} \qquad \xymatrix{\vtx{2} \ar@(dl,ul) \ar@(dr,ur)|{\bullet}} \qquad \xymatrix{\vtx{2} \ar@(dl,ul)|{\bullet} \ar@(dr,ur)|{\bullet}} ~\}.
\]
The Azumaya points are such that $Z(Q^{\dagger}_{\mathfrak{n}},\alpha_{\mathfrak{n}}) = \xymatrix{\vtx{1}}$ hence the singular locus of $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ is contained in the ramification locus of $\mathcal{A}$ but may be strictly smaller.
\end{theorem}
To classify the central singularities of smooth orders we may reduce to zero-settings $(Q^{\dagger},\alpha) = Z(Q^{\dagger},\alpha)$. For such a setting we have for all vertices $v_i$ the inequalities
\[
\chi_Q(\alpha,\delta_i) < 0 \qquad \text{and} \qquad \chi_Q(\delta_i,\alpha) < 0 \]
and the dimension of the central variety can be computed from the Euler-form $\chi_Q$. This gives us an estimate of $d = \wis{dim}~X = \wis{dim}~\wis{moduli}^{\theta}_{\alpha}~\Lambda$ which is very efficient to classify the singularities in low dimensions.
\begin{theorem} \label{counting} Let $(Q^{\dagger},\alpha) = Z(Q^{\dagger},\alpha)$ be a reduced setting on $k \geq 2$ vertices. Then,
\[
\wis{dim}~X \geq 1 + \sum_{\xymatrix@=1cm{ \vtx{a} }}^{a \geq 1} a +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet} }}^{a > 1}(2a-1) +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)}}^{a > 1}(2a) + \sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet}\ar@(ur,dr)|{\bullet}}}^{a > 1} (a^2+a-2) + \]
\[
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet}\ar@(ur,dr)}}^{a > 1} (a^2+a-1) +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)\ar@(ur,dr)}}^{a > 1} (a^2+a) + \hdots +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet}_{k}\ar@(ur,dr)^{l}}}^{a > 1} ((k+l-1)a^2+a-k) + \hdots
\]
In this sum the contribution of a vertex $v$ with $\alpha_v = a$ is determined by the number of
(marked) loops in $v$. By the reduction steps (marked) loops only occur at vertices where
$\alpha_v > 1$.
\end{theorem}
For example, this shows that there are no central singularities in dimension $d=2$ and that for $d=3$ the only reduced singular setting is
\[
Z(Q^{\dagger},\alpha) = \xymatrix{\vtx{1} \ar@/^2ex/[rr]_a \ar@/^4ex/[rr]^b & & \vtx{1} \ar@/^2ex/[ll]_c \ar@/^4ex/[ll]^d}. \]
The ring of polynomial invariants $R^{\alpha}_{Q^{\dagger}}$ is generated by traces along oriented cycles in $Q^{\dagger}$ so is generated by the invariants
\[
x = ac, \quad y = ad, \quad u = bc \quad \text{and} \quad v = bd~\qquad \text{whence}~\qquad~R^{\alpha}_{Q^{\dagger}} \simeq \frac{\mathbb{C}[x,y,u,v]}{(xy-uv)}. \]
Hence, the only \'etale type of central singularity in dimension three is the {\em conifold singularity}.
\begin{example}[dimension $d=4$]
If $(Q^{\dagger},\alpha)$ is a reduced setting for dimension $4$ then $Q^{\dagger}$ can have at most three vertices. If there is just one, its dimension must be $1$ (smooth setting) or $2$ in which case the only new type is
\[
Z(Q^{\dagger},\alpha) = \qquad \xymatrix{\vtx{2} \ar@(ul,dl) \ar@(ur,dr)|{\bullet}} \]
which is again a smooth setting.
If there are two vertices, both must have dimension $1$ and have at least two incoming and outgoing arrows as in the previous example. The only new type that occurs is
\[
Z(Q^{\dagger},\alpha) = \xymatrix{ \vtx{1} \ar@/^/[rr] \ar@/^3ex/[rr] & & \vtx{1} \ar@/^/[ll] \ar@/^2ex/[ll] \ar@/^3ex/[ll]} \]
\par \vskip 2mm
\noindent
for which one calculates as before the ring of invariants to be
\[
R^{\alpha}_{Q^{\dagger}} = \frac{\mathbb{C}[a,b,c,d,e,f]}{(ae-bd,af-cd,bf-ce)}. \]
If there are three vertices all must have dimension $1$ and each vertex must have at least two incoming and two outgoing arrows. There are just two such possibilities in dimension $4$
\[
Z(Q^{\dagger},\alpha) \in \left\{~\vcenter{\xymatrix{\vtx{1}\ar@/^/[rr]\ar@/^/[rd]&&\vtx{1}\ar@/^/[ll]\ar@/^/[ld]\\
&\vtx{1}\ar@/^/[ru]\ar@/^/[lu]&}} \qquad \vcenter{
\xymatrix{\vtx{1}\ar@2@/^/[rr]&&\vtx{1}\ar@2@/^/[ld]\\
&\vtx{1}\ar@2@/^/[lu]}}~ \right\}.
\]
The corresponding rings of polynomial invariants are
\[
R^{\alpha}_{Q^{\dagger}} = \frac{\mathbb{C}[x_1,x_2,x_3,x_4,x_5]}{(x_4x_5-x_1x_2x_3)} \qquad \text{resp.} \qquad
R^{\alpha}_{Q^{\dagger}} = \frac{\mathbb{C}[x_1,x_2,x_3,x_4,y_1,y_2,y_3,y_4]}{R_2} \]
where $R_2$ is the ideal generated by all $2 \times 2$ minors of the matrix
\[
\begin{bmatrix}
x_1 & x_2 & x_3 & x_4 \\
y_1 & y_2 & y_3 & y_4 \end{bmatrix}
\]
\end{example}
In \cite{RBLBVdW} it was proved that there are exactly ten types of smooth order central singularities in dimension $d=5$ and $53$ in dimension $d=6$.
\section{The conifold algebra}
Quiver-diagrams play an important role in string theory as they encode intersection information of so-called {\em wrapped $D$-branes} (higher dimensional strings) in Calabi-Yau manifolds. One of the earliest models, studied by I. R. Klebanov and E. Witten \cite{KlebanovWitten}, was based on the conifold singularity (see previous section). A {\em $D3$-brane} is a three-dimensional (over the real numbers $\mathbb{R}$) submanifold of a Calabi-Yau manifold and as this is a six-dimensional (again over the real numbers) manifold it follows that two $D3$-branes in sufficiently general position intersect each other in a finite number of points. If one wraps two sufficiently general $D3$-branes around a conifold singularity, their intersection data will be encoded in the quiver-diagram
\[
\xymatrix{\vtx{} \ar@/^1ex/[rr]|{x_1} \ar@/^3ex/[rr]|{x_2} & & \vtx{} \ar@/^1ex/[ll]|{y_1} \ar@/^3ex/[ll]|{y_2}}.
\]
Without going into details (for more information see \cite{Berenstein}) one can associate to such a quiver-diagram a non-commutative algebra describing the vacua with respect to a certain {\em super-potential} which is a suitable linear combination of oriented cycles in the quiver-diagram. In the case of two $D3$-branes wrapped around a conifold singularity one obtains :
\begin{definition} The {\em conifold algebra} $\Lambda_c$ is the non-commutative affine $\mathbb{C}$-algebra generated by three non-commuting variables $X,Y$ and $Z$ and satisfying the following relations
\[
\begin{cases}
XZ &= - ZX \\
YZ &= - ZY \\
X^2Y &= YX^2 \\
Y^2X &= XY^2 \\
Z^2 &= 1
\end{cases}
\]
That is, $\Lambda_c$ has a presentation
\[
\Lambda_c = \frac{\mathbb{C} \langle X,Y,Z \rangle}{(Z^2-1,XZ+ZX,YZ+ZY,[X^2,Y],[Y^2,X])} \]
where $[A,B]=AB-BA$ denotes the commutator.
One sometimes encounters another presentation of $\Lambda_c$ as
\[
\frac{\mathbb{C} \langle X,Y,Z \rangle}{(Z^2-1,XZ+ZX,YZ+ZY,[Z[X,Y],X],[Z[X,Y],Y])} \]
but as $Z$ is a unit, it is easily seen that both presentations give isomorphic $\mathbb{C}$-algebras.
\end{definition}
\begin{proposition} In the conifold algebra $\Lambda_c$ the elements
\[
x = X^2, \qquad y = Y^2 \qquad \text{and} \qquad z = \frac{1}{2}(XY+YX) \]
are algebraically independent central elements and $\Lambda_c$ is a free module over the central subalgebra $C = \mathbb{C}[x,y,z]$ with basis
\[
\Lambda_c = C.1 \oplus C.X \oplus C.Y \oplus C.Z \oplus C.XY \oplus C.XZ \oplus C.YZ \oplus C.XYZ \]
In fact, the conifold algebra is a skew group algebra
\[
\Lambda_c \simeq \mathbb{C}[z,X][Y,\sigma,\delta] \# \mathbb{Z}/2\mathbb{Z} \]
for some automorphism $\sigma$ and $\sigma$-derivation $\delta$. In particular, $\Lambda_c$ is a regular algebra of dimension three.
\end{proposition}
\begin{proof} Consider the subalgebra $S$ of $\Lambda_c$ generated by $X$ and $Y$, that is
\[
S = \frac{\mathbb{C} \langle X,Y \rangle}{([X^2,Y],[Y^2,X])} \]
Then clearly $x$ and $y$ are central elements of $S$ as is $z = \frac{1}{2}(XY+YX)$ because
\[
(XY+YX)X = XYX+YX^2=YXY+X^2Y=X(YX+XY) \]
Now, consider the Ore extension
\[
S' = \mathbb{C}[z,X][Y,\sigma,\delta] \quad \text{with} \quad \sigma(z)=z,\sigma(X)=-X\quad \text{and} \quad \delta(z)=0, \delta(X)=2z \]
This means that $z$ is a central element of $S'$ and that $YX=\sigma(X)Y+\delta(X)=-XY+2z$ whence
the map
\[
S \rTo S' \qquad \text{defined by} \qquad X \mapsto X \quad \text{and} \quad Y \mapsto Y \]
is an isomorphism. By standard results, the {\em center} of $S'$ is equal to
\[
Z(S') = \mathbb{C}[x,y,z] \]
whence the three elements are algebraically independent. Consider the automorphism defined by
$\phi(X) = -X$ and $\phi(Y)=-Y$ on $S$, then the conifold algebra can be written as the {\em skew group ring}
\[
\Lambda_c \simeq S \# \mathbb{Z}/2\mathbb{Z} \]
As $Z(S) = \mathbb{C}[x,y,z]$ is fixed under $\phi$ the elements $x = x \# 1$, $y = y \# 1$ and $z = z \# 1$ are central in $\Lambda_c$ and as $S'$ is free over $Z(S')$ with basis
\[
S' = Z(S').1 \oplus Z(S').X \oplus Z(S').Y \oplus Z(S').XY \]
the result on freeness of $\Lambda_c$ over $\mathbb{C}[x,y,z]$ follows.
\end{proof}
If $C$ is a commutative $\mathbb{C}$-algebra and if $B_q = (b_{ij})$ is a {\em symmetric} $m \times m$ matrix with entries in $C$, then we have a {\em bilinear form} on the free $C$-module $V = C \oplus \hdots \oplus C$ of rank $m$ defined by
\[
B_q(v,w) = \begin{bmatrix} v_1 & v_2 & \hdots & v_m \end{bmatrix}.\begin{bmatrix} b_{11} & b_{12} & \hdots & b_{1m} \\
b_{12} & b_{22} & \hdots & b_{2m} \\
\vdots & \vdots & & \vdots \\
b_{1m} & b_{2m} & \hdots & b_{mm} \end{bmatrix}.\begin{bmatrix} w_1 \\ w_2 \\ \vdots \\ w_m \end{bmatrix}.
\]
The associated {\em Clifford algebra} $Cl_q(V)$ is then the quotient of the {\em tensor algebra} $T_C(V) = C \langle v_1,\hdots,v_m \rangle$ where $\{ v_1,\hdots,v_m \}$ is a basis of the free $C$-module $V$ and the defining relations are
\[
Cl_q(V) = \frac{T_C(V)}{(v \otimes w + w \otimes v - 2B_q(v,w)~:~v,w \in V)} .\]
As an example, the algebra $S \simeq S'$ constructed in the above proof is the Clifford algebra of the binary quadratic form over $C = \mathbb{C}[x,y,z]$
\[
B_q = \begin{bmatrix} x & z \\ z & y \end{bmatrix} \qquad \text{on} \qquad V = C.X \oplus C.Y \]
as $B_q(X,X)=x, B_q(Y,Y)=y$ and $B_q(X,Y) = z$. As the entries of the symmetric matrix are independent variables, we call this algebra the {\em generic binary Clifford algebra}, see \cite{LB2x2} for more details and the structure of higher generic Clifford algebras.
\begin{lemma} The conifold algebra $\Lambda_c$ is the {\em Clifford algebra} of a non-degenerate ternary quadratic form over $\mathbb{C}[x,y,z]$.
\end{lemma}
\begin{proof}
Consider the free $C=\mathbb{C}[x,y,z]$-module of rank three $V = C.X \oplus C.Y \oplus C.Z$ and the symmetric $3 \times 3$ matrix
\[
B_q = \begin{bmatrix} x & z & 0 \\ z & y & 0 \\ 0 & 0 & 1 \end{bmatrix}
\]
then it follows that $\Lambda_c \simeq Cl_q(V)$ as $B_q(X,Z)=0, B_q(Y,Z)=0$, $B_q(Z,Z)=1$ and the remaining inner products are those of $S \simeq S'$ above.
\end{proof}
Whereas $C=\mathbb{C}[x,y,z]$ is a central subalgebra of $\Lambda_c$, the center itself is strictly larger. Take $D=XYZ-YXZ$ and verify that
\begin{align*}
(XYZ-YXZ)X &= -X(2z-XY)Z+xYZ \\
&= -2zXZ+2xYZ \\
&= xYZ - (2zXZ-YX^2Z) \\
&= X(XYZ-YXZ)
\end{align*}
and a similar calculation shows that $DY=YD$ and $DZ=ZD$. Moreover, $D \notin \mathbb{C}[x,y,z]$. Indeed, in the description $\Lambda_c \simeq S \# \mathbb{Z}/2\mathbb{Z}$ we have that
\[
\mathbb{C}[x,y,z] \subset S \# 1 \qquad \text{whereas} \qquad D = XYZ-YXZ = (XY-YX) \# Z \in S \# Z. \]
Moreover, we have that $D^2 \in \mathbb{C}[x,y,z]$ because
\[
D^2 = (XYZ-YXZ)^2 = 2z(XY+YX) - 4xy = 4(z^2-xy) \in \mathbb{C}[x,y,z]. \]
\begin{lemma} The center $R_c$ of the conifold algebra $\Lambda_c$ is isomorphic to the coordinate ring of the conifold singularity
\[
R_c \simeq \frac{\mathbb{C}[a,b,c,d]}{(ab-cd)}. \]
\end{lemma}
\begin{proof}
Let $Z$ be the central subalgebra generated by $x,y,z$ and $D$, then a representation of $Z$ is
\[
Z = \frac{\mathbb{C}[x,y,z,D]}{(D^2-4(z^2-xy))} \simeq \frac{\mathbb{C}[a,b,c,d]}{(ab-cd)} \]
where the second isomorphism comes from the following change of coordinates
\[
a = D + 2z,\quad b = D-2z,\quad c=2x \quad \text{and} \quad d = 2y .\]
As a consequence $Z$ is the coordinate ring of the conifold singularity and is in particular integrally closed. As $\Lambda_c$ is a finite module over $Z$ it follows that if $Z \not= R_c$ then the field of fractions $L$ of $R_c$ would be a proper extension of the field of fractions $K$ of $Z$. This can be contradicted using classical results on Clifford algebras over fields. To begin, note that as the ternary form
\[
B_q = \begin{bmatrix} x & z & 0 \\ z & y & 0 \\ 0 & 0 & 1 \end{bmatrix} \]
has square-free determinant $xy-z^2 \notin \mathbb{C}(x,y,z)^{*2}$, the Clifford algebra over the rational field $\mathbb{C}(x,y,z)$
\[
\Lambda_c \otimes_{\mathbb{C}[x,y,z]} \mathbb{C}(x,y,z) \]
is a central simple algebra of dimension $4$ over its center $K'$ which is a quadratic field extension of $\mathbb{C}(x,y,z)$ determined by adjoining the square root of the determinant. As $[K : \mathbb{C}(x,y,z)] = 2$ it follows that $K=K'$ and hence also that $K=L$ whence $Z=R_c$.
\end{proof}
Let us relate the non-commutative affine variety $\wis{spec}~\Lambda_c$ with that of the central subalgebra $\wis{spec}~\mathbb{C}[x,y,z] = \mathbb{A}^3$.
\begin{lemma} Intersecting two-sided prime ideals of $\Lambda_c$ with the central subalgebra $\mathbb{C}[x,y,z]$ determines a continuous map
\[
\wis{spec}~\Lambda_c \rTo^{\phi} \mathbb{A}^3 \]
with the following fiber information :
\begin{enumerate}
\item{If $\mathfrak{n} \notin \mathbb{V}(xy-z^2)$, then $\phi^{-1}(\mathfrak{n})$ consists of two points.}
\item{If $(x,y,z) \not= \mathfrak{n} \in \mathbb{V}(xy-z^2)$, then $\phi^{-1}(\mathfrak{n})$ consists of one point.}
\item{If $(x,y,z) = \mathfrak{n}$, then $\phi^{-1}(\mathfrak{n})$ consists of two points.}
\end{enumerate}
\end{lemma}
\begin{proof} For $P=(a,b,c) \in \mathbb{A}^3$ the quotient of $\Lambda_c$ by the extended two-sided ideal $\Lambda_c \mathfrak{n}_P$ is the Clifford algebra $Cl_P$ over $\mathbb{C}$ of the ternary quadratic form
\[
B_P = \begin{bmatrix} a & c & 0 \\ c & b & 0 \\ 0 & 0 & 1 \end{bmatrix} \]
and the elements of $\phi^{-1}(\mathfrak{n}_P)$ are the two-sided maximal ideals of $Cl_P$. We can diagonalize the symmetric matrix, that is there is a base-change matrix $M \in \wis{GL}_3$ such that
\[
M^{\tau}.\begin{bmatrix} a & c & 0 \\ c & b & 0 \\ 0 & 0 & 1 \end{bmatrix}.M = \begin{bmatrix} u & 0 & 0 \\ 0 & v & 0 \\ 0 & 0 & 1 \end{bmatrix} = B_Q \]
(with $uv = ab-c^2$) and hence $Cl_P \simeq Cl_Q$. The Clifford algebra $Cl_Q$ is the $8$-dimensional $\mathbb{C}$-algebra generated by $x_1,x_2$ and $x_3$ satisfying the defining relations
\[
x_1^2=u,~x_2^2=v,~x_3^2=1 \qquad \text{and} \qquad x_ix_j+x_jx_i=0~\text{for $i \not= j$.} \]
If $uv \not= 0$ then $B_Q$ is a non-degenerate ternary quadratic form with determinant a square in $\mathbb{C}^*$ whence $Cl_Q$ is the direct sum of two copies of $M_2(\mathbb{C})$. If $uv=0$, say $u=0$ and $v \not= 0$, then $x_1$ generates a nilpotent two-sided ideal of $Cl_Q$ and the quotient is the Clifford algebra of the non-degenerate binary quadratic form
\[
B_R = \begin{bmatrix} v & 0 \\ 0 & 1 \end{bmatrix} \qquad \text{whence} \qquad Cl_R \simeq M_2(\mathbb{C}) \]
as any such algebra is a quaternion algebra. Finally, if both $u=0=v$ then the two-sided ideal $I$ generated by $x_1$ and $x_2$ is nilpotent and the quotient
\[
Cl_R/I = \mathbb{C}[x_3]/(x_3^2-1) \simeq \mathbb{C} \oplus \mathbb{C} .\]
As the maximal ideals of a non-commutative algebra $R$ and of a quotient $R/I$ by a nilpotent ideal $I$ coincide, the statements follow.
\end{proof}
\begin{lemma} Intersecting with the center $R_c$ determines a continuous map
\[
\wis{spec}~\Lambda_c \rTo^{\psi} \wis{spec}~R_c,
\]
which is a one-to-one correspondence away from the unique singularity of $\wis{spec}~R_c$ where the fiber consists of two points.
\end{lemma}
\begin{proof} The inclusion $\mathbb{C}[x,y,z] \subset R_c$ determines a two-fold cover
\[
\wis{spec}~R_c = \mathbb{V}(D^2-4(z^2-xy)) \subset \mathbb{A}^4 \rOnto^c \mathbb{A}^3 \qquad (x,y,z,D) \mapsto (x,y,z) \]
which is {\em ramified} over $\mathbb{V}(z^2-xy)$. That is, if $P=(a,b,c) \notin \mathbb{V}(z^2-xy)$ then there are exactly two points lying over it
\[
P_1 = (a,b,c,+\sqrt{c^2-ab}) \qquad \text{and} \qquad P_2 = (a,b,c,-\sqrt{c^2-ab}). \]
On the other hand, if $P = (a,b,c) \in \mathbb{V}(z^2-xy)$, then there is just one point lying over it : $(a,b,c,0)$. The statement then follows from combining this covering information with the composition map
\[
\wis{spec}~\Lambda_c \rTo^{\psi} \wis{spec}~R_c \rTo^c \mathbb{A}^3 \]
which is $\phi$ in the foregoing lemma.
\end{proof}
Observe that $\psi$ is a homeomorphism on $\wis{spec}~\Lambda_c - \mathbb{V}(x,y,z)$ and hence can be seen as a non-commutative birational map. If $\mathfrak{m}$ lies in this open set then
\[
\Lambda_c/\mathfrak{m} \simeq M_2(\mathbb{C}) \]
whereas for the two maximal ideals $\mathfrak{m}_+ = (X,Y,Z-1)$ and $\mathfrak{m}_- = (X,Y,Z+1)$ lying over the conifold singularity we have
\[
\Lambda_c/\mathfrak{m}_+ \simeq \mathbb{C} \simeq \Lambda_c/\mathfrak{m}_-~.\]
We denote the associated one-dimensional $\Lambda_c$-representations by $\phi_+$ resp. $\phi_-$. It is easy to verify that these are the only one-dimensional $\Lambda_c$-representations.
\begin{proposition} For the conifold algebra $\Lambda_c$, the representation variety $\wis{rep}_2~\Lambda_c$ is a smooth affine variety having three disjoint irreducible components. Two of these components are a point, the third component $\wis{trep}_2~\Lambda_c$ has dimension $6$. In particular, the conifold algebra $\Lambda_c$ is a smooth order whence the birational map $\psi$ above can be viewed as a non-commutative desingularization.
\end{proposition}
\begin{proof} From the defining relation $Z^2 = 1$ it follows that the image of $Z$ in any finite dimensional representation has eigenvalues $\pm 1$. Hence, after simultaneous conjugation of the images of $X$, $Y$ and $Z$ we may assume that $Z$ has one of the following three forms
\[
Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \quad \text{or} \quad Z \mapsto \begin{bmatrix} -1 & 0 \\ 0 & -1 \end{bmatrix} \quad \text{or} \quad Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}. \]
The first two possibilities are easily dealt with. Here, the image of $Z$ is a central unit so it follows from the relations $XZ+ZX=0=YZ+ZY$ as in the previous lemma that $X \mapsto 0$ and $Y \mapsto 0$. That is, these two components consist of just one point (the action of $\wis{GL}_2$ by simultaneous conjugation fixes these matrices) corresponding to the $2$-dimensional {\em semi-simple} representations
\[
M_+ = \phi_+ \oplus \phi_+ \qquad \text{and} \qquad M_- = \phi_- \oplus \phi_-~. \]
The interesting case is the third one. Because $X^2$ and $Y^2$ are central elements it follows (for example using the characteristic polynomial of $2 \times 2$ matrices) that in any $2$-dimensional representation $\Lambda_c \rTo^{\phi} M_2(\mathbb{C})$ we have that $tr(\phi(X))=0$ and $tr(\phi(Y))=0$. Hence, the third component of $\wis{rep}_2~\Lambda_c$ consists of those $2$-dimensional representations $\phi$ such that
\[
tr(\phi(X)) = 0 \qquad tr(\phi(Y)) = 0 \qquad \text{and} \qquad tr(\phi(Z)) = 0. \]
For this reason we denote this component by $\wis{trep}_2~\Lambda_c$ and call it the variety of {\em trace preserving $2$-dimensional representations}. To describe the coordinate ring of this component we can use {\em trace zero} generic $2 \times 2$ matrices
\[
X \mapsto \begin{bmatrix} x_1 & x_2 \\ x_3 & -x_1 \end{bmatrix} \quad Y \mapsto \begin{bmatrix} y_1 & y_2 \\ y_3 & -y_1 \end{bmatrix} \quad Z \mapsto \begin{bmatrix} z_1 & z_2 \\ z_3 & -z_1 \end{bmatrix}
\]
which drastically reduces the defining equations as $T^2$ and $TS+ST$ are both scalar matrices for any trace zero $2 \times 2$ matrices. More precisely, we have
\[
XZ+ZX \mapsto \begin{bmatrix} 2x_1z_1+x_2z_3+x_3z_2 & 0 \\ 0 & 2x_1z_1+x_2z_3+x_3z_2 \end{bmatrix} \]
\[
YZ+ZY \mapsto \begin{bmatrix} 2y_1z_1+y_2z_3+y_3z_2 & 0 \\ 0 & 2y_1z_1+y_2z_3+y_3z_2 \end{bmatrix} \]
\[
Z^2 \mapsto \begin{bmatrix} z_1^2+z_2z_3 & 0 \\ 0 & z_1^2+z_2z_3 \end{bmatrix} \]
and therefore the coordinate ring of $\wis{trep}_2~\Lambda_c$
\[
\mathbb{C}[\wis{trep}_2~\Lambda_c] = \frac{\mathbb{C}[x_1,x_2,x_3,y_1,y_2,y_3,z_1,z_2,z_3]}{(2x_1z_1+x_2z_3+x_3z_2,2y_1z_1+y_2z_3+y_3z_2,z_1^2+z_2z_3-1)}. \]
To verify that $\wis{trep}_2~\Lambda_c$ is a smooth $6$-dimensional affine variety we therefore have to show that the {\em Jacobian matrix}
\[
\begin{bmatrix}
2z_1 & z_3 & z_2 & 0 & 0 & 0 & 2x_1 & x_3 & x_2 \\
0 & 0 & 0 & 2z_1 & z_3 & z_2 & 2y_1 & y_3 & y_2 \\
0 & 0 & 0 & 0 & 0 & 0 & 2z_1 & z_3 & z_2
\end{bmatrix}
\]
has constant rank $3$ on $\wis{trep}_2~\Lambda_c$. This is forced by the submatrices $\begin{bmatrix} 2z_1 & z_3 & z_2 \end{bmatrix}$ along the ``diagonal'' of the Jacobian unless $z_1=z_2=z_3=0$ but this cannot hold for a point in $\wis{trep}_2~\Lambda_c$ by the equation
$z_1^2 + z_2z_3 = 1$.
\end{proof}
Next, we will use the two idempotents $e_1=\frac{1}{2}(Z-1)$ and $e_2=\frac{1}{2}(Z+1)$ to relate the conifold algebra $\Lambda_c$ to the quiver mentioned above. Consider a representation in $\wis{trep}_2~\Lambda_c$ then we can use base change to bring the image of $Z$ into the form
\[
Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} .\]
Taking the generic $2 \times 2$ matrices
\[
X \mapsto \begin{bmatrix} x_1 & x_2 \\ x_3 & x_4 \end{bmatrix} \qquad Y \mapsto \begin{bmatrix} y_1 & y_2 \\ y_3 & y_4 \end{bmatrix}
\]
it follows from the relations $XZ+ZX=0=YZ+ZY$ that $x_1=x_4=0=y_1=y_4$. Therefore, a representation in $\wis{trep}_2~\Lambda_c$ can be simultaneously conjugated to one of the form
\[
X \mapsto \begin{bmatrix} 0 & x_2 \\ x_3 & 0 \end{bmatrix} \quad Y \mapsto \begin{bmatrix}
0 & y_2 \\ y_3 & 0 \end{bmatrix} \quad Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \]
and as the images of $X^2$ and $Y^2$ are scalar matrices the remaining defining relations $[X^2,Y]=0=[Y^2,X]$ are automatically satisfied. $2$-dimensional representations of $\Lambda_c$ in this canonical form hence form a smooth $4$-dimensional affine space
\[
\mathbb{A}^4 = \mathbb{V}(x_1,x_4,y_1,y_4,z_1-1,z_2,z_3,z_4+1) \subset \mathbb{A}^{12}. \]
To recover $\wis{trep}_2~\Lambda_c$ from this affine space we let $\wis{GL}_2$ act on it. The subgroup of $\wis{GL}_2$ fixing the matrix
\[
\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \qquad \text{is} \qquad T = \{ \begin{bmatrix} \lambda & 0 \\ 0 & \mu \end{bmatrix}~|~\lambda,\mu \in \mathbb{C}^* \}, \]
the two-dimensional torus. There is an action of $T$ on the product $\wis{GL}_2 \times \mathbb{A}^4$ via
\[
t.(g,P) = (gt^{-1},t.P) \qquad \text{for all $t \in T, g \in \wis{GL}_2$ and $P \in \mathbb{A}^4$} \]
and where $t.P$ means the action by simultaneous conjugation by the $2 \times 2$ matrix $t \in T \subset \wis{GL}_2$ on the three $2 \times 2$ matrix-components of $P$.
\begin{proposition} Under the action-map
\[
\wis{GL}_2 \times \mathbb{A}^4 \rTo \wis{trep}_2~\Lambda_c \qquad (g,P) \mapsto g.P \]
two points $(g,P)$ and $(g',P')$ are mapped to the same point if and only if they belong to the same $T$-orbit in $\wis{GL}_2 \times \mathbb{A}^4$. That is, we can identify $\wis{trep}_2~\Lambda_{c}$ with the principal fiber bundle
\[
\wis{trep}_2~\Lambda_c \simeq \wis{GL}_2 \times^T \mathbb{A}^4 = (\wis{GL}_2 \times \mathbb{A}^4) / T. \]
In particular, there is a natural one-to-one correspondence between $\wis{GL}_2$-orbits in $\wis{trep}_2~\Lambda_c$ and $T$-orbits in $\mathbb{A}^4$. Observe that one can identify the $T$-action on $\mathbb{A}^4$ with the $\wis{GL}(\alpha)$-action on the representation space $\wis{rep}_{\alpha}~Q$ for the quiver-setting
\[
\xymatrix{\vtx{1} \ar@/^/[r] \ar@/^2ex/[r] & \vtx{1} \ar@/^/[l] \ar@/^2ex/[l]}.
\]
In particular, the conifold algebra $\Lambda_c$ is the quiver-order $\int_{\alpha}~\mathbb{C} Q$.
\end{proposition}
\begin{proof} If $g.P = g'.P'$, then $P = g^{-1}g'.P'$ and as both $P$ and $P'$ have as their third $2 \times 2$ matrix component
\[
\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \]
it follows that $g^{-1}g'$ is in the stabilizer subgroup of this matrix so $g^{-1}g' = t^{-1}$ for some $t \in T$
whence $g' = gt^{-1}$ and as $(g^{-1}g')^{-1}.P = P'$ also $t.P = P'$ whence
\[
t.(g,P) = (gt^{-1},t.P) = (g',P') \]
Hence we can identify $\wis{trep}_2~\Lambda_c = \wis{GL}_2.\mathbb{A}^4$ with the orbit-space of the $T$-action which is just $\wis{GL}_2 \times^T \mathbb{A}^4$. Incidentally, this gives another proof for smoothness of $\wis{trep}_2~\Lambda_c$ as it is the base of a fibration with smooth fibers of the smooth top space $\wis{GL}_2 \times \mathbb{A}^4$.
$\wis{GL}_2$ acts on $\wis{GL}_2 \times \mathbb{A}^4$ by $g.(g',P') = (gg',P')$ and this action commutes with the $T$-action so induces a $\wis{GL}_2$-action on the orbit-space
\[
\wis{GL}_2 \times (\wis{GL}_2 \times^T \mathbb{A}^4) \rTo \wis{GL}_2 \times^T \mathbb{A}^4 \qquad g.\overline{(g',P')} = \overline{(gg',P')}.
\]
As we have identified $\wis{GL}_2 \times^T \mathbb{A}^4$ with $\wis{trep}_2~\Lambda_c$ via the action map, that is
$\overline{(g,P)} = g.P$ the remaining statements follow.
\end{proof}
In this specific case we can explicitly compute polynomial (semi)-invariants using the $T$-action and relate it to the general results mentioned before.
\begin{lemma} The ring of polynomial invariants
\[
\mathbb{C}[\wis{trep}_2~\Lambda_c]^{\wis{GL}_2} \simeq \mathbb{C}[\mathbb{A}^4]^T \]
is isomorphic to the coordinate ring of the conifold singularity $R_c$ and the quotient map
\[
\wis{trep}_2~\Lambda_c \rOnto \wis{spec}~R_c \]
maps a two-dimensional representation to the direct sum of its Jordan-H\"older components. \end{lemma}
\begin{proof} The action of the two-dimensional torus $T$ on $\mathbb{A}^4 = \{ (x_2,x_3,y_2,y_3) \}$ is given by
\[
\begin{bmatrix} \lambda & 0 \\ 0 & \mu \end{bmatrix}.(\begin{bmatrix} 0 & x_2 \\ x_3 & 0 \end{bmatrix},
\begin{bmatrix} 0 & y_2 \\ y_3 & 0 \end{bmatrix},\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} ) = \]
\[
(\begin{bmatrix} 0 & \lambda\mu^{-1}x_2 \\ \lambda^{-1}\mu x_3 & 0 \end{bmatrix},
\begin{bmatrix} 0 & \lambda \mu^{-1}y_2 \\ \lambda^{-1} \mu y_3 & 0 \end{bmatrix},\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} ) .
\]
Hence, the action of $(\lambda,\mu) \in T$ on $\mathbb{C}[\mathbb{A}^4] = \mathbb{C}[X_2,X_3,Y_2,Y_3]$ is defined by
\[
X_2 \mapsto \lambda^{-1}\mu X_2 \quad X_3 \mapsto \lambda \mu^{-1} X_3 \quad Y_2 \mapsto \lambda^{-1}\mu Y_2 \quad Y_3 \mapsto \lambda \mu^{-1} Y_3 \]
and this action sends any monomial in the variables to a scalar multiple of that monomial. So, in order to determine the ring of polynomial invariants
\[
\mathbb{C}[X_2,X_3,Y_2,Y_3]^T = \{ f= f(X_2,X_3,Y_2,Y_3)~|~(\lambda,\mu).f = f~ \forall (\lambda,\mu) \in T \}
\]
it suffices to determine all invariant monomials, or equivalently, all positive integer quadruplets $(a,b,c,d)$ such that $a-b+c-d=0$ as
\[
(\lambda,\mu).X_2^aX_3^bY_2^cY_3^d = \lambda^{-a+b-c+d} \mu^{a-b+c-d} X_2^aX_3^bY_2^cY_3^d
\]
Clearly, such quadruplets are all generated (as Abelian group under addition) by the four basic ones
\[
(1,1,0,0) \mapsto X_2X_3 \quad (1,0,0,1) \mapsto X_2Y_3 \quad (0,1,1,0) \mapsto X_3Y_2 \quad (0,0,1,1) \mapsto Y_2Y_3 \]
and therefore
\[
\mathbb{C}[\wis{trep}_2~\Lambda_c]^{\wis{GL}_2} \simeq \mathbb{C}[X_2,X_3,Y_2,Y_3]^T = \mathbb{C}[X_2X_3,X_2Y_3,X_3Y_2,Y_2Y_3] \simeq \frac{\mathbb{C}[p,q,r,s]}{(ps-qr)} \]
is the conifold singularity $R_c$. We know already that $\wis{spec}~R_c$ has as its points the isomorphism classes of $2$-dimensional semi-simple representations with $\phi_+ \oplus \phi_-$ as the semi-simple representation corresponding to the singularity and all other points classify a unique simple $2$-dimensional representation.
\end{proof}
For the quiver-setting $(Q,\alpha)$ there are essentially two stability structures : $\theta=(-1,1)$ and $\theta'=(1,-1)$. Again, we can use elementary arguments in this case to calculate the moduli spaces.
\begin{lemma} The moduli space of all $\theta$-(semi)stable $\alpha$-dimensional representations
\[
\wis{moduli}^{\theta}_{\alpha}~\Lambda_c \simeq \wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \]
is the $\wis{proj}$ of the ring of $\theta$-semi-invariants and as the semi-invariants of weight zero are the polynomial invariants we get a projective morphism
\[
\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \rOnto \wis{spec}~R_c \]
which is a desingularization of the conifold singularity.
\end{lemma}
\begin{proof}
As in the case of polynomial invariants, the space $\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta}_k$ is spanned by monomials
\[
x_2^ax_3^by_2^cy_3^d \qquad \text{satisfying} \qquad -a+b-c+d=k \]
and one verifies that this space is the module over the ring of polynomial invariants generated by all monomials of degree $k$ in $x_3$ and $y_3$. That is
\[
\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} = \mathbb{C}[x_2x_3,x_2y_3,x_3y_2,y_2y_3][x_3,y_3] \subset \mathbb{C}[x_2,y_2,x_3,y_3] \]
with the generators $a=x_2x_3,b=x_2y_3,c=x_3y_2$ and $d=y_2y_3$ of degree zero and $e=x_3$ and $f=y_3$ of degree one. As a consequence, we can identify $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta}$ with the closed subvariety
\[
\mathbb{V}(ad-bc,af-be,cf-de) \subset \mathbb{A}^4 \times \mathbb{P}^1 \]
with $(a,b,c,d)$ the affine coordinates of $\mathbb{A}^4$ and $[e:f]$ projective coordinates of $\mathbb{P}^1$. The projection $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \rOnto \wis{spec}~R_c$ is projection onto the $\mathbb{A}^4$-component.
To prove smoothness we cover $\mathbb{P}^1$ with the two affine opens $e \not= 0$ (with affine coordinate $x = f/e$) and $f \not=0$ (with affine coordinate $y = e/f$). In the affine coordinates $(a,b,c,d,x)$ the relations become
\[
ad = bc \qquad ax = b \qquad \text{and} \qquad cx = d \]
whence the coordinate ring is $\mathbb{C}[a,c,x]$ and so the variety is smooth on this affine open. Similarly, the coordinate ring on the other affine open is $\mathbb{C}[b,d,y]$ and smoothness follows. Moreover, $\pi$ is {\em birational} over the complement of the singularity. This follows from the relations
\[
ax = b, \quad cx = d, \quad by = a, \quad dy = c \]
which determine $x$ (or $y$ and hence the point in $\wis{proj}$) lying over any $(a,b,c,d) \not= (0,0,0,0)$ in $\wis{spec}~R_c$. Therefore, the map $\pi$ is a desingularization and the {\em exceptional fiber}
\[
E = \pi^{-1}(0,0,0,0) \simeq \mathbb{P}^1 \]
which classifies the $\theta$-stable representations which lie over $(0,0,0,0)$ (that is, those such that $x_2x_3=x_2y_3=x_3y_2=y_2y_3=0$) as they are all of the form
\[
\xymatrix{\vtx{} \ar@/^1ex/[rr]|{x_3} \ar@/^3ex/[rr]|{y_3} & & \vtx{} \ar@/^1ex/[ll]|{0} \ar@/^3ex/[ll]|{0}}
\]
with either $x_3 \not= 0$ or $y_3 \not= 0$ and the different $T$-orbits of those are parametrized by the points of $\mathbb{P}^1$. As the smooth points of $\wis{spec}~R_c$ are known to correspond to isomorphism classes of simple (hence certainly $\theta$-stable) representations we have proved that
\[
\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \simeq \wis{moduli}^{\theta}_{\alpha}~\Lambda_c \]
is the moduli space of all $\theta$-stable $\alpha$-dimensional representations of $Q$.
\end{proof}
Clearly, we could have done the same calculations starting with the stability structure $\theta' = (1,-1)$ and obtained another desingularization replacing the roles of $x_2,y_2$ and $x_3,y_3$. This gives us the situation
\[
\vcenter{\xymatrix@C=10pt@R=30pt{
& \wis{blowup} \ar@{->>}[dl]_{\phi} \ar@{->>}[dr]^{\phi'}\\
\wis{moduli}^{\theta}_{\alpha}~\Lambda_c \ar@{->>}[dr]_{\pi} \ar@{.>}[rr]^r& & \wis{moduli}^{\theta'}_{\alpha}~\Lambda_c \ar@{->>}[dl]^{\pi'}\\
& \wis{spec}~R_c
}}.
\]
Here, $\wis{blowup}$ denotes the desingularization of $\wis{spec}~R_c$ one obtains by blowing-up the point $(0,0,0,0) \in \mathbb{A}^4$ and which has exceptional fiber $\mathbb{P}^1 \times \mathbb{P}^1$. Blowing down either of these lines (the maps $\phi$ and $\phi'$) one obtains the ``minimal'' resolutions given by the moduli spaces. These spaces are related by the {\em rational map} $r$ which is called the {\em Atiyah flop} in string theory-literature.
\end{document} |
\begin{document}
\begin{abstract}
Let $P$ be a convex polyhedron and $Q$ be a convex polygon with $n$ vertices in total in three-dimensional space. We present a deterministic algorithm that finds a translation vector $v \in \mathbb{R}^3$ maximizing the overlap area $|P \cap (Q + v)|$ in $O(n \log^2 n)$ time. We then apply our algorithm to solve two related problems. We give an $O(n \log^3 n)$ time algorithm that finds the maximum overlap area of three convex polygons with $n$ vertices in total. We also give an $O(n \log^2 n)$ time algorithm that minimizes the symmetric difference of two convex polygons under scaling and translation.
\end{abstract}
\maketitle
\setcounter{page}{1}
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}\label{sec_intro}
Shape matching is an important topic in computational geometry, with useful applications in areas such as computer graphics. In a typical problem of shape matching, we are supplied two or more shapes, and we want to determine how much the shapes resemble each other. More precisely, given a similarity measure and a set of allowed transformations, we want to transform the shapes to maximize their similarity measure.
There are many candidates for the similarity measure, such as the Hausdorff distance and the Fr\'echet distance between the boundaries of the shapes. We can also consider the area/volume of overlap or of symmetric difference. The advantage to these is that they are more robust against noise on the boundary of the shapes \cite{deberg1996}.
The maximum overlap problem of convex polytopes has been studied by many. In dimension $2$, de Berg et al. \cite{deberg1996} give an $O(n \log n)$ time algorithm for finding a translation maximizing the area of intersection of two convex polygons (where $n$ denotes the total number of vertices of the polygons). In dimension $3$, Ahn et al. \cite{ahn2008} give an $O(n^3 \log^4 n)$ expected time algorithm finding the maximum overlap of two convex polyhedra under translation. For the same problem, Ahn et al. \cite{ahn2013} present an algorithm that runs in $O(n \log^{3.5} n)$ time with probability $1 - n^{-O(1)}$ and an additive error. For $d > 3$, given two convex polytopes of dimension $d$ with $n$ facets in total, Ahn et al. \cite{ahn2013} give an algorithm that finds the maximum overlap under translation in $O(n^{\lfloor d/2 \rfloor + 1} \log^{d} n)$ time with probability $1 - n^{-O(1)}$ and an additive error.
In the plane, when all rigid motions are allowed, Ahn et al. \cite{ahn2007} give an approximate algorithm that finds a rigid motion realizing at least $1-\epsilon$ times the maximal overlap in $O((1/\epsilon)\log n + (1/\epsilon^2) \log (1/\epsilon))$ time. In dimension $3$, Ahn et al. \cite{ahn2014} present an approximate algorithm that finds a rigid motion realizing at least $1-\epsilon$ times the maximal overlap in $O(\epsilon^{-3} n \log^{3.5} n)$ time with probability $1 - n^{-O(1)}$.
When considering the maximum overlap as a similarity measure, we obviously can only allow area/volume-preserving transformations. However, we may want to allow scaling as a transformation---two similar triangles are supposed to be very ``similar,'' though they may have different sizes. In this case, the area of symmetric difference is a better measure of similarity. Yon et al. \cite{yon2016} give an algorithm minimizing the symmetric difference of two convex polygons under translation and scaling in $O(n \log^3 n)$ expected time.
\subsection*{Our results}
While many have studied the matching problem for two convex polytopes of the same dimension, to our knowledge no one has examined the problem for polytopes of different dimensions or matching more than two polytopes.
The main result in this paper is a deterministic algorithm for the problem of matching a convex polyhedron and a convex polygon under translation in three-dimensional space.
\begin{restatable}{theorem}{algo}\label{thm_algo}
Let $P$ be a convex polyhedron and $Q$ a convex polygon with $n$ vertices in total. We can find a vector $v \in \mathbb{R}^3$ that maximizes the overlap area $|P \cap (Q+v)|$ in $O(n \log^2 n)$ time.
\end{restatable}
We also present two applications of our algorithm to other problems in computational geometry. First, we give a deterministic algorithm for maximizing the overlap of three convex polygons under translations.
\begin{restatable}{theorem}{threepolys}\label{thm_three_polygons}
Let $P$, $Q$, $R$ be three convex polygons with $n$ vertices in total in the plane. We can find a pair of translations $(v_Q, v_R) \in \mathbb{R}^4$ that maximizes the overlap area $|P \cap (Q + v_Q) \cap (R + v_R)|$ in $O(n\log^3 n)$ time.
\end{restatable}
We also give a deterministic $O(n \log^2 n)$ time algorithm for minimizing the symmetric difference of two convex polygons under a homothety (a translation and a scaling), which is an improvement to Yon et al.'s randomized algorithm \cite{yon2016}.
\begin{restatable}{theorem}{symmdiff}\label{thm_symmetric_difference}
Let $P$ and $Q$ be convex polygons with $n$ vertices in total. Then we can find a homothety $\varphi$ that minimizes the area of symmetric difference $|P \setminus \varphi(Q)| + |\varphi(Q) \setminus P|$ in $O(n \log^2 n)$ time.
\end{restatable}
The main ingredient in the proof of \Cref{thm_algo} is a new technique we introduce which generalizes Megiddo's prune-and-search \cite{megiddo1984}. This allows us to efficiently prune among $n$ groups of $m$ parallel lines.
\begin{restatable}{theorem}{pruneandsearch}\label{thm_prune_and_search}
Let $S = \bigcup_{i=1}^{n} S_i$ be a union of $n$ sets of $O(m)$ parallel lines in the plane, none of which are parallel to the $x$-axis, and suppose the lines in each $S_i$ are indexed from left to right.
Suppose there is an unknown point $p^{*} \in \mathbb{R}^2$ and we are given an oracle that decides in time $T$ the relative position of $p^{*}$ to any line in the plane. Then we can find the relative position of $p^{*}$ to every line in $S$ in $O(n \log^2 m + (T + n) \log(mn))$ time.
\end{restatable}
\subsection*{Organization of the Paper}
In \Cref{sec_prelim}, we introduce the problem of matching a convex polyhedron and a convex polygon under translation in three-dimensional space. In \Cref{sec_technique}, we present a core technique we use in our algorithm, which is a generalization of Megiddo's prune-and-search technique \cite{megiddo1984}. In \Cref{sec_three_and_two}, we present the algorithm for \Cref{thm_algo}. In \Cref{sec_three_polygons}, we apply our algorithm to solve the problem of maximizing the intersection of three polygons under translation. In \Cref{sec_symmetric_difference}, we give the algorithm for minimizing the symmetric difference of two convex polygons under homothety.
\section{Preliminaries}\label{sec_prelim}
Let $P \subset \mathbb {R}^3$ be a convex polyhedron and $Q \subset \mathbb{R}^2$ be a convex polygon with $n$ vertices in total. Throughout the paper, we assume that $Q$ is in the $xy$-plane, and that the point in $P$ with minimal $z$ coordinate is on the $xy$-plane. We want to find a translation vector $v = (x, y, z) \in \mathbb{R}^3$ that maximizes the overlap area $f(v) = |P \cap (Q + v)|$.
It is easy to observe that $f(v)$ is continuous and piecewise quadratic on the interior of its support. As noted in \cite{deberg1996, ahn2008, ahn2013}, $f$ is smooth on a region $R$ if $P \cap (Q+v)$ is combinatorially equivalent for all $v \in R$, that is, if we have the same set of face-edge incidences between $P$ and $Q$. Following the convention of \cite{ahn2008}, we call the polygons that form the boundaries of these regions the \textit{event polygons}, and as in \cite{deberg1996}, we call the space of translations of $Q$ the \textit{configuration space}. The arrangement of the event polygons partition the configuration space into cells with disjoint interiors. The overlap function $f(v)$ is quadratic on each cell. Thus, to locate a translation maximizing $f$, we need to characterize the event polygons.
For two sets $A, B \subset \mathbb{R}^d$, we write the \textit{Minkowski sum} of $A$ and $B$ as
\[
A + B := \{a+b|a \in A, b\in B\}.
\]
We will make no distinction between the translation $A + v$ and the Minkowski sum $A + \{v\}$ for a vector $v$. We also write $A-B$ for the Minkowski sum of $A$ with $-B = \{-b | b \in B\}$. We categorize the event polygons into three types and describe them in terms of Minkowski sums:
\begin{enumerate}[label = (\Roman*)]
\item \label{type1} When $Q + v$ contains a vertex of $P$. For each vertex $u$ of $P$, we have an event polygon $u - Q$. There are $O(n)$ event polygons of this type.
\item \label{type2} When a vertex of $Q + v$ is contained in a face of $P$. For each face $F$ of $P$ and each vertex $v$ of $Q$, we have an event polygon $F - v$. There are $O(n^2)$ event polygons of this type.
\item \label{type3} When an edge of $Q + v$ intersects an edge of $P$. For each edge $e$ of $P$ and each edge $e'$ of $Q$, we have an event polygon $e - e'$. There are $O(n^2)$ event polygons of this type.
\end{enumerate}
The reason that convexity is fundamental is due to the following standard fact, as noted and proved in \cite{deberg1996, yon2016}.
\begin{proposition}\label{prop_concavity}
Let $P$ be a $d'$-dimensional convex polytope and let $Q$ be a $d$-dimensional convex polytope. Suppose $d' \geq d$. Let $f(v) = \operatorname{Vol}(P \cap (Q + v))$ be the volume of the overlap function. Then, $f(v)^{1/d}$ is concave on its support $\operatorname{supp}(f) = \{v|f(v) > 0\}$.
\end{proposition}
As in \cite{avis1996}, we say a function $f: \mathbb{R} \to \mathbb{R}$ is \textit{unimodal} if it increases to a maximum value, possibly stays there for some interval, and then decreases. It is \textit{strictly unimodal} if it strictly increases to the maximum and then strictly decreases. Furthermore, we say a function $f: \mathbb{R}^d \to \mathbb{R}$ is (strictly) unimodal if its restriction to any line is (strictly) unimodal.
The following corollary of \Cref{prop_concavity} allows us to employ a divide-and-conquer strategy in our algorithm.
\begin{corollary}[\cite{avis1996}]\label{cor_unimodality}
For any line $l$ parameterized by $l = p + vt$ in $\mathbb{R}^{d'}$ for $v \neq 0$, the function $f_l(t) = f(p + vt)$ is strictly unimodal.
\end{corollary}
We also use the following two techniques in our algorithm.
\begin{lemma}[\cite{frederickson1984}]\label{lemma_sorted_matrix}
Let $M$ be an $m \times n$ matrix of real numbers, where $m \leq n$. If every row and every column of $M$ is in increasing order, then we say $M$ is a \textit{sorted matrix}. For any positive integer $k$ smaller or equal to $mn$, the $k$-th smallest entry of $M$ can be found in $O(m \log(2n/m))$ time, assuming an entry of $M$ can be accessed in $O(1)$ time.
\end{lemma}
For our purposes, we will use this result in the weaker form of $O(m+n)$.
\begin{lemma}[\cite{chazelle1993}]\label{lemma_cutting}
Given $n$ hyperplanes in $\mathbb{R}^d$ and a region $R \subset \mathbb{R}^d$, a \textit{$(1/r)$-cutting} is a collection of simplices with disjoint interiors, which together cover $R$ and such that the interior of each simplex intersects at most $n/r$ hyperplanes. A $(1/r)$-cutting of size $O(r^d)$ can be computed deterministically in $O(nr^{d-1})$ time. In addition, the set of hyperplanes intersecting each simplex of the cutting is reported in the same time.
\end{lemma}
\section{Generalized two-dimensional prune-and-search}\label{sec_technique}
In this section, we prove \Cref{thm_prune_and_search}, our generalization of Megiddo's prune-and-search technique \cite{megiddo1984}. This technique is of independent interest and can likely be applied to other problems.
In \cite{megiddo1984}, Megiddo proves the following:
\begin{theorem}[\cite{megiddo1984}]\label{thm_megiddo}
Suppose there exists a point $p^{*} \in \mathbb{R}^2$ not known to us. Suppose further that we have an oracle that can tell us for any line $l \subset \mathbb{R}^2$ whether $p^{*} \in l$, and if $p^{*} \notin l$, the side of $l$ that $p^{*}$ belongs to. Let $T$ be the running time of the oracle. Then given $n$ lines in the plane, we can find the position of $p^{*}$ relative to each of the $n$ lines in $O(n + T \log n)$ time.
\end{theorem}
We are interested in a generalized version of Megiddo's problem. Suppose, instead of $n$ lines, we are given $n$ sets of parallel lines $S_1, S_2, \ldots, S_n$, each of size $O(m)$. In addition, suppose the lines in each $S_i$ are indexed from left to right (assuming none of the lines are parallel to the $x$-axis). Again, we want to know the position of $p^{*}$ relative to every line in $S = \bigcup_{i=1}^{n} S_i$. Megiddo's algorithm solves this problem in $O(mn + T \log(mn))$ time, but we want a faster algorithm for large $m$ by exploiting the structure of $S$.
Without loss of generality, suppose that there are no lines parallel to the $y$-axis. For each $i$ between $1$ and $n$, suppose $S_i = \{l_{i}^{j} | l_{i}^{a} \text{ lies strictly to the left of } l_{i}^{b} \text { iff } a<b\}$. Suppose that $p^{*} = (x^{*}, y^{*}) \in \mathbb{R}^2$. To report our final answer, we simply need to provide, for each $S_i$, the two consecutive indices $a$ and $a+1$ such that $p^{*}$ lies strictly between $l_{i}^{a}$ and $l_{i}^{a+1}$ or the single index $a$ such that $p^{*} \in l_{i}^{a}$.
In our algorithm, we keep track of a feasible region $R$ containing $p^{*}$, which is either the interior of a (possibly unbounded) triangle or an open line segment if we find a line $l$ that $p^{*}$ lies on. Together with $R$, we keep track of the $2n$ indices $\operatorname{lower}(i)$ and $\operatorname{upper}(i)$ such that $S^R = \bigcup_{i=1}^{n} S_{i}^R = \{l_i^{j} | j \in (\operatorname{lower}(i), \operatorname{upper}(i)]\}$ is the set of lines intersecting $R$, which is also the set of lines we do not yet know the relative position to $p^{*}$. In the beginning, $R = \mathbb{R}^2$. Each step, we find $O(1)$ lines to run the oracle on to find a new feasible region $R' \subset R$ such that $|S^{R'}| \leq \frac{17}{18} |S^{R}|$ and recurse on $R'$.
\begin{algorithm}[ht]
\DontPrintSemicolon
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{A set $S = \bigcup_{i=1}^{n} S_{i} = \{l_{i}^{j}\}$ of $O(mn)$ lines}
\Output{A list of indices that indicate the position of $p^{*}$ to each $S_i$}
$R \longleftarrow \mathbb{R}^2$\;
$S^R \longleftarrow S$\;
\While{$|S^R| \geq 18$}{
Find $O(1)$ lines to run the oracle on\;
Compute the piece $R' \subset R$ containing $p^{*}$\; \tcc{We guarantee that $R'$ intersects at most $17/18$ of the lines that intersect $R$}
Triangulate $R'$ with $O(1)$ lines to run the oracle on\;
Update $S^R \longleftarrow S^{R'}$
}
Compute relative position of $p^{*}$ to the remaining lines by brute force\;
\caption{Pseudocode for \Cref{thm_prune_and_search}}\label{pseudo_prune_and_search}
\end{algorithm}
One extra computational effort is updating $S^{R'}$ by computing $\operatorname{lower}(i)$ and $\operatorname{upper}(i)$. Since the feasible region is always a convex set of constant complexity, we can use binary search on $S_i^{R}$ to find the new bounds for $S_i^{R'}$ in $O(\log |S_{i}^{R}|)$ time. Thus, the total time involved in this process, assuming $|S^R|$ decreases by at least a fraction $\epsilon = 1/18$ in each iteration, is
\begin{align*}
& \sum_{i=1}^n O(\log |S_{i}|) + \sum_{i=1}^n O(\log |S_{i}^{R_1}|) + \sum_{i=1}^n O(\log |S_{i}^{R_2}|) + \cdots \\
= & O(n \log (\frac{1}{n}|S|)) + O(n \log (\frac{1}{n}|S^{R_1}|)) + O(n \log (\frac{1}{n}|S^{R_2}|)) + \cdots \\
= & O(n \log (m)) + O(n \log (m(1-\epsilon))) + O(n \log (m(1-\epsilon)^2)) + \cdots \\
= & O(n \log^2 m).
\end{align*}
We will use the following well-known result:
\begin{lemma}[\cite{cormen2009}]\label{lemma_weighted_median}
Suppose we are given $n$ distinct real numbers with positive weights that sum to $1$. Then we can find the weighted median of these numbers in $O(n)$ time.
\end{lemma}
Given $S^R$ and $R$, we want to find $R' \subset R$ to recurse on.
\begin{lemma}\label{lemma_balanced_quadrants}
If $|S^R| \geq 18$, then in $O(T + n)$ time, we can find a region $R' \subset R$ of constant complexity containing $p^*$ so that its interior intersects no more than $17/18$ of all the lines in $S^R$.
\end{lemma}
\begin{proof}
For convenience, we write $S^R = S = \bigcup_{i=1}^{n} S_{i} = \{l_{i}^{j}\}$. We first find the weighted median of the slopes of the lines in $S$, where the slope of the lines of $S_i$ is weighted by $|S_i|/|S|$. This can be done in $O(n)$ time by \Cref{lemma_weighted_median}.
If this slope is equal to the slope of some line in $S_i$ and $|S_i| \geq \frac{1}{9} |S|$, then we can simply divide the plane using the median line of $S_i$ and the $x$-axis and the interior of each quadrant will avoid at least $1/18$ of the lines of $S$.
Otherwise, at least $4/9$ of the lines have slopes strictly greater than/less than the median slope. Without loss of generality, we assume at least $4/9$ of the lines have positive slope and at least $4/9$ of the lines have negative slope. Now let $S_{+} = \bigcup_{i=1}^{k} S_{i}$ and $S_{-} = \bigcup_{i=k+1}^{n} S_{i}$ denote the set of lines with positive/negative slope, respectively. We remove lines from the larger of the two sets until they have the same size.
\begin{figure}
\caption{$P_1$, $P_2$ and $P_3$ are represented by colors.}
\label{fig_line_partition}
\end{figure}
We partition $S_{+} \cup S_{-}$ into $O(n)$ subsets $P_i$ each containing the same number of lines from $S_{+}$ and $S_{-}$ in the following way: going in lexicographical order by the indices of the lines, we put a line from $S_{1}$ and a line from $S_{k+1}$ into $P_1$ until we exhaust one of the sets (say it is $S_{k+1}$). Then, we move on to put a line from the remaining $S_{1}$ and a line from $S_{k+2}$ into $P_2$ until we exhaust one of them, and so on. Each $P_i$ is then of the form $\{l_{a(i)}^{b(i)},\ldots, l_{a(i)}^{b(i) + |P_i|/2 - 1}, l_{c(i)}^{d(i)},\ldots, l_{c(i)}^{d(i) + |P_i|/2 - 1} \}$, and can be represented by the indices $(a(i), b(i))$ and $(c(i), d(i))$ (see \Cref{fig_line_partition}). We can compute this partition in $O(n)$ time. For each $P_i$, we compute the intersection $p_i = (x_i, y_i)$ of the median line in $P_i$ with positive slope and the median line with negative slope, and assign $p_i$ a weight $w_i = |P_i| / (2|S_{+}|)$. Then, the weights of the $p_i$ sum to $1$. The significance of this is that if we know the relative position of $p^{*}$ to the lines $x = x_i$ and $y = y_i$, then we know the relative position of $p^{*}$ to at least $1/4$ of the lines in $P_i$, which is at least $\frac{2}{9} w_i$ of all the lines in $S$.
We find the median point $q = (x_{q}, y_{q})$ of the $p_i$'s by weight in $x$-coordinate in $O(n)$ time by \Cref{lemma_weighted_median}. We run the oracle on the line $x = x_{q}$. Let $p_{k_1}, p_{k_2}, \ldots, p_{k_l}$ be the points such that we now know the relative position of $p^{*}$ to the lines $x = x_{k_i}$. Then the weights of these points sum to at least $1/2$. We find the median point $q' = (x_{q'}, y_{q'})$ of these by weight in $y$-coordinate in $O(n)$ time. We run the oracle on the line $y = y_{q'}$. Then, for points with weights that sum to at least $1/4$, we now know the relative position of $p^{*}$ to the vertical line and the horizontal line through those points. This means that we know the relative position of $p^{*}$ to at least $\frac{2}{9} \cdot \frac{1}{4} = \frac{1}{18}$ of all the lines in $S$. We get a new feasible region according to the two oracle calls whose interior avoids at least $1/18$ of the lines in $S$, and we triangulate it with $O(1)$ more oracle calls to get our desired region, in $O(T + n)$ time total.
\end{proof}
\begin{proof}[Proof of \Cref{thm_prune_and_search}]
After $O(\log mn)$ recursive iterations of \Cref{lemma_balanced_quadrants}, we arrive at a feasible region intersecting at most $17$ lines in $S$, and we can finish by brute force. Therefore, our algorithm runs in $O(n \log^2 m + (T + n) \log(mn))$ time.
\end{proof}
\begin{remark}
A simpler and probably more practical algorithm for \Cref{lemma_balanced_quadrants} is simply choosing a random line from $S_{+}$ and $S_{-}$ to intersect and run the oracle on the horizontal and vertical line through the intersection. This method gives the same run time in expectation.
\end{remark}
\section{Maximum overlap of convex polyhedron and convex polygon}\label{sec_three_and_two}
In this section, we give the algorithm that finds a translation $v \in \mathbb{R}^3$ maximizing the overlap area function $f$. Following the convention in \cite{deberg1996}, we call such a translation a \textit{goal placement}. In the algorithm, we keep track of a closed \textit{target region} $R$ which we know contains a goal placement and decrease its size until for each event polygon $F$, either $F \cap \operatorname{interior}(R) = \varnothing$ or $F \supset R$. Then, $f$ is quadratic on $R$ and we can find the maximum of $f$ on $R$ using standard calculus. Thus, the goal of our algorithm is to efficiently trim $R$ to eliminate event polygons that intersect it.
In the beginning of the algorithm, the target region is the interior of the Minkowski sum $P - Q$, where the overlap function is positive. By the unimodality of the overlap function, the set of goal placements is convex. Thus, for a plane in the configuration space, either it contains a goal placement, or all goal placements lie on one of the two open half spaces separated by the plane. If we have a way of knowing which case it is for any plane, we can decrease the size of our target region by cutting it with planes and finding the piece to recurse. More precisely, we need a subroutine \textbf{PlaneDecision} that decides the relative position of the set of goal placements to a plane $S$.
Whenever \textbf{PlaneDecision} reports that a goal placement is found on a plane, we can let the algorithm terminate. Thus, we can assume it always reports a half-space containing a goal placement.
As in \Cref{pseudo_algo}, we break down our algorithm into three stages.
\begin{algorithm}[ht]
\DontPrintSemicolon
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{A convex polyhedron $P \in \mathbb{R}^3$ and a convex polygon $Q \in \mathbb{R}^3$ with $n$ vertices in total}
\Output{A translation $v \in \mathbb{R}^3$ maximizing the area $|P \cap (Q + v)|$}
Locate a horizontal slice containing a goal placement that does not contain any vertices of $P$ and replace $P$ by this slice of $P$\;
Find a ``tube'' $D + l_y$ whose interior contains a goal placement and intersects $O(n)$ event polygons, where $D$ is a triangle in the $xz$-plane and $l_y$ is the $y$-axis\;
Recursively construct a $(1/2)$-cutting of the target region $D + l_y$ to find a simplex containing a goal placement that does not intersect any event polygon\;
\caption{Pseudocode for \Cref{thm_algo}}\label{pseudo_algo}
\end{algorithm}
\subsection{Stage 1}
In the first stage of our algorithm, we make use of \cite{deberg1996} to simplify our problem so that $P$ can be taken as a convex polyhedron with all of its vertices on two horizontal planes.
We sort the vertices of $P$ by $z$-coordinate in increasing order and sort the vertices of $Q$ in counterclockwise order. Next, we trim the target region with horizontal planes (planes parallel to the $xy$-plane) to get to a slice that does not contain any vertices of $P$.
\begin{lemma}\label{lemma_stage_1}
In $O(n \log^2 n)$ time, we can locate a strip $R = \{(x, y, z) | z \in [z_0, z_1]\}$ whose interior contains a goal placement and $P$ has no vertices with $z \in [z_0, z_1]$.
\end{lemma}
\begin{figure}
\caption{The slice of $P$ with $z \in [z_0, z_1]$.}
\label{fig_stage_1}
\end{figure}
\begin{proof}
Starting with the median $z$-coordinate of the vertices of $P$, we perform a binary search on the levels containing a vertex of $P$. For a horizontal plane $S$, \cite[Theorem 3.8]{deberg1996} allows us to compute the maximum overlap of $P\cap S$ and $Q$ under translation in $O(n\log n)$-time. The two planes $S_1$ and $S_2$ with the largest maximum values will be the bounding planes for the slice containing a goal placement by the unimodality of $f$. Thus, by a binary search, we can locate this slice in $O(n \log^2 n)$ time.
\end{proof}
By Chazelle's algorithm \cite{chazelle1992}, the convex polyhedron $P' = \{(x, y, z)\in P |z \in [z_0, z_1]\}$ can be computed in $O(n)$ time. From now on, we replace $P$ with $P'$ (see \Cref{fig_stage_1}). Without loss of generality, assume $z_0 = 0$ and $z_1 = 1$.
The region in the configuration space where $|P \cap (Q+v)| > 0$ is the Minkowski sum $P - Q$. Since $P$ only has two levels $P_0 = \{(x, y, z) \in P | z = 0\}$ and $P_1 = \{(x, y, z) \in P | z = 1\}$ that contain vertices, the Minkowski sum $P - Q$ is simply the convex hull of $(P_0 - Q) \cup (P_1 - Q)$, which has $O(n)$ vertices. We can compute $P_0 - Q$ and $P_1 - Q$ in $O(n)$ time and compute their convex hull in $O(n \log n)$ time by Chazelle's algorithm \cite{chazelle1993b}.
\subsection{PlaneDecision}
With the simplification of the problem in Stage $1$, we now show that the subroutine \textbf{PlaneDecision} can be performed in $O(n \log n)$ time. Let $S$ be a fixed plane in the configuration space. We call a translation $v$ that achieves $\operatorname{max}_{v \in S} f(v)$ a \textit{good placement}. First, we can compute the intersection of $S$ with $P - Q$ in $O(n)$ time by Chazelle's algorithm \cite{chazelle1992}. If the intersection is empty, we just report the side of $S$ containing $P - Q$. From now on assume this is not the case.
The following lemma shows that \textbf{PlaneDecision} runs in the same time bound as the algorithm that just finds the maximum of $f$ on a plane.
\begin{lemma}\label{lemma_side_decision}
Suppose we can compute $\operatorname{max}_{v \in S} f(v)$ for any plane $S \subset \mathbb{R}^3$ in time $T$, then we can perform \textbf{PlaneDecision} for any plane in time $O(T)$.
\end{lemma}
\begin{proof}
The idea is to compute $\operatorname{max}_{v \in S'} f(v)$ for certain $S'$ that are perturbed slightly from $S$ to see in which direction relative to $S$ does $f$ increase.
We compute over an extension of the reals $\mathbb{R}[\omega]/(\omega^3)$, where $\omega > 0$ is taken to be smaller than any positive real number. Let $A > 0$ be the maximum of $f$ over a plane $S$. Let $S_+$ and $S_-$ be the two planes parallel to $S$ that have distance $\omega$ from $S$. We compute $A_+ = \operatorname{max}_{v \in S_+} f(v)$ and $A_- = \operatorname{max}_{v \in S_-} f(v)$ in $O(T)$ time. Since $f$ is piecewise quadratic, $A_+$ and $A_-$ as symbolic expression will only involve quadratic terms in $\omega$. Since $f$ is strictly unimodal on $P - Q$, there are three possibilities:
\begin{enumerate}
\item If $A_+ > A$, then the halfspace on the side of $S_+$ contains the set of goal placements.
\item If $A_- > A$, then the halfspace on the side of $S_-$ contains the set of goal placements.
\item If $A \geq A_+$ and $A \geq A_-$, then $A$ is the global maximum of $f$.
\end{enumerate}
Thus, in $O(T)$ time, we can finish \textbf{PlaneDecision}.
\end{proof}
Finding a good placement on $S$ is similar to finding a goal placement on the whole configuration space. $S$ is partitioned into cells by the intersections of event polygons with $S$. We need to find a region on $S$ containing a good placement that does not intersect any event polygons.
We present a subroutine \textbf{LineDecision} that finds, for a line $l \subset S$, the relative position of the set of good placements on $S$ to $l$.
\begin{proposition}\label{prop_line_decision}
For a line $l \subset S$, we can perform \textbf{LineDecision} in $O(n)$ time.
\end{proposition}
\begin{figure}
\caption{The convex polyhedron $I$ is formed by intersecting $P$ and $(Q+l)$.}
\label{fig_line_decision}
\end{figure}
\begin{proof}
First, we compute $\operatorname{max}_{v \in l} f(v)$ and a vector achieving the maximum. We parameterize the line $l$ by $p+vt$ where $t$ is the parameter and $p, v \in \mathbb{R}^3$. The horizontal cross-section of $I = P\cap(Q+l)$ at height $t$ has area $f(p+vt)$. Since $I$ is the intersection of two convex polytopes with $O(n)$ vertices (see \Cref{fig_line_decision}), Chazelle's algorithm \cite{chazelle1992} computes $I$ in $O(n)$ time. Then, \cite[Theorem 3.2]{avis1996} computes the maximum cross-section in $O(n)$ time.
Now, by the same argument and method as in the proof of \Cref{lemma_side_decision}, we can finish \textbf{LineDecision} in $O(n)$ time. In the case where $\operatorname{max}_{v \in l} f(v) = 0$, we report the side of $l$ containing $S \cap (P - Q)$.
\end{proof}
Whenever our subroutine \textbf{LineDecision} reports a good placement is found on a line, we can let the algorithm terminate. Thus, we can assume it always reports a half-plane of $S$ containing a good placement.
We now present \textbf{PlaneDecision}. If $S$ is horizontal, then we only need to find the maximum overlap of the convex polygons $P \cap S$ and $Q$ using De Berg et al.'s algorithm \cite{deberg1996}, which takes $O(n \log n)$ time. Thus, we assume $S$ is non-horizontal.
\begin{algorithm}[ht]
\DontPrintSemicolon
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{A plane $S \subset \mathbb{R}^3$}
\Output{A translation $v \in S$ maximizing the area $|P \cap (Q + v)|$}
Compute $S \cap (P - Q)$ and set it to be our initial target region.\;
Locate a strip on $S$ containing a good placement whose interior intersects $O(n)$ event polygons.\;
Recursively construct a $(1/2)$-cutting of the strip to find a triangle containing a good placement that does not intersect any event polygon\;
\caption{Pseudocode for \textbf{PlaneDecision}}\label{pseudo_plane}
\end{algorithm}
As in \Cref{pseudo_plane}, we break down \textbf{PlaneDecision} into three steps. We have already explained Step $1$, where we compute $S \cap (P - Q)$, so we begin with Step $2$.
\subsubsection{PlaneDecision: Step 2}
We want to find a strip on $S$ strictly between $z = 0$ and $z = 1$ that intersects $O(n)$ event polygons. Since there are no vertices of $P$ with $z$-coordinate in the interval $(0, 1)$, there are no event polygons of type~\ref{type1} in this range, and we will only need to consider event polygons of type~\ref{type2} and type~\ref{type3}.
We look at the intersection points of $S$ with the edges of the event polygons. These edges come from the set $\{e_i - v_j|e_i \text{ non-horizontal edge of }P, \, v_j \text{ vertex of } Q\}$. Without loss of generality, assume that $S$ is parallel to the $y$-axis. We are interested in the $z$-coordinates of the intersections, so we project everything into the $xz$-plane. Then, $S$ becomes a line, which we denote by $l_S$, and each edge $e_i - v_j$ becomes a segment whose endpoints lie on $z = 0$ and $z = 1$. Suppose each edge $e_i$ projects to a segment $s_i$, and each $v_j$ projects to a point $x_j$ on the $x$-axis. Then, we get $O(n^2)$ segments $s_i - x_j$ with endpoints on $z = 0$ and $z = 1$, and the line $l_S$ that intersect them in some places.
\begin{lemma}\label{lemma_plane_step_2.1}
In $O(n \log n)$ time, we can locate a strip $R = \{(x, y, z)\in S | z \in [z_0, z_1]\}$ whose interior contains a good placement and intersects none of the edges of the event polygons.
\end{lemma}
\begin{proof}
By our setup, we want to find a segment on $l_S$ whose interior does not intersect any segment of the form $s_i - x_j$.
Since $s_i$ are projections of edges of a convex polyhedron, we can separate them into two sets such that edges from the same set do not intersect (we take the segments that are projections of the edges of the ``front'' side and ``back'' side, respectively), allowing the two extremal edges to appear in both sets. We will process each set separately. This can be done by identifying the extremal points on the top and bottom faces of $P$ in the $x$ direction, which can be done in $O(\log n)$ time.
For a set of non-intersecting segments, since they all have endpoints on the line $z = 0$ and $z = 1$, we can sort them by the sum of the $x$-coordinates of their two endpoints. This takes $O(n \log n)$ time. We further separate these segments into two sets by slope: those that make a smaller angle than $l_S$ with the positive $x$-axis, and those that make a larger angle.
Suppose we now have a set of non-intersecting segments that all make larger angles than $l_S$ with the positive $x$-axis, $s_1, s_2, \ldots, s_m$, where $m = O(n)$. We also sort the projections of the vertices of $Q$, $x_1, \ldots, x_q$, in decreasing order by $x$-coordinate. This can be done in $O(\log n)$ time by identifying the extremal vertices of $Q$ in the $x$-direction.
Let $z_{ij}$ be the $z$-coordinate of the intersection of the line containing $s_i - x_j$ with $l_S$. Let $M$ be an $m\times q$ matrix with $(i,j)$-th entry given by
\[
M_{ij} =
\begin{cases}
0 & z_{ij} \leq 0 \\
z_{ij} & z_{ij} \in (0,1) \\
1 & z_{ij} \geq 1
\end{cases}.
\]
We claim that $M$ is a sorted matrix. To see this, consider any fixed row $r$ and indices $i < j$. Then the line containing $s_r - x_i$ lies strictly to the left of the line containing $s_r - x_j$ since $x_i > x_j$. This means that $z_{ri} < z_{rj}$. Thus, every row of $M$ is in increasing order. Similarly, for a fixed column $c$ and indices $i<j$, the segment $s_i - x_c$ lies strictly to the left of the segment $s_j - x_c$. Then, if they both intersect $l_S$, we must have $z_{ic}<z_{jc}$. If $s_i - x_c$ does not intersect $l_S$ and $s_j - x_c$ does, then $s_i - x_c$ must lie on the left of $l_S$ and thus $M_{ic} = 0 < z_{jc} = M_{jc}$. Similarly, if $s_i - x_c$ intersects $l_S$ and $s_j - x_c$ does not, then $s_j - x_c$ must lie on the right of $l_S$ and thus $M_{ic} = z_{ic} < 1 = M_{jc}$. If they both do not intersect $l_S$, then still $M_{ic} \leq M_{jc}$ since it is impossible to have $M_{ic} = 1$ and $M_{jc} = 0$. This proves our claim.
By \Cref{lemma_sorted_matrix}, we can find the $k$-th smallest value in $M$ in $O(m + q) = O(n)$ time. Thus, we can perform a binary search on these $z$-coordinates of the intersections of the edges $e_i - v_j$ with $S$. Each time we perform a \textbf{LineDecision} on the line with the median $z$-coordinate of the remaining entries to eliminate half of the intersections. After $O(\log n)$ iterations or $O(n \log n)$ time, we find a strip on $S$ containing a good placement that contains no intersections with this group of edges.
We repeat the same procedure for the other three groups and compute the intersection of the four strips to find a strip containing a good placement that contains no intersections with any edge of the event polygons.
\end{proof}
\begin{figure}
\caption{Projecting the configuration space onto the $xz$-plane. The projection of $S$ is the magenta line segment, and the projection of the strip $R$ obtained from \Cref{lemma_plane_step_2.1}.}
\label{fig_plane_decision}
\end{figure}
Our current target region, the strip $R$ we obtained from \Cref{lemma_plane_step_2.1} (see \Cref{fig_plane_decision}), intersects few event polygons and we can compute them efficiently.
\begin{lemma}\label{lemma_plane_step_2.2}
The interior of the region $R$ intersects $O(n)$ event polygons, and we can compute them in $O(n \log n)$ time.
\end{lemma}
\begin{proof}
For a vertex $v$ of $Q$, it contributes the $O(n)$ event polygons of type~\ref{type2} that are the faces of $P - v$. The intersection of the boundary of $P - v$ with $S$ is a convex polygon. Since there are no intersections with edges of event polygons inside the strip $R$, at most two edges of the convex polygon can lie inside $R$, one on the ``front side'' and the other on the ``back side.''
To compute these two segments on $R$, we first consider the two sorted matrices given in the proof of \Cref{lemma_plane_step_2.1} that together describe the edges on the ``front side'' and look at the column associated to $-v$. We find, for each column, the two (or zero) adjacent entries that contain the $z$-coordinates of $R$ in between. The two of the at most four that are closest to the strip will be the endpoints of the segment that intersect the strip on the ``front side.'' Computing this segment takes $O(\log n)$ time since we can use binary search on the columns to find the desired entries. We do the same to find the segment on the ``back side.'' We do this for all vertices of $Q$ to find the $O(n)$ intersections with the event polygons of type~\ref{type2} in $O(n \log n)$ time.
For an edge $e$ of $P$, it contributes $O(n)$ event polygons of type~\ref{type3} that form the surrounding sides of a ``cylinder'' with base congruent to $-Q$. Again, each of these ``cylinders'' intersect the strip $R$ in at most two faces, so there are $O(n)$ intersections of $R$ with event polygons of type~\ref{type3}. We can compute these segments by performing the binary search on the row of one of the sorted matrices associated to the edge $e$. The two entries immediately below the strip and the two immediately above the strip define the at most two segments intersecting $R$. Similar to the procedure above, this takes $O(\log n)$ time for each edge of $P$, thus $O(n \log n)$ time in total.
\end{proof}
\subsubsection{PlaneDecision: Step 3}
Now, we have a target region $R$ and the $O(n)$ intersections it makes with the event polygons.
\begin{lemma}\label{lemma_plane_step_3}
In $O(n \log n)$ time, we can find a region $R' \subset R$ containing a good placement that does not intersect any of the $O(n)$ event polygons.
\end{lemma}
\begin{proof}
We recursively construct a $(1/2)$-cutting of the target region. By \Cref{lemma_cutting}, a $(1/2)$-cutting of constant size can be computed in $O(n)$ time. We perform \textbf{LineDecision} on the lines of the cutting to decide on which triangle to recurse. After $O(\log n)$ iterations, we have a target region $R'$ that intersects no event polygons. This procedure runs in $O(n \log n)$ time.
\end{proof}
Finally, since the overlap function is quadratic on our final region $R'$, we can solve for the maximum using standard calculus. After finding $\operatorname{max}_{v \in S} f(v)$ and a vector achieving it $O(n \log n)$ time, by \Cref{lemma_side_decision}, we can perform \textbf{PlaneDecision} on $S$ in the same time bound.
\begin{proposition}\label{prop_max_on_plane}
For a plane $S$, we can perform \textbf{PlaneDecision} in $O(n \log n)$ time.
\end{proposition}
\subsection{Stage 2}
With the general \textbf{PlaneDecision} at our disposal, we now move on to Stage $2$, the main component of our algorithm. We project the entire configuration space and the event polygons onto the $xz$-plane in order to find a target region $D$ whose preimage $D + l_y$ intersects few event polygons, where $l_y$ is the $y$-axis (see \Cref{fig_stage_2}).
\begin{figure}
\caption{Projecting onto the $xz$-plane.}
\label{fig_stage_2}
\end{figure}
The non-horizontal edges of the event polygons project to segments on the strip $0<z<1$ on the $xz$-plane. We characterize our desired region $D$ in the following lemma.
\begin{lemma}\label{lemma_stage_2.1}
For a region $D$ that does not intersect any of the segments that are the projections of the non-horizontal edges of the event polygons, the preimage $D + l_y$ intersects $O(n)$ event polygons.
\end{lemma}
\begin{proof}
For any region $D$ on the $xz$-plane, the set of event polygons that the ``tube'' $D + l_y$ intersects is precisely the set of projected event polygons that $D$ intersects. Now, let $D$ be a region that does not intersect any segment from the projections of the event polygons.
Let $s_1, s_2, \ldots, s_m$ be the segments that are the projections of the non-horizontal edges of $P$, and let $x_1, \ldots, x_q$ be the projections of the vertices of $Q$ on the $x$-axis and assume that they are sorted by decreasing $x$-coordinate. Then, the projections of the non-horizontal edges of the event polygons are precisely $s_i - x_j$.
We first split the segments into four groups. Let $s_1, \ldots, s_{m_1}$ be the projections of the non-horizontal edges of $P$ on the ``front side,'' and $s_{m_1+1}, \ldots, s_{m}$ be those on the ``back side.'' The at most two edges visible on both the front and the back may be repeated. Then the segments from either group are pairwise non-intersecting. Similarly, we split the vertices of $Q$ into a front side and a back side, including the at most two vertices visible on both the front and back in both sets. We consider the segments in the configuration space made by one of the two groups of edges of $P$ and one of the two groups of vertices of $Q$. The other three sets of segments are processed similarly.
Suppose that the segments we consider are $s_1, \ldots, s_{m_1}$, and the projected vertices are $x_1, \ldots, x_{q_1}$. Suppose the segments are sorted by increasing sum of the $x$-coordinates of their endpoints, and that the vertices are sorted by decreasing $x$-coordinate. The event polygons of type~\ref{type2} are the trapezoids or triangles between segments $s_i - x_j$ and $s_{i+1} - x_j$ for each of the four groups of segments. For each fixed projected vertex $x$, the region $D$ intersects at most one event polygon of type~\ref{type2} for each group. Thus, $D$ intersects $O(n)$ event polygons of type~\ref{type2}. Similarly, the event polygons of type~\ref{type3} are the parallelograms between segments $s_i - x_j$ and $s_i - x_{j+1}$ for each of the four groups of segments. For each fixed segment $s_i$, $D$ intersects at most one event polygon of type~\ref{type3}, thus it intersects $O(n)$ event polygons of type~\ref{type3} in total.
\end{proof}
Now it remains to efficiently find such a region $D$ with $D + l_y$ containing a goal placement and compute the $O(n)$ event polygons that intersect its interior.
\begin{lemma}\label{lemma_stage_2.2}
In $O(n \log^2 n)$ time, we can find a triangle $D$ in the $xz$-plane such that the interior of $D + l_y$ contains a goal placement and intersects $O(n)$ event polygons. We can compute these $O(n)$ event polygons in the same time bound.
\end{lemma}
\begin{proof}
The computation of $D$ is a direct application of \Cref{thm_prune_and_search}, where $m = O(n)$. Calling the oracle on a line $l$ in the $xz$-plane is running the \textbf{PlaneDecision} algorithm on the plane parallel to the $y$-axis that projects to $l$. We compute a triangle for each of the four groups of segments, take their intersection, and triangulate the intersection using $O(1)$ calls to \textbf{PlaneDecision}. Thus, we can compute the desired triangle $D$ in $O(n \log^2 n)$ time.
To compute the event polygons intersecting the interior of $D + l_y$ is simple, since we have shown in the proof of \Cref{lemma_stage_2.1} that $D$ intersects at most one projection of an event polygon of each type in each of the four groups for a fixed vertex $x_j$ (for type~\ref{type2}) or segment $s_i$ (for type~\ref{type3}). Once we have $D$, we can compute these polygons by binary search on each of the $O(n)$ groups of $O(n)$ non-intersecting segments to find the two between which $R$ lies. Also, the event polygons all have constant complexity so computing all of them takes linear time. We can recover the event polygons from their projections and compute the planes that contain them in linear time. Thus, this entire process can be done in $O(n \log n)$ time.
\end{proof}
\subsection{Stage 3}
Now, we have a target region $R = D+l_y$ whose interior contains a goal placement, and we have the $O(n)$ event polygons that intersect it.
\begin{lemma}\label{lemma_stage_3}
In $O(n \log^2 n)$ time, we can find a region $R' \subset R$ containing a goal placement that does not intersect any of the $O(n)$ event polygons.
\end{lemma}
\begin{proof}
We recursively construct a $(1/2)$-cutting of the target region. By \Cref{lemma_cutting}, a $(1/2)$-cutting of constant size can be computed in $O(n)$ time. We perform \textbf{PlaneDecision} on the planes of the cutting to decide on which simplex to recurse. After $O(\log n)$ iterations, we have a target region $R'$ that intersects no event polygons. This procedure runs in $O(n \log^2 n)$ time.
\end{proof}
Finally, since the overlap function is quadratic on our final region $R'$, we can solve for the maximum using standard calculus. This concludes the proof of \Cref{thm_algo}.
\section{Maximum overlap of three convex polygons}\label{sec_three_polygons}
Let $P$, $Q$, $R$ be three convex polygons with $n$ vertices in total in the plane. We want to find a pair of translations $(v_{Q}, v_{R}) \in \mathbb{R}^4$ that maximizes the overlap area $g(v_{Q}, v_{R}) = |P \cap (Q + v_{Q}) \cap (R + v_{R})|$.
In this problem, the configuration space is four-dimensional. An easy extension of \Cref{prop_concavity} and \Cref{cor_unimodality} shows that the function of overlap area is again unimodal. This time, we have four-dimensional \textit{event polyhedra} instead of event polygons that divide the configuration space into four-dimensional cells on which $g(v_{Q}, v_{R})$ is quadratic. We call a hyperplane containing an event polyhedron an \textit{event hyperplane}, and they are defined by two types of events:
\begin{enumerate}[label = (\Roman*)]
\item \label{hypertype1} When one vertex of $P$, $Q + v_Q$ or $R + v_R$ lies on an edge of another polygon. There are $O(n)$ groups of $O(n)$ parallel event hyperplanes of this type.
\item \label{hypertype2} When an edge from each of the three polygons intersect at one point. There are $O(n^3)$ event hyperplanes of this type.
\end{enumerate}
To overcome the difficulty of dealing with the $O(n^3)$ event hyperplanes of type~\ref{hypertype2}, we first prune the configuration space to a region intersecting no event hyperplanes of type~\ref{hypertype1}. We then show that the resulting region only intersects $O(n)$ event hyperplanes of type~\ref{hypertype2}.
Similar to \Cref{thm_algo}, we want an algorithm \textbf{HyperplaneDecision} that computes, for a hyperplane $H \subset \mathbb{R}^4$, the maximum $\operatorname{max}_{(v_Q,v_R) \in H} g(v_Q, v_R)$ and the relative location of the goal placement to $H$. In fact, we will only need to perform \textbf{HyperplaneDecision} on some hyperplanes.
\begin{proposition}\label{prop_hyperplane_decision}
Suppose $H$ is a hyperplane that satisfies one of the following three conditions:
\begin{enumerate}[label = (\arabic*)]
\item $H$ is orthogonal to a vector $(x_1, y_1, 0, 0)$ for some $x_1, y_1 \in \mathbb{R}$.
\item $H$ is orthogonal to a vector $(0, 0, x_2, y_2)$ for some $x_2, y_2 \in \mathbb{R}$.
\item $H$ is orthogonal to a vector $(x_1, y_1, -x_1, -y_1)$ for some $x_1, y_1 \in \mathbb{R}$.
\end{enumerate}
Then, we can perform \textbf{HyperplaneDecision} on $H$ in $O(n \log^2 n)$ time.
\end{proposition}
\begin{proof}
We provide the algorithm for $H$ orthogonal to $(x_1, y_1, 0, 0)$ for some $x_1, y_1 \in \mathbb{R}$, and the other two types follow similarly.
We reinterpret the problem of finding $\operatorname{max}_{(v_Q,v_R) \in H} g(v_Q, v_R)$ as a polyhedron-polygon matching problem. In $H$, we allow $R$ to move freely, and $Q$ moves in a line $l$ perpendicular to $(x_1, y_1)$. We parameterize $l$ by $l = p + vt$, and form the convex polyhedron (see \Cref{fig_ipq})
\[
I_{PQ} = \{(x, y, t)| (x, y) \in P\} \cap \{(x, y, t)| (x, y) \in (Q + p + vt)\}.
\]
By \cite{chazelle1992}, $I_{PQ}$ can be computed in $O(n)$ time. In addition, the cross-section of $I_{PQ}$ at $t = t_0$ is $P \cap (Q + p + vt_0)$. Then, we see that finding $\operatorname{max}_{(v_Q, v_R) \in H} g(v_Q, v_R)$ is the same as finding a translation maximizing the intersection of $I_{PQ}$ and $R$. By \Cref{thm_algo}, this can be done in $O(n \log^2 n)$ time.
Using the formal perturbation argument in \Cref{lemma_side_decision}, \textbf{HyperplaneDecision} on $H$ can be completed in the same time bound.
\begin{figure}
\caption{The convex polyhedron $I_{PQ}$.}
\label{fig_ipq}
\end{figure}
\end{proof}
Using \Cref{prop_hyperplane_decision}, we can prune the configuration space to a region that intersects no event hyperplanes of type~\ref{hypertype1} and $O(n)$ event hyperplanes of type~\ref{hypertype2}.
\begin{proposition}\label{prop_Tpqr}
We can compute a 4-polytope $T_{PQR}$ of complexity $O(1)$ in $O(n\log^3 n)$ time such that
\begin{enumerate}[label = (\arabic*)]
\item the goal placement lies on $T_{PQR}$,
\item no hyperplane of type~\ref{hypertype1} intersects the interior of $T_{PQR}$, and
\item only $O(n)$ event polyhedra of type~\ref{hypertype2} pass through $T_{PQR}$.
\end{enumerate}
The hyperplanes of type~\ref{hypertype2} intersecting the interior of $T_{PQR}$ are obtained in the same time bound. Furthermore, the 3-tuples of edges of $P$, $Q$ and $R$ defining the hyperplanes are also obtained in the same time bound.
\end{proposition}
\begin{proof}
If a \textbf{HyperplaneDecision} reports a goal placement, we are done. Thus, we assume that \textbf{HyperplaneDecision} always reports a halfspace containing a goal placement.
Each event hyperplane containing an event polyhedron of a vertex of $P$ on an edge of $Q + v_Q$ or an event polyhedron of a vertex of $Q + v_Q$ on an edge of $P$ is orthogonal to some $(x_1, y_1, 0, 0)$. We project all these event hyperplanes into the $2$-flat $S_{PQ} = \{(x_1, y_1, 0, 0) | x_1, y_1 \in \mathbb{R}\}$. Then, the images are $O(n)$ groups of $O(n)$ parallel lines. We can therefore apply \Cref{thm_prune_and_search} to these lines, where an oracle call on a line $l$ is running \textbf{HyperplaneDecision} on the hyperplane that projects to $l$ on $S_{PQ}$, which is orthogonal to some $(x_1, y_1, 0, 0)$. Thus, by \Cref{prop_hyperplane_decision}, we can find a triangle $T_{PQ} \subset S_{PQ}$ whose interior does not intersect any event hyperplane as described above in $O(n\log^3 n)$ time.
Similarly, we can find the triangles
\[
T_{PR} \subset \{(0, 0, x_2, y_2) | x_2, y_2 \in \mathbb{R}\} \quad \text{and} \quad T_{QR} \subset \{(x_1, y_1, -x_1, -y_1) | x_1, y_1 \in \mathbb{R}\}
\]
corresponding to the other event hyperplanes of type~\ref{hypertype1} in $O(n\log^3 n)$ time. Then, the interior of
\begin{align*}
T_{PQR} = \{(x_1, y_1, x_2, y_2) | & (x_1, y_1, 0, 0) \in T_{PQ}, \, (0, 0, x_2, y_2) \in T_{PR}, \, \\
& \left(\frac{x_1 - x_2}{2}, \frac{y_1 - y_2}{2}, \frac{x_2 - x_1}{2}, \frac{y_2 - y_1}{2} \right) \in T_{QR}\}
\end{align*}
does not intersect any event hyperplane of type~\ref{hypertype1} and contains a goal placement.
Since the interior of $T_{PQR}$ intersects no event hyperplane of type~\ref{hypertype1}, the pairwise configuration of $P$ and $Q$, $P$ and $R$, $Q$ and $R$ are fixed (the pairwise edge incidences are fixed). Since any edge $e_P$ of $P$ intersects at most two edges of $Q$ and at most two edges of $R$ inside $T_{PQR}$, there are at most four event hyperplanes of type~\ref{hypertype2} where $e_P$ is concurrent with an edge of $Q$ and an edge of $R$. Thus, at most $4n$ event hyperplanes of type~\ref{hypertype2} intersect the interior of $T_{PQR}$.
\end{proof}
In the rest of the section, we fix $T_{PQR}$ as in \Cref{prop_Tpqr}. Moreover, let
\[
f(v_Q,v_R) = \begin{cases}
|P \cap (Q + v_Q) \cap (R + v_R)| & \text{if } (v_Q, v_R)\in T_{PQR} \\
0 & \text{otherwise.}
\end{cases}
\]
\begin{proposition}\label{prop_suppf}
Let $S$ be any $m$-flat in the configuration space. In $O(n)$ time, we can find a point in $S \cap \mathrm{supp\,} f$, or report that $S \cap \mathrm{supp\,} f$ is empty.
\end{proposition}
\begin{proof}
Notice that $\mathrm{supp\,} f$ is a convex 4-polytope whose faces lie on hyperplanes of type~\ref{hypertype1} or type~\ref{hypertype2}. Let $H$ be a hyperplane of type~\ref{hypertype2} intersecting the interior of $T_{PQR}$. Then $H$ contains a face of $\mathrm{supp\,} f$ if and only if a polygon $P \cap Q$ is tangent to $R$ in $H \cap T_{PQR}$. This can be tested in constant time, so we can find all faces of $\mathrm{supp\,} f$ in $O(n)$ time. Our problem becomes a feasibility test for a linear program of size $O(n)$, which can be solved in $O(n)$ time by Megiddo's algorithm \cite{megiddo1984}.
\end{proof}
\begin{proof}[Proof of \Cref{thm_three_polygons}]
Take $T_{PQR}$ as in \Cref{prop_Tpqr}. Let
\[
f(v_Q,v_R) = \begin{cases}
|P \cap (Q + v_Q) \cap (R + v_R)| & \text{if } (v_Q, v_R)\in T_{PQR} \\
0 & \text{otherwise.}
\end{cases}
\]
Then $f$ is unimodal and the maximum of $f$ is attained at the goal placement. Given an $m$-flat $S$, we want to compute the maximum of $f$ on $S$ in $O(n \log^{m-1} n)$ time by induction on $m \in \{1, 2, 3, 4\}$.
If $m = 1$, this can be done in $O(n)$ time by \Cref{prop_line_decision}. Assume that $m > 1$. Then $S \cap T_{PQR}$ can be computed in $O(1)$ time. Given an $(m-1)$-flat $l \subset S$, we can use \Cref{prop_suppf} and the perturbation method as in \Cref{lemma_side_decision} to report the relative position of the maximum over $S$. There are $O(n)$ event hyperplanes intersecting $S\cap T_{PQR}$. Thus, by \Cref{lemma_cutting}, we can recursively construct $(1/2)$-cuttings to give an $O(n \log^{m-1} n)$ time algorithm to find the maximum of $f$ on $S$.
\end{proof}
\section{Minimum symmetric difference of two convex polygons under homothety}\label{sec_symmetric_difference}
A homothety $\varphi\colon\mathbb{R}^2\rightarrow\mathbb{R}^2$ is a composition of a scaling and a translation. Let $\lambda>0$ be the scaling factor and $v$ be the translation vector of $\varphi$. Then
\[\varphi(A)=\lambda A + v = \{\lambda p + v \mid p\in A\}.\]
Define the \textit{symmetric difference} of sets $A, B \subset \mathbb{R}^2$ to be
\begin{align*}
A \triangle B := & (A \cup B) \setminus (A \cap B) \\
= & (A \setminus B) \cup (B \setminus A).
\end{align*}
Let $P$ and $Q$ be convex polygons with $n$ vertices in total. We want to find a homothety $\varphi$ of $Q$ that minimizes the area of symmetric difference
\[
h(\varphi) = h(x,y, \lambda) = |P \triangle \varphi(Q)|,
\]
where $\varphi(Q) = \lambda Q + (x,y)$.
Yon et al. \cite{yon2016} consider a slightly more general problem, where they minimize the function
\[
h(\varphi) = (2 - 2\kappa)|P \setminus \varphi(Q)| + 2\kappa |\varphi(Q) \setminus P|,
\]
where $\kappa \in (0,1)$ is some constant. When $\kappa = 1/2$, this is the area of symmetric difference function. They give a randomized algorithm that solves this problem in $O(n \log^3 n)$ expected time. We present a faster deterministic algorithm by relating this problem to the polyhedron-polygon matching problem and then applying a modified version of \Cref{thm_algo}.
As in \cite{yon2016}, we rewrite the objective function $h(\varphi)$:
\begin{align*}
h(\varphi)
&= 2(1 - \kappa)|P| + 2\kappa|\varphi(Q)| - 2|P \cap \varphi(Q)|\\
&= 2(1 - \kappa)|P| + 2\kappa |Q| \lambda^2 - 2 |P \cap \varphi(Q)|.
\end{align*}
\begin{figure}
\caption{Formation of the cone $C$.}
\label{fig_cone}
\end{figure}
Thus, minimizing $h(\varphi)$ is the same as maximizing $f(\varphi) = |P \cap \varphi(Q)| - c \lambda^2$, where $c = \kappa |Q|$. Consider the cone $C = \{(x, y, \lambda)| \lambda \in [0, M], (x, y) \in \lambda Q\}$, where $M = \sqrt{|P|/c}$ (see \Cref{fig_cone}). Then $f$ is negative for $\lambda > M$ so it is never maximized. We also put $P$ into $\mathbb{R}^3$ by $P = \{(x, y, 0) | (x, y) \in P\}$. Since $f(x, y, \lambda) = |C \cap (P + (-x, -y, \lambda))| - c \lambda^2$, the problem reduces to maximizing the overlap area of the cone $C$ and $P$ under translation subtracted by a quadratic function. To show that we can still use a divide-and-conquer strategy, we identify a region where $f$ is strictly unimodal.
\begin{lemma}[\cite{yon2016}]\label{lemma_region_D}
The closure $\mathcal{D}$ of the set $\{\varphi \in \mathbb{R}^3| f(\varphi) > 0\}$ is convex. Furthermore, $f(x, y, \lambda)$ is strictly unimodal on $\mathcal{D}$.
\end{lemma}
\begin{proof}
This follows from \cite[Lemma 2.2]{yon2016} and \cite[Lemma 2.7]{yon2016}.
\end{proof}
Although it is difficult to directly compute $\mathcal{D}$, note that $-P \subset \mathcal{D}$. With this observation, we show that we can still find the relative position of the set of goal placements to certain planes $S$ in $O(n \log n)$ time with some modifications to
\textbf{LineDecision} and \textbf{PlaneDecision}.
\begin{lemma}\label{lemma_modified_line_max}
For any line $l \subset \mathbb{R}^3$, we can compute $\operatorname{max}_{\varphi \in l} f(\varphi)$ or report it is a negative number in $O(n)$ time.
\end{lemma}
\begin{proof}
If $l$ is horizontal, then we can apply \Cref{prop_line_decision} since $c \lambda$ is constant. Otherwise, we parameterize $l$ by $l = p + v t$ and construct the convex polyhedron $I$ whose cross-section $I(t_0)$ at $t = t_0$ has area $|C \cap (P + (p + vt_0))|$ as in the proof of \Cref{prop_line_decision}. It comes down to maximizing $|I(t)| - c(\lambda(t))^2$, where $\lambda(t)$ is the $\lambda$-coordinate of $p + vt$. Since $\sqrt{|I(t)|}$ is a concave function, $\sqrt{|I(t)|} - \sqrt{c} \lambda(t)$ is also concave, and has the same complexity as $\sqrt{|I(t)|}$. Thus, we can apply \cite[Theorem 3.2]{avis1996} to find the maximum of $\sqrt{|I(t)|} - \sqrt{c} \lambda(t)$. Suppose it is achieved at $t'$. Although $t'$ may not be where the maximum of $|I(t)| - c(\lambda(t))^2$ is, it tells us whether the maximum is positive. If not, we can simply terminate the process. If it is, we know that $l$ intersects $\mathcal{D}$, and $p + v t' \in \mathcal{D}$. This allows us to use divide-and-conquer as in \cite{avis1996}, since we can recurse in the direction of $t'$ whenever we query a point $t$ and find $f(t) < 0$.
\end{proof}
\begin{proposition}\label{prop_modified_plane}
Let $S \subset \mathbb{R}^3$ be a plane. If $S$ is horizontal or if $S$ intersects the polygon $- P \subset \mathcal{D}$, then we can perform \textbf{PlaneDecision} on $S$ in $O(n \log n)$ time.
\end{proposition}
\begin{proof}
If $S$ is horizontal, then we can apply \Cref{pseudo_plane}. If the maximum is negative, then we simply report the side of $S$ containing $-P$, otherwise we proceed as in \Cref{lemma_side_decision}.
Now assume $S$ is non-horizontal and intersects $-P$. Let $s = S \cap (-P)$. Then we know that $s \subset \mathcal{D}$. Let $l \subset S$ be a line we want to run the subroutine \textbf{LineDecision} on. By \Cref{lemma_modified_line_max}, we can find $\operatorname{max}_{\varphi \in l} f(\varphi)$ or report it is negative in $O(n)$ time. If it is the latter case, we report the side of $l$ containing $s$. Otherwise, $l$ intersects $\mathcal{D}$, and we can proceed as in \Cref{lemma_side_decision}. Thus, we can still find $\operatorname{max}_{\varphi \in S} f(\varphi)$ in $O(n \log n)$ time. Since $S$ intersects $\mathcal{D}$, we can use \Cref{lemma_side_decision} to complete \textbf{PlaneDecision} on $S$.
\end{proof}
\begin{theorem}\label{thm_general_symm_diff}
Let $P$ and $Q$ be convex polygons with $n$ vertices in total. Suppose $\kappa \in (0, 1)$ is a constant. We can find a homothety $\varphi$ that minimizes
\[
h(\varphi) = 2(1 - \kappa)|P \setminus \varphi(Q)| + 2\kappa |\varphi(Q) \setminus P|
\]
in $O(n \log^2 n)$ time.
\end{theorem}
\begin{proof}
We want to maximize $f(x, y, \lambda) = |C \cap (P + (-x, -y, \lambda))| - c \lambda^2$ over $\mathbb{R}^3$, where $c = \kappa |Q|$.
In order to apply our algorithm for \Cref{thm_algo}, we need to show that we only run \textbf{PlaneDecision} on horizontal planes and planes that intersect $- P$.
In the first stage (as outlined in \Cref{pseudo_algo}), we only run \textbf{PlaneDecision} on horizontal planes.
In the second stage, we apply \Cref{thm_prune_and_search} to the $O(n)$ groups of $O(n)$ lines that are the projections of the lines containing edges of event polygons on the $xz$-plane. Observe that these lines all intersect the projection of $- P$ on the $xz$-plane. In each recursive step of our algorithm, we query a horizontal (parallel to the $x$-axis) line and a line that goes ``between'' two lines in the $O(n^2)$ lines. The planes they represent both satisfy the condition for \Cref{prop_modified_plane}. Then we run \textbf{PlaneDecision} $O(1)$ more times to triangulate our feasible region. Here, we make a small modification: instead of maintaining a triangular feasible region, we maintain a trapezoidal one by making $O(1)$ horizontal cuts to make the region a trapezoid.
In the third stage, we have a ``tube'' and $O(n)$ event polygons that intersect it. As usual, we recursively construct a $(1/2)$-cutting by \Cref{lemma_cutting}. Chazelle's algorithm \cite{chazelle1993} picks $O(1)$ planes intersecting the target region as the cutting, along with $O(1)$ extra planes to triangulate each piece. All the planes containing the event polygons intersect $- P$, so we can run \textbf{PlaneDecision} on them. Instead of triangulating our target region, it suffices to reduce it to constant complexity. We do this by cutting it with $O(1)$ horizontal planes such that the remaining region only has vertices on two levels. Then, let $e$ be any non-horizontal edge. With $O(1)$ planes through $e$, we can cut the target region into prisms and pyramids with triangular bases. These planes all intersect $- P$ since they are between the two faces of the target region containing $e$, and the planes containing them intersect $- P$.
Therefore, with slight modifications to \Cref{thm_algo}, we obtain a deterministic $O(n \log^2 n)$ algorithm for minimizing $h(\varphi)$.
\end{proof}
\Cref{thm_symmetric_difference} follows as a direct corollary of \Cref{thm_general_symm_diff}.
\printbibliography
\end{document} |
\begin{document}
\title{\bf Wall divisors on irreducible symplectic orbifolds of Nikulin-type}
\author{Gr\'egoire \textsc{Menet}; Ulrike \textsc{Riess}}
\maketitle
\begin{abstract}
We determine the wall divisors on irreducible symplectic orbifolds which are deformation
equivalent to a special type of examples, called Nikulin orbifolds. The Nikulin orbifolds are obtained as partial resolutions in codimension 2 of a quotient by a symplectic involution of a Hilbert scheme of 2 points on a K3 surface.
This builds on the previous article \cite{Menet-Riess-20} in which the theory of wall divisors was
generalized to orbifold singularities.
\end{abstract}
\section{Introduction}
\subsection{Motivations and main results}
During the last years, many efforts have been made to extend the theory of smooth compact varieties with trivial first
Chern class to a framework of varieties admitting some singularities.
Notably, let us cite, the generalization of the Bogomolov decomposition theorem
\cite{Bakker}.
One of the motivations for such generalizations is given by the minimal model program
in which certain singular varieties appear naturally.
More specifically, in the theory of irreducible symplectic varieties, many generalizations can be mentioned.
One of the most important concerns the global Torelli theorem which allows to obtain geometrical information on the variety from its period (\cite{Bakker-Lehn-GlobalTorelli}, \cite{Menet-2020} and \cite{Menet-Riess-20}).
In this paper, we are considering a specific kind of singularities: quotient singularities. A complex analytic space with only quotient singularities is called an \emph{orbifold}. An orbifold $X$ is called
\emph{irreducible holomorphically symplectic} if $X\smallsetminus \Sing X$ is simply connected, admits a
unique (up to a scalar multiple), non-degenerate holomorphic 2-form and $\codim \Sing X\geq 4$ (Definition
\ref{def}). The framework of irreducible symplectic orbifolds appears to be very favorable. In particular,
general results about the Kähler cone have been generalized for the first time in this context (see
\cite{Menet-Riess-20}). This is particularly important, since knowledge on the Kähler cone is needed to be
able to apply the global Torelli theorem (see Theorem \ref{mainHTTO}) effectively. The key tool used to study
the Kähler cone of irreducible symplectic orbifolds are wall divisors (originally introduced for the smooth
case in \cite{Mongardi13}).
\begin{defi}[{\cite[Definition 4.5]{Menet-Riess-20}}]
Let $X$ be an irreducible symplectic orbifold and let $D\in\Pic(X)$. Then $D$ is called a \emph{wall divisor}
if $q(D)<0$ and $g(D^{\bot})\cap \BK_X =\emptyset$, for all $g\in \Mon^2_{\Hdg}(X)$, where $q$ denotes the
famous Beauville--Bogomolov form on $H^2(X,{\mathbb Z})$.
\end{defi}
In particular, we recall that the Kähler classes can be characterized by their intersections with the wall
divisors (see Corollary \ref{cor:desrK}). The definitions of the birational Kähler cone $\BK_X$ and the Hodge monodromy group $\Mon^2_{\Hdg}(X)$ are recalled in Section \ref{Kählersection} and Definition \ref{transp} respectively.
A very practical feature of wall divisors is their deformation invariance. More precisely, let $\Lambda$ be a lattice of signature $(3,\rk\Lambda-3)$ and $(X,\varphi)$ a marked irreducible symplectic orbifold with $\varphi:H^2(X,\Z)\simeq \Lambda$. Then there exists a set $\mathscr{W}_{\Lambda}\subset \Lambda$ such that for all $(Y,\psi)$ deformation equivalent to $X$, the set $\psi^{-1}(\mathscr{W}_{\Lambda})\cap H^{1,1}(Y,\Z)$ is the set of wall divisors of $Y$ (see Theorem \ref{wall}). We call the set $\mathscr{W}_{\Lambda}$ the \emph{set of wall divisors of the deformation class of $X$}.
In this paper, we are going to provide the first description of the wall divisors of a deformation class of singular irreducible symplectic varieties.
The most "popular" singular irreducible symplectic variety, in the literature (see \cite[Section 13, table 1, I2]{Fujiki-1983}, \cite{Marku-Tikho}, \cite{Menet-2014}, \cite{Menet-2015}, \cite[Section 3.2 and 3.3]{Menet-Riess-20}, \cite{Camere}), is denoted by $M'$ and recently named Nikulin orbifold in \cite{Camere};
it is obtained as follows. Let $X$ be an irreducible symplectic manifold of $K3^{[2]}$-type and $\iota$ a symplectic involution on $X$.
By \cite[Theorem 4.1]{Mongardi-2012}, $\iota$ has 28 fixed points and a fixed K3 surface $\Sigma$.
We obtain $M'$ as the blow-up of $X/\iota$ in the image of $\Sigma$ (see Example \ref{exem}); we denote by $\Sigma'$ the exceptional divisor.
The orbifolds deformation equivalent to this variety will be called \emph{orbifolds of Nikulin-type}.
We also recall that the Beauville--Bogomolov lattice of the orbifolds of Nikulin-type is $U(2)^3\oplus
E_8(-1)\oplus(-2)^2$ (see Theorem \ref{BBform}).
\begin{thm}\label{main}
Let $\Lambda:=U(2)^3\oplus E_8(-1)\oplus(-2)^2$.
The set $\mathscr{W}_{M'}$ of wall divisors of Nikulin-type orbifolds is given by:
$$\mathscr{W}_{M'}=\left\{D\in\Lambda\left|
\begin{array}{lll}
D^2=-2, & {\rm div}(D)=1, & \\
D^2=-4, & {\rm div}(D)=2, &\\
D^2=-6, & {\rm div}(D)=2, & \text{and}\\
D^2=-12, & {\rm div}(D)=2 & \text{if\ }D_{U(2)^3}\text{\ is divisible by\ }2
\end{array}
\right.\right\},$$
where $D_{U(2)^3}$ is the projection of $D$ to the $U(2)^3$-part of the lattice.
\end{thm}
The divisibility ${\rm div}$ is defined in Section \ref{notation} below.
\begin{remark}
Note that if one chooses an automorphism $\varphi$ of the lattice $\Lambda$, the conditions that $D_{U(2)^3}$
and $\varphi(D)_{U(2)^3}$ are divisible by 2 are equivalent for elements with $D^2=-12$ and ${\rm div}(D)=2$.
\TODO{Do you want to keep this: "(in this case it admits to distinguis between Cases 1 and 7 in Theorem \ref{thm:9monorb-M'})."}
\end{remark}
Combined with the global Torelli theorem (Theorem \ref{mainHTTO}), the previous theorem can be used for studying automorphisms on orbifolds of Nikulin-type.
As an example of application, we construct a symplectic involution on orbifolds of Nikulin-type which is not induced by a symplectic involution on a Hilbert scheme of 2 points on a K3 surface (\emph{non-standard involution}) (see Section \ref{Application}).
\begin{prop}\label{main2}
Let $X$ be an irreducible symplectic orbifold of Nikulin-type such that there exists $D\in\Pic (X)$ with $q(D)=-2$ and ${\rm div}(D)=2$. Then, there exists an irreducible symplectic orbifold $Y$ bimeromorphic to $X$ and a symplectic involution $\iota$ on $Y$ such that:
$$H^2(Y,\Z)^{\iota}\simeq U(2)^3\oplus E_8(-1)\oplus (-2)\ \text{and}\ H^2(Y,\Z)^{\iota\bot}\simeq (-2).$$
\end{prop}
The proof of this Proposition is given in Section \ref{Application}.
For the proof of Theorem \ref{main} we need to show that the following two operators are monodromy operators.
The reflections $R_D$ on the second cohomology group are defined in Section \ref{notation} below.
\begin{prop}[{Compare Corollaries \ref{Sigma'}, \ref{cor:Chiara}, and \ref{Lastmonodromy}}]\label{MonoIntroduction}
\
\begin{itemize}
\item[(i)]
The reflection $R_{\Sigma'}$ is a monodromy operator of $M'$.
\item[(ii)]
More generally,
let $X$ be an orbifold of Nikulin-type and $\alpha\in H^2(X,\Z)$ which verifies one of the two numerical conditions:
$$\left\{\begin{array}{ll}
q(\alpha)=-2 & \text{and}\ {\rm div}(\alpha)=2,\ \text{or}\\
q(\alpha)=-4 & \text{and}\ {\rm div}(\alpha)=2.
\end{array}\right.$$
Then $R_{\alpha}$ is a monodromy operator.
\end{itemize}
\end{prop}
\begin{rmk}
Note that Proposition \ref{MonoIntroduction} (i) can also be obtained from the recent result of Lehn--Mongardi--Pacienza \cite[Theorem 3.10]{Lehn2}.
\end{rmk}
\subsection{Organization of the paper and sketch of the proof}
The paper is organized as follows. In Section \ref{reminders}, we provide some reminders related to irreducible symplectic orbifolds, especially from
\cite{Menet-Riess-20} where the theory of the Kähler cone have been developed. In Section \ref{M'section0}, we provide some reminders about the orbifold $M'$
especially from \cite{Menet-2015}; moreover, we investigate the monodromy operators of $M'$ inherited from the ones on the Hilbert schemes of two points on K3 surfaces.
In Section \ref{genericM'0}, we determine the wall divisors of an orbifold $M'$ obtained from a very general K3 surface endowed with a symplectic involution $(S,i)$.
As a corollary, we can prove Proposition \ref{MonoIntroduction} (i). Our main tool to determine wall divisors is Proposition \ref{extremalray} which says that the dual divisor of an extremal ray of the cone of classes of effective curves (the \emph{Mori cone}) is a wall divisor.
The proof of Theorem \ref{main} is then divided into two parts. The first part (Section \ref{extremalcurves}) consists in finding enough examples of extremal rays of Mori cones in several different $M'$-orbifolds; the second part (Section \ref{sec:monodromy-orbits}) consists in using our knowledge on the monodromy group of $M'$ to show that we have found all possible wall divisors.
Finally, Section \ref{Application} is devoted to the proof of Proposition \ref{main2}.
\subsection{Notation and convention}\label{notation}
\begin{itemize}
\item
Let $\Lambda$ be a lattice of signature $(3,\rk \Lambda -3)$. Let $x\in \Lambda$ such that $x^2< 0$. We define the \emph{reflection} $R_x$ associated to $x$ by:
$$R_x(\lambda)=\lambda-\frac{2\lambda\cdot x}{x^2}x,$$
for all $\lambda\in\Lambda$.
\item
In $\Lambda$, we define the \emph{divisibility} of an element $x\in\Lambda$ as the integer $a\in \N^*$ such
that $x\cdot \Lambda =a\Z$. We denote by ${\rm div}(x)$ the divisibility of $x$.
\item
Let $X$ be a manifold and $C\subset X$ a curve. We denote by $\left[C\right]_{X}$ the class in $X$ of the curve $C$.
\end{itemize}
~\\
\textbf{Acknowledgements}:
We are very grateful to the Second Japanese-European Symposium on
Symplectic Varieties and Moduli Spaces where our collaboration was initiated. The first author has been
financed by the ERC-ALKAGE grant No. 670846 and by the PRCI SMAGP (ANR-20-CE40-0026-01).
The second author is a member of the Institute for Theoretical Studies at ETH Zürich
(supported by Dr.~Max R\"ossler, the Walter Haefner Foundation and the ETH Z\"urich
Foundation).
\section{Reminders on irreducible symplectic orbifolds}\label{reminders}
\subsection{Definition}\label{basicdef}
In this paper an \emph{orbifold} is a complex space with only quotient singularities.
\begin{defi}\label{def}
An irreducible symplectic orbifold (or hyperkähler orbifold) is a compact Kähler orbifold $X$ such that:
\begin{itemize}
\item[(i)]
$\codim\Sing X\geq4$,
\item[(ii)]
$H^{2,0}(X)=\C \sigma$ with $\sigma$ non-degenerate on $X_{\reg}:=X\smallsetminus \Sing X$,
\item[(iii)]
$\pi_1(X_{\reg})=0$.
\end{itemize}
\end{defi}
We refer to \cite[Section 6]{Campana-2004}, \cite[Section 3.1]{Menet-2020}, \cite[Section 3.1]{Fu-Menet} and \cite[Section 2.1]{Menet-Riess-20} for discussions about this definition.
\begin{ex}[{\cite[Section 3.2]{Menet-2020}}]\label{exem}
Let $X$ be a hyperkähler manifold deformation equivalent to a Hilbert scheme of 2 points on a K3 surface and $\iota$ a symplectic involution on $X$.
By \cite[Theorem 4.1]{Mongardi-2012}, $\iota$ has 28 fixed points and a fixed K3 surface $\Sigma$.
We denote by $M'$ the blow-up of $X/\iota$ in the image of $\Sigma$. The orbifold $M'$ is irreducible
symplectic (see \cite[Proposition 3.8]{Menet-2020}).
\end{ex}
\begin{defi}\label{Nikulin}
An orbifold $M'$ constructed as before is called a \emph{Nikulin orbifold}. An irreducible symplectic orbifold deformation equivalent to a Nikulin orbifold is called an orbifold of \emph{Nikulin-type}.
\end{defi}
\subsection{Moduli space of marked irreducible symplectic orbifolds}\label{per}
Let $X$ be an irreducible symplectic orbifold.
As explained in \cite[Section 3.4]{Menet-2020}, $H^2(X,\Z)$ is endowed with a quadratic form of signature $(3,b_2(X)-3)$ called the Beauville--Bogomolov form and denoted by $q_{X}$ (the bilinear associated form is denoted by $(\cdot,\cdot)_{q_X}$ or $(\cdot,\cdot)_{q}$ when there is no ambiguity).
Let $\Lambda$ be a lattice of signature $(3,\rk \Lambda-3)$. We denote $\Lambda_{\mathbb{K}}:=\Lambda\otimes \mathbb{K}$ for $\mathbb{K}$ a field.
A \emph{marking} of $X$ is an isometry $\varphi: H^{2}(X,\Z)\rightarrow \Lambda$.
Let $\mathcal{M}_{\Lambda}$ be the set of isomorphism classes of marked irreducible symplectic orbifolds $(X,\varphi)$ with $\varphi:H^2(X,\Z)\rightarrow\Lambda$. As explained in \cite[Section 3.5]{Menet-2020}, this set can be endowed with a non-separated complex structure such that the \emph{period map}:
$$\xymatrix@R0cm@C0.5cm{\ \ \ \ \ \ \ \ \mathscr{P}:& \mathcal{M}_{\Lambda}\ar[r]& \mathcal{D}_{\Lambda}\\
&(X,\varphi)\ar[r]&\varphi(\sigma_X)}$$
is a local isomorphism with $\mathcal{D}_{\Lambda}:=\left\{\left.\alpha\in \mathbb{P}(\Lambda_{\C})\ \right|\ \alpha^2=0,\ \alpha\cdot\overline{\alpha}>0\right\}$. The complex manifold $\mathcal{M}_{\Lambda}$ is called \emph{the moduli space of marked irreducible symplectic orbifolds of Beauville--Bogomolov lattice $\Lambda$}.
Moreover there exists a \emph{Hausdorff reduction} of $\mathcal{M}_{\Lambda}$.
\begin{prop}[\cite{Menet-2020}, Corollary 3.25]
There exists a Hausdorff reduction $\overline{\mathcal{M}_{\Lambda}}$ of $\mathcal{M}_{\Lambda}$ such that the period map $\mathscr{P}$ factorizes through $\overline{\mathcal{M}_{\Lambda}}$:
$$\xymatrix{\mathcal{M}_{\Lambda}\ar@/^1pc/[rr]^{\mathscr{P}}\ar@{->>}[r]& \overline{\mathcal{M}_{\Lambda}}\ar[r]& \mathcal{D}_{\Lambda}.}$$
Moreover, two points in $\mathcal{M}_{\Lambda}$ map to the same point in $\overline{\mathcal{M}_{\Lambda}}$ if and only if they are non-separated in $\mathcal{M}_{\Lambda}$.
\end{prop}
\subsection{Global Torelli theorems}
\begin{thm}[\cite{Menet-2020}, Theorem 1.1]\label{mainGTTO}
Let $\Lambda$ be a lattice of signature $(3,b-3)$, with $b\geq3$. Assume that $\mathcal{M}_{\Lambda}\neq\emptyset$ and let $\mathcal{M}_{\Lambda}^{o}$ be a connected component of $\mathcal{M}_{\Lambda}$. Then the period map:
$$\mathscr{P}: \overline{\mathcal{M}_{\Lambda}}^{o}\rightarrow \mathcal{D}_{\Lambda}$$
is an isomorphism.
\end{thm}
There also exists a Hodge version of this theorem, which we state in the following.
\begin{defi}\label{transp}
Let $X_1$ and $X_2$ be two irreducible symplectic orbifolds. An isometry $f:H^{2}(X_{1},\Z)\rightarrow H^{2}(X_{2},\Z)$ is called a \emph{parallel transport operator} if there exists a deformation $s:\mathcal{X}\rightarrow B$, two points $b_{i}\in B$, two isomorphisms $\psi_{i}:X_{i}\rightarrow \mathcal{X}_{b_{i}}$, $i=1,2$ and a continuous path $\gamma:\left[0,1\right]\rightarrow B$ with $\gamma(0)=b_{1}$, $\gamma(1)=b_{2}$ and such that the parallel transport in the local system $Rs_{*}\Z$ along $\gamma$ induces the morphism $\psi_{2*}\circ f\circ\psi_{1}^{*}: H^{2}(\mathcal{X}_{b_{1}},\Z)\rightarrow H^{2}(\mathcal{X}_{b_{2}},\Z)$.
Let $X$ be an irreducible symplectic orbifold.
If $f:H^{2}(X,\Z)\rightarrow H^{2}(X,\Z)$ is a parallel transport operator from $X$ to $X$ itself, $f$ is called a \emph{monodromy operator}. If moreover $f$ sends a holomorphic 2-form to a holomorphic 2-form, $f$ is called a \emph{Hodge monodromy operator}. We denote by $\Mon^2(X)$ the group of monodromy operators and by $\Mon^2_{\Hdg}(X)$ the group of Hodge monodromy operators.
\end{defi}
\begin{rmk}
If $(X,\varphi)$ and $(X',\varphi')$ are in the same connected component $\mathcal{M}_{\Lambda}^{o}$ of $\mathcal{M}_{\Lambda}$, then $\varphi^{-1}\circ\varphi'$ is a parallel transport operator.
\end{rmk}
\begin{thm}[\cite{Menet-Riess-20}, Theorem 1.1]\label{mainHTTO}
Let $X$ and $X'$ be two irreducible symplectic orbifolds.
\begin{itemize}
\item[(i)]
The orbifolds $X$ and $X'$ are bimeromorphic if and only if there exists a parallel transport operator $f:H^2(X,\Z)\rightarrow H^2(X',\Z)$ which is an isometry of integral Hodge structures.
\item[(ii)]
Let $f:H^2(X,\Z)\rightarrow H^2(X',\Z)$ be a parallel transport operator, which is an isometry of integral Hodge structures. There exists an isomorphism $\widetilde{f}:X\rightarrow X'$ such that $f=\widetilde{f}_*$ if and only if $f$ maps some K\"ahler class on $X$ to a K\"ahler class on $X'$.
\end{itemize}
\end{thm}
\subsection{Twistor space}\label{Twist}
Let $\Lambda$ be a lattice of signature $(3,\rk\Lambda-3)$. We denote by "$\cdot$" its bilinear form.
A \emph{positive three-space} is a subspace $W\subset \Lambda\otimes\R$ such that $\cdot_{|W}$ is positive
definite. For any positive three-space, we define the associated \emph{twistor line} $T_W\subset \mathcal{D}_{\Lambda}$ by:
$$T_W:=\mathcal{D}_{\Lambda}\cap \mathbb{P}(W\otimes\C).$$
A twistor line is called \emph{generic} if $W^{\bot}\cap \Lambda=0$. A point $\alpha\in \mathcal{D}_{\Lambda}$ is called \emph{very general} if $\alpha^{\bot}\cap \Lambda=0$.
\begin{thm}[\cite{Menet-2020}, Theorem 5.4]\label{Twistor}
Let $(X,\varphi)$ be a marked irreducible symplectic orbifold with $\varphi:H^2(X,\Z)\rightarrow \Lambda$. Let $\alpha$ be a Kähler class on $X$, and $W_\alpha\coloneqq\Vect_{\R}(\varphi(\alpha),$ $\varphi(\Rea \sigma_X),\varphi(\Ima \sigma_X))$.
Then:
\begin{itemize}
\item[(i)]
There exists a metric $g$ and three complex structures (see \cite[Section 5.1]{Menet-2020} for the definition) $I$, $J$ and $K$ in quaternionic relation on $X$ such that:
$$\alpha= \left[g(\cdot,I\cdot)\right]\ \text{and}\ g(\cdot,J\cdot)+ig(\cdot,K\cdot)\in H^{0,2}(X).$$
\item[(ii)]
There exists a deformation of $X$:
$$\mathscr{X}\rightarrow T(\alpha)\simeq\mathbb{P}^1,$$ such that the period map
$\mathscr{P}:T(\alpha)\rightarrow T_{W_\alpha}$ provides an isomorphism. Moreover, for each $s=(a,b,c)\in \mathbb{P}^1$, the associated fiber $\mathscr{X}_s$ is an orbifold diffeomorphic to $X$ endowed with the complex structure $aI+bJ+cK$.
\end{itemize}
\end{thm}
\begin{rmk}
Note that if the irreducible symplectic orbifold $X$ of the previous theorem is endowed with a marking then
all the fibers of $\mathscr{X}\rightarrow T(\alpha)$ are naturally endowed with a marking. Therefore, the period map $\mathscr{P}:T(\alpha)\rightarrow T_{W_\alpha}$ is well defined.
\end{rmk}
\begin{rmk}\label{twistorinvo}
Let $X$ be an irreducible symplectic orbifold endowed with a finite symplectic automorphism group $G$ (i.e. $G$ fixes the holomorphic 2-form of $X$). Let $\alpha$ be a Kähler class of $X$ fixed by $G$ and $\mathscr{X}\rightarrow T(\alpha)$ the associated twistor space. Then $G$ extends to an automorphism group on $\mathscr{X}$ and restricts on each fiber to a symplectic automorphism group. Indeed, since $G$ is symplectic, $G$ fixes all the complex structures $I$, $J$, $K$.
\end{rmk}
We provide the following lemma which will be used several times in this paper. It is a generalization of \cite[Lemma 2.17]{Menet-Riess-20}.
\begin{lemme}\label{lem:connected+}
Let $\Lambda'\subseteq \Lambda$ be a sublattice of rank $b'$, which also has signature $(3,b'-3)$.
Consider the inclusion of period domains
$ {\mathcal D}_{\Lambda'} \subseteq{\mathcal D}_\Lambda$.
Suppose that a very general point $(\widetilde{X},\widetilde{\varphi})\in {\mathscr M}_\Lambda \cap
{\mathscr P}^{-1}{\mathcal D}_{\Lambda'}$
(i.e.~$(\widetilde{X},\widetilde{\varphi})$ with $\widetilde{\varphi}(\Pic(\widetilde{X}))^\perp=\Lambda'$) satisfies that
${\mathcal K}_{\widetilde{X}}={\mathcal C}_{\widetilde{X}}$.
Let $(X,\varphi)$ and $(Y,\psi)\in \mathcal{M}_{\Lambda}^{\circ}$ be any two marked irreducible symplectic
orbifolds which satisfy ${\mathscr P}(X,\varphi)\in {\mathcal D}_{\Lambda'}$ and ${\mathscr P}(Y,\psi)\in {\mathcal D}_{\Lambda'}$ and for which $\varphi({\mathcal K}_X)\cap
\Lambda' \neq \emptyset$ and $\psi({\mathcal K}_Y)\cap \Lambda'\neq \emptyset$. Then $(X,\varphi)$ and $(Y,\psi)$ can
be connected by a sequence of generic twistor spaces whose image under the period domain is contained in ${\mathcal D}_{\Lambda'}$.
That is: there exists a sequence of generic twistor spaces $f_i:\mathscr{X}_i\rightarrow \mathbb{P}^1\simeq T(\alpha_i)$ with $(x_i,x_{i+1})\in\mathbb{P}^1\times\mathbb{P}^1$, $i\in \left\{0,...,k\right\}$, $k\in \mathbb{N}$ such that:
\begin{itemize}
\item
$f^{-1}_0(x_0)\simeq (X,\varphi),\ f^{-1}_i(x_{i+1})\simeq f^{-1}_{i+1}(x_{i+1})\ \text{and}\ f^{-1}_{k}(x_{k+1})\simeq (Y,\psi),$
for all $0\leq i\leq k-1$.
\item
$\mathscr{P}(T(\alpha_i))\subset{\mathcal D}_{\Lambda'}$ for all $0\leq i\leq k+1$.
\end{itemize}
\end{lemme}
\begin{proof}
We split the proof in two steps.
\textbf{First case}: We assume that $(X,\varphi)$ and $(Y,\psi)$ are very general in ${\mathscr M}_\Lambda\cap
{\mathscr P}^{-1}{\mathcal D}_{\Lambda'}$ (hence ${\mathcal C}_{\widetilde{X}}={\mathcal K}_{\widetilde{X}}$ and ${\mathcal C}_{\widetilde{Y}}={\mathcal K}_{\widetilde{Y}}$).
By \cite[Proposition 3.7]{Huybrechts12} the period domain ${\mathcal D}_{\Lambda'}$
is connected by generic twistor lines. Note that the proof of \cite[Proposition 3.7]{Huybrechts12} in fact shows
that the twistor lines can be chosen in such a way that they intersect in very general points of ${\mathcal D}_{\Lambda'}$.
In particular, we can connect $\mathscr{P}(Y,\psi )$ and $\mathscr{P}(X,\varphi)$ by
such generic twistor lines in ${\mathcal D}_{\Lambda'}$.
Since for a very general element $(\widetilde{X},\widetilde{\varphi})$ of ${\mathscr M}_\Lambda\cap
{\mathscr P}^{-1}{\mathcal D}_{\Lambda'}$ we know
${\mathcal K}_{\widetilde{X}}={\mathcal C}_{\widetilde{X}}$, Theorem \ref{Twistor} shows that all these twistor lines can be lifted to
twistor spaces. Moreover, by Theorem \ref{mainHTTO} (ii) the period map ${\mathscr P}$ is injective on the set of points $(\widetilde{X},\widetilde{\varphi})\in{\mathscr M}_\Lambda$ such that ${\mathcal K}_{\widetilde{X}}={\mathcal C}_{\widetilde{X}}$. Therefore, all these twistor spaces intersect and connect $(X,\varphi)$ to $(Y,\psi)$.
\textbf{Second case}: If $(X,\varphi)$ is not very general, we consider
a very general Kähler class $\alpha\in{\mathcal K}_X \cap \Lambda'_{\R}\neq\emptyset$. Then the associated twistor space $\mathscr{X}\rightarrow T(\alpha)$
has a fiber which is a very general marked irreducible symplectic orbifold in ${\mathscr M}_\Lambda\cap
{\mathscr P}^{-1}{\mathcal D}_{\Lambda'}$. Hence we are back to the first case.
\end{proof}
\subsection{Kähler cone} \label{Kählersection}
Let $X$ be an irreducible symplectic orbifold of dimension $n$. We denote by $\mathcal{K}_X$ the Kähler cone of $X$. We denote by $\mathcal{C}_X$ the connected component of $\left\{\left.\alpha\in H^{1,1}(X,\R)\right|\ q_X(\alpha)>0\right\}$ which contains $\mathcal{K}_X$; it is called the \emph{positive cone}.
Let $\BK_X$ be the \emph{birational Kähler cone} which is the union $\cup f^{*}\mathcal{K}_{X'}$ for $f$ running through all birational maps between $X$ and any irreducible symplectic orbifold $X'$. In \cite[Definition 4.5]{Menet-Riess-20}, we define the wall divisors in the same way as Mongardi in \cite[Definition 1.2]{Mongardi13}.
\begin{defi}
Let $X$ be an irreducible symplectic orbifold and let $D\in\Pic(X)$. Then $D$ is called a \emph{wall divisor} if $q(D)<0$ and $g(D^{\bot})\cap \BK_X =\emptyset$, for all $g\in \Mon^2_{\Hdg}(X)$.
\end{defi}
We denote by $\mathscr{W}_X$ the set of primitive wall divisors of $X$ (non divisible in $\Pic X$).
By \cite[Corollary 4.8]{Menet-Riess-20}, we have the following theorem.
\begin{thm}\label{wall}
Let $\Lambda$ be a lattice of signature $(3,\rk \Lambda -3)$ and $\mathcal{M}_{\Lambda}^{o}$ a connected component of the associated moduli space of marked irreducible symplectic orbifolds. Then there exists a set $\mathscr{W}_\Lambda\subset \Lambda$ such that for all $(X,\varphi)\in \mathcal{M}_{\Lambda}^{o}$:
$$\mathscr{W}_X=\varphi^{-1}(\mathscr{W}_\Lambda)\cap H^{1,1}(X,\Z).$$
\end{thm}
\begin{defi}
The set $\mathscr{W}_\Lambda$ will be called the \emph{set of wall divisors} of the deformation class of $X$.
\end{defi}
\begin{ex}[\cite{Mongardi13}, Proposition 2.12 and \cite{HuybrechtsK3}, Theorem 5.2 Chapter 8]\label{examplewall}
If $\mathcal{M}_{\Lambda}^{o}$ is a connected component of the moduli space of marked K3 surface, then:
$$\mathscr{W}_\Lambda=\left\{\left.D\in \Lambda\ \right|\ D^2=-2\right\}.$$
If $\mathcal{M}_{\Lambda}^{o}$ is a connected component of the moduli space of marked irreducible symplectic manifolds equivalent by deformation to a Hilbert scheme of 2 points on a K3 surface, then: $$\mathscr{W}_\Lambda=\left\{\left.D\in \Lambda\ \right|\ D^2=-2\right\}\cup\left\{\left.D\in \Lambda\ \right|\ D^2=-10\ \text{and}\ D\cdot\Lambda\subset2\Z \right\}.$$
\end{ex}
\begin{rmk}\label{dualclass}
Let $\beta\in H^{2n-1,2n-1}(X,\Q)$.
We can associate to $\beta$ its \emph{dual class} $\beta^{\vee}\in H^{1,1}(X,\Q)$ defined as follows. By \cite[Corollary 2.7]{Menet-2020} and since the Beauville--Bogomolov form is integral and non-degenerate (see \cite[Theorem 3.17]{Menet-2020}), we can find $\beta^{\vee}\in H^{2}(X,\Q)$ such that for all $\alpha\in H^2(X,\C)$:
$$(\alpha,\beta^{\vee})_q=\alpha\cdot \beta,$$
where the dot on the right hand side is the cup product. Since $(\beta^{\vee},\sigma_X)_q=\beta\cdot \sigma_X=0$, we have $\beta^{\vee}\in H^{1,1}(X,\Q)$.
\end{rmk}
We also define the \emph{Mori cone} as the cone of classes of effective curves in $H^{2n-1,2n-1}(X,\Z)$.
\begin{prop}[\cite{Menet-Riess-20}, Proposition 4.12]\label{extremalray}
Let $X$ be an irreducible symplectic orbifold. Let $R$ be an extremal ray of the Mori cone of $X$ of negative self intersection. Then any class $D\in \Q R^{\vee}$ is a wall divisor.
\end{prop}
It induces a criterion for Kähler classes.
\begin{defi}Let $X$ be an irreducible symplectic orbifold endowed with
a Kähler class $\omega$.
Define ${\mathscr W}_X^+\coloneqq \{D\in {\mathscr W}_X\,|\, (D,\omega)_q>0\}$, i.e.~for every wall divisor, we choose the
primitive representative in its line, which pairs positively with the Kähler cone.
\end{defi}
\begin{cor}[\cite{Menet-Riess-20}, Corollary 4.14]\label{criterionwall}\label{cor:desrK}
Let $X$ be an irreducible symplectic orbifold such that either $X$ is projective or $b_2(X)\geq5$.
Then
$$
{\mathcal K}_X=\{\alpha\in {\mathcal C}_X \,|\, (\alpha, D)_q>0\ \forall D\in {\mathscr W}_X^+\}.$$
\end{cor}
Finally, we recall the following proposition about the birational Kähler cone.
\begin{prop}[\cite{Menet-Riess-20}, Corollary 4.17]\label{caca}
Let $X$ be an irreducible symplectic orbifold. Then $\alpha\in H^{1,1}(X,\R)$ is in the closure $\overline{\BK}_X$ of the birational Kähler cone $\BK_X$ if and only if $\alpha\in\overline{\mathcal{C}}_X$ and $(\alpha,[D])_q\geq 0$ for all uniruled divisors $D\subset X$.
\end{prop}
\section{The Nikulin orbifolds}\label{M'section0}
\subsection{Construction and description of Nikulin orbifolds}\label{M'section}
In order to enhance the readability, we recall the construction of the Nikulin orbifold from Example \ref{exem} and Definition \ref{Nikulin}.
Let $X$ be a (smooth) irreducible symplectic 4-fold deformation equivalent to the Hilbert scheme of two points
on a K3 surface (called \emph{manifold of $K3^{[2]}$-type}).
Suppose that $X$ admits a symplectic involution $\tilde{\iota}$.
By \cite[Theorem 4.1]{Mongardi-2012}, $\tilde{\iota}$ has 28 fixed points and a fixed K3 surface $\Sigma$. We
define $M:=X/\tilde{\iota}$ the quotient and $r:M'\rightarrow M$ the blow-up in the image of $\Sigma$.
As mentioned in Example \ref{exem}, the orbifolds $M'$ constructed in this way are irreducible symplectic orbifolds (see \cite[Proposition
3.8]{Menet-2020}) and are named \emph{Nikulin orbifolds}.
A concrete example of such $X$ can be obtained in the following way: Let $S$ be a K3 surface endowed with a symplectic involution $\iota$. It induces a symplectic involution $\iota^{[2]}$ on $S^{[2]}$ the Hilbert scheme of two points on $S$.
Then the fixed surface $\Sigma$ of $\iota^{[2]}$ is the following:
\begin{equation}\label{eq:sigma}
\Sigma=\left\{ \left.\xi\in S^{[2]}\ \right|\ \Supp \xi=\left\{s,\iota(s)\right\}, s\in S\right\}.
\end{equation}
\begin{remark}\label{rem:Sigma}
Let us describe this surface $\Sigma$:
Consider as usual $S\times S \overset{\widetilde{\nu}}{\longleftarrow} \widetilde{S\times S}
\overset{\widetilde{\rho}}{\too} S^{[2]}$, where $\nutild$ is the blow-up of the diagonal $\Delta_S \subseteq S\times S$, and
$\rhotild$ the double cover induced by permutation of the two factors.
Consider the surface $S_\iota \coloneqq \{(s,\iota(s))\,|\,s\in S\}\subseteq S\times S$, which is preserved by
the involution $\iota\times\iota$. Restricted to $S_\iota$ the permutation of the two factors in $S\times S$
corresponds to the action of $\iota$ on $S$ (via the isomorphism $S_\iota \iso S$ induced by the first
projection), and thus $S_\iota \cap \Delta_S$ corresponds to the fixed points of $\iota$ in $S$. Therefore, the strict transform
$\widetilde{S_{\iota}}$ of $S_\iota$ is isomorphic to the blow-up $\Bl_{\fix\iota}S$
of $S$ in the fixed points of $\iota$.
Denote
\begin{equation*}
\Sigma\coloneqq\rhotild(\widetilde{S_\iota})\iso \Bl_{\fix\iota}S / \overline{\iota},
\end{equation*}
where $\overline\iota$ is the involution on $\Bl_{\fix\iota}S$ which is induced by $\iota$.
Then $\Sigma$ is a K3 surface, which is fixed by $\iota^{[2]}$ and admits the description in \eqref{eq:sigma}
by construction.
\end{remark}
Note that the existence of a symplectic involution on a K3 surface or on a $K3^{[2]}$-type manifold can be checked purely on the
level of lattices.
We will need the following lemma.
\begin{lemme}\label{pfff}
Let $X$ be a K3 surface or an irreducible symplectic manifold of $K3^{[2]}$-type.
Assume that there is a primitive embedding $E_8(-2)\hookrightarrow\Pic X$, then there exists no wall divisor
in $E_8(-2)$.
In particular, under the additional assumption that $\Pic X\simeq E_8(-2)$, we have $\mathcal{C}_X=\mathcal{K}_X$.
\end{lemme}
\begin{proof}
All elements of $E_8(-2)$ are of square divisible by 4.
Hence by Example \ref{examplewall},
$E_8(-2)$ cannot contain any wall divisor. Then the lemma follows from Corollary \ref{cor:desrK}.
\end{proof}
\begin{prop}\label{involutionE8}
Let $X$ be a K3 surface or a manifold of $K3^{[2]}$-type.
Then there exists a symplectic involution $\iota$ on $X$ if and only if $X$ satisfies the following
conditions:
\begin{compactenum}[(i)]
\item \label{it1iota} There exists a primitive embedding $E_8(-2)\hookrightarrow\Pic X$.
\item \label{it2iota} The intersection ${\mathcal K}_X \cap E_8(-2)^\perp\neq \emptyset$.
\end{compactenum}
In this case the pullback $\iota^*$ to $H^2(X,{\mathbb Z})$ acts
on $E_8(-2)$ as $-\id$ and trivially on $E_8(-2)^{\bot}$.
\end{prop}
\begin{proof}
Let us start by fixing a symplectic involution $\iota$. Then the fact that the fixed lattice of $\iota$ is
isomorphic to $E_8(-2)^\perp$ and the antifixed lattice is isomorphic to $E_8(-2)$ are shown in
\cite[Section 1.3]{Sarti-VanGeemen} and \cite[Theorem 5.2]{Mongardi-2012}. This readily implies (i).
To observe (ii), pick any Kähler class $\alpha \in {\mathcal K}_X$. Since $\iota$ is an isomorphism, $\iota^*(\alpha)$
is also a Kähler class. Therefore $\alpha + \iota^*(\alpha)\in E_8(-2)^\perp$ is a Kähler class.
For the other implication assume (i) and (ii). We consider the involution $i$ on $E_8(-2)\oplus E_8(-2)^{\bot}$ defined by $-\id$ on $E_8(-2)$ and $\id$ on $E_8(-2)^{\bot}$.
By \cite[Corollary 1.5.2]{Nikulin}, $i$ extends to an involution on $H^2(X,\Z)$.
By \cite[Section 9.1.1]{Markman11}, $i$ is a monodromy operator.
Moreover, by (ii), we can find a Kähler class of $X$ in $E_8(-2)^{\bot}$. It follows from the global
Torelli theorem (see \cite[Theorem 1.3 (2)]{Markman11} or Theorem \ref{mainHTTO} (ii)) that there exists a symplectic automorphism
$\iota$
on
$X$ such that
$\iota^*=i$. However by \cite[Proposition 10]{Beauville1982}, we know that the natural map $\Aut(X)\rightarrow \mathcal{O}(H^2(X,\Z))$ is an injection. Hence $\iota$ is necessarily an involution.
\end{proof}
\begin{remark} Fix a primitive embedding of $E_8(-2)$ in the K3$^{[2]}$-lattice $\Lambda\coloneqq U^3 \oplus E_8(-1)^2 \oplus (-2)$.
Let ${\mathscr M}_{\rm K3^{[2]}}^\iota$ be the moduli space of marked K3$^{[2]}$-type manifolds endowed with a
symplectic involution such that the anti-invariant lattice is identified with the chosen $E_8(-2)$.
Denote by $\Lambda^{{\iota}}\iso U^3\oplus E_8(-2)\oplus (-2)$ the orthogonal complement of $E_8(-2)$.
From Proposition \ref{involutionE8} we observe that the period map restricts to
$${\mathscr P}^\iota \colon
{\mathscr M}_{\rm K3^{[2]}}^\iota \to
\mathcal{D}_{\Lambda^{\iota}}:=\left\{\left.\sigma\in \mathbb{P}(\Lambda^{\iota}\otimes\C)\ \right|\ \sigma^2=0,\ \sigma\cdot\overline{\sigma}>0\right\}.$$
Note that the fibers of ${\mathscr P}^\iota$ are in one to one correspondence with the chambers cut out by wall
divisors (no wall divisor can be contained in the orthogonal complement of $\Lambda^\iota$, see Example \ref{examplewall}). In particular, this is given by the chamber structure
inside $\Lambda^\iota$ given by the images of the wall divisors under the orthogonal projection
$\Lambda_{K3^{[2]}}\to \Lambda^\iota$.
\end{remark}
\subsection{The lattice of Nikulin orbifolds starting from $S^{[2]}$}\label{M'S2}
From now on we restrict ourselves to the case $X=S^{[2]}$ for a suitable K3 surface $S$ with an involution $\iota$.
We consider the following commutative diagram:
\begin{equation}
\xymatrix{
\ar@(dl,ul)[]^{\iota^{[2]}_1}N_1\ar[r]^{r_1}\ar[d]^{\pi_1}&\ar[d]^{\pi}S^{[2]}\ar@(dr,ur)[]_{\iota^{[2]}}\\
M' \ar[r]^{r} & M,}
\label{diagramM'}
\end{equation}
where $\pi : S^{[2]}\longrightarrow S^{[2]}/\iota^{[2]}=:M$ is the quotient map, $r_1$ is the blow-up in $\Sigma$ of $S^{[2]}$,
$\iota^{[2]}_1$ is the involution induced by $\iota^{[2]}$ on $N_1$, $\pi_1 : N_1\longrightarrow
N_1/\iota^{[2]}_1\simeq M'$ is the quotient map and $r$ is the blow-up in $\pi(\Sigma)$ of $M$. We also denote
by $j:H^2(S,\Z)\hookrightarrow H^2(S^{[2]},\Z)$ the natural Hodge isometric embedding (see \cite[Proposition 6 Section 6 and Remark 1 Section 9]{Beauville1983}).
We fix the following notation for important divisors:
\begin{itemize}
\item
$\Delta$ the class of the diagonal divisor in $S^{[2]}$ and $\delta:={\frac{1}{2}}\Delta$;
\item
$\delta_1:=r_1^*(\delta)$ and $\Sigma_1$ the exceptional divisor of $r_1$;
\item
$\delta':=\pi_{1*}r_1^*(\delta)$ and $\Sigma':=\pi_{1*}(\Sigma_1)$ the exceptional divisor of $r$.
\end{itemize}
Here we use the definition of the push-forward given in \cite{Aguilar-Prieto}. In particular $\pi_*$ verifies the following equations (see \cite[Theorem 5.4 and Corollary 5.8]{Aguilar-Prieto}):
\begin{equation}
\pi_*\circ\pi^*=2\id\ \text{and}\ \pi^*\circ\pi_*=\id+\iota^{[2]*}.
\label{Smith}
\end{equation}
As a consequence, we have (see \cite[Lemma 3.6]{Menet-2018}):
\begin{equation}
\pi_*(\alpha)\cdot\pi_*(\beta)=2\alpha\cdot \beta,
\label{Smith2}
\end{equation}
with
$\alpha \in H^k(S^{[2]},\Z)^{\iota^{[2]}}$ and $\beta \in H^{8-k}(S^{[2]},\Z)^{\iota^{[2]}}$, $k\in \left\{0,...,8\right\}$.
Of course, the same equations are also true for $\pi_{1*}$.
\begin{rmk}\label{Smithcomute}
Note that the commutativity of diagram (\ref{diagramM'}) and equations (\ref{Smith}) imply $\pi_{1*}r_1^*(x)=r^*\pi_*(x)$ for all $x\in H^{2}(S^{[2]},\Z)$.
\end{rmk}
We denote by $q_{M'}$ and $q_{S^{[2]}}$ respectively the Beauville--Bogomolov form of $M'$ and $S^{[2]}$. We can also define a Beauville--Bogomolov form on $M$ by:
$$q_{M}(x):=q_{M'}(r^*(x)),$$
for all $x\in H^2(M,\Z)$. We recall the following theorem.
\begin{thm}\label{BBform}
\begin{itemize}
\item[(i)]
The Beauville--Bogomolov lattice of $M'$ is given by
$(H^2(M',\Z),q_{M'})\simeq U(2)^3\oplus E_8(-1)\oplus(-2)^2$ where the Fujiki constant is equal to 6.
\item[(ii)]
$q_{M}(\pi_*(x))=2q_{S^{[2]}}(x)$ for all $x\in H^2(S^{[2]},\Z)^{\iota^{[2]}}$.
\item[(iii)]
$q_{M'}(\delta')=q_{M'}(\Sigma')=-4$.
\item[(iv)]
$(r^*(x),\Sigma')_{q_{M'}}=0$ for all $x\in H^{2}(M,\Z)$.
\item[(v)]
$H^2(M',\Z)=r^*\pi_*(j(H^2(S,\Z)))\oplus^{\bot}\Z{\frac{\delta'+\Sigma'}{2}}\oplus^{\bot}\Z{\frac{\delta'-\Sigma'}{2}}$.
\end{itemize}
\end{thm}
\begin{proof}
This theorem corresponds to several results in \cite{Menet-2015}. We want to emphasize that our notation is slightly different from \cite{Menet-2015}.
In \cite{Menet-2015}, $r_1$, $r$, $\delta'$ and $\Sigma'$ are respectively denoted by $s_1$, $r'$, $\overline{\delta}'$ and $\overline{\Sigma}'$.
Statement (i) is \cite[Theorem 2.5]{Menet-2015}. Knowing that the Fujiki constant is equal to 6 and Remark \ref{Smithcomute}, statement (ii) is \cite[Proposition 2.9]{Menet-2015}.
Similarly, statements (iii) and (iv) are respectively given by \cite[Propositions 2.10 and 2.13]{Menet-2015}. Finally, statement (v) is provided by \cite[Theorem 2.39]{Menet-2015}.
\end{proof}
\begin{rmk}
In the previous theorem the Beauville--Bogomolov lattice of $M'$ is obtained as follows:
\begin{itemize}
\item
$r^*\pi_*(j(H^2(S,\Z)))\simeq U(2)^3\oplus E_8(-1)$,
\item
\Z{\frac{\delta'+\Sigma'}{2}}\oplus^{\bot}\Z{\frac{\delta'-\Sigma'}{2}}
\simeq (-2)^2.$
\end{itemize}
\end{rmk}
We recall that the divisibility ${\rm div}$ of a lattice element is defined in Section \ref{notation}.
\begin{rmk}\label{div}
Theorem \ref{BBform} shows that ${\rm div}(\Sigma')={\rm div}(\delta')=2$.
\end{rmk}
\subsection{Monodromy operators inherited from $\Mon^2(S^{[2]})$}
We keep the notation from the previous subsection.
The monodromy group is defined in Section \ref{notation}.
\begin{prop}\label{MonoM'}
Let $f\in \Mon^2(S^{[2]})$, (resp. $f\in \MonHdg(S^{[2]})$) be a monodromy operator such that
$f\circ\iota^{[2]*}=\iota^{[2]*}\circ f$ on $H^2(S^{[2]},{\mathbb Z})$.
We consider $f':H^2(M',\Z)\rightarrow H^2(M',\Z)$ such that $f'(\Sigma')=\Sigma'$ and:
$$f'(r^*(x))={\frac{1}{2}}r^*\circ \pi_*\circ f \circ \pi^*(x),$$
for all $x\in H^2(M,\Z)$. Then $f'\in \Mon^2(M')$, (resp. $f'\in \MonHdg(M')$).
\end{prop}
\begin{proof}
Let $\varphi$ be a marking of $S^{[2]}$. Since $f$ is a monodromy operator, we know that $(S^{[2]},\varphi)$ and $(S^{[2]},\varphi\circ f)$ are in the same connected component of their moduli space (see Section \ref{per} for the definition of the moduli space). We consider
$$\Lambda^{\iota^{[2]}}:=\varphi\left(H^{2}(S^{[2]},\Z)^{\iota^{[2]}}\right).$$
We know that $\Lambda^{\iota^{[2]}}\simeq U^3\oplus E_8(-2)\oplus (-2)$ which is a lattice of signature
$(3,12)$ (see for instance \cite[Proposition 2.6]{Menet-2015}). As in Section \ref{M'section}, we can consider the associated period domain:
$$\mathcal{D}_{\Lambda^{\iota^{[2]}}}:=\left\{\left.\sigma\in
\mathbb{P}(\Lambda^{\iota^{[2]}}\otimes\C)\ \right|\ \sigma^2=0,\ \sigma\cdot\overline{\sigma}>0\right\}.$$
By Lemma \ref{pfff}, a very general K3$^{[2]}$-type manifold mapping to $\mathcal{D}_{\Lambda^{\iota^{[2]}}}$ satisfies that the Kähler cone is the
entire positive cone. Furthermore, by Proposition \ref{involutionE8} \eqref{it2iota} the intersection
$\varphi({\mathcal K}_{S^{[2]}})\cap\Lambda^{\iota^{[2]}}\neq \emptyset$ and therefore also $\varphi\circ f({\mathcal K}_{S^{[2]}})\cap
\Lambda^{\iota^{[2]}}\neq \emptyset$ is non-empty. We can apply Lemma \ref{lem:connected+} to see that $(S^{[2]},\varphi)$ and $(S^{[2]},\varphi\circ f)$ can be connected by a sequence of twistor spaces $\mathscr{X}_i\rightarrow \mathbb{P}^1$. By construction and Remark \ref{twistorinvo}, all these twistor spaces are endowed with an involution $\mathscr{I}_i$ which restricts on each fiber to a symplectic involution. Hence we can consider for each twistor space the blow-up $\widetilde{\mathscr{X}_i/\mathscr{I}_i}\rightarrow \mathscr{X}_i/\mathscr{I}_i$ of the quotient $\mathscr{X}_i/\mathscr{I}_i$ in the codimension 2 component of its singular locus. We obtain $\widetilde{\mathscr{X}_i/\mathscr{I}_i}\rightarrow\mathbb{P}^1$ a sequence of families of orbifolds deformation equivalent to $M'$. This sequence of families provides a monodromy operator of $M'$ that we denote by $f'$.
We need to verify that $f'$ satisfies the claimed properties. First note that by construction $f'(\Sigma')=\Sigma'$.
All fibers of a twistor space are diffeomorphic to each other and hence the monodromy operator $f$ is provided by a diffeomorphism $u: S^{[2]}\rightarrow S^{[2]}$ such that $u^*=f$. Moreover, by construction this diffeomorphism commutes with $\iota^{[2]}$.
It induces a homeomorphism $\overline{u}'$ on $M'$ with the following commutative diagram:
$$\xymatrix{S^{[2]}\ar[r]^{u}\ar[d]^{\pi}& S^{[2]}\ar[d]^{\pi} \\
M\ar[r]^{\overline{u}}&M \\
M'\ar[u]^{r}\ar[r]^{\overline{u}'} & M'.\ar[u]^{r}}$$
Note that, by construction $f'=\overline{u}'^*$. We can use the commutativity of the previous diagram to check
that $f'$ verifies the properties from the proposition.
Let $x\in H^2(M,\Z)$. We have:
\begin{equation}
f'(r^*(x))=\overline{u}'^*(r^*(x))=r^*(\overline{u}^*(x)).
\label{calculf}
\end{equation}
Moreover:
$$\pi^*(\overline{u}^*(x))=u^*(\pi^*(x)).$$
Taking the image by $\pi_*$ and using \eqref{Smith}, we obtain that:
$$2\overline{u}^*(x)=\pi_*u^*\pi^*(x).$$
Combining this last equation with (\ref{calculf}), we obtain the statement
of the proposition.
It is only left to prove that if $f\in\MonHdg(S^{[2]})$ then also $f'\in \MonHdg(M')$.
The maps $\pi$ and $r$ are holomorphic maps between Kähler orbifolds, hence induce morphisms $\pi^*$ and $r^*$
which respect the Hodge structure. Then $\pi_*$ respects the Hodge structure because of (\ref{Smith}). Since
$f'$ is a composition of morphisms which respect the Hodge structure, we therefore obtain that $f'\in \MonHdg(M')$.
\end{proof}
\begin{rmk}
The previous proposition can be generalized to other irreducible symplectic orbifolds obtained as partial resolutions in codimension 2 of quotients of irreducible symplectic manifolds.
\end{rmk}
\begin{cor}\label{Rdelta}
The reflection $R_{\delta'}$ as defined in Section \ref{notation} is an element of the Monodromy group $\MonHdg(M')$.
\end{cor}
\begin{proof}
By \cite[Section 9]{Markman11}, we know that $R_{\delta}\in \Mon^2(S^{[2]})$. By Proposition \ref{MonoM'} and Theorem \ref{BBform} (iv), we only have to check that:
$$R_{\delta'}(r^*(x))={\frac{1}{2}}r^*\circ \pi_*\circ R_{\delta} \circ \pi^*(x),$$
for all $x\in H^{2}(M,\Z)$. We have:
$$R_{\delta} \circ \pi^*(x)=\pi^*(x)-{\frac{2(\delta,\pi^*(x))_{q_{S^{[2]}}}}{q_{S^{[2]}}(\delta)}}\delta.$$
Taking the image by $\pi_*$, applying (\ref{Smith}) and Theorem \ref{BBform} (ii), we obtain:
\begin{align*}
\pi_*\circ R_{\delta} \circ \pi^*(x)=2x-{\frac{4(\pi_*(\delta),2x)_{q_M}}{2q_{M}(\pi_*\delta)}}\pi_*(\delta)
=2\left(x-{\frac{2(\pi_*(\delta),x)_{q_M}}{q_M(\pi_*\delta)}}\pi_*(\delta)\right).
\end{align*}
Then dividing by 2, taking the image by $r^*$, and using $q_M=q_{M'}\circ r^*$ (compare Section \ref{M'S2}) concludes the computation.
\end{proof}
\section{A first example: the very general Nikulin orbifolds}\label{genericM'0}
\subsection{Wall divisors of a Nikulin orbifold constructed from a K3 surface without effective curves}\label{genericM'}
Let $S$ be a K3 surface admitting a symplectic involution, which does not contain any
effective curves.
Such a K3 surface exists by Proposition \ref{involutionE8} and the surjectivity of the period map.
Then, we consider $M'$ the Nikulin orbifold associated to $S^{[2]}$ and the induced involution
$\iota^{[2]}$ as in Section \ref{M'section} (we keep the same notation as earlier in this section).
\begin{prop}\label{exwalls}
The wall divisors of $M'$ are $\delta'$ and $\Sigma'$ which are both of square $-4$ and divisibility $2$.
\end{prop}
This section is devoted to the proof of this proposition.
The idea of the proof is to study the curves in $M'$ and use Proposition \ref{extremalray}.
Consider the following diagram:
\begin{equation}
\xymatrix{
&S^{[2]}\ar[r]^{\nu}& S^{(2)} & \\
&\widetilde{S\times S}\ar[u]^{\widetilde{\rho}}\ar[r]^{\widetilde{\nu}}&\ar[ld]_{p_1}S^2\ar[u]^{\rho}\ar[rd]^{p_2}& \\
&S & & S,}
\label{S2}
\end{equation}
where $p_1$, $p_2$ are the projections, $\rho$ the quotient map and $\nu$ the blow-up in the diagonal in $S^{(2)}$.
By assumption $S$ does not contain any effective curve. Hence considering the image by the projections $p_1$, $p_2$ and $\rho$,
we deduce that $S^{(2)}$ does not contain any curve either. Hence all curves in $S^{[2]}$ are contracted by
$\nu$, i.e.~fibers of the exceptional divisor $\Delta\rightarrow \Delta_{S^{(2)}}$, where $\Delta_{S^{(2)}}$ is the diagonal
in $S^{(2)}$.
We denote such a curve by $\ell_{\delta}^s$, where $s\in S$ keeps track of the point $(s,s)\in S^{(2)}$. To
simplify the notation, we
denote the cohomology class $\ell_{\delta}\coloneqq[\ell_{\delta}^s]$, since it does not depend
on $s \in S$.
Our next goal is to determine the irreducible curves in $N_1$.
Recall that $r_1\colon N_1 \to S^{[2]}$ is the blow-up in the fixed surface $\Sigma$. Let $C$ be an irreducible curve in $N_1$. There are three cases:
\begin{itemize}
\item[(i)]
The image of $C$ by $r_1$ does not intersect $\Sigma$ and is of class $\ell_{\delta}$.
Therefore, $C$ is of class $r_1^*(\ell_{\delta})$.
\item[(ii)]
The image of $C$ by $r_1$ is contained in $\Sigma$ and of class $\ell_{\delta}$.
\item[(iii)]
The image of $C$ by $r_1$ is a point. Then $C$ is of class $\ell_{\Sigma}$
(the class of a fiber of the exceptional divisor $\Sigma_1\longrightarrow \Sigma$).
\end{itemize}
Note that $\ell_{\delta}^s$ is contained in $\Sigma$ if $s\in S$ is a fixed point of the involution $\iota$,
and otherwise the intersection $\ell_{\delta}^s\cap \Sigma=\emptyset$ is empty (this follows from the description
of $\Sigma$ in Remark \ref{rem:Sigma}). Therefore, there cannot be a case, where the image of
$\ell_{\delta}^s$ intersects $\Sigma$ in a zero-dimensional locus.
It remains to understand the case (ii).
\begin{lemme}\label{Rdeltalemma}
Consider a curve
$\ell_{\delta}^s$ contained in $\Sigma$ (i.e.~when $s\in S$ is a fixed point of $\iota$).
The surface $H_0:=r_1^{-1}(\ell_{\delta}^s)$ is isomorphic to $\mathbb{P}^1\times\mathbb{P}^1$.
\end{lemme}
\begin{proof}
The surface $H_0$ is a Hirzebruch surface.
Since $r_1$ is the blow-up along $\Sigma$, observe that $H_0 \iso{\mathbb P}(\mathcal{N}_{\Sigma|S^{[2]}}|_{\ell_\delta^s})$.
Therefore, we need to compute $\mathcal{N}_{\Sigma/S^{[2]}}|_{\ell_{\delta}^s}$.
Keeping the notation from Remark \ref{rem:Sigma}, recall that
$\widetilde{S_{\iota}}:=\widetilde{\rho}^{-1}(\Sigma)$ and
$S_{\iota}:=\widetilde{\nu}(\widetilde{S_{\iota}})$. For simplicity, we also denote by $\ell_{\delta}^s$ the
preimage of $\ell_{\delta}^s$ by $\widetilde{\rho}$. (Note for this, that $\widetilde{\rho}$ restricts to an
isomorphism on the preimage of $\Delta$, and therefore, it makes sense to identify $\ell_{\delta}^s$ with its preimage).
Observe that:
$$\mathcal{N}_{\Sigma/S^{[2]}}|_{\ell_{\delta}^s}
\iso \widetilde{\rho}^{*}(\mathcal{N}_{\Sigma/S^{[2]}})|_{\ell_{\delta}^s}
\iso\mathcal{N}_{\widetilde{S_{\iota}}/\widetilde{S\times S}}|_{\ell_{\delta}^s}
\iso\widetilde{\nu}^{*}(\mathcal{N}_{S_{\iota}/S\times S}|_{s})
\iso\mathcal{O}_{\ell_{\delta}^s}\oplus\mathcal{O}_{\ell_{\delta}^s},$$
% TODO: give a reference for the triviality of the normal bundle (probably Hartshorne)
where we identify $s\in S\iso S_\iota$. It follows that
$H_0\simeq \mathbb{P}^1\times \mathbb{P}^1$.
\end{proof}
It follows that the extremal curves in case (ii) have classes $r_1^*(\ell_{\delta})$.
\begin{rmk}\label{mainldelta}
In particular, considering cases (i) and (ii), we see that the extremal curves $C$ such that $r_1(C)=\ell_{\delta}^s$ for some $s\in S$ have classes $r_1^*(\ell_{\delta})$.
\end{rmk}
Hence, we obtain that the extremal curves in $N_1$ have classes $r_1^*\ell_{\delta}$ and
$\ell_{\Sigma}$.
This implies that the extremal curves in $M'$ have classes $\pi_{1*}r_1^*\ell_{\delta}$ and $\pi_{1*}\ell_{\Sigma}$.
We can compute their dual divisors.
\begin{lemme}\label{dualdeltasigma}
The dual divisors in $H^2(M',\Q)$ of $\pi_{1*}r_1^*\ell_{\delta}$ and $\pi_{1*}\ell_{\Sigma}$ are respectively
${\frac{1}{2}}\delta'$ and ${\frac{1}{2}}\Sigma'\in H^2(M',{\mathbb Q})$.
\end{lemme}
\begin{proof}
Write
$$H^2(N_1,\Z)=r_1^*\nu^*H^2(S^{(2)},\Z)\oplus \Z\left[\delta_1\right]\oplus\Z\left[\Sigma_1\right].$$
We denote by $p_{\delta_1}:H^2(N_1,\Z)\rightarrow \Z[\delta_1]\iso {\mathbb Z}$ and $p_{\Sigma_1}:H^2(N_1,\Z)\rightarrow
\Z[\Sigma_1]\iso {\mathbb Z}$ the projections.
Let $x\in H^2(N_1,\Z)^{\iota_1^{[2]}}$.
Since $\ell_\delta\cdot\alpha=0$ for all $\alpha \in r_1^*\nu^*H^2(S^{(2)},\Z)\oplus{\mathbb Z} [\Sigma_1]$, we
have $$\ell_{\delta}\cdot x=(\ell_{\delta}\cdot \delta_1)p_{\delta_1}(x)=-p_{\delta_1}(x)$$ and similarly
$\ell_{\Sigma}\cdot x=(\ell_{\Sigma}\cdot \Sigma_1)p_{\Sigma_1}(x)=-p_{\Sigma_1}(x)$.
It follows from (\ref{Smith2}):
$$\pi_{1*}(\ell_{\delta})\cdot\pi_{1*}(x)=-2p_{\delta_1}(x)\ \text{and}\ \pi_{1*}(\ell_{\Sigma})\cdot\pi_{1*}(x)=-2p_{\Sigma_1}(x).$$
Therefore, Theorem \ref{BBform} (iii) shows that ${\frac{1}{2}}\delta'$ and ${\frac{1}{2}}\Sigma'\in H^2(M',{\mathbb Q})$ are the duals of
$\pi_{1*}(\ell_{\delta})$ and $\pi_{1*}(\ell_{\Sigma})$ respectively.
\end{proof}
By Proposition \ref{extremalray} this proves that $\delta'$ and $\Sigma'$ are wall divisors in $M'$.
Their claimed numerical properties are given by Theorem \ref{BBform} (iii) and Remark \ref{div}.
It remains to show that $\delta'$ and $\Sigma'$ are the only wall divisors in $M'$.
Let us assume for contradiction that there is another wall divisor $D$.
By Theorem \ref{BBform} (v), we have
$D=a\delta'+b\Sigma'+K$, with $(a,b)\in \Z[{\frac{1}{2}}]\times\N[{\frac{1}{2}}]$ (up to replacing $D$ by $-D$) and $K$ a divisor orthogonal to $\delta'$ and $\Sigma'$.
Since $\delta'$ and $\Sigma'$ correspond to the duals of the extremal rays of the Mori cone,
all classes $\alpha\in \mathcal{C}_{M'}$ such that $(\alpha,\delta')_q>0$ and $(\alpha,\Sigma')_q>0$ are Kähler
classes by \cite[Theorem 4.1]{Menet-Riess-20}.
Hence, we cannot have $a=b=0$. Indeed, $D$ would be orthogonal to the Kähler classes with orthogonal projection on $\Pic M'$ equal to $ -(\Sigma' + \delta')$ which is impossible by definition of wall divisors.
Therefore, $a$ or $b$ are non trivial.
It follows that $K=0$.
Indeed, if $K\neq0$, as before, the class $ -(\Sigma' + \delta')-{\frac{4(b+a)}{(K,K)}}K$ is the projection on $\Pic M'$ of a
Kähler class; however it is orthogonal to $D$ which is impossible.
Now, we can assume that $a\neq 0$ and $b\neq 0$ (indeed, if $a=0$ or $b=0$, then $D\in {\mathbb Z}\delta'$ or $D\in {\mathbb Z}\Sigma'$).
If $a<0$, $D$ would be orthogonal to $ a\Sigma' - b\delta'$ the projection on $\Pic M'$ of a Kähler class; this is impossible by definition of wall divisors.
Hence we assume that $a>0$. By Corollary \ref{Rdelta}, $R_{\delta'}$ is a monodromy operator.
Moreover, $R_{\delta'}(D)=-a\delta'+b\Sigma'$; as previously $R_{\delta'}(D)$ is orthogonal
to some Kähler class. This gives a contradiction and thus concludes the proof.
\subsection{Application: an example of non-natural symplectic involution on a Nikulin orbifold}\label{inv0M'}
Using the results from the previous subsection, we will prove the existence of a non-natural symplectic
involution on our example. We recall that the reflections $R_x$ with $x\in H^2(M',\Z)$ are defined in Section \ref{notation}.
\begin{prop}\label{involution}
Let $(S,\iota)$ be a very general K3 surface endowed with a symplectic involution (that is $\Pic S\simeq E_8(-2)$).
Let $M'$ be a Nikulin orbifold constructed from $(S,\iota)$ as in Section \ref{M'S2}.
There exists $\kappa'$ a symplectic involution on $M'$ such that
$\kappa'^*=R_{{\frac{1}{2}}(\delta'-\Sigma')}$.
\end{prop}
\begin{proof}
We consider the following involution on $S\times S$:
$$\xymatrix@R0pt{\kappa: \hspace{-1.5cm}& S\times S\ar[r] & S\times S\\
& (x,y)\ar[r]&(x,\iota(y)).}$$
We consider $$V:=S\times S\smallsetminus \left(\Delta_{S^{2}}\cup S_{\iota}\cup(\fix \iota\times \fix \iota)\right).$$
We denote by $\sigma_2$ the involution on $S\times S$ which exchange the two K3 surfaces and $\iota\times\iota$ the involution which acts as $\iota$ diagonally on $S\times S$. Then we consider
$$U:=V/\left\langle \sigma_2,\iota\times\iota\right\rangle.$$
This set can be seen as an open subset of $M'$ and $V$ can also be seen as an open subset of $\widetilde{S\times S}$.
Moreover the map $\pi_1\circ\widetilde{\rho}_{|V}:V\rightarrow U$ is a four to one non-ramified cover. For simplicity, we denote $\gamma:=\pi_1\circ\widetilde{\rho}_{|V}$.
First, we want to prove that $\kappa$ induces an involution $\kappa'$ on $U$ with a commutative diagram:
\begin{equation}
\xymatrix{V\ar[d]_{\gamma}\ar[r]^{\kappa}&V\ar[d]^{\gamma}\\
U\ar[r]^{\kappa'} & U.}
\label{kappadiagram}
\end{equation}
If such a map $\kappa'$ exists, then it necessarily satisfies the following equation:
$$\kappa'\circ \gamma=\gamma\circ \kappa.$$
The map $\gamma$ being surjective, to be able to claim that the previous equation provides a well defined map from $U$ to $U$, we have to verify that:
$$\kappa'\circ\gamma(x_1,y_1)=\kappa'\circ\gamma(x_2,y_2),$$ when
$\gamma(x_1,y_1)=\gamma(x_2,y_2)$.
That is:
\begin{equation}
\gamma\circ \kappa(x_1,y_1)=\gamma\circ \kappa(x_2,y_2),
\label{welldefined}
\end{equation}
for all $((x_1,y_1),(x_2,y_2))\in S^4$ such that $\gamma(x_1,y_1)=\gamma(x_2,y_2)$. Let $(x,y)\in S^2$.
We have: $$\gamma^{-1}(\gamma(x,y))=\left\{(x,y),(y,x),(\iota(x),\iota(y)),(\iota(y),\iota(x))\right\}.$$
We also have: $$\kappa(\gamma^{-1}(\gamma(x,y)))=\left\{(x,\iota(y)),(y,\iota(x)),(\iota(x),y),(\iota(y),x)\right\}=\gamma^{-1}(\gamma(x,\iota(y)))=\gamma^{-1}(\gamma(\kappa(x,y))).$$
This shows (\ref{welldefined}). Hence $\kappa'$ is set theoretically well defined. Since $\gamma$ is a four to one non-ramified cover, it is a local isomorphism; therefore $\kappa'$ inherits the properties of $\kappa$. In particular $\kappa'$ is a holomorphic symplectic involution.
It follows that $\kappa'$ induces a bimeromorphic symplectic involution on $M'$. By \cite[Lemma 3.2]{Menet-Riess-20}, $\kappa'$ extends to a bimeromorphic symplectic involution which is an isomorphism in codimension 1 (we still denote by $\kappa'$ this involution).
In particular, $\kappa'^*$ is now well defined on $H^2(M',\Z)$ (see \cite[Lemma 3.4]{Menet-Riess-20}).
Now, we are going to prove that $\kappa'$ extends to a regular involution.
We recall from Theorem \ref{BBform} (v) that:
$$H^2(M',\Z)=r^*\pi_*(j(H^2(S,\Z)))\oplus^{\bot}\Z{\frac{\delta'+\Sigma'}{2}}\oplus^{\bot}\Z{\frac{\delta'-\Sigma'}{2}}.$$
Since $\kappa'$ is symplectic, $\kappa'$ acts trivially on $r^*\pi_*(j(H^2(S,\Z)))$. Indeed $\Pic M'=\Z\frac{\delta'+\Sigma'}{2}\oplus\Z\frac{\delta'-\Sigma'}{2}$. Moreover, $\kappa$ exchanges $\Delta_{S^{(2)}}$ and $S_{\iota}$. Hence by continuity and commutativity of diagram (\ref{kappadiagram}), we have that $\kappa'$ exchanges the divisors $\delta'$ and $\Sigma'$.
By Proposition \ref{exwalls} and Corollary \ref{criterionwall}, it follows that $\kappa'$ sends Kähler classes to Kähler classes. Hence by \cite[Proposition 3.3]{Menet-Riess-20}, $\kappa'$ extends to an involution on $M'$.
\end{proof}
\begin{cor}\label{Sigma'}
Let $M'$ be a Nikulin orbifold.
Then: $$R_{\Sigma'}\in \Mon^2(M').$$
\end{cor}
\begin{proof}
Let $(X,\widetilde{\iota})$ be any manifold of $K3^{[2]}$-type endowed with a symplectic involution. Let $(S,\iota)$ be a very general K3 surface endowed with a symplectic involution.
With exactly the same argument as in the proof of Proposition \ref{MonoM'}, we can connect $X$ and $S^{[2]}$ by
a sequence of twistor spaces; each twistor space being endowed with an involution which restricts to a symplectic involution on its fibers.
This sequence of twistor spaces provides a sequence of twistor spaces between $M'_X$ and $M'_{S^{[2]}}$ the irreducible symplectic orbifolds associated to $X$ and $S^{[2]}$ respectively. This sequence of twistor spaces provides a parallel transport operator $f:H^2(M'_X,\Z)\rightarrow H^2(M'_{S^{[2]}},\Z)$ which sends $\Sigma'_X$ to $\Sigma'_{S^{[2]}}$ (respectively the exceptional divisors of the blow-ups $M'_X\rightarrow X/\widetilde{\iota}$ and $M'_{S^{[2]}}\rightarrow S^{[2]}/\iota^{[2]}$).
By Proposition \ref{involution}, $\kappa'^*\in\Mon^2(M'_{S^{[2]}})$.
Moreover by Corollary \ref{Rdelta}, $R_{\delta'}\in \Mon^2(M'_{S^{[2]}})$.
Hence $R_{\Sigma'_{S^{[2]}}}=\kappa'^*\circ R_{\delta'}\circ \kappa'^*\in\Mon^2(M'_{S^{[2]}})$. Therefore $R_{\Sigma'_{X}}=f^{-1}\circ R_{\Sigma'_{S^{[2]}}}\circ f\in\Mon^2(M'_X)$.
\end{proof}
\begin{rmk}\label{RdeltaSigma}
Let $(S,\iota)$ be a K3 surface endowed with a symplectic involution.
Let $M'$ be a Nikulin orbifold constructed from $(S,\iota)$ as in Section \ref{M'S2}.
The previous proof also shows that $R_{\frac{1}{2}(\delta'-\Sigma')}$ is a parallel transport operator.
\end{rmk}
\section{In search of wall divisors in special examples}\label{extremalcurves}
In this section we study some explicit examples of K3 surfaces with symplectic involutions and their
associated Nikulin orbifolds. This will be used in Section \ref{endsection} in order to determine which divisors on
Nikulin-type orbifolds are wall divisors.
\subsection{When the K3 surface $S$ used to construct the Nikulin orbifold contains a unique rational curve}\label{onecurve}
\subsubsection*{Objective}
In this section we assume that $\Pic S\simeq E_8(-2)\oplus^{\bot} (-2)$. By Riemann--Roch $S$ contains only
one curve which is rational. We denote this curve by $C$. In particular in this case, $\mathcal{K}_S\cap
E_8(-2)^{\bot}\neq\emptyset$. Hence, by Proposition \ref{involutionE8} there exists a symplectic involution
$\iota$ on $S$ whose anti-invariant lattice is the $E_8(-2)\subset \Pic(S)$. Moreover, the curve $C$ is fixed by $\iota$. The objective of this section is to determine wall divisors of the Nikulin orbifold $M'$ obtained from the couple $(S^{[2]},\iota^{[2]})$ (see Section \ref{M'section}).
\subsubsection*{Notation}
We keep the notation from Section \ref{genericM'} and we consider the following notation in addition.
\begin{nota}\label{notacurves}
\begin{itemize}
\item
We denote by $D_C$ the following divisor in $S^{[2]}$:
$$D_C=\left\{\left.\xi\in S^{[2]}\right|\ \Supp\xi\cap C\neq\emptyset\right\}.$$
Moreover, we set $D_C':=\pi_{1*}r_{1}^*(D_C)$.
\item
We denote by $\overline{C^{(2)}}$ the strict transform of $C^{(2)}\subset S^{(2)}$ by $\nu$ and $\overline{\overline{C^{(2)}}}$ the strict transform of $\overline{C^{(2)}}\subset S^{[2]}$ by $r_1$.
We denote by
$j^{C}:\overline{C^{(2)}}\hookrightarrow S^{[2]}$ and $\overline{j^{C}}:\overline{\overline{C^{(2)}}}\hookrightarrow N_1$
the embeddings.
Note that $\overline{\overline{C^{(2)}}}\simeq\overline{C^{(2)}}\simeq C^{(2)}\simeq \mathbb{P}^2$.
\item
We recall that $\Delta_{S^{(2)}}$ is the diagonal in $S^{(2)}$ and $\Delta$ the diagonal in $S^{[2]}$.
We also denote by $\Delta_{S^2}$ the diagonal in $S\times S$ and
$\Delta_{\widetilde{S^2}}:=\widetilde{\nu}^{-1}(\Delta_{S^{2}})$ the exceptional divisor. Furthermore, we
denote $\Delta_{S^{(2)}}^C:=C^{(2)}\cap \Delta_{S^{(2)}}$ and $\Delta_{S^{2}}^{C}:=C^{2}\cap \Delta_{S^2}$.
Moreover we denote $\Delta_{S^{(2)}}^{\iota,C}:=\left\{\left.\left\{x,\iota(x)\right\}\right|\ x\in C\right\}\subset S^{(2)}$ and $\Delta_{S^{2}}^{\iota,C}:=\left\{\left.(x,\iota(x))\right|\ x\in C\right\}\subset S^2$.
\item
We denote $H_2:=\nu^{-1}(\Delta_{S^{(2)}}^C)$ and $\overline{H_2}$ its strict transform by $r_1$. We denote by $j^{H_2}:H_2\hookrightarrow S^{[2]}$ and $j^{\overline{H_2}}:\overline{H_2}\hookrightarrow N_1$ the embeddings.
\end{itemize}
\end{nota}
We summarize our notation on the following diagram:
$$\xymatrix{&\ar@{^{(}->}[dd]_{j^{\overline{H_2}}}\overline{H_2}\ar@{=}[r]& \ar@{^{(}->}[d]H_2\ar@/_1pc/@{_{(}->}[dd]_{j^{H_2}}\ar[r] & \ar@{^{(}->}[d]\Delta_{S^{(2)}}^C & &\\
& & \Delta\ar[r]\ar@{^{(}->}[d] & \Delta_{S^{(2)}}\ar@{^{(}->}[d]& &\\
& N_1\ar[r]^{r_1} & S^{[2]}\ar[r]^{\nu} & S^{(2)}& \ar@{^{(}->}[l] C^{(2)} &\ar@{^{(}->}[l] \Delta_{S^{(2)}}^{\iota,C}\\
\overline{\overline{C^{(2)}}}\ar@{^{(}->}[ru]^{\overline{j^{C}}}\ar@{=}[r]& \overline{C^{(2)}}\ar@{^{(}->}[ru]^{j^{C}}& \widetilde{S^{2}}\ar[u]^{\widetilde{\rho}} \ar[r]^{\widetilde{\nu}}& S^{2}\ar[u]^{\rho} & \ar@{^{(}->}[l] C^{2}\ar[u]^{2:1} &\ar@{^{(}->}[l] \Delta_{S^{2}}^{\iota,C}\ar[u]^{2:1}\\
& & \Delta_{\widetilde{S^2}}\ar@{^{(}->}[u]\ar[r] & \ar@{^{(}->}[u] \Delta_{S^{2}} &&
}$$
\begin{lemme}\label{Hirzebruch}
The surface $H_2$ is a Hirzebruch surface isomorphic to $\mathbb{P}(\mathcal{E})$, where $\mathcal{E}:=\mathcal{O}_C(2)\oplus\mathcal{O}_C(-2)$. Let $f$ be a fiber of $\mathbb{P}(\mathcal{E})$. There exists a section $C_0$ which
satisfies: $\Pic H_2=\Z C_0\oplus\Z f$, $C_0^2=-4$, $f^2=0$ and $C_0\cdot f=1$.
\end{lemme}
\begin{proof}
Let $\sigma_2$ be the involution on $S\times S$ which exchanges the two K3 surfaces and $\widetilde{\sigma_2}$ the induced involution on $\widetilde{S\times S}$. The involution $\widetilde{\sigma_2}$ acts trivially on $\Delta_{\widetilde{S^2}}$. It follows that $\widetilde{\rho}$ induces an isomorphism $\Delta_{\widetilde{S^2}}\simeq \Delta$. In particular, it shows that:
$$H_2\simeq\mathbb{P}(\mathcal{N}_{\Delta_{S^{2}}/S\times S}|_{\Delta_{S^{2}}^{C}}).$$
We consider the following commutative diagram:
\begin{equation}
\xymatrix{\Delta_{S^{2}}\eq[r]& S\\
\Delta_{S^{2}}^{C}\ar@{^{(}->}[u]\eq[r] & \ar@{^{(}->}[u]C.}
\label{delta0}
\end{equation}
Under the isomorphism induced by (\ref{delta0}), observe that:
$$\mathcal{N}_{\Delta_{S^2}/S\times S}|_{\Delta_{S^{2}}^{C}}\simeq T_S|_C.$$
To compute $T_S|_C$, we consider the following exact sequence:
$$\xymatrix{0\ar[r]&\ar[r]T_C\ar[r]&T_S|_C\ar[r]&\mathcal{N}_{C/S}\ar[r]&0.}$$
We have $T_C=\mathcal{O}_C(2)$ and $\mathcal{N}_{C/S}=\mathcal{O}_C(-2)$.
Moreover, $\Ext^1(\mathcal{O}_C(-2),\mathcal{O}_C(2))=H^1(C,\mathcal{O}_C(4))=0$.
Hence: $$T_S|_C=\mathcal{O}_C(-2)\oplus\mathcal{O}_C(2).$$
As a consequence $H_2\iso {\mathbb P}(\mathcal{E})$ as claimed.
Therefore, by \cite[Chapter V, Proposition 2.3 and Proposition 2.9]{Hartshorne},
we know that $\Pic H_2=\Z C_0\oplus\Z f$, with $C_0^2=-4$, $f^2=0$ and $C_0\cdot f=1$;
$C_0$ being the class of a specific section and $f$ the class of a fiber.
\end{proof}
\begin{lemme}\label{intersection}
We have $\ell_{\delta}\cdot\delta=-1$ and $C\cdot D_C=-2$ in $S^{[2]}$.
\end{lemme}
\begin{proof}
We denote by $\widetilde{\ell_{\delta}}$ a fiber associated to the exceptional divisor $\Delta_{\widetilde{S^2}}\rightarrow \Delta_{S^{2}}$.
We know that $\widetilde{\ell_{\delta}}\cdot \Delta_{\widetilde{S^2}}=-1$. We can deduce for instance from \cite[Lemma 3.6]{Menet-2018} that $\ell_{\delta}\cdot \Delta=-2$. That is $\ell_{\delta}\cdot \delta=-1$.
Similarly, we have $(C\times S+S\times C)\cdot(s\times C+C\times s)=-4$. By \cite[Lemma 3.6]{Menet-2018} (see \ref{Smith2}):
\begin{align*}
&\rho_*(C\times S+S\times C)\cdot\rho_*(s\times C+C\times s)=-8\\
&\rho_*(C\times S)\cdot\rho_*(s\times C)=-2.
\end{align*}
Then taking the pull-back by $\nu$, we obtain $C\cdot D_C=-2$.
\end{proof}
\subsubsection*{Strategy}
We will need several steps to find the wall divisors on $M'$:
\begin{itemize}
\item[I.]
Understand the curves contained in $S^{[2]}$.
\item[II.]
Understand the curves contained in $N_1$.
\item[III.]
Deduce the corresponding wall divisors in $M'$ using Proposition \ref{extremalray}, Corollaries
\ref{Rdelta}, and \ref{Sigma'}.
\end{itemize}
\subsubsection*{Curves in $S^{[2]}$}
The first step is to determine the curves contained in $S^{[2]}$. Before that, we can say the following about curves in $S^{(2)}$.
\begin{lemme}\label{S2curves}
There are only two cases for irreducible curves in $S^{(2)}$:
\begin{itemize}
\item[(1)]
the curves $C^s:=\rho(C\times s)=\rho(s\times C)$ with $s\notin C$;
\item[(2)]
curves in $C^{(2)}\simeq \mathbb{P}^2$.
\end{itemize}
\end{lemme}
\begin{proof}
In $S\times S$, considering the images of curves by the projections $p_1$ and $p_2$ of diagram (\ref{S2}), there are only two possibilities:
\begin{itemize}
\item
$s\times C$ or $C\times s$, with $s$ a point in $S$,
\item
curves contained in $C\times C$.
\end{itemize}
Then, we obtain all the curves in $S^{(2)}$ as images by $\rho$ of curves in $S\times S$.
\end{proof}
This yields four cases in $S^{[2]}$.
\begin{lemme}\label{curveS2}
We have the following four cases for irreducible curves in $S^{[2]}$:
\begin{itemize}
\item[(0)]
Curves which are fibers of the exceptional divisor $\Delta\rightarrow \Delta_{S^{(2)}}$. As in Section \ref{genericM'}, we denote these curves by $\ell_{\delta}^s$,
where $s=\nu(\ell_{\delta}^s)$ and we denote their classes by $\ell_{\delta}$.
\item[(1)]
Curves which are strict transforms of $C^s$ with $s\notin C$. We denote the class of these curves by $C$. Note that $C=\nu^*(\left[C^s\right])$ for $s\notin C$.
\item[(2a)]
Curves contained in $H_2$.
\item[(2b)]
Curves contained in $\overline{C^{(2)}}$.
\end{itemize}
\end{lemme}
\begin{proof}
Let $\gamma$ be an irreducible curve in $S^{[2]}$. By Lemma \ref{S2curves}, there are three cases for $\nu(\gamma)$:
\begin{itemize}
\item[(0)]
$\nu(\gamma)$ is a point and $\gamma$ is a fiber of the exceptional divisor;
\item[(1)]
$\nu(\gamma)=C^s$, with $s\notin C$.
\item[(2)]
$\nu(\gamma)\subset C^{(2)}$.
\end{itemize}
Moreover, case (2) can be divided into two sub-cases: $\nu(\gamma)=\Delta_{C}$ or $\nu(\gamma)\nsubseteq\Delta_{C}$.
This provides cases (2a) and (2b).
\end{proof}
Now, we are going to determine the classes of the extremal curves in cases (2a) and (2b) in two lemmas.
\begin{lemme}\label{C0}
We have $j^{H_2}_*(C_0)=2(C-\ell_{\delta})$ and $j^{H_2}_*(f)=\ell_{\delta}$.
\end{lemme}
\begin{proof}
It is clear that $j^{H_2}_*(f)=\ell_{\delta}$, we are going to compute $j^{H_2}_*(C_0)$.
Necessarily, $j^{H_2}_*(C_0)=aC+b\ell_{\delta}$. We can consider the intersection with $\Delta$ and $D_C$ and use Lemma \ref{intersection} to determine $a$ and $b$.
We consider the following commutative diagram:
$$\xymatrix{H_2\ar@{=}[d]\ar@{^{(}->}[r]^{\widetilde{j^{H_2}}}&\widetilde{S\times S}\ar[d]^{\widetilde{\rho}}\\
H_2\ar@{^{(}->}[r]^{j^{H_2}}&S^{[2]}.}$$
By commutativity of the diagram, we have:
\begin{equation}
j^{H_2}_*(C_0)=\widetilde{\rho}_*\widetilde{j^{H_2}}_*(C_0).
\label{heuuu}
\end{equation}
By \cite[Propositions 2.6 and 2.8]{Hartshorne}, we have:
$$C_0\cdot \widetilde{j^{H_2}}^*(\Delta_{\widetilde{S^2}})=C_0\cdot \mathcal{O}_{\mathcal{E}}(-1)=-C_0\cdot(C_0+2f)=4-2=2.$$
By projection formula that is:
$$\widetilde{j^{H_2}}_*(C_0)\cdot \Delta_{\widetilde{S^2}}=2.$$
Taking the push-forwards by $\widetilde{\rho}$, we obtain by \cite[Lemma 3.6]{Menet-2018} (see \ref{Smith2}):
$$\widetilde{\rho}_*\widetilde{j^{H_2}}_*(C_0)\cdot\widetilde{\rho}_*(\Delta_{\widetilde{S^2}})=4.$$
Hence by (\ref{heuuu}):
$$j^{H_2}_*(C_0)\cdot\Delta=4.$$
Hence by Lemma \ref{intersection}:
$$b=-2.$$
We have $D_C=\nu^*(\rho_*(C\times S))$. So by projection formula:
$$D_C\cdot j^{H_2}_*(C_0)=\rho_*(C\times S)\cdot\left[\Delta_{S^{(2)}}^C\right]=\rho_*(C\times S)\cdot\rho_*(s \times C+C\times s)=2\rho_*(C\times S)\cdot\rho_*(s \times C).$$
Taking the pull-back by $\nu$ of the last equality, we obtain:
$$D_C\cdot j^{H_2}_*(C_0)=2D_C\cdot C.$$
So $a=2$ which concludes the proof.
\end{proof}
\begin{lemme}\label{stricttransform}
We have $j^C_*(d)=C-\ell_{\delta}$, where $d$ is the class of a line in $\overline{C^{(2)}}\simeq\mathbb{P}^2$.
\end{lemme}
\begin{proof}
We consider the following commutative diagram:
$$\xymatrix{\overline{C^{(2)}}\eq[d]\ar@{^{(}->}[r]^{j^C}&S^{[2]}\ar[d]^{\nu}\\
C^{(2)}\ar@{^{(}->}[r]^{j^C_0}&S^{(2)}.}$$
Let $\gamma$ be an irreducible curve in $\overline{C^{(2)}}$. Since $\overline{C^{(2)}}$ is the strict transform of $C^{(2)}$ by $\nu$,
$j^{C}(\gamma)$ is the strict transform of $j^{C}_0(\gamma)$ by $\nu$.
Hence to compute $j^C_*(d)$ for $d$ the class of a line, it is enough to find a curve in $C^{(2)}$ with class $d$ and determine its strict transform by $\nu$. For instance $C^s$ with $s\in C$ verifies $\left[C^s\right]=d$ in $C^{(2)}$. Moreover, $C^s$ intersects $\Delta_{S^{(2)}}$ transversely in one point. It follows that $j^C_*(d)=C-\ell_{\delta}$.
\end{proof}
\subsubsection*{Curves in $N_1$}
\begin{lemme}\label{curvesN1}
We have the following cases for irreducible curves in $N_1$:
\begin{itemize}
\item[(00)]
Curves contracted to a point by $r_1$.
They are fibers of the exceptional divisor $\Sigma_1\rightarrow \Sigma$ and their class is $\ell_{\Sigma}$.
\item[(0)]
Curves sent to $\ell_{\delta}^s$ by $r_1$. An extremal such curve has class $r_1^*(\ell_{\delta})$ by Lemma \ref{Rdeltalemma}.
\item[(1)]
Curves sent to $C^{s}$ by $r_1$ with $s\notin C$.
They are curves of class $r_1^*(C)$.
\item[(2a.i)]
Curves contained in $r_1^{-1}(H_2\cap \Sigma)$.
\item[(2a.ii)]
Curves contained in $\overline{H_2}$ the strict transform of $H_2$ by $r_1$.
\item[(2b.i)]
Curves contained in $r_1^{-1}(\overline{C^{(2)}}\cap \Sigma)$.
\item[(2b.ii)]
Curves contained in $\overline{\overline{C^{(2)}}}$ the strict transform of $\overline{C^{(2)}}$ by $r_1$.
\end{itemize}
\end{lemme}
\begin{proof}
Let $\gamma$ be an irreducible curve in $N_1$. If $r_1(\gamma)$ is a point, we are in case (00). If $r_1(\gamma)$ is a curve, we are in one of the cases of Lemma \ref{curveS2}.
If $r_1(\gamma)=\ell_{\delta}^s$ for some $s\in S$, this corresponds to cases (i) and (ii) of Section \ref{genericM'}.
It follows from Remark \ref{mainldelta} that the extremal curves in case (0) have classes $r_1^*(\ell_{\delta})$.
If $r_1(\gamma)=C^s$ with $s\notin C$, then $C^s$ does not intersect $\Sigma$ and we have $\left[\gamma\right]=r_1^*(C)$. The last four cases appear when $r_1(\gamma)\subset H_2$ or $r_1(\gamma)\subset \overline{C^{(2)}}$.
\end{proof}
Now we are going to determine the classes of the curves in cases (2a.i), (2a.ii), (2b.i) and (2b.ii).
\begin{lemme}\label{H2Sigma}
The extremal curves in $r_1^{-1}(H_2\cap \Sigma)$ are of classes $\ell_{\Sigma}$ or $r_1^*(\ell_{\delta})$.
\end{lemme}
\begin{proof}
Since $C$ is the unique curve contained in $S$, the involution $\iota$ on $S$ restricts to $C$. Since $\iota$ is a symplectic involution, $\iota$ does not act trivially on $C$. Moreover, since $C\simeq \mathbb{P}^1$, $\iota_{|C}$ has two fixed points $x$ and $y$. It follows that $\iota^{(2)}_{|\Delta_{C}}$ also has two fixed points $(x,x)$ and $(y,y)$. Hence $H_2\cap \Sigma=\ell_{\delta}^x\cup \ell_{\delta}^y$.
The surfaces $r_1^{-1}(\ell_{\delta}^x)$ and $r_1^{-1}(\ell_{\delta}^y)$ are Hirzebruch surfaces and by Lemma \ref{Rdeltalemma}, they are isomorphic to $\mathbb{P}^1\times\mathbb{P}^1$. Then the extremal curves of these Hirzebruch surfaces will have classes $\ell_{\Sigma}$ or $r_1^*(\ell_{\delta})$ in $N_1$.
\end{proof}
\begin{lemme}
Let $C_0$ be the class of the section in $\overline{H_2}$ obtained in Lemma \ref{Hirzebruch}.
Then $j^{\overline{H_2}}_*(C_0)=2\left(r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma}\right)$.
\end{lemme}
\begin{proof}
As explained in the proof of the previous lemma, $H_2\cap \Sigma$ corresponds to two fibers of the Hirzebruch surface $H_2$. Hence $j^{H_2}(C_0)$ and $\Sigma$ intersect in two points. The class $j^{\overline{H_2}}_*(C_0)$ corresponds to the class of the strict transform by $r_1$ of $j^{H_2}(C_0)$. By Lemma \ref{C0}, $\left[j^{H_2}(C_0)\right]=2(C-\ell_{\delta})$. Hence $j^{\overline{H_2}}_*(C_0)=2\left(r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma}\right)$.
\end{proof}
\begin{lemme}\label{deltaiota}
The curve $\Delta_{S^{(2)}}^{\iota,C}$ is a line in $C^{(2)}\simeq \mathbb{P}^2$.
\end{lemme}
\begin{proof}
The map $$\xymatrix{\eq[d]C^2\ar[r]^{\rho}_{2:1}&C^{(2)}\eq[d]\\
\mathbb{P}^1\times \mathbb{P}^1 &\mathbb{P}^2}$$
is a two to one ramified cover. We recall that $\Delta_{S^{2}}^{\iota,C}=\left\{\left.(x,\iota(x))\right|\ x\in C\right\}$.
We have: $$\left[\Delta_{S^{2}}^{\iota,C}\right]_{S^2}=\left[C\times s\right]_{S^2}+\left[s\times C \right]_{S^2}.$$
It follows: $$\rho_*\left(\left[{\Delta_{S^{2}}^{\iota,C}}\right]_{S^2}\right)=2\left[C^s\right]_{S^{(2)}}.$$
However $\rho: \Delta_{S^{2}}^{\iota,C}\rightarrow \Delta_{S^{(2)}}^{\iota,C}$ is a two to one cover; so $\left[\Delta_{S^{(2)}}^{\iota,C}\right]_{S^{(2)}}=\left[C^s\right]_{S^{(2)}}$.
Hence $\left[\Delta_{S^{(2)}}^{\iota,C}\right]_{C^{(2)}}=\left[C^s\right]_{C^{(2)}}$ and $\left[\Delta_{S^{(2)}}^{\iota,C}\right]_{C^{(2)}}$ is the class of a line in $C^{(2)}$.
\end{proof}
\begin{lemme}
The surface $r_1^{-1}(\overline{C^{(2)}}\cap \Sigma)$ is a Hirzebruch surface that we denote by $H_1$.
Let $f$ be a fiber of $H_1$. There exists a section $D_0$ which
satisfies: $\Pic H_1=\Z D_0\oplus\Z f$, $D_0^2=-2$, $f^2=0$ and $D_0\cdot f=1$.
Moreover, $j^{H_1}_*(D_0)=r_1^*(C)-\ell_{\delta}-\ell_{\Sigma}$, where $j^{H_1}:H_1\hookrightarrow N_1$ is the embedding.
\end{lemme}
\begin{proof}
We denote by $\zeta$ the curve $\overline{C^{(2)}}\cap \Sigma$. This curve is the strict transform of $\Delta_{S^{(2)}}^{\iota,C}$ by $\nu$.
In particular, it is a rational curve by Lemma \ref{deltaiota}.
Moreover by Lemmas \ref{deltaiota} and \ref{stricttransform}, we have:
\begin{equation}
\left[\zeta\right]_{S^{[2]}}=C-\ell_{\delta}.
\label{gammadelta}
\end{equation}
To understand which Hirzebruch surface $H_1$ is, we are going to compute $\mathcal{N}_{\Sigma/S^{[2]}}|_{\zeta}=\mathcal{O}_{\zeta}(-k)\oplus\mathcal{O}_{\zeta}(k)$.
We consider $\widetilde{\zeta}:=\widetilde{\rho}^{-1}(\zeta)$.
We have: $$\rho^*(\mathcal{N}_{\Sigma/S^{[2]}})|_{\widetilde{\zeta}}=\widetilde{\nu}^*(\mathcal{N}_{S_{\iota}/S^2}|_{\Delta_{S^{2}}^{\iota,C}}).$$
As in the proof of Lemma \ref{Hirzebruch}, we have:
$$\mathcal{N}_{S_{\iota}/S^2}|_{\Delta_{S^{2}}^{\iota,C}}\simeq T_S|_C\simeq \mathcal{O}_C(-2)\oplus\mathcal{O}_C(2).$$
Hence $$\rho^*(\mathcal{O}_{\zeta}(-k)\oplus\mathcal{O}_{\zeta}(k))=\rho^*(\mathcal{N}_{\Sigma/S^{[2]}})|_{\widetilde{\zeta}}=\mathcal{O}_{\widetilde{\zeta}}(-2)\oplus\mathcal{O}_{\widetilde{\zeta}}(2).$$
Since $\widetilde{\rho}:\widetilde{\zeta}\rightarrow \zeta$ is a two to one cover, we obtain $k=1$, that is:
$$\mathcal{N}_{\Sigma/S^{[2]}}|_{\zeta}=\mathcal{O}_{\zeta}(-1)\oplus\mathcal{O}_{\zeta}(1).$$
By \cite[Chapter V, Proposition 2.3 and Proposition 2.9]{Hartshorne}, there exists a section $D_0$ such that
$\Pic H_1=\Z D_0\oplus\Z f$, with $D_0^2=-2$, $f^2=0$ and $D_0\cdot f=1$;
$f$ being the class of a fiber.
Moreover, by (\ref{gammadelta}) and using the projection formula, we know that:
$$j^{H_1}_*(D_0)=r_1^*(C)-\ell_{\delta}+a\ell_{\Sigma}.$$
To compute $a$, we only need to compute $D_0\cdot j^{H_1*}(\Sigma_1)$.
We apply the same method used in the proof of Lemma \ref{C0}.
By \cite[Propositions 2.6 and 2.8]{Hartshorne}, we have:
$$D_0\cdot \widetilde{j^{H_1}}^*(\Sigma_1)=D_0\cdot \mathcal{O}_{\mathbb{P}(\mathcal{O}_{\zeta}(-1)\oplus\mathcal{O}_{\zeta}(1))}(-1)=-D_0\cdot(D_0+f)=2-1=1.$$
This proves that $a=-1$.
\end{proof}
\begin{lemme}
Let $d$ be the class of a line in $\overline{\overline{C^{(2)}}}$. Then $\overline{j^{C}}_*(d)=r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma}$.
\end{lemme}
\begin{proof}
For instance $\overline{j^{C}}_*(d)$ corresponds to the class of the strict transform of $C^s$ by $r_1\circ \nu$ for $s\in C$. Let $\overline{C^s}$ be the strict transform of $C^s$ by $\nu$.
As we have already seen in Lemma \ref{stricttransform}, $\left[\overline{C^s}\right]_{S^{[2]}}=j^{C}_*(d)=C-\ell_{\delta}$. The intersection $\overline{C^{(2)}}\cap\Sigma$ corresponds to the strict transform of $\Delta_{S^{(2)}}^{\iota,C}$ by $\nu$ and by Lemma \ref{deltaiota}, it has the class of a line in $\overline{C^{(2)}}$. Hence $\overline{C^s}$ intersects $\Sigma$ transversely in one point and we obtain:
$\overline{j^{C}}_*(d)=r_1^*(C-\ell_{\delta})-\ell_{\Sigma}$.
\end{proof}
\subsubsection*{Conclusion on wall divisors}
\begin{lemme}
The extremal curves of $M'$ have classes $\pi_{1*}r_1^*\ell_{\delta}$, $\pi_{1*}\ell_{\Sigma}$ and $\pi_{1*}(r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma})$.
\end{lemme}
\begin{proof}
Our previous investigation of curves in $N_1$ shows that the
extremal curves in $N_1$ have classes $r_1^*\ell_{\delta}$,
$\ell_{\Sigma}$ and $r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma}$.
This implies that the extremal curves in $M'$ have classes $\pi_{1*}r_1^*\ell_{\delta}$, $\pi_{1*}\ell_{\Sigma}$ and $\pi_{1*}(r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma})$.
\end{proof}
We can compute their dual divisors to obtain wall divisors with Proposition \ref{extremalray}.
\begin{prop}\label{walldiv1}
The divisors $\delta'$, $\Sigma'$, $D_C'$ and $D_C'-\frac{1}{2}(\delta'+\Sigma')$ are wall divisors in $M'$.
Moreover, they verify the following numerical properties:
\begin{itemize}
\item
$q_{M'}(\delta')=q_{M'}(\Sigma')=q_{M'}(D_C')=-4$
and ${\rm div}(\delta')={\rm div}(\Sigma')={\rm div}(D_C')=2$;
\item
$q_{M'}\left(D_C'-\frac{1}{2}(\delta'+\Sigma')\right)=-6$ and ${\rm div}\left[D_C'-\frac{1}{2}(\delta'+\Sigma')\right]=2$.
\end{itemize}
\end{prop}
\begin{proof}
By Lemma \ref{dualdeltasigma}, $\frac{1}{2}\delta'$ and $\frac{1}{2}\Sigma'\in H^2(M',{\mathbb Q})$ are the duals of
$\pi_{1*}(r_1^*(\ell_{\delta}))$ and $\pi_{1*}(\ell_{\Sigma})$ respectively. Moreover by Lemma \ref{intersection}, we know that $D_C\cdot C=-2$,
hence by (\ref{Smith2}): $$D_C'\cdot \pi_{1*}(r_1^*(C))=-4.$$
Moreover, since $D_C=j(C)$ where $j$ is the isometric embedding $H^2(S,\Z)\hookrightarrow H^2(S^{[2]},\Z)$, we have: $q_{S^{[2]}}(D_C)=-2$. So by Theorem \ref{BBform} (ii):
$$q_{M'}(D_C')=-4.$$
We obtain that $D_C'$ is the dual of $\pi_{1*}(r_1^*(C))$. Then $D_C'-\frac{1}{2}(\delta'+\Sigma')$ is the dual of $\pi_{1*}(r_1^*(C)-r_1^*(\ell_{\delta})-\ell_{\Sigma})$.
By Proposition \ref{extremalray} this proves that $\delta'$, $\Sigma'$ and $D_C'-\frac{1}{2}(\delta'+\Sigma')$ are wall divisors in $M'$.
Their claimed numerical properties are given by Theorem \ref{BBform} (iii) and Remark \ref{div}.
It remains to show that $D_C'$ is a wall divisor. By Proposition \ref{caca}, since $D_C'$ is a uniruled divisor, we have $(D_C',\alpha)_{q_{M'}}\geq0$ for all $\alpha\in\mathcal{B}\mathcal{K}_{M'}$. Since $\mathcal{B}\mathcal{K}_{M'}$ is open, it follows that $(D_C',\alpha)_{q_{M'}}>0$ for all $\alpha\in\mathcal{B}\mathcal{K}_{M'}$.
Now, we assume that there exists $g\in \Mon_{\Hdg}^2(M')$ and $\alpha\in\mathcal{B}\mathcal{K}_{M'}$ such that $(g(D_C'),\alpha)_{q_{M'}}=0$ and we will find a contradiction. Since $g\in \Mon_{\Hdg}^2(M')$ and $\Pic(M')=\Z D_C'\oplus \Z \frac{\delta'+\Sigma'}{2}\oplus\Z\frac{\delta'-\Sigma'}{2}$, there are only 6 possibilities:
$$g(D_C')=\left\{
\begin{array}{ll}
\pm D_C' & \text{or}\\
\pm \delta' & \text{or}\\
\pm \Sigma'. &
\end{array}
\right.$$
However, $(D_C',\alpha)_{q_{M'}}\neq0$, $(\delta',\alpha)_{q_{M'}}\neq0$ and $(\Sigma',\alpha)_{q_{M'}}\neq0$. This leads to a contradiction.
\end{proof}
\subsection{When the K3 surface $S$ used to construct the Nikulin orbifold contains two rational curves swapped by the involution}\label{sec:twocurves}
\subsubsection*{Framework}
Let $\Lambda_{K3}:=U^3\oplus^\bot E_8(-1)\oplus^\bot E_8(-1)$ be the K3 lattice. We fix for all this section three embeddings in $\Lambda_{K3}$ of three lattices $\mathcal{U}\simeq U^3$, $E_1\simeq E_8(-1)$ and $E_2\simeq E_8(-1)$ such that $\Lambda_{K3}\simeq \mathcal{U}\oplus^\bot E_1\oplus^\bot E_2$.
We consider $i$ the involution on $\Lambda_{K3}$ which exchanges $E_1$ and $E_2$ and fixes the lattice $\mathcal{U}$.
We consider $C\in E_1$ such that $C^2=-2$. We define $E^a:=\left\{\left.e-i(e)\right|\ e\in E_1\right\}\simeq E_8(-2)$.
By the surjectivity of the period map (see for instance Theorem \ref{mainGTTO}), we can choose a K3 surface $S$ such that
$$\Pic S=C\oplus E^a.$$
Then $\Pic S$ contains only two rational curves: one of class $C$ and the other of class $i(C)=C-(C-i(C))$.
It follows from Example \ref{examplewall} and Corollary \ref{cor:desrK} that there exists $\alpha\in \mathcal{K}_S$ invariant under the action of $i$.
Hence by Theorem \ref{mainHTTO} (ii), the involution $i$ extends to an involution $\iota$ on $S$ such that $\iota^*=i$.
Of course, we can refer to older results on K3 surfaces to construct $\iota$; however, we thought it simplest for the reader to refer to results stated in this paper.
As in Section \ref{onecurve}, the objective is to determine wall divisors of the Nikulin orbifold $M'$ obtained from the couple $(S^{[2]},\iota^{[2]})$ (see Section \ref{M'section}).
\subsubsection*{Notation and strategy}
We keep the same notation and the same strategy used in Section \ref{onecurve}. We also still use the notation from Section \ref{genericM'}.
In particular, we denote by $C$ and $\iota(C)$ the two curves in $S$.
\subsubsection*{Curves in $S^{[2]}$}
First, we determine the curves in $S^{(2)}$.
\begin{lemme}\label{S2curves2}
There are five cases for irreducible curves in $S^{(2)}$:
\begin{itemize}
\item[(1)]
the curves $C^s:=\rho(C\times s)=\rho(s\times C)$ with $s\notin C\cup\iota(C)$;
\item[(2)]
the curves $\iota(C)^s:=\rho(\iota(C)\times s)=\rho(s\times \iota(C))$ with $s\notin C\cup\iota(C)$;
\item[(3)]
curves in $C^{(2)}\simeq \mathbb{P}^2$;
\item[(4)]
curves in $\iota(C)^{(2)}\simeq \mathbb{P}^2$;
\item[(5)]
curves in $\iota(C)\times C=C\times \iota(C)\simeq \mathbb{P}^1\times\mathbb{P}^1$.
\end{itemize}
\end{lemme}
\begin{proof}
Same proof as Lemma \ref{S2curves}.
\end{proof}
This yields the following cases in $S^{[2]}$.
\begin{lemme}\label{curveS22}
We have the following cases for irreducible curves in $S^{[2]}$:
\begin{itemize}
\item[(0)]
Curves which are fibers of the exceptional divisor $\Delta\rightarrow \Delta_{S^{(2)}}$. As in Section \ref{genericM'}, we denote these curves by $\ell_{\delta}^s$,
where $s=\nu(\ell_{\delta}^s)$ and we denote their classes by $\ell_{\delta}$.
\item[(1)]
Curves which are strict transforms of $C^s$ with $s\notin C\cup\iota(C)$. We denote the class of these curves by $C$. Note that $C=\nu^*(\left[C^s\right])$ for $s\notin C$.
\item[(2)]
Curves which are strict transforms of $\iota(C^s)$ with $s\notin C\cup\iota(C)$. The class of these curves is $\iota^*(C)$.
\item[(3a)]
Curves contained in $H_2$.
\item[(3b)]
Curves contained in $\overline{C^{(2)}}$.
\item[(4a)]
Curves contained in $\iota(H_2)$.
\item[(4b)]
Curves contained in $\iota(\overline{C^{(2)}})$.
\item[(5)]
Curves in $\iota(C)\times C$.
\end{itemize}
\end{lemme}
\begin{proof}
The proof is similar to that of Lemma \ref{curveS2}. We only remark in addition that $\nu^{-1}(\iota(C)\times C)\simeq\iota(C)\times C$ because $C$ and $\iota(C)$ do not intersect; hence $\iota(C)\times C$ does not intersect $\Delta_{S^{(2)}}$. So by an abuse of notation, we still denote $\nu^{-1}(\iota(C)\times C)$ by $\iota(C)\times C$.
\end{proof}
\subsubsection*{Curves in $N_1$}
\begin{lemme}\label{curvesN12}
We have the following cases for irreducible curves in $N_1$:
\begin{itemize}
\item[(00)]
Curves contracted to a point by $r_1$.
They are fibers of the exceptional divisor $\Sigma_1\rightarrow \Sigma$ and their class is $\ell_{\Sigma}$.
\item[(0)]
Curves sent to $\ell_{\delta}^s$ by $r_1$. An extremal such curve has class $r_1^*(\ell_{\delta})$ by Lemma \ref{Rdeltalemma}.
\item[(1)]
Curves sent to $C^{s}$ by $r_1$ with $s\notin C\cup \iota(C)$.
They are curves of class $r_1^*(C)$.
\item[(2)]
Curves sent to $\iota(C^{s})$ by $r_1$ with $s\notin C\cup \iota(C)$.
They are curves of class $r_1^*(\iota^*(C))$.
\item[(3a)]
Curves contained in $\overline{H_2}$ the strict transform of $H_2$ by $r_1$.
\item[(3b)]
Curves contained in $\overline{\overline{C^{(2)}}}$ the strict transform of $\overline{C^{(2)}}$ by $r_1$.
\item[(4a)]
Curves contained in $\iota(\overline{H_2})$ the strict transform of $\iota(H_2)$ by $r_1$.
\item[(4b)]
Curves contained in $\iota(\overline{\overline{C^{(2)}}})$ the strict transform of $\iota(\overline{C^{(2)}})$ by $r_1$.
\item[(5a)]
Curves contained in $\overline{\iota(C)\times C}$ the strict transform of $\iota(C)\times C$ by $r_1$.
\item[(5b)]
Curves contained in $r_1^{-1}(\overline{\iota(C)\times C}\cap\Sigma)$.
\end{itemize}
\end{lemme}
\begin{proof}
The proof is similar to the proof of Lemma \ref{curvesN1}; the difference is that $H_2$, $\iota(H_2)$, $\overline{C^{(2)}}$ and $\iota(\overline{C^{(2)}})$ do not intersect $\Sigma$. Only $\iota(C)\times C$ intersects $\Sigma$.
\end{proof}
Now, we are going to determine the classes of all these curves.
\begin{lemme}
\begin{itemize}
\item[(3a)]
The extremal curves of $\overline{H_2}$ have classes $2(r_1^*(C)-r_1^*(\ell_{\delta}))$ and $r_1^*(\ell_{\delta})$ in $N_1$.
\item[(3b)]
The extremal curves of $\overline{\overline{C^{(2)}}}$ have class $r_1^*(C)-r_1^*(\ell_{\delta})$ in $N_1$.
\item[(4a)]
The extremal curves of $\iota(\overline{H_2})$ have classes $2(\iota^*(r_1^*(C))-r_1^*(\ell_{\delta}))$ and $r_1^*(\ell_{\delta})$ in $N_1$.
\item[(4b)]
The extremal curves of $\iota(\overline{\overline{C^{(2)}}})$ have class $\iota^*(r_1^*(C))-r_1^*(\ell_{\delta})$ in $N_1$.
\item[(5a)]
The extremal curves of $\overline{\iota(C)\times C}$ have class $r_1^*(C)-\ell_{\Sigma}$ and $r_1^*(\iota^*(C))-\ell_{\Sigma}$ in $N_1$.
\end{itemize}
\end{lemme}
\begin{proof}
\begin{itemize}
\item
Since $H_2$ and $\iota(H_2)$ do not intersect $\Sigma$, (3a) and (4a) are consequences of Lemma \ref{C0}.
\item
Similarly, since $\overline{C^{(2)}} $ and $\iota(\overline{C^{(2)}})$ do not intersect $\Sigma$, (3b) and (4b) are consequences of Lemma \ref{stricttransform}.
\item
We have $\overline{\iota(C)\times C}\simeq \iota(C)\times C\simeq \mathbb{P}^1\times\mathbb{P}^1$.
Let $j^{\iota}:\overline{\iota(C)\times C}\hookrightarrow N_1$ be the embedding in $N_1$. We want to compute the classes
$j^{\iota}_*(\left\{x\right\}\times \mathbb{P}^1)$ and $j^{\iota}_*(\mathbb{P}^1\times \left\{x\right\})$, where $\left\{x\right\}$ is just the class of a point in $\mathbb{P}^1$.
This corresponds to computing the strict transform by $r_1$ of $C^s$ with $s\in\iota(C)$ and the strict transform of $\iota(C^s)$ with $s\in C$. Since $C^s$ and $\iota(C^s)$ intersect $\Sigma$ in one point, we obtain our result.
\end{itemize}
\end{proof}
\begin{lemme}\label{H2'}
The surface $r_1^{-1}(\overline{\iota(C)\times C}\cap\Sigma)$ is a Hirzebruch surface that we denote by $H_2'$.
Let $f$ be a fiber of $H_2'$. There exists a section $C_0'$ which
satisfies: $\Pic H_2'=\Z C_0'\oplus\Z f$, $C_0'^2=-4$, $f^2=0$ and $C_0'\cdot f=1$.
Moreover, $j^{H_2'}_*(C_0')=r_1^*(C)+r_1^*(\iota^*(C))-2\ell_{\Sigma}$, where $j^{H_2'}:H_2'\hookrightarrow N_1$ is the embedding.
\end{lemme}
\begin{proof}
We denote by $\zeta$ the curve $\overline{\iota(C)\times C}\cap\Sigma$. This curve is the strict transform of $\Delta_{S^{(2)}}^{\iota,C}$ by $\nu$.
Since $\Delta_{S^{(2)}}^{\iota,C}$ does not intersect $\Delta$, its class in $S^{[2]}$ is:
\begin{equation}
\left[\zeta\right]=C+\iota^*(C).
\label{gammadelta2}
\end{equation}
To understand which Hirzebruch surface $H_2'$ is, we are going to compute $\mathcal{N}_{\Sigma/S^{[2]}}|_{\zeta}$.
Let $\Delta_{\widetilde{S^2}}^{\iota,C}$ be the strict transform of $\Delta_{S^{2}}^{\iota,C}$ by $\widetilde{\nu}$.
We have $\zeta\simeq \Delta_{\widetilde{S^2}}^{\iota,C}\simeq \Delta_{S^{2}}^{\iota,C}$.
Hence: $$\mathcal{N}_{\Sigma/S^{[2]}}|_{\zeta}=\rho^*(\mathcal{N}_{\Sigma/S^{[2]}})|_{\Delta_{\widetilde{S^2}}^{\iota,C}}=\widetilde{\nu}^*(\mathcal{N}_{S_{\iota}/S^2}|_{\Delta_{S^{2}}^{\iota,C}}).$$
As in the proof of Lemma \ref{Hirzebruch}, we have:
$$\mathcal{N}_{S_{\iota}/S^2}|_{\Delta_{S^{2}}^{\iota,C}}\simeq T_S|_C\simeq \mathcal{O}_C(-2)\oplus\mathcal{O}_C(2).$$
Hence $$\mathcal{N}_{\Sigma/S^{[2]}}|_{\zeta}=\mathcal{O}_{\zeta}(-2)\oplus\mathcal{O}_{\zeta}(2).$$
By \cite[Chapter V, Proposition 2.3 and Proposition 2.9]{Hartshorne},
we know that there exists a section $C_0'$ of $H_2'$ such that $\Pic H_2'=\Z C_0'\oplus\Z f$, with $C_0'^2=-4$, $f^2=0$ and $C_0'\cdot f=1$;
$f$ being the class of a fiber.
By (\ref{gammadelta2}) and using the projection formula, we know that:
$$j^{H_2'}_*(C_0')=r_1^*(C)+r_1^*(\iota^*(C))+a\ell_{\Sigma}.$$
To compute $a$, we only need to compute $C_0'\cdot j^{H_2'*}(\Sigma_1)$.
We apply the same method used in the proof of Lemma \ref{C0}.
By \cite[Propositions 2.6 and 2.8]{Hartshorne}, we have:
$$C_0'\cdot j^{H_2'*}(\Sigma_1)=C_0'\cdot \mathcal{O}_{\mathbb{P}(\mathcal{O}_{\zeta}(-2)\oplus\mathcal{O}_{\zeta}(2))}(-1)=-C_0'\cdot(C_0'+2f)=4-2=2.$$
This proves that $a=-2$.
\end{proof}
\subsubsection*{Conclusion on wall divisors}
\begin{lemme}\label{extrem2}
The extremal curves of $M'$ have classes $\pi_{1*}r_1^*\ell_{\delta}$, $\pi_{1*}\ell_{\Sigma}$, $\pi_{1*}(r_1^*(C)-r_1^*(\ell_{\delta}))$ and $\pi_{1*}(r_1^*(C)-\ell_{\Sigma})$.
\end{lemme}
\begin{proof}
It is obtained by taking the image by $\pi_{1*}$ of the classes of the extremal curves in $N_1$.
\end{proof}
We can compute their dual divisors to obtain wall divisors with Proposition \ref{extremalray}.
\begin{prop}\label{prop:twocurves}
The divisors $\delta'$, $\Sigma'$, $D_C'$, $2D_C'-\delta'$ and $2D_C'-\Sigma'$ are wall divisors in $M'$.
Moreover, they verify the following numerical properties:
\begin{itemize}
\item
$q_{M'}(\delta')=q_{M'}(\Sigma')=-4$
and ${\rm div}(\delta')={\rm div}(\Sigma')=2$;
\item
$q_{M'}(D_C')=-2$
and ${\rm div}(D_C')=1$;
\item
$q_{M'}\left(2D_C'-\delta'\right)=q_{M'}\left(2D_C'-\Sigma'\right)=-12$ and ${\rm div}\left[2D_C'-\delta'\right]={\rm div}\left[2D_C'-\Sigma'\right]=2$.
\end{itemize}
\end{prop}
\begin{proof}
By Lemma \ref{dualdeltasigma}, $\frac{1}{2}\delta'$ and $\frac{1}{2}\Sigma'\in H^2(M',{\mathbb Q})$ are the duals of
$\pi_{1*}(r_1^*(\ell_{\delta}))$ and $\pi_{1*}(\ell_{\Sigma})$ respectively. Moreover by Lemma \ref{intersection},
we know that $D_C\cdot C=-2$,
hence by (\ref{Smith2}): $$\pi_{1*}(r_1^*(D_C+\iota^*(D_C)))\cdot \pi_{1*}(r_1^*(C+\iota^*(C)))=-8.$$
So $$D_C'\cdot \pi_{1*}(r_1^*(C))=-2.$$
Moreover, since $D_C=j(C)$ where $j$ is the isometric embedding $H^2(S,\Z)\hookrightarrow H^2(S^{[2]},\Z)$, we have: $q_{S^{[2]}}(D_C)=-2$. So by Theorem \ref{BBform} (ii):
$$q_{M'}(\pi_{1*}(r_1^*(D_C+\iota^*(D_C))))=-8.$$
Hence:
\begin{equation}
q_{M'}(D_C')=-2.
\label{DC'2}
\end{equation}
We obtain that $D_C'$ is the dual of $\pi_{1*}(r_1^*(C))$. Then $D_C'-\frac{1}{2}\delta'$ is the dual of $\pi_{1*}(r_1^*(C)-r_1^*(\ell_{\delta}))$ and $D_C'-\frac{1}{2}\Sigma'$ is the dual of $\pi_{1*}(r_1^*(C)-\ell_{\Sigma})$.
By Proposition \ref{extremalray} this proves that $\delta'$, $\Sigma'$, $2D_C'-\delta'$ and $2D_C'-\Sigma'$ are wall divisors in $M'$.
Their claimed numerical properties are given by Theorem \ref{BBform}(iii), Remark \ref{div} and (\ref{DC'2}).
It remains to prove that $D_C'$ is a wall divisor. The proof is very similar to the one of Proposition \ref{walldiv1}. For the same reason, we have $(D_C',\alpha)_{q_{M'}}>0$ for all $\alpha\in\mathcal{B}\mathcal{K}_{M'}$.
Now, we assume that there exists $g\in \Mon_{\Hdg}^2(M')$ and $\alpha\in\mathcal{B}\mathcal{K}_{M'}$ such that $(g(D_C'),\alpha)_{q_{M'}}=0$ and we will find a contradiction. Since $C\in E_1$, we have ${\rm div}(D_C')=1$. Since $g\in \Mon_{\Hdg}^2(M')$ and $\Pic(M')=\Z D_C'\oplus \Z \frac{\delta'+\Sigma'}{2}\oplus\Z\frac{\delta'-\Sigma'}{2}$, it follows that there are only 2 possibilities:
$$g(D_C')=\pm D_C',$$
because ${\rm div}(D_C')=1$ and ${\rm div}(\frac{\delta'+\Sigma'}{2})=2$.
Since $(D_C',\alpha)_{q_{M'}}\neq0$, this leads to a contradiction.
\end{proof}
\begin{rmk}\label{Remark:twocurves}
Note that in this case, $D_C'-\frac{\delta'+\Sigma'}{2}$ is not a wall divisor. Indeed, by Lemma
\ref{extrem2}, the class $-2D_C'-\delta'-\Sigma'$ is the projection on $\Pic M'$ of a Kähler class. However,
observe that $\left(D_C'-\frac{\delta'+\Sigma'}{2},-2D_C'-\delta'-\Sigma'\right)_q=0$.
\end{rmk}
\subsection{Wall divisors on a Nikulin orbifold constructed from a specific elliptic K3 surface}\label{sec:elliptic}
As before, we consider the K3 lattice $\Lambda_{K3}:=U^3\oplus^\bot E_8(-1)\oplus^\bot E_8(-1)$ with
the three embedded lattices $\mathcal{U}\simeq U^3$, $E_1\simeq E_8(-1)$ and $E_2\simeq E_8(-1)$ such that $\Lambda_{K3}\simeq \mathcal{U}\oplus^\bot E_1\oplus^\bot E_2$.
The involution $i$ on $\Lambda_{K3}$ is still the involution which exchanges $E_1$ and $E_2$ and fixes the lattice $\mathcal{U}$.
As before, we keep $E^a:=\left\{\left.e-i(e)\right|\ e\in E_1\right\}\simeq E_8(-2)$. For simplicity, we denote $E_8(-2):=\left\{\left.e+i(e)\right|\ e\in E_1\right\}$ which is the invariant lattice.
Let $L_1\in \mathcal{U}$ be such that $L_1^2=2$ and $e_2^{(0)}\in E_1$ an element with $(e_2^{(0)})^2=-4$.
Using the surjectivity of the period map (see for instance Theorem \ref{mainGTTO}), we choose a K3 surface $S$ such that:
$$\Pic S=\Z(L_1+e_2^{(0)}) \oplus E^a.$$
Note that the direct sum is not orthogonal.
We denote:
$$v_{K3}:=2L_1+e_2,$$
with $e_2:=e_2^{(0)}+i(e_2^{(0)})$. We have $v_{K3}^2=0$ and $\Pic S\supset \Z v_{K3} \oplus^{\bot} E^a$.
As before, it follows from Example \ref{examplewall} and Corollary \ref{cor:desrK} that there exists $\alpha\in \mathcal{K}_S$ invariant under the action of $i$. Hence by Theorem \ref{mainHTTO} (ii), the involution $i$ extends to an involution $\iota$ on $S$ such that $\iota^*=i$. We consider $M'$ constructed from the couple $(S,\iota)$.
In contrast to the two previous sections, we will not need to find all the extremal curves in this case.
The wall divisors will be deduced from the investigation of this section and the numerical properties obtained in Section \ref{endsection}.
The K3 surface $S$ contains a (-2)-curve of class $L_1+e_2^{(0)}$. We denote this curve $\gamma$. The class of $\iota(\gamma)$ is $L_1+i(e_2^{(0)})$. Hence $\gamma\cup\iota(\gamma)$ has class $v_{K3}$ and provides a fiber of the elliptic fibration $f:S\rightarrow \Pj^1$. Moreover, we have:
$$\left[\gamma\right]\cdot \left[\iota(\gamma)\right]=(L_1+e_2^{(0)})\cdot(L_1+i(e_2^{(0)}))=2.$$
We denote by $\overline{\gamma}$ the class $\nu^*(\left[\gamma^s\right])$, with
$\gamma^s:=\gamma\times\left\{s\right\}$.
We also denote $D_{\gamma}:=j(\gamma)$ and $D_{\gamma}':=\pi_{1*}(r_1^*(D_{\gamma}))$.
We consider the following divisor in $S^{(2)}$:
$$A:=\left\{\left.\left\{x,y\right\}\in S^{(2)}\right|\ f(x)=f(y)\right\},$$
with $f$ the elliptic fibration $S\rightarrow \Pj^1$.
We denote by $A'$ the image by $\pi_1$ of the strict transform of $A$ by $r_1\circ\nu$.
\begin{lemme}\label{ellipticlemma}
We have:
\begin{itemize}
\item[(i)]
the class of the strict transform of $\gamma^s$ by $r_1\circ \nu$ is $r_1^{*}(\overline{\gamma}-\ell_{\delta})-\ell_{\Sigma}$, for $s\in \gamma\cap \iota(\gamma)$.
\item[(ii)]
The dual of $\pi_{1*}(r_1^*(\overline{\gamma}))$ is $D_{\gamma}'$.
\item[(iii)]
The divisor $D_{\gamma}'$ has square 0 and divisibility 1.
\item[(iv)]
The divisor $A'$ has class $D_{\gamma}'-\frac{\delta'+\Sigma'}{2}$.
\end{itemize}
\end{lemme}
\begin{proof}
\begin{itemize}
\item[(i)]
The statement follows directly from the fact that $\gamma^s$ intersects $\Delta_{S^{(2)}}$ and $\Sigma$ in one point.
\item[(ii)]
Let $w\in E_8(-2)$ be an invariant element under the action of $\iota$ such that $\gamma\cdot w=:k$.
Then
\begin{equation}
\overline{\gamma}\cdot j(w)=(D_\gamma,j(w))_{q_{S^{[2]}}}=k.
\label{dualgamma}
\end{equation}
We set $w':=\pi_{1*}(r_1^*(j(w)))$.
It follows by (\ref{Smith2}) that $\pi_{1*}(r_1^*(\overline{\gamma}+\iota^{[2]*}(\overline{\gamma})))\cdot w'=4k$. Hence
\begin{equation}
\pi_{1*}(r_1^*(\overline{\gamma}))\cdot w'=2k.
\label{gammaprime}
\end{equation}
Moreover by Theorem \ref{BBform} (ii), Remark \ref{Smithcomute} and (\ref{dualgamma}), we have:
$$(\pi_{1*}r_1^{*}(D_\gamma+\iota^{[2]*}(D_\gamma)),\pi_{1*}(r_1^*(j(w))))_{q_{M'}}=4k.$$
Hence:
\begin{equation}
(D_\gamma',w')_{q_{M'}}=2k.
\label{w}
\end{equation}
We obtain that $D_\gamma'$ is the dual of $\pi_{1*}(r_1^*(\gamma))$.
\item[(iii)]
We have by Theorem \ref{BBform} (ii) and Remark \ref{Smithcomute} $$q_{M'}(\pi_{1*}r_1^{*}(D_\gamma+\iota^{[2]*}(D_\gamma)))=2q_{S^{[2]}}(D_\gamma+\iota^{[2]*}(D_\gamma))=0.$$
Hence $q_{M'}(\pi_{1*}r_1^{*}(D_\gamma))=0$.
To prove that ${\rm div}(D_\gamma')=1$,
we choose a specific $w$: $w=w^{(0)}+i(w^{(0)})$ such that $w^{(0)}\in E_1$ and $w^{(0)}\cdot e_2^{(0)}=1$; then $w\cdot\gamma=1$. Then by (\ref{w}) we have $(D_\gamma',w')_{q_{M'}}=2$. However $w'$ is divisible by 2. We obtain our result.
\item[(iv)]
Since $A$ is invariant under the action of $\iota$ and considering the intersection with $\rho_*(w\times \left\{pt\right\})$, we see that the class of $A$ is given by $\rho_*(\left[S\times v_{K3}\right])$. Then the strict transform $\widetilde{A}$ by $r_1\circ \nu$ has class $r_1^*(j(v_{K3})-\delta)-\Sigma_1$ because $A$ contains the surfaces $\Delta_{S^{(2)}}$ and $\Sigma$. We recall that $\pi_{1*}(j(v_{K3}))=2 D_\gamma'$. Therefore, since $\widetilde{A}\rightarrow A'$ is a double cover, $A'$ has class $\frac{1}{2}(2D_\gamma'-\delta'-\Sigma')$.
\end{itemize}
\end{proof}
\begin{lemme}\label{monolemma}
The reflexion $R_{A'}$ is a monodromy operator and $R_{A'}(\Sigma')=2D_{\gamma}'-\delta'$.
\end{lemme}
\begin{proof}
The reflexion $R_{A'}$ is a monodromy operator by \cite[Theorem 3.10]{Lehn2}.
$$R_{A'}(\Sigma')=\Sigma'-\frac{2(\Sigma',A')_q}{q(A')}A'=\Sigma'+2A'=2D_\gamma'-\delta'.$$
\end{proof}
\begin{lemme}\label{extremallemma}
There exists an extremal curve of $M'$ that is written $a\pi_{1*}r_1^*(\overline{\gamma})+b\pi_{1*}r_1^*(\ell_{\delta})+c\pi_{1*}(\ell_{\Sigma})$ with $(a,b,c)\in\Z^3$ and $a>0$.
\end{lemme}
\begin{proof}
The curves $\pi_{1*}r_1^*(\overline{\gamma})$, $\pi_{1*}r_1^*(\ell_{\delta})$ and $\pi_{1*}(\ell_{\Sigma})$ are primitive in $H^{3,3}(M',\Z)$. Indeed, we have seen in the proof of Lemma \ref{ellipticlemma} that we can choose $k=1$ in equation (\ref{gammaprime}). Moreover $w'$ is divisible by 2. We obtain: $\pi_{1*}r_1^*(\gamma)\cdot \frac{1}{2}w'=1$. Similarly, by Lemma \ref{dualdeltasigma}, we know that $\pi_{1*}r_1^*(\ell_{\delta})$ and $\pi_{1*}(\ell_{\Sigma})$ are primitive by considering the intersection with $\frac{\delta'+\Sigma'}{2}$.
Therefore, the class of a curve in $M'$ will be written $a\pi_{1*}r_1^*(\overline{\gamma})+b\pi_{1*}r_1^*(\ell_{\delta})+c\pi_{1*}(\ell_{\Sigma})$, with $(a,b,c)\in\Z^3$.
If we consider the pull-back by $\pi_1$ and the push-forward by $r_1\circ \nu$ of the class $a\pi_{1*}r_1^*(\overline{\gamma})+b\pi_{1*}r_1^*(\ell_{\delta})+c\pi_{1*}(\ell_{\Sigma})$, we obtain $2a\overline{\gamma}$. For a curve, there are two possibilities $a=0$ or $a>0$; it is not possible to have $a=0$ for every curve, hence there exists an extremal curve as mentioned in the statement of the lemma.
\end{proof}
\begin{rmk}\label{wallelliptic}
Let $a\pi_{1*}r_1^*(\overline{\gamma})+b\pi_{1*}r_1^*(\ell_{\delta})+c\pi_{1*}(\ell_{\Sigma})$ be the class of the extremal curve obtained from Lemma \ref{extremallemma}.
By Proposition \ref{extremalray}, the dual of this curve class is a wall divisor. According to Lemmas \ref{ellipticlemma} and \ref{dualdeltasigma}, that is $aD_{\gamma}'+\frac{b}{2}\delta'+\frac{c}{2}\Sigma'$.
\end{rmk}
\begin{lemme}\label{mainelliptic}
Let $E=aD_{\gamma}'+\frac{b}{2}\delta'+\frac{c}{2}\Sigma'$ be the previous wall divisor obtained from Remark \ref{wallelliptic}, possibly renormalized such that $E$ is primitive in $H^2(M',\Z)$. Moreover, we assume that $E$ verifies one of the numerical conditions listed in the statement of Theorem \ref{main}. Then:
$$E=D_{\gamma}'-\frac{\delta'+\Sigma'}{2}.$$
\end{lemme}
\begin{proof}
We have:
$$q_{M'}(E)=-(b^2+c^2).$$
Considering the numerical conditions of Theorem \ref{main}, there are three possibilities:
\begin{itemize}
\item[(i)]
$b=\pm1$ and $c=\pm1$ or
\item[(ii)]
$b=\pm2$ and $c=0$ or
\item[(iii)]
$b=0$ and $c=\pm2$.
\end{itemize}
In the possibilities (ii) and (iii) $q_{M'}(E)=-4$. In this case, following the conditions in Theorem \ref{main}, we know that $E$ has divisibility 2. Hence by Lemma \ref{ellipticlemma} (iii), $a$ is divisible by 2.
This corresponds to the extremal rays of curves $a\pi_{1*}r_1^*(\gamma)\pm2\pi_{1*}r_1^*(\ell_{\delta})$ or $a\pi_{1*}r_1^*(\gamma)\pm2\pi_{1*}(\ell_{\Sigma})$. However, by Lemma \ref{ellipticlemma} (i) these rays cannot be extremal.
Therefore, $E=aD_{\gamma}'+\frac{\pm1}{2}\delta'+\frac{\pm1}{2}\Sigma'$. Moreover, the extremal ray associated to $E$ has class:
$$a\pi_{1*}r_1^*(\gamma)\pm\pi_{1*}r_1^*(\ell_{\delta})\pm\pi_{1*}(\ell_{\Sigma}).$$
But, we know by Lemma \ref{ellipticlemma} (i) that $\pi_{1*}r_1^*(\gamma)-\pi_{1*}r_1^*(\ell_{\delta})-\pi_{1*}(\ell_{\Sigma})$ is the class of a curve. Hence the only possibility for the previous extremal ray is $\pi_{1*}r_1^*(\gamma)-\pi_{1*}r_1^*(\ell_{\delta})-\pi_{1*}(\ell_{\Sigma})$.
\end{proof}
\section{Monodromy orbits}\label{sec:monodromy-orbits}
In this section, we study the orbit of classes in the lattice $H^2(X,{\mathbb Z})$ under the action of $\Mon^2(X)$ for an
irreducible symplectic orbifold $X$ of Nikulin-type. Note that since the property of being a wall divisor is
deformation invariant (see Theorem \ref{wall}) we may assume without loss of generality, that $X$ is the Nikulin orbifold $M'$ for a
given K3 surface with symplectic involution.
The main result of this section is Theorem \ref{thm:9monorb-M'}, which describes a set of representatives in each monodromy orbit.
This will enable us to determine wall divisors for Nikulin-type in the next section by checking only the
representatives.
For completeness, note that we did not determine the precise monodromy orbit of each element. Since, we only
used a subgroup of the actual monodromy group, it could happen
that more than one of the elements in Theorem \ref{thm:9monorb-M'} belong to the same orbit.
\subsection{Equivalence of lattices} \label{sec:eq-lattices}
\begin{lemme}\label{lem:twist-spec}
For the questions at hand, the consideration of the following two lattices is equivalent:
$$\Lambda\coloneqq \Lambda_{M'}=U(2)^{ 3} \oplus E_8(-1)\oplus (-2) \oplus (-2)$$
and
$$\hat{\Lambda} \coloneqq U^{ 3} \oplus E_8(-2)\oplus (-1) \oplus (-1).$$
More precisely, there is a natural correspondence between lattice automorphisms for both lattices, and
a natural identification between the rays in both lattices.
\end{lemme}
This is a special case of the following:
\begin{lemme}\label{lem:twist-gen}
Let $M$ and $N$ be two unimodular lattices.
Then $L\coloneqq M\oplus N(2)$ and $\hat{L}\coloneqq M(2)\oplus N$ satisfy the following
properties:
There exists a natural identification between lattice automorphisms for both lattices, and
the rays in both lattices can be naturally identified.
\end{lemme}
\begin{proof}
Observe that by multiplying the quadratic form of the lattice $L$ by $2$, we obtain a lattice
$L(2) \iso M(2)\oplus N(4)$, which obviously satisfies that the rays and
automorphisms are naturally identified for $L$ and $L(2)$.
Notice that $N(4)$ can be identified with the sublattice of $N$ consisting of elements of the form $\{2n\,|\,
n\in N\}$. Therefore, we can naturally include $L (2) \subset \hat{L}$.
This immediately implies the natural identification of rays in $L$ and $\hat{L}$.
For the identification of automorphisms, observe that any automorphism $\hat{\varphi}\in \Aut(\hat{L})$
preserves the sublattice $L(2)$: In fact $L(2)\subset \hat{L}$ consists precisely of those
(not necessarily primitive) elements whose divisibility is even, and this subset needs to be preserved by
any automorphism. This yields a natural inclusion $\Aut(\hat{L})\subset \Aut(L(2))\iso
\Aut(L)$.
The inverse inclusion is given by the same argument from considering $\hat{L}(2)\subset L$.
\end{proof}
Fix a K3 surface $S$ with a symplectic involution $\iota$
and consider the induced Nikulin orbifold $M'$ associated to $S$.
Further, fix a marking $\varphi_S\colon H^2(S,{\mathbb Z}) \to \Lambda_{K3}=U^3\oplus E_8(-1)^2$ of $S$ such that
$\iota^*$ corresponds to
swapping the two copies of $E_8(-1)$.
Then the fixed part of $\iota^*$ is isomorphic to $U^3 \oplus E_8(-2)$.
Note that on $X$, which we choose as the associated orbifold $M'$ to $(S,\iota)$, this induces a marking
$\varphi_{X}\colon H^2(X,{\mathbb Z})\to \Lambda=\Lambda_{M'}=U(2)^{ 3} \oplus
E_8(-1)\oplus (-2)^2$ by Theorem \ref{BBform}, where the $U(2)^{ 3} \oplus
E_8(-1)$-part comes from the invariant lattice of the K3 surface (precisely as described in Lemma \ref{lem:twist-gen}),
and the two $(-2)$-classes correspond to $\frac{\delta' + \Sigma'}{2}$ and $\frac{\delta' - \Sigma'}{2}$.
Therefore, the sublattice $U^{ 3} \oplus E_8(-2)$ in $\hat{\Lambda}$ can naturally be identified with the fixed part of
$H^2(S,{\mathbb Z})$, whereas the generators of square $(-1)$ correspond to $\frac{\hdel + \hSig}{2}$ and
$\frac{\hdel - \hSig}{2}$ for the corresponding elements $\hdel, \hSig \in \hat{\Lambda}$.
With this notation, we can define the group $\Mon^2(\hat{\Lambda})$ of monodromy operators for
the lattice $\hat{\Lambda}$: An automorphism $\hat{\varphi} \in \Aut(\hat{\Lambda})$ is in $\Mon^2(\hat{\Lambda})$ if the
corresponding automorphism $\varphi \in \Aut(\Lambda)$ is identified with an element of $\Mon^2(X)$ via the
marking $\varphi_X$.
In the following we will frequently consider the sublattice
$$\hat{\Lambda}_1 \coloneqq U^{ 3} \oplus E_8(-2)\oplus (-2) \oplus (-2) \subset \hat{\Lambda},$$
which replaces the $(-1)\oplus (-1)$-part by the sublattice generated by $\hdel$ and $\hSig$.
Define $$\Mon^2(\hat{\Lambda}_1) \coloneqq \{f \in \Aut (\hat{\Lambda}_1) \,|\, \exists \hat{f} \in \Mon^2(\hat{\Lambda}):
f=\hat{f}|_{\hat{\Lambda}_1}\}.$$
\begin{rmk}
Note that while there exists an identification
$\Mon^2(X) =\Mon^2(\hat{\Lambda})$,
there exists a natural inclusion $\Mon^2(\hat{\Lambda}_1) \subseteq \Mon^2(\hat{\Lambda})$ but a priori this
is not an equality. \TODO{try to understand if this is an equality or not}
\end{rmk}
Note that Proposition \ref{MonoM'} can be reformulated in terms of the lattice $\hat{\Lambda}_1$:
\begin{cor}\label{cor:inheritedMononHat}
Let $f\in \Mon^2(S^{[2]})$ be a monodromy operator such
that $f \circ \iota^{[2]*} = \iota^{[2]*} \circ f$ on $H^2(S^{[2]},{\mathbb Z})$.
Let $\hat{f}\in \Aut(\hat{\Lambda}_1)$ be the automorphism defined via the following properties:
Via the marking described above, $\hat{f}$ restricted to $U^{ 3}\oplus E_8(-2)\oplus (-2)$ coincides with the restriction of
$f$ to the invariant part of the lattice (i.e.~$\hat{f}|_{U^{ 3}\oplus E_8(-2)\oplus (-2)}=
f|_{H^2(S^{[2]},{\mathbb Z})^{\iota^{[2]}}} $) and $\hat{f}(\hSig)=\hSig$.
Then $\hat{f}\in \Mon(\hat{\Lambda}_1)$.
\end{cor}
\begin{proof} This is a straightforward verification: Proposition \ref{MonoM'} gives the inherited monodromy operator
$f'\in \Mon^2(\Lambda)$ and $\hat{f}\in \Aut(\hat{\Lambda}_1)$ is precisely the restriction to
$\hat{\Lambda}_1$ of the corresponding automorphism of $\hat{\Lambda}$ obtained via Lemma \ref{lem:twist-spec}.
\end{proof}
For the proof of Theorem \ref{thm:9monorb-M'} we will study monodromy orbits with respect to successively increasing lattices:
\begin{equation*}\hat{\Lambda}_3\subset \hat{\Lambda}_2 \subset \hat{\Lambda}_1,
\end{equation*}
where $\hat{\Lambda}_3\coloneqq U^{ 3}\oplus (-2)$ with the generator $\hdel$ for the $(-2)$-part,
$\hat{\Lambda}_2\coloneqq U^{ 3}\oplus E_8(-2) \oplus (-2) $, and $\hat{\Lambda}_1$ is as defined above.
Define the following monodromy groups for these lattices.
\begin{align*}
\Mon^2(\hat{\Lambda}_2)&=\{f\in \Aut(\hat{\Lambda}_2)| \exists f_1 \in \Mon^2(\hat{\Lambda}_1) : f=f_1|_{\hat{\Lambda}_2},
f_1|_{\hat{\Lambda}_2^\perp}=\id\}\\
\Mon^2(\hat{\Lambda}_3)&=\{f\in \Aut(\hat{\Lambda}_3)| \exists f_1 \in \Mon^2(\hat{\Lambda}_1) : f=f_1|_{\hat{\Lambda}_3},
f_1|_{\hat{\Lambda}_3^\perp}=\id\}.
\end{align*}
Note that with this definition, there exist natural inclusions
$\Mon^2(\hat{\Lambda}_3)\subseteq \Mon^2(\hat{\Lambda}_2)\subseteq \Mon^2(\hat{\Lambda}_1)$.
\subsection{Monodromy orbits in $\hat{\Lambda}_3$}\label{subsec:monK32-part}
In this subsection, we consider the sublattice $\hat{\Lambda}_3 = U^{ 3} \oplus (-2)\subset \hat{\Lambda}_1$, where the generator of $(-2)$ is the class $\hdel$.
\begin{nota}
For the rest of this article, fix elements $L_i \in U \subset \hat{\Lambda}_1$ of square $2i$ for each $i\in {\mathbb Z}$.
E.g. one can choose the elements $ie + f$, where $e,f$ is a standard basis for which $U$ has intersection matrix
$\begin{pmatrix}
0&1\\ 1& 0
\end{pmatrix}$.
\end{nota}
\begin{lemme}\label{lem:K3-2-part}
The $\Mon^2(\hat{\Lambda}_3)$-orbit of a primitive element in $U^{ 3} \oplus (-2)$ is uniquely determined by
its square and its divisibility.
More precisely, we prove the following:
Let $v \in \hat{\Lambda}_3$ be a primitive element.
Then there exists a monodromy operator $f\in \Mon^2(\hat{\Lambda}_3)$ such that
$v$ is moved to an element of the following form:
$f(v)=\left\{
\begin{array}{lll}
L_{i} &\textrm{with\ } i=\frac{1}{2}q(v) & \textrm{if\ }{\rm div}(v)=1 \\
2L_{i} -\hdel &\textrm{with\ } i=\frac{1}{8}(q(v)+2) & \, \textrm{if\ } {\rm div}(v)=2 \\
\end{array}
\right. $
\end{lemme}
The proof will make use of the following two well-known statements:
The Eichler criterion, which we will frequently use in this section (see \cite[Lemma 3.5]{Gritsenko-Hulek-Sankaran}, originally due to \cite[Chapter 10]{Eichler}\TODO{add a working reference}).
\begin{lemme}\label{lem:Eichler}
Let $\Gamma$ be a lattice with $U^{ 2} \subseteq \Gamma$. Fix two elements $v,w \in \Gamma$ which
satisfy
\begin{enumerate}
\item $q(v) = q(w)$,
\item ${\rm div}(v)={\rm div}(w) =: r$,
\item $\frac{v}{r}=\frac{w}{r} \in A_\Gamma \coloneqq \Gamma^\vee / \Gamma$.
\end{enumerate}
Then there exists $\varphi \in \Aut(\Gamma)$ such that
$\varphi(v)=w$, and such that the induced action $\varphi_A$ on the discriminant group $A_\Gamma$ is the identity.
\end{lemme}
Furthermore, recall the following description of the monodromy group of varieties of $K3^{[2]}$-type:
\begin{thm}[{see \cite[Lemma 9.2]{Markman11}}] \label{thm:MonK32}
For a K3 surface $S$ the monodromy group $\Mon^2(S^{[2]})$ coincides with $O^+(\Lambda_{K3^{[2]}})$, which is the index 2 subgroup of $\Aut(\Lambda_{K3^{[2]}})$ which consists of automorphisms preserving the positive cone.
\end{thm}
\begin{proof}[Proof of Lemma \ref{lem:K3-2-part}]
The proof of Lemma \ref{lem:K3-2-part} is an immediate consequence of the previous statements.
First apply the Eichler criterion (Lemma \ref{lem:Eichler}) for $\Gamma =\hat{\Lambda}_3$:
Observe that for a given element $v\in U^{ 3}\oplus (-2)$ the claimed image element has
the same square and divisibility.
In the case of ${\rm div}(v)=1$, note that $\frac{v}{1} = 0 \in A_{\hat{\Lambda}_3}$ (since $\frac{v}{1} \in
\hat{\Lambda}_3$). Therefore, the Eichler criterion applies automatically in this case.
For the case of ${\rm div}(v)=2$, note that $v$ can be written as $v=aL + b\hdel$ with a primitive element $L\in
U^{ 3}$ and $\hdel$ as before.
The fact that ${\rm div}(v)=2$ implies that $a$ is divisible by two.
Since we assumed that $v$ is primitive, $b$ is odd. Note that $A_{\hat{\Lambda}_3} \iso {\mathbb Z}/2{\mathbb Z}$ is
spanned by the image of $\frac{\hdel}{2}$, since $U$ is unimodular.
Therefore, we can also apply the Eichler criterion in this case.
In both cases, the Eichler criterion shows that there is $f \in \Aut(\hat{\Lambda}_3)$ where
$f(v)$ coincides with the claimed image.
Extending this by the identity on the respective orthogonal complements, we will by abuse of notation
consider $f \in \Aut (\Lambda_{K3^{[2]}})$ resp.~$f\in \Aut(\hat{\Lambda}_1)$.
Applying Theorem \ref{thm:MonK32}, we observe that up to potentially swapping a sign in one of the copies of $U$, $f\in \Aut (\Lambda_{K3^{[2]}})$ is in fact an
element of $\Mon^2(\Lambda_{K3^{[2]}})$.
And therefore, Corollary \ref{cor:inheritedMononHat} implies that $f\in \Aut (\hat{\Lambda}_1)$
is in $\Mon^2(\hat{\Lambda}_1)$ as claimed.
\end{proof}
\subsection{Monodromy orbits in the lattice $\hat{\Lambda}_2$}
In this section, we study the monodromy group for the lattice $\hat{\Lambda}_2 = U^3\oplus E_8(-2) \oplus
(-2)$. Notice, that via the identifications described in Section \ref{sec:eq-lattices} the lattice
$\hat{\Lambda}_2$ corresponds to the
${\iota^{[2]}}^*$-invariant lattice of $\Lambda_{K3^{[2]}}$.
Let us refine the methods from the previous section to describe properties of the monodromy orbits in this
lattice $\hat{\Lambda}_2$.
For this we need to deal with the $E_8(-2)$-part of the lattice.
For the basic notions considering discriminant groups of lattices, we refer to \cite{Nikulin}.
Recall that $E_8$ is a unimodular lattice and therefore, the discriminant group $A_{E_8}=0$ is trivial.
Pick a basis of $E_8(-2)$ for which the intersection matrix is: \TODO{(-2) times the one associated
to the $E_8$-graph}
$$\begin{pmatrix}
-4 & 2 & 0 & 0 & 0 & 0 & 0 & 0\\
2 & -4 & 2 & 0 & 0 & 0 & 0 & 0\\
0 & 2 & -4 & 2 & 0 & 0 & 0 & 0\\
0 & 0 & 2 & -4 & 2 & 0 & 0 & 0\\
0 & 0 & 0 & 2 & -4 & 2 & 0 & 2\\
0 & 0 & 0 & 0 & 2 & -4 & 2 & 0\\
0 &0 & 0 & 0 & 0 & 2 & -4 & 0 \\
0 &0 &0 & 0 & 2 & 0 & 0 & -4
\end{pmatrix}.
$$
For the lattice $E_8(-2)$ one can deduce that the discriminant group $A_{E_8(-2)}\iso ({\mathbb Z}/2{\mathbb Z})^{ 8}$ which is
generated by the residue classes of one half of the generators of the lattice.
The quadratic form $q$ on $E_8(-2)$ induces a quadratic form $\bar{q}\colon A_{E_8(-2)}\to {\mathbb Z}/2{\mathbb Z}$.
The possible values of $\bar{q}$ on elements of $A_{E_8(-2)}$ are in fact 1 and 0 (these values are achieved
e.g.~by the residue classes of $\frac{v}{2}$ for lattice elements of $v\in E_8(-2)$ with squares $-4$ and $-8$).
Denote by $\Aut(A_{E_8(-2)})$ the automorphisms of $A_{E_8(-2)}$ preserving the quadratic form $\bar{q}$.
Note that every automorphism $\varphi\in \Aut(E_8(-2))$ induces an element $\bar{\varphi}\in
\Aut(A_{E_8(-2)})$. Therefore, there exists an induced action of $\Aut(E_8(-2))$ on $A_{E_8(-2)}$.
\begin{lemme}\label{lem:A-E8-orbits}
There exist precisely three $\Aut(E_8(-2))$-orbits in $A_{E_8(-2)}$:
They are given by $0$, and by non-zero elements $\overline{e_1}, \overline{e_2}\in A_{E_8(-2)}$ with $\bar{q}(\overline{e_1})=1$ respectively $\bar{q}(\overline{e_2})=0$.
\end{lemme}
\begin{proof}
This lemma is a direct consequence of results in \cite[Chapter 4]{Griess}.
Let $$\mathbb{L}_0=\left\{0\right\},\ \mathbb{L}_{-4}=\left\{\left.\alpha\in E_8(-2)\right|\ \alpha^2=-4\right\},\ \mathbb{L}_{-8}=\left\{\left.\alpha\in E_8(-2)\right|\ \alpha^2=-8\right\}.$$
As explained in \cite[just after Notation 4.2.32]{Griess}, the natural map $b:\mathbb{L}_0\cup \mathbb{L}_{-4} \cup \mathbb{L}_{-8}\rightarrow A_{E_8(-2)}$ is surjective. The images $b(\mathbb{L}_{-4})$ and $b(\mathbb{L}_{-8})$ correspond respectively to the elements of square 1 and the non trivial elements of square 0 in $A_{E_8(-2)}$.
However by \cite[Corollary 4.2.41]{Griess} and \cite[Lemma 4.2.46 (2)]{Griess} respectively, $\Aut(E_8(-2))$ acts transitively on $\mathbb{L}_{-8}$ and on $\mathbb{L}_{-4}$. The result follows.
\end{proof}
Consider $E_8(-2) \subset E_8(-1)\oplus E_8(-1)$ consisting of elements of the form $(e,e)$ for $e\in
E_8(-1)$ and denote the sublattice consisting of elements of the form $(e, -e)$ by $E^a$. Note that this gets
naturally identified with the anti-invariant lattice of $S$ via the marking described in Section
\ref{sec:eq-lattices}.
\begin{lemme} \label{lem:extension}
With this notation, any lattice isometry $\varphi \in \Aut(\hat{\Lambda}_2)$ can be extended to a lattice isometry $\Phi \in \Aut(U^3\oplus
E_8(-1)\oplus E_8(-1)\oplus (-2))$, with the additional property, that $\Phi$ preserves the sublattice
$E^a$.
\end{lemme}
\begin{proof}
This is an immediate consequence of \cite[Corollary 1.5.2]{Nikulin} (applied to twice the lattice
$\hat{\Lambda}_2$) and the surjection $\Aut(E_8(-2)) \surj \Aut(A_{E_8(-2)})$ (which enables us to choose an appropriate
extension on the orthogonal complement $E^a$ of $\hat{\Lambda}_2$).
\end{proof}
Fix two elements in $E_8(-2)$: one element $e_1$ of square $-4$ and one element $e_2$ of square $-8$. Note
that according to Lemma \ref{lem:A-E8-orbits} the residue classes of $\frac{e_1}{2}$ and $\frac{e_2}{2}$ in $A_{E_8(-2)}$ represent the two
non-zero orbits under the action of the isometry group. For
coherence of the notation, adapt the choices such that the residue of $\frac{e_1}{2}$ in the discriminant is
$\overline{e_1}$ and the residue of $\frac{e_2}{2}$ is $\overline{e_2}$.
\begin{prop} \label{prop:6monorb-in-invariant}
Let $v\in \hat{\Lambda}_2$ be a primitive non-zero element. Denote by $v_{E_8}$ the projection of $v$ to the $E_8(-2)$-part
of the lattice, and let ${\bar{v}}_{E_8}$ be the image of $\half v_{E_8}$ in the discriminant group $A_{E_8(-2)}$.
Then there exists a monodromy operator
$f\in \Mon^2(\hat{\Lambda}_2)$ such that
$$f(v)=\left\{
\begin{array}{lllll}
1) &L_{i} & \textrm{if\ }{\rm div}(v)=1&\textrm{with\ } i=\frac{1}{2}q(v) & \\
2) &2L_{i} - \hdel & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-2, & \textrm{and\ } {\bar{v}}_{E_8}=0\\
3) &2L_{i} + e_1 & \, \textrm{if\ } {\rm div} (v)=2,& q(v)=8i-4&\\
4) &2L_{i+1} + e_2 & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i & \\
5) &2L_{i} + e_1 - \hdel & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-6 & \\
6) &2L_{i+1} + e_2 -\hdel & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-2, & \textrm{and\ } {\bar{v}}_{E_8}\neq 0.\\
\end{array}
\right. $$
\end{prop}
\begin{remark}
Note that the values of $q$ and ${\rm div}$ uniquely distinguish the orbit, except from the
cases 2 and 6, where the additional condition on ${\bar{v}}_{E_8}$ is needed to determine the known representative
of the orbit.
\end{remark}
\TODO{for ourselves, and probably for second version: If I remember correctly, we showed that the groups $\hdel^\perp$ and
$(e_2+\hdel)^\perp$ in $A_{\hat{\Lambda}_2}$ are not isomorphic, thus showing, that they actually represent
different orbits with respect to $\Aut(\hat{\Lambda}_2)$}
\begin{proof}
Let us first observe that if ${\rm div}(v)=1$ the exact same proof as for Lemma \ref{lem:K3-2-part} for the case
of divisibility 1 applies.
Therefore, we only need to deal with the case, where ${\rm div}(v)=2$.
Start by observing that the discriminant group of $\hat{\Lambda}_2$ is $A_{E_8(-2)}\times {\mathbb Z}/2{\mathbb Z}$.
For our given element $v\in \hat{\Lambda}_2$, denote by $\bar{v}$ the image of $\frac{v}{2}$ in the discriminant group, and let
$\bar{v}_e$ be the $A_{E_8(-2)}$-part of this.
By Lemma \ref{lem:A-E8-orbits}, there exists $\varphi \in \Aut(E_8(-2))$ such that $\bar{\varphi}(\bar{v}_e) \in
A_{E_8(-2)}$ coincides with one of $\{0,\overline{e_1},\overline{e_2}\}$. For the corresponding $\varphi_1
\in \Aut(E_8(-1))$ consider $(\varphi_1,\varphi_1) \in \Aut(E_8(-1)\oplus E_8(-1))$, which obviously commutes with
the swapping of the two factors and induces $\varphi$ on $E_8(-2)\subset E_8(-1)\oplus E_8(-1)$.
Extend this to
$\Phi \in \Aut(\Lambda_{K3^{[2]}})$ via the identity on the other direct summands.
By Theorem \ref{thm:MonK32} the operator $\Phi\in \Mon^2(S^{[2]})$ is in the monodromy group
and therefore the induced action on $\hat{\Lambda}_2$ is an element of $\Mon^2(\hat{\Lambda}_2)$ by Proposition \ref{MonoM'}. By construction, this
restricts to $\varphi \in \Aut(E_8(-2))$.
Therefore, up to first applying the above monodromy operator, we may assume that $\bar{v}\in
\{0,\overline{e_1},\overline{e_2}\}\times {\mathbb Z}/2{\mathbb Z}$.
For the second step, observe that cases 2) to 6) listed in the proposition correspond
precisely to the non-zero elements of $\{0,\overline{e_1},\overline{e_2}\}\times {\mathbb Z}/2{\mathbb Z}$.
By varying the parameter $i$, the elements in the list can furthermore achieve all possible values for
$q(v)$ with the prescribed residue in $\{0,\overline{e_1},\overline{e_2}\}\times {\mathbb Z}/2{\mathbb Z}$.
Therefore, for our given element $v \in \hat{\Lambda}_2$ with $\bar{v}\in
\{0,\overline{e_1},\overline{e_2}\}\times {\mathbb Z}/2{\mathbb Z}$, we can choose $v_0$ from the above list (for
appropriate choice of $i$) such that $q(v)=q(v_0)$ and $\bar{v}=\overline{v_0}\in \hat{\Lambda}_2$ (and
${\rm div}(v)={\rm div}(v_0)=2$ follows automatically).
Therefore, by the Eichler criterion (Lemma \ref{lem:Eichler}), there exists an automorphism $\varphi\in \Aut(\hat{\Lambda}_2)$ such that
$\varphi(v)=v_0$. This can be extended to an automorphism $\Phi \in \Aut(U^3\oplus E_8(-1)\oplus E_8(-1)\oplus
(-2))$ of the lattice $\Lambda_{K3^{[2]}}$ by Lemma \ref{lem:extension}.
Observe that up to changing a sign in one of the copies of $U$, we can assume that $\Phi\in
\Mon^2(\Lambda_{K3^{[2]}})$ by Theorem \ref{thm:MonK32}. Since this monodromy operator commutes with ${\iota^{[2]}}^*$ (it
preserves the invariant lattice and the anti-invariant lattice by construction) Proposition \ref{MonoM'}
shows that it induces a monodromy operator on $\Lambda_{M'}$ which in turn corresponds to $\varphi$ extended by the
identity via Lemma \ref{lem:twist-spec}. Therefore, again up to potentially changing a sign in one of the
copies of $U$, the automorphism $\varphi \in \Mon^2(\hat{\Lambda}_2)$. This
completes the proof.
\end{proof}
\subsection{Induced monodromy orbits on the lattice $\hat{\Lambda}_1$}
Recall that
$\hat{\Lambda}_1= U^{ 3} \oplus E_8(-2)\oplus (-2) \oplus (-2)$.
\begin{thm} \label{thm:9monorb}
Let $v\in \hat{\Lambda}_1$ be a primitive non-zero element. Denote by $v_{E_8}$ the projection of $v$ to the $E_8(-2)$-part
of the lattice, and let ${\bar{v}}_{E_8}$ be its image in the discriminant group $A_{E_8(-2)}$.
Then there exists a monodromy operator
$f\in \Mon^2(\hat{\Lambda}_1)$ such that
$$f(v)=\left\{
\begin{array}{lllll}
1) &L_{i} & \textrm{if\ }{\rm div}(v)=1&\textrm{with\ } i=\frac{1}{2}q(v) & \\
2) &2L_{i} - \hdel & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-2, & \textrm{and\ } {\bar{v}}_{E_8}=0\\
3) &2L_{i+1} + e_2 -\hdel & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-2, & \textrm{and\ } {\bar{v}}_{E_8}\neq 0\\
4) &2L_i - \hdel - \hSig & \, \textrm{if\ } {\rm div} (v)=2,& q(v)=8i-4,&\textrm{and\ } {\bar{v}}_{E_8}=0\\
5) &2L_{i+1} + e_2-\hdel - \hSig & \, \textrm{if\ } {\rm div} (v)=2,& q(v)=8i-4,&\textrm{and\ } \bar{q}({\bar{v}}_{E_8})=0, {\bar{v}}_{E_8}\neq 0\\
6) &2L_{i} + e_1 & \, \textrm{if\ } {\rm div} (v)=2,& q(v)=8i-4,&\textrm{and\ } \bar{q}({\bar{v}}_{E_8})=1\\
7) &2L_{i} + e_1 - \hdel & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-6, & \\
8) &2L_i + e_1 - \hdel - \hSig & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i-8,
& \textrm{and\ } \bar{q}({\bar{v}}_{E_8})=1\\
9) &2L_{i+1} + e_2 & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=8i, & \textrm{and\ } \bar{q}({\bar{v}}_{E_8})=0.\\
\end{array}
\right. $$
\end{thm}
\TODO{Regrouper les cas 2 et 3 en utilisant le lemme \ref{monolemma}}
\begin{rmk}\label{rem:L0goesaway}
Observe that whenever $L_0$ is involved in the statement of Theorem \ref{thm:9monorb}, it can be replaced by
$0$ (apart from case 1) ) since both elements are in the same monodromy orbit.
\end{rmk}
\begin{proof} The proof of this theorem consists of a series of applications of Proposition \ref{prop:6monorb-in-invariant}
and the existence of the monodromy operator $R_{\frac{\hdel - \hSig}{2}}$ (compare Remark \ref{RdeltaSigma} and notation of Section \ref{notation}).
First note that since $v$ is primitive, it can be expressed as
$v=k\gamma + a\hdel + b \hSig$, where $\gamma\in U^3\oplus E_8(-2)$ is a primitive element, and $\gcd(a,b,k)=1$.
First let us assume that ${\rm div}(v)=1$.
The element $k\gamma + a\hdel \in \hat{\Lambda}_2$ corresponds to $\gcd(k,a)$ times a primitive element of
divisibility 1 inside $\hat{\Lambda}_2$.
Therefore, by Proposition \ref{prop:6monorb-in-invariant} there exists an element $f_1 \in \Mon^2(\hat{\Lambda}_2)$ such that
$f_1(k\gamma + a\hdel) = \gcd(k,a)\cdot L_{q_1}$ for a suitable choice of $q_1$. By extending $f_1$ to $\Mon^2(\hat{\Lambda}_1)\supseteq
\Mon^2(\hat{\Lambda}_2)$, observe that
$f_1(v)=\gcd(k,a) \cdot L_{q_1} + b \hSig$.
Apply the monodromy operator $R_{\frac{\hdel -\hSig}{2}}$ to obtain $\gcd(k,a) \cdot L_{q_1} + b \hdel$, which is a primitive
element of divisibility 1 in $\hat{\Lambda}_2$.
Using once again Proposition \ref{prop:6monorb-in-invariant} find $f_2 \in \Mon^2(\hat{\Lambda}_2)\subseteq \Mon^2(\hat{\Lambda}_1)$
such that $f_2(\gcd(k,a) \cdot L_{q_1} + b \hdel)=L_{q_2}$.
The composition of these monodromy operators is therefore the claimed $f\in \Mon^2(\hat{\Lambda}_1)$ and concludes
the proof under the assumption that ${\rm div}(v)=1$.
Therefore, we only need to deal with the case that ${\rm div}(v)=2$. Let $\bar{v}$ be the residue class of
$\frac{v}{2}$ in $A_{\hat{\Lambda}_1}$.
Let us first work under the additional assumption that $\gcd(k,a)$ is odd (while still assuming ${\rm div}(v)=2$).
Under this assumption, the element $k\gamma + a\hdel \in \hat{\Lambda}_2$ corresponds to $\gcd(k,a)$ times a
primitive element $v_1$ in $\hat{\Lambda}_2$, which satisfies $\bar{v}_1=\bar{v}_{\hat{\Lambda}_2}\in A_{\hat{\Lambda}_2}$, where
$\bar{v}_1$ is the residue of $\frac{v_1}{2}$, and $\bar{v}_{\hat{\Lambda}_2}$ is the $A_{\hat{\Lambda}_2}$-part of
$\bar{v}$.
Then there exists a monodromy operator $f_1\in
\Mon^2(\hat{\Lambda}_2)\subset\Mon^2(\hat{\Lambda}_1)$ such that $f_1(v_1)$ is one of the cases 2) to 6) from Proposition
\ref{prop:6monorb-in-invariant}.
After applying the operator $R_{\frac{\hdel - \hSig}{2}}$, we obtain an element $v_2$ of one of the following
forms:
$$v_2=\left\{
\begin{array}{lllll}
a) &2\gcd(k,a)L_{q_1} &&+ b\hdel &- \gcd(k,a)\hSig \\
b) &2\gcd(k,a)L_{q_1} &+ \gcd(k,a)e_1 &+ b\hdel&\\
c) &2\gcd(k,a)L_{q_1} &+ \gcd(k,a)e_2 &+ b\hdel&\\
d) &2\gcd(k,a)L_{q_1} &+ \gcd(k,a)e_1 &+ b\hdel&-\gcd(k,a)\hSig \\
e) &2\gcd(k,a)L_{q_1} &+ \gcd(k,a)e_2 &+ b\hdel&- \gcd(k,a)\hSig
\end{array}
\right. $$
for suitable choice of $q_1$.
Since $\gcd(k,a,b)=1$, note that the $\hat{\Lambda}_2$-component of $v_2$ is primitive unless we are dealing with
case a) from above and at the same time $b$ is even.
We will separately consider these two situations:
\noindent Case 1:
If the $\hat{\Lambda}_2$-component $v_{2,\hat{\Lambda}_2}$ of $v_2$ is primitive, then one can find a monodromy operator
moving $v_{2,\hat{\Lambda}_2}$ to one of the cases from Proposition \ref{prop:6monorb-in-invariant}.
In cases b) and c) from above the resulting element already attains the form of one of the cases claimed in our theorem.
In the other cases, apply the operator $R_{\frac{\hdel - \hSig}{2}}$ once again, followed by Proposition
\ref{prop:6monorb-in-invariant} to conclude the proof of case 1.
\noindent Case 2: Assume that $v_{2,\hat{\Lambda}_2}$ is non-primitive, and therefore we are in case a) from above
with the
additional assumption that $b=2b'$ is even.
This means that $v_2=2(\gcd(k,a)L_{q_1} + b'\hdel)- \gcd(k,a)\hSig$. Since $\gcd(k,a)$ is odd, $v_{2,\hat{\Lambda}_2}$ is twice
a primitive element of divisibility 1, and therefore $v_{2,\hat{\Lambda}_2}$ can be moved to an element of the form
$2L_{q_2}-\gcd(k,a)\hSig$. Applying the operator $R_{\frac{\hdel - \hSig}{2}}$ and using Proposition
\ref{prop:6monorb-in-invariant} completes the proof of case 2 (since $\gcd(k,a)$ is odd by assumption).
The only remaining case, which has not yet been analyzed, is if ${\rm div}(v)=2$ and $\gcd(k,a)$ is even.
Notice that under these assumptions $b$ is odd and in particular $\gcd(k,b)$ is odd.
Therefore, after application of the operator $R_{\frac{\hdel - \hSig}{2}}$, we find ourselves in the above
setting, which concludes the final case of the proof.
\end{proof}
From this we can easily deduce a corresponding statement for the original lattice $\Lambda_{M'}$.
We need to fix some notation in order to formulate the statement.
Consider an irreducible symplectic orbifold $X$ of Nikulin-type, with a given marking
$\varphi\colon H^2(X,{\mathbb Z})\overset{\iso}{\to} \Lambda_{M'}$.
Let $L_i^{(2)}\in U(2)$ be an element of square $4i$ (corresponding to the element $L_i \in U$). Furthermore, fix
elements $e_1^{(1)}$ and $e_2^{(1)}\in E_8(-1)$ with squares $q(e_1^{(1)})=-2$ and $q(e_2^{(1)})=-4$ (these elements
correspond to the elements $e_1$ and $e_2 \in E_8(-2)$).
\begin{thm} \label{thm:9monorb-M'}
Let $v\in \Lambda_{M'}$ be a primitive non-zero element. Denote by $v_{E_8}$ the projection of $v$ to the $E_8(-1)$-part
of the lattice, and let ${\bar{v}}_{E_8}$ be its image in the ${\mathbb Z}/4{\mathbb Z}$-module $E_8(-1)/4E_8(-1)$.
Then there exists a monodromy operator
$f\in \Mon^2(X)$ such that
$$ f(v)=\left\{
\begin{array}{l}
\textrm{If $v$ corresponds to a ray of divisibility 1 in $\hat{\Lambda}_1$ (see below for checkable
condition):} \\
\hspace{0.5 em}
1) \hspace{1em}L^{(2)}_{i} \hspace{7.5em}\textrm{with\ }{\rm div}(v)=2 \textrm{\ and\ } q(v)=4i. \\
\textrm{Otherwise, if $v$ corresponds to a ray of divisibility 2 in $\hat{\Lambda}_1$:}\\
\begin{array}{lllll}
2) &2L^{(2)}_{i} - \delta' & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=16i-4, & \textrm{and\ } {\bar{v}}_{E_8}=0\\
3) &2L^{(2)}_{i+1} + 2e_2^{(1)} -\delta' & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=16i-4, & \textrm{and\ } {\bar{v}}_{E_8}\neq 0\\
4) &L^{(2)}_i - \frac{\delta' + \Sigma'}{2} & \, \textrm{if\ } {\rm div} (v)=2,& q(v)=4i-2,&\textrm{and\ } {\bar{v}}_{E_8}=0\\
5) &L^{(2)}_{i+1} + e_2^{(1)}-\frac{\delta' + \Sigma'}{2} & \, \textrm{if\ } {\rm div} (v)=1,& q(v)=4i-2,&\textrm{and\ }
q(v_{E_8})\equiv 0 \pmod{4}\\
6) &L^{(2)}_{i} + e_1^{(1)} & \, \textrm{if\ } {\rm div} (v)=1,& q(v)=4i-2,&\textrm{and\ } q(v_{E_8})\equiv 2 \pmod{4} \\
7) &2L^{(2)}_{i} + 2e_1^{(1)} - \delta' & \, \textrm{if\ } {\rm div}(v)=2, & q(v)=16i-12, & \textrm{and\ } {\bar{v}}_{E_8}\neq 0\\
8) &L^{(2)}_i + e_1^{(1)} - \frac{\delta' + \Sigma'}{2} & \, \textrm{if\ } {\rm div}(v)=1, & q(v)=4i-4,
& \textrm{and\ } q(v_{E_8})\equiv 2 \pmod{4} \\
9) &L^{(2)}_{i+1} + e_2^{(1)} & \, \textrm{if\ } {\rm div}(v)=1, & q(v)=4i, & \textrm{and\ } q(v_{E_8})\equiv 0 \pmod{4}.\\
\end{array}
\end{array}
\right. $$
The condition that $v$ corresponds to a ray of divisibility 1 in $\hat{\Lambda}_1$ is equivalent to
satisfying the following three conditions inside $\Lambda$:
\begin{compactenum}
\item The restriction $v_{U^3(2)}$ of $v$ to $U^3(2)$ is not divisible by 2,
\item the restriction $v_{E_8}$ of $v$ to $E_8(-1)$ is divisible by 2, and
\item the restriction $v_{(-2)\oplus (-2)}$ to $\langle \frac{\delta'+ \Sigma'}{2} , \frac{\delta'-
\Sigma'}{2}\rangle$ is contained in the sublattice $\langle \delta', \Sigma'\rangle$.
\end{compactenum}
\end{thm}
\begin{proof}
Similar to Lemma \ref{lem:twist-gen}
use the inclusion $\hat{\Lambda}_1(2) \subset \Lambda_{M'}$ and notice that under this correspondence $L_i$
is sent to $L_{i}^{(2)}$, $e_i$ is sent to $2e_i^{(1)}$, $\hdel$ to $\delta'$, and $\hSig$ to $\Sigma'$.
Then passing to the primitive element in the ray and determining the new square and divisibility gives the
new cases.
For the part of the condition involving $v_{E_8}$, simply check that under the assumptions on $q$ and ${\rm div}$ these are
equivalent to the corresponding ones in $\hat{\Lambda}_1$ from Theorem \ref{thm:9monorb}.
The same formalism admits a straightforward verification of the characterization of when $v$ corresponds
to a ray of divisibility 1 in $\hat{\Lambda}_1$.
\end{proof}
\begin{cor}
There are at most 3 monodromy orbits of primitive non-zero
elements with prescribed square and divisibility (both in $\Lambda_{M'}$ and in $\hat{\Lambda}_1$).
\end{cor}
\begin{proof}
Since the values of $q$ and ${\rm div}$ are given, this can be read off immediately from the statements of Theorems \ref{thm:9monorb} and
\ref{thm:9monorb-M'}.
\end{proof}
\begin{rmk}\label{rem:L0goesaway2} Again, one can replace $L_0^{(2)}$ by $0$ in all cases except from Case 1),
since both elements in question lie in the same monodromy orbit.
\end{rmk}
Let us conclude this section by the following observation:
\begin{cor}\label{cor:Chiara}
For every element $v\in \Lambda_{M'}$ of square $-4$ and divisibility $2$ the reflection (defined by
$R_v(\alpha)\coloneqq \alpha -2 \frac{(\alpha,v)_q}{q(v)}v$) gives an element in the monodromy group.
\end{cor}
\begin{proof}
Begin by observing that this property is equivalent for different elements in the same monodromy orbit.
Therefore, it suffices to check it for one representative of each orbit. By the list from Theorem
\ref{thm:9monorb-M'}, the orbits of square $-4$ and divisibility $2$ have one of the following representatives:
$L_{-1}^{(2)}$ (Case 1), $\delta'$ (Case 2), or $2L_1^{(2)} + 2 e_2^{(1)} - \delta'$ (Case 3).
The
associated elements $L_{-1}$, $\delta$, and $2L_1 + e_2 - \delta$ in the invariant part of
$\Lambda_{K3^{[2]}}$ all have square $-2$ and thus their reflections correspond to monodromy operators
on $\Lambda_{K3^{[2]}}$ (e.g.~by Theorem \ref{thm:MonK32}), which commute with $\iota ^{[2]*}$.
Therefore, Proposition \ref{MonoM'} applies to show that the claimed reflections in $\Lambda_{M'}$ are indeed
monodromy operators.
\end{proof}
\begin{cor}\label{Lastmonodromy}
For every element $v\in \Lambda_{M'}$ of square $-2$ and divisibility $2$ the reflection (defined by
$R_v(\alpha)\coloneqq \alpha -2 \frac{(\alpha,v)_q}{q(v)}v$) gives an element in the monodromy group.
\end{cor}
\begin{proof}
The proof is similar to the one of Corollary \ref{cor:Chiara}. From Theorem \ref{thm:9monorb-M'}, we note that
all such elements $v$ are in the same monodromy orbit (Case 4) which contains the element $\frac{\delta'-\Sigma'}{2}$. Hence the result follows from Remark \ref{RdeltaSigma}.
\end{proof}
\section{Determining the wall divisors}\label{endsection}
In this section, we combine the results from the last sections to prove the main theorem of this paper: Theorem
\ref{main} which gives a complete description of the wall divisors for Nikulin-type orbifolds.
For the proof of the theorem let us start from some $X_0$ which is the Nikulin orbifold associated to some K3
surface $S_0$ obtained by the construction in Section \ref{M'section}.
Fix a marking $\varphi_0 \colon H^2(X_0,\Z) \to \Lambda_{M'}= U(2)^{ 3} \oplus E_8(-1)\oplus (-2) \oplus (-2)$,
where as usual $U(2)^{ 3} \oplus E_8(-1)$ corresponds to the part coming from the invariant lattice of
$S_0$ and the two generators of the $(-2)$-part are $\frac{\delta'+ \Sigma'}{2}$ and $\frac{\delta'-
\Sigma'}{2}$.
Let us recall the details of this identification:
For the K3 surface $S_0$ with a symplectic involution $\iota$ the $\iota$-anti-invariant part of the lattice is
isomorphic to $E_8(-2)$ and one can choose a marking
$\varphi_{S_0}\colon H^2(S_0,{\mathbb Z}) \to \Lambda_{K3}\iso U^3 \oplus E_8(-1)^2$, such that the $\iota^*$
acts by exchanging the two copies of $E_8(-1)$. Therefore, the invariant lattice of $\iota$ corresponds to
$\Lambda_{K3}^\iota \iso U^3\oplus E_8(-2)$, where the elements of $E_8(-2)$ are of the form $e+\iota^*(e)$ for elements $e$ in the
first copy of $E_8(-1)$. Similarly the anti-invariant lattice of $\iota$ is $E_8(-2)$ consisting of elements
of the form
$e-\iota^*(e)$. We will denote the anti-invariant part of the lattice by $E^a$.
With this convention, the lattice $U(2)^{3} \oplus E_8(-1)$ corresponds to the
invariant via a twist as described in Lemma \ref{lem:twist-gen} of
$\Lambda_{K3}^{\iota}$.
In order to prove the main theorem, we need to determine for each ray in $\Lambda_{M'}$ whose generator is of
negative Beauville-Bogomolov square, whether it corresponds to a wall divisor for Nikulin-type orbifolds. Obviously, this notion is invariant under the monodromy action by the deformation
invariance (see Theorem \ref{wall}).
It therefore suffices to pick one representative for each monodromy orbit and to determine it for this choice.
By Lemma \ref{lem:twist-gen}, the rays of $\Lambda_{M'}$ are in (1:1)-correspondence with rays in the lattice
$\hat{\Lambda}_1$, and obviously the property that the generator has negative square coincides in both cases.
Therefore, we only need to deal with the cases from Theorem \ref{thm:9monorb} (respectively Theorem
\ref{thm:9monorb-M'}), for which $i$ is chosen such that the square is negative.
\noindent {\it Case 1:}
As a warm-up, let us start with Case 1 of Theorem \ref{thm:9monorb} separately (i.e.~the ray in question is generated by the element $L_i$ with $i<0$). Note
that $L_i$ naturally corresponds to an element $\varphi_{S_0} ^{-1}L_i\in H^2(S_0,{\mathbb Z})$.
Let $(S,\varphi_S)$ be a marked K3 surface such that the Picard lattice of $S$ is $\Pic(S)=\varphi_S^{-1}(L_i
\oplus E^a)$ (which exists by the surjectivity of the period map).
If $i< -1$, then $S$ does not contain any effective curve (since $\Pic(S)$ only has non-zero elements of square smaller
than $-2$). Therefore, we are in the situation of Section \ref{genericM'} and
one observes that $L_i$ does not correspond to a wall divisor for
Nikulin-type orbifolds if $i<-1$: In fact, $L_i\in \hat{\Lambda}_1$ corresponds to $L_{i}^{(2)}\in \Lambda$, which is
not a wall divisor by Proposition \ref{exwalls}.
Note that the divisors $L_{i}^{(2)}\in \Lambda$ satisfy $q(L_{i}^{(2)})=-4i$, ${\rm div}(L_{i}^{(2)})=2$, and $(L_i^{(2)})_{U(2)^3}$ is
not divisible by 2, which confirms Theorem \ref{main} for Case 1 if $i<-1$.
If $i=-1$ (and therefore $q(L_i)=-2$), we are in the situation of Section \ref{onecurve}, and one can deduce
from Proposition \ref{walldiv1} that $L_{i}^{(2)}\in \Lambda$ (which is precisely $D_C'$) is a wall-divisor for Nikulin-type, which confirms Theorem
\ref{main} in this case.
\noindent {\it Cases 2, 4:}
As in the proof for Case 1, choose a K3 surface $S$ such that $\Pic(S)=\varphi_S ^{-1}(L_i\oplus E^a)$.
As before, Section \ref{genericM'} applies and Proposition \ref{exwalls} implies that for $i<-1$, the only rays corresponding to wall-divisors are
$\delta'$ and $\Sigma'$, and therefore there are no additional wall divisors of the forms given in Cases 2
and 4 in this example.
Similarly, the results from Section \ref{onecurve} imply that for $i=-1$ the wall divisors are $\delta'$,
$\Sigma'$, $L_{i}^{(2)}$, and $L_{i}^{(2)}-\half(\delta' + \Sigma')$ (compare
Proposition \ref{walldiv1}). Therefore, Case 4 provides precisely a wall divisor of square $-6$ and
divisibility $2$, thus confirming Theorem \ref{main} in this case.
However, for Cases 2 and 4, we also need to consider $i=0$ since the total square will still be negative. By
Remark \ref{rem:L0goesaway}, the monodromy orbits of $2L_0 + \hdel$ (resp.~$2L_0 + \hdel + \hSig$) coincide
with those of $\hdel$ and $\hdel + \hSig$, and therefore we can instead deform towards a very
general K3 surface $S$ with a symplectic involution (i.e.~$\Pic(S)=E^a$) and apply the results from Section
\ref{genericM'} to observe that $\delta'$ is a wall divisor of square $-4$ and divisibility $2$, whereas
$\half (\delta' + \Sigma')$ is not.
\noindent {\it Cases 6, 7, and 8:}
Similar to the previous situation, the element $2L_i + e_1$ naturally corresponds to an element
$\varphi_{S_0}^{-1}(2L_i + e_1) \in H^2(S_0,{\mathbb Z})$.
Notice, that we are only interested in the cases, where $q(2L_i + e_1)<0$, which corresponds to $i\leq 0$.
Under this condition, the direct sum $(2L_i+ e_1) \oplus E^a$ is a negative definite sublattice of
$\Lambda_{K3}$.
However, notice that this in itself cannot be realized as the Picard lattice of a K3 surface, since it is not
a saturated sublattice:
By definition $e_1\in E_8(-2)$ is an element of square $-4$, where $E_8(-2)$ is part of the invariant lattice.
Therefore, by the above observation, there exists an element $e_1^{(0)}$ in the first copy of $E_8(-1)$ of square
$-2$ such that
$e_1 = e_1^{(0)} + \iota ^*(e_1^{(0)})$. With this notation the element
$2L_i + 2e_1^{(0)} = 2L_i + (e_1^{(0)} + \iota^*(e_1^{(0)})) + (e_1^{(0)} - \iota ^*(e_1^{(0)}))\in (2L_i+ e_1) \oplus E^a$, but
the element $L_i + e_1^{(0)}$ is not part of this direct sum. In fact, $(L_i + e_1^{(0)})\oplus E^a$ is the
saturation.
With this knowledge, let us choose a marked K3 surface $(S,\varphi_S)$ such that
$\Pic(S) =\varphi_S^{-1}((L_i + e_1^{(0)})\oplus E^a)$.
Note that if $i<0$, then $S$ does not contain any effective curve (since every non-zero element has square
smaller than $-2$).
Therefore, the results from Section \ref{genericM'} apply, and one observes that none of these cases provides
wall divisors.
If $i=0$, then $S$ contains exactly two elements of square $-2$ which are exchanged by $\iota^*$: The elements
$L_0 + e_1^{(0)}$ and $L_0 + \iota^*(e_1^{(0)})$. In this case according to Remark \ref{rem:L0goesaway}, we can choose $L_0=0$.
Thus for $i=0$ we find ourselves in the setting of Section \ref{sec:twocurves} with $e_1^{(0)}=C$. Note that the element $D_C'$
from Section \ref{sec:twocurves} corresponds precisely to the element $e_1^{(1)}$
with our
notation.
We can therefore deduce from Proposition \ref{prop:twocurves}, that for $i=0$ the Cases 6 and 7 provide wall
divisors
($e_1^{(1)}$
with square $-2$ and
divisibility $1$, and
$2e_1^{(1)} - \delta'$
with square $-12$ and divisibility $2$), whereas by Remark \ref{Remark:twocurves} Case 8 does not provide a wall
divisor, thus confirming Theorem \ref{main}.
\noindent {\it Cases 3, 5, and 9:}
Again, the element $2L_{i+1}+ e_2$ corresponds to an element in $\varphi_{S_0}(H^2(S_0,{\mathbb Z}))$. We need to
consider $i\leq 0$ to cover all possibilities for wall divisors with negative squares.
If $i<0$, then the lattice $(2L_{i+1}+ e_2) \oplus E^a \subseteq \Lambda_{K3}$ is negative definite, and again
its saturation is $(L_{i+1} + e_2^{(0)})\oplus E^a $ for the corresponding element $e_2^{(0)}$ in the first copy of
$E_8(-1)$ (we remind that $e_2^{(0)}$ has square $-4$).
Similar to the above, deform to a marked K3 surface $(S,\varphi_S)$ such that $\varphi_S(\Pic(S))= (L_{i+1} +
e_2^{(0)})\oplus E^a$.
Observe that all non-zero elements of this lattice have squares smaller than $-2$. Therefore, we can apply the
results from Section \ref{genericM'} to observe that we do not find any further wall divisors in these cases.
For the remaining case $i=0$, we need to prove that both $2L_{1}^{(2)} + 2e_2^{(1)}- \delta'$
(with square $-4$ and divisibility $2$)
and $L_{1}^{(2)} + e_2^{(1)} - \frac{\delta' + \Sigma'}{2}$ (with square $-2$ and divisibility $1$)
correspond to wall divisors.
If $i=0$, then $S$ contains exactly one element of square $0$: $2L_{1}+ e_2$.
Thus for $i=0$ we find ourselves in the setting of Section \ref{sec:elliptic}. Note that the element $D_\gamma'$
from Section \ref{sec:elliptic} corresponds precisely to the element $L_1^{(2)} + e_2^{(1)}$ with our
notation.
Let $M'$ be constructed as in Section \ref{sec:elliptic}.
From the investigations of the current section,
we know that a wall divisor on $M'$ has the numerical properties of a wall divisor that we already found or possibly of $2L_{1}^{(2)} + 2e_2^{(1)}- \delta'$ or
$L_{1}^{(2)} + e_2^{(1)} - \frac{\delta' + \Sigma'}{2}$. That is: we have proved that a wall divisor necessarily has one of the numerical properties listed in Theorem \ref{main}. Therefore, Lemma \ref{mainelliptic} shows that $L_{1}^{(2)} + e_2^{(1)} - \frac{\delta' + \Sigma'}{2}$ is a wall divisor. Finally, $2L_{1}^{(2)} + 2e_2^{(1)}- \delta'$ is also a wall divisor by
Lemma \ref{monolemma}.
This concludes the analysis of all possible cases and thus the proof of Theorem \ref{main}.
\section{Application}
\subsection{A general result about the automorphisms of Nikulin-type orbifolds}
\begin{prop}\label{AutM'}
Let $X$ be an orbifold of Nikulin-type and $f$ an automorphism on $X$. If $f^*=\id$ on $H^2(X,\Z)$, then $f=\id$.
\end{prop}
This section is devoted to the proof of this proposition. We will adapt Beauville's proof \cite[Proposition 10]{Beauville1982}.
\begin{lemme}\label{AutS}
Let $S$ be a K3 surface such that $\Pic S=\Z H\oplus^{\bot} E_8(-2)$ with $H^2= 4$. According to Proposition \ref{involutionE8} or from \cite[Proposition 2.3]{Sarti-VanGeemen}, the K3 surface $S$ is endowed with a symplectic involution $\iota$.
Let $f\in\Aut(S)$ such that $f$ commutes with $\iota$. Then $f=\iota$ or $f=\id$.
\end{lemme}
\begin{proof}
We adapt the proof of \cite[Corollary 15.2.12]{HuybrechtsK3}.
Let $f\in \Aut(S)$ which commutes with $\iota$. It follows that $f^*(H)=H$.
By \cite[Corollary 3.3.5]{HuybrechtsK3}, $f$ acts on $T(S)$ (the transcendental lattice of $S$) as $-\id$ or $\id$.
However, the actions of $f^*$ on $A_{T(S)}$ and on $A_{\Pic(S)}$ have to coincide. This forces $f^*_{T(S)}=\id$. Moreover, we can consider $f^*_{|E_8(-2)}$ as an isometry of $E_8(-2)$.
By \cite[Theorem 4.2.39]{Griess}, the isometry group of $E_8(-2)$ is finite, hence $f^*_{|E_8(-2)}$ is of finite order.
Therefore by \cite[Chapter 15 Section 1.2]{HuybrechtsK3}, there are only two possibilities for $f$: $\id$ or a symplectic involution.
Moreover, by \cite[Proposition 15.2.1]{HuybrechtsK3}
, there is at most one symplectic involution on $S$.
\end{proof}
\begin{lemme}\label{commute}
Let $(S,\iota)$ be a K3 surface endowed with a symplectic involution such that $\Pic S=\Z H\oplus^{\bot} E_8(-2)$ with $H^2= 4$.
Let $M'$ be the Nikulin orbifold constructed from $(S,\iota)$ as in Section \ref{M'section}.
Let $(g,h)\in\Aut(S)^2$ such that $g\times h$ induces a bimeromorphism on $M'$ via the non-ramified cover $$\gamma: S\times S\smallsetminus \left(\Delta_{S^{2}}\cup S_{\iota}\cup(\fix \iota\times \fix \iota)\right)\rightarrow M'\smallsetminus \left(\delta'\cup \Sigma' \cup \Sing M' \right)$$
introduced in Section \ref{inv0M'} (i.e., there exists a bimeromorphism $\rho$ on $M'$ such that $\rho\circ\gamma=\gamma\circ (g\times h)$). Then $g$ and $h$ commute with $\iota$.
\end{lemme}
\begin{proof}
It is enough to prove that $g$ commutes with $\iota$; the proof for $h$ being identical.
Let $$A:=\left\{\left.\eta=\eta_1\circ\eta_2\circ\eta_3\right|\ (\eta_1,\eta_3)\in\left\{\id,\iota\right\}^2\ \text{and}\ \eta_2\in\left\{\id, g, h\right\}\right\}.$$
Let $V:=S\times S\smallsetminus \left(\Delta_{S^{2}}\cup S_{\iota}\cup(\fix \iota\times \fix \iota)\right)$;
we consider the following open subset of $V$:
$$V^{o}:=\left\{\left.(a,b)\in V\ \right|\ g(a)\neq \eta(b),\ g\circ\iota(a)\neq \eta(b),\ \forall \eta\in A\ \text{and}\ a\notin g^{-1}(\fix \iota)\right\}.$$
Since $g\times h$ induces a bimeromorphism on $M'$, there exists an open subset $\mathcal{W}$ of $S\times S$ such that for all $(a,b)\in \mathcal{W}$:
\small
\begin{align*}
g\times h\left(\left\{(a,b),(b,a),(\iota(a),\iota(b)),\right.\right.&\left.\left.(\iota(b),\iota(a))\right\}\right)\\
&=\left\{(g(a),h(b)),(h(b),g(a)),(\iota\circ g(a),\iota\circ h(b)),(\iota\circ h(b),\iota\circ g(a))\right\}.
\end{align*}
That is:
\begin{align*}
\left\{(g(a),h(b)),(g(b),h(a)),\right.&\left.(g\circ\iota(a),h\circ\iota(b)),(g\circ\iota(b),h\circ\iota(a))\right\}\\&=\left\{(g(a),h(b)),(h(b),g(a)),(\iota\circ g(a),\iota\circ h(b)),(\iota\circ h(b),\iota\circ g(a))\right\}.
\end{align*}
\normalsize
If we choose in addition $(a,b)\in V^{o}$, then
there is only one possibility:
$$g\circ\iota(a)=\iota\circ g(a).$$
It follows that $g$ commutes with $\iota$ on an open set of $S$, so on all $S$.
\end{proof}
We are now ready to prove Proposition \ref{AutM'}.
\begin{proof}[Proof of Proposition \ref{AutM'}]
We consider $X$ an orbifold of Nikulin-type and $f$ an automorphism on $X$ such that $f^*=\id$. In particular, $f$ is a symplectic automorphism.
Let $(S,\iota)$ be a K3 surface, endowed with a symplectic involution, verifying the hypothesis of Lemma \ref{AutS}; we consider the Nikulin orbifold $M'$ constructed from $(S,\iota)$ as in Section \ref{M'section}.
By \cite[Lemma 2.17]{Menet-Riess-20}, there exist two markings $\varphi$ and $\psi$ such that $(X,\varphi)$ and $(M',\psi)$ are connected by a sequence of twistor spaces. Moreover by Remark \ref{twistorinvo}, $f$ extends to an automorphism on all twistor spaces. In particular $f$ induces an automorphism $f'$ on $M'$. We consider $\gamma$, the non-ramified cover of Lemma \ref{commute}:
$$\gamma: S\times S\smallsetminus \left(\Delta_{S^{2}}\cup S_{\iota}\cup(\fix \iota\times \fix \iota)\right)\rightarrow M'\smallsetminus \left(\delta'\cup \Sigma' \cup \Sing M' \right).$$ Since $V=S\times S\smallsetminus \left(\Delta_{S^{2}}\cup S_{\iota}\cup(\fix \iota\times \fix \iota)\right)$ is simply connected, it is the universal cover of $U:= M'\smallsetminus \left(\delta'\cup \Sigma' \cup \Sing M' \right)$.
Since $f'^*$ acts as $\id$ on $H^2(M',\Z)$, we have that $f'$ preserves $\delta'$ and $\Sigma'$ (it also preserves the set $\Sing M'$ ). Hence $f'$ induces an automorphism on $U$ and then on $V$. Therefore, it induces a bimeromorphism $\overline{f'}$ on $S\times S$. Let $s_2:S\times S\rightarrow S\times S: (a,b)\mapsto (b,a)$. By \cite[Theorem 4.1 (d)]{Oguiso}, $\overline{f'}$ can be written as a sequence of compositions between $s_2$ and automorphisms of the form $g_i\times h_i$, where $g_i$, $h_i$ are in $\Aut(S)$.
Since, we are interested in the automorphism $f'$ on $M'$, without loss of generality, we can assume that $\overline{f'}=g\times h$, with $g$, $h$ in $\Aut(S)$.
Therefore, by Lemma \ref{commute}, $g$ and $h$ commute with $\iota$.
It follows from Lemma \ref{AutS} that $(g,h)\in\left\{\id,\iota\right\}^2$.
So, the unique possibility for $\overline{f'}$ to induce a non-trivial morphism on $U$ is $\overline{f'}=\id\times \iota$ (or $\iota\times\id $).
However, in this case, as seen in Section \ref{inv0M'}, $f'$ would interchange $\delta'$ and $\Sigma'$. This is a contradiction with the fact that $f'^*=\id$ on $H^2(M',\Z)$. Therefore, we obtain $f'=\id$ and then $f=\id$.
\end{proof}
\subsection{Construction of a non-standard symplectic involution on orbifolds of Nikulin-type}\label{Application}
Adapting the vocabulary introduced in \cite{Mongardi-2013}, we state the following definition.
\begin{defi}
Let $Y$ be an irreducible symplectic manifold of $K3^{[2]}$-type endowed with a symplectic involution $\iota$.
Let $M'$ be the Nikulin orbifold constructed from $(Y,\iota)$ as in Example \ref{exem}. Let $G\subset \Aut(Y)$ such that all $g\in G$ commute with $\iota$.
Then $G$ induces an automorphism group $G'$ on $M'$. The group $G'$ is called a \emph{natural automorphism group} on $M'$ and $(M',G')$ is called a \emph{natural pair}.
Let $X$ be an irreducible symplectic orbifold of Nikulin-type and $H\subset \Aut(X)$. The group $H$ will be said \emph{standard} if the couple
$(X,H)$ is deformation equivalent to a natural pair $(M',G')$; in this case, we say that the couple $(X,H)$ is a \emph{standard pair}.
\end{defi}
\begin{thm}\label{Involution2}
Let $X$ be an irreducible symplectic orbifold of Nikulin-type such that there exists $D\in\Pic (X)$ with $D^2=-2$ and ${\rm div}(D)=2$. Then there exists an irreducible symplectic orbifold $Z$ bimeromophic to $X$ and a non-standard symplectic involution $\iota$ on $Z$ such that:
$$H^2(Z,\Z)^{\iota}\simeq U(2)^3\oplus E_8(-1)\oplus (-2)\ \text{and}\ H^2(Z,\Z)^{\iota\bot}\simeq (-2).$$
\end{thm}
\begin{proof}
By Theorem \ref{main}, $D$ is not a wall divisor, hence there exists $\beta\in\mathcal{B}\mathcal{K}_{X}$ and $g\in\Mon_{\Hdg}^2(X)$
such that $(g(D),\beta)_{q}=0$. Let $f:X\dashrightarrow Z$ be a bimeromorphic map such that $f_*(\beta)$ is a Kähler class on $Z$.
We set $D':=f_*\circ g(D)$.
By Corollary \ref{Lastmonodromy}, the involution $R_{D'}$ is a Hodge monodromy operator on $H^2(Z,\Z)$.
Moreover $R_{D'}(f_*(\beta))=f_*(\beta)$. Hence by Theorem \ref{mainHTTO},
there exists $\iota$ an automorphism on $Z$ such that $\iota^*=R_{D'}$. Moreover, by Proposition \ref{AutM'}, $\iota$ is an involution. Since $\iota^*=R_{D'}$, we have $H^2(Z,\Z)^{\iota}=D'^{\bot}$. It follows from Theorem \ref{BBform} that: $H^2(Z,\Z)^{\iota}\simeq U(2)^3\oplus E_8(-1)\oplus (-2)$ and $H^2(Z,\Z)^{\iota\bot}\simeq (-2)$.
Now, we show that $\iota$ is non-standard. We assume that $\iota$ is standard and we will find a contradiction. If $\iota$ is standard, there exists
a natural pair $(M',\iota')$ deformation equivalent to $(Z,\iota)$.
Since $\iota'$ is natural, $\iota'^*(\Sigma')=\Sigma'$. Moreover, since $(M',\iota')$ is deformation equivalent to $(Z,\iota)$,
there exists $D'\in \Pic M'$ such that $q_{M'}(D')=-2$, ${\rm div}(D')=2$ and $H^2(M',\Z)^{\iota'\bot}=\Z D'$.
However, since $\Sigma'\in H^2(M',\Z)^{\iota'}$, we obtain by Theorem \ref{BBform} that:
$$D'\in \Sigma'^{\bot}\simeq U(2)^3\oplus E_8(-1)\oplus (-4).$$
For the rest of the proof, we identify $\Sigma'^{\bot}$ with $U(2)^3\oplus E_8(-1)\oplus (-4)$.
It follows that $D'$ can be written:
$$D'=\alpha+\beta,$$
with $\alpha\in U(2)^3\oplus (-4)$ and $\beta\in E_8(-1)$.
Since ${\rm div}(D')=2$, we have
$$D'=\alpha+2\beta',$$
with $\beta'\in E_8(-1)$.
It follows that $q_{M'}(D')\equiv0\mod 4$.
This is a contradiction with $q_{M'}(D')=-2$.
\end{proof}
\TODO{What is the fixed locus of $\iota$ ?}
\TODO{check for the following things: twister -> twistor, kähler -> Kähler}
\TODO{look at Nikulin entry}
\TODO{generic -> very general in many cases}
\TODO{check that notation of $q$ is consistent}
\noindent
Gr\'egoire \textsc{Menet}
\noindent
Laboratoire Paul Painlevé
\noindent
59 655 Villeneuve d'Ascq Cedex (France),
\noindent
{\tt gregoire.menet@univ-lille.fr}
\noindent
Ulrike \textsc{Rie}\ss
\noindent
Institute for Theoretical Studies - ETH Z\"urich
\noindent
Clausiusstrasse 47, Building CLV, Z\"urich (Switzerland)
\noindent
{\tt uriess@ethz.ch}
\end{document} |
\begin{document}
\title{L-spaces, taut foliations, and graph manifolds}
\author{Jonathan Hanselman}
\email{jh66@math.princeton.edu}
\address {Department of Mathematics, Princeton University, Fine Hall, Washington Road, Princeton,
NJ 08540, USA}
\author{Jacob Rasmussen}
\email{J.Rasmussen@dpmms.cam.ac.uk}
\address {Department of Pure Mathematics and Mathematical Statistics,
Centre for Mathematical Sciences, Wilberforce Road, Cambridge CB3 0WB, UK}
\author{Sarah Dean Rasmussen}
\email{S.Rasmussen@dpmms.cam.ac.uk}
\address {Department of Pure Mathematics and Mathematical Statistics,
Centre for Mathematical Sciences, Wilberforce Road, Cambridge CB3 0WB, UK}
\author{Liam Watson}
\email{liam@math.ubc.ca}
\address {Department of Mathematics, University of British Columbia, 1984 Mathematics Road,
Vancouver, BC, V6T 1Z2, Canada }
\thanks{The first author was partially supported by NSF RTG grant DMS-1148490. The second author was partially supported by EPSRC grant EP/M000648/1. The third author was supported by EPSRC grant EP/M000648/1. The fourth author was partially supported by a Marie Curie Career Integration Grant (HFFUNDGRP).}
\begin{abstract}
If \(Y\) is a closed orientable graph manifold, we show that \(Y\) admits a coorientable taut foliation if and only if \(Y\) is not an L-space. Combined with previous work of Boyer and Clay, this implies that \(Y\) is an L-space if and only if \(\pi_1(Y)\) is not left-orderable.
\end{abstract}
\maketitle
\section{Introduction}
An L-space is a rational homology sphere $Y$ with simplest possible Heegaard Floer homology,\footnote{We use Floer homology with coefficients in $\mathbb{Z}/2\mathbb{Z}$. Other coefficient systems are discussed at the end of the paper.}
in the sense that $\rank\widehat{\mathit{HF}}(Y)= |H_1(Y;\mathbb{Z})|$. Ozsv\'ath and Szab\'o have shown, by an argument analogous to one used by Kronheimer and Mrowka in the monopole setting \cite{KM1997}, that the existence of a $C^2$ coorientable taut foliation ensures that $\rank\widehat{\mathit{HF}}(Y)> |H_1(Y;\mathbb{Z})|$ \cite{OSz2004}. That is, L-spaces do not admit $C^2$ coorientable taut foliations.
For certain classes of manifolds the converse is known to hold. In particular, for Seifert fibered spaces with base orbifold $S^2$, Lisca and Stipsicz have shown that if $Y$ is not an L-space then $Y$ admits a coorientable taut foliation \cite{LS2007}. (In fact, it can be shown that the two conditions are equivalent for all Seifert fibered spaces; see \cite{BGW2013}.) The main result of this note extends Lisca and Stipsicz's result to general graph manifolds. Recall that a graph manifold is a prime three-manifold admitting a JSJ decomposition into pieces admitting Seifert fibered structures.
\begin{theorem}\label{Main Theorem}Let $Y$ be a closed, connected, orientable graph manifold. If $Y$ is not an L-space then $Y$ admits a $C^0$ coorientable taut foliation. \end{theorem}
An independent alternative proof of this result, together with an explicit classification of graph manifolds admitting cooriented taut foliations, appears in \cite{R2015}, by the third author.
There is a third condition on three-manifolds that is relevant in this setting. Recall that a countable group is left-orderable if it admits an effective action on $\mathbb{R}$ by order-preserving homeomorphisms \cite{BRW2005}. There is a conjectured equivalence among prime three-manifolds between L-spaces and non-left-orderability of the fundamental group \cite{BGW2013}. Theorem \ref{Main Theorem} gives rise to an equivalence between all three conditions for graph manifolds.
\begin{theorem}\label{Equivalence Theorem}If $Y$ is a closed, connected, orientable graph manifold then the following are equivalent:
\begin{itemize}
\item[(i)] $Y$ is not an L-space;
\item[(ii)] $Y$ admits a $C^0$ coorientable taut foliation;
\item[(iii)] $Y$ has left-orderable fundamental group.
\end{itemize}
\end{theorem}
The equivalence (ii) $\Leftrightarrow$ (iii) is due to Boyer and Clay \cite{BC}. The implication (ii) \(\Rightarrow\) (i) is established by Boyer and Clay in \cite{BC-prep}. Alternately, this implication follows from a theorem of Bowden \cite{Bowden} and, independently, Kazez and Roberts \cite{KR, KR-prep} that taut \(C^0\) foliations can be approximated by weakly semi-fillable contact structures, together with the earlier work of Ozsv\'ath and Szab\'o \cite{OSz2004}. Theorem \ref{Main Theorem} provides the final required implication (i) $\Rightarrow$ (ii).
Among graph manifolds, the above equivalence was known for Seifert fibered spaces (see \cite{BGW2013} and references therein). The case of a graph manifold with a single JSJ torus was shown in \cite[Theorem 1.1]{HW} of the first and fourth authors; this case also follows from the second and third authors' gluing theorem \cite[Theorem 6.2]{RR}. In fact, Theorem \ref{Main Theorem} (and thus Theorem \ref{Equivalence Theorem}) also follows from results in \cite{RR}, as we aim to show in this paper.
The equivalence (i) $\Leftrightarrow$ (iii) resolves \cite[Conjecture 1]{BGW2013} in the affirmative for graph manifolds. We thank Tye Lidman for pointing out the following immediate consequence:
\begin{corollary}
Suppose $f\colon\thinspace Y_1\to Y_2$ is a non-zero degree map between closed, connected, orientable graph manifolds. If $Y_1$ is an L-space then $Y_2$ is an L-space as well.
\end{corollary}
\begin{proof}This follows from Theorem \ref{Equivalence Theorem} and \cite[Theorem 3.7]{BRW2005}. Note that the existence of the non-zero degree map $f$ induces a non-trivial homomorphism from $\pi_1(Y_1)$ to $\pi_1(Y_2)$ \cite[Lemma 3.8]{BRW2005}. Hence if $\pi_1(Y_2)$ is left-orderable then so is $\pi_1(Y_1)$ \cite[Theorem 1.1]{BRW2005}.\end{proof}
Our work rests on a detailed study of the Heegaard Floer invariants of orientable three-manifolds $M$ with torus boundary. Denote by $M(\alpha)$ the result of Dehn filling along a slope $\alpha$ in $\partial M$, that is, $\alpha$ represents a primitive class in $H_1(\partial M;\mathbb{Z})/\{\pm 1\}$. The set of slopes $Sl(M)$ may be identified with the extended rationals $\mathbb{Q}\cup\{\frac{1}{0}\}$, viewed as a subspace of $\mathbb{R} \text{P}^1$. Consider the set of L-space slopes $\mathcal{L}_M = \{\alpha \, |\, M(\alpha) \text{\ is\ an\ L-space} \}$; its interior $\mathcal{L}_M^\circ$ is the set of {\em strict L-space slopes}. The key tool used in the proof of Theorem \ref{Main Theorem} is the following non-L-space cutting theorem, which follows from results proved in~\cite{RR}.
\begin{theorem}\label{Cutting Theorem}
Let $N$ denote the twisted $I$-bundle over the Klein bottle, with rational longitude~$\lambda$. Let $M_1$ and $M_2$ be compact, connected, orientable three-manifolds with torus boundary, and suppose that $Y\cong M_1\cup_h M_2$ for some homeomorphism $h\colon\thinspace \partial M_1\to \partial M_2$. If $Y$ is not an L-space, then
\begin{itemize}
\item[(1)]
there exists a slope $\alpha$ in $\partial M_1$ such that $\alpha \not\in \mathcal{L}^\circ_{M_1}$ and $h(\alpha) \not\in\mathcal{L}^\circ_{M_2}$; moreover,
\item[(2)]
for any orientation-reversing homeomorphisms
$\varphi_i : \partial N \to \partial M_i$ with
$\varphi_{1}(\lambda) = \alpha$ and
$\varphi_{2}(\lambda) = h(\alpha)$,
the closed manifolds
$N \cup_{\varphi_1} M_1$ and $N \cup_{\varphi_2} M_2$
are non-L-spaces.
\end{itemize}
\end{theorem}
We note that statement~(1) alternatively results from an enhanced gluing result introduced in Theorem~\ref{Gluing Theorem} below, which is of independent interest.
\section{Notions of simplicity} Before proving Theorem \ref{Cutting Theorem} we recall the main notions of \cite{HW} and \cite{RR} in order to highlight a key point of interaction between these two works.
The subject of \cite{RR} is the class of {\em Floer simple} manifolds: A manifold with torus boundary \(M\) is Floer simple
if and only if \(\mathcal{L}_{M}\) contains more than one element \cite[Proposition 1.3]{RR}.
In \cite{HW}, the main object of study is the class of {\em simple loop-type} manifolds. The bordered Floer homology \cite{LOT} of these manifolds has a particularly nice form. Below, we briefly summarize some relevant facts about bordered Floer homology. For a more detailed exposition we refer to \cite[Section 2]{HW}.
The bordered Floer module \(\widehat{\mathit{CFD}}\) is an invariant of a three-manifold with parametrized boundary. When \(\partial M\) is a torus we can specify a parametrization of \(\partial M\) by a pair of simple closed curves \(\alpha, \beta \in H_1(\partial M;\mathbb{Z})\) with \(\alpha \cdot \beta = 1\). In this case, the bordered Floer homology \(\widehat{\mathit{CFD}}(M, \alpha, \beta)\) may be represented by a directed graph whose edges are labeled by elements of the set \(\mathcal{A} = \{\rho_1,\rho_2,\rho_3, \rho_{12}, \rho_{23}, \rho_{123}\}\). The triple \((M,\alpha, \beta)\) is said to be of {\em loop-type} if each vertex in the graph representing \(\widehat{\mathit{CFD}}(M,\alpha, \beta)\) has valence \(2\) \cite[Definition 3.2]{HW}. Such a graph can be decomposed into certain standard {\em puzzle pieces} as described in \cite[Section~3]{HW}. For our purposes, the relevant pieces are the ones shown in Figure~\ref{Fig:Unstable Puzzle Pieces}. The property of being loop-type is inherent to the underlying manifold $M$: If the triple $(M, \alpha, \beta)$ is of loop-type for some choice of parametrizing curves $\alpha$ and $\beta$, then it is of loop-type for any choice of $\alpha$ and $\beta$. In this case, we say the manifold $M$ is of loop-type.
\begin{figure}
\caption{Puzzle pieces for simple loops; see \cite{HW}.}
\label{Fig:Unstable Puzzle Pieces}
\end{figure}
\begin{definition} \cite[Definition 4.19]{HW} A manifold with torus boundary $M$ is of simple loop-type if it is of loop-type, the number of connected components of the graph is equal to the number of \(spin^c\) structures on \(M\), and for some choice of parametrizing curves $\alpha$ and $\beta$, $\widehat{\mathit{CFD}}(M, \alpha, \beta)$ is expressible in the letters $\bar{c}_k$, $d_k$, and $e$.
\end{definition}
\begin{proposition}\label{Simplicity Proposition} \(M\) is Floer simple if and only if \(M\) is of simple loop-type.
\end{proposition}
\begin{proof} The bordered Floer homology of a Floer simple manifold \(M\) was explicitly computed in \cite[Proposition 3.9]{RR} for an appropriate choice of parametrization \((\alpha, \beta)\). In the course of the proof, it is shown that \(\widehat{\mathit{CFD}}(M,\alpha,\beta)\) is composed of puzzle pieces of type \(\bar c_k\). Thus to see that \(M\) is of simple loop type, we need only check that the number of loops is equal to the number of \(spin^c\) structures on \(M\), which is \(|H^2(M;\mathbb{Z})| = |H_1(M,\partial M;\mathbb{Z})| = |H_1(M;\mathbb{Z})/\langle \alpha, \beta \rangle| \).
Each vertex \(v\) of \(\widehat{\mathit{CFD}}(M, \alpha, \beta)\) is labeled by a relative \(spin^c\) structure \(\mathfrak{s}(v)\), which we can view as an element of \(H^2(M, \partial M;\mathbb{Z}) \cong H_1(M;\mathbb{Z})\). By \cite[Lemma 3.8]{RR}, edges of the graph labeled by \(\rho_1\) preserve this labeling, edges labeled by \(\rho_{23}\) shift the labeling by \(\alpha \) and edges labeled by \(\rho_{3}\) shift the labeling by \(\alpha + \beta\).
Given a puzzle piece in \(\widehat{\mathit{CFD}}(M, \alpha, \beta)\), let \(v\) be its unique black vertex, and label the piece by the image of \(\mathfrak{s}(v)\) in \(H_1(M;\mathbb{Z})/\langle \alpha \rangle\). This labeling defines a bijection between the set of puzzle pieces in \(\widehat{\mathit{CFD}}(M, \alpha, \beta)\) and \(H_1(M;\mathbb{Z})/\langle \alpha \rangle\). Moreover, if the label on a given piece is \(a\), the label on the next piece in the loop is \(a + \beta\). It follows that the set of loops is in bijection with \((H_1(M;\mathbb{Z})/\langle \alpha \rangle)/\langle \beta \rangle \cong H_1(M;\mathbb{Z})/\langle \alpha, \beta \rangle\).
Thus a Floer simple manifold is simple loop-type.
Conversely, given a simple loop-type manifold $M$, fix parametrizing curves $\alpha$ and $\beta$ such that $\widehat{\mathit{CFD}}(M, \alpha, \beta)$ consists only of segments of type $\bar{c}_k$, $d_k$, and $e$. The slope $\infty$ is a strict L-space slope for $(M, \alpha, \beta)$ by \cite[Proposition 4.18]{HW}; that is $\alpha \in \mathcal{L}^\circ_M$. This implies that $| \mathcal{L}_M | > 1$ and therefore, by \cite[Proposition 1.3]{RR}, that $M$ is Floer simple.
\end{proof}
\
\section{Cutting and Gluing}
Combining Proposition \ref{Simplicity Proposition} with the gluing theorem \cite[Theorem 1.3]{HW} for simple loop-type manifolds, we obtain a gluing result for Floer simple manifolds.
\begin{theorem}\label{Gluing Theorem}
Suppose that $M_1$ and $M_2$ are Floer simple manifolds, and consider the closed manifold $M_1\cup_h M_2$ for some homeomorphism
$h\colon\thinspace \partial M_1\to\partial M_2$.
\begin{itemize}
\item[(1)] If neither $M_1$ nor $M_2$ are solid torus-like, then $M_1\cup_h M_2$ is not an L-space if and only if there is a slope $\alpha$ in $\partial M_1$ such that $\alpha \not\in \mathcal{L}^\circ_{M_1}$ and $h(\alpha) \not\in\mathcal{L}^\circ_{M_2}$.
\item[(2)] If either $M_1$ or $M_2$ is solid torus-like, then $M_1\cup_h M_2$ is not an L-space if and only if there is a slope $\alpha$ in $\partial M_1$ such that $\alpha \not\in \mathcal{L}_{M_1}$ and $h(\alpha) \not\in\mathcal{L}_{M_2}$. \end{itemize}
\end{theorem}
The two cases arising in this statement are expected: the second accounts for Dehn filling (that is, when one of the $M_i$ is a solid torus) and simply verifies the definition of an L-space slope. More generally, we must appeal to a larger class of manifolds which are called \emph{solid torus-like} \cite[Definition 3.23]{HW}, as they are characterized by having bordered Floer homology which resembles that of a solid torus in every $spin^c$ structure \cite{HW}, or equivalently, by having empty $\mathcal{D}^{\tau}$ in the sense of \cite{RR}. It was proved in \cite{Gillespie} that a solid torus-like manifold must be a solid torus connected sum with an L-space. In particular, if we assume that $M_1$ and $M_2$ are boundary incompressible, then the conclusion in case $(1)$ holds.
{\bf The proof of Theorem \ref{Cutting Theorem}.} According to
\cite[Theorem 1.6]{RR}, the set $\mathcal{L}_{M_i}^\circ$ (for $i=1,2$) is either empty or it is (the restriction to $\mathbb{Q}\cup\{\frac{1}{0}\}$ of) a connected interval with rational endpoints. In the case $\mathcal{L}^\circ_{M_2} = \emptyset$, let $\alpha$ be the rational longitude of $M_1$, which is not an L-space slope. Similarly, if $\mathcal{L}^\circ_{M_1} = \emptyset$ we choose $\alpha$ such that $h(\alpha)$ is the rational longitude of $M_2$. If $\mathcal{L}^\circ_{M_1}$ and $\mathcal{L}^\circ_{M_2}$ are both nonempty, then $M_1$ and $M_2$ are both Floer simple \cite[Proposition 1.3]{RR}; since $Y$ is not an L-space it follows from Theorem \ref{Gluing Theorem} that there is a slope $\alpha\not\in \mathcal{L}^\circ_{M_1} \subset \mathcal{L}_{M_1}$ such that $h(\alpha) \not\in\mathcal{L}^\circ_{M_2} \subset\mathcal{L}_{M_2}$ as required.
Part (2) of Theorem~\ref{Cutting Theorem} is subsumed as a special case of \cite[Proposition 7.9]{RR} by the second and third authors, but the result still merits some explanation.
Again, and henceforth in this paper, $N$ denotes the twisted $I$-bundle over the Klein bottle, with rational longitude~$\lambda$.
It is straightforward to compute, e.g. from \cite[Theorem 5.1]{RR}, that $\mathcal{L}_N = \mathcal{L}_N^{\circ} = Sl(N) \setminus \{\lambda\}$. Thus, for $M$ Floer simple,
Theorem \ref{Gluing Theorem} implies that
for any gluing map $\varphi \colon\thinspace \partial N \to \partial M$,
\begin{equation*}
N \cup_{\varphi}\mkern-2.5mu M \,\text{ is a non-L-space}
\;\iff\;
\varphi(\lambda) \notin \mathcal{L}_{M}^{\circ}.
\end{equation*}
Note that, similar to a Dehn filling, the above non-L-space criterion for $N \cup_{\varphi}\mkern-2.5mu M$ depends only on the slope $\varphi(\lambda) \in Sl(M)$, and is independent of the choice of framing, relative to~$\lambda$, of $\varphi$. In fact, even the $\mathbb{Z}/2\mathbb{Z}$-graded groups $\widehat{HF}(N \cup_{\varphi}\mkern-2.5mu M)$ are independent of this choice of framing \cite[Proposition~21]{BGW2013}, although we will not need this stronger statement. We therefore call any $N \cup_{\varphi}\mkern-2.5mu M$ with $\alpha = \varphi(\lambda)$ an $N${\textit{-filling}} of $M$ along the slope~$\alpha$.
The above non-L-space criterion---that an $N$-filling is a non-L-space if and only if the filling is along a non strict-L-space slope---also holds for $N$-fillings of an arbitrary rational homology solid torus $M$, but the argument is more subtle in the non-Floer-simple case.
$\Box$
\section{The main theorem}
{\bf Decomposing along tori.}
Since Theorem \ref{Cutting Theorem} generically produces lower-complexity closed non-L-spaces from a closed non-L-space, it provides an {\em{iterative}} decomposition tool for the proof of Theorem~\ref{Main Theorem}. Moreover, at each decomposition step, the new $N$-filling slopes record one non strict-L-space slope for each boundary component, with these slopes pairwise identified under the original gluing maps.
In particular, by iteratively decomposing a non-L-space graph manifold along a suitable JSJ decomposition, we can produce a collection of non-L-space $N$-filled Seifert fibered spaces,
with $N$-filling slopes specifying gluing-compatible non strict-L-space slopes on all boundary components of the Seifert fibered spaces.
Appealing to \cite[Proposition 7.9]{RR} translates these data into the language of NLS-detected slopes in the sense of Boyer and Clay \cite[Definition 7.16]{BC}, whose machinery then automatically produces a cooriented taut foliation on the original graph manifold.
To describe this process in more detail, we first need to set up some notation for cutting along tori. Note that we may assume without loss of generality that $b_1(Y)=0$: in the case that $b_1(Y)>0$ work of Gabai guarantees the existence of a coorientable taut foliation \cite{Gabai1983}. Thus, every torus $T\hookrightarrow Y$ separates $Y$ into two rational homology solid tori.
Given $Y$ and a collection of disjoint embedded tori $T_1, \ldots, T_n$ in $Y$, let $Y \backslash\!\!\backslash \{T_i\}$ denote the result of cutting $Y$ along each torus $T_i$. Since every torus is separating, this process produces $n+1$ pieces; that is, $Y \backslash\!\!\backslash \{T_i\} \cong M_1 \amalg \ldots \amalg M_{n+1}$, where each $M_j$ is a three-manifold with $\partial M_j$ a disjoint union of tori.
If we further specify a collection of slopes $\alpha_* = (\alpha_1, \ldots, \alpha_n)$ on each of the tori in $\{T_i\}$, we can extend each $M_j$ (for $1\le j \le n+1$) to a closed manifold $Y_j^{\alpha_*}$ in the following way: The collection of slopes $\alpha_*$ induces a collection of slopes on the boundary tori of each $M_j$, since each boundary component of each $M_j$ is identified with one of the $T_i$. A closed manifold $Y_j^{\alpha_*}$ is obtained from $M_j$ by gluing a copy of \(N\) (the twisted $I$-bundle over the Klein bottle) to each boundary of $M_j$ such that, for each gluing, the slope $\lambda$ in $\partial N$ is identified with the slope in the relevant component of $\partial M_j$ induced by $\alpha_*$. We say that $Y_j^{\alpha_*}$ is an \emph{N-filling} of $M_j$ along the slopes induced by $\alpha_*$.
Note that the manifold $Y_j^{\alpha_*}$ described above is not uniquely determined by $M_j$ and $\alpha_*$ since each time a copy of $N$ is glued to $M_j$ there is an infinite family of gluing maps which take $\lambda$ in $\partial N$ to the desired slope in $\partial M_j$. A particular gluing map is specified by choosing slopes dual to $\lambda$ in $\partial M$ and dual to each slope induced by $\alpha_*$ in $\partial M_j$; the manifold $Y_j^{\alpha_*}$ depends on the particular choice of dual slopes. However, Theorem~\ref{Cutting Theorem} again tells us that the question of whether $Y_j^{\alpha_*}$ is an L-space is determined solely by $\alpha_*$, thus is independent of these choices. Incidentally, one again has the stronger result that the $\mathbb{Z}/2\mathbb{Z}$-graded Heegaard Floer groups $\widehat{HF}(Y_j^{\alpha_*})$ are independent of these choices \cite[Proposition~21]{BGW2013}.
{\bf Slope detection.} Given a three-manifold $Y$, a collection of tori $\{T_i\}_{i=1}^n$ and a collection of slopes $\alpha_*$, we have explained how to construct manifolds $\{Y_j^{\alpha_*}\}_{j=1}^{n+1}$, and observed that the $\mathbb{Z}/2\mathbb{Z}$-graded groups \(\widehat{\mathit{HF}}(Y_j^{\alpha_*})\) do not depend on the choices made in the construction. In particular, whether or not each $Y_j^{\alpha_*}$ is an L-space is a well--defined question. The key step in proving Theorem \ref{Main Theorem} is the following.
\begin{proposition}\label{main proposition}
Let $Y$ be an irreducible three-manifold and fix a collection of disjoint embedded tori $\{T_1, \ldots, T_n\}$ in $Y$ such that each torus is separating. If $Y$ is a non-L-space, then there is some collection of slopes $\alpha_*$ on these tori with the property that each of the manifolds $Y_j^{\alpha_*}$ defined above is a non-L-space.
\end{proposition}
\begin{proof}
First observe that if $n=1$ (that is, the collection of tori consists of just one torus), this is equivalent to Theorem~\ref{Cutting Theorem}. In this case, $Y \cong M_1 \cup_h M_2$ for some gluing map $h$. By Theorem~\ref{Cutting Theorem}, there is a slope $\alpha$ in $\partial M_1$ such that $N$-filling $M_1$ along $\alpha$ gives a non-L-space and $N$-filling $M_2$ along $h(\alpha)$ gives a non-L-space. Let $\alpha_1$ be the slope in $T_1$ that corresponds to the slopes $\alpha \in \partial M_1$ and $h(\alpha) \in \partial M_2$. Then $\alpha_* = (\alpha_1)$ gives the desired collection of slopes.
For the general case, we proceed by induction on $n$. Assume $n>1$ and the result holds for collections of fewer than $n$ tori. First cut $Y$ along the torus $T_1$ to produce two manifolds $M_1$ and $M_2$. By the $n=1$ case, there is a slope $\alpha_1$ in $T_1$ such that $N$-filling $M_1$ and $M_2$ along the slopes corresponding to $\alpha_1$ produces non-L-spaces. We denote the resulting closed manifolds by $Y_1^{(\alpha_1)}$ and $Y_2^{(\alpha_1)}$.
Having cut along $T_1$, the remaining collection of tori $\{T_2, \ldots, T_n\}$ splits into two subsets depending on whether each torus is contained in $M_1$ or $M_2$. Up to relabeling the tori, we may assume that $\{T_2, \ldots, T_m\}$ is the subset of tori contained in $M_1$ and $\{T_{m+1}, \ldots, T_n\}$ is the subset of tori contained in $M_2$, for some $1\le m\le n$ (note that if $m=1$ the first subset is empty, and if $m=n$ the second subset is empty). We consider these subsets as collections of tori on $Y_1^{(\alpha_1)}$ and $Y_2^{(\alpha_1)}$, respectively. Note that each collection has at most $n-1$ tori.
By the inductive hypothesis applied to $Y_1^{(\alpha_1)}$ with the collection of tori $\{T_2, \ldots, T_m\}$, there is a collection of slopes $(\alpha_2, \ldots, \alpha_m)$ such that cutting along each torus and $N$-filling along the corresponding slopes produces only non-L-spaces. Similarly, there is a collection of slopes $(\alpha_{m+1}, \ldots, \alpha_n)$ on the tori $\{T_{m+1}, \ldots, T_n\}$ in $Y_2^{(\alpha_1)}$ with this property. Finally, observe that the non-L-space manifolds obtained from $Y_1^{(\alpha_1)}$ by this process together with the non-L-space manifolds obtained from $Y_2^{(\alpha_1)}$ are exactly the same as the manifolds obtained by cutting $Y$ along the tori $\{T_1, \ldots, T_n\}$ and $N$-filling along the slopes induced by $\alpha_* := (\alpha_1, \ldots, \alpha_n)$.
\end{proof}
The proposition above can be restated using the notion of non-L-space (NLS) detected slopes defined in \cite{BC}. Let $M$ be a manifold with $\partial M$ a disjoint union of $n$ tori, and let $\alpha_* = (\alpha_1, \ldots, \alpha_n)$ be a collection of slopes on the boundary tori. Following \cite[Definition 7.2]{BC}, let $\mathcal{M}_t(\emptyset;[\alpha_*])$ denote the collection of manifolds obtained by filling each boundary component of $M$ by a copy of $N_t$ where the rational longitude of the $i^{\text{th}}$ copy of $N_t$ is sent to $\alpha_i$. The manifold $N_t$ is the Seifert fibered space over the disk with two cone points of order $t$ and Seifert invariants $(\frac{1}{t},\frac{t-1}{t})$. Note that $N_2=N$. In particular, in the notation introduced above, the set $\mathcal{M}_2(\emptyset;[\alpha_*])$ is the set of all possible $N$-fillings $Y^{\alpha_*}$ of $M$ along the slopes $\alpha_*$. Recall that all manifolds in this set have the same Heegaard Floer homology. According to \cite[Definition 7.16]{BC}, the collection of slopes $\alpha_*=(\alpha_1,\alpha_2,\ldots,\alpha_n)$ is non-L-space detected (or NLS detected) if for every $t>1$, the set $\mathcal{M}_t(\emptyset;[\alpha_*])$ contains no L-spaces.
We can now restate Proposition \ref{main proposition} as follows:
\begin{proposition}\label{prop:collection of NLS slopes}
Let $Y$ be an irreducible three-manifold with $b_1 = 0$ and fix a collection of disjoint tori $\{T_1, \ldots, T_n\}$ in $Y$. If $Y$ is a non-L-space, then there is some collection of slopes $\alpha_*$ on these tori with the property that the restriction of $\alpha_*$ to each $M_j$ in $Y \backslash\!\!\backslash \{T_i\}$ is NLS detected.
\end{proposition}
\begin{proof}
In \cite[Proposition 7.9]{RR}, the second and third authors show that for any rational homology solid torus $M$, ``generalized solid torus'' $N'$
with rational longitude $\lambda'$,
and gluing map $\varphi : \partial N' \to \partial M$, the closed manifold $N' \cup_{\varphi}\mkern-2.5mu M$ is a non-L-space
if and only if
$\varphi(\lambda') \notin \mathcal{L}_{M}^{\circ}$.
In the discussion preceding that proposition, they also prove that $N_t$ is a generalized solid torus for any $t>1$.
Thus, for a manifold with slope $\alpha_*$, the set
$\mathcal{M}_2(\emptyset;[\alpha_*])$ contains a non-L-space
if and only if for all $t > 1$, the set
$\mathcal{M}_t(\emptyset;[\alpha_*])$ contains no L-spaces,
hence if and only if $\alpha_*$ is NLS detected.
By Proposition \ref{main proposition}, there is a collection of slopes $\alpha_*$ such that each $Y_j^{\alpha_*}$ is a non-L-space. For each $M_j$, where $1\le j \le n+1$, $Y_j^{\alpha_*}$ is by construction a non-L-space element of $\mathcal{M}_2( \emptyset; [\alpha_*^j] )$, where $\alpha_*^j$ denotes the restriction of $\alpha_*$ to $\partial M_j$.
Thus each $\alpha_*^j$ is NLS detected on $M_j$.
\end{proof}
\begin{remark}Notice that we have yet to restrict to graph manifolds, or even to incompressible tori. Indeed, given a rational homology sphere $Y$ that is not an L-space, a collection of disjoint tori $\{T_i\}$ always gives rise to an NLS detected collection of slopes on the boundary of each component of $Y\backslash\!\!\backslash \{T_i\}$. This suggests that the same behaviour for taut foliations and/or for left-orders on the fundamental group should be explored
for more general prime three-manifolds.
\end{remark}
{\bf The proof of Theorem \ref{Main Theorem}.}
When $Y$ is a graph manifold and $\{T_i\}$ is the collection of JSJ tori, note that Proposition \ref{prop:collection of NLS slopes} verifies one direction of Boyer and Clay's Conjecture 1.10 in \cite{BC} about cutting and gluing along NLS-detected slopes, namely that a non-L-space graph manifold can be cut into Seifert fibered pieces with gluing-compatible NLS-detected slopes on all boundary components. This allows us to complete the proof of Theorem \ref{Main Theorem}.
Suppose that $Y$ is a non-L-space graph manifold with $b_1(Y) = 0$. If $Y$ has a trivial JSJ decomposition, then $Y$ is Seifert fibered and therefore already known to admit a cooriented taut foliation. Next suppose $Y$ has a non-trivial JSJ decomposition. Since $b_1(Y) = 0$, every JSJ torus separates; we take $\{T_i\}$ to be JSJ tori such that the components of $Y\backslash\!\!\backslash\{T_i\}$ are Seifert fibered. By Proposition \ref{prop:collection of NLS slopes} there is a collection of slopes $\alpha_* = (\alpha_1, \ldots, \alpha_n)$, with $\alpha_i \in T_i$, such that the restrictions of $\alpha_*$ to each component of $Y\backslash\!\!\backslash\{T_i\}$ are NLS detected. In \cite[Theorem 8.1]{BC}, Boyer and Clay show that on Seifert fibered spaces, NLS detected slopes are equivalent to what they call ``foliation detected'' slopes. Thus, we have finally produced foliation-detected slopes on all the JSJ tori decomposing $Y$ into Seifert fibered pieces, and that is precisely what Boyer and Clay's foliation gluing theorem \cite[Theorem 1.7]{BC}
requires, in order to guarantee the existence of a cooriented
taut foliation on $Y$.
$\Box$
{\bf Coefficients.}
Up until this point, we have used Floer homology with coefficients in \(\mathbb{Z}/2\mathbb{Z}\). This choice was imposed by our use of bordered Floer homology, which is only defined over \(\mathbb{Z}/2\mathbb{Z}\). We now briefly discuss what happens for other coefficient systems. If \(G\) is an abelian group, we say \(Y\) is a \(G\) L-space if
\(\widehat{\mathit{HF}}(Y,\mathfrak{s}; G) \cong G\) for all \(\mathfrak{s} \in \mathrm{Spin}^c(Y)\). For a closed orientable graph manifold \(Y\), we consider the following conditions:
\begin{enumerate}
\item \(Y\) is a \(\mathbb{Z}\) L-space.
\item \(Y\) is a \(\mathbb{Z}/2\mathbb{Z}\) L-space.
\item \(Y\) does not admit a \(C^0\) coorientable taut foliation.
\end{enumerate}
In fact, if \(Y\) is a closed graph manifold, these three conditions are equivalent. To see this, we briefly sketch the points at which our argument used \(\mathbb{Z}/2\mathbb{Z}\) coefficients. First, the proof of our enhanced gluing result Theorem \ref{Gluing Theorem} depends on bordered Floer homology, hence requires
\(\mathbb{Z}/2\mathbb{Z}\) coefficients.
For the proof of Theorem~\ref{Cutting Theorem}, however,
Theorem \ref{Gluing Theorem} can be replaced with
\cite[Theorem 1.1]{RR}, which works over $\mathbb{Z}$ coefficients. The theorem of Lisca and Stipsicz uses \(\mathbb{Z}/2\mathbb{Z}\) coefficients; however for any manifold obtained by Dehn filling a Floer simple manifold, the properties of being a \(\mathbb{Z}\) L-space and a \(\mathbb{Z}/p\mathbb{Z}\) L-space are equivalent \cite[Proof of Proposition 3.6]{RR}.
Moreover, \cite[Theorem 5.1]{RR} reproves Lisca and Stipsicz's result over $\mathbb{Z}$ by performing a direct computation of the L-space slope interval for any Seifert fibered space over the disc.
Thus, Theorems \ref{Main Theorem}, \ref{Equivalence Theorem}, and
\ref{Cutting Theorem} hold over $\mathbb{Z}$.
{\bf Closing remarks.}
The first author has applied bordered Floer homology to give an algorithm for computing the Heegaard Floer homology of an arbitrary graph manifold \cite{Hanselman2013}. This has been implemented on computer, with considerable savings in computation time if the combinatorics developed in \cite{HW} are incorporated (see \cite[Remark 6.10]{HW} in particular). As a consequence of Theorem \ref{Equivalence Theorem} two questions are now algorithmically decidable:
\begin{itemize}
\item[] Does a graph manifold $Y$ admit a coorientable taut foliation?
\item[] Does a graph manifold $Y$ have a left-orderable fundamental group?
\end{itemize}
The answer to either question is {\em yes} if and only if $\rank\widehat{\mathit{HF}}(Y)>\chi(\widehat{\mathit{HF}}(Y))$ (recall that $\chi(\widehat{\mathit{HF}}(Y))= |H_1(Y;\mathbb{Z})|$). This gives a direct, in fact combinatorial, verification of two conditions on a three-manifold that seem quite difficult to certify in general.
\textbf{Acknowledgements:}{ We thank Jonathan Bowden, Steve Boyer, and Adam Clay for sharing their preprints \cite{Bowden,BC-prep} with us; Steve Boyer, Cameron Gordon, Tye Lidman, and the referees for helpful comments on a previous version of the manuscript. }
\end{document} |
\begin{document}
\pagenumbering{roman}
\title{$q$-Partitioning Valuations: Exploring the Space Between
Subadditive and Fractionally Subadditive Valuations}
\begin{abstract}
For a set $M$ of $m$ elements, we define a decreasing chain of classes of normalized monotone-increasing valuation functions from $2^M$ to $\mathbb{R}_{\geq 0}$, parameterized by an integer $q \in [2,m]$. For a given $q$, we refer to the class as \emph{$q$-partitioning}. A valuation function is subadditive if and only if it is $2$-partitioning, and fractionally subadditive if and only if it is $m$-partitioning. Thus, our chain establishes an interpolation between subadditive and fractionally subadditive valuations. We show that this interpolation is smooth ($q$-partitioning valuations are ``nearly'' $(q-1)$-partitioning in a precise sense,~\cref{thm:smoothness}), interpretable (the definition arises by analyzing the core of a cost-sharing game, \`{a} la the Bondareva-Shapley Theorem for fractionally subadditive valuations,~\cref{section:costsharing}), and non-trivial (the class of $q$-partitioning valuations is distinct for all $q$,~\cref{prop:exisetnce}).
We interpolate prior results that separate subadditive and fractionally subadditive for all\linebreak $q \in \{2,\ldots, m\}$. Two highlights are the following:
\begin{enumerate}
\item[i)] An $\Omega \left(\frac{\log \log q}{\log \log m}\right)$-competitive posted price mechanism for $q$-partitioning valuations. Note that this matches asymptotically the state-of-the-art for both subadditive ($q=2$)~\cite{DuttingKL20}, and fractionally subadditive ($q=m$)~\cite{FeldmanGL15}.
\item[ii)] Two upper-tail concentration inequalities on $1$-Lipschitz, $q$-partitioning valuations over independent items. One extends the state-of-the-art for $q=m$ to $q<m$, the other improves the state-of-the-art for $q=2$ for $q > 2$. Our concentration inequalities imply several corollaries that interpolate between subadditive and fractionally subadditive, for example: $\mathbb{E}[v(S)]\le (1 + 1/\log q)\text{Median}[v(S)] + O(\log q)$. To prove this, we develop a new isoperimetric inequality using Talagrand's method of control by $q$ points, which may be of independent interest.
\end{enumerate}
We also discuss other probabilistic inequalities and game-theoretic applications of $q$-partitioning valuations, and connections to subadditive MPH-$k$ valuations~\cite{EzraFNTW19}.
\end{abstract}
\addtocounter{page}{-1}
\section{Introduction}\label{sec:intro}
\pagenumbering{arabic}
\label{section:intro}
\subsection{Motivation}
Functions of the form $f:2^M\longrightarrow \mathbb{R}$ are a fundamental object of study in the fields of Algorithmic Game Theory and Combinatorial Optimization. For example, when $M$ is a set of items in an auction, $f(S)$ could indicate the value that an agent obtains from receiving the bundle $S$ (see more about combinatorial auctions in \cite[Chapter 11]{AGTbook}). When $M$ is a set of agents, $f(S)$ could indicate the cost that agents $S$ need to pay in order to purchase a given service together (see more about cost sharing in \cite[Chapter 15]{AGTbook}).
As set functions\footnote{We will use the terms \textit{set function} and \textit{valuation} interchangeably in the rest of the paper. This convention is motivated by the setting of combinatorial auctions in which $f(S)$ indicates the ``value'' of the subset of items $S$.} are motivated by real world processes --- auctions, cost sharing, and job scheduling among others --- the mathematical study of such functions usually assumes that they satisfy certain natural properties. Throughout the paper, we assume that all valuations satisfy the following two simple technical properties: they are \textit{monotone} ($f(S)\le f(T)$ whenever $S\subseteq T$) and \textit{normalized} ($f(\emptyset) = 0$). Economic considerations give rise to more complex conditions on set functions.
For example, frequently imposed is the condition of diminishing marginal values, also known as \textit{submodularity}. Another condition motivated by economics is complement-freeness in the values that an agent obtains from bundles of items, which is also known as \textit{subadditivity}. Finally, one could be interested in the existence of prices which incentivize cooperation among agents when purchasing a given service; this turns out to be equivalent to the \textit{fractionally subadditive} property (see \cref{section:costsharing}).\footnote{In this paper, the terms ``submodularity'', ``fractional subadditivity'', and ``subadditivity'' imply monotonicity and normalization.}
In this paper, we focus our attention on fractionally subadditive and subadditive set functions. Trivially, fractionally subadditive functions are a smaller class strictly contained in the class of subadditive functions. Something stronger turns out to be true --- Bhawalkar and Roughgarden show the existence of subadditive functions which are very far from being fractionally subadditive in a precise quantitative sense \cite{BhawalkarR11}. This difference between fractionally subadditive and subadditive valuations is not purely theoretical and has important implications. For example, in the context of combinatorial auctions, there exists a posted price mechanism that gives a $(1/2)$-approximation to the optimal welfare when all players have fractionally subadditive valuations~\cite{FeldmanGL15}, but the best known approximation ratio for subadditive valuations is $\Omega(\frac{1}{\log \log m}),$ where $m$ is the number of items \cite{DuttingKL20} (moreover, the~\cite{FeldmanGL15} framework providing a $(1/2)$-approximation for XOS provably cannot beat $O(\log m)$ for subadditive, and the~\cite{DuttingKL20} framework provably cannot beat $O(\log \log m)$). Similarly, in the context of concentration inequalities, a fractionally subadditive valuation $v$ has $\mathbf{E}[v]$-subgaussian lower tails (see \cite[Corollary 3.2]{Vondrak10}), but such a strong dimension-free concentration provably does not hold for subadditive valuations (see \cite[Section 4]{Vondrak10}).
What if a set function is ``somewhere in between being subadditive and being fractionally subadditive''? On the one hand, as it is not fractionally subadditive, one cannot use the strong guarantees of fractional subadditivity (such as in posted price mechanisms or subgaussian concentration) when analyzing it. On the other hand, as the set function could be significantly more structured than an arbitrary subadditive function, it is perhaps inefficient to simply use the much weaker properties guaranteed by subadditivity (especially, those that provably cannot be improved for all subadditive functions). In this paper, we construct a smooth interpolation between fractional subadditivity and subadditivity. Explicitly, we define a chain of function classes that starts with fractionally subadditive set functions and expands to subadditive set functions. Our goal is to understand how the behaviour of these function classes changes along the chain. We focus on several setups in which subadditive and fractionally subadditive valuations have received significant attention in the literature, and in which strong claims for fractionally subadditive valuations provably don't hold for all subadditive valuations.
\subsection{Results Part I: Defining \hmath$q$-partitioning valuations}
Our chain of classes is parametrized by a positive integer parameter $q$ ranging between $q= |M|$ (which corresponds to the fractionally subadditive case) and $q = 2$ (which corresponds to the subadditive case). The number $q$ corresponds to the complexity of fractional covers under which the valuation function is non-diminishing. We call the respective classes \textit{$q$-partitioning} and the resulting interpolation the \textit{partitioning interpolation.} We give a formal definition in \cref{def:qpartprimal}. We then establish that the partitioning interpolation satisfies several desirable properties:
\setlist{nolistsep}
\begin{itemize}[noitemsep]
\item \textbf{Interpretability:} In \cref{section:costsharing}, we present an economic interpretation of $q$-partitioning via the core of a cost-sharing game \`{a} la the Bondareva-Shapley theorem which characterizes fractionally subadditive valuations~\cite{Bondareva63,Shapley67} --- See also~\cite[Theorem~15.6]{AGTbook}. In slightly more detail, say there is a service that can be acquired by set $S$ of players if they together pay $c(S)$. One can then ask, for any subset $T \subseteq [m]$ whether or not there exist non-negative prices $\{p_i\}_{i \in T}$ such that: a) $\sum_{i \in T} p_i = c(T)$ (service is purchased for $T$) and b) for all $S\subseteq T$, $\sum_{i \in S} p_i \leq c(S)$ (no set $S\subseteq T$ wishes to deviate and purchase the service just for themselves). The Bondareva-Shapley theorem, applied to monotone normalized cost functions, states that such prices exist for all $T$ \emph{if and only if $c(\cdot)$ is fractionally subadditive}.
Consider instead modifying the game so that players are grouped into $q$ fully-cooperative \emph{cities} (that is, cities will always act as a coherent unit, and will act in the best interest of the entire city). One can then ask, for any subset $T \subseteq [m]$ and any partitioning of $T$ into $q$ cities $T_1,\ldots, T_q$, do there exist non-negative prices $\{p_i\}_{i \in [q]}$ such that: a) $\sum_{i \in [q]} p_i = c(T)$ (service is purchased for $T$) and b) for all $S \subseteq [q]$, $\sum_{i \in S} p_i \leq c(\cup_{i \in S} T_i)$ (no set $S$ of cities wishes to deviate and purchase the service just for themselves). \cref{prop:maincostsharing} establishes that such prices exist for all $T$ and all partitionings of $T$ into at most $q$ cities \emph{if and only if $c(\cdot)$ is $q$-partitioning}.
\item \textbf{Smoothness of The Interpolation:} In \cref{thm:smoothness}, we show that our chain of classes is smooth in the sense that every $q$-partitioning valuation is almost $(q+1)$-partitioning. Formally, \cref{thm:smoothness} establishes that the class of $q$-partitioning valuations is $(1-1/q)$-close to the class of $(q+1)$-partitioning valuations. We provide a formal definition of closeness in \cref{def:closeness}, but note briefly here that it is the natural extension of closeness to XOS valuation functions from~\cite{BhawalkarR11} extended to $q < m$.
\item \textbf{Existence of Classes:} In \cref{prop:exisetnce}, we show that for each $m = |M|$ and $2\le q \le m,$ there exist $q$-partitioning valuations over $M$ that are not $(q+1)$-partitioning. In other words, none of the $m-1$ classes ``collapses'' to a lower level.
\end{itemize}
\subsection{Results Part II: Posted price mechanisms and concentration inequalities}
Our main results apply the partitioning interpolation to two canonical problems where subadditive and fractionally subadditive valuations are ``far apart.'' Our main results provide analyses that smoothly degrade from fractionally subadditive to subadditive as $q$ decreases -- this enables stronger guarantees for wide classes of structured subadditive functions which (provably) cannot be obtained for all subadditive functions.\\
\noindent\textbf{Posted Price Mechanisms.} Posted price mechanisms are a core objective of study within Algorithmic Game Theory, including multi-dimensional mechanism design~\cite{ChawlaHMS10}, single-dimensional mechanism design~\cite{Yan11,AlaeiHNPY15}, and the price of anarchy~\cite{FeldmanGL15, DuttingFKL20}. Posted price mechanisms list a price $p_i$ for each item $i \in [m]$, then visit the bidders one at a time and offer them to purchase any remaining set $S$ of items at price $\sum_{i \in S} p_i$ (and these items become unavailable for all future bidders). Of course, strategic players will pick the remaining set $S$ that maximizes $v_i(S) - \sum_{i \in S} p_i$.
Of key importance to multiple of these agendas is the following basic question: to what extent can posted price mechanisms optimize welfare in Bayesian settings? Specifically, assume that each bidder $i$'s valuation function $v_i(\cdot)$ is drawn independently from a known distribution $D_i$ over valuations in some class $\mathcal{V}$. The optimal expected welfare is simply $\mathbb{E}_{\vec{v}\leftarrow \times_i D_i}[\max_{\text{partitions } S_1,\ldots, S_n}\{\sum_i v_i(S_i)\}]$. When strategic players participate in a posted-price mechanism with prices $\vec{p}$, some other partition of items is selected, guaranteeing some other expected welfare. What is the maximum number $\alpha(\mathcal{V})$ such that for all $D = \times_i D_i$ supported on $\mathcal{V}$, there exists a posted-price mechanism that results in expected welfare at least an $\alpha(\mathcal{V})$-fraction of the optimal welfare? Besides being the main question of study in works such as~\cite{FeldmanGL15,DuttingFKL20,DuttingKL20}, resolving this question has downstream implications for revenue-maximization in multi-dimensional settings due to~\cite{CaiZ17}.
For the class of fractionally subadditive valuations,~\cite{FeldmanGL15} establish a $1/2$-approximation, which also implies a $1/\log_2(m)$-approximation for subadditive valuations. However, their techniques provably cannot yield stronger guarantees for subadditive valuations~\cite{BhawalkarR11,DuttingFKL20}. Recent breakthrough work of~\cite{DuttingKL20} designs a new framework for subadditive valuations that yields an $\Omega(1/\log_2\log_2(m))$-approximation, but aspects of their framework also provably cannot provide stronger guarantees. In this sense, there is a strong separation between the state-of-the-art guarantees on posted price mechanisms for fractionally subadditive and subadditive valuations (and also, there is a permanent separation between what can be achieved within the aforementioned frameworks).\\
\noindent\textbf{Main Result I:} Our first main result provides an $\Omega(\frac{\log \log q}{\log \log m})$-competitive posted price mechanism when all distributions are supported on $q$-partitioning valuations. This is stated in \cref{thm:postedpriceqpart}.\\
\noindent Note that this guarantee matches both the constant factor approximation in the fractionally subadditive case (setting $q = m$) and the $\Omega(\frac{1}{\log \log m})$ factor in the subadditive case (setting $q = 2$) and interpolates between the two approximation factors when $q$ is in between. In particular, note that this matches the state-of-the-art in both extremes, and matches the best guarantees achievable by the~\cite{DuttingKL20} approach in both extremes.\\
\noindent\textbf{Concentration Inequalities.} Consider a function $f$, and a set $S$ selected by randomly including each item $i$ independently (not necessarily with the same probability). It is often of interest to provide upper tail bounds on the distribution of $f(S)$ compared to $\mathbf{E}[f(S)]$. McDiarmid's inequality is one such example when $f$ is $1$-Lipschitz.\footnote{$f(\cdot)$ is $1$-Lipschitz if $|f(S) - f(S \cup \{i\})| \leq 1$ for all $S,i$.} It is further the case that when $f(\cdot)$ is subadditive or fractionally subadditive, even stronger upper tail bounds are possible~\cite{Vondrak10}.
For example, if $f$ is both $1$-Lipschitz and subadditive, Schechtman's inequality implies that the probability that $f(S)$ exceeds twice its median plus $x$ decays exponentially in $x$~\cite{schectman}.\footnote{Schechtman's inequality is more general than this, but this is one common implication. See \cref{eq:schehtman} for the general statement.} Importantly, Schechtman's inequality provably cannot ``kick in'' arbitrarily close to the median~\cite{Vondrak10}.\\
\noindent\textbf{Main Result II:} \cref{thm:qparttailspecial} improves Schechtman's inequality across the partitioning interpolation. In particular, our improvement implies that for all $1$-Lipschitz and $q$-partitioning $f$, the probability that $f([m])$ exceeds $(1+\log_2(q))$ times its median plus $x$ decays exponentially in $x$. This is stated in \cref{thm:qparttailspecial}. In particular, \cref{thm:qparttailspecial} makes use of a new isoperimetric inequality that may be of independent interest, and that is stated in \cref{thm:talagrandgenerals}.\footnote{Note that this result, and that of~\cite{schectman} applies in a more general setting where there is a collection of independent random variables $X_1,\ldots, X_m$, that parameterize a function $f_{\vec{X}}: 2^{[m]}\rightarrow \mathbb{R}$, which is subadditive for all $\vec{X}$. Like~\cite{Vondrak10}, we provide proofs in the canonical setting referenced in the text for simplicity of exposition.}\\
Similarly, if $f$ is both $1$-Lipschitz and fractionally subadditive,~\cite{Vondrak10} establishes that $f$ is self-bounding. \cite{BoucheronLM00} establish ``Chernoff-Bernstein-like'' concentration inequalities on self-bounding functions, which in particular imply that $f([m])$ has $\mathbf{E}[f([m])]$-subgaussian lower tails and slightly weaker upper tails.\footnote{A random variable $X$ is $\sigma^2$-subgaussian if the following inequality holds. The log-moment generating function defined by $\psi_X(\lambda):=\log \mathbf{E}[\exp(\lambda(X - \mathbf{E}[X]))]$ exists for all real numbers $t$ and, furthermore, satisfies
$\psi_X(\lambda)\le \frac{\lambda^2\sigma^2}{2}.$ It is well known that if $X$ is $\sigma^2$-subgaussian, then
$\mathbf{P}[X\ge \mathbf{E}[X] + t]\le \exp(-\frac{t^2}{2\sigma^2})$ and
$\mathbf{P}[X\le \mathbf{E}[X] - t]\le \exp(-\frac{t^2}{2\sigma^2})$.
}\\
\noindent\textbf{Main Result III:} \cref{thm:selfboundingqpart} extends this across the partitioning interpolation. Specifically, our result establishes that for all $1$-Lipschitz and $q$-partitioning $f$, $f([m])$ is $(\lceil m/q \rceil,0)$-self bounding, which implies by~\cite{McDiarmidR06,BoucheronLM09} that $f([m])$ has a $\lceil m/q \rceil\cdot \mathbf{E}[f([m])]$-subgaussian lower tail and a slightly worse Bernstein-like upper tail.\\
It is worth noting that~\cite{schectman}, based on Talagrand's method of control by $q$ points, is the state-of-the-art for concentration of subadditive functions, while~\cite{Vondrak10}, based on the method of self-bounding functions~\cite{BoucheronLM00,McDiarmidR06,BoucheronLM09}, is state-of-the-art for fractionally subadditive functions. Our main results extend both across the partitioning interpolation, but neither of the two approaches yields ``tight'' results at both ends --- our extension of~\cite{schectman} gives sharper results for small $q$, and our extension of~\cite{Vondrak10} gives sharper results for larger $q$. This is to be expected, as the two methods are genuinely distinct.
\subsection{Related Work and Connection to Subadditive MPH-\hmath$k$}
\noindent\textbf{Hierarchies of Valuation Functions.} Prior to our work, there has been significant interest in exploring the space of valuation functions with \emph{parameterized complementarities}~\cite{AbrahamBDR12,FeigeFIILS15,FeigeI13,FeldmanFMR16,FeldmanI14,FeldmanI17, EdenFFTW21}. That is, the simplest level of the hierarchy is (fractionally) subadditive valuations, the second level of the hierarchy already contains functions that are not subadditive, and the final level of the hierarchy contains all monotone functions. These works are distinct from ours in that they explore the space between (fractionally) subadditive valuations and arbitrary monotone valuations, whereas our work explores the space between fractionally subadditive and subadditive valuations.
To the best of our knowledge, the only prior work exploring the space between fractionally subadditive and subadditive valuations is~\cite{EzraFNTW19}. Their main results concern the communication complexity of two-player combinatorial auctions for subadditive valuations, but they also provide improved parameterized guarantees for valuations that are subadditive and also MPH-$k$~\cite{FeigeFIILS15}. A detailed comparison to our work is therefore merited:
\begin{itemize}
\item The partitioning interpolation follows from a first-principles definition (\cref{section:costsharing}). On the other hand, the MPH hierarchy explores the space between fractionally subadditive and arbitrary monotone valuations, and~\cite{EzraFNTW19} restrict attention to the portion of this space that is also subadditive.
\item Our main results consider posted price mechanisms and concentration inequalities, neither of which are studied in~\cite{EzraFNTW19}.~\cite{EzraFNTW19} study the communication complexity of combinatorial auctions (where the gap between fractionally subadditive and subadditive is only constant), which is not studied in our work.
\item We show (\cref{lem:qpartandmphk}) that all $q$-partitioning valuations are also MPH-$\lceil m/q\rceil$. Therefore, we can conclude a $(1/2 + 1/\log_2 (\lceil m/q \rceil))$-approximation algorithm for two-player combinatorial auctions with $q$-partitioning valuations using~\cite{EzraFNTW19} (this is the only result of their paper concerning functions between fractionally subadditive and subadditive).
\item We further show that $q$-partitioning admits a dual definition (\cref{def:qpartdual}, similar to the duality between XOS and fractionally subadditive). A particular feasible dual solution implies a witness that $q$-partitioning valuations are MPH-$\lceil m/q\rceil$. This suggests that our dual definition is perhaps ``the right'' modification of subadditive MPH-$k$ so that a dual definition exists.
\end{itemize}
\noindent\textbf{Posted price mechanisms.} Posted price mechanisms are a core object of study within Algorithmic Game Theory. Variants of posted price mechanisms achieve state-of-the-art guarantees for wide ranges of combinatorial auctions~\cite{AssadiKS21,DobzinskiNS12}. Posted price mechanisms are strongly obviously strategyproof~\cite{Li17,PyciaT19}. Posted price mechanisms have also been used in Bayesian settings to study the price of anarchy for welfare~\cite{FeldmanGL15,DuttingFKL20,DuttingKL20}, revenue maximization in multi-dimensional settings~\cite{ChawlaHMS10,KleinbergW19,ChawlaM16, CaiZ17}, and revenue maximization in single-dimensional settings~\cite{Yan11,AlaeiHNPY15,FengHL19, JinLQTX19,JinLTX19,JinJLZ21}. Most relevant to our work is the study of posted price mechanisms in Bayesian settings for welfare, where the state-of-the-art is a $(1/2)$-approximation for fractionally subadditive valuations~\cite{FeldmanGL15}, and an $\Omega(1/\log_2\log_2(m))$-approximation for subadditive valuations~\cite{DuttingKL20}. These results further imply approximation guarantees of the same asymptotics for multi-dimensional mechanism design via~\cite{CaiZ17}, and it is considered a major open problem whether improved guarantees are possible for subadditive valuations. Our work provides improved guarantees across the partitioning interpolation (of $\Omega(\log_2\log_2 (q)/\log_2\log_2(m))$), which matches the state-of-the-art at both endpoints (and moreover, is provably tight at both endpoints for the approach of~\cite{DuttingKL20}).\\
\noindent\textbf{Concentration Inequalities.} Concentration inequalities on functions of independent random variables are a core tool across many branches of Computer Science. For example, they are widely used in Bayesian mechanism design~\cite{RubinsteinW18,ChawlaM16, CaiZ17, KothariMSSW19}, learning theory
\cite{BalcanH11,FeldmanV13}, and discrete optimization \cite{FairsteinKS21}. Vondr\'{a}k's wonderful note on concentration inequalities of this form gives the state-of-the-art when $f$ is fractionally subadditive and subadditive, and mentions other applications~\cite{Vondrak10}. Our results extend both the state-of-the-art for subadditive and fractionally subadditive across the partitioning interpolation. In addition, we provide a new isoperimetric inequality based on Talagrand's method of control by $q$ points.
\subsection{Summary and Roadmap}
\cref{sec:prelim} immediately follows with formal definitions. \cref{sec:defineqpart} defines the partitioning interpolation, and provides several basic properties (including an interpretation via cost-sharing, and a dual formulation). \cref{section:postedprices} overviews our first main result: an $\Omega(\frac{\log\log q}{\log \log m})$-approximate posted-price mechanism for $q$-partitioning valuations. \cref{section:introconcentration} overviews our main results on concentration inequalities. \cref{section:futurework} concludes.
The appendices contain all omitted proofs, along with some additional facts about the partitioning hierarchy. For example, \cref{section:closeness} discusses the distance of subadditive functions to $q$-partitioning functions.
\section{Preliminaries}\label{sec:prelim}
Throughout the entire paper, we assume that valuations
$f:2^M\longrightarrow \mathbb{R}^+$ are \textit{normalized}, meaning that $f(\emptyset) = 0,$ and \textit{increasing monotone}, meaning that $f(S)\le f(T)$ whenever $S\subseteq T.$\\
\noindent\textbf{Standard Valuation Classes.} A valuation function $f$ is \emph{subadditive} if for all $S,T$, $f(S \cup T) \leq f(S) + f(T)$. $f$ is \emph{XOS} if there exists a collection $\mathcal{A}$ of non-negative additive functions\footnote{A valuation function $v$ is non-negative additive if for all $S$, $v(S)=\sum_{ i \in S} v(\{i\}),$ where $v(\{i\})\geq 0$ holds for all $i.$} such that for all $S$, $f(S)=\max_{v\in \mathcal{A}}\{v(S)\}$. $f$ is \emph{fractionally subadditive} if for any $S$ and any fractional cover $\alpha(\cdot)$ such that for all $j \in S$ $\sum_{T\ni j} \alpha(T) \geq 1$ and $\alpha(T)\geq 0$ for all $T,$ it holds that $f(S) \leq \sum_{T} \alpha(T) f(T)$. It is well-known that $f$ is XOS if and only if it is fractionally subadditive via LP duality~\cite{Feige09}. \\
\noindent\textbf{PH-$k$, MPH-$k$, and Subadditive MPH-$k$ valuations.}
Maximum over Positive Hypergraph-$k$ valuations, in short MPH-$k$, were introduced in \cite{FeigeFIILS15}. Since then, they have been studied in various different contexts such as communication complexity of combinatorial auctions~\cite{EzraFNTW19} and posted price mechanisms~\cite{FeldmanGL15}. One motivation behind MPH-$k$ valuations is to construct a hierarchy of valuation classes (starting with XOS) by replacing additive valuations with a richer class of valuations parameterized by $k$. Specifically:
\begin{definition}
A valuation $v:2^{[m]}\longrightarrow \mathbb{R}_{\ge 0}$ is:
\begin{enumerate}
\item \textbf{PH-$k$} if there exist non-negative weights $w(E)$ for subsets $E\subseteq [m], |E| \leq k$, such that for all $S\subseteq [m]$: $\displaystyle v(S) = \sum_{T\subseteq S\; : \; |T|\le k} w(T).$
\item \textbf{MPH-$k$} if there exists a set of PH-$k$ valuations $\mathcal{A}$ such that $\displaystyle v(S) = \max_{a\in \mathcal{A}} a(S)$ for all $S\subseteq[m].$
\item \textbf{Subadditive MPH-$k$ (CFMPH-$k$)} if $v$ is simultaneously subadditive and MPH-$k.$
\end{enumerate}
\end{definition}
Note that PH-$1$ valuations are exactly the class of additive valuations, so the class of MPH-$1$ valuations is exactly the class of XOS valuations. Note also that PH-$2$ valuations need not be subadditive (and therefore, MPH-$2$ valuations need not be subadditive either). MPH-$m$ contains all monotone valuation functions, and all subadditive functions are MPH-$m/2$~\cite{EzraFNTW19}. We establish a connection between $q$-partitioning valuations and valuations that are MPH-$\lceil m/q\rceil$ and subadditive in \cref{lem:qpartandmphk}.
\section{The Partitioning Interpolation}\label{sec:defineqpart}
Here, we present our main definition. We give its more intuitive ``primal form'' as the main definition, and establish a ``dual form'' in \cref{sec:dualqpart}.
\begin{definition}
\label{def:qpartprimal}
Let $q \in [2,m]$ be an integer. A valuation $v:2^{[m]}\longrightarrow \mathbb{R}_{\ge 0}$ satisfies the $q$-partitioning property if for any $S\subseteq [m]$ and any partition $(S_1, S_2, \ldots, S_q)$ of $S$ into $q$ (possibly empty) disjoint parts, and any fractional covering $\alpha$ of $[q]$ (that is, any non-negative $\alpha(\cdot)$ such that for all $j \in [q], \sum_{T \ni j} \alpha(T) \geq 1$):
$$v(S) \leq \sum_{T \subseteq [q]} \alpha(T) \cdot v\left(\cup_{j \in T} S_j\right).$$
We refer to the class of $q$-partitioning valuations over $[m]$ as $\mathcal{Q}(q,[m])$.
\end{definition}
The intuition behind our definition is that $q$ captures the complexity of non-negative fractional covers under which the value of $v(\cdot)$ is non-diminishing. Subadditive valuations are only non-diminishing under very simple covers (covering $S\cup T$ by $S$ and $T$), while XOS valuations are non-diminishing under arbitrarily complex fractional covers. The parameter $q$ captures the desired complexity in between. We now establish a few basic properties of $q$-partitioning valuations.
We begin with the following nearly-trivial observations: First, for any fixed $q$ and $m$, the class $\mathcal{Q}(q,[m])$ is closed under conic combinations.\footnote{That is, $\mathcal{Q}(q,[m])$ is closed under linear combinations with non-negative coefficients.} This has
implications for oblivious rounding of linear relaxations \cite{FeigeFT16}. Furthermore, for any fixed $q$ and $m$, the class $\mathcal{Q}(q,[m])$ is closed under taking pointwise suprema, which means that one can use the ``lower envelope technique'' when approximating functions by $q$-partitioning functions \cite[Section 3.1]{FeigeFIILS15}.
Now, we establish the three promised properties from ~\cref{sec:intro}. We begin by confirming that indeed the partitioning interpolation interpolates between fractionally subadditive and subadditive valuations.
\begin{proposition}
\label{prop:exisetnce}
For all $m$, the following relations between classes of $q$-Partitioning valuations hold:
$$
\text{XOS}([m]) =
\mathcal{Q}(m, [m])\subsetneq
\mathcal{Q}(m-1, [m])\subsetneq \cdots
\cdots \subsetneq
\mathcal{Q}(2, [m])
= \text{CF}([m]).
$$
\end{proposition}
We provide a complete proof of \cref{prop:exisetnce} in \cref{appendix:existenceproblem}. It is reasonably straight-forward to see that $\text{XOS}([m]) =
\mathcal{Q}(m, [m])$, and that $\mathcal{Q}(2, [m])
= \text{CF}([m])$. It is also straightforward to see the inclusions in the chain (any partition with $q$ parts is also a partition with $q+1$ parts, by adding an empty part). We show that each inclusion is strict via the following proposition, whose complete proof appears in \cref{appendix:existenceproblem}.
\begin{proposition}
\label{prop:existenceproblem}
Consider a valuation $v$ over $[m]$ such that $v(S) = 1$ whenever $1 \le |S|\le m-1$ and $v(\emptyset) = 0$. The largest value $v([m])$ for which $v$ is $q$-partitioning is $\frac{q}{q-1}.$
\end{proposition}
We now show that $\mathcal{Q}(q,[m])$ is close to $\mathcal{Q}(q+1,[m])$ in a precise sense. Note that \cref{def:closeness} applied to $q=m$ is exactly the notion of closeness used in~\cite{BhawalkarR11}.
\begin{definition}
\label{def:closeness}
Suppose that $0< \gamma\le 1.$
A class of valuations $\mathcal{G}$ over $[m]$ is $\gamma$-close to the class $\mathcal{Q}(q,[m])$ if for any $g\in \mathcal{G},$ any $S\subseteq [m],$ any partition $(S_1, S_2, \ldots, S_q)$ of $S$ into $q$ parts, and any
fractional cover $\alpha$ of $[q],$ it is the case that
$$
\sum_{{T}\subseteq [q]} \alpha({T})g(\bigcup_{i \in {T}}S_i)\ge \gamma g(S).
$$
\end{definition}
We will see a further interpretation of \cref{def:closeness} in \cref{prop:gammacitycore}. For now, we simply present the following ``smoothness'' claim.
\begin{theorem}
\label{thm:smoothness}
$\mathcal{Q}(q+1,[m])$ is $\frac{q-1}{q}$-close to $\mathcal{Q}(q,[m])$.
\end{theorem}
The proof of \cref{thm:smoothness} appears in \cref{section:properties}. Finally, we provide our first-principles definition of $q$-partitioning via a cost-sharing game. This aspect is more involved, so we overview the setup in \cref{section:costsharing}.
\subsection{Interpretation in Cost Sharing}
\label{section:costsharing}
\subsubsection{Recap: characterizing XOS via cost-sharing}
Consider a set $[m]$ of players who are interested in receiving some service. There is a
cost for this service described by a monotone increasing normalized cost function $c: 2^{[m]}\longrightarrow \mathbb{R}.$ Here, $c(S)$ is the cost that players $S$ need to pay together so that each of them receives the service. A natural question to ask is: \textit{When is it the case that one can allocate the cost of the service between the community such that no subset of players $T\subseteq [m]$ is better off by forming a coalition and receiving the service on their own?} Formally, this question asks whether the ${\mathsf{core}}$ of the game is nonempty. The ${\mathsf{core}}$ is the set of all non-negative \textit{cost-allocation vectors} $\mathbf{p} = (p_1, p_2, \ldots, p_m)$ that satisfy $\sum_{i\in S}p_i \le c(S) \; \forall S\subseteq [m],$ and $\sum_{i\in [m]}p_i = c([m])$ \cite[Definition 15.3]{AGTbook}. We'll refer to the game parameterized by cost function $c(\cdot)$ restricted to players in $S$ as $\textsc{Game}(c,S)$.
This question is answered by the Bondareva-Shapley Theorem (see~\cite[Theorem 15.6]{AGTbook}). Applied to monotone normalized cost functions $c,$ the theorem states:
\begin{theorem}[\cite{Bondareva63,Shapley67}]
\label{thm:classicshapleycorethm}
The ${\mathsf{core}}$ $\textsc{Game}(c,[m])$ is non-empty if and only if for any non-negative fractional cover $\alpha$ of $[m]$ it is the case that $\sum_{S\subseteq[m]}\alpha(S)c(S) \ge c([m]).$
\end{theorem}
\noindent
An immediate generalization of this theorem, which appears in \cite[Section 1.1]{Feige09}, is:
\noindent
\begin{theorem}
\label{thm:xosinshapley}
The ${\mathsf{core}}$ of $\textsc{Game}(c,S)$ is non-empty for all $S$ if and only if $c$ is fractionally subadditive.
\end{theorem}
An interpretation of the above statement is the following. No matter what subset $S\subseteq [m]$ of players are interested in the service, we can always design a cost allocation vector (which vector can depend on $S$) such that all players in $S$ are better off by purchasing the service together rather than deviating and forming coalitions.
Since finding cores might be impossible (unless $c$ is fractionally subadditive), the following
relaxation of a ${\mathsf{core}}$ appears in coalitional game theory literature. A non-negative vector $\mathbf{p}$ is in the $\gamma\text{-}\mathsf{core}$ of the game if and only if it satisfies $\sum_{i\in S}p_i \le c(S) \; \forall S\subseteq [m],$ and $\gamma c([m])\le \sum_{i\in [m]}p_i \le c([m])$ \cite[Definition 15.7]{AGTbook}.
Again, one has equivalent statements to \cref{thm:xosinshapley,thm:classicshapleycorethm} using a $\gamma\text{-}\mathsf{core}.$ We only state the analogous statement for \cref{thm:xosinshapley}:
\begin{theorem}
\label{thm:classicshapleygammacore}
The $\gamma\text{-}\mathsf{core}$ of $\textsc{Game}(c,S)$ is non-empty for all $S$ if and only if $c$ is $\gamma$-close to XOS.
\end{theorem}
\subsubsection{$q$-partitioning via cost-sharing}
Consider instead a partition of players into $q$ (possibly empty) cities $S_1,\ldots, S_q$. We think of each city as a fully-cooperative entity that takes a single action.\footnote{Perhaps the city has an elected official that acts on behalf of the city's welfare, or perhaps the city's members have built enough trust that they can perfectly profit-share any gains the city gets.} The question of interest is whether ${\mathsf{citycore}}(S_1, S_2, \ldots, S_q)$ of the game is non-empty. ${\mathsf{citycore}}(S_1, S_2, \ldots, S_q)$ is the set of non-negative \textit{cost-allocation vectors} $\mathbf{p} = (p_1, p_2, \ldots, p_q)$ that satisfy $\sum_{i\in T}p_i \le c(\bigcup_{i\in T}S_i)$ for all $T\subseteq [q],$ and $\sum_{i\in [q]}p_i = c(\bigcup_{i \in [q]}S_i).$ Note that a vector in the ${\mathsf{citycore}}$ will incentivize cooperation as each subset of cities needs to pay at least as much if they choose to form a coalition. We parallel the theorems in the previous section with the following propositions. We'll refer to the above game as $\textsc{Game}(c,S,S_1,\ldots, S_q)$ when the normalized monotone cost function is $c$, players in $S$ are participating, and they are partitioned into cities $S_1,\ldots, S_q$.
\begin{proposition}
\label{prop:maincostsharing}
The ${\mathsf{citycore}}$ of $\textsc{Game}(c,S,S_1,\ldots, S_q)$
is non-empty for all $S,S_1,\ldots, S_q$
if and only if $c$ is $q$-partitioning.
\end{proposition}
Again, the interpretation is simple. No matter
which people are interested in the service and how they are distributed between cities, we can design a cost allocation vector such that all cities are better off by purchasing the service together rather than forming coalitions. Finally, one can also relax the concept of a ${\mathsf{citycore}}$ to a $\gamma\text{-}\mathsf{citycore}$ as follows. This is the set of non-negative \textit{cost-allocation vectors} $\mathbf{p} = (p_1, p_2, \ldots, p_q)$ that satisfy $\sum_{i\in T}p_i \le c(\bigcup_{i\in T}S_i) \; \forall T\subseteq [q],$ and
$\gamma c(\bigcup_{i \in [q]}S_i)\le \sum_{i\in [q]}p_i \le c(\bigcup_{i \in [q]}S_i).$ We can then also conclude:
\begin{proposition}
\label{prop:gammacitycore}
The $\gamma\text{-}\mathsf{citycore}$ of $\textsc{Game}(c,S,S_1,\ldots, S_q)$
is non-empty for all $S,S_1,\ldots, S_q$
if and only if $c$ is $\gamma$-close to $q$-partitioning.
\end{proposition}
\subsection{The Dual Definition and Relation to MPH Hierarchy}\label{sec:dualqpart}
Finally, we provide a dual view of the $q$-partitioning property (as in XOS vs.~fractionally subadditive), and relate $q$-partitioning to valuations that are MPH-$\lceil m/q \rceil$. First, we observe that the $q$-partitioning property can be reinterpreted as a claim about a linear program, opening the possibility of a dual definition.
\begin{observation}\label{obs:primal} A valuation function $v$ is $q$-partitioning if and only if for all $S$ and all partitions $(S_1,\ldots, S_q)$ of $S$ into $q$ (possibly empty) disjoint parts, the value of the following LP is $v(S)$:\footnote{Below, the variables are $\alpha(T)$ for all $T \subseteq [q]$.}
\begin{equation}
\label{eq:qpartprimalLP}
\begin{split}
\min &\sum_{{T}\subseteq [q]}v(\bigcup_{i \in T}S_i)\cdot \alpha(T), \text{ s.t.}\\
& \sum_{T \ni j} \alpha(T)\ge 1 \; \; \; \forall j\in [q],\\
&\alpha(T)\ge 0 \; \; \; \forall T\subseteq [q].
\end{split}
\end{equation}
\end{observation}
The proof of \cref{obs:primal} is fairly immediate by observing that feasible solutions to the LP are exactly fractional covers, and that the objective function is exactly the bound on $v(S)$ implied by that fractional cover. We now state a ``dual'' definition of $q$-partitioning valuations. The equivalence with \cref{def:qpartprimal} is a simple application of linear programming, which we present in \cref{section:definitionequivalence}.
\begin{definition}
\label{def:qpartdual}
Let $2\le q \le m$ be integers. A valuation $v:2^{[m]}\longrightarrow \mathbb{R}_{\ge 0}$ satisfies the dual $q$-partitioning property if for any $S\subseteq [m]$ and any partition $(S_1, S_2, \ldots, S_q)$ of $S$ into $q$ disjoint parts, the following linear program has value at least $v(S):$
\begin{equation}
\tag{\text{Dual Definition}}
\label{eq:qpartdualLP}
\begin{split}
\max &\sum_{j \in [q]} p_j, \text{ s.t.}\\
& \sum_{j \in T} p_j\le v(\bigcup_{j \in T} S_j) \; \; \; \forall T\subseteq [q],\\
&p_j\ge 0 \; \; \; \forall j\in [q].
\end{split}
\end{equation}
\end{definition}
\noindent
This dual definition allows us to establish the useful relationship between the partitioning and MPH hierarchies given in \cref{lem:qpartandmphk}.
\begin{proposition}
\label{lem:qpartandmphk}
A valuation over $[m]$ satisfying the $q$-partitioning property is MPH-$\lceil \frac{m}{q}\rceil$ and subadditive.\end{proposition}
\begin{proof}
To prove this statement, for each $S\subseteq [m],$ we will create a clause $w^S$ containing hyperedges of size at most $\lceil \frac{m}{q}\rceil,$ which takes value $v(S)$ at $S$ and for any $T\neq S,$ $w^S(T)\le v(T).$ This will clearly be enough as we can take the maximum over clauses $w^S.$
Take an arbitrary set $S$ and partition it into $q$ subsets $S_1,S_2, \ldots, S_q$ of almost equal size such that each subset has at most $\lceil \frac{m}{q}\rceil$ elements. We will construct a clause of the form
$$
w^S = (S_1: p_1, \; S_2 : p_2,\; \ldots, \; S_q:p_q ),
$$
where $p_i$ is the weight of set $S_i$ for each $i.$ Note that the weights $p_1, p_2, \ldots, p_q$ must satisfy
\begin{equation*}
\begin{split}
& \sum_i p_i = v(S),\\
& \sum_{i \in I} p_i \le v(\bigcup_{i \in I}S_i)\; \forall
I\subseteq [q],\\
& p_i\ge 0\; \forall i\in [q].
\end{split}
\end{equation*}
The existence of such weights is guaranteed by \cref{def:qpartdual}, which completes the proof.\end{proof}
\begin{remark}
\normalfont
It should be noted that the converse statement does not hold true if $q\not \in \{2,m\}$. Let $k = \lceil \frac{m}{q}\rceil.$ The valuation $v(S):= \min\left( \binom{|S|}{k}, \frac{1}{2}\binom{m}{k}\right)$ over $[m]$ is MPH-$k$ and subadditive. However, it is simple to show that it is not $q$-partitioning. Split $[m]$ into $q$ sets of almost equal size $S_1, S_2, \ldots, S_q$ and consider the fractional cover $\alpha$ over $[q]$ assigning weight $\frac{1}{q-1}$ to all subsets of $[q]$ of size $q-1.$ A simple calculation shows that $v$ and $\alpha$ do not satisfy the $q$-partitioning property.
\end{remark}
\section{Main result I: Posted Price Mechanisms}\label{section:postedprices}
We consider the setup of \cite{FeldmanGL15}. Namely, there are $n$ buyers interested in a set of items $[m].$ The buyers' valuations come from a product distribution $\mathcal{D} = \mathcal{D}_1\times \mathcal{D}_2\cdots\times\mathcal{D}_n$, known to the seller. The optimal expected welfare is then $\textsc{OPT}(\mathcal{D}):=\mathbb{E}_{\vec{v} \leftarrow \mathcal{D}}[\max_{\text{Partitions } S_1,\ldots, S_n}\{\sum_{i=1}^n v_i(S_i)\}]$. The goal of the seller is to fix prices $p_1,\ldots, p_m$ so that the following procedure guarantees welfare at least $c\cdot \textsc{OPT}$ in expectation:
\begin{itemize}
\item Let $A$ denote the set of available items. Initially $A = [m]$.
\item Visit the buyers one at a time in adversarial order. When visiting buyer $i$, they will purchase the set $S_i:=\arg\max_{S \subseteq A}\{v_i(S) - \sum_{j \in S} p_j\}$, and update $A:=A \setminus S_i$.
\end{itemize}
\begin{theorem}
\label{thm:postedpriceqpart}
When all agents have $q$-partitioning valuations, there exists a
$\Omega(\frac{\log \log q}{\log \log m})$-competitive
posted price mechanism.\footnote{Unless explicitly indicated, logarithms have base 2 throughout the rest of this section.}
\end{theorem}
Note that this result matches asymptotically the best known competitive ratios for XOS (when $q = m,$ a constant ratio mechanism was proven in \cite{FeldmanGL15}) and CF valuations (when $q = 2,$ a $\Omega(\frac{1}{\log \log m})$-competitive posted price mechanism was proven in \cite{DuttingKL20}) and interpolates smoothly when $q$ is in between.
Like~\cite{DuttingKL20}, we first give a proof in the case when each $\mathcal{D}_i$ is a point-mass, as this captures the key ideas. A complete proof in the general case appears in \cref{sec:incompleteposted}.
Our proof will follow the same framework as~\cite{DuttingKL20}. To this end, let $p \in [0,1]$ be a real number. Denote by $\Delta(p)$ the set of all distributions $\lambda$ over $2^{[m]}$ such that $\mathbf{P}_{S\leftarrow \lambda}[i \in S]\le p$ holds for all
$i \in [m]$. The framework of~\cite{DuttingKL20} establishes the following:
\begin{lemma}[{\cite[Eq. (6)]{DuttingKL20}}]
\label{lem:minimaxgame}
A class of monotone valuations $\mathcal{G}$ over $[m]$ is given. If for any $v\in \mathcal{G},$ there exists a real number $p \in [0,1]$ (possibly depending on $v$), such that
$$
\max_{\lambda\in \Delta(p)}
\min_{\mu \in \Delta(p)}\mathbf{E}_{S\leftarrow \lambda, T\leftarrow \mu}[v(S\backslash T)]\ge
\alpha \times v([m]),
$$
then there exists an $\alpha$-competitive posted price mechanism when all players have valuations in $\mathcal{G}.$
\end{lemma}
\cite{DuttingKL20} then show that when $\mathcal{G}$ is the class of all subadditive functions, such a $p$ exists for \linebreak $\alpha = \Theta(\frac{1}{\log \log m})$, but no better. In the rest of this section, we will show that when $\mathcal{G}$ is the set of $q$-partitioning valuations, the conditions of
\cref{lem:minimaxgame} hold with $\alpha = \Omega \left(\frac{\log \log q}{\log \log m}\right)$. It is clear that (the deterministic case of) \cref{thm:postedpriceqpart} follows immediately from \cref{lem:minimaxgame} and \cref{prop:qpartprobexists}. It is worth noting that, while we leverage \cref{lem:minimaxgame} exactly as in~\cite{DuttingKL20}, the proof of \cref{prop:qpartprobexists} for general $q$ is quite novel in comparison to the $q=2$ (subadditive) case.
\begin{proposition}\label{prop:qpartprobexists} Let $v \in \mathcal{Q}(q,[m])$. Then there exists a real number $p\in [0,1]$ such that:
$$\max_{\lambda\in \Delta(p)}
\min_{\mu \in \Delta(p)}\mathbf{E}_{S\leftarrow \lambda, T\leftarrow \mu}[v(S\backslash T)]\ge
\Omega \left(\frac{\log \log q}{\log \log m}\right)\times v([m]).$$
\end{proposition}
\begin{proof} Denote
$$
g(p) = \max_{\lambda\in \Delta(p)}
\min_{\mu \in \Delta(p)}\mathbf{E}_{S\leftarrow \lambda, T\leftarrow \mu}[v(S\backslash T)],
$$
$$
f(p) =
\max_{\lambda\in \Delta(p)}
\mathbf{E}_{S\leftarrow \lambda}[v(S)],
$$
and let $\lambda^p$ be a maximizing distribution in
$\arg \max_{\lambda\in \Delta(p)}
\mathbf{E}_{S\leftarrow \lambda}[v(S)].$\\
\noindent
Without loss of generality, assume that $q$ is a perfect power of 2, i.e. $q = 2^r$ (we can always decrease $q$ to a power of $2$ without changing the asymptotics of $\Omega(\frac{\log \log q}{\log \log m})$).
Now, fix some $p\in \left(0,\frac{1}{16}\right].$ We will show that $
g(p)\ge \frac{1}{8}\left(f(p) -f(p^{\frac{r}{2}}) \right).$
The first step is the obvious bound
$$
g(p)\ge \min_{\mu \in \Delta(p)}\mathbf{E}_{S\leftarrow \lambda^p, T\leftarrow \mu}[v(S\backslash T)],
$$
which follows from the fact that we can choose $\lambda = \lambda^p.$
Now, we want to bound $\mathbf{E}_{S\leftarrow \lambda^p, T\leftarrow \mu}[v(S\backslash T)].$ Fix a distribution $\mu.$ Let $S$ be drawn according to $\lambda^p$ and $T_1, T_2, \ldots, T_r$ be $r$ independent sets drawn according to $\mu.$ Then,
$$
\mathbf{E}[v(S\backslash T)] =
\mathbf{E}\left[
\sum_{i = 1}^r\frac{1}{r}v(S\backslash T_i)
\right].
$$
\noindent
Now, we will use the $q$-partitioning property. Note that the sets $T_1, T_2 \ldots, T_r$ define a partitioning of $S$ into $2^r = q$ subsets. That is, for any $\vec{v}\in \{0,1\}^r,$ we can define
\begin{equation}
\tag{Partitioning with $r$ sets}
\label{eq:partwithrsets}
S_{\vec{v}} = \{j\in S: \; j \in T_i \text{ for }v_i = 1 \text{ and }j \not \in T_i \text{ for }v_i =0\}.
\end{equation}
According to this partitioning, define $A_0, A_1, A_2, \ldots, A_r$ as follows:
$$
A_t = \{j \in S\; : \; j \text{ belongs to exactly }t \text{ of the sets }T_i\}
= \bigcup_{\vec{v}\; :\; 1^T\vec{v} = t}S_{\vec{v}}.
$$
Then, by the $q$-partitioning property, we know that
$$
\frac{8}{r}\left(
v(S\backslash T_1) + v(S\backslash T_2) + \cdots +
v(S\backslash T_r)
\right) +
v(\bigcup_{j \ge \frac{7r}{8}}A_j)\ge v(S).
$$
Indeed, that is the case for the following reason. If $\vec{v}$ is such that $1^T\vec{v}< \frac{7r}{8},$ then $S_{\vec{v}}$ belongs to at least\linebreak $r - 1^T\vec{v}\ge \frac{r}{8}$ of the sets $S\backslash T_i,$ so it is ``fractionally covered'' by the term\linebreak
$\frac{8}{r}\left(
v(S\backslash T_1) + v(S\backslash T_2) + \cdots +
v(S\backslash T_r)\right).$
If, on the other hand, $\vec{v}$ is such that $1^T\vec{v}\ge \frac{7r}{8},$ then it is fractionally covered by the term $v(\bigcup_{j \ge \frac{7r}{8}}A_j).$\\
\noindent
Now, let $A = \bigcup_{j \ge \frac{7r}{8}}A_j.$ We claim that each element $j \in [m]$ belongs to $A$ with probability at most $p^{r/2}$ (over the randomness in drawing $S \leftarrow \lambda^p$ and $T_1,\ldots, T_r \leftarrow \mu$, then defining $A$ as above). To prove this, we will use the classical Chernoff bound \cref{thm:chernoff} as follows. Let $Y_i$ be the indicator that $j \in T_i.$ Then,
$\mathbf{P}[Y_i = 1]\le p,$ so
$
\mathbf{E}[\sum_{i = 1}^rY_i] \le rp.
$ On the other hand, $j$ is in $A$ if and only if
$\sum_{i = 1}^rY_i\ge \frac{7}{8}r.$ Now, let
$\delta = \frac{7}{8p}-1$ and let $\bar{\mu} = rp$ (not to be confused with the distribution $\mu$). Then, by \cref{thm:chernoff},
$$
\mathbf{P}\left[\sum_{i = 1}^rY_i\ge \frac{7}{8}r\right] \le
\mathbf{P}\left[\sum_{i = 1}^rY_i\ge \bar{\mu} (1 + \delta)\right] \le
\left(
\frac{e^{\delta}}{(1+\delta)^{1+\delta}}
\right)^{\bar{\mu}}\le
\left(
\frac{e^{1+\delta}}{(1+\delta)^{1+\delta}}
\right)^{rp} =
\left(\frac{8ep}{7}\right)^{\frac{7r}{8}}\le p^{\frac{r}{2}},
$$
where the last inequality follows since $p\le\frac{1}{16}.$\\
\noindent
All of this together shows that
$$
g(p) \ge
\min_{\mu \in \Delta(p)}\mathbf{E}_{S\leftarrow \lambda^p, T\leftarrow \mu}[v(S\backslash T)]\ge
$$
$$
\frac{1}{r}\sum_{i = 1}^r \mathbf{E}[v(S\backslash T_i)] \ge
\frac{1}{8}\mathbf{E}[v(S)] -
\frac{1}{8}\mathbf{E}[v(A)] \ge
\frac{1}{8}f(p) - \frac{1}{8}f(p^{\frac{r}{2}}),
$$
where the last inequality holds as each element appears in $A$ with probability at most $p^{r/2}.$ Using the same telescoping trick as in \cite{DuttingKL20}, we conclude as follows. Let $s = \lceil\log_{\frac{r}{2}}\log_{16} m^2\rceil.$ Then,
$$
\sum_{i = 0}^{s-1} g(16^{-(\frac{r}{2})^i})\ge
\frac{1}{8}\sum_{i = 0}^{s-1}
f(16^{-(\frac{r}{2})^i})-
f(16^{-(\frac{r}{2})^{i+1}}) \ge
\frac{1}{8}f\left(\frac{1}{16}\right) - \frac{1}{8}f\left(\frac{1}{m^2}\right).
$$
However, $f(\frac{1}{16})\ge \frac{1}{16}v([m])$ as shown by the distribution $\lambda$ which takes the entire set $[m]$ with probability $\frac{1}{16}$ and the empty set with probability $\frac{15}{16}.$ On the other hand, $f(\frac{1}{m^2})\le \frac{1}{m}v([m])$ since any distribution which takes each element with probability at most $\frac{1}{m^2}$ is non-empty with probability at most $\frac{1}{m}$ and, furthermore, $v$ is normalized monotone. All together, this shows that
$$
\sum_{i = 0}^{s-1} g(16^{-(\frac{r}{2})^i})\ge
\frac{1}{8}(\frac{1}{16} - \frac{1}{m})v([m]).
$$
For all large enough $m$ (say $m>32$), there exists some $p'$ such that
$$
g(p') \ge \frac{1}{256\times s}v([m]) =
\frac{1}{O(\log_r\log_2 m)}v([m]) =
\Omega\left(\frac{\log r}{\log \log m}\right)v([m]) =
\Omega\left(\frac{\log \log q}{\log \log m}\right)v([m]),
$$
which finishes the proof.
\end{proof}
\section{Main Result II: Concentration Inequalities}
\label{section:introconcentration}
In this section, we present our concentration inequalities for the partitioning interpolation. We begin by overviewing our results and their context in further detail, highlighting some proofs. We provide complete proofs in the subsequent section and
\cref{section:concentration}.
\cite{Vondrak10} establishes that when $v$ is XOS, the random variable $v(S)$ has $\mathbf{E}[v(S)]$-subgaussian lower tails. The proof follows by establishing that $1$-Lipschitz XOS functions of independent random variables are \emph{self bounding}, and applying a concentration inequality of~\cite{BoucheronLM00}. We begin by showing that $1$-Lipschitz $q$-partitioning functions are $(\lceil m/q\rceil,0)$-self bounding, and applying a concentration inequality of~\cite{McDiarmidR06,BoucheronLM09} to yield the following:
\begin{theorem}
\label{thm:selfboundingqpart}
Any 1-Lipschitz $q$-partitioning valuation $v$ over $[m]$ satisfies the following inequalities
$$
\mathbf{P}\left[v(S)\ge \mathbf{E}[v(S)] + t\right]\le
\exp\left(-\frac{1}{2}\times\frac{t^2}{\lceil\frac{m}{q}\rceil \mathbf{E}[v(S)] + \frac{3\lceil \frac{m}{q}\rceil-1}{6}t}\right)\;\text { for }t\ge 0,
$$
$$
\mathbf{P}[v(S)\le \mathbf{E}[v(S)] - t]\le
\exp\left(-\frac{1}{2}\times\frac{t^2}{\lceil\frac{m}{q}\rceil \mathbf{E}[v(S)]}\right)\;\text{ for }\mathbf{E}[v(S)]\ge t\ge 0.
$$
\end{theorem}
We prove \cref{thm:selfboundingqpart} in \cref{section:concentration}. \cref{thm:selfboundingqpart} matches~\cite{Vondrak10} at $q=m$, and provides non-trivial tail bounds for $q = \omega(1)$. One should note, however, that the above inequality is useless when $q$ is constant, as it only implies $O(m\mathbf{E}[v(S)])$-subgaussian behaviour, but it is well known that any 1-Lipschitz set function is $m$-subgaussian via McDiarmid's inequality. Our next inequality considers an alternate approach, based on state-of-the-art concentration inequalities for subadditive functions.\\
\cite{schectman} proves\footnote{Schechtman actually considers the non-normalized case and proves that $\mathbf{P}[v(S)\ge (q+1)a + k]\le
q^{-k}2^q,$ but reducing the factor from $q+1$ to $q$ in the normalized case is a trivial modification of the proof. It follows, in particular, from our \cref{thm:qparttailspecial}.} that whenever $v$ is normalized 1-Lipschitz subadditive, the following inequality holds for any real numbers $a>0, k>0,$ and integer $q\ge 2,$ \begin{equation}
\label{eq:schehtman}
\mathbf{P}[v(S)\ge qa + k]\le
q^{-k}2^q.
\end{equation} In particular, setting $a = \mathbf{Med}[v(S)],q=2$, and integrating over $k=0$ to $\infty$ allows one to conclude the bound $\mathbf{E}[v(S)]\le 2\mathbf{Med}[v(S)] + O(1)$, which has proven useful, for example in \cite{RubinsteinW18}. While this inequality establishes a very rapid exponential decay, the decay only begins at $2a$ as we need $q\ge 2$.\footnote{Recall that this is \emph{necessary}, and not just an artifact of the proof --- an example is given in~\cite{Vondrak10}.} What if we seek an upper tail bound of the form $\mathbf{P}[v(S)\ge 1.1a + k]$ or, even more strongly, something of the form $\mathbf{P}[v(S)\ge (1 + {o_m(1)})a + k]$? Our next inequality accomplishes this:
\begin{theorem}
\label{thm:qparttailspecial}
Suppose that $v\in \mathcal{Q}(q,[m])$, and $S\subseteq [m]$ is a random set in which each element appears independently. Then the following inequality holds for any $ a \ge 0, k\ge 0,$ and integers $1\le s < r\le \log_2 q.$
$$
\mathbf{P}\left[v(S)\ge \frac{r}{s}a+k\right]\le
\left(\frac{r}{s}\right)^{-k}2^\frac{r}{s}.
$$
\end{theorem}
The interesting extension for $q$-partitioning valuations via \cref{thm:qparttailspecial} is that one may take\linebreak $s = \log_2 q-1, r = \log_2 q$. From here, for example, one can again take $a$ to be the median of $v(S)$, and integrate from $k=0$ to $\infty$ to conclude that $\mathbf{E}[v(S)]\le (1 + O(\frac{1}{\log q}))\mathbf{Med}[v(S)] + O(\log q).$
In the very special case of $q=m$, we can also replace $\frac{r}{s}$ with any real $1+ \delta >0$ and obtain the bound $\mathbf{E}[v(S)]\le \mathbf{Med}[v(S)] + O(\sqrt{\mathbf{Med}[v(S)]})$, which is the same extremely strong relationship implied by the $\mathbf{E}[v(S)]$-subgaussian behaviour. We prove these simple corollaries of \cref{thm:qparttailspecial} in \cref{section:concentration} and now proceed to
a proof of
\cref{thm:qparttailspecial}. To do so, we need to make a detour and generalize Talagrand's work on the method of ``control by $q$-points''.
\subsection{A Probabilistic Detour: A New Isoperimetric Concentration Inequality}
\label{section:detourisoperimetry}
Suppose that we have a product probability space $\Omega = \prod_{i=1}^N \Omega_i$ with product probability measure $\mathbf{P}.$ Throughout, in order to highlight our new techniques instead of dealing with issues of measurability, we will assume that the probability spaces are discrete and are equipped with the discrete sigma algebra. These conditions are not necessary and can be significantly relaxed (see~\cite[Section 2.1]{Talagrand01}).
For $q+1$ points\footnote{In the current section, we will reserve the letter ``$q$'' to indicate the number of points $y^1, y^2,\ldots, y^{q}.$ While this choice might seem unnatural given that the current paper is all about ``$q$-Partitioning'' and ``$q$'' already has a different meaning, we have chosen to stick to Talagrand's notation. The reason is that the probabilistic approach we use is already known as ``control by $q$ points'' in literature \cite[Section 3]{Talagrand01}.} $y^1, y^2,\ldots, y^{q}, x$ in $\Omega,$ and an integer $1\le s \le q,$ we define
\begin{equation}\label{eq:definefs}
\begin{split}
f^s(y^1, y^2, & \ldots, y^q; x) :=\\
& \Big{|}\Big{\{}i\in [N]: x_i \text{ appears less than $s$ times in the multiset } \{y^1_i, y^2_i, \ldots, y^q_i\}\Big{\}}\Big{|}.
\end{split}
\end{equation}
\noindent
Using this definition, one can extend $f^s$ to subsets $A_1, A_2, A_3, \ldots, A_q$ of $\Omega$ as follows
$$
f^s(A_1, A_2, \ldots, A_q; x) :=
\inf\{f^s(y^1, y^2, \ldots, y^q; x) \; : \; y^i\in A_i\; \forall i\}.
$$
When $A = A_1 = A_2 = \cdots = A_q,$ the function $f^s(A_1, A_2, \ldots, A_q; x)$ intuitively defines a ``distance'' from $x$ to $A.$\footnote{This interpretation of $f^s$ as a ``distance'' is what motivates the name ``isoperimetric inequality'' (see \cite{Talagrand01}).} The definition of the function $f^s$ is motivated by and generalizes previous work of Talagrand \cite{Talagrand01,Talagrand96}. Our main technical result, the proof of which is deferred to
\cref{section:concentration}, is the following. In it, $A_1, A_2, \ldots, A_q$ are fixed while $x$ is random, distributed according to $\mathbf{P},$ which is the aforementioned product distribution over $\Omega.$
\begin{theorem}
\label{thm:talagrandgenerals}
\label{thm:specialcaseqpartitioning}
Suppose that $\alpha\ge \frac{1}{s}$ is a real number and $t(\alpha, q,s)$ is the larger root of the equation $t + \alpha q t^{-\frac{1}{\alpha s}} = \alpha q + 1.$ Then,
$$
\int_{\Omega}
t(\alpha, q, s)^{f^s(A_1, A_2, \ldots, A_q; x)}d\mathbf{P}(x)\le
\frac{1}{\prod_{i=1}^q \mathbf{P}[A_i]^{\alpha}}.
$$
In particular, setting $A_1 = A_2 = \cdots = A_q = A, \alpha = \frac{1}{s}, t = \frac{q}{s},$ we obtain
$$\mathbf{P}[f^s(\underbrace{A, A, \ldots, A}_{q}; x)\ge k]\le
\left(\frac{q}{s}\right)^{-k}\mathbf{P}[x \in A]^{-\frac{q}{s}}.$$
\end{theorem}
\noindent
Using this fact, we are ready to prove \cref{thm:qparttailspecial}.
\subsubsection{Proof of \cref{thm:qparttailspecial}}
\begin{proof}
Denote by $x$ the (random) characteristic vector of $S.$ The entries of $x$ are independent. Denote by $A= \{y\in \{0,1\}^{[m]}\; : \; v(y)\le a\}.$ We claim that whenever $v(x) = v(S)\ge \frac{r}{s}a+k,$ it must be the case that $f^s(\underbrace{A,A,\ldots, A}_r; x)\ge k$. Indeed, suppose that this were not the case. Then, there must exist some $x\in \{0,1\}^{[m]}$ such that $v(x)\ge \frac{r}{s}a+k,$ but $f^s(\underbrace{A,A,\ldots, A}_r; x)< k.$ In particular, this means that there exist $r$ vectors $y^{1}, y^2, \ldots, y^r$ in $A$ such that
$$
\Big{|}\Big{\{}i\in [m]: x_i \text{ appears less than $s$ times in the multiset } \{y^1_i, y^2_i, \ldots, y^r_i\}\Big{\}}\Big{|}<k.
$$
Now, let $T$ be the set corresponding to the characteristic vector of $x$ and $T^i$ to $y^i.$ We also denote the following sets:
$$
M = \Big{\{}i\in [m]: x_i \text{ appears less than $s$ times in the multiset } \{y^1_i, y^2_i, \ldots, y^r_i\}\Big{\}},
$$
$$
M_i = T\cap T^i\; \forall 1\le i \le r.$$
Now, observe that each element of $T\backslash M$ appears in at least $s$ of the $r$ sets $M_1, M_2, \ldots, M_r.$ Furthermore, as $\log q\ge r$ and $v$ is $q$-partitioning, for the same reason as in \cref{eq:partwithrsets}, we know that
$$
v(T\backslash M)\le \sum_{i=1}^r \frac{1}{s}v(M_i)\le
\sum_{i=1}^r \frac{1}{s}v(T^i) =
\sum_{i=1}^r \frac{1}{s}v(y^i)
\le
\frac{ra}{s}.
$$
By the choice of $y^1, y^2, \ldots, y^r,$ we know that $|M|<k.$ As all marginal values of $v$ are in $[0,1],$ it follows that $v(M)<k.$ By subadditivity, we reach the contradiction
$$
\frac{ra}{s} + k \le
v(x) = v(T)\le
v(T\backslash M) + v(M)<
\frac{ra}{s} + k.
$$
Therefore, whenever $v(x) = v(S)\ge \frac{r}{s}a+k,$ it must be the case that $f^s(\underbrace{A,A,\ldots, A}_r; x)\ge k.$ The statement follows from \cref{thm:specialcaseqpartitioning}.
\end{proof}
\section{Conclusion}
\label{section:futurework}
We introduce the partitioning interpolation to interpolate between fractionally subadditive and subadditive valuations. We provide an interpretation of the definition via a cost-sharing game (as in~\cite{Bondareva63,Shapley67} for fractionally subadditive), and also show a relation to the subadditive MPH-$k$ hierarchy via a dual definition. We apply our definition in canonical domains (posted price mechanisms and concentration inequalities) where the fractionally subadditive and subadditive valuations are provably ``far apart'', and use the partitioning interpolation to interpolate between them. One technical nugget worth highlighting is Equation~\eqref{eq:partwithrsets}, which appears in the proofs of both \cref{prop:qpartprobexists} and \cref{thm:qparttailspecial} --- this idea may be valuable in future work involving the partitioning interpolation. We overview several possible directions for future work below.\\
\noindent\textbf{Precise closeness of $\mathcal{Q}(q,[m])$ to $\mathcal{Q}(q',[m])$.} We establish that $\mathcal{Q}(q,[m])$ is $\frac{q-1}{q}$-close to $\mathcal{Q}(q+1,[m])$. In \cref{remark:closenessgap}, we show $q$-partitioning valuations that are \emph{not} $\left(\frac{q^2-1}{q^2}+\epsilon\right)$-close to $\mathcal{Q}(q+1,[m])$ for any $\epsilon>0.$ It is interesting to understand what valuations in $\mathcal{Q}(q,[m])$ are furthest from $\mathcal{Q}(q+1,[m])$ (and how far they are).
Similarly, it is interesting to understand for which $\beta,$ the class $\mathcal{Q}(q,[m])$ pointwise $\beta$-approximates $\mathcal{Q}(q+1,[m])$.\footnote{See~\cite{DevanurMSW15} for a formal definition of pointwise $\beta$-approximation.} Interestingly, a function is $\beta$-close to XOS if and only if it is pointwise $\beta$-approximated by XOS. But, it is not clear whether these two properties are identical for $q\neq m.$ Determining the precise relationship between the two properties is itself interesting. We note that one direction is easy. For any $q,$ being pointwise $\beta$-approximated by $\mathcal{Q}(q,[m])$ implies being $\beta$-close to $\mathcal{Q}(q,[m])$. So the open question is whether the converse is true.
We also note that~\cite{BhawalkarR11} resolves asymptotically the closeness of $\mathcal{Q}(2,[m])$ to $\mathcal{Q}(m,[m])$ at $\Theta(\log m)$. In \cref{section:closeness}, we show that simple modifications of their arguments imply that $\mathcal{Q}(2,[m])$ is $\Theta(\log_2 q)$-close to $\mathcal{Q}(q,[m])$ for any $q$.\\
\iffalse
\begin{problem}
\label{prob:closenessproblem}
What is the optimal value of $\alpha_{m,q,q+1}$ such that $\mathcal{Q}(q,[m])$ is
$\alpha_{m,q,q+1}$-close to $\mathcal{Q}(q+1, [m])?$ More generally, for $2\le q \le p\le m,$ what is the
optimal value of $\alpha_{m,q,p}$ such that $\mathcal{Q}(q,[m])$ is
$\alpha_{m,q,p}$-close to $\mathcal{Q}(p, [m])?$
\end{problem}
\begin{definition}
\label{def:defofapproximation}
Let $\beta\ge 1.$
A class of valuations $\mathcal{F}$ $\beta$-approximates a class of valuations $\mathcal{G}$ over $[m]$ if for any $g\in \mathcal{G},$ there exists some $f\in \mathcal{F}$ such that
$f(S)\le g(S)\le \beta f(S)$ holds for all $S\subseteq [m].$
\end{definition}
\begin{problem}
\label{prob:approximationproblem}
For $2\le q \le p\le m,$ what is the
optimal value of $\beta_{m,q,p}$ such that $\mathcal{Q}(q,[m])$ is
$\beta_{m,q,p}$-approximated by $\mathcal{Q}(p, [m])?$
\end{problem}
We make the trivial observation that if $\mathcal{Q}(p,[m])$ $\beta$-approximates $\mathcal{F},$ then
$\mathcal{F}$ is $\frac{1}{\beta}$-close to $\mathcal{Q}(p,[m]).$ The converse also holds in the XOS case $q = m$ and is easy to show.
\fi
\noindent\textbf{Constructing ``hard'' \hmath$q$-Partitioning valuations.}
Our two main results,
\cref{thm:postedpriceqpart,thm:qparttailspecial}, both establish the existence of desirable properties for $q$-partitioning valuations. However, to demonstrate tightness, one would need ``hard'' constructions of $q$-partitioning valuations that are not $(q+1)$-partitioning (recall that we have given constructions of valuation functions in $\mathcal{Q}(q,[m])\setminus \mathcal{Q}(q+1,[m])$, but they are not ``hard''). It does not appear at all straightforward to adapt constructions of ``hard'' valuations that are subadditive but not fractionally subadditive (e.g.~\cite{BhawalkarR11}) to create a valuation function in $\mathcal{Q}(q,[m])\setminus \mathcal{Q}(q+1,[m])$. Indeed, there is no previous construction of valuations that are subadditive MPH-$k$ but not subadditive MPH-$(k-1)$ (even without restricting to ``hard'' constructions). Constructing such functions (for which, e.g., the arguments made in \cref{section:postedprices,section:introconcentration} are tight) is therefore an important open direction.\\
\noindent\textbf{Applications in Algorithmic Game Theory.} Guarantees of posted-price mechanisms are perhaps the most notable domain within algorithmic game theory where fractionally subadditive and subadditive functions are far apart. Still, there are other settings where they are separated. For example, in best-response dynamics in combinatorial auctions, a dynamics leading to a constant fraction of the optimal welfare exists in the fractionally subadditive case, but there is a $O(\frac{\log \log m}{\log m})$ impossibility result in the subadditive case \cite{DuttingK22}. There are also constant-factor gaps between approximations achievable in polynomial communication complexity for combinatorial auctions~\cite{DobzinskiNS10,Feige09,EzraFNTW19}, and for the price of anarchy of simple auctions~\cite{RoughgardenST16}. Note that in the context of communication complexity of combinatorial auctions, \cref{lem:qpartandmphk} combined with the results for two players in
\cite{EzraFNTW19} already imply improved communication protocols across the interpolation, but no lower bounds stronger than what can be inherited from fractionally subadditive valuations are known.\\
\noindent\textbf{Concentration Inequalities.} In the proof of \cref{thm:talagrandgenerals}, we have followed the approach of Talagrand in \cite{Talagrand96}. Dembo provides an alternative proof of the special case of this theorem for $s = 1$ using a more systematic approach
for proving isoperimetry
based on
information inequalities in \cite{Dembo97}. It is interesting to understand to what extent the inequality in \cref{thm:talagrandgenerals} can be recovered using the information inequalities framework in \cite{Dembo97}. Moreover, the state-of-the-art provides two different approaches to concentration inequalities at the two extremes of the partitioning interpolation. This results in one approach yielding sharper guarantees near $q=m$, and the other near $q=2$. It is important to understand the (asymptotically) optimal tail bounds across the partitioning interpolation, and it is interesting to understand whether there is a unified approach that yields (asymptotically) optimal tail bounds across a broad range of $\{2,\ldots, m\}$.
\section{Strict Inclusion of Classes}
\label{appendix:existenceproblem}
\begin{proof}[Proof of \cref{prop:existenceproblem}]
First, partition $[m]$ arbitrarily into $q$ nonempty parts $S_1, S_2, \ldots, S_q.$ Then, by choosing the fractional cover $\alpha$ of $q$ for which $\alpha (I) = \frac{1}{q-1}$ whenever $|I| = q-1$ and $\alpha (I) = 0$ otherwise, we conclude that
$$
v([m])\le \frac{1}{q-1}\sum_{i\in [q]}v([m]\backslash S_i) = 1+ \frac{1}{q-1}.$$
We also need to prove that when $v([m]) = 1+ \frac{1}{q-1},$ the valuation $v$ actually satisfies the $q$-partitioning property. To do so, take any $S\subseteq [m],$ any partition of $S$ into $q$ disjoint subsets $S_1, S_2, \ldots, S_q,$ and any fractional cover $\alpha$ of $[q].$ We need to show that the inequality in \cref{def:qpartprimal} is satisfied. First, note that unless $S = [m],$ the inequality is trivial. Furthermore, we can assume that all $q$ subsets $S_i$ are nonempty. Indeed, if exactly $r\le q$ of the sets are nonempty, then we essentially need to prove that $v$ satisfies the $r$-partitioning property. This follows by induction on $q$ as $\frac{1}{q-1}\le \frac{1}{r-1}.$ Finally, we can assume that $\alpha([q]) = 0.$ Otherwise, there are two cases. Either $\alpha([q]) \ge 1,$ which is trivial. Or, we can modify $\alpha$ to $\alpha'$ by setting $\alpha'([q]) = 0$ and $\alpha'(T) = \frac{1}{1-\alpha([q])}\alpha(T)$ for $T\subsetneq [q].$ Proving the statement for $\alpha'$ directly implies it for $\alpha$ as well.
\\
Now, construct a fractional cover $\beta$ from $\alpha$ in the following way. For any $\emptyset\subsetneq I\subsetneq[q],$ define $c(I)$ to be an arbitrary set of size $q-1$ containing $I.$ Then, $\beta(J) = 0$ whenever $|J|\neq q-1$ and $\beta(J) =\sum_{I \in c^{-1}(J)}\alpha(I)$ otherwise. Clearly, $\beta$ is also a fractional cover of $[q],$ and (using that $v(I) = 1$ whenever $\emptyset \subsetneq I \subsetneq [m]$), we compute
$$
\sum_{{I}\subseteq [q]} \alpha({I})v(\bigcup_{i \in {I}}S_i) =
\sum_{\emptyset \subsetneq {I}\subsetneq [q]} \alpha({I}) =
\sum_{J\subseteq [q] : \; |J| = q-1}\beta(J).
$$
Now, using that $\beta$ is a fractional cover of $[q],$ we conclude that
$$
\sum_{J\subseteq [q] : \; |J| = q-1}\beta(J) =
\frac{1}{q-1}\sum_{i \in [q]}\sum_{J :\; i \in J}\beta(J) \ge
\frac{1}{q-1}\sum_{i \in [q]}1 = \frac{q}{q-1} = v([m]),
$$
as desired.
\end{proof}
\section{Smoothness of The Partitioning Interpolation}
\label{section:properties}
Here, we prove the smoothness property of the $q$-partitioning interpolation given in \cref{thm:smoothness}.
\begin{proof}[Proof of \cref{thm:smoothness}]
Take any $q$-partitioning valuation $v,$ subset $S\subseteq [m],$ partitioning $S_1, S_2, \ldots, S_{q+1}$ of $S,$ and fractional cover $\alpha$ of $[q+1].$ We want to show that
$$
\sum_{I\subseteq [q+1]} \alpha(I)
v(\bigcup_{i\in I}S_i)\ge
\frac{q-1}{q}v(S).
$$
We proceed as follows:
$$
(q+1)\sum_{I\subseteq [q+1]} \alpha(I)
v(\bigcup_{i\in I}S_i) =
\sum_{J \subseteq [q+1] : |J| = q}
\sum_{I\subseteq [q+1]} \alpha(I)
v(\bigcup_{i\in I}S_i) \ge
$$
$$
\sum_{J \subseteq [q+1]\; : \; |J| = q}\; \;
\sum_{T \subseteq J} v(\bigcup_{i \in T}S_i)\; \;
\sum_{I \subseteq [q+1]\; : \;I\cap J = T} \alpha (I) \ge
\sum_{J \subseteq [q+1]\; : \;|J| = q}
v(\bigcup_{i \in J}S_i).
$$
The last inequality holds
since $\beta^J$ defined as $\beta^J(T):= \sum_{I \subseteq [q+1] : I\cap J = T} \alpha (I)$ is a fractional cover of $J,$ the valuation $v$ is $q$-partitioning,
and $|J| = q.$\\
\noindent
Now, note that for any $\{i,j\}\subseteq [q+1],$ we have:
$$
\sum_{k\; : \; k \neq i, k \neq j}v(\bigcup_{t\; : \;t\neq k}S_t) +
\frac{1}{2}( v(\bigcup_{t\; : \;t\neq i}S_t) +
v(\bigcup_{t\; : \;t\neq j}S_t))\ge
$$
$$
\sum_{k\; : \;k \neq i, k \neq j}v(\bigcup_{t\; : \;t\neq k}S_t) + v(\bigcup_{t\; : \;t\neq i, t\neq j}S_t)\ge
(q-1)v(S),
$$
as given by the partition $S_i\cup S_j$ and $S_k$ for $k\not \in \{i,j\}$ of $S$ into $q$ parts. Taking the sum over all pairs $i\neq j,$ we conclude that
$$
\left(\binom{q}{2} + \frac{1}{2}q\right)\sum_{J \subseteq [q+1]\; : \;|J| = q}
v(\bigcup_{i \in J}S_i) \ge
\binom{q+1}{2}(q-1)v(S).
$$
Therefore,
$$
(q+1)\left(\binom{q}{2} + \frac{1}{2}q\right)\sum_{I\subseteq [q+1]} \alpha(I)
v(\bigcup_{i\in I}S_i)\ge
\binom{q+1}{2}(q-1)v(S),
$$
from which the conclusion follows.
\end{proof}
\begin{remark}
\label{remark:closenessgap}
\normalfont
We can show that the ratio $\frac{q-1}{q}$ in \cref{thm:smoothness} cannot be improved beyond $ \frac{q^2 - 1}{q^2}$ using
the $q$-partitioning valuation constructed in \cref{prop:existenceproblem}. Namely, take $v$ so that $v(\emptyset) =0,
v(I) = 1$ whenever $0 <|I|<m$ and $v([m]) =\frac{q}{q-1}.$ Now, take any $q+1$ disjoint non-empty sets $S_1, S_2, \ldots, S_{q+1}$ and consider the fractional covering $\alpha$ of $[q+1]$
assigning weight $\frac{1}{q}$ to all $J\subseteq [q+1]$ of size $q.$ Then, the fractional value
$\displaystyle \sum_{J \subseteq [q+1]}\alpha(J)v(\cup_{j \in J}S_j)$
is $\frac{q+1}{q} = (1 - \frac{1}{q^2})\frac{q}{q-1}.$ Finding the optimal ratio in $[1 - \frac{1}{q}, 1 - \frac{1}{q^2}]$ is an interesting open problem.
\end{remark}
\section{Equivalence of the Dual and Primal Definitions}
\label{section:definitionequivalence}
The proof that \cref{def:qpartprimal} and \cref{def:qpartdual} are equivalent is a simple application of
linear programming duality. Namely, in \cref{eq:qpartprimalLP}, for each constraint
$$
\sum_{j \in I}
\alpha(I)\ge 1,
$$
we create a non-negative dual variable $p_j \ge 0.$ Taking the dual program gives exactly
\cref{eq:qpartdualLP}, so the two linear programs have the same optimal value (as both are trivially feasible).
\section{Chernoff Bounds}
\begin{theorem}[{\cite[Theorem 4.4]{Mitzenmacher05}}]
\label{thm:chernoff}
Suppose that $Y_1, Y_2, \ldots, Y_r$ are $r$ independent random variables taking values in $\{0,1\}.$ Let $\mu = \sum_{i=1}^r \mathbf{E}[Y_i]$ be their mean and $\delta>0$ be an arbitrary real number. Then,
$$
\mathbf{P}\left[\sum_{i = 1}^r Y_i \ge \mu(1 + \delta)\right]
\le
\left(
\frac{e^\delta}{(1+\delta)^{1+\delta}}
\right)^{\mu}.
$$
\end{theorem}
\section{Incomplete Information Posted Price Mechanism}\label{sec:incompleteposted}
Here, we tackle the true Bayesian setting, when the posted price mechanism is not restricted to point-mass distributions. To do so, we first introduce some further notation from \cite{DuttingKL20}. Write $\mathbf{v}$ for the valuation vector coming from distribution $\mathcal{D} = \mathcal{D}_1\times \mathcal{D}_2\cdots\times\mathcal{D}_n.$ Respectively, write $\mathsf{OPT}(\mathbf{v})$ for (an) optimal allocation with valuations $\mathbf{v},$ where $\mathsf{OPT}(\mathbf{v}) = (\mathsf{OPT}_1(\mathbf{v}),\mathsf{OPT}_2(\mathbf{v}),\ldots, \mathsf{OPT}_n(\mathbf{v})).$ Denote the social welfare under these valuations by $\mathbf{v}(\mathsf{OPT}(\mathbf{v})).$ Phrased in these terms, we need to design a posted price mechanism with expected social welfare $\Omega\left(\frac{\log \log q}{\log \log m}\right)\mathbf{E}_\mathbf{v}[\mathbf{v}(\mathsf{OPT}(\mathbf{v}))].$\\
\noindent
The set $\Delta$ is defined as in \cref{section:postedprices}. We also need the following extra notation, which extends $\Delta$ to $n$-tuples of distributions.
$$
\Gamma(p):=
\left\{\{\nu^i\}_{i=1}^n \in \Delta(p)| \sum_{i=1}^n
\mathbf{P}_{S\longleftarrow \nu^i}[j \in S]\le p \text{ holds for all }j \in [m]\right\}.
$$
Now, the equivalent statement to
\cref{lem:minimaxgame} in the framework of
\cite{DuttingKL20}
is the following.
\begin{lemma}[{\cite[p.268]{DuttingKL20}}]
\label{lem:minimaxgamebayes}
A class of monotone valuations $\mathcal{G}$ over $[m]$ and a product distribution
$\mathcal{D} = \mathcal{D}_1\times \mathcal{D}_2\cdots\times\mathcal{D}_n$ over valuations in $\mathcal{G}$ are given.
If there exists a real number $p \in [0,1]$ (possibly depending on $\mathcal{D}$), such that
$$
\mathbf{E}_{\mathbf{v}}\left[
\sup_{\lambda \in \Gamma(p)}
\inf_{\mu \in \Delta(p)}
\sum_{i = 1}^n
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_i \longleftarrow \mu}\mathbf{v}^i(S_i\backslash T_i)
\right]\ge
\alpha\times \mathbf{E}_\mathbf{v}[\mathbf{v}(\mathsf{OPT}(\mathbf{v}))],
$$
then there exists an $\alpha$-competitive posted price mechanism for the product distribution $\mathcal{D}.$
\end{lemma}
\noindent
As in \cref{section:postedprices}, we prove that when $\mathcal{G}$ is the set of $q$-partitioning valuations, the above lemma holds with\linebreak $\alpha = \Omega\left(\frac{\log \log q}{\log \log m}\right).$ The incomplete information case, however, is more technically challenging, so we break the proof into several lemmas. First, we need some more notation. Denote
$$
g^\mathbf{v}(q) =
\sup_{\lambda \in \Gamma(p)}
\inf_{\mu \in \Delta(p)}
\sum_{i = 1}^n
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_i \longleftarrow \mu}[\mathbf{v}^i(S_i\backslash T_i)]
$$
for any valuation vector $\mathbf{v}$ and
$$
f^\mathbf{v}(q) =
\sup_{\lambda \in \Gamma(p)}
\sum_{i = 1}^n
\mathbf{E}_{S_i\longleftarrow \lambda^i}\mathbf{v}^i(S_i).
$$
Again, we assume that $q>4$ is a perfect power of $2$ and $q = 2^r,s = \lceil\log_{\frac{r}{2}}\log_{16} m^2\rceil.$
Let\linebreak $\displaystyle L = \left\{ {- \left(\frac{r}{2}\right)^i}|
i \in \{0,1,\ldots, s-1\}\right\}
.$ Note that $|L| = s .$ Our first lemma is the following
\begin{lemma}
\label{lem:postedpriceswithoutexpectation}
For any vector $\mathbf{v}$ of $q$-partitioning valuations, the following inequality holds
$$
\frac{1}{s}\sum_{\ell \in L}
g^\mathbf{v}(16^\ell)\ge
\frac{1}{8s}
\left(\frac{1}{16} - \frac{1}{m}\right)
\mathbf{v}(\mathsf{OPT}(\mathbf{v})).
$$
\end{lemma}
\begin{proof}
Note that $f^\mathbf{v}(16^{-1})\ge \frac{1}{16}\mathbf{v}(\mathsf{OPT}(\mathbf{v})).$ Indeed, the vector of independent distributions $(\lambda^i)_{i=1}^n$ such that $S_i \longleftarrow \lambda^i$ satisfies $S_i = \mathsf{OPT}_i(\mathbf{v})$ with probability $1/16$ and $S_i = \emptyset$ with probability $15/16$ is in $\Gamma(16^{-1}).$ This is true because the sets $\mathsf{OPT}_i(\mathbf{v})$ are disjoint. Similarly,
$f^\mathbf{v}(\frac{1}{m^2})\le \frac{1}{m}$ holds as shown in \cite[Lemma A.2]{DuttingKL20}. Therefore, using the telescoping trick in \cref{prop:qpartprobexists}, it is enough to show that
\begin{equation}
\label{eq:tobetelescoped}
g^\mathbf{v}(16^\ell)\ge
\frac{1}{8}\left(
f^\mathbf{v}(16^{\ell}) -
f^\mathbf{v}(16^{\frac{\ell r}{2}})\right)
\end{equation}
holds for all $\ell \in L.$ In fact, we will prove something stronger. For any $p\le \frac{1}{16}$ and any $\lambda \in \Gamma(p),\mu \in \Delta(p),$ there exists some $\sigma \in \Gamma(p^{\frac{r}{2}})$ such that
\begin{equation}
\label{eq:onestepminimax}
\sum_{i = 1}^n
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_i \longleftarrow \mu}[\mathbf{v}^i(S_i\backslash T_i)]\ge
\frac{1}{8}\sum_{i = 1}^n
\mathbf{E}_{S_i\longleftarrow \lambda^i}[\mathbf{v}^i(S_i)] -
\frac{1}{8}\sum_{i = 1}^n
\mathbf{E}_{A_i\longleftarrow \sigma^i}[\mathbf{v}^i(A_i)].
\end{equation}
Taking suprema on both sides yields \cref{eq:tobetelescoped}. We prove this fact in similar manner to \cref{prop:qpartprobexists}. Let $T_{i,u}$ for $1\le u \le r$ be $rn$ independent sets from the distribution $\mu.$ Then, for any $i \in [n],$
$$
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_i \longleftarrow \mu}[\mathbf{v}^i(S_i\backslash T_i)] =
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_{i,u} \longleftarrow \mu}\left[\frac{1}{r}\sum_{u = 1}^r
\mathbf{v}^i(S_i\backslash T_{i,u})\right].
$$
Now, let $A_i$ be the set of elements of $S_i$ that appear in more than $\frac{7r}{8}$ of the sets
$T_{i,1}, T_{i,2}, \ldots, T_{i,r}.$ Let $\sigma^i$ be the distribution of $A_i.$ The same reasoning as in \cref{prop:qpartprobexists} shows that
$$
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_{i,u} \longleftarrow \mu}\left[\frac{1}{r}\sum_{u = 1}^r
\mathbf{v}^i(S_i\backslash T_{i,u})\right]\ge
\frac{1}{8}\mathbf{E}_{S_i \longleftarrow \lambda^i}[\mathbf{v}^i(S_i)] -
\frac{1}{8}\mathbf{E}_{A_i \longleftarrow \sigma^i}[\mathbf{v}^i(A_i)].
$$
Thus, summing over $i,$ we conclude that
$$
\sum_{i = 1}^n
\mathbf{E}_{S_i\longleftarrow \lambda^i, T_i \longleftarrow \mu}[\mathbf{v}^i(S_i\backslash T_i)]\ge
\frac{1}{8}\sum_{i = 1}^n \mathbf{E}_{S_i \longleftarrow \lambda^i}[\mathbf{v}^i(S_i)] -
\frac{1}{8}\sum_{i = 1}^n \mathbf{E}_{A_i \longleftarrow \sigma^i}[\mathbf{v}^i(A_i)].
$$
To conclude, we simply need to show that $\sigma \in \Gamma(p^{\frac{r}{2}}).$ This holds for the following reason. Take some $j \in [m].$ Define $p_i = \mathbf{P}_{S_i\longleftarrow \lambda^i}[j \in S_i].$ By the definition of $\Gamma(p),$ we know that $\sum_{i=1}^n p_i \le p.$ On the other hand, as each $p_i$ satisfies $p_i \le p \le \frac{1}{16},$ as in \cref{prop:qpartprobexists}, we conclude that
$$
\mathbf{P}_{A_i \longleftarrow \sigma^i}
[
j \in A_i
]\le
p_i^{r/2}.
$$
Thus, all we need to show is that
$$
\sum_{i = 1}^n
p_i^{r/2}\le p^{r/2}.
$$
This, however, is simple. The map $x\mapsto x^{r/2}$ is convex. Thus, its maximum on the simplex defined by $0\le p_i\; \forall i \in [n], \sum_{i = 1}^n p_i\le p$ is attained at a vertex. This exactly corresponds to $$
\sum_{i = 1}^n
p_i^{r/2}\le p^{r/2}.
$$
\end{proof}
\noindent
The statement we want is a simple corollary of \cref{lem:postedpriceswithoutexpectation}.
\begin{lemma}
\label{lem:bayesianpostedprice}
If $\mathcal{D}$ is a product distribution over $q$-partitioning valuations, there exists some deterministic $p\in [0,1]$ (potentially
depending on $\mathcal{D}$
) such that
$$
\mathbf{E}_\mathbf{v}\left[
g^\mathbf{v}(p)
\right] \ge
\Omega\left(\frac{\log \log q}{\log \log m}\right)
\mathbf{E}_\mathbf{v}[\mathbf{v}(\mathsf{OPT}(\mathbf{v}))].
$$
\end{lemma}
\begin{proof}
Using the uniform distribution $Unif(L)$ over $L,$ we conclude from \cref{lem:postedpriceswithoutexpectation}
that $$
\mathbf{E}_{\ell \longleftarrow Unif(L)}[
g^\mathbf{v}(16^\ell)] \ge \frac{1}{8s}
\left(\frac{1}{16} - \frac{1}{m}\right)
\mathbf{v}(\mathsf{OPT}(\mathbf{v})).
$$
Taking the expectation over $\mathbf{v}$ and interchanging order of expectations, we obtain
$$
\mathbf{E}_{\ell \longleftarrow Unif(L)}
\left[
\mathbf{E}_\mathbf{v}[
g^\mathbf{v}(16^\ell)]
\right]
\ge \frac{1}{8s}
\left(\frac{1}{16} - \frac{1}{m}\right)
\mathbf{E}_\mathbf{v}[\mathbf{v}(\mathsf{OPT}(\mathbf{v}))].
$$
Therefore, as $g^\mathbf{v}$ is clearly non-negative,
the moment method implies the existence of some $\ell_1$ such that
$$
\mathbf{E}_\mathbf{v}[
g^\mathbf{v}(16^{\ell_1})] \ge \frac{1}{8s}
\left(\frac{1}{16} - \frac{1}{m}\right)
\mathbf{E}_\mathbf{v}[\mathbf{v}(\mathsf{OPT}(\mathbf{v}))].
$$
Using that $\frac{1}{s} = \Omega\left(\frac{\log \log q}{\log \log m}\right),$ we complete the proof.
\end{proof}
\noindent
The statement of
\cref{thm:postedpriceqpart} for the incomplete information case follows from
\cref{lem:minimaxgamebayes} and\linebreak
\cref{lem:bayesianpostedprice}.
\section{Concentration Inequalities}
\label{section:concentration}
We discuss in full detail the concentration inequalities in \cref{section:introconcentration} and their more general versions. The setup that we consider throughout the rest of this section is the following.
A valuation $v$ over $[m]$ is given.
$S\subseteq [m]$ is a random set such that each item $j\in [m]$ appears independently in $S$ (different items might appear with different probabilities). We study how concentrated $v(S)$ is around a point of interest such as its mean or median.
Before delving into the main content, we make one important note. Concentration inequalities and tail bounds depend on the scale of the valuations. That is, the valuation $2v$ has a ``weaker'' concentration than the valuation $v$ (at least when considering an additive deviation from the mean of the form $\mathbf{E}[v(S)]\pm t$). For that reason, throughout we assume that all marginal values, i.e. values $v(S\cup\{i\}) - v(S)$ for $S\subseteq [m], i\in [m],$ are between 0 and 1. Note that if $v$ is subadditive, in particular $q$-partitioning for some $q,$ this is equivalent to $v(\{i\})\le 1\; \forall i\in [m]$.\\
\noindent
Throughout the rest of the section, by abuse of notation, we will write $v(S)$ and $v(x_1, x_2, \ldots, x_m)$ interchangeably where $(x_1, x_2, \ldots, x_m)$ is the characteristic vector of $S$ in $\{0,1\}^m.$
\subsection{Concentration via Self-Bounding Functions}
\label{section:selfbounding}
Vondrak shows that XOS and submodular functions exhibit strong concentration via self-bounding functions \cite[Corollary 3.2]{Vondrak10}. We adopt this approach and show how the bounds generalize to MPH-$k$ valuations with bounded marginal values and, in particular, to $q$-partitioning valuations (naturally, the concentration becomes weaker as $q$ decreases). This approach relies on the method of self-bounding functions.
\begin{definition}[{
\cite[Definition 2.3]{Vondrak10}}]
\label{def:selfboundingfunctions}
A function $f:\{0,1\}^m \longrightarrow \mathbb{R}$ is $(a,b)$-self-bounding if there exist real numbers $a,b>0$ and functions $f_i:\{0,1\}^{m-1}\longrightarrow \mathbb{R}$ such that if we denote\linebreak $x^{(i)} = (x_1, x_2, \ldots, x_{i-1}, x_{i+1}, \ldots, x_m),$ then for all $x\in \{0,1\}^m$ and $i\in [m]:$
$$
0\le f(x)-f_i(x^{(i)})\le 1, \text{ and }$$
$$
\sum_{i=1}^m (f(x) - f_i(x^{(i)}))\le
af(x) + b.
$$
\end{definition}
\noindent
When $f$ is monotone, the optimal choice of $f_i$ is clearly given by\linebreak
$f_i(x^{(i)}):=f(x_1, x_2, \ldots, x_{i-1},0, x_{i+1}, \ldots, x_m).$ We adopt this approach in \cref{lem:MPHslefbounding}.
An application of the entropy method gives the following concentration for self-bounding functions.
\begin{theorem}[{\cite[Theorem 3.3]{Vondrak10}}]If $f:\{0,1\}^m\longrightarrow \mathbb{R}$ is $(a,b)$-self-bounding for some $a\ge \frac{1}{3},$ and $X_1, X_2, \ldots, X_m$ are independent, then for $Z = f(X_1, X_2, \ldots, X_m)$ and $c = \frac{3a-1}{6}:$
\begin{equation*}
\begin{split}
& \mathbf{P}[Z\ge \mathbf{E}[Z] + t]\le
\exp\left(-\frac{1}{2}\times\frac{t^2}{a\mathbf{E}[Z] + b + ct}\right)\; \text{ for any }0<t,\\
&
\mathbf{P}[Z\le \mathbf{E}[Z] - t]\le
\exp\left(-\frac{1}{2}\times\frac{t^2}{a\mathbf{E}[Z] + b}\right)\;\text{ for any }0<t<\mathbf{E}[Z].
\end{split}
\end{equation*}
\end{theorem}
\noindent
Vondrak \cite[Lemma 2.2]{Vondrak10} shows that XOS valuations (also $m$-partitioning or MPH-1) are $(1,0)$-self-bounding.
We generalize as follows.
\begin{proposition}
\label{lem:MPHslefbounding}
Any (not necessarily subadditive) MPH-$k$ valuation with all marginal values in $[0,1]$ is $(k,0)$-self-bounding. In particular, this holds for all subadditive MPH-$k$ valuations $f$ such that\linebreak $f(\{i\})\le 1\; \forall i \in [m].$
\end{proposition}
\begin{proof}
We make the canonical choice $f_i(x^{(i)}):=f(x_1, x_2, \ldots, x_{i-1},0, x_{i+1}, \ldots, x_m)$ in the proof. In other words, $f_i(S):=f(S\backslash \{i\}).$
Note that $0\le f(x)-f_i(x^{(i)})\le 1$ follows from the fact that $f$ is monotone and all marginal values are bounded by 1.
We need to prove that for any $S\subseteq [m],$ it is the case that
$$
\sum_{j \in [m]}(f(S) - f(S\backslash\{j\}))\le
kf(S).$$
The latter is clearly equivalent to
$$
\sum_{j \in S}(f(S) - f(S\backslash\{j\}))\le
kf(S),
$$
which is the statement of \cite[Lemma 6.2]{EzraFNTW19}.
\end{proof}
\noindent
Combining the above propositions with \cref{lem:qpartandmphk}, we immediately deduce \cref{thm:selfboundingqpart}.\\
\noindent
Note that this result is potentially useful only when $\mathbf{E}[v(S)] \ll q.$ Otherwise Azuma's inequality \cite{Azuma} implies that $v(S)$ is $m$-subgaussian. In fact, this holds for any 1-Lipschitz (not necessarily subadditive!) function $f:\{0,1\}^m\longrightarrow \mathbb{R}.$\\
\noindent
So, can we derive any tail bounds for
small values of $q$ (in particular, subadditive valuations) which are better than what is already true for any $1$-Lipschitz valuations over $[m]$? It turns out that the answer to this question is ``yes'' and this is the subject of the next section. Before turning to it, however, we note that we cannot hope to do much better than \cref{thm:selfboundingqpart}
using the method of self-bounding functions.
\begin{example}
\normalfont
For any $2\le q\le m,$ there exists a symmetric $q$-partitioning valuation over $[m]$ with marginal values in $[0,1]$ which is not $(a,0)$-self-bounding for any $a<\frac{m}{q}.$ Indeed, consider the $q$-partitioning function $v$ constructed in \cref{prop:existenceproblem}, defined by $v(\emptyset) = 0, v(S) = 1$ for $\emptyset \subsetneq S\subsetneq [m],$ and
$v([m]) = \frac{q}{q-1}.$ Suppose, for the sake of contradiction, that $v$ is $(a,0)$-self-bounding for some $a<\frac{m}{q}$ and some choice of functions $v_i.$
Then, for any $S$ with characteristic vector $x$ and $i\in [m],$ it must be the case that $v_i(x^{(i)})\le v(S\backslash \{i\}) \le 1$ by the first inequality in \cref{def:selfboundingfunctions}. As a result, when we set $x$ to be the all-ones vector in the second inequality, we conclude that $a\frac{q}{q-1}\ge \frac{m}{q-1}.$ Thus, $a\ge \frac{m}{q},$ which is a contradiction.
\end{example}
\subsection{Tail Bounds via an Isoperimetric Inequality}
We now
take a deeper look into the stated isoperimetric inequalities in \cref{section:introconcentration}. To do so, we first present some background on
\cref{thm:talagrandgenerals} and the proof of the statement.
\subsubsection{The Isoperimetric Inequality}
We consider the setup introduced in \cref{section:detourisoperimetry}.
In \cite[Section 3.1.1]{Talagrand01} the author considers the special case $s = 1,$ and in \cite[Section 5.7]{Talagrand96} he considers the special case $s = q-1.$ In particular, the inequalities he proves are the following.
\begin{theorem}[{\cite[Section 3.2]{Talagrand01}}]
\label{thm:talagrands1} Suppose that $\alpha>0$ is a real number and $z(q,\alpha)$ is the larger root of the equation $z+q\alpha z^{-\frac{1}{\alpha}} = 1+q\alpha.$
Then
$$
\int_{\Omega}
z(q,\alpha)^{f^1(A_1, A_2, \ldots, A_q; x)}d\mathbf{P}(x)\le
\frac{1}{\prod_{i=1}^q \mathbf{P}[A_i]^\alpha}.
$$
In particular, setting $A= A_1 = A_2= \ldots = A_q, \alpha = 1, z= q,$ one has
$$\mathbf{P}[f^1(\underbrace{A, A, \ldots, A}_{q}; x)\ge k]\le
q^{-k}\mathbf{P}[A]^{-q}.$$
\end{theorem}
\noindent
\cref{thm:talagrands1} is the main tool in Schechtman's bound (see \cref{section:introconcentration}).
The result for $s = q-1$ looks similar:
\begin{theorem}
\label{thm:talagrandsq1}
(\cite[Theorem 5.4]{Talagrand96}) Suppose that $\tau$ is the positive root of $e^{\tau/2} +e^{-\tau} = 2.$ Then,
$$
\int_{\Omega}
e^{\frac{\tau}{q}f^{q-1}(A_1, A_2, \ldots, A_q; x)}d\mathbf{P}(x)\le
\frac{1}{\prod_{i=1}^q \mathbf{P}[A_i]^{1/q}}.
$$
In particular, setting $A= A_1 = A_2= \ldots = A_q$ one has
$$\mathbf{P}[f^{q-1}(\underbrace{A, A, \ldots, A}_{q}; x)\ge k]\le
e^{-\frac{\tau k}{q}}\mathbf{P}[A]^{-1}.$$
\end{theorem}
\noindent
In this section, we provide a uniform view on
\cref{thm:talagrands1} and \cref{thm:talagrandsq1} by proving \cref{thm:talagrandgenerals}.
First, we will demystify the number $t(\alpha, q, s).$ It comes from the following fact.
\begin{lemma}
\label{lemma:talphaqs}
Suppose that $x_1, x_2, \ldots, x_q$ are real numbers in $[0,1]$ and $\alpha \ge \frac{1}{s}.$ Then, for $t(\alpha,q,s)$ defined as in \cref{thm:talagrandgenerals}, one has
$$
\min \left(
t(\alpha, q, s),
\min_{1\le i_1<i_2<\cdots <i_s\le q}
(x_{i_1}x_{i_2}\cdots x_{i_s})^{-\alpha}
\right)
+ \alpha \sum_{i = 1}^q x_i \le
\alpha q + 1.
$$
Furthermore, $t(\alpha,q,s)$ is the largest $t$ with this property. We use the convention that $0^{-\alpha} = +\infty.$
\end{lemma}
\noindent
As the proof of \cref{lemma:talphaqs} provides no insight and is just a computation, we postpone it to \cref{section:appendixtalphaqs}.
We also remark that a version of \cref{thm:talagrandgenerals} holds for $\alpha <\frac{1}{s}$ as well, but the definition of $t(\alpha,q,s)$ becomes even more complicated (see \cref{rmk:alphalessthansiso}).
We now turn to the proof of \cref{thm:talagrandgenerals}.
\begin{proof}[Proof of \cref{thm:talagrandgenerals}] We follow closely the proof of \cref{thm:talagrandsq1} due to Talagrand in \cite{Talagrand96}. We proceed by induction over $N,$ the dimension of the product space.
\noindent
\textit{Base:}
When $N = 1,$ let $g_i$ be the indicator function of $A_i$ for $1\le i \le q.$ Observe that
$$
\int_{\Omega}
t(\alpha, q, s)^{f^s(A_1, A_2, \ldots, A_q; x)}d\mathbf{P}(x) =
$$
$$
\int_{\Omega}
\min \left(
t(\alpha, q, s),
\min_{1\le i_1<i_2<\cdots <i_s\le q}
(g_{i_1}(x)g_{i_2}(x)\cdots g_{i_s}(x))^{-\alpha}
\right)
d\mathbf{P}(x).
$$
Indeed, this is true because $f^s(A_1, A_2, \ldots, A_q; x) = 0$ when there exist $s$ sets $A_{i_1}, A_{i_2}, \ldots, A_{i_s}$ to which $x$ belongs and $f^s(A_1, A_2, \ldots, A_q; x) = 1$ otherwise, simply by the definition of $f^s.$ Using \cref{lemma:talphaqs},
we conclude that
$$
\int_{\Omega}
\min \left(
t(\alpha, q, s),
\min_{1\le i_1<i_2<\cdots <i_s\le q}
(g_{i_1}(x)g_{i_2}(x)\cdots g_{i_s}(x))^{-\alpha}
\right)
d\mathbf{P}(x)\le
$$
$$
1 + \alpha q - \alpha \sum_{i=1}^q\int_{\Omega} g_i(x)d\mathbf{P}(x) =
1 + \alpha q - \alpha\sum_{i=1}^q\mathbf{P}[A_i].
$$
Now, we will use the well-known inequality $1 + \log x \le x$ twice, as follows
$$
1 + \alpha q - \alpha\sum_{i=1}^q\mathbf{P}[A_i] =
1 + \alpha \sum_{i=1}^q (1 - \mathbf{P}[A_i]) \le
1 + \alpha \sum_{i = 1}^q \log \frac{1}{\mathbf{P}[A_i]} =
$$
$$
1 + \log \prod_{i=1}^q \frac{1}{\mathbf{P}[A_i]^\alpha} \le
\prod_{i=1}^q \frac{1}{\mathbf{P}[A_i]^\alpha},
$$
with which the base case is completed.\\
\noindent
\textit{Inductive Step:} Now, let $A_1, A_2, \ldots, A_q$ all belong to $\Omega = \Omega'\times \Omega_{N+1},$ where $\Omega' = \prod_{i=1}^N\Omega_i.$ For each $i\in [q],w\in \Omega_{N+1}$ define the following sets:
$$
A_i(w) = \{x\in \Omega'\; : \; (x,w) \in A_i \},
$$
$$
B_i = \bigcup_{w\in \Omega_{N+1}}A_i(w).
$$
Fix some $w\in \Omega_{N+1}.$ For $I\subseteq [q]$ with $|I| = s,$ denote
$C^I_i = A_i(w)$ whenever $i \in I$ and $C^I_i = B_i$ whenever $i \not \in I.$ Then, we can make the following observations:
$$
f^s(A_1, A_2, \ldots, A_q; (x,w))\le
1 + f^s(B_1, B_2, \ldots, B_q; x)\; \forall (x,w)\in \Omega,
$$
$$
f^s(A_1, A_2, \ldots, A_q; (x,w))\le
f^s(C^I_1, C^I_2, \ldots, C^I_q; x)\; \forall (x,w)\in \Omega, I\subseteq [q] \text{ with }|I| = s.
$$
Indeed, the first inequality follows from the following fact. If $(b^1, b^2, \ldots, b^q)\in B_1\times B_2\times\cdots \times B_q,$ then for each $i, $ we can find some $a^i = (b^i, w^i)\in A_i.$ Clearly,
$$
f^s(a^1, a^2, \ldots, a^q; (x,w))\le
1 + f^s(b^1, b^2, \ldots, b^q; x)
$$
as the only extra coordinate that may appear less than $s$ times in the required multiset in \cref{eq:definefs} is the $N+1$'th coordinate $w.$ The second inequality follows from the same fact except that we choose $a^i = (b^i,w)$ whenever $i\in I.$\\
\noindent
Having those two inequalities, we can fix $w$ and compute:
$$
\int_{\Omega'}
t(\alpha, q, s)^{f^s(A_1, A_2, \ldots, A_q; (x,w))}d\mathbf{P}(x)\le
$$
$$
\int_{\Omega'}
\min\left(
t(\alpha, q, s)^{1+f^s(B_1, B_2, \ldots, B_q; x)},
\min_{_{|I| =s}}
t(\alpha, q, s)^{
f^s(C^I_1, C^I_2, \ldots, C^I_q; x)
}
\right)\le
$$
$$
\min\left(
\int_{\Omega'}
t(\alpha, q, s)^{1+f^s(B_1, B_2, \ldots, B_q;x)},
\min_{_{|I| =s}}
\int_{\Omega'}
t(\alpha, q, s)^{
f^s(C^I_1, C^I_2, \ldots, C^I_q; x)
}
\right).
$$
Now, we just use the inductive hypothesis as each $B_i$ and $C^I_i$ is in the space $\Omega',$ which is a product of $N$ spaces. We bound the above expression by
$$
\min\left(
t(\alpha, q,s)
\frac{1}{\prod_{i=1}^q \mathbf{P}[B_i]^\alpha},
\min_{|I| = s}
\frac{1}{\prod_{i=1}^q \mathbf{P}[C^I_i]^\alpha}
\right) =
$$
$$
\frac{1}{\prod_{i=1}^q \mathbf{P}[B_i]^\alpha}
\min\left(
t(\alpha, q,s),
\min_{1\le i_1<i_2<\ldots <i_s\le q}
\frac{\mathbf{P}[B_{i_1}]^\alpha}{\mathbf{P}[A_{i_1}(w)]^\alpha}
\frac{\mathbf{P}[B_{i_2}]^\alpha}{\mathbf{P}[A_{i_2}(w)]^\alpha}
\cdots
\frac{\mathbf{P}[B_{i_s}]^\alpha}{\mathbf{P}[A_{i_s}(w)]^\alpha}
\right)\le
$$
$$
\frac{1}{\prod_{i=1}^q \mathbf{P}[B_i]^\alpha}
\left(
1 + \alpha q - \alpha\sum_{i=1}^q\frac{\mathbf{P}[A_i(w)]}{\mathbf{P}[B_i]}\right),
$$
where in the last inequality we used \cref{lemma:talphaqs} since $\frac{\mathbf{P}[A_i(w)]}{\mathbf{P}[B_i]}\le 1$ for each $i.$ However, as we are dealing with a product measure, by Tonelli's theorem for non-negative functions, it follows that
$$
\int_{\Omega}
t(\alpha, q, s)^{f^s(A_1, A_2, \ldots, A_q; (x,w))}d\mathbf{P}(x,w)=
$$
$$
\int_{\Omega_{n+1}}\int_{\Omega'}
t(\alpha, q, s)^{f^s(A_1, A_2, \ldots, A_q; (x,w))}d\mathbf{P}(x)d\mathbf{P}(w)\le
$$
$$
\int_{\Omega_{n+1}}
\frac{1}{\prod_{i=1}^q \mathbf{P}[B_i]^\alpha}
\left(
1 + \alpha q - \alpha\sum_{i=1}^q\frac{\mathbf{P}[A_i(w)]}{\mathbf{P}[B_i]}
\right)
d\mathbf{P}(w) =
$$
$$
\frac{1}{\prod_{i=1}^q \mathbf{P}[B_i]^\alpha}
\int_{\Omega_{n+1}}
\left(
1 + \alpha q - \alpha\sum_{i=1}^q\frac{\mathbf{P}[A_i(w)]}{\mathbf{P}[B_i]}
\right)
d\mathbf{P}(w).
$$
Using the same approach as in the base case, but this time for the functions $g_i(w):=\frac{\mathbf{P}[A_i(w)]}{\mathbf{P}[B_i]},$ we bound the last expression by
$$
\frac{1}{\prod_{i=1}^q \mathbf{P}[B_i]^\alpha}\times
\frac{1}{\prod_{i=1}^q \left(\frac{\mathbf{P}[A_i]}{\mathbf{P}[B_i]}\right)^\alpha} =
\frac{1}{\prod_{i=1}^q \mathbf{P}[A_i]^\alpha},
$$
as desired.
\end{proof}
\subsubsection{Tail Bounds and Median-Mean Inequalities for \hmath$q$-Partitioning Valuations}
We first begin with a generalization of \cref{thm:qparttailspecial}, which has the same proof.
\begin{theorem}
\label{thm:qparttail}
Suppose that $v$ is a $q$-partitioning valuation over $[m],$ and $S\subseteq [m]$ is a random set in which each element appears independently. Then the following inequality holds for any $ a \ge 0, k\ge 0,$ $s,r\in \mathbb{N}$ such that $1\le s < r\le \log_2 q,$ and $\alpha \ge \frac{1}{s}:$
$$
\mathbf{P}[v(S)\ge \frac{r}{s}a+k]\le
t(\alpha, r, s)^{-k}\mathbf{P}[v(S)\le a]^{-\alpha r}.
$$
In particular, choosing $a$ to be the median, $\alpha = \frac{1}{s}, t(\alpha, r, s) = \frac{r}{s},$ we recover \cref{thm:qparttailspecial}.
\end{theorem}
\noindent
Note that in the proof of \cref{thm:qparttailspecial}, we only needed $r\le \log_2 q$ to ensure that the sets $M_1, M_2, \ldots, M_r$ ``split'' $[m]$ into at most $q$ parts. This assumption, however, is unnecessary if $v$ is XOS (or $q = m$) as one cannot ``split'' $[m]$ into more than $m$ parts. This gives rise to even more ``fine-grained'' inequalities in the XOS case. To present them, however, we will make a slight change of notation. Before, we defined $t(\alpha, r,s)$ as the larger root of $t + \alpha r t^{-\frac{1}{\alpha s}} = \alpha r + 1.$
We can take $r$ and $s$ to be arbitrary integers satisfying $r>s$ when $v$ is XOS as discussed. Thus, the ratio $\frac{r}{s}$ can approximate an arbitrary real number $1+ \delta > 1.$ With this in mind, we make the following twist. Denote by $\xi(\psi, \delta)$ the larger root of the equation $\xi + \psi \xi^{-\frac{1+\delta}{\psi}} = \psi+1$ for some $\psi \ge 1+\delta >1.$ This is essentially the same equation after the substitution $\psi =\alpha r, 1+ \delta = \frac{r}{s}.$ The condition $\alpha \ge \frac{1}{s}$ is equivalent to $\psi \ge 1+\delta.$
We have:
\begin{theorem}
\label{thm:tailboundxos}
Suppose that $v$ is a valuation over $[m]$ that is $\beta$-close (in the sense of \cref{def:closeness}) to being XOS and $S\subseteq [m]$ is a random set in which each element appears independently. Then the following inequality holds for any real numbers $\psi\ge 1+\delta>1,a>0, k\ge 0$
$$
\mathbf{P}[v(S)\ge \frac{(1+\delta)}{\beta}a+k]\le
\xi(\psi, \delta)^{-k}\mathbf{P}[v(S)\le a]^{-\psi}.
$$
In particular, choosing $a$ to be the median, $\psi = 1+\delta,\xi = 1+\delta$ the inequality becomes
$$
\mathbf{P}[v(S)\ge \frac{(1+\delta)}{\beta}a+k]\le
\left(1+\delta\right)^{-k}2^{1+\delta}.
$$
\end{theorem}
\begin{proof}
The proof is analogous to the one of \cref{thm:qparttail}, except that this time we have
$$
\beta v(T\backslash M)\le \sum_{i=1}^r \frac{1}{s}v(M_i)\le
\sum_{i=1}^r \frac{1}{s}v(T_i) =
\sum_{i=1}^r \frac{1}{s}v(y^i)
\le
\frac{ra}{s}
$$
as $v$ is $\beta$-close to being XOS.
\end{proof}
\noindent
We end with a discussion of median-mean inequalities, which are part of the motivation for our endeavour in this section. Namely, in \cite{RubinsteinW18}, the authors use the following crucial property of 1-Lipschitz subadditive valuations. Using Schechtman's bound (see \cref{section:introconcentration}), they obtain
$\mathbf{E}[v(S)]\le
3\mathbf{Med}[v(S)] + O(1)
.$ We generalize this as follows.
\begin{proposition}
\label{prop:mediantomean}
Suppose that the non-negative random variable $Z$ satisfies the following inequality for some $0<\delta\le 1,$ and any $k >0.$
$$
\mathbf{P}[Z\ge (1 + \delta)\mathbf{Med}[Z] + k]\le
(1+\delta)^{-k}2^{1+\delta}.
$$
Then, $\mathbf{E}[Z]\le (1 + \delta)\mathbf{Med}[Z] + O(\frac{1}{\delta}).$
\end{proposition}
\begin{proof}
Let $k$ be some non-negative real number that we will choose later. Then,
$$
\mathbf{E}[Z] =
\int_0^{+\infty}
\mathbf{P}[Z\ge t]dt\le
(1 + \delta)\mathbf{Med}[Z] + k
+
\int_{k}^{+\infty}
\mathbf{P}[Z\ge (1 + \delta)\mathbf{Med}[Z] + t]d t\le
$$
$$
(1 + \delta)\mathbf{Med}[Z] + k
+
\int_{k}^{+\infty}
2^{1+\delta}(1+\delta)^{-t}dt =
(1 + \delta)\mathbf{Med}[Z] + k
+
2^{1+\delta}\frac{1}{\ln (1 + \delta)}(1+\delta)^{-k}.
$$
Choosing $k = \frac{1}{\ln(1+\delta)}$ and using the inequality
$0 <\delta \le 1,$ which also implies
$\frac{1}{\delta}\le \frac{1}{\ln (1+\delta)}\le \frac{2}{\delta},$ we bound the above expression by
$$
(1 + \delta)\mathbf{Med}[Z] + O\left(\frac{1}{\delta}\right).
$$
\end{proof}
\noindent
Applying this statement to $q$-partitioning valuations, we obtain the following two corollaries.
\begin{corollary}
\label{cor:medianmeanqpart}
If $v$ is $q$-partitioning, then
$\mathbf{E}[v(S)]\le (1 + O(\frac{1}{\log q}))\mathbf{Med}[v(S)] + O(\log q).$
\end{corollary}
\begin{proof}
We just apply \cref{prop:mediantomean} for $\delta = \frac{1}{\lceil\log q\rceil}$ and combine with
\cref{thm:qparttail}.
\end{proof}
\begin{corollary}
If $v$ is XOS,
then $\mathbf{E}[v(S)]\le \mathbf{Med}[v(S)] + O(\sqrt{\mathbf{Med}[v(S)]}).$
\end{corollary}
\begin{proof}
We apply \cref{prop:mediantomean} with $\delta = \frac{1}{\sqrt{\mathbf{Med}[v(S)]}}$ and combine with
\cref{thm:tailboundxos}.
\end{proof}
\noindent
Note that the last result matches the state-of-the-art median-mean bound for XOS function implied by the $\mathbf{E}[v(S)]$-subgaussian behaviour of $v.$ Indeed, the lower tail-bound in \cref{thm:selfboundingqpart} for $v$ XOS
implies that $\mathbf{Med}[v(S)]\ge \mathbf{E}[v(S)] -
O(\sqrt{\mathbf{E}[v(S)]}).
$ More generally, median-mean bounds can also be derived using \cref{thm:selfboundingqpart}, but are only useful for large $q$ unlike \cref{cor:medianmeanqpart}. Namely,
$\mathbf{Med}[v(S)]\ge \mathbf{E}[v(S)] -
O(\sqrt{\frac{m}{q}\mathbf{E}[v(S)]})
$ holds for $q$-partitioning valuations.
\subsection{Technical Details}
\label{section:appendixtalphaqs}
We omitted the proof of \cref{lemma:talphaqs}. We give this proof here. First, however, we need to show that the equation $t + \alpha q t^{-\frac{1}{\alpha s}} = \alpha q + 1$ has two positive roots and one of them is larger than 1. Clearly, $t = 1$ is a root. Denote $f(t):= t + \alpha q t^{-\frac{1}{\alpha s}}-\alpha q - 1.$ Note that
$f'(t) = 1 - \frac{q}{s}t^{-1-\frac{1}{\alpha s}}.$ As $q>s,$ $f'$ has a single root $t_0 = \left(\frac{q}{s}\right)^\frac{\alpha s}{\alpha s + 1}$ and this root is larger than $1$ (but smaller than $\frac{q}{s}$). Thus, $f'$ is decreasing in $(0,t_0) $ and increasing in $[t_0, +\infty).$ Therefore, $f(t_0)<f(1) = 0.$ Since $\lim_{t\longrightarrow+\infty} f(t) =+\infty,$ $f$ has just one more root and this root is larger than $1.$ We can now proceed to the proof of \cref{lemma:talphaqs}.
\noindent
\begin{proof}[Proof of \cref{lemma:talphaqs}]
Without loss of generality, let $0\le x_1 \le x_2\le \ldots\le x_q \le 1.$ Thus, we need to prove that
$$
\min
\left(
t(\alpha, q, s),
(x_{q-s+1}x_{q-s+2}\cdots x_q)^{-\alpha}
\right) +
\alpha \sum_{i = 1}^q x_i \le \alpha q + 1.
$$
Note that we can assume that $(x_{q-s+1}x_{q-s+2}\cdots x_q)^{-\alpha} \le t(\alpha,q,s).$ Indeed, otherwise we can increase the numbers $x_{q-s+1},x_{q-s+2},\ldots ,x_q$ until this inequality is satisfied and the left hand-side will only increase. From now on, we will assume that $B = (x_{q-s+1}x_{q-s+2}\cdots x_q)^{-\alpha} \le t(\alpha,q,s).$\\
\noindent
Similarly note that we can assume that $x_1 = x_2 = \cdots = x_{q-s+1}.$\\
\noindent
Now, keeping $x_{q-s+1}$ fixed and $x_{q-s+1}x_{q-s+2}\cdots x_q$ fixed, note that the sum $\sum_{i = q-s+1}^qx_i$ is maximized when there exists some $0\le r\le s$ such that
$$
1 = x_q = x_{q-1} = \cdots = x_{q-r+1}\ge
x_{q-r}\ge x_{q-r-1} = x_{q-r-2} = \cdots = x_{q-s+1}.
$$
This is indeed the case since when we keep the product of two numbers $a\le b$ fixed, their sum increases as they get further apart. Formally, $a\gamma + b\gamma^{-1}\ge a + b$ for any $0<\gamma<1.$
\noindent
Under these assumptions, denote $y = x_{q-r}$ and
$x_{q-r-1} = x_{q-r-2} = \cdots = x_{q-s+1} = x.$ Using this notation, we want to maximize
$$
h(x,y) = (yx^{s-r-1})^{-\alpha} + \alpha(r + y + (s-r-1)x)
$$
in the set $\mathcal{K}=\{(x,y)>0 \; :\;(yx^{s-r-1})^{-\alpha} \le t(\alpha, q, s)\text{ and }0\le x\le y \le 1\}.$ Note that $\mathcal{K}$ is compact and $h$ is continuous. Therefore, there exists a maximizer. We can easily see that this maximizer is not in the interior of $\mathcal{K}$ as $\nabla_y h = \alpha - \alpha y^{-\alpha - 1}x^{-\alpha (s-r-1)} <0$ in the interior. Thus, the gradient is non-zero and by moving in the direction of the gradient, the value of $h$ will increase. Thus, all the maximizers are on the boundary. There are three cases to consider:\\
\textbf{Case 1)} $y = x.$ Then, we need to prove that
$$
x^{-\alpha (s-r)} + \alpha (r + (q-r)x)\le \alpha q + 1
$$
whenever $0\le r \le s$ and $x^{-\alpha (s-r)}\le t(\alpha, q, s).$ First, note that if $s = r,$ the inequality is trivial. For that reason, we assume that $0\le r \le s-1$ from now on. Consider the function $$g(x) = x^{-\alpha (s-r)} + \alpha (r + (q-r)x) - \alpha q - 1.$$ Then,
$$
g'(x) =
-\alpha(s-r)x^{-\alpha(s-r)-1} + (q-r),
$$
so the function $g'$ has exactly one positive root $x_0.$ It follows that $g$ is decreasing in $(0,x_0)$ and increasing in $(x_0,+\infty).$ No matter what the value of $x_0$ is, this means that the maximal value of $g$ on $[t(\alpha, q, s)^{-\frac{1}{\alpha(s-r)}},1]$ (which is the set of feasible values for $x$) is always at either the point $1$ or the point $t(\alpha, q, s)^{-\frac{1}{\alpha(s-r)}}.$ Thus, we simply need to prove that $g(1)\le 0$ and
$g(t(\alpha, q, s)^{-\frac{1}{\alpha(s-r)}})\le 0.$ The first inequality is trivial. The second is equivalent to:
$$
t(\alpha, q, s) +
\alpha r + \alpha(q-r)t(\alpha, q, s)^{-\frac{1}{\alpha(s-r)}}\le \alpha q + 1 \Longleftrightarrow
$$
$$
t(\alpha,q,s)^{-\frac{1}{\alpha(s-r)}}
\le \frac{\alpha (q-r)+1 - t(\alpha,q,s)}{\alpha (q-r)}.
$$
Note that for this inequality to hold, we first need to prove that $\alpha (q-r)+1 - t(\alpha,q,s)\ge 0.$ In fact, we will prove that
$
\alpha (q-s)+1 \ge t(\alpha,q,s).$ Since $\alpha \ge \frac{1}{s},$ we see that $\alpha (q-s)+1 \ge \frac{q}{s}\ge t_0,$ where $t_0$ is the root of the derivative of $f(t) = t + \alpha qt^{-\frac{1}{\alpha s}} - \alpha q - 1,$ defined in the beginning of this appendix. Since $f$ is increasing on $[t_0,+\infty),$ all we need to prove is that
$$
f(\alpha (q-s)+1)\ge
f(t(\alpha,q,s)) = 0 \Longleftrightarrow
$$
$$
(1 + \alpha(q-s)) + \alpha q (1+\alpha (q-s))^{-\frac{1}{\alpha s}}\ge \alpha q + 1 \Longleftrightarrow
$$
$$
\alpha q (1+\alpha (q-s))^{-\frac{1}{\alpha s}} \ge \alpha s \Longleftrightarrow
$$
$$
(1+\alpha (q-s))^{-\frac{1}{\alpha s}} \ge \frac{s}{q} \Longleftrightarrow
$$
$$
(1+\alpha (q-s))^{\frac{1}{\alpha s}} \le \frac{q}{s}.
$$
However, as $\frac{1}{\alpha s}\le 1$ by the choice of $\alpha, $ by the famous Bernoulli inequality, we know that
$$
(1+\alpha (q-s))^{\frac{1}{\alpha s}}\le
1 + \frac{\alpha (q-s)}{\alpha s} = \frac{q}{s},
$$
as desired.\\
\noindent
Now that we know $\frac{\alpha (q-r)+1 - t(\alpha,q,s)}{\alpha (q-r)}\ge 0,$ the desired inequality becomes equivalent to proving
$$
t(\alpha,q,s)^{-\frac{1}{\alpha s}}
\le \left(\frac{\alpha (q-r)+1 - t(\alpha,q,s)}{\alpha (q-r)}\right)^\frac{s-r}{s}.
$$
Using that $t(\alpha,q,s)^{-\frac{1}{\alpha s}} = \frac{\alpha q + 1 - t(\alpha,q,s)}{\alpha q},$ the above inequality becomes equivalent to
$$
\frac{\alpha q + 1 - t(\alpha,q,s)}{\alpha q}\le
\left(\frac{\alpha (q-r)+1 - t(\alpha,q,s)}{\alpha (q-r)}\right)^\frac{s-r}{s} \Longleftrightarrow
$$
$$
\left(
\frac{\alpha q + 1 - t(\alpha,q,s)}{\alpha q}
\right)^{\alpha s}\le
\left(\frac{\alpha (q-r)+1 - t(\alpha,q,s)}{\alpha (q-r)}\right)^{\alpha (s-r)}.
$$
To prove this inequality, denote by $b = \alpha(q-s)+1- t(\alpha,q,s)\ge 0, a = t(\alpha,q,s) - 1> 0.$ Then, we want to prove that
$$
\left(
\frac{b + \alpha s}{a + b + \alpha s}
\right)^{\alpha s}\le
\left(
\frac{b + \alpha (s-r)}{a + b + \alpha (s-r)}
\right)^{\alpha (s-r)}.
$$
To prove this, we simply show that the function $$
x\longrightarrow
\left(
\frac{b + x}{a + b + x}
\right)^{x}
$$
is decreasing. Equivalently, we want to show that its logarithm $k(x) = x\ln \left(
\frac{b + x}{a + b + x}
\right)$ is decreasing. This, however, is simple as
$$
k'(x) =
\ln \left(
\frac{b + x}{a + b + x}
\right) +
x \frac{a+b+x}{b+x}\frac{a}{(a+b+x)^2} =
$$
$$
\ln \left(
\frac{b + x}{a + b + x}
\right) +
\frac{x}{b+x}\frac{a}{a+b+x}\le
$$
$$
\ln \left(
\frac{b + x}{a + b + x}
\right) +
\frac{a}{a+b+x} =
\ln \left(
\frac{b + x}{a + b + x}
\right) + 1 -
\left(
\frac{b + x}{a + b + x}
\right)\le 0,
$$
as $\ln (y) + 1 - y\le 0 $ for all $y>0.$ With this, case 1 is complete.\\
\textbf{Case 2)} $y = 1.$ This is the same case as $1$ except that we replace $r$ with $r-1.$\\
\textbf{Case 3)} $(yx^{s-r-1})^{-\alpha} = t(\alpha, q, s).$ Then, we know that $y = t(\alpha, q, s)^{-\frac{1}{\alpha}}x^{-(s-r-1)}.$ We need to prove that
$$
t(\alpha, q, s) + \alpha r + \alpha (q-r-1)x + \alpha t(\alpha, q, s)^{-\frac{1}{\alpha}}x^{-(s-r-1)}\le
\alpha q + 1.
$$
Consider the function $m(x) = \alpha (q-r-1)x + \alpha t(\alpha, q, s)^{-\frac{1}{\alpha}}x^{-(s-r-1)}.$ Then
$$
m'(x) =
\alpha (q-r-1) -
\alpha t(\alpha, q, s)^{-\frac{1}{\alpha}}(s-r-1)x^{-(s-r)}.
$$
Note that this function has a unique root
$x_0.$ Therefore, $m$ is decreasing on $(0,x_0]$ and increasing on $[x_0, +\infty).$ In other words, the maximal values of $m(x)$ on $[t(\alpha,q,s)^{-\frac{1}{\alpha(s-r-1)}},t(\alpha,q,s)^{-\frac{1}{\alpha(s-r)}}]$ (which is the feasible set for $x$ as $x^{-\alpha(s-r-1)}\le (yx^{s-r-1})^{-\alpha} = t(\alpha,q,s)$ and\linebreak $x^{-\alpha(s-r)}\ge (yx^{s-r-1})^{-\alpha} = t(\alpha,q,s)$)
are at the two points $x = t(\alpha,q,s)^{-\frac{1}{\alpha(s-r-1)}}$ and \linebreak $x = t(\alpha,q,s)^{-\frac{1}{\alpha(s-r)}}.$ However, these cases correspond to $y = 1$ and $y = x,$ which were already analyzed in case 1 and case 2.\\
\noindent
Finally, we want to prove that the choice of $t$ is optimal. This follows simply by taking\linebreak $x_1 = x_2 = \cdots = x_q = t^{-\frac{1}{\alpha s}}.$
\end{proof}
\begin{remark}
\label{rmk:alphalessthansiso}
\normalfont
We end this appendix with the remark that one can also obtain similar inequalities when $\alpha <\frac{1}{s},$ but with a potentially different choice of $t.$ Namely, suppose that $\alpha >0$ and $t^{min}(\alpha ,q,s)$ is the smallest number among $t_0,t_1,\ldots, t_{s-1},$ where $t_r$ is the larger root of\linebreak $t + \alpha (q-r)t^{-\frac{1}{\alpha (s-r)}} = \alpha (q-r)+1.$ From the proof of \cref{lemma:talphaqs}, it follows that
when $\alpha \ge\frac{1}{s},$ it is the case that $t^{min}(\alpha ,q,s) =t_0 = t(\alpha, q, s).$ This, however, might not be the case in general. For example, we can compute that when $q = 5, s = 2, \alpha = \frac{1}{10},$
we have $t_0 \approx 1.41, t_1\approx 1.38,$ so $t(\alpha ,q,s) = t_0\neq \min(t_0,t_1) = t^{min}(\alpha,q,s).$\\
\noindent
With this in mind, the same proofs show that \cref{lemma:talphaqs} holds with the value $t^{min}(\alpha,q,s)$ instead of $t(\alpha,q,s)$ for any $\alpha >0.$ Similarly,
\cref{thm:talagrandgenerals} holds
with the value $t^{min}(\alpha,q,s)$ instead of $t(\alpha,q,s)$ for any $\alpha >0.$ We did not state the result in this more general form earlier as we specifically wanted to derive the inequalities for $\alpha = \frac{1}{s}, t(\alpha,q,s) = \frac{q}{s}.$
\end{remark}
\section{Distance to Subadditive Valuations}
\label{section:closeness}
We will show that subadditive valuations over $[m]$ are $\Omega(\frac{1}{\log q})$-close to $q$-partitioning valuations over $[m]$ and this factor is asymptotically tight. Both directions follow closely the framework for XOS
approximations of subadditive valuations in \cite{BhawalkarR11}. Denote by $\mathcal{H}_a$ the $a$-th harmonic number, i.e. $\displaystyle\mathcal{H}_a = \sum_{i = 1}^a\frac{1}{i} = \ln a + O(1).$
\noindent
\begin{proposition}
\label{lem:qparttosa}
$\mathcal{Q}(2,[m])$ is $\frac{1}{\mathcal{H}_{q-1}}$-close to $\mathcal{Q}(q,[m]).$
\end{proposition}
\begin{proof} Take any subadditive valuation $g$ over $[m].$ Upon taking the dual of the linear program in \cref{def:closeness}, we need to prove the following fact. For any $S\subseteq [m]$ and any partition of $S$ into $q$ parts $S_1, S_2, \ldots, S_q,$ the optimal value of the following linear program
\begin{equation*}
\begin{split}
\max \; &\sum_{j \in [q]} p_j \text{ s.t.,}\\
&\sum_{j \in I}p_j\le g(\bigcup_{j \in I}S_j)\; \forall I\subseteq [q],\\
&p_j\ge 0 \; \forall j \in [q]
\end{split}
\end{equation*}
is at least $g(S)/\mathcal{H}_{q-1}.$ We construct the price vector $(p_1, p_2, \ldots, p_q)$ explicitly with the following algorithm:
\begin{tcolorbox}[colback=black!5!white,colframe=black!75!black, title = {Algorithm for Constructing Price Vectors}]
\textbf{Initialize:} $C = \emptyset.$
\noindent
\textbf{Iteration:} While $C\neq [q]:$
\begin{itemize}
\item Find $\displaystyle A \in \arg \min_{\emptyset \subsetneq A'\subseteq [q]}\frac{g(\bigcup_{i\in A'}S_i)}{|A'\backslash C|}.$
\item Set $p_j = \frac{g(\bigcup_{i\in A}S_i)}{|A\backslash C|\times \mathcal{H}_{q-1}}$ for all $j \in A.$
\item Update $C = C\cup A.$
\end{itemize}
\noindent
\textbf{Return:} Output the price vector.
\end{tcolorbox}
\noindent
We need to show two things. First, for any $I\subsetneq [q],$ it must be the case that $\sum_{j \in I}p_j\le g(\bigcup_{j \in I}S_j).$ Second, the fact that $\sum_{j \in [q]}p_j\ge g(S)/\mathcal{H}_{q-1}.$ Technically, we also need to show that the inequality $\sum_{j \in [q]}p_j\le g(S)$ holds. However, if this condition is violated, we can reduce prices until $\sum_{j \in [q]}p_j\le g(S)$ without violating any other conditions.\\
\noindent
First, take some $I\subsetneq [q].$ Consider the iteration of the algorithm in which the $\ell'$th price indexed by an element in $I$ is determined. Since the algorithm could have chosen $I$ in that iteration, it must be the case that
$$
\frac{g(\bigcup_{i\in A}S_i)}{|A\backslash C|}\le
\frac{g(\bigcup_{i\in I}S_i)}{|I\backslash C|} \le
\frac{g(\bigcup_{i\in I}S_i)}{|I|-\ell+1}.
$$
Therefore,
$$
\sum_{i\in I}p_i \le
\sum_{\ell = 1}^{|I|}
\frac{g(\bigcup_{i\in I}S_i)}{\mathcal{H}_{q-1}(|I|-\ell+1)}\le
\frac{g(\bigcup_{i\in I}S_i)}{\mathcal{H}_{q-1}}\sum_{\ell = 1}^{|I|}\frac{1}{\ell} \le
g(\bigcup_{i\in I}S_i),
$$
as desired (we used the fact that $|I|\le q-1$).
Now, we will analyze $\sum_{i\in [q]}p_i.$ Let the index sets chosen by the algorithm be $A_1, A_2, \ldots, A_t.$ Then,
$$
\sum_{i\in [q]}p_i =
\sum_{j = 1}^t\sum_{i \in A_j}p_i =
\sum_{j = 1}^t |A_j|\times
\frac{g(\bigcup_{i\in A_j}S_i)}{\mathcal{H}_{q-1}\times |A_j|} =
\frac{\sum_{j = 1}^tg(\bigcup_{i\in A_j}S_i)}{\mathcal{H}_{q-1}}\ge
\frac{g(S)}{\mathcal{H}_{q-1}},
$$
where we used subadditivity in the last inequality.
\end{proof}
\noindent
A simple modification of \cite[Appendix C]{BhawalkarR11} shows that \cref{lem:qparttosa} is tight up to a constant multiplicative factor.
\begin{proposition}
\label{lem:salogclose}
For any $2< q \le m,$ the class of subadditive valuations over $[m]$ is not $\gamma$-close to $\mathcal{Q}(q,[m])$ for any
$\gamma >\frac{2}{\log_2 \frac{q}{2}}.$
\end{proposition}
\begin{proof}
Let $q'$ be the largest integer such that $q'\le q$ and $q' = 2^a - 1$ for some natural number $a.$ Note that $q' \ge \frac{q}{2}.$ Then, one can construct as in \cite[Appendix C]{BhawalkarR11} a subadditive valuation $g$ over $[q']$ that is not $\gamma$-close to being XOS for any $\gamma >\frac{2}{\log_2 \frac{q}{2}}$ as follows.\\
\noindent
\textit{Construction in \cite[Appendix C]{BhawalkarR11}}:
Identify $[q']$ with the set $\mathcal{V}$ of $2^a-1$ non-zero vectors over the finite vector space $F_2^a.$ For each $v\in \mathcal{V},$ set $S_v= \{u \in \mathcal{V}\; : v\cdot u \equiv 1 \pmod{2}\}.$ Define $g$ as the set-cover function over $\mathcal{V}.$ On the one hand, we can observe that $g(\mathcal{V})\ge a.$ Indeed, for any $r<a$ vectors $v_1, v_2, \ldots, v_r,$ we can find some $u$ such that $v_i \cdot u = 0\pmod{2}$ holds for all $i\in[r]$ simply because the matrix $(v_1\; v_2\;\cdots\; v_r)^T$ is not of full rank. On the other hand, note that for each $v,$ the set $S_v$ contains $2^{a-1}$ elements and each $u\in \mathcal{V}$ belongs to $2^{a-1}$ sets of the form $S_v.$ Since $g(S_v) = 1,$ the fractional cover $\alpha$ assigning weight $\frac{1}{2^{a-1}}$ to each set $S_v$ satisfies
$$
\sum_{I\subseteq \mathcal{V}} \alpha(I)g(I) = \sum_{v\in \mathcal{V}}
\alpha(S_v)g(S_v) =
(2^{a}-1)\times \frac{1}{2^{a-1}}\le 2,
$$
which shows that $g$ is not $\gamma$-close to being XOS for $\gamma
>\frac{2}{a}\ge \frac{2}{\log_2 \frac{q}{2}}.$\\
\noindent
Now, we go back to the problem statement.
Clearly, $q'\le q\le m.$ First, we extend $g$ to a subadditive valuation $g'$ on $[m]$ by setting $g'(S):= g(S\cap [q'])$ for any $S\subseteq [m].$ It trivially follows that $g'$ is not $\gamma$-close to any $q'$-partitioning valuation for any $\gamma >\frac{2}{\log_2 \frac{q}{2}}.$ As $q'\le q,$ meaning that any $q$-partitioning valuation is also $q'$-partitioning, the result follows.
\end{proof}
\end{document} |
\begin{document}
\title{Richardson elements for classical Lie algebras}
\author{Karin Baur}
\thanks{Supported by Freie Akademische Stiftung and
by a DARPA Grant}
\address{Karin Baur, Department of Mathematics, University of
California, San Diego, USA}
\email{kbaur@math.ucsd.edu}
\date{January 20, 2005}
\begin{abstract}
Parabolic subalgebras of semi-simple Lie algebras
decompose as $\liea{p}=\liea{m}\oplus\liea{n}$
where $\liea{m}$ is a Levi factor and
$\liea{n}$ the corresponding nilradical.
By Richardson's
theorem \cite{ri},
there exists an open orbit under the action of
the adjoint group $P$ on the nilradical.
The elements of this dense orbit are known as
Richardson elements.
In this paper we describe a normal form
for Richardson elements in the classical case.
This
generalizes a construction for
$\liea{gl}_N$ of Br\"ustle, Hille, Ringel and
R\"ohrle \cite{bhrr} to the other classical
Lie algebras and it extends the author's normal forms
of Richardson
elements for nice parabolic subalgebras of simple
Lie algebras to arbitrary parabolic subalgebras
of the classical Lie algebras \cite{b04}.
As applications we obtain a description of
the support of Richardson elements and we recover
the Bala-Carter label of the orbit of Richardson
elements.
\end{abstract}
\maketitle
\section*{Introduction}
The goal of this paper is to describe Richardson
elements for parabolic subalgebras of the
classical Lie algebras.
Let $\liea{p}$ be a parabolic subalgebra of
a semi-simple Lie algebra $\liea{g}$ over ${\mathbb C}$
and
$\liea{p}=\liea{m}\oplus\liea{n}$ a Levi
decomposition. By a fundamental theorem
of Richardson \cite{ri} there always exist
elements $x$ in the nilradical $\liea{n}$
such that $[\liea{p},x]=\liea{n}$. In other
words, if $P$ is the adjoint group
of $\liea{p}$,
then the orbit $P\cdot x$ is dense in $\liea{n}$.
It is usually called the Richardson orbit.
Richardson orbits have been studied for
a long time and there are many open questions
related to this setting.
Our goal is to give explicit representatives
for Richardson elements. In the case of
$\liea{gl}_n$ there is a beautiful way to
construct Richardson elements that has
been described by Br\"ustle, Hille, Ringel
and R\"ohrle in~\cite{bhrr}. Furthermore,
Richardson elements with support in the
first graded part $\liea{g}_1$ (where the
grading is induced from the parabolic
subalgebra) have been given for all simple
Lie algebras in~\cite{b04}.
However, these constructions do not work in
general for classical Lie algebras.
To fill this gap, we have modified the
existing approaches to obtain Richardson elements
for parabolic subalgebras of the
classical Lie algebras. We do this using
certain simple line diagrams. They correspond
to nilpotent matrices with at most one
non-zero entry in each row and in each
column.
We show that for most parabolic subalgebras, there exists
a simple line diagram that defines
a Richardson element.
But there are cases where this is not possible
as we will see.
We expect that the representatives we describe
will give more insight and hopefully answer
some of the open questions.
One of the interesting questions in the theory of
Richardson elements is the structure of
the support of a Richardson element.
Recall that any parabolic subalgebra $\liea{p}$
induces a ${\mathbb Z}$-grading of $\liea{g}$,
\[
\liea{g}=\oplus_{i\in{\mathbb Z}}\liea{g}_i\quad\text{with}
\quad\liea{p}=\oplus_{i\ge 0}\liea{g}_i=
\liea{g}_0\oplus(\bigoplus_{i>0}\liea{g}_i)
\]
where $\liea{g}_0$ is a Levi factor and
$\liea{n}:=\oplus_{i>0}\liea{g}_i$ the corresponding
nilradical. For details, we refer to our joint
work with Wallach,~\cite{bw}.
The support of a Richardson element
$X=\sum_{\alpha\text{ root of }\liea{n}}k_{\alpha}X_{\alpha}$
are the roots of the nilradical $\liea{n}$
with $k_{\alpha}\neq 0$ (where $X_{\alpha}$ spans the
root subspace $\liea{g}_{\alpha}$).
The support $\operatorname{supp}(X)$ of $X$ lies in the subspace
$\liea{g}_1\oplus\dots\oplus\liea{g}_k$
for some $k\ge 1$.
For the normal form of Richardson elements
we can determine the minimal $k_0$ such
that $\operatorname{supp}(X)\subset$
$\liea{g}_1\oplus\dots\oplus\liea{g}_{k_0}$.
We also recover the Bala-Carter label of the
dense orbit of Richardson elements, also
called the {\itshape type} of the orbit.
The Bala-Carter label is used in the classification
of nilpotent orbits of simple Lie algebras, given
in~\cite{bc}.
For a description of these labels
see chapter 8 of~\cite{cm}.
The type of any nilpotent orbit in a classical
Lie algebra has been described by
Panyushev \cite{pan}
in terms of the partitions of the orbit.
\noindent
Before we describe our results and explain
the structure of this
article, we need to fix some notation.
If $\liea{p}$ is a parabolic subalgebra
of a semi-simple Lie algebra $\liea{g}$
we can assume that $\liea{p}$
contains a fixed Borel subalgebra. In this
case we say that $\liea{p}$ is standard.
If $\liea{m}$ is a Levi factor of $\liea{p}$
we say that $\liea{m}$ is standard if it
contains a fixed Cartan subalgebra $\liea{h}$
that is contained in the fixed Borel
subalgebra.
From now on we will assume that $\liea{g}$ is
a classical Lie algebra, unless stated otherwise.
As usual, the Cartan subalgebra
consists of the diagonal matrices and the
fixed Borel subalgebra is the set of upper
triangular matrices.
Then a standard Levi factor has the shape of
a sequence of square matrices (blocks) on
the diagonal and zeroes outside.
In the case of $\liea{so}_{2n}$, we have
to be careful: we will only consider
parabolic subalgebras where $\alpha_n$ and
$\alpha_{n-1}$ are both roots of the Levi
factor or both roots of the nilradical
or $\alpha_{n-1}$ a root of the Levi
factor and $\alpha_n$ a root of the nilradical.
In other words, the case $\alpha_n$ a root of the Levi
factor and $\alpha_{n-1}$ a root of the
nilradical will be identified with this last
case since the two parabolic subalgebras
are isomorphic.
So our standard
$\liea{p}$ or $\liea{m}$ are uniquely
defined by
the sequence $d:=\underline{d}=(d_1,\dots,d_r)$
of the sizes of these blocks (and by specifying
the type of the Lie algebra).
We start by defining line diagrams for
dimension vectors in section~\ref{se:line-diag}.
It will turn out that each horizontal line diagram
corresponds uniquely to
elements of the nilradical of the
parabolic subalgebra of $\liea{sl}_n$ of the given
dimension vector.
In section~\ref{se:rich-theory}
we gather the necessary properties
of Richardson elements.
In section~\ref{se:sl-case} we show that
horizontal line diagrams in fact correspond
to Richardson elements of the given parabolic
subalgebra. The construction of such diagrams
for $\liea{gl}_n$ appears first in~\cite{bhrr}.
We have already mentioned that
for the other classical Lie algebras, the
horizontal line diagrams do not give
Richardson elements.
In general, the matrix obtained
is not an element of the Lie
algebra in question. Thus we will introduce
generalized line diagrams in section~\ref{se:BCD-type}
to obtain Richardson elements for parabolic
subalgebras of the symplectic and orthogonal
Lie algebras. As a by-product we obtain
the partition of a Richardson element for the
so-called simple parabolic subalgebras.
The last section discusses the cases
where line diagrams do not produce
Richardson elements. For these we will
allow ``branched'' diagrams.
In the appendix we add examples illustrating
branched diagrams.
\section{Line diagrams}\label{se:line-diag}
Let $d=(d_1,\dots,d_r)$ be a dimension
vector, i.e. a sequence
of positive integers.
Arrange $r$ columns of $d_i$ dots, top-adjusted.
A {\it (filled) line diagram} for $d$,
denoted by $L(d)$,
is a collection of lines joining vertices of
different columns such that each vertex is
connected to at most one vertex of a column left
of it and to at most one vertex of a column right
of it
and such that it cannot be extended by any line.
We say that it is a {\it (filled) horizontal line diagram}
if all edges are horizontal lines. Such a diagram
will be denoted by $L_h(d)$.
We will always assume that the
line diagrams are filled and omit the term `filled'.
Line diagrams are not unique. However, for each dimension
vector there is a unique horizontal line diagram.
\begin{ex}
As an example, consider the dimension vector $(3,1,2,3)$
and
three line diagrams for it, the last one horizontal.
$$
{\small
\xymatrix@-5mm{
\bullet\ar@{-}[rrd] & \bullet &\bullet\ar@{-}[r] & \bullet \\
\bullet\ar@{-}[rru] & & \bullet\ar@{-}[r] & \bullet \\
\bullet\ar@{-}[rrr] & & & \bullet}\quad\quad
\xymatrix@-5mm{
\bullet\ar@{-}[r] & \bullet\ar@{-}[rd] & \bullet\ar@{-}[r]
& \bullet \\
\bullet\ar@{-}[rrrd] & & \bullet & \bullet \\
\bullet\ar@{-}[rrru] & & & \bullet}\quad\quad
\xymatrix@-5mm{
\bullet\ar@{-}[r] & \bullet\ar@{-}[r] &\bullet\ar@{-}[r]
& \bullet \\
\bullet\ar@{-}[rr] & & \bullet\ar@{-}[r] & \bullet \\
\bullet\ar@{-}[rrr] & & & \bullet}
}
$$
\end{ex}
\section{Richardson elements}\label{se:rich-theory}
In this section we describe a method to
check whether a given nilpotent element of
the nilradical of a classical Lie algebra
is a Richardson element. The first statement is
given in~\cite{bw}. Since we will use
this result constantly, we repeat its proof.
\begin{thm}\label{thm:dim-cent-Rich}
Let $\liea{p}\subset\liea{g}$ be a parabolic subalgebra
of a semi-simple Lie algebra $\liea{g}$, let
$\liea{p}=\liea{m}\oplus\liea{n}$ where $\liea{m}$
is a Levi factor and $\liea{n}$ the corresponding
nilradical.
Then $x\in\liea{n}$ is a Richardson element
for $\liea{p}$ if and only if
$\dim\liea{g}^x=\dim\liea{m}$.
\end{thm}
\begin{proof}
Denote the nilradical of the opposite parabolic by
$\overline{\liea{n}}$ (the opposite parabolic is
defined as the parabolic subalgebra whose intersection
with $\liea{p}$ is equal to $\liea{m}$).
If $x\in\liea{n}$ then
$\operatorname{ad}(x)\liea{g}=\operatorname{ad}(x)\overline{\liea{n}}+\operatorname{ad}(x)\liea{p}$. Now
$\operatorname{ad}(x)\liea{p}\subset\liea{n}$ and
$\dim\operatorname{ad}(x)\overline{\liea{n}}\le\dim\overline{\liea{n}}$. Thus
\[
\dim\operatorname{ad}(x)\liea{g}\le\,2\dim\liea{n}.
\]
This implies for $x\in\liea{n}$ that
$\dim\liea{m}\le \dim\liea{g}^x$ and equality implies that
$\dim\operatorname{ad}(x)\liea{p}=\dim\liea{n}$. Thus equality implies
that $x$ is a Richardson element.
For the other direction, let $x$ be a Richardson
element for $\liea{p}$. We show that the map
$\operatorname{ad}(x)$ is injective on $\overline{\liea{n}}$:
Let $y\in\overline{\liea{n}}$ with $\operatorname{ad}(x)y=0$.
Then
\[
0=B(\operatorname{ad}(x)y,\liea{p})=B(y,\operatorname{ad}(x)\liea{p})
=B(y,\liea{n}).
\]
In particular, $y=0$.
So $\operatorname{ad}(x)$ is injective on $\overline{\liea{n}}$,
giving $\dim\operatorname{ad}(x)\overline{\liea{n}}=\dim\liea{n}$.
Thus
\begin{eqnarray*}
\dim\overbrace{\operatorname{ad}(x)\liea{p}}^\liea{n}
+
\dim\overbrace{\operatorname{ad}(x)\overline{\liea{n}}}^{\overline{\liea{n}}}
& = & 2\dim\liea{n} \\
& = & \dim\operatorname{ad}(x)\liea{g} \\
& = & \dim\liea{g}-\dim\liea{g}^x
\end{eqnarray*}
So
$\dim\liea{g}^x+\dim\liea{n}=\dim\liea{g}-\dim\liea{n}$
$=\dim\liea{p}=\dim\liea{m}+\dim\liea{n}$,
i.e. $\dim\liea{m}=\dim\liea{g}^x$.
\end{proof}
\begin{cor}
Let $\liea{p}=\liea{m}\oplus\liea{n}$ be a parabolic
subalgebra of a semi-simple Lie algebra.
Let $X\in\liea{n}$ be a Richardson element.
Then $\dim\liea{g}^X\le\dim\liea{g}^Y$
for any $Y\in\liea{n}$.
\end{cor}
Theorem~\ref{thm:dim-cent-Rich} gives us a tool to
decide whether an element of the nilradical of a
parabolic subalgebra is a Richardson element.
Namely, we have to calculate its centralizer.
Centralizers of nilpotent elements of the
classical Lie algebras can be computed using their
Jordan canonical form. This
well-known result is due to Kraft and Procesi,
cf.~\cite{kp}.
\begin{thm}\label{thm:dim-cent-Jordan}
Let $(n_1,\dots,n_r)$ be the partition of
the Jordan canonical form of a nilpotent
matrix $x$ in the Lie algebra $\liea{g}$, let
$(m_1,\dots, m_s)$ be the dual partition.
Then the dimension of the centralizer of $x$ in $\liea{g}$
is
\[
\begin{array}{ll}
\sum\limits_i m_i^2 & \mbox{if $\liea{g}=\liea{gl}_n$}\\
\sum\limits_i \frac{m_i^2}{2}+\frac{1}{2}|\{i\mid n_i\text{ odd}\}|
& \mbox{if }\liea{g}=\liea{sp}_{2n} \\
\sum\limits_i \frac{m_i^2}{2}-\frac{1}{2}|\{i\mid n_i\text{ odd}\}|
& \mbox{if }\liea{g}=\liea{so}_N
\end{array}
\]
\end{thm}
So it remains to determine the Jordan canonical form
of a given nilpotent element $x$. It is
given by the dimensions of the kernels
of the maps $x^j$, $j\ge 1$:
\begin{lm}\label{lm:Jordan-form}
Let $x$ be a nilpotent $n\times n$ matrix with $x^{m-1}\neq 0$
and $x^m=0$, set $b_j:=\dim\ker x^j$ ($j=1,\dots,m$).
Define
\[
a_j:=\left\{ \begin{array}{ll}2b_1-b_2 & j=1 \\
2b_j-b_{j-1}-b_{j+1}& j=2,\dots,m-1\\
b_m-b_{m-1} & j=m
\end{array}\right.
\]
Then the Jordan canonical form of $x$ has $a_s$ blocks
of size $s$ for $s=1,\dots,m$.
\end{lm}
\begin{cor}\label{cor:part}
With the notation of Lemma~\ref{lm:Jordan-form} above,
the Jordan canonical form of $x$ is given by the partition
\[
(1^{a_1},2^{a_2},\dots,(m-1)^{a_{m-1}},m^{a_m}).
\]
\end{cor}
\section{The special linear Lie algebra}\label{se:sl-case}
We now describe how to obtain a Richardson
element from a (horizontal) line diagram.
Recall that a standard parabolic subalgebra of
$\liea{sl}_n$ is uniquely described by the
sequence of lengths of the blocks in $\liea{m}$
(the standard Levi factor).
Let $d=(d_1,\dots,d_r)$ be the dimension
vector of these block lengths.
We form the horizontal line diagram $L_h(d)$
and label its vertices column wise by the numbers
$1,2,\dots,n$, starting with
column $1$, labeling top-down.
This labeled diagram defines a nilpotent element
as the sum of all elementary matrices $E_{ij}$
such that there is a line from $i$ to $j$,
where $i<j$:
\[
X(d)=X(L_h(d))
=\sum_{i\mbox{---}j}E_{ij}
\]
\begin{ex}\label{ex:constr}
Let $\liea{p}\subset\liea{sl}_9$ be given by
the dimension vector $(3,1,2,3)$. We label its
horizontal line diagram,
$$
{\small
\xymatrix@-6mm{
1\ar@{-}[r] & 4\ar@{-}[r] & 5\ar@{-}[r] & 7 \\
2\ar@{-}[rr] & & 6\ar@{-}[r] & 8 \\
3\ar@{-}[rrr] & & & 9
}},
$$
and obtain $X(d)=$
$E_{1,4}+E_{4,5}+E_{5,7}+E_{2,6}+E_{6,8}+E_{3,9}$,
an element of the nilradical $\liea{n}$
of $\liea{p}$. Using Lemma~\ref{lm:Jordan-form} and
Corollary~\ref{cor:part} one checks that
the dimension of the centralizer of $X(d)$
is equal to the dimension of the Levi factor.
Thus $X(d)$ is a Richardson element
for $\liea{p}$ (by Theorem~\ref{thm:dim-cent-Jordan}).
\end{ex}
By construction, the matrix $X(d)$
is nilpotent for any dimension vector $d$.
It is in fact an element
of the nilradical $\liea{n}$ of the parabolic
subalgebra $\liea{p}=\liea{p}(d)$:
If $d=(n)$, this is obvious, the
constructed nilpotent element is the zero matrix.
If $d=(d_1,d_2)$ then the nonzero
coefficients of the matrix of $X(d)$
are in the rows $1,\dots,d_1$ and columns
$d_1+1,\dots,d_2$. In other words, they lie in
the $d_1\times d_2$-block in the upper right
corner.
The standard Levi factor consists
of the blocks $d_1\times d_1$, $d_2\times d_2$
on the diagonal. In particular, $X(d_1,d_2)$
is a matrix that lies above the Levi factor. This
generalizes to dimension vectors with more entries.
So we get part (1) of the following Lemma.
For part (2) we introduce a new notion.
\begin{defn}
If there exists a sequence of $k$ connected
lines in a line diagram $L(d)$ that is not
contained in
a longer sequence we say that $L(d)$ has
a {\itshape $k$-chain} or a
{\itshape chain of length $k$}. A
{\itshape subchain of length $k$} (or $k$-subchain)
is a sequence of $k$ connected
lines in $L(d)$ that may be contained in a
longer chain.
A (sub)chain of length $0$ is a single
vertex that is not connected to any other vertex.
\end{defn}
\begin{lm}\label{lm:X-nilrad}
(1)
The element $X(d)$ is an element
of the nilradical of $\liea{p}(d)$.
(2) For $k\ge 1$, the rank of $X(d)^k$
is equal to the number of $k$-subchains of lines in
$L_h(d)$.
\end{lm}
\begin{proof}[Proof of (2)]
It is clear that the rank of $X=X(d)$
is the number of
lines in the diagram: to construct $X$, we
sum over all lines of the diagram. Since these
lines are disjoint (each vertex $i$ is joined to
at most one neighbour $j$ with $i<j$) the rows
and columns of $X$ are linearly independent.
Therefore the rank of $X$ is equal to the number of
vertices $i$ such that there is a line from $i$
to some $j$ with $i<j$.
For any $k>0$, the matrix $X^k$ consists of
linearly independent rows and columns.
It is clear that an
entry $(ij)$ of $X\cdot X$ is
non-zero if and only if there is a line
$i$---$k$---$j$ in $L_h(d)$:
$X\cdot X=\sum_{i-k}E_{ik}\sum_{l-j}E_{lj}$
where $E_{ik}E_{lj}=\delta_{kl}E_{ij}$.
Similarly, the rank of $X^k$ is the number
of vertices
$i$ such that there exist vertices
$j_1<j_2<\dots<j_k$ and lines
$i$---$j_1$---$\,\cdots$---$j_k$ joining
them,
i.e. the number of $k$-subchains.
\end{proof}
It turns out that $X(d)$ is a
Richardson element for $\liea{p}(d)$,
as we will show below. This fact follows also
from the description of Br\"ustle et al.
in~\cite{bhrr} of $\Delta$-filtered modules
without self-extension of the Auslander-Reiten
quiver of type $\lieg{A}_r$ (the number $r$ is
the number
of blocks in the standard Levi factor of
the parabolic subalgebra).
\begin{thm}\label{thm:lines-rich}
The mapping $d\mapsto X(d)$
associates to each dimension vector with $\sum d_i=n$
a Richardson
element for the corresponding parabolic subalgebra
$\liea{p}=\liea{p}(d)$ of $\liea{sl}_n$.
\end{thm}
We give here an elementary proof of
Theorem~\ref{thm:lines-rich} above.
We will use the ideas of this proof to deal
with the other classical groups (where we will have
to use line diagrams that are not horizontal in general).
The main idea is to use the dimension
of the centralizer of a Richardson element and the
partition of the Jordan canonical form of a nilpotent
element.
\begin{proof}
Let $d$ be the dimension vector corresponding
to the parabolic subalgebra $\liea{p}=\liea{p}(d)$.
Let $X=X(d)$ be the nilpotent element associated
to it (through the horizontal line diagram).
By Theorem~\ref{thm:dim-cent-Rich} we have to calculate the
dimension of the centralizer of $X$ and of the Levi
factor $\liea{m}$ of $\liea{p}$.
By Theorem~\ref{thm:dim-cent-Jordan}, $\dim\liea{g}^X$
is equal to $\sum_i m_i^2-1$ where $(m_1,\dots,m_s)$
is the dual partition to the partition of $X$.
The parts of the dual partition are the entries $d_i$ of
the dimension vector as is shown in
Lemma~\ref{lm:diagr-Jordan} below.
In particular,
$\dim\liea{m}=\sum_i d_i^2-1=\dim\liea{g}^X$.
\end{proof}
The following result shows how to obtain
the partition and the dual partition of the Jordan
canonical form of the nilpotent element
associated to the dimension vector $d$.
\begin{lm}\label{lm:diagr-Jordan}
Let $d$ be the dimension vector
for $\liea{p}\subset\liea{sl}_n$, $X=X(d)$
the associated nilpotent element of $\liea{sl}_n$.
Order the entries $d_1,\dots,d_r$ of the dimension vector
in decreasing order as
$D_1,D_2,\dots,D_r$
(i.e. such that $D_i\ge D_{i+1}$ for
all $i$).
Then the
Jordan canonical form of $X$ is
\[
1^{D_1-D_2},2^{D_2-D_3},\dots,(r-1)^{D_{r-1}-D_r},r^{D_r}
\]
and the dual partition is
\[
D_r,D_{r-1},\dots, D_1.
\]
\end{lm}
In other words, the dual partition for $X(d)$
is given by the entries of the dimension vector.
Furthermore, for every
$i$-chain in $L_h(d)$ (i.e. for every sequence
of length $i$, $i\ge 0$,
that is not contained in a longer sequence)
the partition has an entry $i$.
\begin{proof}
Let $d=(d_1,\dots,d_r)$ be the dimension
vector of $\liea{p}$ and $D_1,\dots,D_r$ its permutation
in decreasing order, $D_i\ge D_{i+1}$.
To determine the Jordan canonical form of $X=X(d)$
we have to compute the rank of the powers $X^s$, $s\ge 1$,
cf. Lemma~\ref{lm:Jordan-form}.
Since the nilpotent matrix $X$ is given by the
horizontal line diagram
$L_h(d)$, the rank of $X^s$ is
easy to compute: by Lemma~\ref{lm:X-nilrad} (2),
the rank of $X^s$ is the number of $s$-subchains.
In particular,
$\operatorname{rk} X=n-D_1$ and $\operatorname{rk} X^2=n-D_1-D_2$,
$\operatorname{rk} X^3=n-D_1-D_2-D_3$, etc.
This gives
\[
b_s:=\dim\ker X^s=D_1+\dots+D_s \ \mbox{for} \ s=1,\dots,r.
\]
And so, by Lemma~\ref{lm:Jordan-form}, we obtain
$a_1=D_1-D_2$, $a_2=D_2-D_3$, $\dots,a_r=D_r$
proving the first statement.
The statement about the dual partition (i.e.
the partition given by the lengths of the columns
of the partition) follows then immediately.
\end{proof}
\section{Richardson elements for the other
classical Lie algebras}\label{se:BCD-type}
In this section we will introduce generalized
line diagrams to deal with the symplectic and
orthogonal Lie algebras. Having introduced
them, we show that they correspond to
Richardson elements for the parabolic subalgebra
in question. Then we discuss some properties
and describe the dual of the partition of
a nilpotent element given by such a generalized
line diagram.
Furthermore, we describe the support of the
constructed $X(d)$ and relate it to the
Bala-Carter label of the $G$-orbit through
$X(d)$ where $G$ is the adjoint group of $\liea{g}$.
To define the orthogonal Lie
algebras, we use the skew diagonal matrix $J_n$
with ones on the skew diagonal and zeroes
else.
The symplectic Lie algebras $\liea{sp}_{2n}$
are defined using
${\small\begin{bmatrix} 0 & J_n \\ -J_n & 0\end{bmatrix}}$.
(For details we refer the reader to~\cite{gw}.)
So
$\liea{so}_n$ consists of the $n\times n$-matrices
that are skew-symmetric around the skew-diagonal
and $\liea{sp}_{2n}$ is the set of $2n\times 2n$-matrices
of the form
\[\begin{bmatrix}A & B\\ C&A^*\end{bmatrix}\]
where $A^*$ is the negative of the skew transpose
of $A$.
Thus in the case of the symplectic and orthogonal
Lie algebras, the block sizes of the standard Levi
factor form a palindromic sequence.
If there is an even number of blocks in the
Levi factor, the dimension vector is of the form
$(d_1,\dots,d_r,d_r,\dots,d_1)$.
We will refer to this situation as type~(a).
If there is an odd number of blocks in the
Levi factor, type (b), the dimension vector
is $(d_1,\dots,d_r,d_{r+1},d_r,\dots,d_1)$.
By the (skew) symmetry around the skew diagonal,
the entries below the skew diagonal
of the matrices $X(d)$ are
determined by the entries above the skew
diagonal. In terms of line diagrams:
For $\liea{sp}_N$ and $\liea{so}_N$ there is
a line $(N-j+1)$---$(N-i+1)$ whenever
there is a line $i$---$j$.
We will call the line $(N-j+1)$---$(N-i+1)$
the {\itshape counterpart} of $i$---$j$ and
will sometimes denote counterparts by dotted lines.
In particular, it suffices to describe
the lines attached to the left to vertices of the
first $r$
columns for both types (a) and (b).
The (skew)-symmetry will give constraints on the
diagram---there will also appear negative
entries. For the moment, let us assume
that $L(d)$ is a diagram defining an element
of the nilradical of the parabolic subalgebra
in question. Then part (2) of
Lemma~\ref{lm:X-nilrad} still holds.
\begin{lm}\label{lm:chains-rank}
If $X(d)$ is defined by $L(d)$
then the rank of the
map $X(d)^k$ is the number of $k$-subchains
of lines in the diagram.
\end{lm}
This uses the same argument as
Lemma~\ref{lm:X-nilrad} since by construction,
$X(d)$ only has linearly independent
rows and columns and the product
$X(d)^2$ only has nonzero entries $E_{il}$
if $X(d)$ has an entry $E_{ij}$ and an
entry $E_{jl}$ for some $j$.
The following remark allows us to simplify the
shapes of the diagrams we are considering.
If $d=(d_1,\dots,d_r)$ is an
$r$-tuple in ${\mathbb N}^r$, and $\sigma\in S_r$ (where
$S_r$ is the permutation group on $r$ letters)
we define $d_{\sigma}$
as $(d_{\sigma 1},d_{\sigma 2},\dots,d_{\sigma r})$.
By abuse of notation, for
$d=(d_1,\dots,d_r,d_r,\dots,d_1)$ in ${\mathbb N}^{2r}$,
we write
$d_{\sigma}=(d_{\sigma 1},\dots,d_{\sigma r},
d_{\sigma r},\dots,d_{\sigma 1})$
and for $d=(d_1,\dots,d_r,d_{r+1},d_r,\dots,d_1)$
in ${\mathbb N}^{2r+1}$, we define
$d_{\sigma}$ to be the $2r+1$-tuple
$(d_{\sigma 1},\dots,d_{\sigma r},
d_{r+1},d_{\sigma r},\dots,d_{\sigma 1})$.
It will be clear from the context which tuple
we are referring to.
\begin{re}\label{re:permutations}
For $d=(d_1,\dots,d_r)$ the diagrams
$L_h(d)$ and $L_h(d_{\sigma})$
have the same chains of lines for any $\sigma\in S_r$.
In other words: for any $k\ge 1$, the number of
chains of lines of length $k$ in $L_h(d)$ is the
same as the number of lines of length $k$
in $L_h(d_{\sigma})$.
As an illustration, consider the permutation $1243$ of
$d=(3,1,2,3)$:
$$
{\small
\xymatrix@-5mm{
\bullet\ar@{-}[r] & \bullet\ar@{-}[r] &\bullet\ar@{-}[r]
& \bullet \\
\bullet\ar@{-}[rr] & & \bullet\ar@{-}[r] & \bullet \\
\bullet\ar@{-}[rrr] & & & \bullet}\quad\quad
\xymatrix@-5mm{
\bullet\ar@{-}[r] & \bullet\ar@{-}[r] &\bullet\ar@{-}[r]
& \bullet \\
\bullet\ar@{-}[r] & \bullet\ar@{-}[r] &\bullet & \\
& \bullet\ar@{-}[r] & \bullet &}
}
$$
Similarly, for $f=(f_1,\dots,f_r,f_r,\dots,f_1)$
resp. for $g=(g_1,\dots,g_r,g_{r+1},g_r,\dots,g_1)$,
if $L(f)$ and $L(g)$ are line diagrams for $\liea{sp}_{2n}$
or $\liea{so}_N$ then for any
$\sigma\in S_r$, the diagrams
$L(f_{\sigma})$ resp. $L(g_{\sigma})$ are
also diagrams for the corresponding Lie algebras
and have exactly the same chains as
$L(f)$ resp. as $L(g)$.
\end{re}
We have an immediate consequence of
Remark~\ref{re:permutations} and of
Lemma~\ref{lm:chains-rank}:
\begin{cor}\label{cor:reordering}
Let $d=(d_1,\dots,d_r,d_r,\dots,d_1)$ or
$d=(d_1,\dots,d_r,d_{r+1},d_r,\dots,d_1)$
be the dimension vector of a parabolic
subalgebra of a symplectic or orthogonal
Lie algebra and $X(d)$ be given by the
appropriate line diagram.
In calculating the rank of $X(d)^k$ we
can assume that $d_1\le\dots\le d_r$.
\end{cor}
We will make frequent use of this
property.
Now we will finally be able to construct
diagrams for the other classical cases.
We have already mentioned that the horizontal
line diagrams do not produce Richardson
elements.
One reason is that the counterpart of
a line $i$---$j$ is not always horizontal.
The other reason is that we have to introduce
negative signs for the symplectic and
orthogonal cases when we associate a
nilpotent matrix to a diagram:
If $\liea{g}=\liea{sp}_{2n}$, in the
definition of $X(d)$
we subtract $E_{ij}$ whenever there is a
line $i$---$j$ with $n<i<j$.
If $\liea{g}=\liea{so}_N$ we subtract
$E_{ij}$ whenever there is a line $i$---$j$
with $i+j>N$.
\begin{ex}\label{ex:sp-non-horizontal}
Let $(1,2,2,1)$ be the dimension vector
of a parabolic subalgebra of $\liea{sp}_6$.
Then the following three line diagrams
determine elements of the nilradical
of $\liea{p}$:
$${\small
\xymatrix@-6mm{
1\ar@{-}[r] & 2\ar@{-}[r] & 4 & 6\\
& 3\ar@{.}[r] & 5\ar@{.}[ur] \\
}\quad\quad
\xymatrix@-6mm{
1\ar@{-}[r] & 2\ar@{-}[rd] & 4 & 6\\
& 3\ar@{-}[ru] & 5\ar@{.}[ur] \\
}\quad\quad
\xymatrix@-6mm{
1\ar@{-}[r] & 2\ar@{-}[r] & 5\ar@{.}[r] & 6\\
& 3\ar@{-}[r] & 4 \\
}}
$$
The last diagram is just a reordering
of the second.
The nilpotent elements are
$X_1=E_{12}+E_{24}+E_{35}-E_{56}$
resp. $X_2=E_{12}+E_{25}+E_{34}-E_{56}$.
By calculating the Jordan canonical
forms for these elements one
checks that only the nilpotent element
$X_2$ is a
Richardson element.
\end{ex}
This example and the discussion above illustrate
that for the symplectic and orthogonal Lie
algebras, we will use:
(i) non-horizontal lines,
(ii) labeling top-bottom \textbf{and} bottom-top,
(iii) negative signs, too.
\noindent
Before we start defining these line diagrams
we introduce a new notion.
\begin{defn}
Let $\liea{p}$ be the standard parabolic
subalgebra of a symplectic or orthogonal
Lie algebra $\liea{g}$.
We say that $\liea{p}$ is {\itshape simple}
if $\liea{p}\subset\liea{g}$
is of one of the following forms:
\begin{enumerate}
\item A parabolic subalgebra of $\liea{sp}_{2n}$
with an even number of
blocks in the standard Levi factor.
\item A parabolic subalgebra of $\liea{so}_{2n}$
with an even number of
blocks in the standard Levi factor such that
odd block lengths appear exactly twice.
\item
A parabolic subalgebra of $\liea{sp}_{2n}$
with an odd number of
blocks in the Levi factor and such that each
odd $d_i$ that is smaller than $d_{r+1}$
appears exactly twice.
\item A parabolic subalgebra of $\liea{so}_N$
with an odd number of
blocks in the Levi factor such that
either all $d_i$ are odd
or there is an index $k\le r$ such
that all $d_i$ with $i\le k$ are even,
$d_j$ odd for $j>k$ and the even
$d_i$ are smaller than $d_{k+1},\dots,d_r$.
Furthermore, the even block lengths that
are larger than $d_{r+1}$ appear only
once among $d_1,\dots,d_k$.
\end{enumerate}
\end{defn}
\begin{defn}[Type (a)]
Let $\liea{p}$ be a simple parabolic subalgebra
of $\liea{sp}_{2n}$ or $\liea{so}_{2n}$,
given by the dimension vector
$d=(d_1,\dots,d_r,d_r,\dots,d_1)$.
Then we define the
{\itshape line diagram} $L_{even}(d)$
{\itshape associated to} $d$
(and $\liea{g}$)
as follows.
\begin{enumerate}
\item
Draw $2n$ vertices in $2r$ columns of length
$d_1,\dots$, top-adjusted.
Label the first $r$ columns with
the numbers $1,\dots, n$, top--bottom.
Label the second $r$ columns with the numbers
$n+1,\dots, 2n$, bottom--top.
\item
Join the first $r$ columns with horizontal lines
as for $\liea{sl}_n$. Draw the counterparts of
these lines in the second $r$ columns.
\item[(3) (i)]
If $\liea{g}=\liea{sp}_{2n}$, add
the lines $k$---$(2n-k+1)$.
\item[(3) (ii)]
If $\liea{g}=\liea{so}_{2n}$, one adds
the lines $(2l-1)$---$(2n-2l+1)$ and their
counterparts $2l$---$(2n-2l+2)$ if $n$
is even. If $n$ is odd, the lines
$2l$---$(2n-2l)$ and their counterparts
$(2l+1)$---$(2n-2l+1)$.
\end{enumerate}
\end{defn}
\begin{defn}[Type (b)]
Let $\liea{p}$ be a simple parabolic subalgebra
of $\liea{sp}_{2n}$ or of $\liea{so}_N$,
given by the dimension vector
$d$
$=(d_1,\dots,d_r,d_{r+1},d_r,\dots,d_1)$.
Then we define the
{\itshape line diagram} $L_{odd}(d)$
{\itshape associated to}
$d$ (and $\liea{g}$)
as follows.
\begin{enumerate}
\item
Draw $2r+1$ columns of length $d_1,\dots$,
top-adjusted. Label them with the numbers
$1,\dots$ in
increasing order, top--bottom in each column.
\item[(2) (i)]
For $\liea{sp}_{2n}$: \\
If $\min_i\{d_i\}\ge 2$, draw a horizontal sequence
of lines in the first row and all their
counterparts, forming a sequence joining the lowest
vertices of each column.
as long as the columns of the remaining
vertices are all at least of length two.
\item[(2) (ii)]
For $\liea{so}_N$: \\
If $d_1$ is odd, go to step (3) (ii).
If $d_1$ is even, do as in (2) (i),
drawing lines in the first row and their
counterparts joining the lowest vertices.
Repeat until either the first of the remaining
columns has odd length or there are no
vertices left to be joined.
Continue as in (3) (ii).
\item[(3) (i)]
For $\liea{sp}_{2n}$: \\
For the remaining vertices: draw horizontal
lines following the top-most remaining
vertices and simultaneously their
counterparts (the lowest remaining
vertices).
\item[(3) (ii)]
For $\liea{so}_N$: \\
All columns
have odd length. Connect the central
entries of each column. The remaining
column lengths are all even; they are
joined as in (2) (ii).
\end{enumerate}
\end{defn}
\begin{thm}\label{thm:line-richardson}
Let $d$ be the dimension vector
for a simple parabolic subalgebra of $\liea{sp}_{2n}$
or $\liea{so}_N$.
Then the associated diagram $L_{even}(d)$
resp.
$L_{odd}(d)$ determines a Richardson
element for $\liea{p}(d)$ by setting
\[
\begin{array}{ccll}
X(d) & = & \sum_{i\mbox{---}j,\ i\le n}E_{ij}
- \sum_{i\mbox{---}j,\ i>n}E_{ij}
& \mbox{for}\ \liea{sp}_{2n}\\
X(d) & = & \sum_{i\mbox{---}j,\ i+j<N}E_{ij}
- \sum_{i\mbox{---}j,\ i+j>N}E_{ij}
& \mbox{for} \ \liea{so}_N
\end{array}
\]
where the sums are over all lines in the diagram.
\end{thm}
We first include some
immediate consequences of this result.
After that we add an observation about
the (dual of the) partition corresponding to $X(d)$
and then we are ready to prove
Theorem~\ref{thm:line-richardson}.
Theorem~\ref{thm:line-richardson}
enables us to
determine the minimal
$k$ such that the Richardson element $X(d)$
lies in the graded parts
$\liea{g}_1\oplus\dots\oplus\liea{g}_k$.
To do so we introduce $s(d)$
as the maximal number of
entries $d_i,\dots,d_{i+s}$
of $d$ that are surrounded by larger
entries $d_{i-1}$ and $d_{i+s+1}$.
More precisely, if $d=(d_1,\dots,d_r,d_r,\dots,d_1)$
or $d=(d_1,\dots,d_r,d_{r+1},\dots,d_1)$
is the dimension vector, we rewrite $d$
as a vector with increasing indices,
$(c_1,\dots,c_r,c_{r+1},c_{r+2},\dots,c_{2r})$
resp.
$(c_1,\dots,c_r,c_{r+1},c_{r+2},\dots,c_{2r+1})$
and define
$s(d)
:=1+\max_i\{\text{there are}\ c_{j+1},\dots, c_{j+i}\mid
c_j>c_{j+l}<c_{j+i+1}\text{ for all }0\le l\le i\}$.
\begin{cor}\label{cor:bound-grade}
Let $\liea{p}(d)$ be a simple parabolic
subalgebra of the orthogonal or symplectic
Lie algebras.
Then the element $X(d)$ belongs to
$\liea{g}_1\oplus\dots\oplus\liea{g}_{s(d)}$.
The same holds for parabolic subalgebras
of $\liea{sl}_n$.
\end{cor}
This follows from the fact that $E_{ij}$
with $i$ from column $k$ of the line diagram
and $j$ from column $k+s$
is an entry of the graded part $\liea{g}_s$.
If, e.g., we have $c_1>c_j<c_{s+1}$ for $j=2,\dots,s$
then there is a line joining
columns one and $s+1$. So $X(d)$
has an entry in $\liea{g}_{s}$.
\begin{cor}
For $\liea{sl}_n$, $s(d)$ is equal to one
if and only if the dimension vector satisfies
$d_1\le\dots\le d_t\ge\dots\ge d_r$ for some
$1\le t\le r$.
\end{cor}
This well-known result has been observed by
Lynch~\cite{l}, Elashvili and Kac~\cite{ek},
Goodwin and R\"ohrle~\cite{gr},
and in our joint work with Wallach~\cite{bw}.
The next lemma shows how to obtain the dual of
the partition of $X(d)$ if $X(d)$ is given by
the appropriate line diagram for $d$.
\begin{lm}\label{lm:dual-part}
If $\liea{p}(d)$ is a simple parabolic subalgebra of a
symplectic or orthogonal Lie algebra let
$X=X(d)$ be given by the appropriate line diagram
$L_{even}(d)$ or $L_{odd}(d)$. The
dual of the partition of $X$ has the form
\[
\begin{array}{llll}
& \text{Dual of the partition of $X$} & \liea{g}
& \text{Type of $\liea{p}$} \\
& \\
(i) & d_1,d_1,\dots,d_r,d_r & \liea{sp}_{2n} & (a) \\
& & \\
(ii) & d_{r+1}\cup
\left(\bigcup_{d_i\notin D_o}d_i,d_i\right)
\cup\left(\bigcup_{d_i\in D_o} d_i-1,d_i+1\right)
& \liea{sp}_{2n} & (b) \\
& & \\
(iii) & \left(\bigcup_{d_i\text{even}} d_i,d_i\right)
\cup\left(\bigcup_{d_i\text{odd}}d_i-1,d_i+1\right)
& \liea{so}_{2n} & (a) \\
& & \\
(iv) & d_{r+1}\cup\left(
\bigcup_{d_i\notin D^e}
d_i,d_i\right)
\cup\left(\bigcup_{d_i\in D^e}d_i-1,d_i+1\right)
& \liea{so}_{2n+1} & (b) \\
& \\
(v) & d_{r+1}\cup\left(
\bigcup_{d_i\notin D^o}
d_i,d_i\right)
\cup\left(\bigcup_{d_i\in D^o}d_i-1,d_i+1\right)
& \liea{so}_{2n} & (b)
\end{array}
\]
where
$D_o:=\{d_i\text{ odd}\mid d_i<d_{r+1}\}$,
$D^o:=\{d_i\text{ odd}\mid d_i>d_{r+1}\}$
and
$D^e:=\{d_i\text{ even}\mid d_i>d_{r+1}\}$
are subsets of $\{d_1,\dots,d_r\}$.
In particular, if $D_o$, $D^e$ or $D^o$ are
empty, the partition in the corresponding
case (ii), (iv) or (v) has the same parts
as the dimension vector. The same is true
for (iii), if there are no odd $d_i$.
\end{lm}
The proof consists mainly in
counting lines and (sub)chains of lines of
the corresponding diagrams. Therefore we
postpone it and include it in the appendix.
We are now ready to prove Theorem~\ref{thm:line-richardson}
with the use of Theorem~\ref{thm:dim-cent-Jordan}
and of Lemma~\ref{lm:dual-part}.
\begin{proof}[Proof of Theorem~\ref{thm:line-richardson}]
We consider the case $\liea{g}=\liea{sp}_{2n}$.
For the parabolic subalgebras of an orthogonal Lie algebra,
the claim follows using the same methods.
The idea is to use the
dimension of the centralizer
of $X(d)$ and compare it to the dimension of
the Levi factor. To calculate the dimension of
the centralizer, we use the formulae of
Theorem~\ref{thm:dim-cent-Jordan}, i.e. we use the
dual of the partition of $X=X(d)$ as described
in Lemma~\ref{lm:dual-part} and the number of
odd parts in the partition of $X$.
\noindent
$\liea{sp}_{2n}$, type (a): \\
By Lemma~\ref{lm:dual-part}
the dual partition of the nilpotent
element $X=X(d)$ has as parts the entries of $d$.
Since they all appear in pairs, the partition
of the orbit has no odd entries. So
by the formula of
Theorem~\ref{thm:dim-cent-Jordan} we
obtain
$\dim\liea{g}^X=\frac{1}{2}(2d_1^2+\dots+2d_r^2)$,
the same as the dimension of the Levi
factor. In particular, $X$ is a Richardson element
for the parabolic subalgebra $\liea{p}(d)$ of $\liea{sp}_{2n}$.
\noindent
$\liea{sp}_{2n}$, type (b): \\
As in Lemma~\ref{lm:dual-part} let
$D_o\subset\{d_1,\dots,d_r\}$ be the possibly
empty set of the odd $d_i$ that are smaller than
$d_{r+1}$. Then the dual partition has the parts
\[
\{d_i,d_i \mid i\le r,\ d_i\notin D_o\}\cup\{d_{r+1}\}
\cup\{d_{i+1},d_{i-1}\mid d_i\in D_o\}.
\]
The $d_i$ that are not in $D_o$ come in pairs and do not
contribute to odd parts in the partition of
$X=X(d)$. In particular, the number of odd parts only
depends on $d_{r+1}$ and on the entries of $D_o$.
We write the
elements of $D_o$ in decreasing order as
$\tilde{d}_1,\dots,\tilde{d}_s$ (where $s=|D_o|$).
By assumption (the parabolic subalgebra is simple)
these odd entries are all different,
$\tilde{d}_1>\tilde{d}_2>\dots>\tilde{d}_s$.
Then the number of odd parts of the partition
of $X$ is the same as the number of odd parts
of the dual of the partition
\[
\tilde{P}:\quad d_{r+1},\tilde{d}_1+1,\tilde{d}_1-1,\dots,
\tilde{d}_s+1,\tilde{d}_s-1.
\]
This has $d_{r+1}-(\tilde{d}_1+1)$ ones,
$(\tilde{d}_1+1)-(\tilde{d}_1-1)$ twos,
$(\tilde{d}_1-1)-(\tilde{d}_2+1)$ threes, and so on.
So the number of odd parts in the dual of
$\tilde{P}$ is
\[
[d_{r+1}-(\tilde{d}_1+1)]+[(\tilde{d}_1-1)-(\tilde{d}_2+1)]
+ \dots
+ [(\tilde{d}_{s-1}-1)-(\tilde{d}_s+1)]+\tilde{d}_s-1
= d_{r+1}-2s.
\]
Thus the dimension of the centralizer of $X$ is
\begin{align*}
\frac{1}{2}
& \left[
\left(\sum_{\substack{i<r+1\\ d_i\notin D_o}}2d_i^2\right)
+ d_{r+1}^2 +
\left(\sum_{d_i\in D_o}(d_i-1)^2+(d_i+1)^2\right)
+ d_{r+1}-2s\right] \\
& = \sum_{i\le r}d_i^2
+\binom{d_{r+1}+1}{2} = \dim\liea{m}.
\end{align*}
\end{proof}
\subsection{Bala Carter labels for Richardson
orbits}
The support of the nilpotent element of a
simple line diagram is by construction a
simple system of roots. Namely, for any $d$,
the corresponding $X(d)$ has
at most one non-zero element in each row
and each column.
One can check that none of the corresponding
positive roots subtract from each other.
In other words, the support
$\operatorname{supp}(X)$ forms a simple system of roots.
\begin{re}
The converse statement is not true.
There are Richardson elements whose support
form a simple system of roots but where
there is no simple line diagram defining
a Richardson element.
A family of examples is given by the Borel subalgebras
of $\liea{so}_{2n}$ or more general,
parabolic subalgebras of $\liea{so}_{2n}$
where
$\alpha_n$ and $\alpha_{n-1}$ are both not
roots of the Levi factor.
\end{re}
If $X$ is a nilpotent element of $\liea{g}$
we denote
the $G$-orbit through $X$ by $\mathcal{O}_X$
(where $G$ is the adjoint group of $\liea{g}$).
\begin{cor}
Let $\liea{p}(d)$ be a
parabolic subalgebra of $\liea{sl}_n$,
with $X(d)$ defined by the
line diagram $L_h(d)$,
or a simple parabolic subalgebra of
(b)-type for $\liea{sp}_{2n}$ or $\liea{so}_N$,
with $X(d)$ given by the corresponding simple line diagram.
Then the group spanned by $\operatorname{supp} X(d)$ is equal to
the Bala-Carter label of the $G$-orbit
$\mathcal{O}_{X(d)}$.
\end{cor}
\begin{proof}
This follows from the characterization
of the type (i.e. the Bala-Carter label)
of $\mathcal{O}_X$ given by
Panyushev in Section 3 of~\cite{pan}.
For simplicity we assume $d_1\le \dots\le d_r$.
Note that in any case, the partition of
$X(d)$ is given by the chains in the
line diagram. The partition of $X(d)$ has
entry $i$ for every chain of length $i+1$.
If $\alpha$ given by $E_{ij}$ and $\beta$ given
by $E_{kl}$ are roots of
$\operatorname{supp} X(d)$ then they add to a root of
$\liea{sl}_n$ if and only if there is a
line connecting them.
Thus in the case of the special linear Lie algebra a chain
of length $i+1$ corresponds to a factor
$\lieg{A}_i$ in $\operatorname{supp} X(d)$.
Similarly, for $\liea{sp}_{2n}$ and $\liea{so}_N$,
a chain of
length $i+1$ together with its counterpart give
a factor $\lieg{A}_i$.
Finally, the possibly remaining
single chain of length $2j+1$ (passing
through the central vertex of column $r+1$)
in the case of $\liea{so}_{2n+1}$
gives a factor $\lieg{B}_j$.
Then the claim follows with~\cite{pan} where
Panyushev describes the type of a nilpotent orbit
in terms of its partition.
\end{proof}
\section{Branched diagrams}\label{se:branched}
The diagrams we have introduced had at
most one line to the left and at most one
line to the right of a vertex.
We call such a diagram a
{\itshape simple line diagram}.
In the case of simple parabolic subalgebras,
we can always choose
a simple line diagram to define a
Richardson element.
However, there are parabolic subalgebras
where no simple diagram gives rise to
a Richardson element.
After giving an example we characterize
the parabolic subalgebras for which
there exists a simple line diagram giving
a Richardson element.
Then we discuss the case of the symplectic
Lie algebras. We introduce a branched
diagram and obtain a
Richardson element for the parabolic
subalgebra in question.
\begin{ex}
1) Consider the parabolic subalgebra of
$\liea{so}_{2n}$ given by the dimension
vector $(n,n)$ where $n$ is odd.
The element $X=X(n,n)$
given by the diagram $L_{even}(n,n)$
has rank $n-1$ and so the kernel of the
map $X^k$ has dimension $n+1$ or $2n$
for $k=1,2$ resp. The partition of $X$
is then $1^2,2^{n-1}$, its dual
is $n-1,n+1$. The centralizer of
$X$ has dimension $\frac{1}{2}\bigl((n-1)^2+(n+1)^2-2\bigr)=n^2$ and the
Levi factor of this parabolic subalgebra
has dimension $n^2$. So $X$ is a Richardson
element.
2) Let $\liea{p}\subset\liea{so}_{4d}$
be given by $(d,d,d,d)$ where $d$ is odd.
Note that the skew-symmetry of the orthogonal
Lie algebra allows at most $d-1$ lines between
the two central columns.
\[
{\small
\xymatrix@-7mm{
\bullet\ar@{-}[r] & \bullet\ar@{-}[rd] & \bullet\ar@{-}[r]
& \bullet \\
\bullet\ar@{-}[r] & \bullet\ar@{-}[ru] & \bullet\ar@{-}[r]
& \bullet \\
\bullet\ar@{-}[r] &\bullet & \bullet\ar@{-}[r] & \bullet
}}
\]
The line diagram $L_{even}(d,d,d,d)$
has $2d+d-1$ lines,
$2(d-1)$ two-subchains and
$d-1$ three-chains. Calculating the
dimensions of the kernel of the
map $X^k$ (where $X=X(d,d,d,d)$) yields the partition
$2^2,4^{d-1}$. Its dual is
$(d-1)^2,(d+1)^2$, hence the centralizer
of $X$ has dimension $2d^2+2$ while
the Levi factor has dimension $2d^2$.
\end{ex}
\begin{thm}
Let $\liea{g}$ be a simple Lie algebra. The
parabolic subalgebras $\liea{p}$ of
$\liea{g}$ for which
there exists a simple line diagram that defines
a Richardson element for $\liea{p}$ are:
The parabolic subalgebras of $\liea{sl}_n$ and
the simple parabolic subalgebras of the symplectic
and orthogonal Lie algebras.
\end{thm}
\begin{proof}
By Theorems~\ref{thm:lines-rich}
and~\ref{thm:line-richardson}
there is always a simple line diagram giving
a Richardson element in these cases.
It remains to show that these are the only ones.
By Corollary~\ref{cor:reordering}
we can assume w.l.o.g. that $d_1\le\dots\le d_r$.
Then it turns out that if there is an even number
of blocks for $\liea{so}_{2n}$ or
if $d_r\le d_{r+1}$ for $\liea{sp}_{2n}$
the problem is translated to the problem of finding
a Richardson element in the first graded
part $\liea{g}_1$ of $\liea{g}$ because
of the following observation:
Since
$d_1\le\dots\le d_r=d_r\ge\dots\ge d_1$,
or $d_1\le\dots\le d_r\le d_{r+1}\ge d_r\ge\dots\ge d_1$
all lines are connecting neighbored
columns. But lines connecting neighbored
columns correspond to entries $E_{i,j}$
of the first super diagonal of the parabolic
subalgebra, i.e. to entries of $\liea{g}_1$.
Then the claim follows from the classification
of parabolic subalgebras with a Richardson
element in $\liea{g}_1$ for
type (a) of $\liea{so}_{2n}$
and if $d_r\le d_{r+1}$ for type (b)
parabolic subalgebras of the symplectic
Lie algebra.
In both cases there exists a Richardson
element in $\liea{g}_1$ if and only if
each odd block length $d_i$ only appears
once among $d_1,\dots,d_r$, cf.~\cite{bw}.
If there is no Richardson element in
$\liea{g}_1$ then in particular no simple line
diagram can give a Richardson element.
It remains to deal with (b)-types for $\liea{so}_N$
and (b)-types for $\liea{sp}_{2n}$
where $d_{r+1}$ is not maximal.
Both are straightforward but rather lengthy calculations
that we omit here.
\end{proof}
By way of illustration we include examples of
branched diagrams for non-simple parabolic
subalgebras of $\liea{sp}_{2n}$ and of $\liea{so}_N$
in the appendix.
In general, it is not clear how branched diagrams
should be defined uniformly for the
symplectic and orthogonal Lie algebras.
It is clear from the description of simple
parabolic subalgebras of $\liea{so}_N$ that
this case is more intricate. We assume that
Richardson elements can be obtained by
adding lines to the corresponding simple
line diagrams:
\begin{conj}
For the (b)-type of $\liea{sp}_{2n}$
the appropriate diagram defining a Richardson
element is obtained from $L_{odd}(d)$
by adding a branching
for every repetition $d_i=d_{i+1}=\dots=d_{i+s}$
of odd entries smaller than $d_{r+1}$.
\end{conj}
We conclude this section with a remark on
the bound $s(d)$ introduced in
Section~\ref{se:BCD-type}. If there is
no simple line diagram defining a Richardson
element, we can still define
$s(d)$ to be the maximal length of a sequence
of entries of $d$ that are surrounded by
two larger entries. But this will now
only be a lower bound, the Richardson
element defined by a branched diagram
does not necessarily lie in
$\liea{g}_1\oplus\dots\oplus\liea{g}_{s(d)}$,
cf. Examples~\ref{ex:branched-sp},~\ref{ex:branched-sp22},
and~\ref{ex:branched-so}.
\section*{Appendix}
We discuss some examples of branched line diagrams
for $\liea{sp}_{2n}$ and for $\liea{so}_N$
to illustrate Section~\ref{se:branched}.
Recall that the parabolic subalgebras of
type (b) of $\liea{sp}_{2n}$ are simple if
and only if every odd $d_i<d_{r+1}$ only appears
once among $d_1,\dots,d_r$. In particular, the
smallest example of $\liea{sp}_{2n}$ admitting no
simple line diagram occurs for $n=3$.
\begin{ex}\label{ex:branched-sp}
Let $\liea{p}$ be the
parabolic subalgebra of $\liea{sp}_6$ with
dimension vector $(1,1,2,1,1)$. Consider
the diagrams
\[
{\small
\xymatrix@-5mm{ & & \ & \\
1\ar@{-}[r] & 2\ar@{-}[r] & 3 & 5\ar@{-}[r] & 6 \\
& & 4\ar@{-}[ru]
}\quad\quad
\xymatrix@-6mm{ & & 3 \\
1\ar@{-}[r] & 2\ar@{-}[ru]\ar@{--}[rr] &
& 5\ar@{-}[r] & 6 \\
& & 4\ar@{-}[ru]
}}
\]
The diagram to the left is a line diagram as in
Section~\ref{se:BCD-type}. The corresponding
nilpotent element has a centralizer of dimension
$7$.
However, the Levi factor is five dimensional.
In the second diagram, there is one extra
line, connecting the vertices $2$ and $5$. The defined
matrix $X=E_{12}+E_{23}+E_{25}-E_{45}-E_{56}$
has a five dimensional centralizer
as needed.
\end{ex}
\begin{ex}\label{ex:branched-sp22}
The following branched line diagram for the
parabolic subalgebra of
$\liea{sp}_{22}$ with dimension vector
$d=(1,1,1,3,3,4,3,3,1,1,1)$ gives a Richardson
element for $\liea{p}(d)$
\[
{\small
\xymatrix@-4mm{ & & & & & 10\ar@{-}[rd] \\
& & & 4\ar@{-}[r] & 7\ar@{-}[ru]\ar@{--}[rrdd]
& 11 & 14\ar@{-}[r] & 17 \\
1\ar@{-}[r] & 2\ar@{-}[r] & 3\ar@{-}[ru] & 5\ar@{-}[r]
& 8\ar@{-}[ru]\ar@{--}[rr]
& & 15\ar@{-}[r]
& 18 & 20\ar@{-}[r] & 21\ar@{-}[r] & 22 \\
& & & 6\ar@{-}[r] & 9\ar@{-}[rd] & 12\ar@{-}[ru]
& 16\ar@{-}[r] & 19\ar@{-}[ru] & \\
& & & & & 13\ar@{-}[ru]
}}
\]
The Levi factor and the centralizer of the
constructed $X$ have dimension $31$.
\end{ex}
\begin{ex}\label{ex:branched-so}
For the orthogonal Lie algebras, the
smallest examples are given by $d=(1,1,2,2,1,1)$,
i.e. (a)-type of $\liea{g}=\liea{so}_8$
and by $d=(2,2,1,2,2)$
for an odd number of blocks
in $\liea{so}_9$. The following branched
diagrams give Richardson elements for the
corresponding parabolic subalgebras.
\[
{\small
\xymatrix@-6mm{
& & 3\ar@{-}[rdd] & 6\ar@{-}[rd] \\
1\ar@{-}[r]\ar@{--}[rrd]&2\ar@{-}[ru] & & &7\ar@{-}[r]&8\\
& & 4\ar@{-}[ruu]& 5\ar@{--}[rru]
}\quad\quad
\xymatrix@-6mm{
1\ar@{-}[r] & 3\ar@{-}[rd]\ar@{--}[rr] & & 6\ar@{-}[r] & 8 \\
& & 5\ar@{-}[rd] & & \\
2\ar@{-}[r] & 4\ar@{--}[rr] & & 7\ar@{-}[r] & 9
}}
\]
\end{ex}
\begin{proof}[Proof of Lemma~\ref{lm:dual-part}]
We prove the statement for the symplectic
Lie algebras. The corresponding statements
for $\liea{so}_N$ are proven similarly.
\noindent
\underline{(i) - Type (a) of $\liea{sp}_{2n}$}:
\noindent
Note that the bottom-top ordering of the second
half of $L_{even}(d)$
ensures that the counterpart of a line
$i$---$j$ (for $j\le n$) is again
horizontal and that all lines
connecting any entry of column $r$ to an
entry to its right are horizontal. Therefore the
line diagram $L_{even}$ has the same shape
as the horizontal line diagram defined for
$\liea{sl}_n$.
In particular, the orbit of the nilpotent
element defined by $L_{even}(d)$
has the same partition as the one defined
by $L_h(d)$. Then the assertion follows with
Lemma~\ref{lm:diagr-Jordan}.
\noindent
\underline{(ii) - Type (b) of $\liea{sp}_{2n}$}:\\
The proof is done by induction on $r$.
Let $d=(d_1,d_2,d_1)$ be the dimension vector.
If $d_1\notin D_o$ (i.e. $d_1$ is not an odd entry smaller
than $d_2$) then the line diagram $L_{even}(d_1,d_2,d_1)$
has the same
chains of lines as the horizontal diagram for $\liea{sl}_{2n}$.
For $d_1\in D_o$ the diagram $L_{even}(d_1,d_2,d_1)$
has $d_1-1$ two-chains
(chains of length two) and $2$ one-chains (i.e. lines).
So the kernel of the map $X^k$ has dimension
$d_2$, $d_1+d_2+1$, $2d_1+d_2$ for $k=1,2,3$,
giving the partition $1^{d_2-d_1-1}, 2^2,3^{d_1-1}$
and the dual of it is $d_2,d_1+1,d_1-1$ as claimed.
Let now $d=(d_1,\dots,d_r,d_{r+1},d_r,\dots,d_1)$ with
$d_1\le\dots\le d_{r+1}$ and assume by the induction
hypothesis that the claim holds for
$d'=(d_2,\dots,d_r,d_{r+1},d_r,\dots,d_2)$.
Let $d_1$ be even. If $d_1=d_{r+1}$ then the diagram
$L_{odd}(d)$ is the same as $L_h(d)$, the claim follows
immediately.
If $d_1<d_{r+1}$,
the diagram $L_{odd}(d)$ is obtained from $L_{odd}(d')$
by extending $d_1$ $(2r-2)$-chains
to $2r$-chains.
The kernels of the map $X^k$ satisfy
$\dim\ker X^k=\dim\ker Y^k$ for $k\le 2r-1$,
$\dim\ker X^{2r}=2n-d_1=\dim\ker Y^{2r}+d_1$
and $\dim\ker X^{2r+1}=2n=\dim\ker Y^{2r+1}+2d_1$
where $Y\in\liea{sp}_{2n-2d_1}$ is defined by the
line diagram $L_{even}(d')$.
If the partition of $Y$ is $1^{b_1},2^{b_2},
\dots,(2r-1)^{b_{2r-1}}$
then the partition of $X$ is
\[
1^{b_1},\dots,(2r-2)^{b_{2r-2}}, (2r-1)^{b_{2r-1}-d_1},
(2r)^0,(2r+1)^{d_1}.
\]
Thus the dual of this partition is the dual of the partition
of $Y$ together
with the parts $d_1,d_1$.
If $d_1$ is even and $d_1>d_{r+1}$, the diagram $L_{odd}(d)$
is obtained
from $L_{odd}(d')$ by extending $d_{r+1}$ $(2r-2)$-chains to
$2r$-chains
and by extending $d_1-d_{r+1}$ $(2r-3)$-chains to $(2r-1)$-chains.
Here we get
$\dim\ker X^k =\dim\ker Y^k$ for $k\le 2r-2$,
$\dim\ker X^{2r-1}= \dim\ker Y^{2r-1}+d_1-d_{r+1}$,
$\dim\ker X^{2r}= 2n-d_{r+1}=\dim\ker Y^{2r}+2d_1-d_{r+1}$
and $\dim\ker X^{2r+1}=2n=\dim\ker Y^{2r+1}+2d_1$.
So the partition of $X$ can be calculated to be
\[
1^{b_1},\dots,(2r-3)^{b_{2r-3}},(2r-2)^{b_{2r-2}-d_1+d_{r+1}},
(2r-1)^{b_{2r-1}-d_{r+1}},
(2r)^{d_1-d_{r+1}},(2r+1)^{d_{r+1}}
\]
with $b_{2r-1}=d_{r+1}$. Again, the dual of the partition of
$X$ is obtained
from the dual of the partition of $Y$ by adding $d_1,d_1$.
Let $d_1$ be odd and $d_1>d_{r+1}$. In particular, there are no
odd $d_i$ that are smaller than $d_{r+1}$. The shape of
$L_{odd}(d)$
is the same as the diagram for $\liea{sl}_{2n}$ (i.e. they have
the same chain lengths). So the dual of the partition is just the
dimension vector and we are done.
If $d_1<d_{r+1}$, the diagram $L_{odd}(d)$ is obtained from
$L_{odd}(d')$ by extending
$d_1-1$ $(2r-2)$-chains to $2r$-chains
and by extending two $(2r-2)$-chains to $(2r-1)$-chains.
The calculations
of the dimensions of the kernels for $X$ (compared to those for
$Y$)
give as partition of $X$:
\[
1^{b_1},\dots,(2r-2)^{b_{2r-2}}, (2r-1)^{b_{2r-1}-d_1-1},
(2r)^{2},(2r+1)^{d_1-1}
\]
Hence the dual of the partition of $X$ is
obtained from
the dual of the partition of $Y$ by adjoining $d_1+1,d_1-1$.
\end{proof}
\end{document} |